--- /dev/null
+#!/usr/bin/env python
+# Anna Agents-Oriented Testing Setup Launcher
+
+# Requires PyYAML: pip install pyyaml
+
+import os, sys, datetime, glob, json, time, shutil, filecmp
+from argparse import ArgumentParser
+import xml.etree.ElementTree
+#from pathlib import Path
+
+# PyYAML
+from yaml import load, dump
+try:
+ from yaml import CLoader as Loader, CDumper as Dumper
+except ImportError:
+ from yaml import Loader, Dumper
+
+from enum import Enum
+
+
+class AttributeType(Enum):
+ Mandatory = "Mandatory"
+ Optional = "Optional"
+
+
+def _exit(message = None, rc=0):
+ if (message): printMsg(message)
+ printMsg("Exiting [rc={}]".format(rc))
+ sys.exit(rc)
+
+
+def timeFormat():
+ return '%b.%d.%Y-%H.%M.%S'
+
+
+def printMsg(message):
+ print ("[{}] {}".format(datetime.datetime.now().strftime(timeFormat()), message))
+
+
+class YamlConfigParser():
+ """
+ Yaml parser
+ """
+ def __init__(self, yaml_config_file):
+ """
+ Convert the yaml file into a Python object
+ """
+ self.data = None
+        with open(yaml_config_file, 'r') as ss:
+            self.data = load(ss, Loader=Loader)
+
+ def getData(self):
+ return self.data
+
+ def show(self, options):
+ output = dump(self.data, Dumper=Dumper)
+ print(output)
+
+
+def writeFileContent(filename, content):
+    with open(filename, "w") as _file:
+        _file.write(content)
+
+
+def getNodeValue(dictionary, key, attributeType = AttributeType.Mandatory):
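+    """Return dictionary[key]; exit when a Mandatory key is missing, return None when Optional."""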
+
+ # Type checking:
+ if not isinstance(attributeType, AttributeType):
+ raise TypeError("'attributeType' must be an instance of AttributeType Enum")
+
+ value = None
+ try:
+ value = dictionary[key]
+ except Exception as e:
+        if (attributeType == AttributeType.Mandatory): _exit("Missing mandatory key: {}".format(e), 1)
+
+ #printMsg("getNodeValue -> {}: {}".format(key, value))
+ return value
+
+
+def get_parent_dir(_file):
+ abspath = os.path.abspath(_file)
+ return os.path.dirname(abspath)
+
+
+def is_absolute_path(path):
+ return os.path.isabs(path)
+
+
+def provision_begin(iplimit):
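+    """Seed the global provision content with the initial 'test|report', 'test|ip-limit' and 'test|dump-stdout' operations."""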
+ global GProvisionContent
+
+ GProvisionContent += '''test|report
+test|ip-limit|{}
+test|dump-stdout|yes
+'''.format(iplimit)
+
+def assertFile(filepath):
+ if not (os.path.exists(filepath)):
+ raise Exception ("File '{}' not found !".format(filepath))
+
+
+def assertAgentId(agent_id):
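+    """Return the agent template (ADML, KAFKA or HTTPMOCK) whose directory contains '<agent_id>.node', or raise when unregistered."""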
+ global agents_ADML_dir
+ global agents_KAFKA_dir
+ global agents_HTTPMOCK_dir
+
+ adml_node_file = agents_ADML_dir + "/{}.node".format(agent_id)
+ kafka_node_file = agents_KAFKA_dir + "/{}.node".format(agent_id)
+ httpmock_node_file = agents_HTTPMOCK_dir + "/{}.node".format(agent_id)
+ if (os.path.exists(adml_node_file)):
+ return "ADML"
+ elif (os.path.exists(kafka_node_file)):
+ return "KAFKA"
+ elif (os.path.exists(httpmock_node_file)):
+ return "HTTPMOCK"
+ else:
+ raise Exception ("Agent id '{}' not found as registered. Check the agents yaml configuration loaded !".format(agent_id))
+
+
+def modify_xml_avp_data(stepNumber, arguments):
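+    """Write 'new_xml' from 'xml', replacing the 'data'/'hex-data' attribute of every element matching each provided xpath."""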
+ assertSupportedKeys(stepNumber, arguments, ["xml", "new_xml", "xpath_value_list"])
+
+ _xml = getNodeValue(arguments, "xml")
+ if not is_absolute_path(_xml): _xml = GTcDir + "/" + _xml
+ assertFile(_xml)
+
+ new_xml = getNodeValue(arguments, "new_xml")
+ if not is_absolute_path(new_xml): new_xml = GTcDir + "/" + new_xml
+
+ # xml tree:
+ et = xml.etree.ElementTree.parse(_xml)
+ root = et.getroot()
+
+ xv_list = getNodeValue(arguments, "xpath_value_list")
+ for item in xv_list:
+ assertSupportedKeys(stepNumber, item, ["xpath", "value"])
+ xpath = getNodeValue(item, "xpath")
+ value = getNodeValue(item, "value")
+
+ # Replace operation:
+ targets = root.findall(xpath)
+ for target in targets:
+ if "data" in target.attrib:
+ target.attrib["data"] = value
+ elif "hex-data" in target.attrib:
+ target.attrib["hex-data"] = value
+
+ et.write(new_xml)
+
+
+def modify_json_key_value(stepNumber, arguments):
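+    """Write 'new_json' from 'json', setting the value at each dot-separated key path (kpath)."""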
+ assertSupportedKeys(stepNumber, arguments, ["json", "new_json", "kpath_value_list"])
+
+ _json = getNodeValue(arguments, "json")
+ if not is_absolute_path(_json): _json = GTcDir + "/" + _json
+ assertFile(_json)
+
+ new_json = getNodeValue(arguments, "new_json")
+ if not is_absolute_path(new_json): new_json = GTcDir + "/" + new_json
+
+ # Replace operation:
+ with open(_json, 'r') as json_file:
+ json_data = json_file.read()
+ json_dict = json.loads(json_data)
+
+ kv_list = getNodeValue(arguments, "kpath_value_list")
+ for item in kv_list:
+ assertSupportedKeys(stepNumber, item, ["kpath", "value"])
+ kpath = getNodeValue(item, "kpath")
+ value = getNodeValue(item, "value")
+
+ key_path_list = kpath.split(".")
+ elem = key_path_list[0]
+ if (len(key_path_list) == 1):
+ json_dict[elem] = value
+
+ else:
+ aux_dict = json_dict[elem]
+ #print("aux dict: " + str(aux_dict))
+ for elem in key_path_list[1:-1]:
+ aux_dict = aux_dict[elem]
+
+ aux_dict[key_path_list[-1]] = value
+
+
+ content = json.dumps(json_dict)
+ writeFileContent(new_json, content)
+
+
+def system_cmd(stepNumber, arguments):
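+    """Provision an sh-command step from either an inline 'shell' string or an executable 'file' (with optional 'file_parameters')."""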
+ global GProvisionContent
+
+ assertSupportedKeys(stepNumber, arguments, ["shell", "file", "file_parameters"])
+
+ shell = getNodeValue(arguments, "shell", AttributeType.Optional)
+ _file = getNodeValue(arguments, "file", AttributeType.Optional)
+ fp = getNodeValue(arguments, "file_parameters", AttributeType.Optional)
+ file_parameters = ""
+ if (fp): file_parameters = fp
+
+ if (shell and _file):
+ raise Exception ("Both 'shell' and 'file' cannot be provided at system_cmd action !")
+
+ if (not shell and not _file):
+ raise Exception ("Neither 'shell' nor 'file' have been provided at system_cmd action !")
+
+ if (shell and fp):
+ raise Exception ("Both 'shell' and 'file_parameters' cannot be provided at system_cmd action !")
+
+ if (shell):
+ GProvisionContent += 'test|{}|sh-command|{}\n'.format(GTcNumber, shell)
+
+ elif (_file):
+ if not is_absolute_path(_file): _file = GTcDir + "/" + _file
+ assertFile(_file)
+ GProvisionContent += 'test|{}|sh-command|{} {}\n'.format(GTcNumber, _file, file_parameters)
+
+
+def process_test_case_step_GENERIC(stepNumber, action_id, arguments):
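+    """Translate agent-agnostic actions (sh_command, ip_limit, timeout_ms, delay_ms, modify_xml_avp_data, modify_json_key_value, system_cmd) into provision operations."""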
+ global GProvisionContent
+ global GTcNumber
+ global GIpLimit
+
+ if action_id == "sh_command":
+ GProvisionContent += 'test|{}|sh-command|{}\n'.format(GTcNumber, getNodeValue(arguments, "value"))
+ elif action_id == "ip_limit":
+ il = getNodeValue(arguments, "value")
+ if (il):
+ value = None
+ if (il == "launcher"):
+ value = GIpLimit
+ else:
+ value = il
+
+ GProvisionContent += 'test|{}|ip-limit|{}\n'.format(GTcNumber, value)
+
+ elif action_id == "timeout_ms":
+ GProvisionContent += 'test|{}|timeout|{}\n'.format(GTcNumber, getNodeValue(arguments, "value"))
+ elif action_id == "delay_ms":
+ GProvisionContent += 'test|{}|delay|{}\n'.format(GTcNumber, getNodeValue(arguments, "value"))
+ elif action_id == "modify_xml_avp_data":
+ # Always create the step or step references will be impossible to determine:
+ GProvisionContent += 'test|{}|sh-command|echo "{}"\n'.format(GTcNumber, "Replacing xml file: " + str(arguments))
+ modify_xml_avp_data(stepNumber, arguments)
+ elif action_id == "modify_json_key_value":
+ # Always create the step or step references will be impossible to determine:
+ GProvisionContent += 'test|{}|sh-command|echo "{}"\n'.format(GTcNumber, "Replacing json file: " + str(arguments))
+ modify_json_key_value(stepNumber, arguments)
+ elif action_id == "system_cmd":
+ system_cmd(stepNumber, arguments)
+ else:
+ raise Exception("ERROR: Step {}: unsupported generic action-id '{}'".format(stepNumber, action_id))
+
+
+def assertSupportedKeys(stepNumber, arguments, supported):
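+    """Raise when 'arguments' contains a key not allowed for this step."""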
+ for key in arguments:
+        if key not in supported:
+ raise Exception("ERROR: Step {}: unsupported argument '{}' (allowed: {})".format(stepNumber, key, str(supported)))
+
+
+def process_test_case_step_ADML(stepNumber, agent_id, action_id, arguments):
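+    """Translate ADML diameter actions into sendxml2e/waitfe-xml/sendxml2c/waitfc-xml provision operations."""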
+ global GProvisionContent
+ global GTcDir
+
+ if action_id == "send_xml_to_entity":
+ assertSupportedKeys(stepNumber, arguments, ["xml", "answers_to"])
+
+ xml = getNodeValue(arguments, "xml")
+ if not is_absolute_path(xml): xml = GTcDir + "/" + xml
+ assertFile(xml)
+
+ at = getNodeValue(arguments, "answers_to", AttributeType.Optional)
+ answers_to = ""
+ if (at): answers_to = "|{}".format(at)
+
+ GProvisionContent += 'test|{}|sendxml2e|{}{}\n'.format(GTcNumber, xml, answers_to)
+
+ elif action_id == "wait_xml_from_entity":
+ assertSupportedKeys(stepNumber, arguments, ["xml"])
+
+ xml = getNodeValue(arguments, "xml")
+ if not is_absolute_path(xml): xml = GTcDir + "/" + xml
+ assertFile(xml)
+
+ GProvisionContent += 'test|{}|waitfe-xml|{}\n'.format(GTcNumber, xml)
+
+ elif action_id == "send_xml_to_client":
+ assertSupportedKeys(stepNumber, arguments, ["xml", "answers_to"])
+
+ xml = getNodeValue(arguments, "xml")
+ if not is_absolute_path(xml): xml = GTcDir + "/" + xml
+ assertFile(xml)
+
+ at = getNodeValue(arguments, "answers_to", AttributeType.Optional)
+ answers_to = ""
+ if (at): answers_to = "|{}".format(at)
+
+ GProvisionContent += 'test|{}|sendxml2c|{}{}\n'.format(GTcNumber, xml, answers_to)
+
+ elif action_id == "wait_xml_from_client":
+ assertSupportedKeys(stepNumber, arguments, ["xml"])
+
+ xml = getNodeValue(arguments, "xml")
+ if not is_absolute_path(xml): xml = GTcDir + "/" + xml
+ assertFile(xml)
+
+ GProvisionContent += 'test|{}|waitfc-xml|{}\n'.format(GTcNumber, xml)
+
+ else:
+ raise Exception("ERROR: Step {}: unsupported generic action-id '{}' for ADML node type (agent '{}')".format(stepNumber, action_id, agent_id))
+
+
+def process_test_case_step_HTTPMOCK(stepNumber, agent_id, action_id, arguments):
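+    """Translate the 'serve_json' action into a call to agents/HTTPMOCK/<agent-id>-provision.sh."""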
+ global GProvisionContent
+ global GTcDir
+ global agents_HTTPMOCK_dir
+
+ if action_id == "serve_json":
+ assertSupportedKeys(stepNumber, arguments, ["json", "method", "uri"])
+
+ json = getNodeValue(arguments, "json")
+ if not is_absolute_path(json): json = GTcDir + "/" + json
+ assertFile(json)
+
+ method = getNodeValue(arguments, "method")
+ uri = getNodeValue(arguments, "uri")
+
+
+ # Provision script is in the form: agents/HTTPMOCK/<agent-id>-provision.sh
+ provision_script = agents_HTTPMOCK_dir + "/" + agent_id + "-provision.sh "
+ args = str(GTcNumber) + " " + str(stepNumber) + " " + GTcDir + " \"" + json + "\" " + method + " \"" + uri + "\""
+ GProvisionContent += 'test|{}|sh-command|{}\n'.format(GTcNumber, provision_script + args)
+
+ else:
+ raise Exception("ERROR: Step {}: unsupported generic action-id '{}' for HTTPMOCK node type (agent '{}')".format(stepNumber, action_id, agent_id))
+
+
+def process_test_case_step_KAFKA(stepNumber, agent_id, action_id, arguments):
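+    """Translate consume_json/produce_json/admin actions into calls to the agent consumer/producer/admin scripts."""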
+ global GProvisionContent
+ global GTcDir
+ global agents_KAFKA_dir
+
+ if action_id == "consume_json":
+ assertSupportedKeys(stepNumber, arguments, ["json", "timeout", "auto_offset_reset", "background", "debug"])
+
+ json = getNodeValue(arguments, "json")
+ if json != "any":
+ if not is_absolute_path(json): json = GTcDir + "/" + json
+ assertFile(json)
+
+ autoOffsetReset = ""
+ aor = getNodeValue(arguments, "auto_offset_reset", AttributeType.Optional)
+ if (aor): autoOffsetReset = aor
+
+ timeout = 0
+ to = getNodeValue(arguments, "timeout", AttributeType.Optional)
+ if(to): timeout = to
+
+ background = ""
+ bck = getNodeValue(arguments, "background", AttributeType.Optional)
+ if (bck == "yes"): background = "&"
+
+ debug = "no"
+ deb = getNodeValue(arguments, "debug", AttributeType.Optional)
+ if (deb): debug = deb
+
+ # Consumer script is in the form: agents/KAFKA/<agent-id>-consumer.sh
+ consumer_script = agents_KAFKA_dir + "/" + agent_id + "-consumer.sh "
+ args = json + " " + autoOffsetReset + " " + str(timeout) + " " + debug
+ GProvisionContent += 'test|{}|sh-command|{}{}\n'.format(GTcNumber, consumer_script + args, background)
+
+ elif action_id == "produce_json":
+ assertSupportedKeys(stepNumber, arguments, ["json", "delay_ms", "background", "debug"])
+
+ json = getNodeValue(arguments, "json")
+ if not is_absolute_path(json): json = GTcDir + "/" + json
+ assertFile(json)
+
+ delay_ms = 0
+ dl = getNodeValue(arguments, "delay_ms", AttributeType.Optional)
+ if(dl): delay_ms = dl
+
+ background = ""
+ bck = getNodeValue(arguments, "background", AttributeType.Optional)
+ if (bck == "yes"): background = "&"
+
+ debug = "no"
+ deb = getNodeValue(arguments, "debug", AttributeType.Optional)
+ if (deb): debug = deb
+
+ # Producer script is in the form: agents/KAFKA/<agent-id>-producer.sh
+ producer_script = agents_KAFKA_dir + "/" + agent_id + "-producer.sh "
+ args = json + " " + str(delay_ms) + " " + debug
+ GProvisionContent += 'test|{}|sh-command|{}{}\n'.format(GTcNumber, producer_script + args, background)
+
+ elif action_id == "admin":
+ assertSupportedKeys(stepNumber, arguments, ["operation", "debug"])
+
+ operation = getNodeValue(arguments, "operation")
+
+ debug = "no"
+ deb = getNodeValue(arguments, "debug", AttributeType.Optional)
+ if (deb): debug = deb
+
+ # Admin script is in the form: agents/KAFKA/<agent-id>-admin.sh
+ admin_script = agents_KAFKA_dir + "/" + agent_id + "-admin.sh "
+ args = operation + " " + debug
+ GProvisionContent += 'test|{}|sh-command|{}\n'.format(GTcNumber, admin_script + args)
+
+ else:
+ raise Exception("ERROR: Step {}: unsupported generic action-id '{}' for KAFKA node type (agent '{}')".format(stepNumber, action_id, agent_id))
+
+
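+# Each test case .yml file is a list of steps parsed by the function below. Illustrative
+# step layout (inferred from the parsing code; the agent id, xml file and step reference
+# shown here are hypothetical):
+#
+#   - action: "adml1/send_xml_to_entity"   # '[agent id/]<action id>'
+#     arguments:
+#       xml: "ccr.xml"
+#       answers_to: 1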
+def process_test_case_step(stepNumber, dictionary):
+
+ action = getNodeValue(dictionary, "action")
+ arguments = getNodeValue(dictionary, "arguments")
+
+ # Action is in the form '[agent id/]<action id>':
+ # OPTIONAL: agent_id
+ # MANDATORY: action_id
+ agent_id = None
+ agent_template = None
+ try:
+ agent_id, action_id = action.split('/')
+ agent_template = assertAgentId(agent_id)
+
+    except ValueError:
+        action_id = action
+
+ # Actions for ADML
+ if agent_template == "ADML":
+ process_test_case_step_ADML(stepNumber, agent_id, action_id, arguments)
+ elif agent_template == "KAFKA":
+ process_test_case_step_KAFKA(stepNumber, agent_id, action_id, arguments)
+ elif agent_template == "HTTPMOCK":
+ process_test_case_step_HTTPMOCK(stepNumber, agent_id, action_id, arguments)
+ else:
+ process_test_case_step_GENERIC(stepNumber, action_id, arguments)
+
+ #trace = "Step {}, Agent-Id '{}', Action-Id '{}', Parameters: {}"
+ #print(trace.format(stepNumber, str(agent_id), action_id, str(arguments)))
+
+
+def process_test_case_yml(testcaseList):
+    for indx, step in enumerate(testcaseList, start=1):
+        process_test_case_step(indx, step)
+
+
+def provision_test_case(filename, testcaseList):
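+    """Assign the next test case number, record its id/description and provision its steps."""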
+
+ global GTcNumber
+ global GTcDir
+ global GIdsVsDescs
+ global GProvisionContent
+ GTcNumber += 1
+ GTcDir = get_parent_dir(filename)
+
+ id_desc = "{} : {}".format(GTcNumber, filename)
+ GIdsVsDescs += id_desc + "\n"
+ tc_desc = "test case '{}'".format(id_desc)
+
+ printMsg("Provisioning {} ...".format(tc_desc))
+
+ # Set test case description
+ GProvisionContent += 'test|{}|description|{}\n'.format(GTcNumber, filename)
+
+ # Process the yml definition for the test case
+ process_test_case_yml(testcaseList)
+
+
+def provision_tests(files):
+ for filename in files.splitlines():
+        if not filename.strip():
+            continue
+        if filename.startswith("#"):
+ printMsg("Ignoring commented test case: '{}'".format(filename))
+ else:
+ # Test case is a list of steps:
+ tc = YamlConfigParser(filename)
+ provision_test_case(filename, tc.getData())
+
+
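+# Command line parsing. Illustrative invocation (the script path is whatever this file is
+# installed as; 'tests' is a hypothetical tests directory):
+#   python <this-script> --tests-dir tests --dry-run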
+def parse_arguments():
+
+ parser = ArgumentParser(description='Anna Agents-Oriented Testing Setup Launcher')
+ parser.add_argument('-t', '--tests-dir', help='Tests parent directory where to find .yml files (from the next directories level)', required=True)
+ parser.add_argument('-k', '--keep-list-if-exists', help='Keeps intact the list of test cases (<test-dir>/launcher.list), creates it if missing', required=False, action='store_true')
+ parser.add_argument('-s', '--stop-adml-at-the-end', help='At the end, ADML keeps running to ease debugging. You could force stop with this option', required=False, action='store_true')
+ parser.add_argument('-i', '--interactive', help='Interactive execution to ease debugging of test cases', required=False, action='store_true')
+ parser.add_argument('-d', '--dry-run', help='Used to test and debug provision, no execution is launched', required=False, action='store_true')
+    parser.add_argument('-p', '--ip-limit', help="In-Progress limit is the number of coexisting In-Progress State test cases. Defaults to 1 (sequential); -1 means 'no limit'.", required=False)
+ parser.add_argument('-r', '--ttps', help="Rate of test cases launched (test ticks per second). By default 50 (recommended for monothread version).", required=False)
+
+
+ arguments = parser.parse_args()
+
+ return arguments
+
+
+def start_agents():
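+    """Restart ADML (stop.sh/start.sh) and poll its health through operation.sh, up to 5 retries; return the last return code (0 means healthy)."""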
+    # At the moment, only ADML is started (KAFKA/HTTPMOCK agents use scripts):
+ global agents_ADML_dir
+
+ rc = 1
+ maxRetries = 5
+ retry = 0
+ os.system(agents_ADML_dir + "/stop.sh")
+ os.system(agents_ADML_dir + "/start.sh")
+
+ rc = adml_operation("node >/dev/null")
+ while rc != 0:
+ retry += 1
+ if (retry > maxRetries): break
+ time.sleep(1)
+ printMsg("Check ADML health retry ({}/{}) ...".format(retry, maxRetries))
+ rc = adml_operation("node >/dev/null")
+
+ return rc
+
+
+def adml_operation(arguments):
+ global agents_ADML_dir
+
+ rc = os.system(agents_ADML_dir + "/operation.sh " + arguments)
+ return rc
+
+
+def adml_operation_output(operation):
+ global agents_ADML_dir
+
+ output = os.popen(agents_ADML_dir + "/operation.sh {}".format(operation)).read()
+ return output
+
+
+def collect_results(abs_tdir):
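+    """Collect summaries, traffic logs, counters, test reports and debug traces under '<tests-dir>.logs'; return (states output, verdict output, verdict rc)."""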
+ global agents_ADML_dir
+
+ # Logs directory:
+ logs_dir = abs_tdir + ".logs"
+ shutil.rmtree(logs_dir, ignore_errors=True)
+ os.mkdir(logs_dir)
+ os.mkdir(logs_dir + "/traffic")
+ os.mkdir(logs_dir + "/counters")
+ os.mkdir(logs_dir + "/test-reports")
+ os.mkdir(logs_dir + "/debug")
+
+ # Summary states:
+ print("\n\n")
+ printMsg("Retrieving tests summary states ...")
+ statesOUTPUT = adml_operation_output("\"test|summary-states\"")
+ writeFileContent(logs_dir + "/tests.summary-states", statesOUTPUT)
+
+ # Summary counts:
+ print("\n\n")
+ printMsg("Retrieving tests summary counts ...")
+ verdictOUTPUT = adml_operation_output("\"test|summary-counts\"")
+ writeFileContent(logs_dir + "/tests.summary-counts", verdictOUTPUT)
+ verdictRC = 1
+ if "PASS" in verdictOUTPUT: verdictRC = 0
+
+ # Traffic logs:
+ # ADML
+ printMsg("Retrieving diameter traffic logs ...")
+ for f in glob.glob(agents_ADML_dir + "/*.log"):
+ shutil.copy(f, logs_dir + "/traffic")
+ for f in glob.glob(agents_ADML_dir + "/*.xml"):
+ shutil.copy(f, logs_dir + "/traffic")
+ # KAFKA
+ printMsg("Retrieving kafka traffic logs ...")
+ for f in glob.glob(agents_KAFKA_dir + "/*.log"):
+ shutil.copy(f, logs_dir + "/traffic")
+ for f in glob.glob(agents_KAFKA_dir + "/*.json"):
+ shutil.copy(f, logs_dir + "/traffic")
+
+ # HTTPMOCK
+ #printMsg("Retrieving httpmock activity logs ...")
+ #...
+
+ printMsg("Generating junit report ...")
+ writeFileContent(logs_dir + "/junit.xml", adml_operation_output("\"test|junit\""))
+ printMsg("Generating tests summary ...")
+ writeFileContent(logs_dir + "/tests.summary", adml_operation_output("\"test|summary\""))
+
+ printMsg("Generating tests oam reports ...")
+ writeFileContent(logs_dir + "/tests.oam", adml_operation_output("\"show-oam\""))
+ printMsg("Generating tests statistics ...")
+ writeFileContent(logs_dir + "/tests.stats", adml_operation_output("\"show-stats\""))
+ printMsg("Dumping ADML process context information ...")
+ adml_operation("\"context|{}\" >/dev/null".format(logs_dir + "/adml.context"))
+ adml_operation("forceCountersRecord >/dev/null")
+
+ # Move 'counters' and 'test-reports'
+ printMsg("Retrieving counters information ...")
+ for f in glob.glob(agents_ADML_dir + "/counters/*"):
+ shutil.copy(f, logs_dir + "/counters")
+ printMsg("Retrieving tests reports information ...")
+ for f in glob.glob(agents_ADML_dir + "/test-reports/*"):
+ shutil.copy(f, logs_dir + "/test-reports")
+
+ # Debug:
+ printMsg("Retrieving debug information ...")
+ for f in glob.glob(agents_ADML_dir + "/launcher.trace*"):
+ shutil.copy(f, logs_dir + "/debug")
+
+    # Backup tests directory itself (some replacements could have happened):
+ target_once_executed = logs_dir + "/debug/" + os.path.basename(abs_tdir)
+ shutil.copytree(abs_tdir, target_once_executed) # , symlinks=True)
+
+ return statesOUTPUT, verdictOUTPUT, verdictRC
+
+
+def interactive_execution():
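+    """Menu-driven loop wrapping ADML test operations (goto, look, ttps, next, ip-limit, interact, reset, repeats, reports ...)."""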
+ printMsg("Starting interactive execution of test cases")
+
+ stay = True
+ while stay:
+ print("--------------------------------------------")
+ adml_operation("\"test|summary-counts\"")
+ print("--------------------------------------------")
+
+ print ('''
+ MAIN INTERACTIVE MENU
+ =====================
+ (prefix option with 'h' to get detailed help)
+
+ General:
+ 0. Exit/Quit
+
+ Position:
+ 1. Go to
+ 2. Look
+
+ Test cases execution:
+ 3. Start with test rate
+ 4. Start next N test cases
+ 5. In-progress limit
+
+ Low level execution: test case steps
+ 6. Execute next N steps
+
+ Status & cycling:
+ 7. Reset
+ 8. Pool repeats
+ 9. Reset statistics & counters
+ 10. Auto reset
+ 11. Reports configuration
+ ''')
+
+ opt=input("Input option: \n")
+ if opt=="h0":
+ print("Just exit this menu")
+ elif opt=="0":
+ stay = False
+
+ elif opt=="h1":
+ print("Updates the current test pointer position.")
+ elif opt=="1":
+ _id=input("Input the test case id: ")
+ adml_operation("\"test|goto|{}\"".format(_id))
+
+ elif opt=="h2":
+ print("Show programmed test case for id provided, current 'in-process' test case when missing.")
+ elif opt=="2":
+ _id=input("Input the test case id [empty will show current]: ")
+ if _id: _id="|" + _id
+ adml_operation("\"test|look{}\"".format(_id))
+
+ elif opt=="h3":
+ print('''
+Starts/resumes the test rate with the provided number of test ticks per second (ttps).
+ADML starts with the event trigger system suspended, and this operation is necessary to
+begin those cases which need this time event (internal triggering). Some other test cases
+could be started through external events (the first test case event could be programmed
+to wait for a specific message), but this external mode is unusual, and mixing triggering
+types is unusual too. Normally, you will pause/stop new test launches by providing 0 as
+the ttps value, and you can also dynamically modify the load rate by updating that value.
+If a test case has N messages then 'ttps * N' will be the virtual number of messages
+managed per second when no bottleneck exists.
+
+Provide 0 in order to stop the timer triggering.
+
+The timer manager resolution currently hardcoded allows a maximum of 50 events per
+second. To reach greater rates, ADML will synchronously join the needed number of new
+time-triggered test cases into a single event, writing a warning-level trace to advise
+about the risk of burst sendings and to recommend launching multiple instances to
+achieve such load with a lower rate per instance.''')
+ elif opt=="3":
+ ttps=input("Input the test ticks per second (0: stops the test rate clock): ")
+ adml_operation("\"test|ttps|{}\"".format(ttps))
+
+ elif opt=="h4":
+ print('''
+Forces the execution of the next test case(s) without waiting for the test manager tick.
+Provide an integer value for 'sync-amount' to launch a synchronous burst of the next
+programmed test cases (1 by default). This event works regardless of the timer tick
+function, but it is normally used with the test manager tick stopped.''')
+ elif opt=="4":
+ amount=input("Input the number of tests to execute synchronously [1 by default]: ")
+ if not amount: amount=1
+ adml_operation("\"test|next|{}\"".format(amount))
+
+ elif opt=="h5":
+ print('''
+In-progress limit of test cases. No new test cases will be launched over this value
+(test manager tick work will be ignored). A value of 0 is equivalent to stopping the
+clock tick, and -1 is used to specify 'no limit'. If the amount is missing, the limit
+and the current amount of in-progress test cases will be shown.
+
+Default is 1 to ensure sequential execution of test cases (function test mode).
+For system tests, it is recommended to set '-1' (no limit).''')
+ elif opt=="5":
+ print("\nTypical 'in-progress limit' values:")
+ print("-1: no limit")
+ print(" 0: similar to stop the test rate clock")
+ print(" 1: sequential execution for testcases (function test mode)\n")
+ limit=input("Input the desired in-progress limit amount [show current by default]: ")
+ if limit: limit = "|" + limit
+ adml_operation("\"test|ip-limit{}\"".format(limit))
+
+ elif opt=="h6":
+ print('''
+Makes a specific test case id interactive. The amount is the margin of execution steps
+to be done. With an amount of '0' the test case is selected to be interactive, but no
+step is executed. Then you have to interact with positive amounts (usually 1), executing
+the provided number of steps if they are ready and fulfill the needed conditions. An
+amount of 0 implies no execution step margin, which can be useful to 'freeze' a test in
+the middle of its execution. You can also provide -1 to make it non-interactive, resuming
+it from the current step.
+By default, the current test case id is selected for interaction.''')
+ elif opt=="6":
+ amount=input("Input the number of steps to execute (-1: resumes the test case; 0: freezes it) [1 by default]: ")
+ if not amount: amount = 1
+ test_id=input("Input the test id [current by default (remember to set a valid pointer with 'go to')]: ")
+ if test_id: test_id = "|" + test_id
+ adml_operation("\"test|interact|{}{}\"".format(amount, test_id))
+
+ elif opt=="h7":
+ print('''
+Resets the test case for the id provided, or all the tests when missing. It can be hard/soft:
+- hard: you probably need to stop the load rate first. This operation initializes
+        all test cases regardless of their states.
+- soft: only for finished cases (those with 'Success' or 'Failed' states). It does not
+        affect test cases with 'InProgress' state.''')
+ elif opt=="7":
+ rtype=input("Input the reset type (soft/hard) [hard]: ")
+ if not rtype: rtype = "hard"
+ test_id=input("Input the test id [all tests by default]: ")
+ if test_id: test_id = "|" + test_id
+ adml_operation("\"test|reset|{}{}\"".format(rtype, test_id))
+
+ elif opt=="h8":
+ print('''
+Restarts the whole programmed test list when finished, the provided number of times (repeats
+forever if the value -1 is provided). This is disabled by default (amount = 0): the testing
+trigger system will enter a suspended state until a new ttps operation is received and a soft
+reset has been done before. Test case state & data will be reset (when cycled again), but
+general statistics and counters will keep measuring until reset with the 'collect' operation.''')
+ elif opt=="8":
+ amount=input("Input the number of cycle repeats (-1: repeats forever; 0: disables repetitions): ")
+ adml_operation("\"test|repeats|{}\"".format(amount))
+
+ elif opt=="h9":
+ print('''
+Reset statistics and counters to start a new test stage of performance measurement.
+This affects the final logs retrieved on exit (option 0).''')
+ elif opt=="9":
+ adml_operation("\"collect\"")
+
+ elif opt=="h10":
+ print('''
+When cycling, current test cases can be soft (default) or hard reset. If no timeout has
+been configured for a test case, a hard reset could prevent getting stuck on the next
+cycle for those test cases still in progress.''')
+ elif opt=="10":
+ rtype=input("Input the reset type (soft/hard) [hard]: ")
+ if not rtype: rtype = "hard"
+ adml_operation("\"test|auto-reset|{}\"".format(rtype))
+
+ elif opt=="h11":
+ print('''
+Enables/disables report generation for a certain test case state: initialized, in-progress,
+failed or success (the reserved words 'all' and 'none' can also be used). This applies to the
+report summary (final logs retrieved) and to automatic dumps during testing, where only failed
+or successful states will appear: every time a test case finishes, its xml representation will
+be dumped to a file under the logs 'test-reports' directory with the name:
+
+   'cycle-<cycle id>.testcase-<test case id>.xml'
+
+By default, all the states are disabled to avoid IO overload. In most cases not all the tests
+are going to fail, so you could enable only the failed dumps. Anyway, you can set the reports
+visibility to fit your needs in a given situation.
+
+You can also enable hexadecimal representation for diameter messages on reports.''')
+ elif opt=="11":
+ print("\nInput the report target operation, capital letters to enable:")
+ rtype=input("(I/i)nitialized, in-(P/p)rogress, (F/f)ailed, (S/s)uccess, (A/a)ll, (N/n)one [A: all will be enabled by default]: ")
+ if not rtype: rtype = "A"
+ target = None
+ enable = "no"
+ upper_rtype = rtype.upper()
+ if (upper_rtype == rtype): enable = "yes"
+ if (upper_rtype == "I" ):
+ target="initialized"
+ elif (upper_rtype == "P"):
+ target="in-progress"
+ elif (upper_rtype == "F"):
+ target="failed"
+ elif (upper_rtype == "S"):
+ target="success"
+ elif (upper_rtype == "A"):
+ target="all"
+ elif (upper_rtype == "N"):
+ target="none"
+
+ if not target:
+ print("Invalid target. Try again !")
+ else:
+ print(" - Target selected: " + target)
+ print(" - Enable: " + enable)
+ adml_operation("\"test|report|{}|{}\"".format(target, enable))
+
+ enableHex=input("\nEnable/Disable hexadecimal dumps for diameter messages in test reports (yes/no) [no]: ")
+ if not enableHex: enableHex = "no"
+ adml_operation("\"test|report-hex|{}\"".format(enableHex))
+
+ elif opt !="":
+ print("\nInvalid choice. Try again !")
+
+
+ if stay: input("\nPress ENTER to continue ...\n")
+
+
+
+#####################
+# M A I N #
+#####################
+
+if __name__ == "__main__":
+
+ # Agents:
+ abspath = os.path.abspath(__file__)
+ dname = os.path.dirname(abspath)
+ agents_KAFKA_dir = dname + "/agents/KAFKA"
+ agents_HTTPMOCK_dir = dname + "/agents/HTTPMOCK"
+ agents_ADML_dir = dname + "/agents/ADML"
+
+    # Injected environment variables (could be used in system_cmd commands):
+    os.environ["AGENTS_DIR"] = dname + "/agents"
+
+ arguments = parse_arguments()
+
+ tdir = arguments.tests_dir
+ keep = arguments.keep_list_if_exists
+ stopAdml = arguments.stop_adml_at_the_end
+ interactive = arguments.interactive
+ dryrun = arguments.dry_run
+ iplimit = arguments.ip_limit
+ ttps = arguments.ttps
+
+ # Tests list: 'launcher.list':
+ llist = tdir + "/launcher.list"
+ llist_current = llist + ".updated"
+
+ # Auxiliary:
+ abs_tdir = get_parent_dir(llist)
+ llist_exists = os.path.exists(llist)
+
+ # Create the current list of test cases:
+ list_content = None
+ list_content = os.popen("find {} -mindepth 2 -name \"*.yml\" | sort -t'/'".format(tdir)).read()
+
+ # Performance:
+ if not ttps: ttps = 50
+ if not iplimit: iplimit = 1
+ if iplimit == "auto":
+ printMsg("AUTO IP-LIMIT ALGORITHM IS UNDER CONSTRUCTION (configuring value 1) ... ")
+ iplimit = 1
+
+ GIpLimit = iplimit
+
+
+ if llist_exists:
+ # Detect updates:
+ writeFileContent(llist_current, list_content)
+ if filecmp.cmp(llist, llist_current):
+ os.remove(llist_current)
+ else:
+ printMsg("The list '{}' has been created".format(llist_current))
+ printMsg("Warning: detected local changes (may be commented test cases) in '{}' regarding current tests list '{}'".format(llist, llist_current))
+
+    # Reuse the existing list when -k is provided:
+ if keep:
+ printMsg("Reuse the current list '{}'".format(llist))
+ with open(llist) as llist_file:
+ list_content = llist_file.read()
+
+ if not llist_exists or not keep:
+ writeFileContent(llist, list_content)
+ printMsg("The list '{}' has been created".format(llist))
+
+
+ try:
+ global GTcNumber
+ global GProvisionContent
+ global GIdsVsDescs
+ GTcNumber = 0
+ GProvisionContent = ""
+ GIdsVsDescs = ""
+
+ provision_begin(iplimit)
+ provision_tests(list_content)
+
+ llist_of_operations = abs_tdir + "/launcher.list.adml_operations"
+ lids = abs_tdir + "/launcher.ids"
+ writeFileContent(llist_of_operations, GProvisionContent)
+ writeFileContent(lids, GIdsVsDescs)
+
+ if dryrun:
+ if interactive: printMsg("Interactive mode is not reached at dry-run mode")
+ _exit("Dry-run execution finished", 0)
+
+ rc = start_agents()
+ if (rc != 0):
+ _exit("Failed to initiate ADML director!", 1)
+
+ # Provision test cases in ADML:
+ adml_operation("-f " + llist_of_operations + " >/dev/null")
+
+
+ if (interactive):
+ interactive_execution()
+
+ else:
+ # Initial time:
+ t_initial = datetime.datetime.now()
+
+ # Start test cases:
+ adml_operation("\"test|ttps|" + str(ttps) + "\"")
+
+ # Estimated time to finish: worst case is timeout (we assume maximum timeout of 15 seconds per test case):
+ # worstTime = 15*GTcNumber
+
+            # We will check the progress: initialized test cases must be 0 again (cycle completed).
+            # But this is only guaranteed if all the tests define a timeout. If not, a wait step could
+            # leave a test stuck forever (maxCycles is used to exit the following loop):
+ cycle = 0
+ #sleepTime=15
+ #maxCycles=GTcNumber
+ # Better resolution:
+ #sleepTime=1
+ maxCycles=15*GTcNumber
+
+ while True:
+ cycle += 1
+ #time.sleep(sleepTime)
+ time.sleep(1)
+ output = adml_operation_output("\"test|finished\"")
+ if "test|finished => {}".format(GTcNumber) in output: break
+ if (cycle > maxCycles): break
+ #printMsg("Checking progress ({}/{}) ...".format(cycle, maxCycles))
+ if not (cycle % 60): printMsg("Working ...")
+
+ # Final time:
+ t_final = datetime.datetime.now()
+ t_elapsed = t_final - t_initial
+ elapsedOUTPUT = "\nElapsed time in seconds: {}\n\n".format(t_elapsed.total_seconds())
+
+ # Collect results at tests-directory:
+ statesOUTPUT, verdictOUTPUT, verdictRC = collect_results(abs_tdir)
+
+ # Stop ADML:
+ if stopAdml:
+ printMsg("Stoping ADML director ...")
+ time.sleep(6)
+ os.system(agents_ADML_dir + "/stop.sh >/dev/null")
+
+ # Exit with verdict:
+ _exit(statesOUTPUT + elapsedOUTPUT + verdictOUTPUT, verdictRC)
+
+ except Exception as e:
+ _exit(e, 1)