2 # Anna Agents-Oriented Testing Setup Launcher
4 # Requires PyYAML: pip install pyyaml
6 import os, sys, datetime, glob, re, json, time, shutil, filecmp, datetime
7 from argparse import ArgumentParser
8 import xml.etree.ElementTree
9 #from pathlib import Path
11 from json import loads
14 from yaml import load, dump
16 from yaml import CLoader as Loader, CDumper as Dumper
18 from yaml import Loader, Dumper
class AttributeType(Enum):
    """Lookup policy for yaml attributes read via getNodeValue()."""
    # A missing Mandatory key aborts the launcher (see getNodeValue).
    # NOTE(review): an 'Optional' member is referenced elsewhere in this file
    # (e.g. getNodeValue calls with AttributeType.Optional) — confirm it is
    # declared here as well.
    Mandatory = "Mandatory"
def _exit(message = None, rc=0):
    """Print an optional final message plus the exit banner.

    NOTE(review): presumably followed by sys.exit(rc) — confirm (the exit
    call itself is not visible in this fragment).
    """
    if (message): printMsg(message)
    printMsg("Exiting [rc={}]".format(rc))
    # Fragment of timeFormat() (def header not visible): the strftime pattern
    # used by printMsg() timestamps, e.g. "Jan.02.2024-13.45.59".
    return '%b.%d.%Y-%H.%M.%S'
def printMsg(message):
    """Emit *message* on stdout, prefixed with a timeFormat() timestamp."""
    stamp = datetime.datetime.now().strftime(timeFormat())
    print("[{}] {}".format(stamp, message))
class YamlConfigParser():
    # Thin wrapper around PyYAML used to read the test-case yaml files.

    def __init__(self, yaml_config_file):
        """
        Convert the yaml file into a Python object
        """
        with open(yaml_config_file, 'r') as ss:
            self.data = load(ss, Loader=Loader)

    def show(self, options):
        # Serialize the parsed data back to yaml text.
        # NOTE(review): 'options' is unused in the visible lines and 'output'
        # is presumably printed/returned below — confirm. A getData() accessor
        # is also called elsewhere in this file but is not visible here.
        output = dump(self.data, Dumper=Dumper)
def writeFileContent(filename, content):
    """Write *content* to *filename*, truncating any previous content.

    NOTE(review): the write/close calls are not visible in this fragment — a
    'with open(...)' block would guarantee the handle is released; confirm the
    file is closed after writing.
    """
    _file = open(filename, "w")
def getNodeValue(dictionary, key, attributeType = AttributeType.Mandatory):
    """Look up *key* in *dictionary*.

    A missing Mandatory key aborts the launcher via _exit(rc=1). An Optional
    miss presumably falls through (returning None) — confirm, the return
    statement is not visible in this fragment.

    Raises:
        TypeError: when attributeType is not an AttributeType member.
    """
    if not isinstance(attributeType, AttributeType):
        raise TypeError("'attributeType' must be an instance of AttributeType Enum")
    # (the lookup below sits inside a try block whose header is not visible)
        value = dictionary[key]
    except Exception as e:
        if (attributeType == AttributeType.Mandatory): _exit ("Missing mandatory key: {}".format(e) , 1)

    #printMsg("getNodeValue -> {}: {}".format(key, value))
def get_parent_dir(_file):
    """Return the absolute path of the directory containing *_file*."""
    return os.path.dirname(os.path.abspath(_file))
def is_absolute_path(path):
    """True when *path* is absolute; delegates to os.path.isabs."""
    absolute = os.path.isabs(path)
    return absolute
def provision_begin(iplimit):
    """Seed GProvisionContent with the harness-wide provisioning preamble.

    NOTE(review): 'iplimit' is presumably interpolated into the preamble in
    lines not visible here — confirm.
    """
    global GProvisionContent

    # Opening multi-line provision chunk (the literal continues beyond this
    # fragment and is not closed in the visible lines):
    GProvisionContent += '''test|report
def assertFile(filepath):
    """Raise an Exception when *filepath* does not exist on disk."""
    if os.path.exists(filepath):
        return
    raise Exception("File '{}' not found !".format(filepath))
def assertAgentId(agent_id):
    """Resolve *agent_id* to its agent template by probing <dir>/<id>.node.

    Probes the ADML, KAFKA and HTTPMOCK agent directories in that order and
    presumably returns the matching template name ("ADML"/"KAFKA"/"HTTPMOCK",
    as compared by process_test_case_step) — the return statements are not
    visible in this fragment, confirm. Raises when no .node file matches.
    """
    global agents_ADML_dir
    global agents_KAFKA_dir
    global agents_HTTPMOCK_dir

    adml_node_file = agents_ADML_dir + "/{}.node".format(agent_id)
    kafka_node_file = agents_KAFKA_dir + "/{}.node".format(agent_id)
    httpmock_node_file = agents_HTTPMOCK_dir + "/{}.node".format(agent_id)
    # (branch bodies below are not visible in this fragment)
    if (os.path.exists(adml_node_file)):
    elif (os.path.exists(kafka_node_file)):
    elif (os.path.exists(httpmock_node_file)):

    raise Exception ("Agent id '{}' not found as registered. Check the agents yaml configuration loaded !".format(agent_id))
def modify_xml_avp_data(stepNumber, arguments):
    """Copy an xml file, replacing AVP 'data'/'hex-data' attributes at xpaths.

    arguments keys:
      xml              source file (resolved against GTcDir when relative)
      new_xml          destination file (presumably written via ElementTree
                       in lines not visible here — confirm)
      xpath_value_list list of {'xpath', 'value'} replacement items
    """
    assertSupportedKeys(stepNumber, arguments, ["xml", "new_xml", "xpath_value_list"])

    _xml = getNodeValue(arguments, "xml")
    if not is_absolute_path(_xml): _xml = GTcDir + "/" + _xml

    new_xml = getNodeValue(arguments, "new_xml")
    if not is_absolute_path(new_xml): new_xml = GTcDir + "/" + new_xml

    et = xml.etree.ElementTree.parse(_xml)

    xv_list = getNodeValue(arguments, "xpath_value_list")
    # (per-item loop header over xv_list is not visible in this fragment)
        assertSupportedKeys(stepNumber, item, ["xpath", "value"])
        xpath = getNodeValue(item, "xpath")
        value = getNodeValue(item, "value")

        # NOTE(review): 'root' is presumably et.getroot() — confirm.
        targets = root.findall(xpath)
        for target in targets:
            # An AVP carries its payload in either 'data' or 'hex-data':
            if "data" in target.attrib:
                target.attrib["data"] = value
            elif "hex-data" in target.attrib:
                target.attrib["hex-data"] = value
def modify_json_key_value(stepNumber, arguments):
    """Copy a json file, replacing values addressed by dotted key paths.

    arguments keys:
      json             source file (resolved against GTcDir when relative)
      new_json         destination file
      kpath_value_list list of {'kpath', 'value'} items; 'a.b.c' walks
                       nested dicts down to the leaf key
    """
    assertSupportedKeys(stepNumber, arguments, ["json", "new_json", "kpath_value_list"])

    _json = getNodeValue(arguments, "json")
    if not is_absolute_path(_json): _json = GTcDir + "/" + _json

    new_json = getNodeValue(arguments, "new_json")
    if not is_absolute_path(new_json): new_json = GTcDir + "/" + new_json

    with open(_json, 'r') as json_file:
        json_data = json_file.read()
    json_dict = json.loads(json_data)

    kv_list = getNodeValue(arguments, "kpath_value_list")
    # (per-item loop header over kv_list is not visible in this fragment)
        assertSupportedKeys(stepNumber, item, ["kpath", "value"])
        kpath = getNodeValue(item, "kpath")
        value = getNodeValue(item, "value")

        key_path_list = kpath.split(".")
        elem = key_path_list[0]
        if (len(key_path_list) == 1):
            # Single-segment path: replace at top level.
            json_dict[elem] = value
        # (else branch header not visible) descend to the leaf's parent dict:
            aux_dict = json_dict[elem]
            #print("aux dict: " + str(aux_dict))
            for elem in key_path_list[1:-1]:
                aux_dict = aux_dict[elem]

            aux_dict[key_path_list[-1]] = value

    content = json.dumps(json_dict)
    writeFileContent(new_json, content)
def system_cmd(stepNumber, arguments):
    """Provision a 'sh-command' step from an inline 'shell' string or a 'file'.

    Exactly one of 'shell'/'file' must be provided; 'file_parameters' is
    optional extra argv for the 'file' form and is rejected together with
    'shell' (see the third exception below). Relative 'file' paths are
    resolved against GTcDir.
    """
    global GProvisionContent

    assertSupportedKeys(stepNumber, arguments, ["shell", "file", "file_parameters"])

    shell = getNodeValue(arguments, "shell", AttributeType.Optional)
    _file = getNodeValue(arguments, "file", AttributeType.Optional)
    fp = getNodeValue(arguments, "file_parameters", AttributeType.Optional)

    # NOTE(review): the default for 'file_parameters' is assigned in a line
    # not visible here — confirm.
    if (fp): file_parameters = fp

    if (shell and _file):
        raise Exception ("Both 'shell' and 'file' cannot be provided at system_cmd action !")

    if (not shell and not _file):
        raise Exception ("Neither 'shell' nor 'file' have been provided at system_cmd action !")

    # (guard condition header not visible — presumably 'if shell and fp:')
        raise Exception ("Both 'shell' and 'file_parameters' cannot be provided at system_cmd action !")

    # (presumably inside 'if shell:' — header not visible)
        GProvisionContent += 'test|{}|sh-command|{}\n'.format(GTcNumber, shell)

    # File form: resolve and provision the script invocation.
    if not is_absolute_path(_file): _file = GTcDir + "/" + _file

    GProvisionContent += 'test|{}|sh-command|{} {}\n'.format(GTcNumber, _file, file_parameters)
def process_test_case_step_GENERIC(stepNumber, action_id, arguments):
    """Provision an agent-less (generic) step into GProvisionContent.

    Supported action ids: sh_command, ip_limit, timeout_ms, delay_ms,
    modify_xml_avp_data, modify_json_key_value, system_cmd. Anything else
    raises.
    """
    global GProvisionContent

    if action_id == "sh_command":
        GProvisionContent += 'test|{}|sh-command|{}\n'.format(GTcNumber, getNodeValue(arguments, "value"))
    elif action_id == "ip_limit":
        il = getNodeValue(arguments, "value")
        # "launcher" presumably reuses the launcher-level ip-limit; the
        # assignment of 'value' is in lines not visible here — confirm.
        if (il == "launcher"):

        GProvisionContent += 'test|{}|ip-limit|{}\n'.format(GTcNumber, value)

    elif action_id == "timeout_ms":
        GProvisionContent += 'test|{}|timeout|{}\n'.format(GTcNumber, getNodeValue(arguments, "value"))
    elif action_id == "delay_ms":
        GProvisionContent += 'test|{}|delay|{}\n'.format(GTcNumber, getNodeValue(arguments, "value"))
    elif action_id == "modify_xml_avp_data":
        # Always create the step or step references will be impossible to determine:
        GProvisionContent += 'test|{}|sh-command|echo "{}"\n'.format(GTcNumber, "Replacing xml file: " + str(arguments))
        modify_xml_avp_data(stepNumber, arguments)
    elif action_id == "modify_json_key_value":
        # Always create the step or step references will be impossible to determine:
        GProvisionContent += 'test|{}|sh-command|echo "{}"\n'.format(GTcNumber, "Replacing json file: " + str(arguments))
        modify_json_key_value(stepNumber, arguments)
    elif action_id == "system_cmd":
        system_cmd(stepNumber, arguments)
    # (final 'else:' header not visible)
        raise Exception("ERROR: Step {}: unsupported generic action-id '{}'".format(stepNumber, action_id))
def assertSupportedKeys(stepNumber, arguments, supported):
    """Validate that every key of *arguments* belongs to *supported*.

    Raises an Exception naming the first offending key and the allowed set;
    *stepNumber* is included so the message points at the failing yaml step.
    """
    for candidate in arguments:
        if candidate in supported:
            continue
        raise Exception("ERROR: Step {}: unsupported argument '{}' (allowed: {})".format(stepNumber, candidate, str(supported)))
def process_test_case_step_ADML(stepNumber, agent_id, action_id, arguments):
    """Provision a diameter step for an ADML agent.

    Maps yaml actions onto ADML test operations:
      send_xml_to_entity -> sendxml2e    wait_xml_from_entity -> waitfe-xml
      send_xml_to_client -> sendxml2c    wait_xml_from_client -> waitfc-xml
    Relative 'xml' paths are resolved against GTcDir; optional 'answers_to'
    links a send step to the step it answers (appended as '|<step>').
    """
    global GProvisionContent

    if action_id == "send_xml_to_entity":
        assertSupportedKeys(stepNumber, arguments, ["xml", "answers_to"])

        xml = getNodeValue(arguments, "xml")
        if not is_absolute_path(xml): xml = GTcDir + "/" + xml

        at = getNodeValue(arguments, "answers_to", AttributeType.Optional)
        # NOTE(review): the default initialization of 'answers_to' is not
        # visible in this fragment — confirm it defaults to "".
        if (at): answers_to = "|{}".format(at)

        GProvisionContent += 'test|{}|sendxml2e|{}{}\n'.format(GTcNumber, xml, answers_to)

    elif action_id == "wait_xml_from_entity":
        assertSupportedKeys(stepNumber, arguments, ["xml"])

        xml = getNodeValue(arguments, "xml")
        if not is_absolute_path(xml): xml = GTcDir + "/" + xml

        GProvisionContent += 'test|{}|waitfe-xml|{}\n'.format(GTcNumber, xml)

    elif action_id == "send_xml_to_client":
        assertSupportedKeys(stepNumber, arguments, ["xml", "answers_to"])

        xml = getNodeValue(arguments, "xml")
        if not is_absolute_path(xml): xml = GTcDir + "/" + xml

        at = getNodeValue(arguments, "answers_to", AttributeType.Optional)
        if (at): answers_to = "|{}".format(at)

        GProvisionContent += 'test|{}|sendxml2c|{}{}\n'.format(GTcNumber, xml, answers_to)

    elif action_id == "wait_xml_from_client":
        assertSupportedKeys(stepNumber, arguments, ["xml"])

        xml = getNodeValue(arguments, "xml")
        if not is_absolute_path(xml): xml = GTcDir + "/" + xml

        GProvisionContent += 'test|{}|waitfc-xml|{}\n'.format(GTcNumber, xml)
    # (final 'else:' header not visible)
        raise Exception("ERROR: Step {}: unsupported generic action-id '{}' for ADML node type (agent '{}')".format(stepNumber, action_id, agent_id))
def process_test_case_step_HTTPMOCK(stepNumber, agent_id, action_id, arguments):
    """Provision a step for an HTTPMOCK agent.

    Only 'serve_json' is supported: it delegates to the per-agent script
    agents/HTTPMOCK/<agent-id>-provision.sh, passing the test-case number,
    step number, test-case directory, json file, method and uri.
    """
    global GProvisionContent
    global agents_HTTPMOCK_dir

    if action_id == "serve_json":
        assertSupportedKeys(stepNumber, arguments, ["json", "method", "uri"])

        # NOTE: this local 'json' shadows the json module inside the branch.
        json = getNodeValue(arguments, "json")
        if not is_absolute_path(json): json = GTcDir + "/" + json

        method = getNodeValue(arguments, "method")
        uri = getNodeValue(arguments, "uri")

        # Provision script is in the form: agents/HTTPMOCK/<agent-id>-provision.sh
        provision_script = agents_HTTPMOCK_dir + "/" + agent_id + "-provision.sh "
        args = str(GTcNumber) + " " + str(stepNumber) + " " + GTcDir + " \"" + json + "\" " + method + " \"" + uri + "\""
        GProvisionContent += 'test|{}|sh-command|{}\n'.format(GTcNumber, provision_script + args)
    # (final 'else:' header not visible)
        raise Exception("ERROR: Step {}: unsupported generic action-id '{}' for HTTPMOCK node type (agent '{}')".format(stepNumber, action_id, agent_id))
def process_test_case_step_KAFKA(stepNumber, agent_id, action_id, arguments):
    """Provision a step for a KAFKA agent.

    Actions 'consume_json' / 'produce_json' / 'admin' delegate to the
    per-agent shell scripts agents/KAFKA/<agent-id>-{consumer,producer,admin}.sh.
    NOTE(review): the defaults for the optional knobs (autoOffsetReset,
    timeout, delay_ms, background, debug) are assigned in lines not visible
    in this fragment — confirm.
    """
    global GProvisionContent
    global agents_KAFKA_dir

    if action_id == "consume_json":
        assertSupportedKeys(stepNumber, arguments, ["json", "timeout", "auto_offset_reset", "background", "debug"])

        # NOTE: this local 'json' shadows the json module inside the branch.
        json = getNodeValue(arguments, "json")
        if not is_absolute_path(json): json = GTcDir + "/" + json

        aor = getNodeValue(arguments, "auto_offset_reset", AttributeType.Optional)
        if (aor): autoOffsetReset = aor

        to = getNodeValue(arguments, "timeout", AttributeType.Optional)

        # 'background: yes' appends '&' so the consumer runs detached:
        bck = getNodeValue(arguments, "background", AttributeType.Optional)
        if (bck == "yes"): background = "&"

        deb = getNodeValue(arguments, "debug", AttributeType.Optional)
        if (deb): debug = deb

        # Consumer script is in the form: agents/KAFKA/<agent-id>-consumer.sh
        consumer_script = agents_KAFKA_dir + "/" + agent_id + "-consumer.sh "
        args = json + " " + autoOffsetReset + " " + str(timeout) + " " + debug
        GProvisionContent += 'test|{}|sh-command|{}{}\n'.format(GTcNumber, consumer_script + args, background)

    elif action_id == "produce_json":
        assertSupportedKeys(stepNumber, arguments, ["json", "delay_ms", "background", "debug"])

        json = getNodeValue(arguments, "json")
        if not is_absolute_path(json): json = GTcDir + "/" + json

        dl = getNodeValue(arguments, "delay_ms", AttributeType.Optional)
        if(dl): delay_ms = dl

        bck = getNodeValue(arguments, "background", AttributeType.Optional)
        if (bck == "yes"): background = "&"

        deb = getNodeValue(arguments, "debug", AttributeType.Optional)
        if (deb): debug = deb

        # Producer script is in the form: agents/KAFKA/<agent-id>-producer.sh
        producer_script = agents_KAFKA_dir + "/" + agent_id + "-producer.sh "
        args = json + " " + str(delay_ms) + " " + debug
        GProvisionContent += 'test|{}|sh-command|{}{}\n'.format(GTcNumber, producer_script + args, background)

    elif action_id == "admin":
        assertSupportedKeys(stepNumber, arguments, ["operation", "debug"])

        operation = getNodeValue(arguments, "operation")

        deb = getNodeValue(arguments, "debug", AttributeType.Optional)
        if (deb): debug = deb

        # Admin script is in the form: agents/KAFKA/<agent-id>-admin.sh
        admin_script = agents_KAFKA_dir + "/" + agent_id + "-admin.sh "
        args = operation + " " + debug
        GProvisionContent += 'test|{}|sh-command|{}\n'.format(GTcNumber, admin_script + args)
    # (final 'else:' header not visible)
        raise Exception("ERROR: Step {}: unsupported generic action-id '{}' for KAFKA node type (agent '{}')".format(stepNumber, action_id, agent_id))
def process_test_case_step(stepNumber, dictionary):
    """Dispatch one yaml step to its agent-specific or generic handler.

    The step's 'action' selects the handler: '<agent-id>/<action-id>' routes
    to the agent's template handler (ADML/KAFKA/HTTPMOCK per assertAgentId),
    a bare '<action-id>' routes to the GENERIC handler.
    """
    action = getNodeValue(dictionary, "action")
    arguments = getNodeValue(dictionary, "arguments")

    # Action is in the form '[agent id/]<action id>':
    # MANDATORY: action_id
    agent_template = None
    # NOTE(review): the split below presumably sits inside a try/except so a
    # plain action (no '/') falls back to GENERIC — headers not visible here.
    agent_id, action_id = action.split('/')
    agent_template = assertAgentId(agent_id)

    if agent_template == "ADML":
        process_test_case_step_ADML(stepNumber, agent_id, action_id, arguments)
    elif agent_template == "KAFKA":
        process_test_case_step_KAFKA(stepNumber, agent_id, action_id, arguments)
    elif agent_template == "HTTPMOCK":
        process_test_case_step_HTTPMOCK(stepNumber, agent_id, action_id, arguments)
    # (else/fallback branch header not visible)
        process_test_case_step_GENERIC(stepNumber, action_id, arguments)

    #trace = "Step {}, Agent-Id '{}', Action-Id '{}', Parameters: {}"
    #print(trace.format(stepNumber, str(agent_id), action_id, str(arguments)))
def process_test_case_yml(testcaseList):
    """Process every step of a parsed test-case yaml (a list of step dicts).

    Steps are numbered from 1 in list order. The original implementation used
    testcaseList.index(step) per iteration, which is O(n^2) and — because
    list.index returns the FIRST equal element — assigned the same step
    number to duplicated steps. enumerate() gives each step its true
    position.
    """
    for stepNumber, step in enumerate(testcaseList, start=1):
        process_test_case_step(stepNumber, step)
def provision_test_case(filename, testcaseList):
    """Provision one test case: register its id/description, process steps.

    Updates the globals GProvisionContent (ADML operations), GIdsVsDescs
    (id-to-filename map) and GTcDir (directory of the current test case).
    NOTE(review): the 'global' declarations for GTcDir/GTcNumber/GIdsVsDescs
    and the GTcNumber increment are in lines not visible here — confirm.
    """
    global GProvisionContent

    GTcDir = get_parent_dir(filename)

    id_desc = "{} : {}".format(GTcNumber, filename)
    GIdsVsDescs += id_desc + "\n"
    tc_desc = "test case '{}'".format(id_desc)

    printMsg("Provisioning {} ...".format(tc_desc))

    # Set test case description
    GProvisionContent += 'test|{}|description|{}\n'.format(GTcNumber, filename)

    # Process the yml definition for the test case
    process_test_case_yml(testcaseList)
def provision_tests(files):
    """Provision every test case listed in *files* (newline-separated paths).

    Lines starting with '#' are reported as commented-out test cases.
    NOTE(review): a blank line would make filename[0] raise IndexError —
    confirm the generated list never contains empty lines.
    """
    for filename in files.splitlines():
        if (filename[0] == "#"):
            printMsg("Ignoring commented test case: '{}'".format(filename))
        # (a 'continue' presumably follows the message above — not visible)

        # Test case is a list of steps:
        tc = YamlConfigParser(filename)
        provision_test_case(filename, tc.getData())
def parse_arguments():
    """Define and parse the launcher command-line options.

    NOTE(review): 'return arguments' is presumably the final statement — it
    is not visible in this fragment.
    """
    parser = ArgumentParser(description='Anna Agents-Oriented Testing Setup Launcher')
    parser.add_argument('-t', '--tests-dir', help='Tests parent directory where to find .yml files (from the next directories level)', required=True)
    parser.add_argument('-k', '--keep-list-if-exists', help='Keeps intact the list of test cases (<test-dir>/launcher.list), creates it if missing', required=False, action='store_true')
    parser.add_argument('-s', '--stop-adml-at-the-end', help='At the end, ADML keeps running to ease debugging. You could force stop with this option', required=False, action='store_true')
    parser.add_argument('-i', '--interactive', help='Interactive execution to ease debugging of test cases', required=False, action='store_true')
    parser.add_argument('-d', '--dry-run', help='Used to test and debug provision, no execution is launched', required=False, action='store_true')
    parser.add_argument('-p', '--ip-limit', help="In-Progress limit is the number of coexisting In-Progress State test cases. Defaults to 1 (sequential), -1 would be 'no limit').", required=False)
    parser.add_argument('-r', '--ttps', help="Rate of test cases launched (test ticks per second). By default 50 (recommended for monothread version).", required=False)

    arguments = parser.parse_args()
    # Fragment of the ADML start/health-check routine (its def header is not
    # visible in this view): restarts the ADML director and polls its health.
    # At the moment, only ADML is started (KAFKA/HTTPMOCK agents uses scripts):
    global agents_ADML_dir

    os.system(agents_ADML_dir + "/stop.sh")
    os.system(agents_ADML_dir + "/start.sh")

    rc = adml_operation("node >/dev/null")
    # Retry loop (loop header, retry counter and sleep are not visible here):
    if (retry > maxRetries): break

    printMsg("Check ADML health retry ({}/{}) ...".format(retry, maxRetries))
    rc = adml_operation("node >/dev/null")
def adml_operation(arguments):
    """Run agents/ADML/operation.sh with *arguments*.

    NOTE(review): presumably returns rc, os.system()'s exit status — the
    return statement is not visible in this fragment.
    """
    global agents_ADML_dir

    rc = os.system(agents_ADML_dir + "/operation.sh " + arguments)
def adml_operation_output(operation):
    """Run agents/ADML/operation.sh capturing and (presumably) returning its
    stdout — the return statement is not visible in this fragment."""
    global agents_ADML_dir

    output = os.popen(agents_ADML_dir + "/operation.sh {}".format(operation)).read()
def collect_results(abs_tdir):
    """Gather execution artifacts into '<tests-dir>.logs' and compute verdict.

    Returns (statesOUTPUT, verdictOUTPUT, verdictRC): ADML's summary-states
    text, its summary-counts text, and 0 when "PASS" appears in the counts
    (the non-zero assignment is in a line not visible here — confirm).
    """
    global agents_ADML_dir

    # Recreate a fresh logs tree next to the tests directory:
    logs_dir = abs_tdir + ".logs"
    shutil.rmtree(logs_dir, ignore_errors=True)
    # (os.mkdir(logs_dir) presumably precedes these — not visible here)
    os.mkdir(logs_dir + "/traffic")
    os.mkdir(logs_dir + "/counters")
    os.mkdir(logs_dir + "/test-reports")
    os.mkdir(logs_dir + "/debug")

    printMsg("Retrieving tests summary states ...")
    statesOUTPUT = adml_operation_output("\"test|summary-states\"")
    writeFileContent(logs_dir + "/tests.summary-states", statesOUTPUT)

    printMsg("Retrieving tests summary counts ...")
    verdictOUTPUT = adml_operation_output("\"test|summary-counts\"")
    writeFileContent(logs_dir + "/tests.summary-counts", verdictOUTPUT)

    # Global verdict: a PASS anywhere in the counts means success (rc 0).
    if "PASS" in verdictOUTPUT: verdictRC = 0

    printMsg("Retrieving diameter traffic logs ...")
    for f in glob.glob(agents_ADML_dir + "/*.log"):
        shutil.copy(f, logs_dir + "/traffic")
    for f in glob.glob(agents_ADML_dir + "/*.xml"):
        shutil.copy(f, logs_dir + "/traffic")

    printMsg("Retrieving kafka traffic logs ...")
    for f in glob.glob(agents_KAFKA_dir + "/*.log"):
        shutil.copy(f, logs_dir + "/traffic")
    for f in glob.glob(agents_KAFKA_dir + "/*.json"):
        shutil.copy(f, logs_dir + "/traffic")

    #printMsg("Retrieving httpmock activity logs ...")

    printMsg("Generating junit report ...")
    writeFileContent(logs_dir + "/junit.xml", adml_operation_output("\"test|junit\""))
    printMsg("Generating tests summary ...")
    writeFileContent(logs_dir + "/tests.summary", adml_operation_output("\"test|summary\""))

    printMsg("Generating tests oam reports ...")
    writeFileContent(logs_dir + "/tests.oam", adml_operation_output("\"show-oam\""))
    printMsg("Generating tests statistics ...")
    writeFileContent(logs_dir + "/tests.stats", adml_operation_output("\"show-stats\""))
    printMsg("Dumping ADML process context information ...")
    adml_operation("\"context|{}\" >/dev/null".format(logs_dir + "/adml.context"))
    adml_operation("forceCountersRecord >/dev/null")

    # Move 'counters' and 'test-reports'
    printMsg("Retrieving counters information ...")
    for f in glob.glob(agents_ADML_dir + "/counters/*"):
        shutil.copy(f, logs_dir + "/counters")
    printMsg("Retrieving tests reports information ...")
    for f in glob.glob(agents_ADML_dir + "/test-reports/*"):
        shutil.copy(f, logs_dir + "/test-reports")

    printMsg("Retrieving debug information ...")
    for f in glob.glob(agents_ADML_dir + "/launcher.trace*"):
        shutil.copy(f, logs_dir + "/debug")

    # Backup tests directory itself (some replacements could be happened):
    target_once_executed = logs_dir + "/debug/" + os.path.basename(abs_tdir)
    shutil.copytree(abs_tdir, target_once_executed) # , symlinks=True)

    return statesOUTPUT, verdictOUTPUT, verdictRC
def interactive_execution():
    """Interactive menu loop driving the ADML director for manual debugging.

    NOTE(review): this body is heavily fragmented in this view — the menu
    loop header and the per-option dispatch (if opt == ...) branches are not
    visible. Lines belonging to triple-quoted help texts are left exactly as
    found, since they are runtime string content.
    """
    printMsg("Starting interactive execution of test cases")

    # Show current verdict snapshot around the menu banner:
    print("--------------------------------------------")
    adml_operation("\"test|summary-counts\"")
    print("--------------------------------------------")

    # (menu banner string — opening print(''' not visible)
MAIN INTERACTIVE MENU
=====================
(prefix option with 'h' to get detailed help)

Test cases execution:

4. Start with test rate
5. Start next N test cases

Low level execution: test case steps
7. Execute next N steps

10. Reset statistics & counters

12. Reports configuration

    opt=input("Input option: \n")
    # (per-option dispatch branches below — headers not visible)

    # Option 0/exit:
    print("Just exit this menu")

    # Option: go-to (move the current test pointer):
    print("Updates the current test pointer position.")

    _id=input("Input the test case id: ")
    adml_operation("\"test|goto|{}\"".format(_id))

    # Option: look (inspect a programmed test case):
    print("Show programmed test case for id provided, current 'in-process' test case when missing.")

    _id=input("Input the test case id [empty will show current]: ")
    if _id: _id="|" + _id
    adml_operation("\"test|look{}\"".format(_id))

    # Option: run (launch a specific test case):
    print("Run specific test case for id provided.")

    _id=input("Input the test case id: ")
    if _id: _id="|" + _id
    adml_operation("\"test|run{}\"".format(_id))

    # Option 4 help text (opening print(''' not visible):
Starts/resume the provided number of test ticks per second (ttps). The ADML starts
with the event trigger system suspended, and this operation is neccessary to begin
those cases which need this time event (internal triggering). Some other test cases
could be started through external events (first test case event could be programmed
to wait specific message), but is not usual this external mode and neither usual to
mix triggering types. Normally, you will pause/stop new test launchs providing 0 as
ttps value, and also you could dynamically modify the load rate updating that value.
If a test case has N messages then 'ttps * N' will be the virtual number of messages
managed per second when no bottleneck exists.

Provide 0 in order to stop the timer triggering.

The timer manager resolution currently harcoded allows a maximum of 50 events
per second. To reach greater rates ADML will join synchronously the needed number of
new time-triggered test cases per a single event, writting a warning-level trace to
advice about the risk of burst sendings and recommend launching multiple instances
to achieve such load with a lower rate per instance.''')

    ttps=input("Input the test ticks per second (0: stops the test rate clock): ")
    adml_operation("\"test|ttps|{}\"".format(ttps))

    # Option 5 help text (opening print(''' not visible):
Forces the execution of the next test case(s) without waiting for test manager tick.
Provide an integer value for 'sync-amount' to send a burst synchronous amount of the
next programmed test cases (1 by default). This event works regardless the timer tick
function, but it is normally used with the test manager tick stopped.''')

    amount=input("Input the number of tests to execute synchronously [1 by default]: ")
    if not amount: amount=1
    adml_operation("\"test|next|{}\"".format(amount))

    # Option ip-limit help text (opening print(''' not visible):
In-progress limit of test cases. No new test cases will be launched over this value
(test Manager tick work will be ignored). Zero-value is equivalent to stop the clock.
tick, -1 is used to specify 'no limit'. If missing amount, the limit and current
amount of in-progress test cases will be shown.

Default is 1 to ensure sequential execution for testcases (function test mode).
For system test, it is recommended to set '-1' (no limit).''')

    print("\nTypical 'in-progress limit' values:")
    print("-1: no limit")
    print(" 0: similar to stop the test rate clock")
    print(" 1: sequential execution for testcases (function test mode)\n")
    limit=input("Input the desired in-progress limit amount [show current by default]: ")
    if limit: limit = "|" + limit
    adml_operation("\"test|ip-limit{}\"".format(limit))

    # Option 7/interact help text (opening print(''' not visible):
Makes interactive a specific test case id. The amount is the margin of execution steps
to be done. With amount of '0' the test case is selected to be interactive, but no step
is executed. Then you have to interact with positive amounts (usually 1), executing the
provided number of steps if they are ready and fulfill the needed conditions. The value
of 0, implies no execution steps margin, which could be useful to 'freeze' a test in the
middle of its execution. You could also provide -1 to make it non-interactive resuming
it from the current step.
By default, current test case id is selected for interaction.''')

    amount=input("Input the number of steps to execute (-1: resumes the test case; 0: freezes it) [1 by default]: ")
    if not amount: amount = 1
    test_id=input("Input the test id [current by default (remember to set a valid pointer with 'go to')]: ")
    if test_id: test_id = "|" + test_id
    adml_operation("\"test|interact|{}{}\"".format(amount, test_id))

    # Option reset help text (opening print(''' not visible):
Reset the test case for id provided, all the tests when missing. It could be hard/soft:
- hard: you probably may need to stop the load rate before. This operation initializes
all test cases regardless their states.
- soft: only for finished cases (those with 'Success' or 'Failed' states). It does not
affect to test cases with 'InProgress' state.''')

    rtype=input("Input the reset type (soft/hard) [hard]: ")
    if not rtype: rtype = "hard"
    test_id=input("Input the test id [all tests by default]: ")
    if test_id: test_id = "|" + test_id
    adml_operation("\"test|reset|{}{}\"".format(rtype, test_id))

    # Option repeats help text (opening print(''' not visible):
Restarts the whole programmed test list when finished the amount number of times (repeats
forever if value -1 is provided). This is disabled by default (amount = 0): testing trigger
system will enter suspended state until new ttps operation is received and a soft reset has
been done before. Test cases state & data will be reset (when achieved again), but general
statistics and counters will continue measuring until reset with 'collect' operation.''')

    amount=input("Input the number of cycle repeats (-1: repeats forever; 0: disables repetitions): ")
    adml_operation("\"test|repeats|{}\"".format(amount))

    # Option 10/collect help text (opening print(''' not visible):
Reset statistics and counters to start a new test stage of performance measurement.
This affects to final logs retrieved on exit (option 0).''')

    adml_operation("\"collect\"")

    # Option auto-reset help text (opening print(''' not visible):
When cycling, current test cases can be soft (default) or hard reset. If no timeout has
been configured for the test case, hard reset could prevent stuck on the next cycle for
those test cases still in progress.''')

    rtype=input("Input the reset type (soft/hard) [hard]: ")
    if not rtype: rtype = "hard"
    adml_operation("\"test|auto-reset|{}\"".format(rtype))

    # Option 12/reports help text (opening print(''' not visible):
Enables/disables report generation for a certain test case state: initialized, in-progress,
failed or success (also 'all' and 'none' reserved words could be used). This applies to report
summary (final logs retrieved) and automatic dumps during testing where only failed or
successful states will appear: every time a test case is finished its xml representation will
be dump on a file under (logs test-reports) with the name:

'cycle-<cycle id>.testcase-<test case id>.xml'.

By default, all the states are disabled to avoid IO overload. In most of cases not all the
tests are going to fail then you could enable only such failed dumps. Anyway you could set
the reports visibility to fit your needs in a given situation.

Also you could enable hexadecimal representation for diameter messages on reports.''')

    print("\nInput the report target operation, capital letters to enable:")
    rtype=input("(I/i)nitialized, in-(P/p)rogress, (F/f)ailed, (S/s)uccess, (A/a)ll, (N/n)one [A: all will be enabled by default]: ")
    if not rtype: rtype = "A"

    # Capital letter enables, lowercase disables; target assignments per
    # letter are in lines not visible here — confirm.
    upper_rtype = rtype.upper()
    if (upper_rtype == rtype): enable = "yes"
    if (upper_rtype == "I" ):
    elif (upper_rtype == "P"):
    elif (upper_rtype == "F"):
    elif (upper_rtype == "S"):
    elif (upper_rtype == "A"):
    elif (upper_rtype == "N"):

    print("Invalid target. Try again !")

    print(" - Target selected: " + target)
    print(" - Enable: " + enable)
    adml_operation("\"test|report|{}|{}\"".format(target, enable))

    enableHex=input("\nEnable/Disable hexadecimal dumps for diameter messages in test reports (yes/no) [no]: ")
    if not enableHex: enableHex = "no"
    adml_operation("\"test|report-hex|{}\"".format(enableHex))

    print("\nInvalid choice. Try again !")

    # Pause between menu iterations ('stay' is set in lines not visible here):
    if stay: input("\nPress ENTER to continue ...\n")
852 #####################
854 #####################
if __name__ == "__main__":
    # Main launcher flow: resolve agent dirs, parse CLI, build/keep the test
    # list, provision ADML operations, execute and collect results.
    # NOTE(review): the flow below is fragmented in this view; a 'try:' header
    # presumably wraps it (see the trailing 'except'), and several guards
    # ('if dryrun:', ADML start check, progress loop header) are not visible.
    abspath = os.path.abspath(__file__)
    dname = os.path.dirname(abspath)
    agents_KAFKA_dir = dname + "/agents/KAFKA"
    agents_HTTPMOCK_dir = dname + "/agents/HTTPMOCK"
    agents_ADML_dir = dname + "/agents/ADML"

    # Injected environment variables (could be used in system_cmd commands:
    os.putenv("AGENTS_DIR", dname + "/agents")

    arguments = parse_arguments()

    tdir = arguments.tests_dir
    keep = arguments.keep_list_if_exists
    stopAdml = arguments.stop_adml_at_the_end
    interactive = arguments.interactive
    dryrun = arguments.dry_run
    iplimit = arguments.ip_limit
    ttps = arguments.ttps

    # Tests list: 'launcher.list':
    llist = tdir + "/launcher.list"
    llist_current = llist + ".updated"

    abs_tdir = get_parent_dir(llist)
    llist_exists = os.path.exists(llist)

    # Create the current list of test cases:
    list_content = os.popen("find {} -mindepth 2 -name \"*.yml\" | sort -t'/'".format(tdir)).read()

    # Defaults for rate and in-progress limit:
    if not ttps: ttps = 50
    if not iplimit: iplimit = 1
    if iplimit == "auto":
        printMsg("AUTO IP-LIMIT ALGORITHM IS UNDER CONSTRUCTION (configuring value 1) ... ")

    # Compare the freshly generated list against the existing one:
    writeFileContent(llist_current, list_content)
    if filecmp.cmp(llist, llist_current):
        os.remove(llist_current)
    # (else branch header not visible — divergence is reported instead)
        printMsg("The list '{}' has been created".format(llist_current))
        printMsg("Warning: detected local changes (may be commented test cases) in '{}' regarding current tests list '{}'".format(llist, llist_current))

    # Respect existing list (except if -k is not provided):
    printMsg("Reuse the current list '{}'".format(llist))
    with open(llist) as llist_file:
        list_content = llist_file.read()

    if not llist_exists or not keep:
        writeFileContent(llist, list_content)
        printMsg("The list '{}' has been created".format(llist))

    # NOTE(review): 'global' at module level is a no-op; the initializations
    # of GTcNumber/GIdsVsDescs are in lines not visible here — confirm.
    global GProvisionContent

    GProvisionContent = ""

    # Build the ADML operations file content from the yaml test cases:
    provision_begin(iplimit)
    provision_tests(list_content)

    llist_of_operations = abs_tdir + "/launcher.list.adml_operations"
    lids = abs_tdir + "/launcher.ids"
    writeFileContent(llist_of_operations, GProvisionContent)
    writeFileContent(lids, GIdsVsDescs)

    # (presumably inside 'if dryrun:' — header not visible)
    if interactive: printMsg("Interactive mode is not reached at dry-run mode")
    _exit("Dry-run execution finished", 0)

    # (presumably inside an ADML start failure check — header not visible)
    _exit("Failed to initiate ADML director!", 1)

    # Provision test cases in ADML:
    adml_operation("-f " + llist_of_operations + " >/dev/null")

    interactive_execution()

    t_initial = datetime.datetime.now()

    # Start the test rate clock:
    adml_operation("\"test|ttps|" + str(ttps) + "\"")

    # Estimated time to finish: worst case is timeout (we assume maximum timeout of 15 seconds per test case):
    # worstTime = 15*GTcNumber

    # We will check the progress: initialized test cases must be 0 again (cycle completed).
    # But this only is guaranteed if all the tests define a timeout. If not, a wait step could stuck
    # any test eternally (worstTime is used to exit the following loop):
    maxCycles=15*GTcNumber

    # (progress-polling loop header and cycle increment not visible here)
    #time.sleep(sleepTime)

    output = adml_operation_output("\"test|finished\"")
    if "test|finished => {}".format(GTcNumber) in output: break
    if (cycle > maxCycles): break
    #printMsg("Checking progress ({}/{}) ...".format(cycle, maxCycles))
    if not (cycle % 60): printMsg("Working ...")

    t_final = datetime.datetime.now()
    t_elapsed = t_final - t_initial
    elapsedOUTPUT = "\nElapsed time in seconds: {}\n\n".format(t_elapsed.total_seconds())

    # Collect results at tests-directory:
    statesOUTPUT, verdictOUTPUT, verdictRC = collect_results(abs_tdir)

    # (presumably inside 'if stopAdml:' — header not visible)
    printMsg("Stoping ADML director ...")

    os.system(agents_ADML_dir + "/stop.sh >/dev/null")

    _exit(statesOUTPUT + elapsedOUTPUT + verdictOUTPUT, verdictRC)

except Exception as e: