2 # Anna Agents-Oriented Testing Setup Launcher
4 # Requires PyYAML: pip install pyyaml
6 import os, sys, datetime, glob, re, json, time, shutil, filecmp, datetime
7 from argparse import ArgumentParser
8 import xml.etree.ElementTree
9 #from pathlib import Path
11 from json import loads
14 from yaml import load, dump
16 from yaml import CLoader as Loader, CDumper as Dumper
18 from yaml import Loader, Dumper
23 class AttributeType(Enum):
24 Mandatory = "Mandatory"
def _exit(message = None, rc=0):
    # Log an optional final message and the return code before terminating.
    if (message): printMsg(message)
    printMsg("Exiting [rc={}]".format(rc))
    # NOTE(review): this extract drops the line(s) that follow; presumably
    # sys.exit(rc) is called here — confirm against the full source.
35 return '%b.%d.%Y-%H.%M.%S'
def printMsg(message):
    """Print *message* prefixed with a timestamp in the launcher's format."""
    stamp = datetime.datetime.now().strftime(timeFormat())
    print("[{}] {}".format(stamp, message))
42 class YamlConfigParser():
46 def __init__(self, yaml_config_file):
48 Convert the yaml file into a Python object
52 with open(yaml_config_file, 'r') as ss:
53 self.data = load(ss, Loader=Loader)
62 def show(self, options):
63 output = dump(self.data, Dumper=Dumper)
def writeFileContent(filename, content):
    # Overwrite *filename* with *content*.
    _file = open(filename, "w")
    # NOTE(review): the extract drops the line(s) that follow; presumably
    # _file.write(content) and _file.close() — confirm. A 'with' block
    # would be the safer form once the full source is available.
def getNodeValue(dictionary, key, attributeType = AttributeType.Mandatory):
    # Fetch dictionary[key]. A missing Mandatory key aborts the run via
    # _exit(); Optional lookups fall through (the value presumably stays
    # at a default and is returned — see notes below).
    if not isinstance(attributeType, AttributeType):
        raise TypeError("'attributeType' must be an instance of AttributeType Enum")
    # NOTE(review): the extract drops lines here — presumably the default
    # initialisation of 'value' and the 'try:' opener for the lookup below.
        value = dictionary[key]
    except Exception as e:
        if (attributeType == AttributeType.Mandatory): _exit ("Missing mandatory key: {}".format(e) , 1)
    #printMsg("getNodeValue -> {}: {}".format(key, value))
    # NOTE(review): the trailing 'return value' is also missing from this
    # extract — confirm against the full source.
def get_parent_dir(_file):
    """Return the directory containing *_file*, as an absolute path."""
    return os.path.dirname(os.path.abspath(_file))
def is_absolute_path(path):
    """Tell whether *path* is an absolute filesystem path."""
    absolute = os.path.isabs(path)
    return absolute
def provision_begin(iplimit):
    # Seed the global ADML provision script content with a 'test|report'
    # header; presumably the ip-limit derived from *iplimit* is appended
    # too — the tail of the triple-quoted block below lies on lines
    # missing from this extract.
    global GProvisionContent

    GProvisionContent += '''test|report
def assertFile(filepath):
    """Raise an Exception when *filepath* does not exist on disk."""
    if os.path.exists(filepath):
        return
    raise Exception("File '{}' not found !".format(filepath))
def assertAgentId(agent_id):
    # Map *agent_id* to its node-template name by probing for an
    # '<agent_id>.node' marker file inside each registered agents
    # directory; raises when no directory matches. The caller assigns the
    # result to 'agent_template', so each branch presumably returns the
    # template name (see notes below).
    global agents_ADML_dir
    global agents_KAFKA_dir
    global agents_HTTPMOCK_dir

    adml_node_file = agents_ADML_dir + "/{}.node".format(agent_id)
    kafka_node_file = agents_KAFKA_dir + "/{}.node".format(agent_id)
    httpmock_node_file = agents_HTTPMOCK_dir + "/{}.node".format(agent_id)
    if (os.path.exists(adml_node_file)):
        # NOTE(review): branch body missing from this extract — presumably
        # 'return "ADML"'; confirm against the full source.
    elif (os.path.exists(kafka_node_file)):
        # NOTE(review): branch body missing — presumably 'return "KAFKA"'.
    elif (os.path.exists(httpmock_node_file)):
        # NOTE(review): branch body missing — presumably 'return "HTTPMOCK"'.
    raise Exception ("Agent id '{}' not found as registered. Check the agents yaml configuration loaded !".format(agent_id))
129 def modify_xml_avp_data(stepNumber, arguments):
130 assertSupportedKeys(stepNumber, arguments, ["xml", "new_xml", "xpath_value_list"])
132 _xml = getNodeValue(arguments, "xml")
133 if not is_absolute_path(_xml): _xml = GTcDir + "/" + _xml
136 new_xml = getNodeValue(arguments, "new_xml")
137 if not is_absolute_path(new_xml): new_xml = GTcDir + "/" + new_xml
140 et = xml.etree.ElementTree.parse(_xml)
143 xv_list = getNodeValue(arguments, "xpath_value_list")
145 assertSupportedKeys(stepNumber, item, ["xpath", "value"])
146 xpath = getNodeValue(item, "xpath")
147 value = getNodeValue(item, "value")
150 targets = root.findall(xpath)
151 for target in targets:
152 if "data" in target.attrib:
153 target.attrib["data"] = value
154 elif "hex-data" in target.attrib:
155 target.attrib["hex-data"] = value
160 def modify_json_key_value(stepNumber, arguments):
161 assertSupportedKeys(stepNumber, arguments, ["json", "new_json", "kpath_value_list"])
163 _json = getNodeValue(arguments, "json")
164 if not is_absolute_path(_json): _json = GTcDir + "/" + _json
167 new_json = getNodeValue(arguments, "new_json")
168 if not is_absolute_path(new_json): new_json = GTcDir + "/" + new_json
171 with open(_json, 'r') as json_file:
172 json_data = json_file.read()
173 json_dict = json.loads(json_data)
175 kv_list = getNodeValue(arguments, "kpath_value_list")
177 assertSupportedKeys(stepNumber, item, ["kpath", "value"])
178 kpath = getNodeValue(item, "kpath")
179 value = getNodeValue(item, "value")
181 key_path_list = kpath.split(".")
182 elem = key_path_list[0]
183 if (len(key_path_list) == 1):
184 json_dict[elem] = value
187 aux_dict = json_dict[elem]
188 #print("aux dict: " + str(aux_dict))
189 for elem in key_path_list[1:-1]:
190 aux_dict = aux_dict[elem]
192 aux_dict[key_path_list[-1]] = value
195 content = json.dumps(json_dict)
196 writeFileContent(new_json, content)
199 def system_cmd(stepNumber, arguments):
200 global GProvisionContent
202 assertSupportedKeys(stepNumber, arguments, ["shell", "file", "file_parameters"])
204 shell = getNodeValue(arguments, "shell", AttributeType.Optional)
205 _file = getNodeValue(arguments, "file", AttributeType.Optional)
206 fp = getNodeValue(arguments, "file_parameters", AttributeType.Optional)
208 if (fp): file_parameters = fp
210 if (shell and _file):
211 raise Exception ("Both 'shell' and 'file' cannot be provided at system_cmd action !")
213 if (not shell and not _file):
214 raise Exception ("Neither 'shell' nor 'file' have been provided at system_cmd action !")
217 raise Exception ("Both 'shell' and 'file_parameters' cannot be provided at system_cmd action !")
220 GProvisionContent += 'test|{}|sh-command|{}\n'.format(GTcNumber, shell)
223 if not is_absolute_path(_file): _file = GTcDir + "/" + _file
225 GProvisionContent += 'test|{}|sh-command|{} {}\n'.format(GTcNumber, _file, file_parameters)
228 def process_test_case_step_GENERIC(stepNumber, action_id, arguments):
229 global GProvisionContent
233 if action_id == "sh_command":
234 GProvisionContent += 'test|{}|sh-command|{}\n'.format(GTcNumber, getNodeValue(arguments, "value"))
235 elif action_id == "ip_limit":
236 il = getNodeValue(arguments, "value")
239 if (il == "launcher"):
244 GProvisionContent += 'test|{}|ip-limit|{}\n'.format(GTcNumber, value)
246 elif action_id == "timeout_ms":
247 GProvisionContent += 'test|{}|timeout|{}\n'.format(GTcNumber, getNodeValue(arguments, "value"))
248 elif action_id == "delay_ms":
249 GProvisionContent += 'test|{}|delay|{}\n'.format(GTcNumber, getNodeValue(arguments, "value"))
250 elif action_id == "modify_xml_avp_data":
251 # Always create the step or step references will be impossible to determine:
252 GProvisionContent += 'test|{}|sh-command|echo "{}"\n'.format(GTcNumber, "Replacing xml file: " + str(arguments))
253 modify_xml_avp_data(stepNumber, arguments)
254 elif action_id == "modify_json_key_value":
255 # Always create the step or step references will be impossible to determine:
256 GProvisionContent += 'test|{}|sh-command|echo "{}"\n'.format(GTcNumber, "Replacing json file: " + str(arguments))
257 modify_json_key_value(stepNumber, arguments)
258 elif action_id == "system_cmd":
259 system_cmd(stepNumber, arguments)
261 raise Exception("ERROR: Step {}: unsupported generic action-id '{}'".format(stepNumber, action_id))
def assertSupportedKeys(stepNumber, arguments, supported):
    """Validate that every key in *arguments* belongs to *supported*.

    Raises an Exception naming the step and the offending key otherwise.
    (Idiom fix: 'key not in supported' instead of 'not key in supported'.)
    """
    for key in arguments:
        if key not in supported:
            raise Exception("ERROR: Step {}: unsupported argument '{}' (allowed: {})".format(stepNumber, key, str(supported)))
270 def process_test_case_step_ADML(stepNumber, agent_id, action_id, arguments):
271 global GProvisionContent
274 if action_id == "send_xml_to_entity":
275 assertSupportedKeys(stepNumber, arguments, ["xml", "answers_to"])
277 xml = getNodeValue(arguments, "xml")
278 if not is_absolute_path(xml): xml = GTcDir + "/" + xml
281 at = getNodeValue(arguments, "answers_to", AttributeType.Optional)
283 if (at): answers_to = "|{}".format(at)
285 GProvisionContent += 'test|{}|sendxml2e|{}{}\n'.format(GTcNumber, xml, answers_to)
287 elif action_id == "wait_xml_from_entity":
288 assertSupportedKeys(stepNumber, arguments, ["xml"])
290 xml = getNodeValue(arguments, "xml")
291 if not is_absolute_path(xml): xml = GTcDir + "/" + xml
294 GProvisionContent += 'test|{}|waitfe-xml|{}\n'.format(GTcNumber, xml)
296 elif action_id == "send_xml_to_client":
297 assertSupportedKeys(stepNumber, arguments, ["xml", "answers_to"])
299 xml = getNodeValue(arguments, "xml")
300 if not is_absolute_path(xml): xml = GTcDir + "/" + xml
303 at = getNodeValue(arguments, "answers_to", AttributeType.Optional)
305 if (at): answers_to = "|{}".format(at)
307 GProvisionContent += 'test|{}|sendxml2c|{}{}\n'.format(GTcNumber, xml, answers_to)
309 elif action_id == "wait_xml_from_client":
310 assertSupportedKeys(stepNumber, arguments, ["xml"])
312 xml = getNodeValue(arguments, "xml")
313 if not is_absolute_path(xml): xml = GTcDir + "/" + xml
316 GProvisionContent += 'test|{}|waitfc-xml|{}\n'.format(GTcNumber, xml)
319 raise Exception("ERROR: Step {}: unsupported generic action-id '{}' for ADML node type (agent '{}')".format(stepNumber, action_id, agent_id))
322 def process_test_case_step_HTTPMOCK(stepNumber, agent_id, action_id, arguments):
323 global GProvisionContent
325 global agents_HTTPMOCK_dir
327 if action_id == "serve_json":
328 assertSupportedKeys(stepNumber, arguments, ["json", "method", "uri"])
330 json = getNodeValue(arguments, "json")
331 if not is_absolute_path(json): json = GTcDir + "/" + json
334 method = getNodeValue(arguments, "method")
335 uri = getNodeValue(arguments, "uri")
338 # Provision script is in the form: agents/HTTPMOCK/<agent-id>-provision.sh
339 provision_script = agents_HTTPMOCK_dir + "/" + agent_id + "-provision.sh "
340 args = str(GTcNumber) + " " + str(stepNumber) + " " + GTcDir + " \"" + json + "\" " + method + " \"" + uri + "\""
341 GProvisionContent += 'test|{}|sh-command|{}\n'.format(GTcNumber, provision_script + args)
344 raise Exception("ERROR: Step {}: unsupported generic action-id '{}' for HTTPMOCK node type (agent '{}')".format(stepNumber, action_id, agent_id))
347 def process_test_case_step_KAFKA(stepNumber, agent_id, action_id, arguments):
348 global GProvisionContent
350 global agents_KAFKA_dir
352 if action_id == "consume_json":
353 assertSupportedKeys(stepNumber, arguments, ["json", "timeout", "auto_offset_reset", "background", "debug"])
355 json = getNodeValue(arguments, "json")
357 if not is_absolute_path(json): json = GTcDir + "/" + json
361 aor = getNodeValue(arguments, "auto_offset_reset", AttributeType.Optional)
362 if (aor): autoOffsetReset = aor
365 to = getNodeValue(arguments, "timeout", AttributeType.Optional)
369 bck = getNodeValue(arguments, "background", AttributeType.Optional)
370 if (bck == "yes"): background = "&"
373 deb = getNodeValue(arguments, "debug", AttributeType.Optional)
374 if (deb): debug = deb
376 # Consumer script is in the form: agents/KAFKA/<agent-id>-consumer.sh
377 consumer_script = agents_KAFKA_dir + "/" + agent_id + "-consumer.sh "
378 args = json + " " + autoOffsetReset + " " + str(timeout) + " " + debug
379 GProvisionContent += 'test|{}|sh-command|{}{}\n'.format(GTcNumber, consumer_script + args, background)
381 elif action_id == "produce_json":
382 assertSupportedKeys(stepNumber, arguments, ["json", "delay_ms", "background", "debug"])
384 json = getNodeValue(arguments, "json")
385 if not is_absolute_path(json): json = GTcDir + "/" + json
389 dl = getNodeValue(arguments, "delay_ms", AttributeType.Optional)
390 if(dl): delay_ms = dl
393 bck = getNodeValue(arguments, "background", AttributeType.Optional)
394 if (bck == "yes"): background = "&"
397 deb = getNodeValue(arguments, "debug", AttributeType.Optional)
398 if (deb): debug = deb
400 # Producer script is in the form: agents/KAFKA/<agent-id>-producer.sh
401 producer_script = agents_KAFKA_dir + "/" + agent_id + "-producer.sh "
402 args = json + " " + str(delay_ms) + " " + debug
403 GProvisionContent += 'test|{}|sh-command|{}{}\n'.format(GTcNumber, producer_script + args, background)
405 elif action_id == "admin":
406 assertSupportedKeys(stepNumber, arguments, ["operation", "debug"])
408 operation = getNodeValue(arguments, "operation")
411 deb = getNodeValue(arguments, "debug", AttributeType.Optional)
412 if (deb): debug = deb
414 # Admin script is in the form: agents/KAFKA/<agent-id>-admin.sh
415 admin_script = agents_KAFKA_dir + "/" + agent_id + "-admin.sh "
416 args = operation + " " + debug
417 GProvisionContent += 'test|{}|sh-command|{}\n'.format(GTcNumber, admin_script + args)
420 raise Exception("ERROR: Step {}: unsupported generic action-id '{}' for KAFKA node type (agent '{}')".format(stepNumber, action_id, agent_id))
423 def process_test_case_step(stepNumber, dictionary):
425 action = getNodeValue(dictionary, "action")
426 arguments = getNodeValue(dictionary, "arguments")
428 # Action is in the form '[agent id/]<action id>':
430 # MANDATORY: action_id
432 agent_template = None
434 agent_id, action_id = action.split('/')
435 agent_template = assertAgentId(agent_id)
442 if agent_template == "ADML":
443 process_test_case_step_ADML(stepNumber, agent_id, action_id, arguments)
444 elif agent_template == "KAFKA":
445 process_test_case_step_KAFKA(stepNumber, agent_id, action_id, arguments)
446 elif agent_template == "HTTPMOCK":
447 process_test_case_step_HTTPMOCK(stepNumber, agent_id, action_id, arguments)
449 process_test_case_step_GENERIC(stepNumber, action_id, arguments)
451 #trace = "Step {}, Agent-Id '{}', Action-Id '{}', Parameters: {}"
452 #print(trace.format(stepNumber, str(agent_id), action_id, str(arguments)))
def process_test_case_yml(testcaseList):
    """Provision every step of a test case definition (a list of step dicts).

    Steps are numbered starting at 1. enumerate() replaces the original
    'testcaseList.index(step)' lookup, which returned the FIRST matching
    element — mis-numbering any duplicated step — and rescanned the list
    on every iteration (O(n^2)).
    """
    for stepNumber, step in enumerate(testcaseList, start=1):
        process_test_case_step(stepNumber, step)
461 def provision_test_case(filename, testcaseList):
466 global GProvisionContent
468 GTcDir = get_parent_dir(filename)
470 id_desc = "{} : {}".format(GTcNumber, filename)
471 GIdsVsDescs += id_desc + "\n"
472 tc_desc = "test case '{}'".format(id_desc)
474 printMsg("Provisioning {} ...".format(tc_desc))
476 # Set test case description
477 GProvisionContent += 'test|{}|description|{}\n'.format(GTcNumber, filename)
479 # Process the yml definition for the test case
480 process_test_case_yml(testcaseList)
def provision_tests(files):
    # Provision every test case listed in *files* (newline-separated
    # paths); lines starting with '#' are reported as commented-out.
    for filename in files.splitlines():
        # NOTE(review): 'filename[0]' raises IndexError on an empty line —
        # worth guarding once the full source is available.
        if (filename[0] == "#"):
            printMsg("Ignoring commented test case: '{}'".format(filename))
            # NOTE(review): the extract drops line(s) here — presumably a
            # 'continue' so commented entries are not provisioned.
        # Test case is a list of steps:
        tc = YamlConfigParser(filename)
        provision_test_case(filename, tc.getData())
493 def parse_arguments():
495 parser = ArgumentParser(description='Anna Agents-Oriented Testing Setup Launcher')
496 parser.add_argument('-t', '--tests-dir', help='Tests parent directory where to find .yml files (from the next directories level)', required=True)
497 parser.add_argument('-k', '--keep-list-if-exists', help='Keeps intact the list of test cases (<test-dir>/launcher.list), creates it if missing', required=False, action='store_true')
498 parser.add_argument('-s', '--stop-adml-at-the-end', help='At the end, ADML keeps running to ease debugging. You could force stop with this option', required=False, action='store_true')
499 parser.add_argument('-i', '--interactive', help='Interactive execution to ease debugging of test cases', required=False, action='store_true')
500 parser.add_argument('-d', '--dry-run', help='Used to test and debug provision, no execution is launched', required=False, action='store_true')
501 parser.add_argument('-p', '--ip-limit', help="In-Progress limit is the number of coexisting In-Progress State test cases. Defaults to 1 (sequential), -1 would be 'no limit').", required=False)
502 parser.add_argument('-r', '--ttps', help="Rate of test cases launched (test ticks per second). By default 50 (recommended for monothread version).", required=False)
505 arguments = parser.parse_args()
511 # At the moment, only ADML is started (KAFKA/HTTPMOCK agents uses scripts):
512 global agents_ADML_dir
517 os.system(agents_ADML_dir + "/stop.sh")
518 os.system(agents_ADML_dir + "/start.sh")
520 rc = adml_operation("node >/dev/null")
523 if (retry > maxRetries): break
525 printMsg("Check ADML health retry ({}/{}) ...".format(retry, maxRetries))
526 rc = adml_operation("node >/dev/null")
def adml_operation(arguments):
    # Run the ADML agent's operation.sh with *arguments* appended; uses
    # the global ADML agents directory resolved at startup.
    global agents_ADML_dir

    rc = os.system(agents_ADML_dir + "/operation.sh " + arguments)
    # NOTE(review): the extract drops the trailing line(s); presumably the
    # os.system() status is normalised and returned as 'rc' — confirm.
def adml_operation_output(operation):
    # Run operation.sh capturing its stdout via os.popen (unlike
    # adml_operation(), which only yields the exit status).
    global agents_ADML_dir

    output = os.popen(agents_ADML_dir + "/operation.sh {}".format(operation)).read()
    # NOTE(review): the extract drops the trailing line(s); presumably
    # 'return output' — confirm against the full source.
545 def collect_results(abs_tdir):
546 global agents_ADML_dir
549 logs_dir = abs_tdir + ".logs"
550 shutil.rmtree(logs_dir, ignore_errors=True)
552 os.mkdir(logs_dir + "/traffic")
553 os.mkdir(logs_dir + "/counters")
554 os.mkdir(logs_dir + "/test-reports")
555 os.mkdir(logs_dir + "/debug")
559 printMsg("Retrieving tests summary states ...")
560 statesOUTPUT = adml_operation_output("\"test|summary-states\"")
561 writeFileContent(logs_dir + "/tests.summary-states", statesOUTPUT)
565 printMsg("Retrieving tests summary counts ...")
566 verdictOUTPUT = adml_operation_output("\"test|summary-counts\"")
567 writeFileContent(logs_dir + "/tests.summary-counts", verdictOUTPUT)
569 if "PASS" in verdictOUTPUT: verdictRC = 0
573 printMsg("Retrieving diameter traffic logs ...")
574 for f in glob.glob(agents_ADML_dir + "/*.log"):
575 shutil.copy(f, logs_dir + "/traffic")
576 for f in glob.glob(agents_ADML_dir + "/*.xml"):
577 shutil.copy(f, logs_dir + "/traffic")
579 printMsg("Retrieving kafka traffic logs ...")
580 for f in glob.glob(agents_KAFKA_dir + "/*.log"):
581 shutil.copy(f, logs_dir + "/traffic")
582 for f in glob.glob(agents_KAFKA_dir + "/*.json"):
583 shutil.copy(f, logs_dir + "/traffic")
586 #printMsg("Retrieving httpmock activity logs ...")
589 printMsg("Generating junit report ...")
590 writeFileContent(logs_dir + "/junit.xml", adml_operation_output("\"test|junit\""))
591 printMsg("Generating tests summary ...")
592 writeFileContent(logs_dir + "/tests.summary", adml_operation_output("\"test|summary\""))
594 printMsg("Generating tests oam reports ...")
595 writeFileContent(logs_dir + "/tests.oam", adml_operation_output("\"show-oam\""))
596 printMsg("Generating tests statistics ...")
597 writeFileContent(logs_dir + "/tests.stats", adml_operation_output("\"show-stats\""))
598 printMsg("Dumping ADML process context information ...")
599 adml_operation("\"context|{}\" >/dev/null".format(logs_dir + "/adml.context"))
600 adml_operation("forceCountersRecord >/dev/null")
602 # Move 'counters' and 'test-reports'
603 printMsg("Retrieving counters information ...")
604 for f in glob.glob(agents_ADML_dir + "/counters/*"):
605 shutil.copy(f, logs_dir + "/counters")
606 printMsg("Retrieving tests reports information ...")
607 for f in glob.glob(agents_ADML_dir + "/test-reports/*"):
608 shutil.copy(f, logs_dir + "/test-reports")
611 printMsg("Retrieving debug information ...")
612 for f in glob.glob(agents_ADML_dir + "/launcher.trace*"):
613 shutil.copy(f, logs_dir + "/debug")
615 # Backup tests directory itself (some replacements could be happened):
616 target_once_executed = logs_dir + "/debug/" + os.path.basename(abs_tdir)
617 shutil.copytree(abs_tdir, target_once_executed) # , symlinks=True)
619 return statesOUTPUT, verdictOUTPUT, verdictRC
622 def interactive_execution():
623 printMsg("Starting interactive execution of test cases")
627 print("--------------------------------------------")
628 adml_operation("\"test|summary-counts\"")
629 print("--------------------------------------------")
632 MAIN INTERACTIVE MENU
633 =====================
634 (prefix option with 'h' to get detailed help)
643 Test cases execution:
644 3. Start with test rate
645 4. Start next N test cases
648 Low level execution: test case steps
649 6. Execute next N steps
654 9. Reset statistics & counters
656 11. Reports configuration
659 opt=input("Input option: \n")
661 print("Just exit this menu")
666 print("Updates the current test pointer position.")
668 _id=input("Input the test case id: ")
669 adml_operation("\"test|goto|{}\"".format(_id))
672 print("Show programmed test case for id provided, current 'in-process' test case when missing.")
674 _id=input("Input the test case id [empty will show current]: ")
675 if _id: _id="|" + _id
676 adml_operation("\"test|look{}\"".format(_id))
680 Starts/resume the provided number of test ticks per second (ttps). The ADML starts
681 with the event trigger system suspended, and this operation is neccessary to begin
682 those cases which need this time event (internal triggering). Some other test cases
683 could be started through external events (first test case event could be programmed
684 to wait specific message), but is not usual this external mode and neither usual to
685 mix triggering types. Normally, you will pause/stop new test launchs providing 0 as
686 ttps value, and also you could dynamically modify the load rate updating that value.
687 If a test case has N messages then 'ttps * N' will be the virtual number of messages
688 managed per second when no bottleneck exists.
690 Provide 0 in order to stop the timer triggering.
692 The timer manager resolution currently harcoded allows a maximum of 50 events
693 per second. To reach greater rates ADML will join synchronously the needed number of
694 new time-triggered test cases per a single event, writting a warning-level trace to
695 advice about the risk of burst sendings and recommend launching multiple instances
696 to achieve such load with a lower rate per instance.''')
698 ttps=input("Input the test ticks per second (0: stops the test rate clock): ")
699 adml_operation("\"test|ttps|{}\"".format(ttps))
703 Forces the execution of the next test case(s) without waiting for test manager tick.
704 Provide an integer value for 'sync-amount' to send a burst synchronous amount of the
705 next programmed test cases (1 by default). This event works regardless the timer tick
706 function, but it is normally used with the test manager tick stopped.''')
708 amount=input("Input the number of tests to execute synchronously [1 by default]: ")
709 if not amount: amount=1
710 adml_operation("\"test|next|{}\"".format(amount))
714 In-progress limit of test cases. No new test cases will be launched over this value
715 (test Manager tick work will be ignored). Zero-value is equivalent to stop the clock.
716 tick, -1 is used to specify 'no limit'. If missing amount, the limit and current
717 amount of in-progress test cases will be shown.
719 Default is 1 to ensure sequential execution for testcases (function test mode).
720 For system test, it is recommended to set '-1' (no limit).''')
722 print("\nTypical 'in-progress limit' values:")
723 print("-1: no limit")
724 print(" 0: similar to stop the test rate clock")
725 print(" 1: sequential execution for testcases (function test mode)\n")
726 limit=input("Input the desired in-progress limit amount [show current by default]: ")
727 if limit: limit = "|" + limit
728 adml_operation("\"test|ip-limit{}\"".format(limit))
732 Makes interactive a specific test case id. The amount is the margin of execution steps
733 to be done. With amount of '0' the test case is selected to be interactive, but no step
734 is executed. Then you have to interact with positive amounts (usually 1), executing the
735 provided number of steps if they are ready and fulfill the needed conditions. The value
736 of 0, implies no execution steps margin, which could be useful to 'freeze' a test in the
737 middle of its execution. You could also provide -1 to make it non-interactive resuming
738 it from the current step.
739 By default, current test case id is selected for interaction.''')
741 amount=input("Input the number of steps to execute (-1: resumes the test case; 0: freezes it) [1 by default]: ")
742 if not amount: amount = 1
743 test_id=input("Input the test id [current by default (remember to set a valid pointer with 'go to')]: ")
744 if test_id: test_id = "|" + test_id
745 adml_operation("\"test|interact|{}{}\"".format(amount, test_id))
749 Reset the test case for id provided, all the tests when missing. It could be hard/soft:
750 - hard: you probably may need to stop the load rate before. This operation initializes
751 all test cases regardless their states.
752 - soft: only for finished cases (those with 'Success' or 'Failed' states). It does not
753 affect to test cases with 'InProgress' state.''')
755 rtype=input("Input the reset type (soft/hard) [hard]: ")
756 if not rtype: rtype = "hard"
757 test_id=input("Input the test id [all tests by default]: ")
758 if test_id: test_id = "|" + test_id
759 adml_operation("\"test|reset|{}{}\"".format(rtype, test_id))
763 Restarts the whole programmed test list when finished the amount number of times (repeats
764 forever if value -1 is provided). This is disabled by default (amount = 0): testing trigger
765 system will enter suspended state until new ttps operation is received and a soft reset has
766 been done before. Test cases state & data will be reset (when achieved again), but general
767 statistics and counters will continue measuring until reset with 'collect' operation.''')
769 amount=input("Input the number of cycle repeats (-1: repeats forever; 0: disables repetitions): ")
770 adml_operation("\"test|repeats|{}\"".format(amount))
774 Reset statistics and counters to start a new test stage of performance measurement.
775 This affects to final logs retrieved on exit (option 0).''')
777 adml_operation("\"collect\"")
781 When cycling, current test cases can be soft (default) or hard reset. If no timeout has
782 been configured for the test case, hard reset could prevent stuck on the next cycle for
783 those test cases still in progress.''')
785 rtype=input("Input the reset type (soft/hard) [hard]: ")
786 if not rtype: rtype = "hard"
787 adml_operation("\"test|auto-reset|{}\"".format(rtype))
791 Enables/disables report generation for a certain test case state: initialized, in-progress,
792 failed or success (also 'all' and 'none' reserved words could be used). This applies to report
793 summary (final logs retrieved) and automatic dumps during testing where only failed or
794 successful states will appear: every time a test case is finished its xml representation will
795 be dump on a file under (logs test-reports) with the name:
797 'cycle-<cycle id>.testcase-<test case id>.xml'.
799 By default, all the states are disabled to avoid IO overload. In most of cases not all the
800 tests are going to fail then you could enable only such failed dumps. Anyway you could set
801 the reports visibility to fit your needs in a given situation.
803 Also you could enable hexadecimal representation for diameter messages on reports.''')
805 print("\nInput the report target operation, capital letters to enable:")
806 rtype=input("(I/i)nitialized, in-(P/p)rogress, (F/f)ailed, (S/s)uccess, (A/a)ll, (N/n)one [A: all will be enabled by default]: ")
807 if not rtype: rtype = "A"
810 upper_rtype = rtype.upper()
811 if (upper_rtype == rtype): enable = "yes"
812 if (upper_rtype == "I" ):
814 elif (upper_rtype == "P"):
816 elif (upper_rtype == "F"):
818 elif (upper_rtype == "S"):
820 elif (upper_rtype == "A"):
822 elif (upper_rtype == "N"):
826 print("Invalid target. Try again !")
828 print(" - Target selected: " + target)
829 print(" - Enable: " + enable)
830 adml_operation("\"test|report|{}|{}\"".format(target, enable))
832 enableHex=input("\nEnable/Disable hexadecimal dumps for diameter messages in test reports (yes/no) [no]: ")
833 if not enableHex: enableHex = "no"
834 adml_operation("\"test|report-hex|{}\"".format(enableHex))
837 print("\nInvalid choice. Try again !")
840 if stay: input("\nPress ENTER to continue ...\n")
844 #####################
846 #####################
848 if __name__ == "__main__":
851 abspath = os.path.abspath(__file__)
852 dname = os.path.dirname(abspath)
853 agents_KAFKA_dir = dname + "/agents/KAFKA"
854 agents_HTTPMOCK_dir = dname + "/agents/HTTPMOCK"
855 agents_ADML_dir = dname + "/agents/ADML"
857 # Injected environment variables (could be used in system_cmd commands:
858 os.putenv("AGENTS_DIR", dname + "/agents")
860 arguments = parse_arguments()
862 tdir = arguments.tests_dir
863 keep = arguments.keep_list_if_exists
864 stopAdml = arguments.stop_adml_at_the_end
865 interactive = arguments.interactive
866 dryrun = arguments.dry_run
867 iplimit = arguments.ip_limit
868 ttps = arguments.ttps
870 # Tests list: 'launcher.list':
871 llist = tdir + "/launcher.list"
872 llist_current = llist + ".updated"
875 abs_tdir = get_parent_dir(llist)
876 llist_exists = os.path.exists(llist)
878 # Create the current list of test cases:
880 list_content = os.popen("find {} -mindepth 2 -name \"*.yml\" | sort -t'/'".format(tdir)).read()
883 if not ttps: ttps = 50
884 if not iplimit: iplimit = 1
885 if iplimit == "auto":
886 printMsg("AUTO IP-LIMIT ALGORITHM IS UNDER CONSTRUCTION (configuring value 1) ... ")
894 writeFileContent(llist_current, list_content)
895 if filecmp.cmp(llist, llist_current):
896 os.remove(llist_current)
898 printMsg("The list '{}' has been created".format(llist_current))
899 printMsg("Warning: detected local changes (may be commented test cases) in '{}' regarding current tests list '{}'".format(llist, llist_current))
901 # Respect existing list (except if -k is not provided):
903 printMsg("Reuse the current list '{}'".format(llist))
904 with open(llist) as llist_file:
905 list_content = llist_file.read()
907 if not llist_exists or not keep:
908 writeFileContent(llist, list_content)
909 printMsg("The list '{}' has been created".format(llist))
914 global GProvisionContent
917 GProvisionContent = ""
920 provision_begin(iplimit)
921 provision_tests(list_content)
923 llist_of_operations = abs_tdir + "/launcher.list.adml_operations"
924 lids = abs_tdir + "/launcher.ids"
925 writeFileContent(llist_of_operations, GProvisionContent)
926 writeFileContent(lids, GIdsVsDescs)
929 if interactive: printMsg("Interactive mode is not reached at dry-run mode")
930 _exit("Dry-run execution finished", 0)
934 _exit("Failed to initiate ADML director!", 1)
936 # Provision test cases in ADML:
937 adml_operation("-f " + llist_of_operations + " >/dev/null")
941 interactive_execution()
945 t_initial = datetime.datetime.now()
948 adml_operation("\"test|ttps|" + str(ttps) + "\"")
950 # Estimated time to finish: worst case is timeout (we assume maximum timeout of 15 seconds per test case):
951 # worstTime = 15*GTcNumber
953 # We will check the progress: initialized test cases must be 0 again (cycle completed).
954 # But this only is guaranteed if all the tests define a timeout. If not, a wait step could stuck
955 # any test eternally (worstTime is used to exit the following loop):
961 maxCycles=15*GTcNumber
965 #time.sleep(sleepTime)
967 output = adml_operation_output("\"test|finished\"")
968 if "test|finished => {}".format(GTcNumber) in output: break
969 if (cycle > maxCycles): break
970 #printMsg("Checking progress ({}/{}) ...".format(cycle, maxCycles))
971 if not (cycle % 60): printMsg("Working ...")
974 t_final = datetime.datetime.now()
975 t_elapsed = t_final - t_initial
976 elapsedOUTPUT = "\nElapsed time in seconds: {}\n\n".format(t_elapsed.total_seconds())
978 # Collect results at tests-directory:
979 statesOUTPUT, verdictOUTPUT, verdictRC = collect_results(abs_tdir)
983 printMsg("Stoping ADML director ...")
985 os.system(agents_ADML_dir + "/stop.sh >/dev/null")
988 _exit(statesOUTPUT + elapsedOUTPUT + verdictOUTPUT, verdictRC)
990 except Exception as e: