From 080dc0740d8b02011dee032f5d44eeb5f2ffe23f Mon Sep 17 00:00:00 2001
From: "Eduardo Ramos Testillano (eramedu)"
Date: Tue, 12 May 2020 02:14:37 +0200
Subject: [PATCH] Add fourth work package for REST API implementation

Implementation completed with the last FSM testing use cases.
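
A quick manual smoke test of the new operations, assuming an ADML
instance with the REST service enabled (host and port below are
illustrative; adjust them to your deployment):

  curl -X POST -H "Content-Type: application/json" \
       -d '{"amount":5}' http://localhost:8074/test-ttps
  curl -X POST -H "Content-Type: application/json" \
       -d '{}' http://localhost:8074/test-next
  curl -X POST -H "Content-Type: application/json" \
       -d '{"targetFile":"/tmp/junit.xml"}' http://localhost:8074/test-junit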
---
 example/diameter/launcher/EventOperation.cpp  | 247 +++++++++++++++---
 example/diameter/launcher/EventOperation.hpp  |   2 +-
 example/diameter/launcher/Launcher.cpp        |   7 +-
 example/diameter/launcher/MyHandler.cpp       |  23 +-
 example/diameter/launcher/resources/HELP.md   |  30 ++-
 .../ct/fsm-testing/test-auto-reset_test.py    |  19 ++
 .../ct/fsm-testing/test-clear_test.py         |   2 +-
 .../ct/fsm-testing/test-dump-stdout_test.py   |  21 ++
 .../ct/fsm-testing/test-finished_test.py      |  23 ++
 .../rest_api/ct/fsm-testing/test-goto_test.py |  50 ++++
 .../ct/fsm-testing/test-initialized_test.py   |  23 ++
 .../ct/fsm-testing/test-interact_test.py      |  46 ++++
 .../ct/fsm-testing/test-ip-limit_test.py      |  25 ++
 .../ct/fsm-testing/test-junit_test.py         |  36 +++
 .../rest_api/ct/fsm-testing/test-look_test.py |  33 +++
 .../rest_api/ct/fsm-testing/test-next_test.py |  93 +++++++
 .../ct/fsm-testing/test-repeats_test.py       |  19 ++
 .../ct/fsm-testing/test-report-hex_test.py    |  21 ++
 .../ct/fsm-testing/test-report_test.py        |  36 +++
 .../ct/fsm-testing/test-reset_test.py         |  18 ++
 .../rest_api/ct/fsm-testing/test-run_test.py  |  39 +++
 .../ct/fsm-testing/test-state_test.py         |  32 +++
 .../fsm-testing/test-summary-counts_test.py   |  13 +
 .../fsm-testing/test-summary-states_test.py   |  13 +
 .../ct/fsm-testing/test-summary_test.py       |  14 +
 .../rest_api/ct/fsm-testing/test-ttps_test.py |  34 +++
 .../rest_api/ct/resources/look_output.txt     |   1 +
 .../ct/resources/summary-counts_output.txt    |   8 +
 .../ct/resources/summary-states_output.txt    |   4 +
 source/testing/TestManager.cpp                |   5 +-
 30 files changed, 871 insertions(+), 66 deletions(-)
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-auto-reset_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-dump-stdout_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-finished_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-goto_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-initialized_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-interact_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-ip-limit_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-junit_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-look_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-next_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-repeats_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-report-hex_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-report_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-reset_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-run_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-state_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-summary-counts_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-summary-states_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-summary_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-ttps_test.py
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/resources/look_output.txt
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/resources/summary-counts_output.txt
 create mode 100644 example/diameter/launcher/resources/rest_api/ct/resources/summary-states_output.txt

diff --git a/example/diameter/launcher/EventOperation.cpp b/example/diameter/launcher/EventOperation.cpp
index 7709fb1..8d8a97d 100644
--- a/example/diameter/launcher/EventOperation.cpp
+++ b/example/diameter/launcher/EventOperation.cpp
@@ -20,6 +20,7 @@
 //// Standard
 //#include <sstream>   // std::istringstream
 //#include <iostream>  // std::cout
+#include <fstream>
 //#include <cmath>     // ceil
 //#include
 #include <unistd.h>  // chdir
@@ -857,26 +858,59 @@ bool EventOperation::test_id__waitfefc(std::string &response, unsigned int id, b
 bool EventOperation::test__ttps(std::string &response, int amount) {
 
   Launcher& my_app = static_cast<Launcher&>(anna::app::functions::getApp());
+  anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
 
+  bool success = testManager.configureTTPS(amount);
+  if (success) {
+    response = "Assigned new test launch rate to ";
+    response += anna::functions::asString(amount);
+    response += " events per second";
+  }
+  else {
+    response += "unable to configure the test rate provided";
+  }
 
-
-  return true; // OK
+  return success; // OK
 }
 
 bool EventOperation::test__next(std::string &response, int syncAmount) {
 
   Launcher& my_app = static_cast<Launcher&>(anna::app::functions::getApp());
+  anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
 
+  if (syncAmount < 1) {
+    response += "the parameter 'sync-amount' must be a positive integer value";
+    return false;
+  }
 
+  bool success = testManager.execTestCases(syncAmount);
 
-  return true; // OK
+  response = (success ? "P" : "Not completely p" /* completed cycle and no repeats, rare case */);
+  response += "rocessed ";
+  response += anna::functions::asString(syncAmount);
+  response += ((syncAmount > 1) ? " test cases synchronously" : " test case");
+
+  return success;
 }
 
 bool EventOperation::test__ip_limit(std::string &response, int amount) {
 
   Launcher& my_app = static_cast<Launcher&>(anna::app::functions::getApp());
+  anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
 
-
+  if (amount > -2) {
+    testManager.setInProgressLimit(amount);
+    response = "New in-progress limit: ";
+    response += (amount != -1) ? anna::functions::asString(amount) : "";
+  }
+  else {
+    response = "In-progress limit amount: ";
+    int limit = testManager.getInProgressLimit();
+    response += (limit != -1) ? anna::functions::asString(limit) : "";
+    response += "; currently there are ";
+    response += anna::functions::asString(testManager.getInProgressCount());
+    response += " test cases running";
+  }
 
   return true; // OK
 }
@@ -884,26 +918,64 @@ bool EventOperation::test__ip_limit(std::string &response, int amount) {
 bool EventOperation::test__goto(std::string &response, int id) {
 
   Launcher& my_app = static_cast<Launcher&>(anna::app::functions::getApp());
+  anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
 
+  bool success = testManager.gotoTestCase(id);
+  if (success) {
+    response = "Position updated for id provided (";
+  }
+  else {
+    response += "cannot find test id (";
+  }
+  response += anna::functions::asString(id);
+  response += ")";
 
-  return true; // OK
+  return success;
 }
 
 bool EventOperation::test__run(std::string &response, int id) {
 
   Launcher& my_app = static_cast<Launcher&>(anna::app::functions::getApp());
+  anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
 
+  bool success = testManager.runTestCase(id);
+  if (success) {
+    response = "Test executed for id provided (";
+  }
+  else {
+    response += "cannot find test id (";
+  }
+  response += anna::functions::asString(id);
+  response += ")";
 
-  return true; // OK
+  return success;
 }
 
 bool EventOperation::test__look(std::string &response, int id) {
 
   Launcher& my_app = static_cast<Launcher&>(anna::app::functions::getApp());
+  anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
 
+  anna::testing::TestCase *testCase = testManager.findTestCase(id);
+  if (!testCase) {
+    if (id == -1) {
+      response += "no current test case detected (testing started?)";
+    }
+    else {
+      response += "cannot find test id (";
+      response += anna::functions::asString(id);
+      response += ")";
+    }
+    return false;
+  }
+
+  if (a_http)
+    response = anna::functions::encodeBase64(testCase->asXMLString());
+  else
+    response = testCase->asXMLString();
 
   return true; // OK
 }
@@ -911,26 +983,89 @@ bool EventOperation::test__look(std::string &response, int id) {
 bool EventOperation::test__state(std::string &response, int id) {
 
   Launcher& my_app = static_cast<Launcher&>(anna::app::functions::getApp());
+  anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
 
+  anna::testing::TestCase *testCase = testManager.findTestCase(id);
+  if (!testCase) {
+    if (id == -1) {
+      response += "no current test case detected (testing started?)";
+    }
+    else {
+      response += "cannot find test id (";
+      response += anna::functions::asString(id);
+      response += ")";
+    }
+    return false;
+  }
 
-  return true; // OK
+  response = anna::testing::TestCase::asText(testCase->getState());
+  return testCase->isSuccess();
 }
 
 bool EventOperation::test__interact(std::string &response, int amount, unsigned int id) {
 
   Launcher& my_app = static_cast<Launcher&>(anna::app::functions::getApp());
+  anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
 
+  if (amount < -1) {
+    response += "interactive amount must be -1 (to disable interactive mode) or a positive number.";
+    return false;
+  }
 
+  anna::testing::TestCase *testCase = testManager.findTestCase(id);
+  if (testCase) {
+    if (amount == -1) {
+      testCase->makeInteractive(false);
+      response = "Interactive mode disabled";
+    }
+    else {
+      testCase->addInteractiveAmount(amount);
+      response = "Added interactive amount of ";
+      response += anna::functions::asString(amount);
+      response += " units";
+      if (amount == 0) response += " (0: freezing a non-interactive testcase, no effect on already interactive)";
interactive)"; + } + response += " for test case id "; + response += anna::functions::asString(id); + } + else { + response += "cannot found test id ("; + response += anna::functions::asString(id); + response += ")"; + return false; + } return true; // OK } -bool EventOperation::test__reset(std::string &response, bool soft_hard, unsigned int id) { +bool EventOperation::test__reset(std::string &response, bool soft_hard, int id) { Launcher& my_app = static_cast (anna::app::functions::getApp()); + anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate(); - + anna::testing::TestCase *testCase = ((id != -1) ? testManager.findTestCase(id) : NULL); + if (testCase) { + bool done = testCase->reset(!soft_hard); + response = "Test "; + response += (soft_hard ? "soft":"hard"); + response += " reset for id "; + response += anna::functions::asString(id); + response += done ? ": done": ": not done"; + } + else { + if (id == -1) { + bool anyReset = testManager.resetPool(!soft_hard); + response = (soft_hard ? "Soft":"Hard"); + response += " reset have been sent to all programmed tests: "; response += anyReset ? "some/all have been reset" : "nothing was reset"; + } + else { + response += "cannot found test id ("; + response += anna::functions::asString(id); + response += ")"; + return false; + } + } return true; // OK } @@ -938,8 +1073,12 @@ bool EventOperation::test__reset(std::string &response, bool soft_hard, unsigned bool EventOperation::test__repeats(std::string &response, int amount) { Launcher& my_app = static_cast (anna::app::functions::getApp()); + anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate(); - + if (amount < 0) amount = -1; + testManager.setPoolRepeats(amount); + std::string nolimit = (amount != -1) ? "":" [no limit]"; + response = anna::functions::asString("Pool repeats: %d%s (current cycle: %d)", amount, nolimit.c_str(), testManager.getPoolCycle()); return true; // OK } @@ -947,8 +1086,10 @@ bool EventOperation::test__repeats(std::string &response, int amount) { bool EventOperation::test__auto_reset(std::string &response, bool soft_hard) { Launcher& my_app = static_cast (anna::app::functions::getApp()); + anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate(); - + testManager.setAutoResetHard(!soft_hard); + response = anna::functions::asString("Auto-reset configured to '%s'", (soft_hard ? 
"soft":"hard")); return true; // OK } @@ -956,8 +1097,9 @@ bool EventOperation::test__auto_reset(std::string &response, bool soft_hard) { bool EventOperation::test__initialized(std::string &response) { Launcher& my_app = static_cast (anna::app::functions::getApp()); + anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate(); - + response = anna::functions::asString("%lu", testManager.getInitializedCount()); return true; // OK } @@ -965,8 +1107,9 @@ bool EventOperation::test__initialized(std::string &response) { bool EventOperation::test__finished(std::string &response) { Launcher& my_app = static_cast (anna::app::functions::getApp()); + anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate(); - + response = anna::functions::asString("%lu", testManager.getFinishedCount()); return true; // OK } @@ -976,28 +1119,30 @@ bool EventOperation::test__clear(std::string &response) { Launcher& my_app = static_cast (anna::app::functions::getApp()); anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate(); - try { - if (testManager.clearPool()) { - response = "All the programmed test cases have been dropped"; - } - else { - response = "There are not programmed test cases to be removed"; - } - } - catch(anna::RuntimeException &ex) { - ex.trace(); - response += "failed"; - return false; - } - - return true; // OK + return testManager.clearPool(response); } bool EventOperation::test__junit(std::string &response, const std::string & targetFile) { Launcher& my_app = static_cast (anna::app::functions::getApp()); + anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate(); + std::ofstream out; + out.open(targetFile.c_str()); + if(out.is_open() == false) { + response += "error opening '"; + response += targetFile; + response += "'"; + return false; + } + + out << testManager.junitAsXMLString() << std::endl; + out.close(); + + response = "Junit report written on '"; + response += targetFile; + response += "'"; return true; // OK } @@ -1005,8 +1150,9 @@ bool EventOperation::test__junit(std::string &response, const std::string & targ bool EventOperation::test__summary_counts(std::string &response) { Launcher& my_app = static_cast (anna::app::functions::getApp()); + anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate(); - + response = anna::functions::encodeBase64(testManager.summaryCounts()); return true; // OK } @@ -1014,8 +1160,9 @@ bool EventOperation::test__summary_counts(std::string &response) { bool EventOperation::test__summary_states(std::string &response) { Launcher& my_app = static_cast (anna::app::functions::getApp()); + anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate(); - + response = anna::functions::encodeBase64(testManager.summaryStates()); return true; // OK } @@ -1023,8 +1170,9 @@ bool EventOperation::test__summary_states(std::string &response) { bool EventOperation::test__summary(std::string &response) { Launcher& my_app = static_cast (anna::app::functions::getApp()); + anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate(); - + response = anna::functions::encodeBase64(testManager.asXMLString()); return true; // OK } @@ -1032,8 +1180,36 @@ bool EventOperation::test__summary(std::string &response) { bool EventOperation::test__report(std::string &response, const std::string & state, bool enable) { Launcher& my_app = static_cast (anna::app::functions::getApp()); + anna::testing::TestManager 
 
+  std::string _state = state;
+
+  if(_state == "initialized")
+    testManager.setDumpInitializedReports(enable);
+  else if(_state == "in-progress")
+    testManager.setDumpInProgressReports(enable);
+  else if(_state == "failed")
+    testManager.setDumpFailedReports(enable);
+  else if(_state == "success")
+    testManager.setDumpSuccessReports(enable);
+  else if(_state == "all") {
+    _state = "any";
+    testManager.setDumpAllReports(enable);
+  }
+  else if(_state == "none") {
+    enable = !enable;
+    _state = "any";
+    testManager.setDumpAllReports(enable);
+  }
+  else {
+    response += "invalid state (allowed: initialized|in-progress|failed|success|[all]|none)";
+    return false;
+  }
 
+  response = (enable ? "Report enabled " : "Report disabled ");
+  response += "for tests in '";
+  response += _state;
+  response += "' state";
 
   return true; // OK
 }
@@ -1041,8 +1217,10 @@ bool EventOperation::test__report(std::string &response, const std::string & sta
 bool EventOperation::test__report_hex(std::string &response, bool enable) {
 
   Launcher& my_app = static_cast<Launcher&>(anna::app::functions::getApp());
+  anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
 
-
+  testManager.setDumpHex(enable);
+  response = (testManager.getDumpHex() ? "Report includes hexadecimal messages" : "Report excludes hexadecimal messages");
 
   return true; // OK
 }
@@ -1050,10 +1228,11 @@ bool EventOperation::test__report_hex(std::string &response, bool enable) {
 bool EventOperation::test__dump_stdout(std::string &response, bool enable) {
 
   Launcher& my_app = static_cast<Launcher&>(anna::app::functions::getApp());
+  anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
 
-
+  testManager.setDumpStdout(enable);
+  response = (testManager.getDumpStdout() ? "Test manager dumps progress into stdout" : "Test manager does not dump progress into stdout");
 
   return true; // OK
 }
-
diff --git a/example/diameter/launcher/EventOperation.hpp b/example/diameter/launcher/EventOperation.hpp
index 6fceb85..1f0a4ab 100644
--- a/example/diameter/launcher/EventOperation.hpp
+++ b/example/diameter/launcher/EventOperation.hpp
@@ -136,7 +136,7 @@ public:
   bool test__look(std::string &response, int id = -1 /* current */);
   bool test__state(std::string &response, int id = -1 /* current */);
   bool test__interact(std::string &response, int amount, unsigned int id = -1 /* current */);
-  bool test__reset(std::string &response, bool soft_hard = true, unsigned int id = -2 /* apply to all the tests */);
+  bool test__reset(std::string &response, bool soft_hard = true, int id = -1 /* apply to all the tests */);
   bool test__repeats(std::string &response, int amount);
   bool test__auto_reset(std::string &response, bool soft_hard);
   bool test__initialized(std::string &response);
diff --git a/example/diameter/launcher/Launcher.cpp b/example/diameter/launcher/Launcher.cpp
index 507bea3..2ad9a59 100644
--- a/example/diameter/launcher/Launcher.cpp
+++ b/example/diameter/launcher/Launcher.cpp
@@ -1675,12 +1675,7 @@ bool Launcher::eventOperation(const std::string &operation, std::string &respons
       if (numParams > 1)
         throw anna::RuntimeException("Wrong body content format on HTTP Request. Check 'HELP.md' for more information.", ANNA_FILE_LOCATION);
 
-      if (testManager.clearPool()) {
-        opt_response = "all the programmed test cases have been dropped";
-      }
-      else {
-        opt_response = "there are not programmed test cases to be removed";
-      }
+      result = testManager.clearPool(opt_response);
     }
     else if(param1 == "junit") {
       response = testManager.junitAsXMLString();
diff --git a/example/diameter/launcher/MyHandler.cpp b/example/diameter/launcher/MyHandler.cpp
index 7d312ef..d2433c8 100644
--- a/example/diameter/launcher/MyHandler.cpp
+++ b/example/diameter/launcher/MyHandler.cpp
@@ -477,19 +477,16 @@ bool MyHandler::doPOST(const std::string &uri, const nlohmann::json &j, std::str
     }
     else if (opType == "/test-reset") {
       auto it = j.find("type");
-      if (it != j.end() && it->is_string()) {
+      std::string type = (it != j.end() && it->is_string()) ? *it : "soft";
 
-        auto itI = j.find("id");
-        int id = (itI != j.end() && itI->is_number_integer()) ? itI->get<int>() : -2; // default is: apply to all the tests
+      auto itI = j.find("id");
+      int id = (itI != j.end() && itI->is_number_integer()) ? itI->get<int>() : -1; // default is: apply to all the tests
 
-        if ((*it == "soft") || (*it == "hard")) {
-          result = eop.test__reset(response, (*it == "soft"), id);
-        }
-        else
-          response += "invalid 'type' string field (allowed: soft|hard)";
+      if ((type == "soft") || (type == "hard")) {
+        result = eop.test__reset(response, (type == "soft"), id);
       }
       else
-        response += "missing 'type' string field";
+        response += "invalid 'type' string field (allowed: [soft]|hard)";
     }
     else if (opType == "/test-repeats") {
       auto it = j.find("amount");
@@ -522,10 +519,8 @@ bool MyHandler::doPOST(const std::string &uri, const nlohmann::json &j, std::str
     }
     else if (opType == "/test-junit") {
       auto it = j.find("targetFile");
-      if (it != j.end() && it->is_string())
-        result = eop.test__junit(response, *it);
-      else
-        response += "missing 'targetFile' string field";
+      std::string targetFile = (it != j.end() && it->is_string()) ? *it : "/tmp/junit.xml";
+      result = eop.test__junit(response, targetFile);
     }
     else if (opType == "/test-summary-counts") {
       result = eop.test__summary_counts(response);
@@ -549,7 +544,7 @@ bool MyHandler::doPOST(const std::string &uri, const nlohmann::json &j, std::str
       else
         response += "invalid 'action' string field (allowed: enable|disable)";
     }
-    else if ((opType == "/test-report-hex")||(opType == "/test-dump_stdout")) {
+    else if ((opType == "/test-report-hex")||(opType == "/test-dump-stdout")) {
       auto itA = j.find("action");
       std::string action = (itA != j.end() && itA->is_string()) ? *itA : "enable"; // default is: enable
diff --git a/example/diameter/launcher/resources/HELP.md b/example/diameter/launcher/resources/HELP.md
index bef7e3f..40f1a5e 100644
--- a/example/diameter/launcher/resources/HELP.md
+++ b/example/diameter/launcher/resources/HELP.md
@@ -1267,6 +1267,10 @@ ADML implements a bulting *Finite State Machine* to plan testing flows with a gr
 In-Progress limit is the maximum number of tests which can be executed in parallel.
 This operation allows a specific test to set this global pool behaviour.
 
+A zero value is equivalent to stopping the clock (no tests will be executed).
+
+A value of '-1' means 'no limit' (full parallel). Be careful with resource consumption.
+
 **Request body**:
 
 ``` 
@@ -1563,6 +1567,19 @@ This operation allows a specific test to set this global pool behaviour.
 
 Global operation (also at test identifier level is accessible).
 
+A zero value is equivalent to stopping the clock (no tests will be executed).
+
+A value of '-1' means 'no limit' (full parallel). Be careful with resource consumption.
+
+Defaults to '-2' (not '-1', which is the default for the *testid-ip-limit* version); this will show the current pool *ip-limit* and also the number of test cases which are *in progress*.
+
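+For instance, with curl (host and port are illustrative; adjust to your deployment):
+
+``` 
+curl -X POST -H "Content-Type: application/json" -d '{"amount":2}' http://localhost:8074/test-ip-limit
+curl -X POST -H "Content-Type: application/json" -d '{}' http://localhost:8074/test-ip-limit
+``` 
+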
 **Request body**:
 
 ``` 
@@ -1633,7 +1650,7 @@
 ``` 
 {
   "result":"",
-  "response":""
+  "response":""
 }
 ``` 
@@ -1651,13 +1668,22 @@
 ``` 
 {
-  "result":"",
+  "result":"",
   "response":""
 }
 ``` 
 
 #### POST /test-interact
 
+Makes a specific test case id interactive. An amount of 0 implies no execution-steps margin, which can be useful to 'freeze' a test in the middle of its execution. A value of -1 makes it non-interactive, resuming it from the current step.
+
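+For instance, with curl (host, port and test id are illustrative):
+
+``` 
+curl -X POST -H "Content-Type: application/json" -d '{"amount":0, "id":1}' http://localhost:8074/test-interact
+curl -X POST -H "Content-Type: application/json" -d '{"amount":-1, "id":1}' http://localhost:8074/test-interact
+``` 
+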
 **Request body**:
 
 ``` 
@@ -1682,8 +1708,8 @@
 ``` 
 {
-  "type":"",
-  [,"id":[test identifier (integer: apply to all the tests (-2))]]
+  ["type":"<[soft]|hard>"]
+  [,"id":[test identifier (integer: apply to all the tests (-1))]]
 }
 ``` 
@@ -1775,11 +1801,19 @@
 
 #### POST /test-junit
 
+As it could be very large, the report is dumped to the provided target file, '*/tmp/junit.xml*' by default.
+
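+For instance, with curl (host and port are illustrative):
+
+``` 
+curl -X POST -H "Content-Type: application/json" -d '{"targetFile":"/tmp/junit.xml"}' http://localhost:8074/test-junit
+``` 
+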
"response":"Done" } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + + # Send POST + response = admlc.post("/test-finished") + + # Verify response + responseBodyRef = { "success":"true", "response":"0" } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + + # Cleanup + response = admlc.post("/test-clear") + diff --git a/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-goto_test.py b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-goto_test.py new file mode 100644 index 0000000..5adea9a --- /dev/null +++ b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-goto_test.py @@ -0,0 +1,50 @@ +import pytest + + +def test_001_i_want_to_move_to_specific_test_and_check_it(admlc, admlf): + + # Send POST + flow = admlf.getId() + response = admlc.postDict("/testid-description/{}".format(flow), { "description":"testid-description.test_001.flow{}".format(flow) }) + flow = admlf.getId() + response = admlc.postDict("/testid-description/{}".format(flow), { "description":"testid-description.test_001.flow{}".format(flow) }) + + # Verify response + responseBodyRef = { "success":"true", "response":"Done" } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + + # Go to the second test: + + # Send POST + response = admlc.postDict("/test-goto", { "id":flow }) + + # Verify response + responseBodyRef = { "success":"true", "response":"Position updated for id provided ({})".format(flow) } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + + # Cleanup + response = admlc.post("/test-clear") + +def test_002_i_want_to_move_to_a_test_out_of_range_and_check_it(admlc, admlf): + + # Send POST + flow = admlf.getId() + response = admlc.postDict("/testid-description/{}".format(flow), { "description":"testid-description.test_001.flow{}".format(flow) }) + flow = admlf.getId() + response = admlc.postDict("/testid-description/{}".format(flow), { "description":"testid-description.test_001.flow{}".format(flow) }) + + # Verify response + responseBodyRef = { "success":"true", "response":"Done" } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + + # Go to the second test: + + # Send POST + response = admlc.postDict("/test-goto", { "id":(flow+1) }) # flow+1 is out of range + + # Verify response + responseBodyRef = { "success":"false", "response":"Internal error (check ADML traces): cannot found test id ({})".format(flow+1) } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + + # Cleanup + response = admlc.post("/test-clear") diff --git a/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-initialized_test.py b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-initialized_test.py new file mode 100644 index 0000000..dde5330 --- /dev/null +++ b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-initialized_test.py @@ -0,0 +1,23 @@ +import pytest + + +def test_001_i_want_check_the_number_of_initialized_test_cases_in_the_pool(admlc, admlf): + + # Send POST + flow = admlf.getId() + response = admlc.postDict("/testid-description/{}".format(flow), { "description":"My test" }) + + # Verify response + responseBodyRef = { "success":"true", "response":"Done" } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + + # Send POST + response = admlc.post("/test-initialized") + + # Verify response + responseBodyRef = { "success":"true", "response":"1" } + 
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+    # Cleanup
+    response = admlc.post("/test-clear")
+
diff --git a/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-interact_test.py b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-interact_test.py
new file mode 100644
index 0000000..d11086b
--- /dev/null
+++ b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-interact_test.py
@@ -0,0 +1,46 @@
+import pytest
+
+
+def test_001_i_want_to_make_one_test_non_interactive(admlc, admlf):
+
+    # Send POST (test flow with a single step changing ip-limit to 15)
+    flow = admlf.getId()
+    response = admlc.postDict("/testid-ip-limit/{}".format(flow), { "amount":15 })
+
+    # Verify response (test & step programmed)
+    responseBodyRef = { "success":"true", "response":"Done" }
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+    # Send POST (disable interactive mode on the former test)
+    response = admlc.postDict("/test-interact", { "amount":-1, "id":flow })
+
+    # Verify response
+    responseBodyRef = { "success":"true", "response":"Interactive mode disabled for test case id {}".format(flow) }
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+    # Cleanup & restore ip-limit (at global level because it is easier)
+    response = admlc.post("/test-clear")
+    response = admlc.postDict("/test-ip-limit", { "amount":-1 })
+
+def test_002_i_want_to_freeze_a_test(admlc, admlf):
+
+    # Send POST (test flow with a single step changing ip-limit to 15)
+    flow = admlf.getId()
+    response = admlc.postDict("/testid-ip-limit/{}".format(flow), { "amount":15 })
+
+    # Verify response (test & step programmed)
+    responseBodyRef = { "success":"true", "response":"Done" }
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+    # Send POST (freeze the former test)
+    response = admlc.postDict("/test-interact", { "amount":0, "id":flow })
+
+    # Verify response
+    respMsg = "Added interactive amount of 0 units (0: freezing a non-interactive testcase, no effect on already interactive) for test case id {}".format(flow)
+    responseBodyRef = { "success":"true", "response":respMsg }
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+    # Cleanup & restore ip-limit (at global level because it is easier)
+    response = admlc.post("/test-clear")
+    response = admlc.postDict("/test-ip-limit", { "amount":-1 })
+
diff --git a/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-ip-limit_test.py b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-ip-limit_test.py
new file mode 100644
index 0000000..ec71a7d
--- /dev/null
+++ b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-ip-limit_test.py
@@ -0,0 +1,25 @@
+import pytest
+
+
+def test_001_i_want_to_set_the_global_ip_limit_and_then_check_it(admlc):
+
+    requestBody = { "amount":2 }
+    responseBodyRef = { "success":"true", "response":"New in-progress limit: 2" }
+
+    # Send POST
+    response = admlc.postDict("/test-ip-limit", requestBody)
+
+    # Verify response
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+    # Now check:
+
+    requestBody = {} # -2 by default: shows current ip-limit and in-progress test cases amount
+    responseBodyRef = { "success":"true", "response":"In-progress limit amount: 2; currently there are 0 test cases running" }
+
+    # Send POST
+    response = admlc.postDict("/test-ip-limit", requestBody)
+
+    # Verify response
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
diff --git a/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-junit_test.py b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-junit_test.py
new file mode 100644
index 0000000..05d058d
--- /dev/null
+++ b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-junit_test.py
@@ -0,0 +1,36 @@
+import pytest
+
+
+def test_001_i_want_to_dump_the_junit_report_of_adml(admlc):
+
+    requestBody = { "targetFile":"/opt/adml/junit.xml" }
+    responseBodyRef = { "success":"true", "response":"Junit report written to '/opt/adml/junit.xml'" }
+
+    # Send POST
+    response = admlc.postDict("/test-junit", requestBody)
+
+    # Verify response
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+def test_002_i_want_to_dump_the_junit_report_of_adml_at_invalid_location(admlc):
+
+    requestBody = { "targetFile":"/invalid/path/junit.xml" }
+    responseBodyRef = { "success":"false", "response":"Internal error (check ADML traces): error opening '/invalid/path/junit.xml'" }
+
+    # Send POST
+    response = admlc.postDict("/test-junit", requestBody)
+
+    # Verify response
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+def test_003_i_want_to_dump_the_junit_report_of_adml_at_default_location(admlc):
+
+    responseBodyRef = { "success":"true", "response":"Junit report written to '/tmp/junit.xml'" }
+
+    # Send POST
+    response = admlc.postDict("/test-junit", {})
+
+    # Verify response
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+
diff --git a/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-look_test.py b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-look_test.py
new file mode 100644
index 0000000..0d2fb9a
--- /dev/null
+++ b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-look_test.py
@@ -0,0 +1,33 @@
+import pytest
+
+
+def test_001_i_want_to_look_at_a_specific_test(b64_encode, resources, admlc, admlf):
+
+    # Send POST
+    flow = admlf.getId()
+    response = admlc.postDict("/testid-description/{}".format(flow), { "description":"Look test" })
+
+    # Verify response
+    responseBodyRef = { "success":"true", "response":"Done" }
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+    # Send POST
+    response = admlc.postDict("/test-look", { "id":flow })
+
+    # Verify response
+    lookExpected = resources("look_output.txt").format(flow)
+    responseBodyRef = { "success":"true", "response":"{}".format(b64_encode(lookExpected)) }
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+    # Cleanup
+    response = admlc.post("/test-clear")
+
+def test_002_i_want_to_look_at_a_nonexistent_test(admlc):
+
+    # Send POST
+    response = admlc.postDict("/test-look", { "id":666 })
+
+    # Verify response
+    responseBodyRef = { "success":"false", "response":"Internal error (check ADML traces): cannot find test id (666)" }
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
diff --git a/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-next_test.py b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-next_test.py
new file mode 100644
index 0000000..c9e6abf
--- /dev/null
+++ b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-next_test.py
@@ -0,0 +1,93 @@
+import pytest
+
+
+def test_001_i_want_to_launch_the_next_testcase_with_test_next(admlc, admlf):
+
+    flow = admlf.getId()
+
+    requestBody = { "amount":5 }
+    responseBodyRef = { "success":"true", "response":"Done" }
+
+    # Send POST
+    response = admlc.postDict("/testid-ip-limit/{}".format(flow), requestBody)
+
+    ## Launch the next
+    requestBody = {} # 1 by default
+    responseBodyRef = { "success":"true", "response":"Processed 1 test case" }
+    response = admlc.postDict("/test-next", requestBody)
+
+    # Verify response
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+    # Cleanup
+    response = admlc.post("/test-clear")
+
+def test_002_i_want_to_launch_a_bunch_of_tests_with_test_next(admlc, admlf):
+
+    flow1 = admlf.getId()
+    flow2 = admlf.getId()
+
+    requestBody = { "amount":5 }
+    responseBodyRef = { "success":"true", "response":"Done" }
+
+    # Send POSTs
+    response = admlc.postDict("/testid-ip-limit/{}".format(flow1), requestBody)
+    response = admlc.postDict("/testid-ip-limit/{}".format(flow2), requestBody)
+
+    ## Launch the second & third:
+    requestBody = { "syncAmount":2 }
+    responseBodyRef = { "success":"true", "response":"Processed 2 test cases synchronously" }
+    response = admlc.postDict("/test-next", requestBody)
+
+    # Verify response
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+    # Cleanup
+    response = admlc.post("/test-clear")
+
+@pytest.mark.skip(reason="pytest is quite fast and the dedicated thread is still running when the clear operation arrives, so the clear is not completed")
+def test_003_i_want_to_launch_the_next_testcase_with_test_next(admlc, admlf):
+
+    flow = admlf.getId()
+
+    requestBody = { "script":"date +'%s.%N'" }
+    responseBodyRef = { "success":"true", "response":"Done" }
+
+    # Send POST
+    response = admlc.postDict("/testid-sh-command/{}".format(flow), requestBody)
+
+    ## Launch the next
+    requestBody = {} # 1 by default
+    responseBodyRef = { "success":"true", "response":"Processed 1 test case" }
+    response = admlc.postDict("/test-next", requestBody)
+
+    # Verify response
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+    # Cleanup
+    response = admlc.post("/test-clear")
+
+@pytest.mark.skip(reason="pytest is quite fast and the dedicated thread is still running when the clear operation arrives, so the clear is not completed")
+def test_004_i_want_to_launch_a_bunch_of_tests_with_test_next(admlc, admlf):
+
+    flow1 = admlf.getId()
+    flow2 = admlf.getId()
+
+    requestBody = { "script":"date +'%s.%N'" }
+    responseBodyRef = { "success":"true", "response":"Done" }
+
+    # Send POSTs
+    response = admlc.postDict("/testid-sh-command/{}".format(flow1), requestBody)
+    response = admlc.postDict("/testid-sh-command/{}".format(flow2), requestBody)
+
+    ## Launch the second & third:
+    requestBody = { "syncAmount":2 }
+    responseBodyRef = { "success":"true", "response":"Processed 2 test cases synchronously" }
+    response = admlc.postDict("/test-next", requestBody)
+
+    # Verify response
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+    # Cleanup
+    response = admlc.post("/test-clear")
+
diff --git a/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-repeats_test.py b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-repeats_test.py
new file mode 100644
index 0000000..337b1fe
--- /dev/null
+++ b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-repeats_test.py
@@ -0,0 +1,19 @@
+import pytest
+
+
+def test_001_i_want_to_repeat_the_test_pool_forever(admlc):
+
+    response = admlc.postDict("/test-repeats", { "amount": -1 })
+
+    # Verify response
+    responseBodyRef = { "success":"true", "response":"Pool repeats: -1 [no limit] (current cycle: 1)" }
(current cycle: 1)" } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + +def test_002_i_want_to_disable_test_pool_cycle_repeats(admlc): + + response = admlc.postDict("/test-repeats", { "amount":0 }) + + # Verify response + responseBodyRef = { "success":"true", "response":"Pool repeats: 0 (current cycle: 1)" } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + diff --git a/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-report-hex_test.py b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-report-hex_test.py new file mode 100644 index 0000000..08e70e2 --- /dev/null +++ b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-report-hex_test.py @@ -0,0 +1,21 @@ +import pytest + + +def test_001_i_want_to_enable_report_hex_for_test_manager_progress(admlc): + + # Send POST + response = admlc.postDict("/test-report-hex", { "action":"enable" }) + + # Verify response + responseBodyRef = { "success":"true", "response":"Report includes hexadecimal messages" } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + +def test_002_i_want_to_disable_report_hex_for_test_manager_progress(admlc): + + # Send POST + response = admlc.postDict("/test-report-hex", { "action":"disable" }) + + # Verify response + responseBodyRef = { "success":"true", "response":"Report excludes hexadecimal messages" } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + diff --git a/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-report_test.py b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-report_test.py new file mode 100644 index 0000000..b8c09a0 --- /dev/null +++ b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-report_test.py @@ -0,0 +1,36 @@ +import pytest + + +def test_001_i_want_to_enable_test_reports_for_any_state(admlc): + + response = admlc.postDict("/test-report", {}) + + # Verify response + responseBodyRef = { "success":"true", "response":"Report enabled for tests in 'any' state" } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + +def test_002_i_want_to_disable_test_reports_for_any_state(admlc): + + response = admlc.postDict("/test-report", { "action":"disable" }) + + # Verify response + responseBodyRef = { "success":"true", "response":"Report disabled for tests in 'any' state" } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + + +def test_003_i_want_to_enable_test_reports_for_failed_state(admlc): + + response = admlc.postDict("/test-report", { "state":"failed" }) + + # Verify response + responseBodyRef = { "success":"true", "response":"Report enabled for tests in 'failed' state" } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + +def test_004_i_want_to_enable_test_reports_for_invalid_state(admlc): + + response = admlc.postDict("/test-report", { "state":"INVALID STATE" }) + + # Verify response + responseBodyRef = { "success":"false", "response":"Internal error (check ADML traces): invalid state (allowed: initialized|in-progress|failed|success|[all]|none)" } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + diff --git a/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-reset_test.py b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-reset_test.py new file mode 100644 index 0000000..63db2b8 --- /dev/null +++ 
+++ b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-reset_test.py
@@ -0,0 +1,18 @@
+import pytest
+
+def test_001_i_want_to_do_reset_soft_for_every_test(admlc):
+
+    response = admlc.postDict("/test-reset", {})
+
+    # Verify response
+    responseBodyRef = { "success":"true", "response":"Soft reset has been sent to all programmed tests: nothing was reset" }
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+def test_002_i_want_to_do_reset_hard_for_every_test(admlc):
+
+    response = admlc.postDict("/test-reset", { "type":"hard" })
+
+    # Verify response
+    responseBodyRef = { "success":"true", "response":"Hard reset has been sent to all programmed tests: nothing was reset" }
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
diff --git a/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-run_test.py b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-run_test.py
new file mode 100644
index 0000000..9db4edd
--- /dev/null
+++ b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-run_test.py
@@ -0,0 +1,39 @@
+import pytest
+
+
+def test_001_i_want_to_run_specific_test(admlc, admlf):
+
+    # Send POST (test flow with a single step changing ip-limit to 15)
+    flow = admlf.getId()
+    response = admlc.postDict("/testid-ip-limit/{}".format(flow), { "amount":15 })
+
+    # Verify response (test & step programmed)
+    responseBodyRef = { "success":"true", "response":"Done" }
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+    # Send POST (execute the former test)
+    response = admlc.postDict("/test-run", { "id":flow })
+
+    # Verify response
+    responseBodyRef = { "success":"true", "response":"Test executed for id provided ({})".format(flow) }
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+    # Verify ip-limit now:
+    response = admlc.postDict("/test-ip-limit", {}) # default {} to query current state
+    responseBodyRef = { "success":"true", "response":"In-progress limit amount: 15; currently there are 0 test cases running" }
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+    # Cleanup & restore ip-limit (at global level because it is easier)
+    response = admlc.post("/test-clear")
+    response = admlc.postDict("/test-ip-limit", { "amount":-1 })
+
+
+def test_002_i_want_to_run_a_nonexistent_test(admlc):
+
+    # Send POST
+    response = admlc.postDict("/test-run", { "id":666 })
+
+    # Verify response
+    responseBodyRef = { "success":"false", "response":"Internal error (check ADML traces): cannot find test id (666)" }
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
diff --git a/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-state_test.py b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-state_test.py
new file mode 100644
index 0000000..f2332f2
--- /dev/null
+++ b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-state_test.py
@@ -0,0 +1,32 @@
+import pytest
+
+
+def test_001_i_want_to_get_the_state_for_specific_test(admlc, admlf):
+
+    # Send POST
+    flow = admlf.getId()
+    response = admlc.postDict("/testid-description/{}".format(flow), { "description":"Look test" })
+
+    # Verify response
+    responseBodyRef = { "success":"true", "response":"Done" }
+    admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+    # Send POST
+    response = admlc.postDict("/test-state", { "id":flow })
+
+    # Verify response
+    responseBodyRef = { "success":"false", "response":"Initialized" } # success is false, because test-state returns true when test is Success.
"response":"Initialized" } # success is false, because test-state returns true when test is Success. + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + + # Cleanup + response = admlc.post("/test-clear") + +def test_002_i_want_to_get_the_state_for_unexisting_test(admlc): + + # Send POST + response = admlc.postDict("/test-state", { "id":666 }) + + # Verify response + responseBodyRef = { "success":"false", "response":"Internal error (check ADML traces): cannot found test id (666)" } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + diff --git a/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-summary-counts_test.py b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-summary-counts_test.py new file mode 100644 index 0000000..43b7b48 --- /dev/null +++ b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-summary-counts_test.py @@ -0,0 +1,13 @@ +import pytest + + +def test_001_i_want_to_retrieve_the_summary_counts(b64_encode, resources, admlc): + + # Send POST + response = admlc.post("/test-summary-counts") + + # Verify response + summaryCountsExpected = resources("summary-counts_output.txt") + responseBodyRef = { "success":"true", "response":"{}".format(b64_encode(summaryCountsExpected)) } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + diff --git a/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-summary-states_test.py b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-summary-states_test.py new file mode 100644 index 0000000..d6091dc --- /dev/null +++ b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-summary-states_test.py @@ -0,0 +1,13 @@ +import pytest + + +def test_001_i_want_to_retrieve_the_summary_states(b64_encode, resources, admlc): + + # Send POST + response = admlc.post("/test-summary-states") + + # Verify response + summaryStatesExpected = resources("summary-states_output.txt") + responseBodyRef = { "success":"true", "response":"{}".format(b64_encode(summaryStatesExpected)) } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + diff --git a/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-summary_test.py b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-summary_test.py new file mode 100644 index 0000000..1c22ac2 --- /dev/null +++ b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-summary_test.py @@ -0,0 +1,14 @@ +import pytest + + +def test_001_i_want_to_retrieve_the_tests_pool_summary(b64_decode, mylogger, admlc): + + # Send POST + response = admlc.post("/test-summary") + + # Verify response is ok (omit response content because it is dynamic: will be logged) + assert response["status"] == 200 + assert response["body"]["success"] == "true" + body_response = b64_decode(response["body"]["response"]) + mylogger.info("\nBASE64 DECODED RESPONSE: \n\n" + body_response) + diff --git a/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-ttps_test.py b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-ttps_test.py new file mode 100644 index 0000000..481fccb --- /dev/null +++ b/example/diameter/launcher/resources/rest_api/ct/fsm-testing/test-ttps_test.py @@ -0,0 +1,34 @@ +import pytest + + +def test_001_i_want_to_set_a_valid_test_ttps(admlc): + + requestBody = { "amount":5 } + responseBodyRef = { "success":"true", "response":"Assigned new test launch rate to 5 events per second" } + + # Send POST + response = 
admlc.postDict("/test-ttps", requestBody) + + # Verify response + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + + # Leave stopped + response = admlc.postDict("/test-ttps", { "amount":0 }) + responseBodyRef = { "success":"true", "response":"Assigned new test launch rate to 0 events per second" } + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + +def test_002_i_want_to_set_an_invalid_test_ttps(admlc): + + requestBody = { "amount":-8 } + responseBodyRef = { "success":"false", "response":"Internal error (check ADML traces): unable to configure the test rate provided" } + + # Send POST + response = admlc.postDict("/test-ttps", requestBody) + + # Verify response + admlc.assert_response__status_body_headers(response, 200, responseBodyRef) + + # Leave stopped + #response = admlc.postDict("/test-ttps", { "amount":0 }) + #responseBodyRef = { "success":"true", "response":"Assigned new test launch rate to 0 events per second" } + #admlc.assert_response__status_body_headers(response, 200, responseBodyRef) diff --git a/example/diameter/launcher/resources/rest_api/ct/resources/look_output.txt b/example/diameter/launcher/resources/rest_api/ct/resources/look_output.txt new file mode 100644 index 0000000..69a2984 --- /dev/null +++ b/example/diameter/launcher/resources/rest_api/ct/resources/look_output.txt @@ -0,0 +1 @@ + diff --git a/example/diameter/launcher/resources/rest_api/ct/resources/summary-counts_output.txt b/example/diameter/launcher/resources/rest_api/ct/resources/summary-counts_output.txt new file mode 100644 index 0000000..b7655df --- /dev/null +++ b/example/diameter/launcher/resources/rest_api/ct/resources/summary-counts_output.txt @@ -0,0 +1,8 @@ + +Summary Counts: + +Total: 0 +Initialized: 0 +In Progress: 0 +Failed: 0 +Success: 0 \ No newline at end of file diff --git a/example/diameter/launcher/resources/rest_api/ct/resources/summary-states_output.txt b/example/diameter/launcher/resources/rest_api/ct/resources/summary-states_output.txt new file mode 100644 index 0000000..e3b5341 --- /dev/null +++ b/example/diameter/launcher/resources/rest_api/ct/resources/summary-states_output.txt @@ -0,0 +1,4 @@ + +Summary States: + +No test cases programmed ! \ No newline at end of file diff --git a/source/testing/TestManager.cpp b/source/testing/TestManager.cpp index 280a198..c8c65e3 100644 --- a/source/testing/TestManager.cpp +++ b/source/testing/TestManager.cpp @@ -300,7 +300,7 @@ bool TestManager::clearPool(std::string &result) throw() { result = ""; if (!tests()) { - result = "there are not programmed test cases to be removed"; + result = "There are not programmed test cases to be removed"; return false; } @@ -315,7 +315,7 @@ bool TestManager::clearPool(std::string &result) throw() { } if (unsafe > 0) { - result = "some test cases cannot be removed ("; + result = "Some test cases cannot be removed ("; result += anna::functions::asString(unsafe); result += "/"; result += anna::functions::asString(total); @@ -336,6 +336,7 @@ bool TestManager::clearPool(std::string &result) throw() { configureTTPS(0); // stop a_statSummary.clear(); + result = "all the programmed test cases have been dropped"; return true; } -- 2.20.1