//// Standard
//#include <sstream> // std::istringstream
//#include <iostream> // std::cout
+#include <fstream>
//#include <math.h> // ceil
//#include <climits>
#include <unistd.h> // chdir
bool EventOperation::test__ttps(std::string &response, int amount) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
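+ // configureTTPS() returns false when the rate cannot be applied (e.g. a negative amount); an amount of 0 stops the launch clock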
+ bool success = testManager.configureTTPS(amount);
+ if (success) {
+ response = "Assigned new test launch rate to ";
+ response += anna::functions::asString(amount);
+ response += " events per second";
+ }
+ else {
+ response += "unable to configure the test rate provided";
+ }
-
- return true; // OK
+ return success; // OK
}
bool EventOperation::test__next(std::string &response, int syncAmount) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
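+ // launches the next test case(s) of the pool synchronously; 'sync-amount' defaults to 1 at the HTTP layer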
+ if (syncAmount < 1) {
+ response += "the parameter 'sync-amount' must be a positive integer value";
+ return false;
+ }
+ bool success = testManager.execTestCases(syncAmount);
- return true; // OK
+ response = (success ? "P" : "Not completely p" /* completed cycle and no repeats, rare case */);
+ response += "rocessed ";
+ response += anna::functions::asString(syncAmount);
+ response += ((syncAmount > 1) ? " test cases synchronously" : " test case");
+
+ return success;
}
bool EventOperation::test__ip_limit(std::string &response, int amount) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
-
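+ // amount >= -1 sets a new limit (-1 means 'no limit'); the default -2 (or below) only queries the current state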
+ if (amount > -2) {
+ testManager.setInProgressLimit(amount);
+ response = "New in-progress limit: ";
+ response += (amount != -1) ? anna::functions::asString(amount) : "<no limit>";
+ }
+ else {
+ response = "In-progress limit amount: ";
+ int limit = testManager.getInProgressLimit();
+ response += (limit != -1) ? anna::functions::asString(limit) : "<no limit>";
+ response += "; currently there are ";
+ response += anna::functions::asString(testManager.getInProgressCount());
+ response += " test cases running";
+ }
return true; // OK
}
bool EventOperation::test__goto(std::string &response, int id) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
+ bool success = testManager.gotoTestCase(id);
+ if (success) {
+ response = "Position updated for id provided (";
+ }
+ else {
+ response += "cannot found test id (";
+ }
+ response += anna::functions::asString(id);
+ response += ")";
- return true; // OK
+ return success;
}
bool EventOperation::test__run(std::string &response, int id) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
+ bool success = testManager.runTestCase(id);
+ if (success) {
+ response = "Test executed for id provided (";
+ }
+ else {
+ response += "cannot found test id (";
+ }
+ response += anna::functions::asString(id);
+ response += ")";
- return true; // OK
+ return success;
}
bool EventOperation::test__look(std::string &response, int id) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
+ anna::testing::TestCase *testCase = testManager.findTestCase(id);
+ if (!testCase) {
+ if (id == -1) {
+ response += "no current test case detected (testing started ?)";
+ }
+ else {
+ response += "cannot found test id (";
+ response += anna::functions::asString(id);
+ response += ")";
+ }
+ return false;
+ }
+
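+ // over the HTTP interface the XML view is base64-encoded (the client must decode the 'response' field)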
+ if (a_http)
+ response = anna::functions::encodeBase64(testCase->asXMLString());
+ else
+ response = testCase->asXMLString();
return true; // OK
}
bool EventOperation::test__state(std::string &response, int id) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
+ anna::testing::TestCase *testCase = testManager.findTestCase(id);
+ if (!testCase) {
+ if (id == -1) {
+ response += "no current test case detected (testing started ?)";
+ }
+ else {
+ response += "cannot found test id (";
+ response += anna::functions::asString(id);
+ response += ")";
+ }
+ return false;
+ }
- return true; // OK
+ response = anna::testing::TestCase::asText(testCase->getState());
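+ // the boolean result is true only when the test case state is 'Success'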
+ return testCase->isSuccess();
}
bool EventOperation::test__interact(std::string &response, int amount, unsigned int id) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
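+ // amount semantics: -1 disables interactive mode, 0 freezes the test case, positive values add execution steps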
+ if (amount < -1) {
+ response += "interactive amount must be -1 (to disable interactive mode) or a positive number.";
+ return false;
+ }
+ anna::testing::TestCase *testCase = testManager.findTestCase(id);
+ if (testCase) {
+ if (amount == -1) {
+ testCase->makeInteractive(false);
+ response = "Interactive mode disabled";
+ }
+ else {
+ testCase->addInteractiveAmount(amount);
+ response = "Added interactive amount of ";
+ response += anna::functions::asString(amount);
+ response += " units";
+ if (amount == 0) response += " (0: freezing a non-interactive testcase, no effect on already interactive)";
+ }
+ response += " for test case id ";
+ response += anna::functions::asString(id);
+ }
+ else {
+ response += "cannot found test id (";
+ response += anna::functions::asString(id);
+ response += ")";
+ return false;
+ }
return true; // OK
}
-bool EventOperation::test__reset(std::string &response, bool soft_hard, unsigned int id) {
+bool EventOperation::test__reset(std::string &response, bool soft_hard, int id) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
-
+ anna::testing::TestCase *testCase = ((id != -1) ? testManager.findTestCase(id) : NULL);
+ if (testCase) {
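+ // TestCase::reset() takes a 'hard' flag, hence the negation (soft_hard is true for a soft reset)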
+ bool done = testCase->reset(!soft_hard);
+ response = "Test ";
+ response += (soft_hard ? "soft":"hard");
+ response += " reset for id ";
+ response += anna::functions::asString(id);
+ response += done ? ": done": ": not done";
+ }
+ else {
+ if (id == -1) {
+ bool anyReset = testManager.resetPool(!soft_hard);
+ response = (soft_hard ? "Soft":"Hard");
+ response += " reset have been sent to all programmed tests: "; response += anyReset ? "some/all have been reset" : "nothing was reset";
+ }
+ else {
+ response += "cannot found test id (";
+ response += anna::functions::asString(id);
+ response += ")";
+ return false;
+ }
+ }
return true; // OK
}
bool EventOperation::test__repeats(std::string &response, int amount) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
-
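+ // normalize any negative amount to -1, which means 'repeat forever' (no limit)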
+ if (amount < 0) amount = -1;
+ testManager.setPoolRepeats(amount);
+ std::string nolimit = (amount != -1) ? "":" [no limit]";
+ response = anna::functions::asString("Pool repeats: %d%s (current cycle: %d)", amount, nolimit.c_str(), testManager.getPoolCycle());
return true; // OK
}
bool EventOperation::test__auto_reset(std::string &response, bool soft_hard) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
-
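+ // setAutoResetHard() expects true for 'hard', so the soft flag is inverted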
+ testManager.setAutoResetHard(!soft_hard);
+ response = anna::functions::asString("Auto-reset configured to '%s'", (soft_hard ? "soft":"hard"));
return true; // OK
}
bool EventOperation::test__initialized(std::string &response) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
-
+ response = anna::functions::asString("%lu", testManager.getInitializedCount());
return true; // OK
}
bool EventOperation::test__finished(std::string &response) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
-
+ response = anna::functions::asString("%lu", testManager.getFinishedCount());
return true; // OK
}
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
- try {
- if (testManager.clearPool()) {
- response = "All the programmed test cases have been dropped";
- }
- else {
- response = "There are not programmed test cases to be removed";
- }
- }
- catch(anna::RuntimeException &ex) {
- ex.trace();
- response += "failed";
- return false;
- }
-
- return true; // OK
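+ // clearPool() now builds the response message itself and returns the overall result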
+ return testManager.clearPool(response);
}
bool EventOperation::test__junit(std::string &response, const std::string & targetFile) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
+ std::ofstream out;
+ out.open(targetFile.c_str());
+ if (!out.is_open()) {
+ response += "error opening '";
+ response += targetFile;
+ response += "'";
+ return false;
+ }
+
+ out << testManager.junitAsXMLString() << std::endl;
+ out.close();
+
+ response = "Junit report written on '";
+ response += targetFile;
+ response += "'";
return true; // OK
}
bool EventOperation::test__summary_counts(std::string &response) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
-
+ response = anna::functions::encodeBase64(testManager.summaryCounts());
return true; // OK
}
bool EventOperation::test__summary_states(std::string &response) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
-
+ response = anna::functions::encodeBase64(testManager.summaryStates());
return true; // OK
}
bool EventOperation::test__summary(std::string &response) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
-
+ response = anna::functions::encodeBase64(testManager.asXMLString());
return true; // OK
}
bool EventOperation::test__report(std::string &response, const std::string & state, bool enable) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
+ std::string _state = state;
+
+ if(_state == "initialized")
+ testManager.setDumpInitializedReports(enable);
+ else if(_state == "in-progress")
+ testManager.setDumpInProgressReports(enable);
+ else if(_state == "failed")
+ testManager.setDumpFailedReports(enable);
+ else if(_state == "success")
+ testManager.setDumpSuccessReports(enable);
+ else if(_state == "all") {
+ _state = "any";
+ testManager.setDumpAllReports(enable);
+ }
+ else if(_state == "none") {
+ enable = !enable;
+ _state = "any";
+ testManager.setDumpAllReports(enable);
+ }
+ else {
+ response += "invalid state (allowed: initialized|in-progress|failed|success|[all]|none)";
+ return false;
+ }
+ response = (enable ? "Report enabled " : "Report disabled ");
+ response += "for tests in '";
+ response += _state;
+ response += "' state";
return true; // OK
}
bool EventOperation::test__report_hex(std::string &response, bool enable) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
-
+ testManager.setDumpHex(enable);
+ response = (testManager.getDumpHex() ? "Report includes hexadecimal messages" : "Report excludes hexadecimal messages");
return true; // OK
}
bool EventOperation::test__dump_stdout(std::string &response, bool enable) {
Launcher& my_app = static_cast <Launcher&>(anna::app::functions::getApp());
+ anna::testing::TestManager &testManager = anna::testing::TestManager::instantiate();
-
+ testManager.setDumpStdout(enable);
+ response = (testManager.getDumpStdout() ? "Test manager dumps progress into stdout" : "Test manager does not dump progress into stdout");
return true; // OK
}
-
bool test__look(std::string &response, int id = -1 /* current */);
bool test__state(std::string &response, int id = -1 /* current */);
bool test__interact(std::string &response, int amount, unsigned int id = -1 /* current */);
- bool test__reset(std::string &response, bool soft_hard = true, unsigned int id = -2 /* apply to all the tests */);
+ bool test__reset(std::string &response, bool soft_hard = true, int id = -1 /* apply to all the tests */);
bool test__repeats(std::string &response, int amount);
bool test__auto_reset(std::string &response, bool soft_hard);
bool test__initialized(std::string &response);
if (numParams > 1)
throw anna::RuntimeException("Wrong body content format on HTTP Request. Check 'HELP.md' for more information.", ANNA_FILE_LOCATION);
- if (testManager.clearPool()) {
- opt_response = "all the programmed test cases have been dropped";
- }
- else {
- opt_response = "there are not programmed test cases to be removed";
- }
+ result = testManager.clearPool(opt_response);
}
else if(param1 == "junit") {
response = testManager.junitAsXMLString();
}
else if (opType == "/test-reset") {
auto it = j.find("type");
- if (it != j.end() && it->is_string()) {
+ std::string type = (it != j.end() && it->is_string()) ? *it : "soft";
- auto itI = j.find("id");
- int id = (itI != j.end() && itI->is_number_integer()) ? itI->get<int>() : -2; // default is: apply to all the tests
+ auto itI = j.find("id");
+ int id = (itI != j.end() && itI->is_number_integer()) ? itI->get<int>() : -1; // default is: apply to all the tests
- if ((*it == "soft") || (*it == "hard")) {
- result = eop.test__reset(response, (*it == "soft"), id);
- }
- else
- response += "invalid 'type' string field (allowed: soft|hard)";
+ if ((type == "soft") || (type == "hard")) {
+ result = eop.test__reset(response, (type == "soft"), id);
}
else
- response += "missing 'type' string field";
+ response += "invalid 'type' string field (allowed: [soft]|hard)";
}
else if (opType == "/test-repeats") {
auto it = j.find("amount");
}
else if (opType == "/test-junit") {
auto it = j.find("targetFile");
- if (it != j.end() && it->is_string())
- result = eop.test__junit(response, *it);
- else
- response += "missing 'targetFile' string field";
+ std::string targetFile = (it != j.end() && it->is_string()) ? *it : "/tmp/junit.xml";
+ result = eop.test__junit(response, targetFile);
}
else if (opType == "/test-summary-counts") {
result = eop.test__summary_counts(response);
else
response += "invalid 'action' string field (allowed: enable|disable)";
}
- else if ((opType == "/test-report-hex")||(opType == "/test-dump_stdout")) {
+ else if ((opType == "/test-report-hex")||(opType == "/test-dump-stdout")) {
auto itA = j.find("action");
std::string action = (itA != j.end() && itA->is_string()) ? *itA : "enable"; // default is: enable
In-Progress limit is the maximum number of tests which can be executed in parallel.
This operation allows a specific test to set this global pool behaviour.
+A zero value is equivalent to stopping the clock (no tests will be executed).
+
+A value of '-1' means 'no limit' (full parallel). Be careful with resource consumption.
+
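+For example, a body of '{ "amount":-1 }' programs a step that removes the limit (a concrete instance of the request body described below).
+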
**Request body**:
```
Global operation (also accessible at test identifier level).
+A zero value is equivalent to stopping the clock (no tests will be executed).
+
+A value of '-1' means 'no limit' (full parallel). Be careful with resource consumption.
+
+Defaults to '-2' (not '-1', which is the default for the *testid-ip-limit* version); this will show the current pool *ip-limit* and also the number of test cases which are *in progress*.
+
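+For example, '{ "amount":2 }' sets the limit to 2, while an empty body '{}' simply queries the current state.
+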
**Request body**:
```
```
{
"result":"<true or false>",
- "response":"<response>"
+ "response":"<base64-encoded output>"
}
```
```
{
- "result":"<true or false>",
+ "result":"<true: if state is 'Success' or false>",
"response":"<response>"
}
```
#### POST /test-interact
+Makes a specific test case id interactive. An amount of 0 implies no execution-steps margin, which can be useful to 'freeze' a test in the middle of its execution. A value of -1 makes it non-interactive, resuming it from the current step.
+
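+For example, '{ "amount":0, "id":7 }' freezes test case 7, while '{ "amount":-1, "id":7 }' resumes it (the id here is illustrative).
+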
**Request body**:
```
```
{
- "type":"<soft|hard>"
- [,"id":[test identifier (integer: apply to all the tests (-2))]]
+ ["type":"<[soft]|hard>"]
+ [,"id":[test identifier (integer: apply to all the tests (-1))]]
}
```
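+
+For example, this concrete body performs a hard reset of the whole pool (no "id" provided):
+```
+{
+  "type":"hard"
+}
+```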
#### POST /test-junit
+As it could be very large, the report is dumped to the provided target file, '*/tmp/junit.xml*' by default.
+
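+For example, '{ "targetFile":"/opt/adml/junit.xml" }' writes the report to that path; an empty body falls back to '/tmp/junit.xml'.
+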
**Request body**:
```
{
- "targetFile":"<file path>"
+ ["targetFile":"<file path>"]
}
```
```
{
"result":"<true or false>",
- "response":"<response>"
+ "response":"<base64-encoded output>"
}
```
```
{
"result":"<true or false>",
- "response":"<response>"
+ "response":"<base64-encoded output>"
}
```
```
{
"result":"<true or false>",
- "response":"<response>"
+ "response":"<base64-encoded output>"
}
```
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_set_auto_reset_to_hard(admlc):
+
+ response = admlc.postDict("/test-auto-reset", { "type": "hard" })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Auto-reset configured to 'hard'" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+def test_002_i_want_to_set_auto_reset_to_soft(admlc):
+
+ response = admlc.postDict("/test-auto-reset", { "type": "soft" })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Auto-reset configured to 'soft'" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
@pytest.mark.run(order=1)
def test_001_i_want_to_test_pool_when_it_is_already_empty(admlc):
- responseBodyRef = { "success":"true", "response":"There are not programmed test cases to be removed" }
+ responseBodyRef = { "success":"false", "response":"There are not programmed test cases to be removed" }
# Send POST
response = admlc.post("/test-clear")
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_enable_dump_stdout_for_test_manager_progress(admlc):
+
+ # Send POST
+ response = admlc.postDict("/test-dump-stdout", { "action":"enable" })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Test manager dumps progress into stdout" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+def test_002_i_want_to_disable_dump_stdout_for_test_manager_progress(admlc):
+
+ # Send POST
+ response = admlc.postDict("/test-dump-stdout", { "action":"disable" })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Test manager does not dump progress into stdout" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_check_the_number_of_finished_test_cases_in_the_pool(admlc, admlf):
+
+ # Send POST
+ flow = admlf.getId()
+ response = admlc.postDict("/testid-description/{}".format(flow), { "description":"My test" })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Done" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Send POST
+ response = admlc.post("/test-finished")
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"0" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Cleanup
+ response = admlc.post("/test-clear")
+
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_move_to_specific_test_and_check_it(admlc, admlf):
+
+ # Send POST
+ flow = admlf.getId()
+ response = admlc.postDict("/testid-description/{}".format(flow), { "description":"testid-description.test_001.flow{}".format(flow) })
+ flow = admlf.getId()
+ response = admlc.postDict("/testid-description/{}".format(flow), { "description":"testid-description.test_001.flow{}".format(flow) })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Done" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Go to the second test:
+
+ # Send POST
+ response = admlc.postDict("/test-goto", { "id":flow })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Position updated for id provided ({})".format(flow) }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Cleanup
+ response = admlc.post("/test-clear")
+
+def test_002_i_want_to_move_to_a_test_out_of_range_and_check_it(admlc, admlf):
+
+ # Send POST
+ flow = admlf.getId()
+ response = admlc.postDict("/testid-description/{}".format(flow), { "description":"testid-description.test_001.flow{}".format(flow) })
+ flow = admlf.getId()
+ response = admlc.postDict("/testid-description/{}".format(flow), { "description":"testid-description.test_001.flow{}".format(flow) })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Done" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Go to the second test:
+
+ # Send POST
+ response = admlc.postDict("/test-goto", { "id":(flow+1) }) # flow+1 is out of range
+
+ # Verify response
+ responseBodyRef = { "success":"false", "response":"Internal error (check ADML traces): cannot found test id ({})".format(flow+1) }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Cleanup
+ response = admlc.post("/test-clear")
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_check_the_number_of_initialized_test_cases_in_the_pool(admlc, admlf):
+
+ # Send POST
+ flow = admlf.getId()
+ response = admlc.postDict("/testid-description/{}".format(flow), { "description":"My test" })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Done" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Send POST
+ response = admlc.post("/test-initialized")
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"1" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Cleanup
+ response = admlc.post("/test-clear")
+
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_make_one_test_non_interactive(admlc, admlf):
+
+ # Send POST (test flow with a single step changing ip-limit to 15)
+ flow = admlf.getId()
+ response = admlc.postDict("/testid-ip-limit/{}".format(flow), { "amount":15 })
+
+ # Verify response (test & step programmed)
+ responseBodyRef = { "success":"true", "response":"Done" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Send POST (execute the former test)
+ response = admlc.postDict("/test-interact", { "amount":-1, "id":flow })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Interactive mode disabled for test case id {}".format(flow) }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Cleanup & restore ip-limit (at global level because it is easier)
+ response = admlc.post("/test-clear")
+ response = admlc.postDict("/test-ip-limit", { "amount":-1 })
+
+def test_002_i_want_to_freeze_a_test(admlc, admlf):
+
+ # Send POST (test flow with a single step changing ip-limit to 15)
+ flow = admlf.getId()
+ response = admlc.postDict("/testid-ip-limit/{}".format(flow), { "amount":15 })
+
+ # Verify response (test & step programmed)
+ responseBodyRef = { "success":"true", "response":"Done" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Send POST (execute the former test)
+ response = admlc.postDict("/test-interact", { "amount":0, "id":flow })
+
+ # Verify response
+ respMsg = "Added interactive amount of 0 units (0: freezing a non-interactive testcase, no effect on already interactive) for test case id {}".format(flow)
+ responseBodyRef = { "success":"true", "response":respMsg }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Cleanup & restore ip-limit (at global level because it is easier)
+ response = admlc.post("/test-clear")
+ response = admlc.postDict("/test-ip-limit", { "amount":-1 })
+
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_set_the_global_ip_limit_and_then_check_it(admlc):
+
+ requestBody = { "amount":2 }
+ responseBodyRef = { "success":"true", "response":"New in-progress limit: 2" }
+
+ # Send POST
+ response = admlc.postDict("/test-ip-limit", requestBody)
+
+ # Verify response
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Now check:
+
+ requestBody = {} # -2 by default: shows current ip-limit and in-progress test cases amount
+ responseBodyRef = { "success":"true", "response":"In-progress limit amount: 2; currently there are 0 test cases running" }
+
+ # Send POST
+ response = admlc.postDict("/test-ip-limit", requestBody)
+
+ # Verify response
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_dump_the_junit_report_of_adml(admlc):
+
+ requestBody = { "targetFile":"/opt/adml/junit.xml" }
+ responseBodyRef = { "success":"true", "response":"Junit report written on '/opt/adml/junit.xml'" }
+
+ # Send POST
+ response = admlc.postDict("/test-junit", requestBody)
+
+ # Verify response
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+def test_002_i_want_to_dump_the_junit_report_of_adml_at_invalid_location(admlc):
+
+ requestBody = { "targetFile":"/invalid/path/junit.xml" }
+ responseBodyRef = { "success":"false", "response":"Internal error (check ADML traces): error opening '/invalid/path/junit.xml'" }
+
+ # Send POST
+ response = admlc.postDict("/test-junit", requestBody)
+
+ # Verify response
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+def test_003_i_want_to_dump_the_junit_report_of_adml_at_default_location(admlc):
+
+ responseBodyRef = { "success":"true", "response":"Junit report written on '/tmp/junit.xml'" }
+
+ # Send POST
+ response = admlc.postDict("/test-junit", {})
+
+ # Verify response
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_look_a_specific_test(b64_encode, resources, admlc, admlf):
+
+ # Send POST
+ flow = admlf.getId()
+ response = admlc.postDict("/testid-description/{}".format(flow), { "description":"Look test" })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Done" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Send POST
+ response = admlc.postDict("/test-look", { "id":flow })
+
+ # Verify response
+ lookExpected = resources("look_output.txt").format(flow)
+ responseBodyRef = { "success":"true", "response":"{}".format(b64_encode(lookExpected)) }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Cleanup
+ response = admlc.post("/test-clear")
+
+def test_002_i_want_to_look_a_nonexistent_test(admlc):
+
+ # Send POST
+ response = admlc.postDict("/test-look", { "id":666 })
+
+ # Verify response
+ responseBodyRef = { "success":"false", "response":"Internal error (check ADML traces): cannot found test id (666)" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_launch_the_next_testcase_with_test_next(admlc, admlf):
+
+ flow = admlf.getId()
+
+ requestBody = { "amount":5 }
+ responseBodyRef = { "success":"true", "response":"Done" }
+
+ # Send POST
+ response = admlc.postDict("/testid-ip-limit/{}".format(flow), requestBody)
+
+ ## Launch the next
+ requestBody = {} # 1 by default
+ responseBodyRef = { "success":"true", "response":"Processed 1 test case" }
+ response = admlc.postDict("/test-next", requestBody)
+
+ # Verify response
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Cleanup
+ response = admlc.post("/test-clear")
+
+def test_002_i_want_to_launch_a_bunch_of_tests_with_test_next(admlc, admlf):
+
+ flow1 = admlf.getId()
+ flow2 = admlf.getId()
+
+ requestBody = { "amount":5 }
+ responseBodyRef = { "success":"true", "response":"Done" }
+
+ # Send POSTs
+ response = admlc.postDict("/testid-ip-limit/{}".format(flow1), requestBody)
+ response = admlc.postDict("/testid-ip-limit/{}".format(flow2), requestBody)
+
+ ## Launch the second & third:
+ requestBody = { "syncAmount":2 }
+ responseBodyRef = { "success":"true", "response":"Processed 2 test cases synchronously" }
+ response = admlc.postDict("/test-next", requestBody)
+
+ # Verify response
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Cleanup
+ response = admlc.post("/test-clear")
+
+@pytest.mark.skip(reason="pytest is quite fast and the dedicated thread is still running during the clear operation, so the clear does not complete")
+def test_003_i_want_to_launch_the_next_testcase_with_test_next(admlc, admlf):
+
+ flow = admlf.getId()
+
+ requestBody = { "script":"date +'%s.%N'" }
+ responseBodyRef = { "success":"true", "response":"Done" }
+
+ # Send POST
+ response = admlc.postDict("/testid-sh-command/{}".format(flow), requestBody)
+
+ ## Launch the next
+ requestBody = {} # 1 by default
+ responseBodyRef = { "success":"true", "response":"Processed 1 test case" }
+ response = admlc.postDict("/test-next", requestBody)
+
+ # Verify response
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Cleanup
+ response = admlc.post("/test-clear")
+
+@pytest.mark.skip(reason="pytest is quite fast and the dedicated thread is still running during the clear operation, so the clear does not complete")
+def test_004_i_want_to_launch_a_bunch_of_tests_with_test_next(admlc, admlf):
+
+ flow1 = admlf.getId()
+ flow2 = admlf.getId()
+
+ requestBody = { "script":"date +'%s.%N'" }
+ responseBodyRef = { "success":"true", "response":"Done" }
+
+ # Send POSTs
+ response = admlc.postDict("/testid-sh-command/{}".format(flow1), requestBody)
+ response = admlc.postDict("/testid-sh-command/{}".format(flow2), requestBody)
+
+ ## Launch the second & third:
+ requestBody = { "syncAmount":2 }
+ responseBodyRef = { "success":"true", "response":"Processed 2 test cases synchronously" }
+ response = admlc.postDict("/test-next", requestBody)
+
+ # Verify response
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Cleanup
+ response = admlc.post("/test-clear")
+
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_repeat_the_test_pool_forever(admlc):
+
+ response = admlc.postDict("/test-repeats", { "amount": -1 })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Pool repeats: -1 [no limit] (current cycle: 1)" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+def test_002_i_want_to_disable_test_pool_cycle_repeats(admlc):
+
+ response = admlc.postDict("/test-repeats", { "amount":0 })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Pool repeats: 0 (current cycle: 1)" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_enable_report_hex_for_test_manager_progress(admlc):
+
+ # Send POST
+ response = admlc.postDict("/test-report-hex", { "action":"enable" })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Report includes hexadecimal messages" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+def test_002_i_want_to_disable_report_hex_for_test_manager_progress(admlc):
+
+ # Send POST
+ response = admlc.postDict("/test-report-hex", { "action":"disable" })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Report excludes hexadecimal messages" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_enable_test_reports_for_any_state(admlc):
+
+ response = admlc.postDict("/test-report", {})
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Report enabled for tests in 'any' state" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+def test_002_i_want_to_disable_test_reports_for_any_state(admlc):
+
+ response = admlc.postDict("/test-report", { "action":"disable" })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Report disabled for tests in 'any' state" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+
+def test_003_i_want_to_enable_test_reports_for_failed_state(admlc):
+
+ response = admlc.postDict("/test-report", { "state":"failed" })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Report enabled for tests in 'failed' state" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+def test_004_i_want_to_enable_test_reports_for_invalid_state(admlc):
+
+ response = admlc.postDict("/test-report", { "state":"INVALID STATE" })
+
+ # Verify response
+ responseBodyRef = { "success":"false", "response":"Internal error (check ADML traces): invalid state (allowed: initialized|in-progress|failed|success|[all]|none)" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
--- /dev/null
+import pytest
+
+def test_001_i_want_to_do_reset_soft_for_every_test(admlc):
+
+ response = admlc.postDict("/test-reset", {})
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Soft reset have been sent to all programmed tests: nothing was reset" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+def test_002_i_want_to_do_reset_hard_for_every_test(admlc):
+
+ response = admlc.postDict("/test-reset", { "type":"hard" })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Hard reset have been sent to all programmed tests: nothing was reset" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_run_specific_test(admlc, admlf):
+
+ # Send POST (test flow with a single step changing ip-limit to 15)
+ flow = admlf.getId()
+ response = admlc.postDict("/testid-ip-limit/{}".format(flow), { "amount":15 })
+
+ # Verify response (test & step programmed)
+ responseBodyRef = { "success":"true", "response":"Done" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Send POST (execute the former test)
+ response = admlc.postDict("/test-run", { "id":flow })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Test executed for id provided ({})".format(flow) }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Verify ip-limit now:
+ response = admlc.postDict("/test-ip-limit", {}) # default {} to query current state
+ responseBodyRef = { "success":"true", "response":"In-progress limit amount: 15; currently there are 0 test cases running" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Cleanup & restore ip-limit (at global level because it is easier)
+ response = admlc.post("/test-clear")
+ response = admlc.postDict("/test-ip-limit", { "amount":-1 })
+
+
+def test_002_i_want_to_run_a_nonexistent_test(admlc):
+
+ # Send POST
+ response = admlc.postDict("/test-run", { "id":666 })
+
+ # Verify response
+ responseBodyRef = { "success":"false", "response":"Internal error (check ADML traces): cannot found test id (666)" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_get_the_state_for_specific_test(admlc, admlf):
+
+ # Send POST
+ flow = admlf.getId()
+ response = admlc.postDict("/testid-description/{}".format(flow), { "description":"Look test" })
+
+ # Verify response
+ responseBodyRef = { "success":"true", "response":"Done" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Send POST
+ response = admlc.postDict("/test-state", { "id":flow })
+
+ # Verify response
+ responseBodyRef = { "success":"false", "response":"Initialized" } # success is false, because test-state returns true when test is Success.
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Cleanup
+ response = admlc.post("/test-clear")
+
+def test_002_i_want_to_get_the_state_for_unexisting_test(admlc):
+
+ # Send POST
+ response = admlc.postDict("/test-state", { "id":666 })
+
+ # Verify response
+ responseBodyRef = { "success":"false", "response":"Internal error (check ADML traces): cannot found test id (666)" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_retrieve_the_summary_counts(b64_encode, resources, admlc):
+
+ # Send POST
+ response = admlc.post("/test-summary-counts")
+
+ # Verify response
+ summaryCountsExpected = resources("summary-counts_output.txt")
+ responseBodyRef = { "success":"true", "response":"{}".format(b64_encode(summaryCountsExpected)) }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_retrieve_the_summary_states(b64_encode, resources, admlc):
+
+ # Send POST
+ response = admlc.post("/test-summary-states")
+
+ # Verify response
+ summaryStatesExpected = resources("summary-states_output.txt")
+ responseBodyRef = { "success":"true", "response":"{}".format(b64_encode(summaryStatesExpected)) }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_retrieve_the_tests_pool_summary(b64_decode, mylogger, admlc):
+
+ # Send POST
+ response = admlc.post("/test-summary")
+
+ # Verify response is ok (omit response content because it is dynamic: will be logged)
+ assert response["status"] == 200
+ assert response["body"]["success"] == "true"
+ body_response = b64_decode(response["body"]["response"])
+ mylogger.info("\nBASE64 DECODED RESPONSE: \n\n" + body_response)
+
--- /dev/null
+import pytest
+
+
+def test_001_i_want_to_set_a_valid_test_ttps(admlc):
+
+ requestBody = { "amount":5 }
+ responseBodyRef = { "success":"true", "response":"Assigned new test launch rate to 5 events per second" }
+
+ # Send POST
+ response = admlc.postDict("/test-ttps", requestBody)
+
+ # Verify response
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Leave stopped
+ response = admlc.postDict("/test-ttps", { "amount":0 })
+ responseBodyRef = { "success":"true", "response":"Assigned new test launch rate to 0 events per second" }
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+def test_002_i_want_to_set_an_invalid_test_ttps(admlc):
+
+ requestBody = { "amount":-8 }
+ responseBodyRef = { "success":"false", "response":"Internal error (check ADML traces): unable to configure the test rate provided" }
+
+ # Send POST
+ response = admlc.postDict("/test-ttps", requestBody)
+
+ # Verify response
+ admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
+
+ # Leave stopped
+ #response = admlc.postDict("/test-ttps", { "amount":0 })
+ #responseBodyRef = { "success":"true", "response":"Assigned new test launch rate to 0 events per second" }
+ #admlc.assert_response__status_body_headers(response, 200, responseBodyRef)
--- /dev/null
+<TestCase Id="{}" Description="Look test" State="Initialized" StartTimestamp="0 ms" Interactive="no"/>
--- /dev/null
+
+Summary Counts:
+
+Total: 0
+Initialized: 0
+In Progress: 0
+Failed: 0
+Success: 0
\ No newline at end of file
--- /dev/null
+
+Summary States:
+
+No test cases programmed !
\ No newline at end of file
result = "";
if (!tests()) {
- result = "there are not programmed test cases to be removed";
+ result = "There are not programmed test cases to be removed";
return false;
}
}
if (unsafe > 0) {
- result = "some test cases cannot be removed (";
+ result = "Some test cases cannot be removed (";
result += anna::functions::asString(unsafe);
result += "/";
result += anna::functions::asString(total);
configureTTPS(0); // stop
a_statSummary.clear();
+ result = "all the programmed test cases have been dropped";
return true;
}