diff --git a/README.md b/README.md
index dfdedc25e..3a3667d44 100644
--- a/README.md
+++ b/README.md
@@ -5,10 +5,10 @@ This repository contains Taurus scripts for performance testing of Atlassian Dat
## Supported versions
* Supported Jira versions:
- * Jira [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `9.12.12` and `9.4.25`
+ * Jira [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `9.12.12`, `9.4.25` and `10.0.1` Platform release
* Supported Jira Service Management versions:
- * Jira Service Management [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `5.12.12` and `5.4.25`
+ * Jira Service Management [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `5.12.12`, `5.4.25` and `10.0.1` Platform release
* Supported Confluence versions:
* Confluence [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `8.5.14`, `7.19.26` and `9.0.2` Platform release
diff --git a/app/bamboo.yml b/app/bamboo.yml
index a5f218873..595e1c577 100644
--- a/app/bamboo.yml
+++ b/app/bamboo.yml
@@ -51,7 +51,7 @@ services:
- python util/post_run/cleanup_results_dir.py
- module: pip-install
packages:
- - selenium==4.24.0
+ - selenium==4.25.0
execution:
- scenario: jmeter
executor: jmeter
@@ -121,7 +121,7 @@ modules:
httpsampler.ignore_failed_embedded_resources: "true"
selenium:
chromedriver:
- version: "128.0.6613.137" # Supports Chrome version 128. You can refer to https://googlechromelabs.github.io/chrome-for-testing
+ version: "130.0.6723.91" # Supports Chrome version 130. You can refer to https://googlechromelabs.github.io/chrome-for-testing
reporting:
- data-source: sample-labels
module: junit-xml
diff --git a/app/bitbucket.yml b/app/bitbucket.yml
index 5ecc4963b..acffec757 100644
--- a/app/bitbucket.yml
+++ b/app/bitbucket.yml
@@ -37,7 +37,7 @@ services:
- python util/post_run/cleanup_results_dir.py
- module: pip-install
packages:
- - selenium==4.24.0
+ - selenium==4.25.0
execution:
- scenario: ${load_executor}
concurrency: ${concurrency}
@@ -87,7 +87,7 @@ modules:
httpsampler.ignore_failed_embedded_resources: "true"
selenium:
chromedriver:
- version: "128.0.6613.137" # Supports Chrome version 128. You can refer to https://googlechromelabs.github.io/chrome-for-testing
+ version: "130.0.6723.91" # Supports Chrome version 130. You can refer to https://googlechromelabs.github.io/chrome-for-testing
reporting:
- data-source: sample-labels
module: junit-xml
diff --git a/app/confluence.yml b/app/confluence.yml
index 815e0cb00..14b073efa 100644
--- a/app/confluence.yml
+++ b/app/confluence.yml
@@ -52,7 +52,7 @@ services:
- python util/post_run/cleanup_results_dir.py
- module: pip-install
packages:
- - selenium==4.24.0
+ - selenium==4.25.0
execution:
- scenario: ${load_executor}
executor: ${load_executor}
@@ -114,7 +114,7 @@ modules:
httpsampler.ignore_failed_embedded_resources: "true"
selenium:
chromedriver:
- version: "128.0.6613.137" # Supports Chrome version 128. You can refer to https://googlechromelabs.github.io/chrome-for-testing
+ version: "130.0.6723.91" # Supports Chrome version 130. You can refer to https://googlechromelabs.github.io/chrome-for-testing
reporting:
- data-source: sample-labels
module: junit-xml
diff --git a/app/jira.yml b/app/jira.yml
index 13242f819..c5127e650 100644
--- a/app/jira.yml
+++ b/app/jira.yml
@@ -52,7 +52,7 @@ services:
- python util/post_run/cleanup_results_dir.py
- module: pip-install
packages:
- - selenium==4.24.0
+ - selenium==4.25.0
execution:
- scenario: ${load_executor}
executor: ${load_executor}
@@ -115,7 +115,7 @@ modules:
httpsampler.ignore_failed_embedded_resources: "true"
selenium:
chromedriver:
- version: "128.0.6613.137" # Supports Chrome version 128. You can refer to https://googlechromelabs.github.io/chrome-for-testing
+ version: "130.0.6723.91" # Supports Chrome version 130. You can refer to https://googlechromelabs.github.io/chrome-for-testing
reporting:
- data-source: sample-labels
module: junit-xml
diff --git a/app/jmeter/bamboo.jmx b/app/jmeter/bamboo.jmx
index cff4f7f5d..024f5e91c 100644
--- a/app/jmeter/bamboo.jmx
+++ b/app/jmeter/bamboo.jmx
@@ -173,7 +173,7 @@
-
+
true
false
@@ -228,94 +228,187 @@
16
-
-
- User login action
- UTF-8
- ${application.postfix}/userlogin.action
- POST
- true
- false
-
-
-
- false
- os_username
- ${username}
- =
- true
-
-
- false
- os_password
- ${password}
- =
- true
-
-
- true
- os_destination
- /allPlans.action
- =
- true
-
-
- false
- atl_token
-
- =
- true
-
-
- false
- Log in
- =
- true
- save
-
-
-
-
-
-
-
-
- Accept-Language
- en-US,en;q=0.5
-
-
- Accept
- text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
-
-
- Upgrade-Insecure-Requests
- 1
-
-
- Content-Type
- application/x-www-form-urlencoded
-
-
- Cache-Control
- no-cache
-
-
- Accept-Encoding
- gzip, deflate
-
-
-
+
+ false
+ loginform
+ loginForm
+ $1$
+ NOT_FOUND
+ 1
+ all
+
+ false
+
-
-
- 302
-
-
- Assertion.response_code
- false
- 1
-
+
+ groovy
+
+
+ true
+ String loginform = vars.get("loginform");
+
+if ("NOT_FOUND".equals(loginform)) {
+ vars.put("legacy_login_form", "false");
+ log.info("2SVlogin flow detected");
+} else {
+ vars.put("legacy_login_form", "true");
+ log.info("legacy login flow detected");
+}
+
+
+
+ groovy
+
+
+ true
+ log.info("Legacy login flow: ${legacy_login_form}")
+
+
+
+
+
+ ${__groovy(vars.get("legacy_login_form") == 'false')}
+ false
+ true
+
+
+
+ 2sv login flow
+ ${application.postfix}/rest/tsv/1.0/authenticate
+ POST
+ true
+ true
+
+
+
+ false
+ {"username": "${username}",
+ "password": "${password}",
+ "rememberMe": "True",
+ "targetUrl": ""
+}
+ =
+
+
+
+
+
+
+
+
+ Content-Type
+ application/json
+
+
+
+
+
+
+ 200
+
+
+ Assertion.response_code
+ false
+ 2
+
+
+
+
+
+ ${__groovy(vars.get("legacy_login_form") == 'true')}
+ false
+ true
+
+
+
+ User login action
+ UTF-8
+ ${application.postfix}/userlogin.action
+ POST
+ true
+ false
+
+
+
+ false
+ os_username
+ ${username}
+ =
+ true
+
+
+ false
+ os_password
+ ${password}
+ =
+ true
+
+
+ true
+ os_destination
+ /allPlans.action
+ =
+ true
+
+
+ false
+ atl_token
+
+ =
+ true
+
+
+ false
+ Log in
+ =
+ true
+ save
+
+
+
+
+
+
+
+
+ Accept-Language
+ en-US,en;q=0.5
+
+
+ Accept
+ text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
+
+
+ Upgrade-Insecure-Requests
+ 1
+
+
+ Content-Type
+ application/x-www-form-urlencoded
+
+
+ Cache-Control
+ no-cache
+
+
+ Accept-Encoding
+ gzip, deflate
+
+
+
+
+
+
+ 302
+
+
+ Assertion.response_code
+ false
+ 1
+
+
+
@@ -1189,96 +1282,174 @@ if ( sleep_time > 0 ) {
-
- Detected the start of a redirect chain
- UTF-8
- ${application.postfix}/userlogin.action
- POST
- true
- false
-
-
-
- false
- os_username
- ${app_specific_username}
- =
- true
-
-
- false
- os_password
- ${app_specific_password}
- =
- true
-
-
- true
- os_destination
- /allPlans.action
- =
- true
-
-
- false
- atl_token
-
- =
- true
-
-
- false
- save
- Log In
- =
- true
-
-
-
-
+
+ ${__groovy(vars.get("legacy_login_form") == 'false')}
+ false
+ true
+
-
-
-
- Accept-Language
- en-US,en;q=0.5
-
-
- Pragma
- no-cache
-
-
- Accept
- text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8
-
-
- Upgrade-Insecure-Requests
- 1
-
-
- Content-Type
- application/x-www-form-urlencoded
-
-
- Cache-Control
- no-cache
-
-
- Accept-Encoding
- gzip, deflate
-
-
-
-
-
- groovy
-
-
- true
- vars.put("run_as_specific_user", "true")
-prev.setIgnore()
-
-
+
+ 2sv login flow
+ ${application.postfix}/rest/tsv/1.0/authenticate
+ POST
+ true
+ true
+
+
+
+ false
+ {"username": "${app_specific_username}",
+ "password": "${app_specific_password}",
+ "rememberMe": "True",
+ "targetUrl": ""
+}
+ =
+
+
+
+
+
+
+
+
+ Content-Type
+ application/json
+
+
+
+
+
+
+ 200
+
+
+ Assertion.response_code
+ false
+ 2
+
+
+
+ groovy
+
+
+ true
+ vars.put("run_as_specific_user", "true")
+
+
+
+ groovy
+
+
+ true
+ prev.setIgnore()
+
+
+
+
+
+ ${__groovy(vars.get("legacy_login_form") == 'true')}
+ false
+ true
+
+
+
+ Detected the start of a redirect chain
+ UTF-8
+ ${application.postfix}/userlogin.action
+ POST
+ true
+ false
+
+
+
+ false
+ os_username
+ ${app_specific_username}
+ =
+ true
+
+
+ false
+ os_password
+ ${app_specific_password}
+ =
+ true
+
+
+ true
+ os_destination
+ /allPlans.action
+ =
+ true
+
+
+ false
+ atl_token
+
+ =
+ true
+
+
+ false
+ save
+ Log In
+ =
+ true
+
+
+
+
+
+
+
+
+ Accept-Language
+ en-US,en;q=0.5
+
+
+ Pragma
+ no-cache
+
+
+ Accept
+ text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8
+
+
+ Upgrade-Insecure-Requests
+ 1
+
+
+ Content-Type
+ application/x-www-form-urlencoded
+
+
+ Cache-Control
+ no-cache
+
+
+ Accept-Encoding
+ gzip, deflate
+
+
+
+
+
+ groovy
+
+
+ true
+ vars.put("run_as_specific_user", "true")
+
+
+
+ groovy
+
+
+ true
+ prev.setIgnore()
+
+
+
${application.postfix}/profile/userProfile.action
@@ -1315,7 +1486,8 @@ if (actual_username != app_specific_username) {
} else {
prev.setIgnore()
}
-
+
+log.info("Actual username: ${actual_username}")
@@ -1411,95 +1583,164 @@ if (actual_username != app_specific_username) {
true
-
- Detected the start of a redirect chain
- UTF-8
- ${application.postfix}/userlogin.action
- POST
- true
- false
-
-
-
- false
- os_username
- ${username}
- =
- true
-
-
- false
- os_password
- ${password}
- =
- true
-
-
- true
- os_destination
- /allPlans.action
- =
- true
-
-
- false
- atl_token
-
- =
- true
-
-
- false
- save
- Log In
- =
- true
-
-
-
-
+
+ ${__groovy(vars.get("legacy_login_form") == 'false')}
+ false
+ true
+
-
-
-
- Accept-Language
- en-US,en;q=0.5
-
-
- Pragma
- no-cache
-
-
- Accept
- text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8
-
-
- Upgrade-Insecure-Requests
- 1
-
-
- Content-Type
- application/x-www-form-urlencoded
-
-
- Cache-Control
- no-cache
-
-
- Accept-Encoding
- gzip, deflate
-
-
-
-
-
- groovy
-
-
- true
- prev.setIgnore()
-
-
+
+ 2sv login flow
+ ${application.postfix}/rest/tsv/1.0/authenticate
+ POST
+ true
+ true
+
+
+
+ false
+ {"username": "${username}",
+ "password": "${password}",
+ "rememberMe": "True",
+ "targetUrl": ""
+}
+ =
+
+
+
+
+
+
+
+
+ Content-Type
+ application/json
+
+
+
+
+
+
+ 200
+
+
+ Assertion.response_code
+ false
+ 2
+
+
+
+ groovy
+
+
+ true
+ prev.setIgnore()
+
+
+
+
+
+ ${__groovy(vars.get("legacy_login_form") == 'true')}
+ false
+ true
+
+
+
+ User login action
+ UTF-8
+ ${application.postfix}/userlogin.action
+ POST
+ true
+ false
+
+
+
+ false
+ os_username
+ ${username}
+ =
+ true
+
+
+ false
+ os_password
+ ${password}
+ =
+ true
+
+
+ true
+ os_destination
+ /allPlans.action
+ =
+ true
+
+
+ false
+ atl_token
+
+ =
+ true
+
+
+ false
+ Log in
+ =
+ true
+ save
+
+
+
+
+
+
+
+
+ Accept-Language
+ en-US,en;q=0.5
+
+
+ Accept
+ text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
+
+
+ Upgrade-Insecure-Requests
+ 1
+
+
+ Content-Type
+ application/x-www-form-urlencoded
+
+
+ Cache-Control
+ no-cache
+
+
+ Accept-Encoding
+ gzip, deflate
+
+
+
+
+
+
+ 302
+
+
+ Assertion.response_code
+ false
+ 1
+
+
+
+ groovy
+
+
+ true
+ prev.setIgnore()
+
+
+
${application.postfix}/profile/userProfile.action
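For reference, the reworked Bamboo login samplers above (and the matching Locust change further down) follow the same two-branch flow: probe `userlogin.action`, use the legacy form POST when the page still contains `loginForm`, otherwise authenticate against the 2SV REST resource. A minimal Python sketch of that flow, assuming a plain `requests` session and an illustrative `base_url` that is not part of the toolkit:

```python
import requests

def bamboo_login(base_url, username, password):
    """Sketch of the dual login flow exercised by the updated JMeter/Locust scripts."""
    session = requests.Session()
    login_page = session.get(f"{base_url}/userlogin.action")

    if "loginForm" in login_page.text:
        # Legacy flow: form-encoded POST to userlogin.action
        session.post(f"{base_url}/userlogin.action",
                     data={"os_username": username,
                           "os_password": password,
                           "os_destination": "/allPlans.action",
                           "atl_token": "",
                           "save": "Log in"})
    else:
        # 2SV flow: JSON POST to the tsv authenticate resource, expecting HTTP 200
        r = session.post(f"{base_url}/rest/tsv/1.0/authenticate",
                         json={"username": username,
                               "password": password,
                               "rememberMe": "True",
                               "targetUrl": ""},
                         headers={"Content-Type": "application/json"})
        assert r.status_code == 200, "2SV authentication failed"
    return session
```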
diff --git a/app/jmeter/confluence.jmx b/app/jmeter/confluence.jmx
index 14a846e4c..3b22e2aca 100644
--- a/app/jmeter/confluence.jmx
+++ b/app/jmeter/confluence.jmx
@@ -67,7 +67,7 @@
-
+
1
0
3600
@@ -253,13 +253,13 @@ if ("NOT_FOUND".equals(loginform)) {
-
+
${__groovy(vars.get("legacy_login_form") == 'false')}
false
true
-
+
2sv login flow
${application.postfix}/rest/tsv/1.0/authenticate
POST
@@ -301,13 +301,13 @@ if ("NOT_FOUND".equals(loginform)) {
-
+
${__groovy(vars.get("legacy_login_form") == 'true')}
false
true
-
+
Detected the start of a redirect chain
${application.postfix}/dologin.action
POST
@@ -354,7 +354,7 @@ if ("NOT_FOUND".equals(loginform)) {
-
+
Accept-Language
@@ -379,7 +379,7 @@ if ("NOT_FOUND".equals(loginform)) {
-
+
302
@@ -5367,7 +5367,7 @@ if ( sleep_time > 0 ) {
-
+
app_specific_username
@@ -5382,90 +5382,177 @@ if ( sleep_time > 0 ) {
-
- Detected the start of a redirect chain
- ${application.postfix}/dologin.action
- POST
- true
- false
-
-
-
- false
- os_username
- ${app_specific_username}
- =
- true
-
-
- false
- os_password
- ${app_specific_password}
- =
- true
-
-
- false
- os_cookie
- true
- =
- true
-
-
- false
- login
- Log+in
- =
- true
-
-
- false
- os_destination
- %2Findex.action
- =
- true
-
-
-
-
+
+ ${__groovy(vars.get("legacy_login_form") == 'false')}
+ false
+ true
+
-
-
-
- Accept-Language
- en-US,en;q=0.5
-
-
- Upgrade-Insecure-Requests
- 1
-
-
- Content-Type
- application/x-www-form-urlencoded
-
-
- Accept-Encoding
- gzip, deflate
-
-
- Accept
- text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
-
-
-
-
-
- groovy
-
-
- true
- vars.put("run_as_specific_user", "true")
-prev.setIgnore()
-
-
-
+
+ 2sv login flow
+ ${application.postfix}/rest/tsv/1.0/authenticate
+ POST
+ true
+ true
+
+
+
+ false
+ {"username": "${app_specific_username}",
+ "password": "${app_specific_password}",
+ "rememberMe": "True",
+ "targetUrl": ""
+}
+ =
+
+
+
+
+
+
+
+
+ Content-Type
+ application/json
+
+
+
+
+
+
+ 200
+
+
+ Assertion.response_code
+ false
+ 2
+
+
+
+ groovy
+
+
+ true
+ vars.put("run_as_specific_user", "true")
+
+
+
+ groovy
+
+
+ true
+ prev.setIgnore()
+
+
+
-
+
+ ${__groovy(vars.get("legacy_login_form") == 'true')}
+ false
+ true
+
+
+
+ Detected the start of a redirect chain
+ ${application.postfix}/dologin.action
+ POST
+ true
+ false
+
+
+
+ false
+ os_username
+ ${app_specific_username}
+ =
+ true
+
+
+ false
+ os_password
+ ${app_specific_password}
+ =
+ true
+
+
+ false
+ os_cookie
+ true
+ =
+ true
+
+
+ false
+ login
+ Log+in
+ =
+ true
+
+
+ false
+ os_destination
+ %2Findex.action
+ =
+ true
+
+
+
+
+
+
+
+
+ Accept-Language
+ en-US,en;q=0.5
+
+
+ Upgrade-Insecure-Requests
+ 1
+
+
+ Content-Type
+ application/x-www-form-urlencoded
+
+
+ Accept-Encoding
+ gzip, deflate
+
+
+ Accept
+ text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
+
+
+
+
+
+
+ 302
+
+
+ Assertion.response_code
+ false
+ 2
+
+
+
+ groovy
+
+
+ true
+ vars.put("run_as_specific_user", "true")
+
+
+
+ groovy
+
+
+ true
+ prev.setIgnore()
+
+
+
+
+
${application.postfix}/rest/api/user/current
true
GET
@@ -5476,7 +5563,7 @@ prev.setIgnore()
-
+
groovy
@@ -5500,7 +5587,7 @@ if (response_data.username != vars.get("app_specific_username")) {
-
+
true
false
@@ -5548,7 +5635,7 @@ if (response_data.username != vars.get("app_specific_username")) {
-
+
${application.postfix}/app/post_endpoint
true
GET
@@ -5586,94 +5673,183 @@ if (response_data.username != vars.get("app_specific_username")) {
-
+
${run_as_specific_user}
false
true
-
- Detected the start of a redirect chain
- ${application.postfix}/dologin.action
- POST
- true
- false
-
-
-
- false
- os_username
- ${username}
- =
- true
-
-
- false
- os_password
- ${password}
- =
- true
-
-
- false
- os_cookie
- true
- =
- true
-
-
- false
- login
- Log+in
- =
- true
-
-
- false
- os_destination
- %2Findex.action
- =
- true
-
-
-
-
+
+ ${__groovy(vars.get("legacy_login_form") == 'false')}
+ false
+ true
+
-
-
-
- Accept-Language
- en-US,en;q=0.5
-
-
- Upgrade-Insecure-Requests
- 1
-
-
- Content-Type
- application/x-www-form-urlencoded
-
-
- Accept-Encoding
- gzip, deflate
-
-
- Accept
- text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
-
-
-
-
-
- groovy
-
-
- true
- prev.setIgnore()
-
-
+
+ 2sv login flow
+ ${application.postfix}/rest/tsv/1.0/authenticate
+ POST
+ true
+ true
+
+
+
+ false
+ {"username": "${username}",
+ "password": "${password}",
+ "rememberMe": "True",
+ "targetUrl": ""
+}
+ =
+
+
+
+
+
+
+
+
+ Content-Type
+ application/json
+
+
+
+
+
+
+ 200
+
+
+ Assertion.response_code
+ false
+ 2
+
+
+
+ groovy
+
+
+ true
+ vars.put("run_as_specific_user", "true")
+
+
+
+ groovy
+
+
+ true
+ prev.setIgnore()
+
+
+
-
+
+ ${__groovy(vars.get("legacy_login_form") == 'true')}
+ false
+ true
+
+
+
+ Detected the start of a redirect chain
+ ${application.postfix}/dologin.action
+ POST
+ true
+ false
+
+
+
+ false
+ os_username
+ ${username}
+ =
+ true
+
+
+ false
+ os_password
+ ${password}
+ =
+ true
+
+
+ false
+ os_cookie
+ true
+ =
+ true
+
+
+ false
+ login
+ Log+in
+ =
+ true
+
+
+ false
+ os_destination
+ %2Findex.action
+ =
+ true
+
+
+
+
+
+
+
+
+ Accept-Language
+ en-US,en;q=0.5
+
+
+ Upgrade-Insecure-Requests
+ 1
+
+
+ Content-Type
+ application/x-www-form-urlencoded
+
+
+ Accept-Encoding
+ gzip, deflate
+
+
+ Accept
+ text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
+
+
+
+
+
+
+ 302
+
+
+ Assertion.response_code
+ false
+ 2
+
+
+
+ groovy
+
+
+ true
+ vars.put("run_as_specific_user", "true")
+
+
+
+ groovy
+
+
+ true
+ prev.setIgnore()
+
+
+
+
+
${application.postfix}/rest/api/user/current
true
GET
@@ -5684,7 +5860,7 @@ if (response_data.username != vars.get("app_specific_username")) {
-
+
groovy
@@ -5706,7 +5882,7 @@ if (response_data.username != vars.get("username")) {
-
+
1
0
0
diff --git a/app/jmeter/jira.jmx b/app/jmeter/jira.jmx
index 8eccc8c0b..8a783bfb6 100644
--- a/app/jmeter/jira.jmx
+++ b/app/jmeter/jira.jmx
@@ -2005,7 +2005,7 @@ if ( sleep_time > 0 ) {
false
-
+
${application.postfix}/browse/${issue_key}
true
GET
@@ -2045,10 +2045,10 @@ if ( sleep_time > 0 ) {
-
+
false
x_issue_id
- id="key-val" rel="(.+?)">
+ id="key-val" rel="(\d+)"
$1$
NOT FOUND
1
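The tightened `x_issue_id` extractor above can be sanity-checked with a quick sketch. The sample markup is hypothetical and only illustrates why anchoring on a trailing `>` over-captures once other attributes follow `rel=`:

```python
import re

# Hypothetical markup returned by /browse/<issue_key>; attribute order is illustrative.
sample = '<a id="key-val" rel="10123" href="/browse/PROJ-1">PROJ-1</a>'

old_pattern = r'id="key-val" rel="(.+?)">'  # required '>' right after the attribute
new_pattern = r'id="key-val" rel="(\d+)"'   # matches only the numeric issue id

print(re.search(old_pattern, sample).group(1))  # '10123" href="/browse/PROJ-1' - over-captures
print(re.search(new_pattern, sample).group(1))  # '10123'
```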
diff --git a/app/jmeter/jsm_customers.jmx b/app/jmeter/jsm_customers.jmx
index 01df03265..753a1493d 100644
--- a/app/jmeter/jsm_customers.jmx
+++ b/app/jmeter/jsm_customers.jmx
@@ -275,7 +275,7 @@ import org.apache.commons.io.FileUtils;
-
+
Accept
@@ -289,6 +289,10 @@ import org.apache.commons.io.FileUtils;
Content-Type
application/x-www-form-urlencoded
+
+ X-Atlassian-Token
+ no-check
+
diff --git a/app/jsm.yml b/app/jsm.yml
index e36900c1a..9eb6eba43 100644
--- a/app/jsm.yml
+++ b/app/jsm.yml
@@ -68,7 +68,7 @@ services:
- python util/post_run/cleanup_results_dir.py
- module: pip-install
packages:
- - selenium==4.24.0
+ - selenium==4.25.0
execution:
- scenario: ${load_executor}_agents
executor: ${load_executor}
@@ -167,7 +167,7 @@ modules:
httpsampler.ignore_failed_embedded_resources: "true"
selenium:
chromedriver:
- version: "128.0.6613.137" # Supports Chrome version 128. You can refer to https://googlechromelabs.github.io/chrome-for-testing
+ version: "130.0.6723.91" # Supports Chrome version 130. You can refer to https://googlechromelabs.github.io/chrome-for-testing
reporting:
- data-source: sample-labels
module: junit-xml
diff --git a/app/locustio/bamboo/http_actions.py b/app/locustio/bamboo/http_actions.py
index a0e99aeeb..dda7776fb 100644
--- a/app/locustio/bamboo/http_actions.py
+++ b/app/locustio/bamboo/http_actions.py
@@ -103,20 +103,50 @@ def locust_bamboo_login(locust):
username = user[0]
password = user[1]
- login_body = params.login_body
- login_body['os_username'] = username
- login_body['os_password'] = password
-
- # login
- r = locust.post('/userlogin.action',
- login_body,
- TEXT_HEADERS,
- catch_response=True)
-
+ # 10 get userlogin.action
+ r = locust.get('/userlogin.action', catch_response=True)
+ content = r.content.decode('utf-8')
+ is_legacy_login_form = 'loginForm' in content
+ print(f"Is legacy login form: {is_legacy_login_form}")
+ logger.locust_info(f"Is legacy login form: {is_legacy_login_form}")
+
+ if is_legacy_login_form:
+ logger.locust_info(f"Legacy login flow for user {username}")
+ login_body = params.login_body
+ login_body['os_username'] = username
+ login_body['os_password'] = password
+
+ # login
+ locust.post('/userlogin.action',
+ login_body,
+ TEXT_HEADERS,
+ catch_response=True)
+
+ else:
+ logger.locust_info(f"2SV login flow for user {username}")
+
+ login_body = {'username': username,
+ 'password': password,
+ 'rememberMe': 'True',
+ 'targetUrl': ''
+ }
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+
+ # 15 /rest/tsv/1.0/authenticate
+ locust.post('/rest/tsv/1.0/authenticate',
+ json=login_body,
+ headers=headers,
+ catch_response=True)
+
+ r = locust.get(url='/', catch_response=True)
content = r.content.decode('utf-8')
if 'Log Out' not in content:
logger.error(f'Login with {username}, {password} failed: {content}')
+ print(f'Login with {username}, {password} failed: {content}')
assert 'Log Out' in content, 'User authentication failed.'
logger.locust_info(f'User {username} is successfully logged in')
diff --git a/app/locustio/common_utils.py b/app/locustio/common_utils.py
index bcae3a23b..1c9149ff8 100644
--- a/app/locustio/common_utils.py
+++ b/app/locustio/common_utils.py
@@ -59,6 +59,13 @@
"Accept-Encoding": "gzip, deflate",
"Accept": "application/json, text/javascript, */*; q=0.01"
}
+JSM_CUSTOMERS_HEADERS = {
+ 'Accept-Language': 'en-US,en;q=0.5',
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Accept-Encoding': 'gzip, deflate',
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
+ "X-Atlassian-Token": "no-check"
+}
JIRA_API_URL = '/'
CONFLUENCE_API_URL = '/'
@@ -122,7 +129,6 @@ class MyBaseTaskSet(TaskSet):
login_failed = False
def failure_check(self, response, action_name):
- print(dir(response))
if (hasattr(response, 'error') and response.error) or not response:
if 'login' in action_name:
self.login_failed = True
diff --git a/app/locustio/jira/requests_params.py b/app/locustio/jira/requests_params.py
index b1c20a6b8..2bdad38c2 100644
--- a/app/locustio/jira/requests_params.py
+++ b/app/locustio/jira/requests_params.py
@@ -42,7 +42,7 @@ class Login(JiraResource):
class BrowseIssue(JiraResource):
action_name = "view_issue"
- issue_id_pattern = r'id="key-val" rel="(.+?)">'
+ issue_id_pattern = r'id="key-val" rel="(.+?)"'
project_avatar_id_pattern = r'projectavatar\?avatarId\=(.+?)" '
edit_allow_pattern = "secure\/EditLabels\!default" # noqa W605
browse_project_payload = {"id": "com.atlassian.jira.jira-projects-issue-navigator:sidebar-issue-navigator"}
diff --git a/app/locustio/jsm/customers/customers_http_actions.py b/app/locustio/jsm/customers/customers_http_actions.py
index 3e249e492..a378cd05d 100644
--- a/app/locustio/jsm/customers/customers_http_actions.py
+++ b/app/locustio/jsm/customers/customers_http_actions.py
@@ -4,7 +4,7 @@
import random
from locustio.common_utils import init_logger, jsm_customer_measure, TEXT_HEADERS, RESOURCE_HEADERS, \
- generate_random_string
+ generate_random_string, NO_TOKEN_HEADERS, JSM_CUSTOMERS_HEADERS
from locustio.jsm.customers.customers_requests_params import Login, ViewPortal, ViewRequests, ViewRequest, \
AddComment, ShareRequest, ShareRequestOrg, CreateRequest, jsm_customer_datasets
@@ -50,7 +50,7 @@ def customer_login_and_view_portals(locust):
r = locust.post(
'/servicedesk/customer/user/login',
body,
- TEXT_HEADERS,
+ headers=JSM_CUSTOMERS_HEADERS,
catch_response=True)
locust.get('/servicedesk/customer/portals', catch_response=True)
diff --git a/app/selenium_ui/bamboo/modules.py b/app/selenium_ui/bamboo/modules.py
index a07a8d0be..539c45cda 100644
--- a/app/selenium_ui/bamboo/modules.py
+++ b/app/selenium_ui/bamboo/modules.py
@@ -25,6 +25,7 @@ def measure():
@print_timing("selenium_login:open_login_page")
def sub_measure():
login_page.go_to()
+ login_page.wait_for_page_loaded()
sub_measure()
login_page.set_credentials(username=datasets['username'], password=datasets['password'])
diff --git a/app/selenium_ui/bamboo/pages/pages.py b/app/selenium_ui/bamboo/pages/pages.py
index d28d38503..979e7d1ed 100644
--- a/app/selenium_ui/bamboo/pages/pages.py
+++ b/app/selenium_ui/bamboo/pages/pages.py
@@ -8,13 +8,32 @@
class Login(BasePage):
page_url = LoginPageLocators.login_page_url
+ def __init__(self, driver):
+ super().__init__(driver)
+ self.is_2sv_login = False
+
+ def wait_for_page_loaded(self):
+ self.wait_until_visible(LoginPageLocators.login_page_content)
+ if not self.get_elements(LoginPageLocators.login_button):
+ self.is_2sv_login = True
+ self.wait_until_visible(LoginPageLocators.login_button_2sv)
+ print("INFO: 2sv login form")
+
def click_login_button(self):
- self.wait_until_visible(LoginPageLocators.login_submit_button).click()
- self.wait_until_invisible(LoginPageLocators.login_submit_button)
+ if self.is_2sv_login:
+ self.wait_until_visible(LoginPageLocators.login_button_2sv).click()
+ self.wait_until_invisible(LoginPageLocators.login_button_2sv)
+ else:
+ self.wait_until_visible(LoginPageLocators.login_button).click()
+ self.wait_until_invisible(LoginPageLocators.login_button)
def set_credentials(self, username, password):
- self.get_element(LoginPageLocators.login_username_field).send_keys(username)
- self.get_element(LoginPageLocators.login_password_field).send_keys(password)
+ if self.is_2sv_login:
+ self.get_element(LoginPageLocators.login_username_field_2sv).send_keys(username)
+ self.get_element(LoginPageLocators.login_password_field_2sv).send_keys(password)
+ else:
+ self.get_element(LoginPageLocators.login_username_field).send_keys(username)
+ self.get_element(LoginPageLocators.login_password_field).send_keys(password)
class ProjectList(BasePage):
diff --git a/app/selenium_ui/bamboo/pages/selectors.py b/app/selenium_ui/bamboo/pages/selectors.py
index 57c66cb28..efbd715e5 100644
--- a/app/selenium_ui/bamboo/pages/selectors.py
+++ b/app/selenium_ui/bamboo/pages/selectors.py
@@ -34,9 +34,18 @@ def logout_url(self):
class LoginPageLocators:
login_page_url = UrlManager().login_url()
+
+ login_page_content = (By.ID, "content")
+
+ # legacy login form
login_username_field = (By.ID, "loginForm_os_username")
login_password_field = (By.ID, "loginForm_os_password")
- login_submit_button = (By.ID, "loginForm_save")
+ login_button = (By.ID, "loginForm_save")
+
+ # 2sv login form
+ login_button_2sv = (By.ID, "login-button")
+ login_username_field_2sv = (By.ID, "username-field")
+ login_password_field_2sv = (By.ID, "password-field")
class AllProjectsLocators:
diff --git a/app/selenium_ui/base_page.py b/app/selenium_ui/base_page.py
index 9878e6d3a..93c94bbea 100644
--- a/app/selenium_ui/base_page.py
+++ b/app/selenium_ui/base_page.py
@@ -159,6 +159,10 @@ def app_version(self):
def generate_random_string(length):
return "".join([random.choice(string.digits + string.ascii_letters + ' ') for _ in range(length)])
+ @staticmethod
+ def generate_no_whitespace_string(length):
+ return "".join([random.choice(string.digits + string.ascii_letters) for _ in range(length)])
+
def select(self, element):
return Select(element)
diff --git a/app/selenium_ui/confluence/modules.py b/app/selenium_ui/confluence/modules.py
index b54266801..fc699e313 100644
--- a/app/selenium_ui/confluence/modules.py
+++ b/app/selenium_ui/confluence/modules.py
@@ -310,7 +310,8 @@ def cql_search(webdriver, datasets):
@print_timing("selenium_cql_search")
def measure():
page.get_element(PageLocators.search_box).send_keys(random_cql)
- page.wait_until_any_ec_presented((PageLocators.empty_search_results, PageLocators.search_results))
+ page.wait_until_any_ec_presented((PageLocators.empty_search_results, PageLocators.search_results),
+ timeout=30)
page.get_element(PageLocators.close_search_button).click()
measure()
diff --git a/app/selenium_ui/jsm/pages/agent_pages.py b/app/selenium_ui/jsm/pages/agent_pages.py
index cc6b94b0c..888db905f 100644
--- a/app/selenium_ui/jsm/pages/agent_pages.py
+++ b/app/selenium_ui/jsm/pages/agent_pages.py
@@ -113,7 +113,7 @@ def check_comment_text_is_displayed(self, text, rte_status=None):
def add_request_comment(self, rte_status):
comment_text = f"Add comment from selenium - {self.generate_random_string(30)}"
- self.wait_until_visible(ViewCustomerRequestLocators.customers_sidebar_selector)
+ self.wait_until_visible(ViewCustomerRequestLocators.comment_area)
textarea = self.get_element(ViewCustomerRequestLocators.comment_collapsed_textarea)
self.driver.execute_script("arguments[0].scrollIntoView(true);", textarea)
textarea.click()
@@ -220,7 +220,7 @@ def wait_for_page_loaded(self):
self.wait_until_visible(InsightNewSchemaLocators.create_object_schemas)
def create_new_schema(self):
- new_schema_name = self.generate_random_string(4).strip()
+ new_schema_name = self.generate_no_whitespace_string(4).strip()
self.wait_until_clickable(InsightNewSchemaLocators.create_object_schemas).click()
self.wait_until_visible(InsightNewSchemaLocators.new_object_schema)
self.wait_until_clickable(InsightNewSchemaLocators.new_object_schema).click()
@@ -254,8 +254,13 @@ def insight_create_new_objects(self):
self.wait_until_clickable(InsightNewObjectLocators.create_object_button).click()
self.wait_until_visible(InsightNewObjectLocators.object_name_field)
self.get_element(InsightNewObjectLocators.object_name_field).send_keys(self.generate_random_string(10))
- self.wait_until_visible(InsightNewObjectLocators.create_button)
- self.wait_until_clickable(InsightNewObjectLocators.create_button).click()
+ self.wait_until_visible(InsightNewObjectLocators.create_another)
+ if not self.get_elements(InsightNewObjectLocators.create_button):
+ self.wait_until_visible(InsightNewObjectLocators.create_button_jsm10)
+ self.wait_until_clickable(InsightNewObjectLocators.create_button_jsm10).click()
+ else:
+ self.wait_until_visible(InsightNewObjectLocators.create_button)
+ self.wait_until_clickable(InsightNewObjectLocators.create_button).click()
self.wait_until_invisible(InsightNewObjectLocators.pop_up_after_create_object)
@@ -280,9 +285,14 @@ def delete_new_schema(self, schema_name):
self.wait_until_clickable(InsightDeleteSchemaLocators.
new_object_schema_delete_button_locator(schema_name)).click()
self.wait_until_visible(InsightDeleteSchemaLocators.delete_window_selector)
- self.wait_until_clickable(InsightDeleteSchemaLocators.submit_delete_button).click()
- self.wait_until_clickable(InsightDeleteSchemaLocators.submit_delete_button).click()
- self.wait_until_invisible(InsightDeleteSchemaLocators.submit_delete_button)
+ if not self.get_elements(InsightNewObjectLocators.create_button):
+ self.wait_until_clickable(InsightDeleteSchemaLocators.submit_delete_button_jsm10).click()
+ self.wait_until_clickable(InsightDeleteSchemaLocators.submit_delete_button_jsm10).click()
+ self.wait_until_invisible(InsightDeleteSchemaLocators.submit_delete_button_jsm10)
+ else:
+ self.wait_until_clickable(InsightDeleteSchemaLocators.submit_delete_button).click()
+ self.wait_until_clickable(InsightDeleteSchemaLocators.submit_delete_button).click()
+ self.wait_until_invisible(InsightDeleteSchemaLocators.submit_delete_button)
class InsightViewQueue(BasePage):
@@ -296,7 +306,11 @@ def wait_for_page_loaded(self):
self.wait_until_visible(InsightViewQueueLocators.view_queue_page)
def view_random_queue_with_insight(self):
- self.wait_until_visible(InsightViewQueueLocators.view_queue_insight_column)
+ self.wait_until_visible(InsightViewQueueLocators.table_container)
+ if not self.get_elements(InsightViewQueueLocators.navigation):
+ self.wait_until_visible(InsightViewQueueLocators.view_queue_insight_column)
+ else:
+ self.wait_until_visible(InsightViewQueueLocators.view_queue_insight_column_jsm10)
class InsightSearchByIql(BasePage):
@@ -328,4 +342,5 @@ def wait_for_page_loaded(self):
self.wait_until_visible(InsightViewIssue.issue_title)
def view_issue_with_insight_custom_field(self):
- self.wait_until_visible(InsightViewIssue.custom_field_insight)
+ if self.get_elements(InsightViewQueueLocators.view_queue_page):
+ self.wait_until_visible(InsightViewIssue.custom_field_insight)
diff --git a/app/selenium_ui/jsm/pages/agent_selectors.py b/app/selenium_ui/jsm/pages/agent_selectors.py
index b0729f6c2..6c0af3819 100644
--- a/app/selenium_ui/jsm/pages/agent_selectors.py
+++ b/app/selenium_ui/jsm/pages/agent_selectors.py
@@ -110,6 +110,7 @@ class BrowseCustomersLocators:
class ViewCustomerRequestLocators:
bread_crumbs = (By.CSS_SELECTOR, ".aui-nav.aui-nav-breadcrumbs")
+ comment_area = (By.CLASS_NAME, 'sd-comment-collapse')
comment_collapsed_textarea = (By.ID, "sd-comment-collapsed-textarea")
comment_text_field_RTE = (By.XPATH, "//div[textarea[@id='comment']]//iframe")
comment_text_field = (By.XPATH, "//textarea[@id='comment']")
@@ -145,6 +146,7 @@ def get_new_object_schema_name_locator(name):
class InsightDeleteSchemaLocators:
delete_window_selector = (By.CSS_SELECTOR, "#rlabs-insight-dialog > div")
submit_delete_button = (By.CSS_SELECTOR, "#rlabs-insight-dialog > div > div.dialog-button-panel > button")
+ submit_delete_button_jsm10 = (By.ID, "submit-button")
schema_list = (By.ID, "rlabs-manage-main")
@staticmethod
@@ -161,11 +163,16 @@ class InsightNewObjectLocators:
object_name_field = (By.CSS_SELECTOR, "input[id^=rlabs-insight-attribute-]")
create_button = (By.XPATH, "//body/div[@id='rlabs-insight-dialog']/div[1]/div[2]/button[1]")
pop_up_after_create_object = (By.XPATH, "//body/div[@id='aui-flag-container']/div[1]/div[1]")
+ create_another = (By.ID, "rlabs-create-another-label")
+ create_button_jsm10 = (By.ID, "submit-button")
class InsightViewQueueLocators:
view_queue_page = (By.XPATH, "//section[@id='sd-page-panel']")
view_queue_insight_column = (By.XPATH, "//span[contains(text(),'Insight')]")
+ table_container = (By.XPATH, "//div[@class='queue-react-table-container']")
+ navigation = (By.XPATH, "//div[@role='navigation']")
+ view_queue_insight_column_jsm10 = (By.XPATH, "//a[normalize-space()='Insight']")
class InsightSearchObjectIql:
diff --git a/app/util/api/bamboo_clients.py b/app/util/api/bamboo_clients.py
index a098455a6..7d796950f 100644
--- a/app/util/api/bamboo_clients.py
+++ b/app/util/api/bamboo_clients.py
@@ -158,11 +158,65 @@ def get_server_info(self):
r = self.get(f'{self.host}/rest/applinks/1.0/manifest', error_msg="Could not get Bamboo server info")
return r.json()
+ def get_system_page(self):
+ login_url = f'{self.host}/userlogin.action'
+ auth_url = f'{self.host}/admin/webSudoSubmit.action'
+ tsv_auth_url = f'{self.host}/rest/tsv/1.0/authenticate'
+
+ legacy_login_body = {
+ 'os_username': self.user,
+ 'os_password': self.password,
+ 'os_destination': '/admin/systemInfo.action',
+ 'atl_token': '',
+ 'save': 'Log in'
+ }
+
+ tsv_login_body = {
+ 'username': self.user,
+ 'password': self.password,
+ 'rememberMe': True,
+ 'targetUrl': ''
+ }
+
+ auth_body = {
+ 'web_sudo_destination': '/admin/systemInfo.action',
+ 'save': 'Submit',
+ 'password': self.password
+ }
+
+ login_page_response = self.session.get(login_url)
+ if login_page_response.status_code == 200:
+ login_page_content = login_page_response.text
+ is_legacy_login_form = 'loginForm' in login_page_content
+ else:
+ raise Exception(f"Failed to fetch login page. Status code: {login_page_response.status_code}")
+
+ self.headers['X-Atlassian-Token'] = 'no-check'
+ if is_legacy_login_form:
+ r = self.session.post(url=login_url, params=legacy_login_body, headers=self.headers)
+ content = r.content.decode("utf-8")
+ # Bamboo version 9 does not have web sudo auth
+ if "System information" in content:
+ print("INFO: No web sudo auth")
+ return content
+ elif "Administrator Access" in content:
+ print("INFO: Web sudo page detected")
+ else:
+ print(f"Warning: Unexpected login page: Content {content}")
+ else:
+ self.session.post(url=tsv_auth_url, json=tsv_login_body)
+
+ system_info_html = self.session.post(url=auth_url, params=auth_body, headers=self.headers)
+ content = system_info_html.content.decode("utf-8")
+ if "System information" not in content:
+ print(f"Warning: failed to get System information page: Content {content}")
+ return content
+
def get_available_processors(self):
try:
processors = None
- page = self.get(f'{self.host}/admin/systemInfo.action', 'Could not get Page content')
- tree = html.fromstring(page.content)
+ page = self.get_system_page()
+ tree = html.fromstring(page)
try:
processors = tree.xpath('//*[@id="systemInfo_availableProcessors"]/text()')[0]
except Exception as e:
@@ -177,7 +231,7 @@ def get_nodes_count(self):
return len(r.json()["nodeStatuses"])
def get_deployment_type(self):
- bamboo_system_info_html = self._session.get(f'{self.host}/admin/systemInfo.action').content.decode("utf-8")
+ bamboo_system_info_html = self.get_system_page()
html_pattern = 'com.atlassian.dcapt.deployment=terraform'
if bamboo_system_info_html.count(html_pattern):
return 'terraform'
diff --git a/app/util/api/jira_clients.py b/app/util/api/jira_clients.py
index 99f8eef97..81d16da73 100644
--- a/app/util/api/jira_clients.py
+++ b/app/util/api/jira_clients.py
@@ -1,10 +1,11 @@
+import json
+import string
from selenium.common.exceptions import WebDriverException
from util.api.abstract_clients import RestClient, JSM_EXPERIMENTAL_HEADERS
from selenium_ui.conftest import retry
BATCH_SIZE_BOARDS = 1000
-BATCH_SIZE_USERS = 1000
BATCH_SIZE_ISSUES = 1000
@@ -48,38 +49,53 @@ def get_boards(self, start_at=0, max_results=100, board_type=None, name=None, pr
return boards_list
@retry()
- def get_users(self, username='.', start_at=0, max_results=1000, include_active=True, include_inactive=False):
+ def get_users(self, username='.', include_active=True, include_inactive=False):
"""
- Returns a list of users that match the search string. This resource cannot be accessed anonymously.
- :param username: A query string used to search username, name or e-mail address. "." - search for all users.
- :param start_at: the index of the first user to return (0-based).
- :param max_results: the maximum number of users to return (defaults to 50).
- The maximum allowed value is 1000.
- If you specify a value that is higher than this number, your search results will be truncated.
- :param include_active: If true, then active users are included in the results (default true)
- :param include_inactive: If true, then inactive users are included in the results (default false)
- :return: Returns the requested users
+        Starting from Jira 10, the get_users() API cannot return more than 100 users.
+        The startAt param will be deprecated in Jira 10.3+.
"""
+ max_results = 100
- loop_count = max_results // BATCH_SIZE_USERS + 1
- last_loop_remainder = max_results % BATCH_SIZE_USERS
-
- users_list = list()
- max_results = BATCH_SIZE_USERS if max_results > BATCH_SIZE_USERS else max_results
-
- while loop_count > 0:
- api_url = f'{self.host}/rest/api/2/user/search?username={username}&startAt={start_at}' \
- f'&maxResults={max_results}&includeActive={include_active}&includeInactive={include_inactive}'
- response = self.get(api_url, "Could not retrieve users")
-
- users_list.extend(response.json())
- loop_count -= 1
- start_at += len(response.json())
- if loop_count == 1:
- max_results = last_loop_remainder
+ api_url = f'{self.host}/rest/api/2/user/search?username={username}' \
+ f'&maxResults={max_results}' \
+ f'&includeActive={include_active}' \
+ f'&includeInactive={include_inactive}'
+ response = self.get(api_url, "Could not retrieve users")
+ users_list = response.json()
return users_list
+ @retry()
+ def get_users_by_name_search(self, username, users_count, include_active=True, include_inactive=False):
+ """
+        Starting from Jira 10, the get_users() API cannot return more than 100 users.
+        This helper collects more than 100 users via a batched prefix search.
+ """
+ print(f"INFO: Users search. Prefix: '{username}', users_count: {users_count}")
+ perf_users = list()
+
+ first_100 = self.get_users(username=username, include_active=True, include_inactive=False)
+ if users_count <= 100 or len(first_100) < 100:
+ perf_users = first_100[:users_count]
+ else:
+ name_start_list = list(string.digits + "_" + string.ascii_lowercase)
+ for i in name_start_list:
+ users_batch = self.get_users(username=username+i, include_active=True, include_inactive=False)
+ if len(users_batch) == 100:
+ print(f"Warning: found 100 users starts with: {username+i}. Checking if there are more.")
+ users_batch = self.get_users_by_name_search(username=username+i,
+ users_count=users_count-len(perf_users))
+ perf_users.extend(users_batch)
+
+ # get rid of any duplicates by creating a set from json objects
+ set_of_jsons = {json.dumps(d, sort_keys=True) for d in perf_users}
+ perf_users = [json.loads(t) for t in set_of_jsons]
+ print(f"INFO: Current found users count: {len(perf_users)}")
+
+ if len(perf_users) >= users_count:
+ perf_users = perf_users[:users_count]
+ break
+ return perf_users
@retry()
def issues_search(self, jql='order by key', start_at=0, max_results=1000, fields=None):
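As a usage note, `jira_prepare_data.py` and `jsm_prepare_data.py` below now call `get_users_by_name_search(username=<prefix>, users_count=<n>)` instead of the old paginated `get_users()`. The sketch below (the prefix is illustrative) shows how the helper fans out when a search for a prefix comes back with a full batch of 100 users:

```python
import string

# When a search for 'performance_' returns a full batch of 100 users, the helper retries
# with the prefix extended by one character from this alphabet, merges the batches and
# de-duplicates them until users_count entries are collected.
alphabet = string.digits + "_" + string.ascii_lowercase
expanded_prefixes = [f"performance_{c}" for c in alphabet]
print(expanded_prefixes[:3])   # ['performance_0', 'performance_1', 'performance_2']
print(len(expanded_prefixes))  # 37 candidate prefixes per expansion level
```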
diff --git a/app/util/bamboo/bamboo_dataset_generator/pom.xml b/app/util/bamboo/bamboo_dataset_generator/pom.xml
index 1d44f8b3d..558a4b3a9 100644
--- a/app/util/bamboo/bamboo_dataset_generator/pom.xml
+++ b/app/util/bamboo/bamboo_dataset_generator/pom.xml
@@ -18,7 +18,7 @@
org.codehaus.mojo
exec-maven-plugin
- 3.4.1
+ 3.5.0
@@ -49,30 +49,30 @@
com.atlassian.buildeng
bamboo-plan-ownership-specs
- 2.1.7
+ 2.1.11
com.atlassian.buildeng
bamboo-pbc-specs
- 2.1.7
+ 2.1.11
com.google.guava
guava
- 33.3.0-jre
+ 33.3.1-jre
com.fasterxml.jackson.core
jackson-core
- 2.17.2
+ 2.18.1
com.fasterxml.jackson.core
jackson-databind
- 2.17.2
+ 2.18.1
com.jayway.jsonpath
@@ -97,17 +97,17 @@
org.apache.logging.log4j
log4j-api
- 2.24.0
+ 2.24.1
org.apache.logging.log4j
log4j-core
- 2.24.0
+ 2.24.1
org.apache.logging.log4j
log4j-slf4j-impl
- 2.24.0
+ 2.24.1
diff --git a/app/util/conf.py b/app/util/conf.py
index d00427552..8e367c966 100644
--- a/app/util/conf.py
+++ b/app/util/conf.py
@@ -2,8 +2,8 @@
from util.project_paths import JIRA_YML, CONFLUENCE_YML, BITBUCKET_YML, JSM_YML, CROWD_YML, BAMBOO_YML
-TOOLKIT_VERSION = '8.4.0'
-UNSUPPORTED_VERSION = '8.0.0'
+TOOLKIT_VERSION = '8.5.0'
+UNSUPPORTED_VERSION = '8.1.0'
def read_yml_file(file):
diff --git a/app/util/data_preparation/jira_prepare_data.py b/app/util/data_preparation/jira_prepare_data.py
index cc6ffeba6..77867e7c8 100644
--- a/app/util/data_preparation/jira_prepare_data.py
+++ b/app/util/data_preparation/jira_prepare_data.py
@@ -137,7 +137,8 @@ def __get_boards(jira_api, board_type):
def __get_users(jira_api):
- perf_users = jira_api.get_users(username=DEFAULT_USER_PREFIX, max_results=performance_users_count)
+ perf_users = jira_api.get_users_by_name_search(username=DEFAULT_USER_PREFIX, users_count=performance_users_count)
+
users = generate_perf_users(api=jira_api, cur_perf_user=perf_users)
if not users:
raise SystemExit(f"There are no users in Jira accessible by a random performance user: {jira_api.user}")
diff --git a/app/util/data_preparation/jsm_prepare_data.py b/app/util/data_preparation/jsm_prepare_data.py
index 8b1c41770..b79a77d35 100644
--- a/app/util/data_preparation/jsm_prepare_data.py
+++ b/app/util/data_preparation/jsm_prepare_data.py
@@ -17,7 +17,7 @@
MAX_WORKERS = None
ERROR_LIMIT = 10
-DEFAULT_AGENT_PREFIX = 'performance_agent_'
+DEFAULT_AGENT_PREFIX = 'performance_agent_0_'
DEFAULT_AGENT_APP_KEYS = ["jira-servicedesk"]
DEFAULT_CUSTOMER_PREFIX = 'performance_customer_'
DEFAULT_PASSWORD = 'password'
@@ -93,15 +93,16 @@ def __filter_customer_with_requests(customer, jsm_client):
def __get_customers_with_requests(jira_client, jsm_client, count):
customers_with_requests = []
customers_without_requests = []
- start_at = 0
- max_count_iteration = 1000
+ count_to_search = 0
+ limit_users_to_search = 3000
+ max_count_iteration = 500
customers_chunk_size = 150
- while len(customers_with_requests) < count:
- customers = jira_client.get_users(username=DEFAULT_CUSTOMER_PREFIX, max_results=max_count_iteration,
- start_at=start_at)
+ while len(customers_with_requests) < count and count_to_search < limit_users_to_search:
+ count_to_search += max_count_iteration
+ customers = jira_client.get_users_by_name_search(username=DEFAULT_CUSTOMER_PREFIX,
+ users_count=count_to_search)
if not customers:
break
- start_at = start_at + max_count_iteration
customer_chunks = [customers[x:x + customers_chunk_size]
for x in range(0, len(customers), customers_chunk_size)]
@@ -117,26 +118,34 @@ def __get_customers_with_requests(jira_client, jsm_client, count):
if customer_data['has_requests']:
if len(customers_with_requests) >= count:
break
- customers_with_requests.append(customer_data)
+ if customer_data['name'] not in [i['name'] for i in customers_with_requests]:
+ customers_with_requests.append(customer_data)
else:
customers_without_requests.append(customer_data)
- print(f'Retrieved customers with requests: {len(customers_with_requests)}')
+ customers_with_requests_count = len(customers_with_requests)
+ print(f'Retrieved customers with requests: {customers_with_requests_count}')
+
+ if customers_with_requests_count < count:
+ raise Exception(f"ERROR: Found {customers_with_requests_count} customers with open requests, "
+ f"but 'concurrency_customers' in jsm.yml is {count}. "
+ f"Create more customers with open requests or reduce 'concurrency_customers'.")
return customers_with_requests
@print_timing('Retrieving agents')
def __get_agents(jira_client):
- prefix_name, application_keys, count = DEFAULT_AGENT_PREFIX, DEFAULT_AGENT_APP_KEYS, performance_agents_count
- perf_users = jira_client.get_users(username=prefix_name, max_results=count)
- users_to_create = count - len(perf_users)
+ perf_users = jira_client.get_users_by_name_search(username=DEFAULT_AGENT_PREFIX,
+ users_count=performance_agents_count)
+
+ users_to_create = performance_agents_count - len(perf_users)
if users_to_create > 0:
- add_users = __generate_users(api=jira_client, num_to_create=users_to_create, prefix_name=prefix_name,
- application_keys=application_keys)
+ add_users = __generate_users(api=jira_client, num_to_create=users_to_create, prefix_name=DEFAULT_AGENT_PREFIX,
+ application_keys=DEFAULT_AGENT_APP_KEYS)
if not add_users:
raise Exception(f"ERROR: Jira Service Management could not create agent"
- f"There were {len(perf_users)}/{count} retrieved.")
+ f"There were {len(perf_users)}/{performance_agents_count} retrieved.")
perf_users.extend(add_users)
return perf_users
@@ -145,14 +154,14 @@ def __get_agents(jira_client):
def __get_customers(jira_client, jsm_client, servicedesks):
created_agents = []
errors_count = 0
- prefix_name, application_keys, count = DEFAULT_CUSTOMER_PREFIX, None, performance_customers_count
perf_users_with_requests = __get_customers_with_requests(jsm_client=jsm_client, jira_client=jira_client,
- count=count)
+ count=performance_customers_count)
+
while len(perf_users_with_requests) < performance_customers_count:
- username = f"{prefix_name}{__generate_random_string(10)}"
+ username = f"{DEFAULT_CUSTOMER_PREFIX}{__generate_random_string(10)}"
try:
agent = jira_client.create_user(name=username, password=DEFAULT_PASSWORD,
- application_keys=application_keys)
+ application_keys=None)
created_agents.append(agent)
request_types = __get_request_types(jsm_client, servicedesks)
if not request_types:
diff --git a/app/util/k8s/README.MD b/app/util/k8s/README.MD
index 27e90ada6..48506c694 100644
--- a/app/util/k8s/README.MD
+++ b/app/util/k8s/README.MD
@@ -14,6 +14,7 @@
- [Rebuild atlassian/dcapt docker image on the fly](#rebuild-atlassiandcapt-docker-image-on-the-fly)
- [Run tests locally from docker container](#run-tests-locally-from-docker-container)
- [Run tests from execution environment pod](#run-tests-from-execution-environment-pod)
+- [Retry to copy run results from the execution environment pod to local](#retry-to-copy-run-results-from-the-execution-environment-pod-to-local)
- [Debug AWS required policies](#debug-aws-required-policies)
# Development environment
@@ -31,7 +32,7 @@ docker run --pull=always --env-file aws_envs \
-v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
--it atlassianlabs/terraform:2.9.2 ./install.sh -c conf.tfvars
+-it atlassianlabs/terraform:2.9.3 ./install.sh -c conf.tfvars
```
### Terminate development environment
Note: install and uninstall commands have to use the same `atlassianlabs/terraform:TAG` image tag.
@@ -43,7 +44,7 @@ docker run --pull=always --env-file aws_envs \
-v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
--it atlassianlabs/terraform:2.9.2 ./uninstall.sh -c conf.tfvars
+-it atlassianlabs/terraform:2.9.3 ./uninstall.sh -c conf.tfvars
```
# Enterprise-scale environment
@@ -60,7 +61,7 @@ docker run --pull=always --env-file aws_envs \
-v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
--it atlassianlabs/terraform:2.9.2 ./install.sh -c conf.tfvars
+-it atlassianlabs/terraform:2.9.3 ./install.sh -c conf.tfvars
```
### Terminate enterprise-scale environment
Note: install and uninstall commands have to use the same `atlassianlabs/terraform:TAG` image tag.
@@ -72,7 +73,7 @@ docker run --pull=always --env-file aws_envs \
-v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
--it atlassianlabs/terraform:2.9.2 ./uninstall.sh -c conf.tfvars
+-it atlassianlabs/terraform:2.9.3 ./uninstall.sh -c conf.tfvars
```
# Collect detailed k8s logs
@@ -92,7 +93,7 @@ export REGION=us-east-2
docker run --pull=always --env-file aws_envs \
-v "/$PWD/k8s_logs:/data-center-terraform/k8s_logs" \
-v "/$PWD/logs:/data-center-terraform/logs" \
--it atlassianlabs/terraform:2.9.2 ./scripts/collect_k8s_logs.sh atlas-$ENVIRONMENT_NAME-cluster $REGION k8s_logs
+-it atlassianlabs/terraform:2.9.3 ./scripts/collect_k8s_logs.sh atlas-$ENVIRONMENT_NAME-cluster $REGION k8s_logs
```
# Force terminate cluster
@@ -125,7 +126,7 @@ atlassian/dcapt terminate_cluster.py --cluster_name atlas-$ENVIRONMENT_NAME-clus
docker run --pull=always --env-file aws_envs \
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-e REGION=$REGION \
- -it atlassianlabs/terraform:2.9.2 bash
+ -it atlassianlabs/terraform:2.9.3 bash
```
5. Connect to the product pod. Example below for jira pod with number 0. For other product or pod number change `PRODUCT_POD` accordingly.
@@ -149,7 +150,7 @@ atlassian/dcapt terminate_cluster.py --cluster_name atlas-$ENVIRONMENT_NAME-clus
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-e REGION=$REGION \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
- -it atlassianlabs/terraform:2.9.2 bash
+ -it atlassianlabs/terraform:2.9.3 bash
```
5. Copy code base and connect to the execution environment pod:
``` bash
@@ -177,7 +178,7 @@ atlassian/dcapt terminate_cluster.py --cluster_name atlas-$ENVIRONMENT_NAME-clus
-e REGION=$REGION \
-e PRODUCT=$PRODUCT \
-v "/$PWD/script-runner.yml:/data-center-terraform/script-runner.yml" \
- -it atlassianlabs/terraform:2.9.2 bash
+ -it atlassianlabs/terraform:2.9.3 bash
```
5. Run following commands one by one inside docker container:
``` bash
@@ -205,7 +206,7 @@ To enable detailed CPU/Memory monitoring and Grafana dashboards for visualisatio
docker run --pull=always --env-file aws_envs \
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-e REGION=$REGION \
- -it atlassianlabs/terraform:2.9.2 bash
+ -it atlassianlabs/terraform:2.9.3 bash
```
``` bash
aws eks update-kubeconfig --name atlas-$ENVIRONMENT_NAME-cluster --region $REGION
@@ -241,7 +242,24 @@ Note: this option is **not** suitable for full-scale performance runs as local n
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh jira.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh jira.yml
+ ```
+
+# Retry to copy run results from the execution environment pod to local
+1. Navigate to `dc-app-performance-toolkit` folder
+2. Set AWS credential in [aws_envs](./aws_envs) file
+3. Set environment name:
+ ``` bash
+ export ENVIRONMENT_NAME=your_environment_name
+ ```
+4. Run following command to copy results from execution environment pod to local:
+ ``` bash
+ docker run --pull=always --env-file ./app/util/k8s/aws_envs \
+ -e REGION=us-east-2 \
+ -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
+ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
+ -v "/$PWD/app/util/k8s/copy_run_results.sh:/data-center-terraform/copy_run_results.sh" \
+ -it atlassianlabs/terraform:2.9.3 bash copy_run_results.sh
```
# Debug AWS required policies
@@ -250,7 +268,7 @@ Note: this option is **not** suitable for full-scale performance runs as local n
3. Start and ssh to `atlassianlabs/terraform` docker container:
``` bash
docker run --pull=always --env-file aws_envs \
- -it atlassianlabs/terraform:2.9.2 bash
+ -it atlassianlabs/terraform:2.9.3 bash
```
4. Make sure you have IAM policies with names `policy1`, `policy2`, created from [policy1.json](https://github.com/atlassian-labs/data-center-terraform/blob/main/permissions/policy1.json) and [policy2.json](https://github.com/atlassian-labs/data-center-terraform/blob/main/permissions/policy2.json).
5. Run following commands one by one inside docker container to get effective policies permissions:
diff --git a/app/util/k8s/copy_run_results.sh b/app/util/k8s/copy_run_results.sh
new file mode 100755
index 000000000..367cce03e
--- /dev/null
+++ b/app/util/k8s/copy_run_results.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+
+DCAPT_DOCKER_IMAGE="atlassian/dcapt"
+echo "INFO: DCAPT docker image: $DCAPT_DOCKER_IMAGE"
+
+if [[ -z "$ENVIRONMENT_NAME" ]]; then
+ echo "ERROR: ENVIRONMENT_NAME variable is not set."
+ exit 1
+fi
+echo "INFO: Environment name: $ENVIRONMENT_NAME"
+
+if [[ -z "$REGION" ]]; then
+ echo "ERROR: REGION variable is not set."
+ exit 1
+fi
+echo "INFO: AWS REGION: $REGION"
+
+echo "INFO: Update kubeconfig"
+aws eks update-kubeconfig --name atlas-"$ENVIRONMENT_NAME"-cluster --region "$REGION"
+
+echo "INFO: Get execution environment pod name"
+exec_pod_name=$(kubectl get pods -n atlassian -l=exec=true --no-headers -o custom-columns=":metadata.name")
+
+if [[ -z "$exec_pod_name" ]]; then
+ echo "ERROR: Current cluster does not have execution environment pod. Check what environment type is used.
+ Development environment does not have execution environment pod by default because dedicated for local app-specific actions development only."
+ exit 1
+fi
+
+echo "INFO: Execution environment pod name: $exec_pod_name"
+
+echo "INFO: Copy results folder from the exec env pod to local"
+kubectl cp --retries 100 atlassian/"$exec_pod_name":dc-app-performance-toolkit/app/results dc-app-performance-toolkit/app/results
+if [[ $? -ne 0 ]]; then
+ echo "ERROR: Copy results folder failed"
+ exit 1
+fi
\ No newline at end of file
diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json
index 1fed1dede..f03970d60 100644
--- a/app/util/k8s/dcapt-snapshots.json
+++ b/app/util/k8s/dcapt-snapshots.json
@@ -110,6 +110,61 @@
]
}
]
+ },
+ {
+ "version": "10.0.1",
+ "data": [
+ {
+ "type": "local-home",
+ "size": "large",
+ "snapshots": [
+ {
+ "us-east-2": "snap-034eee5f34bf936b3",
+ "us-east-1": "snap-001d16b1ffa28ac4a"
+ }
+ ]
+ },
+ {
+ "type": "ebs",
+ "size": "large",
+ "snapshots": [
+ {
+ "us-east-2": "snap-09b742ca1bb9fff88",
+ "us-east-1": "snap-0506e2d0d2b39e92a"
+ }
+ ]
+ },
+ {
+ "type": "rds",
+ "size": "large",
+ "snapshots": [
+ {
+ "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-10-0-1",
+ "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-10-0-1"
+ }
+ ]
+ },
+ {
+ "type": "ebs",
+ "size": "small",
+ "snapshots": [
+ {
+ "us-east-2": "snap-0ea31388ab389d097",
+ "us-east-1": "snap-09c71b1d40d21e408"
+ }
+ ]
+ },
+ {
+ "type": "rds",
+ "size": "small",
+ "snapshots": [
+ {
+ "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-10-0-1",
+ "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-10-0-1"
+ }
+ ]
+ }
+ ]
}
]
},
@@ -224,6 +279,61 @@
]
}
]
+ },
+ {
+ "version": "10.0.1",
+ "data": [
+ {
+ "type": "local-home",
+ "size": "large",
+ "snapshots": [
+ {
+ "us-east-2": "snap-019d79330995b9db7",
+ "us-east-1": "snap-08478133b48785725"
+ }
+ ]
+ },
+ {
+ "type": "ebs",
+ "size": "large",
+ "snapshots": [
+ {
+ "us-east-2": "snap-0f67c466251f5ead9",
+ "us-east-1": "snap-0d8a6668edce4d004"
+ }
+ ]
+ },
+ {
+ "type": "rds",
+ "size": "large",
+ "snapshots": [
+ {
+ "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-10-0-1",
+ "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-10-0-1"
+ }
+ ]
+ },
+ {
+ "type": "ebs",
+ "size": "small",
+ "snapshots": [
+ {
+ "us-east-2": "snap-0d582a31cfa7f908c",
+ "us-east-1": "snap-05b5d63ad1f2c0802"
+ }
+ ]
+ },
+ {
+ "type": "rds",
+ "size": "small",
+ "snapshots": [
+ {
+ "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-10-0-1",
+ "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-10-0-1"
+ }
+ ]
+ }
+ ]
}
]
},
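
The new `10.0.1` entries follow the existing `version`/`data`/`type`/`size`/`snapshots` layout, so a snapshot ID can be pulled out mechanically. A rough lookup sketch, assuming `jq` is available; it matches only on the keys visible above, so it prints one ID per product that has a `10.0.1` entry (here Jira and JSM):

```bash
# Large-dataset EBS snapshot IDs in us-east-2 for every 10.0.1 entry in the file.
jq -r '.. | objects
       | select(.version? == "10.0.1")
       | .data[]
       | select(.type == "ebs" and .size == "large")
       | .snapshots[0]["us-east-2"]' app/util/k8s/dcapt-snapshots.json
```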
diff --git a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md
index 5cfbf4bf6..68f51df1b 100644
--- a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md
+++ b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md
@@ -4,7 +4,7 @@ platform: platform
product: marketplace
category: devguide
subcategory: build
-date: "2024-09-09"
+date: "2024-10-22"
---
# Data Center App Performance Toolkit User Guide For Bamboo
@@ -47,12 +47,20 @@ specifically for performance testing during the DC app review process.
{{% warning %}}
Do not use `root` user credentials for cluster creation.
- Use the following policies to restrict permissions: [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
+ **Option 1** (simple): create admin user with `AdministratorAccess` permissions.
+
+ **Option 2** (complex): create granular permission policies with [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
The specific configuration relies on how you manage permissions within AWS.
{{% /warning %}}
- **Example** of Policies and User creation:
+ **Example Option 1** with Admin user:
+ 1. Go to AWS Console -> IAM service -> Users
+ 2. Create new user -> attach policies directly -> `AdministratorAccess`
+ 3. Open newly created user -> Security credentials tab -> Access keys -> Create access key -> Command Line Interface (CLI) -> Create access key
+ 4. Use `Access key` and `Secret access key` in [aws_envs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/aws_envs) file
+
+ **Example Option 2** with granular Policies:
1. Go to AWS Console -> IAM service -> Policies
2. Create `policy1` with json content of the [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) file
{{% warning %}}
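
The Option 1 steps above are console click-paths. For teams that prefer scripting IAM setup, a purely illustrative AWS CLI equivalent could look like the following; the user name is hypothetical, and the generated key pair still goes into the `aws_envs` file:

```bash
# Hypothetical user name; use whatever naming convention fits your AWS account.
aws iam create-user --user-name dcapt-admin

# AdministratorAccess is the AWS-managed policy referenced in Option 1.
aws iam attach-user-policy \
  --user-name dcapt-admin \
  --policy-arn arn:aws:iam::aws:policy/AdministratorAccess

# Prints AccessKeyId and SecretAccessKey to place into app/util/k8s/aws_envs.
aws iam create-access-key --user-name dcapt-admin
```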
@@ -94,7 +102,7 @@ specifically for performance testing during the DC app review process.
-v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
- -it atlassianlabs/terraform:2.9.2 ./install.sh -c conf.tfvars
+ -it atlassianlabs/terraform:2.9.3 ./install.sh -c conf.tfvars
```
7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bamboo`.
8. Wait for all remote agents to be started and connected. It can take up to 10 minutes. Agents can be checked in `Settings` > `Agents`.
@@ -295,7 +303,7 @@ To receive performance baseline results **without** an app installed and **witho
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh bamboo.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh bamboo.yml
```
1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/bamboo/YY-MM-DD-hh-mm-ss` folder:
- `results_summary.log`: detailed run summary
@@ -326,7 +334,7 @@ To receive performance results with an app installed (still use master branch):
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh bamboo.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh bamboo.yml
```
{{% note %}}
@@ -360,7 +368,7 @@ To receive results for Bamboo DC **with app** and **with app-specific actions**:
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh bamboo.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh bamboo.yml
```
{{% note %}}
diff --git a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md
index 02c039e7f..666fe4a66 100644
--- a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md
+++ b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md
@@ -4,7 +4,7 @@ platform: platform
product: marketplace
category: devguide
subcategory: build
-date: "2024-09-09"
+date: "2024-10-22"
---
# Data Center App Performance Toolkit User Guide For Bitbucket
@@ -59,12 +59,20 @@ Below process describes how to install low-tier Bitbucket DC with "small" datase
{{% warning %}}
Do not use `root` user credentials for cluster creation.
- Use the following policies to restrict permissions: [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
+ **Option 1** (simple): create admin user with `AdministratorAccess` permissions.
+
+ **Option 2** (complex): create granular permission policies with [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
The specific configuration relies on how you manage permissions within AWS.
{{% /warning %}}
- **Example** of Policies and User creation:
+ **Example Option 1** with Admin user:
+ 1. Go to AWS Console -> IAM service -> Users
+ 2. Create new user -> attach policies directly -> `AdministratorAccess`
+ 3. Open newly created user -> Security credentials tab -> Access keys -> Create access key -> Command Line Interface (CLI) -> Create access key
+ 4. Use `Access key` and `Secret access key` in [aws_envs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/aws_envs) file
+
+ **Example Option 2** with granular Policies:
1. Go to AWS Console -> IAM service -> Policies
2. Create `policy1` with json content of the [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) file
{{% warning %}}
@@ -107,7 +115,7 @@ Below process describes how to install low-tier Bitbucket DC with "small" datase
-v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
- -it atlassianlabs/terraform:2.9.2 ./install.sh -c conf.tfvars
+ -it atlassianlabs/terraform:2.9.3 ./install.sh -c conf.tfvars
```
8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`.
@@ -255,12 +263,20 @@ Below process describes how to install enterprise-scale Bitbucket DC with "large
{{% warning %}}
Do not use `root` user credentials for cluster creation.
- Use the following policies to restrict permissions: [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
+ **Option 1** (simple): create admin user with `AdministratorAccess` permissions.
+
+ **Option 2** (complex): create granular permission policies with [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
The specific configuration relies on how you manage permissions within AWS.
{{% /warning %}}
- **Example** of Policies and User creation:
+ **Example Option 1** with Admin user:
+ 1. Go to AWS Console -> IAM service -> Users
+ 2. Create new user -> attach policies directly -> `AdministratorAccess`
+ 3. Open newly created user -> Security credentials tab -> Access keys -> Create access key -> Command Line Interface (CLI) -> Create access key
+ 4. Use `Access key` and `Secret access key` in [aws_envs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/aws_envs) file
+
+ **Example Option 2** with granular Policies:
1. Go to AWS Console -> IAM service -> Policies
2. Create `policy1` with json content of the [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) file
{{% warning %}}
@@ -303,7 +319,7 @@ Below process describes how to install enterprise-scale Bitbucket DC with "large
-v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
- -it atlassianlabs/terraform:2.9.2 ./install.sh -c conf.tfvars
+ -it atlassianlabs/terraform:2.9.3 ./install.sh -c conf.tfvars
```
8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`.
@@ -378,7 +394,7 @@ To receive performance baseline results **without** an app installed:
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh bitbucket.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh bitbucket.yml
```
1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/bitbucket/YY-MM-DD-hh-mm-ss` folder:
@@ -409,7 +425,7 @@ To receive performance results with an app installed (still use master branch):
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh bitbucket.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh bitbucket.yml
```
{{% note %}}
@@ -459,7 +475,7 @@ To receive scalability benchmark results for one-node Bitbucket DC **with** app-
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh bitbucket.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh bitbucket.yml
```
{{% note %}}
@@ -484,7 +500,7 @@ To receive scalability benchmark results for two-node Bitbucket DC **with** app-
-v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
- -it atlassianlabs/terraform:2.9.2 ./install.sh -c conf.tfvars
+ -it atlassianlabs/terraform:2.9.3 ./install.sh -c conf.tfvars
```
1. Navigate to `dc-app-performance-toolkit` folder and start tests execution:
``` bash
@@ -497,7 +513,7 @@ To receive scalability benchmark results for two-node Bitbucket DC **with** app-
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh bitbucket.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh bitbucket.yml
```
{{% note %}}
@@ -526,7 +542,7 @@ To receive scalability benchmark results for four-node Bitbucket DC with app-spe
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh bitbucket.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh bitbucket.yml
```
{{% note %}}
diff --git a/docs/dc-apps-performance-toolkit-user-guide-confluence.md b/docs/dc-apps-performance-toolkit-user-guide-confluence.md
index 5d8c9ba68..4fc520dd3 100644
--- a/docs/dc-apps-performance-toolkit-user-guide-confluence.md
+++ b/docs/dc-apps-performance-toolkit-user-guide-confluence.md
@@ -4,7 +4,7 @@ platform: platform
product: marketplace
category: devguide
subcategory: build
-date: "2024-09-09"
+date: "2024-10-22"
---
# Data Center App Performance Toolkit User Guide For Confluence
@@ -59,12 +59,20 @@ Below process describes how to install low-tier Confluence DC with "small" datas
{{% warning %}}
Do not use `root` user credentials for cluster creation.
- Use the following policies to restrict permissions: [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
+ **Option 1** (simple): create admin user with `AdministratorAccess` permissions.
+
+ **Option 2** (complex): create granular permission policies with [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
The specific configuration relies on how you manage permissions within AWS.
{{% /warning %}}
- **Example** of Policies and User creation:
+ **Example Option 1** with Admin user:
+ 1. Go to AWS Console -> IAM service -> Users
+ 2. Create new user -> attach policies directly -> `AdministratorAccess`
+ 3. Open newly created user -> Security credentials tab -> Access keys -> Create access key -> Command Line Interface (CLI) -> Create access key
+ 4. Use `Access key` and `Secret access key` in [aws_envs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/aws_envs) file
+
+ **Example Option 2** with granular Policies:
1. Go to AWS Console -> IAM service -> Policies
2. Create `policy1` with json content of the [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) file
{{% warning %}}
@@ -106,7 +114,7 @@ Below process describes how to install low-tier Confluence DC with "small" datas
-v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
- -it atlassianlabs/terraform:2.9.2 ./install.sh -c conf.tfvars
+ -it atlassianlabs/terraform:2.9.3 ./install.sh -c conf.tfvars
```
8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`.
@@ -335,12 +343,20 @@ Below process describes how to install enterprise-scale Confluence DC with "larg
{{% warning %}}
Do not use `root` user credentials for cluster creation.
- Use the following policies to restrict permissions: [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
+ **Option 1** (simple): create admin user with `AdministratorAccess` permissions.
+
+ **Option 2** (complex): create granular permission policies with [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
The specific configuration relies on how you manage permissions within AWS.
{{% /warning %}}
- **Example** of Policies and User creation:
+ **Example Option 1** with Admin user:
+ 1. Go to AWS Console -> IAM service -> Users
+ 2. Create new user -> attach policies directly -> `AdministratorAccess`
+ 3. Open newly created user -> Security credentials tab -> Access keys -> Create access key -> Command Line Interface (CLI) -> Create access key
+ 4. Use `Access key` and `Secret access key` in [aws_envs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/aws_envs) file
+
+ **Example Option 2** with granular Policies:
1. Go to AWS Console -> IAM service -> Policies
2. Create `policy1` with json content of the [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) file
{{% warning %}}
@@ -383,7 +399,7 @@ Below process describes how to install enterprise-scale Confluence DC with "larg
-v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
- -it atlassianlabs/terraform:2.9.2 ./install.sh -c conf.tfvars
+ -it atlassianlabs/terraform:2.9.3 ./install.sh -c conf.tfvars
```
8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`.
@@ -458,7 +474,7 @@ To receive performance baseline results **without** an app installed:
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh confluence.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh confluence.yml
```
1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/confluence/YY-MM-DD-hh-mm-ss` folder:
- `results_summary.log`: detailed run summary
@@ -488,7 +504,7 @@ To receive performance results with an app installed (still use master branch):
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh confluence.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh confluence.yml
```
{{% note %}}
@@ -549,7 +565,7 @@ To receive scalability benchmark results for one-node Confluence DC **with** app
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh confluence.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh confluence.yml
```
{{% note %}}
@@ -574,7 +590,7 @@ To receive scalability benchmark results for two-node Confluence DC **with** app
-v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
- -it atlassianlabs/terraform:2.9.2 ./install.sh -c conf.tfvars
+ -it atlassianlabs/terraform:2.9.3 ./install.sh -c conf.tfvars
```
1. Navigate to `dc-app-performance-toolkit` folder and start tests execution:
``` bash
@@ -587,7 +603,7 @@ To receive scalability benchmark results for two-node Confluence DC **with** app
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh confluence.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh confluence.yml
```
{{% note %}}
@@ -616,7 +632,7 @@ To receive scalability benchmark results for four-node Confluence DC with app-sp
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh confluence.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh confluence.yml
```
{{% note %}}
diff --git a/docs/dc-apps-performance-toolkit-user-guide-crowd.md b/docs/dc-apps-performance-toolkit-user-guide-crowd.md
index a964423d5..eb7bac975 100644
--- a/docs/dc-apps-performance-toolkit-user-guide-crowd.md
+++ b/docs/dc-apps-performance-toolkit-user-guide-crowd.md
@@ -4,7 +4,7 @@ platform: platform
product: marketplace
category: devguide
subcategory: build
-date: "2024-09-09"
+date: "2024-10-22"
---
# Data Center App Performance Toolkit User Guide For Crowd
@@ -43,12 +43,20 @@ specifically for performance testing during the DC app review process.
{{% warning %}}
Do not use `root` user credentials for cluster creation.
- Use the following policies to restrict permissions: [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
+ **Option 1** (simple): create admin user with `AdministratorAccess` permissions.
+
+ **Option 2** (complex): create granular permission policies with [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
The specific configuration relies on how you manage permissions within AWS.
{{% /warning %}}
- **Example** of Policies and User creation:
+ **Example Option 1** with Admin user:
+ 1. Go to AWS Console -> IAM service -> Users
+ 2. Create new user -> attach policies directly -> `AdministratorAccess`
+ 3. Open newly created user -> Security credentials tab -> Access keys -> Create access key -> Command Line Interface (CLI) -> Create access key
+ 4. Use `Access key` and `Secret access key` in [aws_envs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/aws_envs) file
+
+ **Example Option 2** with granular Policies:
1. Go to AWS Console -> IAM service -> Policies
2. Create `policy1` with json content of the [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) file
{{% warning %}}
@@ -89,7 +97,7 @@ specifically for performance testing during the DC app review process.
-v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
- -it atlassianlabs/terraform:2.9.2 ./install.sh -c conf.tfvars
+ -it atlassianlabs/terraform:2.9.3 ./install.sh -c conf.tfvars
```
7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/crowd`.
@@ -200,7 +208,7 @@ To receive performance baseline results **without** an app installed and **witho
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh crowd.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh crowd.yml
```
1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/crowd/YY-MM-DD-hh-mm-ss` folder:
- `results_summary.log`: detailed run summary
@@ -229,7 +237,7 @@ To receive performance results with an app installed (still use master branch):
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh crowd.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh crowd.yml
```
{{% note %}}
@@ -288,7 +296,7 @@ To receive scalability benchmark results for one-node Crowd DC **with** app-spec
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh crowd.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh crowd.yml
```
{{% note %}}
@@ -312,7 +320,7 @@ To receive scalability benchmark results for two-node Crowd DC **with** app-spec
-v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
- -it atlassianlabs/terraform:2.9.2 ./install.sh -c conf.tfvars
+ -it atlassianlabs/terraform:2.9.3 ./install.sh -c conf.tfvars
```
1. Edit **run parameters** for 2 nodes run. To do it, left uncommented only 2 nodes scenario parameters in `crowd.yml` file.
```
@@ -339,7 +347,7 @@ To receive scalability benchmark results for two-node Crowd DC **with** app-spec
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh crowd.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh crowd.yml
```
{{% note %}}
@@ -382,7 +390,7 @@ To receive scalability benchmark results for four-node Crowd DC with app-specifi
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh crowd.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh crowd.yml
```
{{% note %}}
diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira.md b/docs/dc-apps-performance-toolkit-user-guide-jira.md
index 83d22c446..88386d8c5 100644
--- a/docs/dc-apps-performance-toolkit-user-guide-jira.md
+++ b/docs/dc-apps-performance-toolkit-user-guide-jira.md
@@ -4,7 +4,7 @@ platform: platform
product: marketplace
category: devguide
subcategory: build
-date: "2024-09-09"
+date: "2024-10-22"
---
# Data Center App Performance Toolkit User Guide For Jira
@@ -65,13 +65,21 @@ Below process describes how to install low-tier Jira DC with "small" dataset inc
1. Create Access keys for AWS CLI:
{{% warning %}}
Do not use `root` user credentials for cluster creation.
+
+ **Option 1** (simple): create admin user with `AdministratorAccess` permissions.
- Use the following policies to restrict permissions: [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
+ **Option 2** (complex): create granular permission policies with [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
The specific configuration relies on how you manage permissions within AWS.
{{% /warning %}}
- **Example** of Policies and User creation:
+ **Example Option 1** with Admin user:
+ 1. Go to AWS Console -> IAM service -> Users
+ 2. Create new user -> attach policies directly -> `AdministratorAccess`
+ 3. Open newly created user -> Security credentials tab -> Access keys -> Create access key -> Command Line Interface (CLI) -> Create access key
+ 4. Use `Access key` and `Secret access key` in [aws_envs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/aws_envs) file
+
+ **Example Option 2** with granular Policies:
1. Go to AWS Console -> IAM service -> Policies
2. Create `policy1` with json content of the [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) file
{{% warning %}}
@@ -84,7 +92,6 @@ Below process describes how to install low-tier Jira DC with "small" dataset inc
4. Go to User -> Create user -> Attach policies directly -> Attach `policy1` and `policy2`-> Click on Create user button
5. Open newly created user -> Security credentials tab -> Access keys -> Create access key -> Command Line Interface (CLI) -> Create access key
6. Use `Access key` and `Secret access key` in [aws_envs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/aws_envs) file
-
2. Clone [Data Center App Performance Toolkit](https://github.com/atlassian/dc-app-performance-toolkit) locally.
{{% warning %}}
For annual review, always get the latest version of the DCAPT code from the master branch.
@@ -115,7 +122,7 @@ Below process describes how to install low-tier Jira DC with "small" dataset inc
-v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
- -it atlassianlabs/terraform:2.9.2 ./install.sh -c conf.tfvars
+ -it atlassianlabs/terraform:2.9.3 ./install.sh -c conf.tfvars
```
8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`.
@@ -360,12 +367,20 @@ Below process describes how to install enterprise-scale Jira DC with "large" dat
{{% warning %}}
Do not use `root` user credentials for cluster creation.
- Use the following policies to restrict permissions: [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
+ **Option 1** (simple): create admin user with `AdministratorAccess` permissions.
+
+ **Option 2** (complex): create granular permission policies with [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
The specific configuration relies on how you manage permissions within AWS.
{{% /warning %}}
- **Example** of Policies and User creation:
+ **Example Option 1** with Admin user:
+ 1. Go to AWS Console -> IAM service -> Users
+ 2. Create new user -> attach policies directly -> `AdministratorAccess`
+ 3. Open newly created user -> Security credentials tab -> Access keys -> Create access key -> Command Line Interface (CLI) -> Create access key
+ 4. Use `Access key` and `Secret access key` in [aws_envs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/aws_envs) file
+
+ **Example Option 2** with granular Policies:
1. Go to AWS Console -> IAM service -> Policies
2. Create `policy1` with json content of the [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) file
{{% warning %}}
@@ -378,7 +393,6 @@ Below process describes how to install enterprise-scale Jira DC with "large" dat
4. Go to User -> Create user -> Attach policies directly -> Attach `policy1` and `policy2`-> Click on Create user button
5. Open newly created user -> Security credentials tab -> Access keys -> Create access key -> Command Line Interface (CLI) -> Create access key
6. Use `Access key` and `Secret access key` in [aws_envs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/aws_envs) file
-
2. Clone [Data Center App Performance Toolkit](https://github.com/atlassian/dc-app-performance-toolkit) locally.
{{% warning %}}
For annual review, always get the latest version of the DCAPT code from the master branch.
@@ -409,7 +423,7 @@ Below process describes how to install enterprise-scale Jira DC with "large" dat
-v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
- -it atlassianlabs/terraform:2.9.2 ./install.sh -c conf.tfvars
+ -it atlassianlabs/terraform:2.9.3 ./install.sh -c conf.tfvars
```
8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`.
@@ -484,7 +498,7 @@ To receive performance baseline results **without** an app installed:
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh jira.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh jira.yml
```
1. View the results files of the run in the local `dc-app-performance-toolkit/app/results/jira/YY-MM-DD-hh-mm-ss` folder:
- `results_summary.log`: detailed run summary
@@ -536,7 +550,7 @@ The re-index time for Jira is about ~50-70 minutes.
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh jira.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh jira.yml
```
{{% note %}}
@@ -597,7 +611,7 @@ To receive scalability benchmark results for one-node Jira DC **with** app-speci
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh jira.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh jira.yml
```
{{% note %}}
@@ -622,7 +636,7 @@ To receive scalability benchmark results for two-node Jira DC **with** app-speci
-v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
- -it atlassianlabs/terraform:2.9.2 ./install.sh -c conf.tfvars
+ -it atlassianlabs/terraform:2.9.3 ./install.sh -c conf.tfvars
```
1. Navigate to `dc-app-performance-toolkit` folder and start tests execution:
``` bash
@@ -635,7 +649,7 @@ To receive scalability benchmark results for two-node Jira DC **with** app-speci
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh jira.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh jira.yml
```
{{% note %}}
@@ -664,7 +678,7 @@ To receive scalability benchmark results for four-node Jira DC with app-specific
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh jira.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh jira.yml
```
{{% note %}}
diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm.md b/docs/dc-apps-performance-toolkit-user-guide-jsm.md
index 1b6af50d2..130d834f3 100644
--- a/docs/dc-apps-performance-toolkit-user-guide-jsm.md
+++ b/docs/dc-apps-performance-toolkit-user-guide-jsm.md
@@ -4,7 +4,7 @@ platform: platform
product: marketplace
category: devguide
subcategory: build
-date: "2024-09-09"
+date: "2024-10-22"
---
# Data Center App Performance Toolkit User Guide For Jira Service Management
@@ -67,12 +67,20 @@ Below process describes how to install low-tier Jira Service Management DC with
{{% warning %}}
Do not use `root` user credentials for cluster creation.
- Use the following policies to restrict permissions: [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
+ **Option 1** (simple): create admin user with `AdministratorAccess` permissions.
+
+ **Option 2** (complex): create granular permission policies with [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
The specific configuration relies on how you manage permissions within AWS.
{{% /warning %}}
- **Example** of Policies and User creation:
+ **Example Option 1** with Admin user:
+ 1. Go to AWS Console -> IAM service -> Users
+ 2. Create new user -> attach policies directly -> `AdministratorAccess`
+ 3. Open newly created user -> Security credentials tab -> Access keys -> Create access key -> Command Line Interface (CLI) -> Create access key
+ 4. Use `Access key` and `Secret access key` in [aws_envs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/aws_envs) file
+
+ **Example Option 2** with granular Policies:
1. Go to AWS Console -> IAM service -> Policies
2. Create `policy1` with json content of the [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) file
{{% warning %}}
@@ -116,7 +124,7 @@ Below process describes how to install low-tier Jira Service Management DC with
-v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
- -it atlassianlabs/terraform:2.9.2 ./install.sh -c conf.tfvars
+ -it atlassianlabs/terraform:2.9.3 ./install.sh -c conf.tfvars
```
8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`.
@@ -394,12 +402,20 @@ Below process describes how to install enterprise-scale Jira Service Management
{{% warning %}}
Do not use `root` user credentials for cluster creation.
- Use the following policies to restrict permissions: [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
+ **Option 1** (simple): create admin user with `AdministratorAccess` permissions.
+
+ **Option 2** (complex): create granular permission policies with [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) and [policy2](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy2.json).
The specific configuration relies on how you manage permissions within AWS.
{{% /warning %}}
- **Example** of Policies and User creation:
+ **Example Option 1** with Admin user:
+ 1. Go to AWS Console -> IAM service -> Users
+ 2. Create new user -> attach policies directly -> `AdministratorAccess`
+ 3. Open newly created user -> Security credentials tab -> Access keys -> Create access key -> Command Line Interface (CLI) -> Create access key
+ 4. Use `Access key` and `Secret access key` in [aws_envs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/aws_envs) file
+
+ **Example Option 2** with granular Policies:
1. Go to AWS Console -> IAM service -> Policies
2. Create `policy1` with json content of the [policy1](https://raw.githubusercontent.com/atlassian-labs/data-center-terraform/main/permissions/policy1.json) file
{{% warning %}}
@@ -443,7 +459,7 @@ Below process describes how to install enterprise-scale Jira Service Management
-v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
- -it atlassianlabs/terraform:2.9.2 ./install.sh -c conf.tfvars
+ -it atlassianlabs/terraform:2.9.3 ./install.sh -c conf.tfvars
```
8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`.
@@ -523,7 +539,7 @@ To receive performance baseline results **without** an app installed:
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh jsm.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh jsm.yml
```
1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/jsm/YY-MM-DD-hh-mm-ss` folder:
@@ -578,7 +594,7 @@ Re-index information window is displayed on the **Indexing page**. If the window
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh jsm.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh jsm.yml
```
{{% note %}}
@@ -638,7 +654,7 @@ To receive scalability benchmark results for one-node Jira Service Management DC
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh jsm.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh jsm.yml
```
{{% note %}}
@@ -663,7 +679,7 @@ To receive scalability benchmark results for two-node Jira Service Management DC
-v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
-v "/$PWD/logs:/data-center-terraform/logs" \
- -it atlassianlabs/terraform:2.9.2 ./install.sh -c conf.tfvars
+ -it atlassianlabs/terraform:2.9.3 ./install.sh -c conf.tfvars
```
1. Navigate to `dc-app-performance-toolkit` folder and start tests execution:
``` bash
@@ -676,7 +692,7 @@ To receive scalability benchmark results for two-node Jira Service Management DC
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh jsm.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh jsm.yml
```
{{% note %}}
@@ -705,7 +721,7 @@ To receive scalability benchmark results for four-node Jira Service Management D
-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
-v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
-v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
- -it atlassianlabs/terraform:2.9.2 bash bzt_on_pod.sh jsm.yml
+ -it atlassianlabs/terraform:2.9.3 bash bzt_on_pod.sh jsm.yml
```
{{% note %}}
diff --git a/requirements.txt b/requirements.txt
index 489de6000..6b91e32c0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,11 +1,11 @@
matplotlib==3.9.2
-pandas==2.2.2
-pytest==8.3.2
-locust==2.31.5
-selenium==4.24.0
-filelock==3.16.0
+pandas==2.2.3
+pytest==8.3.3
+locust==2.32.0
+selenium==4.25.0
+filelock==3.16.1
packaging==24.1
prettytable==3.11.0
bzt==1.16.32 # bzt 1.16.34 has pinned setuptools==65.5.0, which does not have distutils
-boto3==1.35.14
+boto3==1.35.47
retry==0.9.2
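
If the toolkit is run from a local virtual environment instead of the `atlassian/dcapt` Docker image, the updated pins would typically be installed in one step, for example:

```bash
python3 -m venv venv && source venv/bin/activate
pip install -r requirements.txt
```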