Add the "--dashboard" option to create a dashboard

Michael Mintz 2020-12-18 01:24:56 -05:00
parent dc20567e6e
commit ac28e5abec
6 changed files with 413 additions and 34 deletions
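A minimal usage sketch (test file name illustrative): running "pytest test_suite.py --dashboard" makes SeleniumBase write a dashboard.html file to the current working directory, rendering the run as a pie chart plus a results table and updating the file as each test finishes.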

View File

@@ -69,6 +69,8 @@ except (ImportError, ValueError):
sb.demo_mode = False
sb.time_limit = None
sb.demo_sleep = 1
sb.dashboard = False
sb._dash_initialized = False
sb.message_duration = 2
sb.block_images = False
sb.remote_debug = False

View File

@@ -27,14 +27,8 @@ def log_screenshot(test_logpath, driver, screenshot=None, get=False):
print("WARNING: Unable to get screenshot for failure logs!")
def log_test_failure_data(test, test_logpath, driver, browser, url=None):
basic_info_name = settings.BASIC_INFO_NAME
basic_file_path = "%s/%s" % (test_logpath, basic_info_name)
log_file = codecs.open(basic_file_path, "w+", "utf-8")
if url:
last_page = url
else:
last_page = get_last_page(driver)
def get_master_time():
""" Returns (timestamp, the_date, the_time) """
timestamp = str(int(time.time())) + " (Unix Timestamp)"
now = datetime.datetime.now()
utc_offset = -time.timezone / 3600.0
@@ -63,6 +57,18 @@ def log_test_failure_data(test, test_logpath, driver, browser, url=None):
the_time = now.strftime("%I:%M:%S %p ") + time_zone
if the_time.startswith("0"):
the_time = the_time[1:]
return timestamp, the_date, the_time
def log_test_failure_data(test, test_logpath, driver, browser, url=None):
basic_info_name = settings.BASIC_INFO_NAME
basic_file_path = "%s/%s" % (test_logpath, basic_info_name)
log_file = codecs.open(basic_file_path, "w+", "utf-8")
if url:
last_page = url
else:
last_page = get_last_page(driver)
timestamp, the_date, the_time = get_master_time()
test_id = get_test_id(test)
data_to_save = []
data_to_save.append("%s" % test_id)

View File

@@ -3261,6 +3261,9 @@ class BaseCase(unittest.TestCase):
def skip(self, reason=""):
""" Mark the test as Skipped. """
if self.dashboard:
test_id = self.__get_test_id_2()
sb_config._results[test_id] = "Skipped"
self.skipTest(reason)
############
@@ -4036,11 +4039,18 @@ class BaseCase(unittest.TestCase):
},
plotOptions: {
pie: {
size: "95%",
allowPointSelect: true,
animation: false,
cursor: 'pointer',
dataLabels: {
enabled: false,
format: '{point.name}: {point.y:.1f}%'
// enabled: false,
// format: '{point.name}: {point.y:.0f}',
formatter: function() {
if (this.y > 0) {
return this.point.name + ': ' + this.point.y
}
}
},
states: {
hover: {
@@ -4069,7 +4079,10 @@ class BaseCase(unittest.TestCase):
plotOptions: {
series: {
showInLegend: true,
animation: true,
animation: false,
dataLabels: {
enabled: true
},
shadow: false,
lineWidth: 3,
fillOpacity: 0.5,
@@ -4158,13 +4171,15 @@ class BaseCase(unittest.TestCase):
if self._chart_first_series[chart_name]:
self._chart_label[chart_name].append(label)
def save_chart(self, chart_name=None, filename=None):
def save_chart(self, chart_name=None, filename=None, folder=None):
""" Saves a SeleniumBase-generated chart to a file for later use.
@Params
chart_name - If creating multiple charts at the same time,
use this to select the one you wish to use.
filename - The name of the HTML file that you wish to
save the chart to. (filename must end in ".html")
folder - The name of the folder where you wish to
save the HTML file. (Default: "./saved_charts/")
"""
if not chart_name:
chart_name = "default"
@@ -4199,7 +4214,10 @@ class BaseCase(unittest.TestCase):
axis += "'%s'," % label
axis += "], crosshair: false},"
the_html = the_html.replace("xAxis: { },", axis)
saved_charts_folder = constants.Charts.SAVED_FOLDER
if not folder:
saved_charts_folder = constants.Charts.SAVED_FOLDER
else:
saved_charts_folder = folder
if saved_charts_folder.endswith("/"):
saved_charts_folder = saved_charts_folder[:-1]
if not os.path.exists(saved_charts_folder):
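A minimal usage sketch of the updated signature (test name, data points, and folder/file names illustrative):

from seleniumbase import BaseCase

class ChartTests(BaseCase):
    def test_save_chart_to_custom_folder(self):
        self.create_pie_chart(title="My Results")
        self.add_data_point("Passed", 7, color="#84d474")
        self.add_data_point("Failed", 1, color="#f17476")
        # Expected to produce "my_charts/results.html" instead of
        # saving into the default "saved_charts" folder:
        self.save_chart(filename="results.html", folder="my_charts")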
@@ -6268,6 +6286,8 @@ class BaseCase(unittest.TestCase):
self.guest_mode = sb_config.guest_mode
self.devtools = sb_config.devtools
self.remote_debug = sb_config.remote_debug
self.dashboard = sb_config.dashboard
self._dash_initialized = sb_config._dashboard_initialized
self.swiftshader = sb_config.swiftshader
self.user_data_dir = sb_config.user_data_dir
self.extension_zip = sb_config.extension_zip
@@ -6385,6 +6405,16 @@ class BaseCase(unittest.TestCase):
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/76.0.3809.132 Mobile Safari/537.36")
# Dashboard pre-processing:
if self.dashboard:
sb_config._sbase_detected = True
sb_config._only_unittest = False
if not self._dash_initialized:
sb_config._dashboard_initialized = True
sb_config._sbase_detected = True
self._dash_initialized = True
self.__process_dashboard(False, init=True)
has_url = False
if self._reuse_session:
if not hasattr(sb_config, 'shared_driver'):
@@ -6598,6 +6628,36 @@ class BaseCase(unittest.TestCase):
test_id = self._sb_test_identifier
return test_id
def __get_test_id_2(self):
""" The id for SeleniumBase Dashboard entries. """
test_id = "%s.%s.%s" % (self.__class__.__module__.split('.')[-1],
self.__class__.__name__,
self._testMethodName)
if self._sb_test_identifier and len(str(self._sb_test_identifier)) > 6:
test_id = self._sb_test_identifier
if test_id.count('.') > 1:
test_id = '.'.join(test_id.split('.')[1:])
return test_id
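# Illustrative example (names hypothetical): module "proj.test_suite",
# class "MyTests", method "test_basics" -> "test_suite.MyTests.test_basics".
# With the sb fixture, an identifier like "proj.test_suite.MyTests.test_basics"
# is likewise trimmed to "test_suite.MyTests.test_basics".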
def __get_display_id(self):
test_id = "%s.py::%s::%s" % (
self.__class__.__module__.replace('.', '/'),
self.__class__.__name__,
self._testMethodName)
if self._sb_test_identifier and len(str(self._sb_test_identifier)) > 6:
test_id = self._sb_test_identifier
if hasattr(self, "_using_sb_fixture_class"):
if test_id.count('.') >= 2:
parts = test_id.split('.')
full = parts[-3] + '.py::' + parts[-2] + '::' + parts[-1]
test_id = full
elif hasattr(self, "_using_sb_fixture_no_class"):
if test_id.count('.') >= 1:
parts = test_id.split('.')
full = parts[-2] + '.py::' + parts[-1]
test_id = full
return test_id
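# Illustrative example (names hypothetical): the default form is
# "proj/test_suite.py::MyTests::test_basics"; with the sb class fixture,
# "proj.test_suite.MyTests.test_basics" is rebuilt as
# "test_suite.py::MyTests::test_basics".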
def __create_log_path_as_needed(self, test_logpath):
if not os.path.exists(test_logpath):
try:
@@ -6605,6 +6665,182 @@ class BaseCase(unittest.TestCase):
except Exception:
pass # Only reachable during multi-threaded runs
def __process_dashboard(self, has_exception, init=False):
''' SeleniumBase Dashboard Processing '''
if len(sb_config._extra_dash_entries) > 0:
# First take care of existing entries from non-SeleniumBase tests
for test_id in sb_config._extra_dash_entries:
if test_id in sb_config._results.keys():
if sb_config._results[test_id] == "Skipped":
sb_config.item_count_skipped += 1
sb_config.item_count_untested -= 1
elif sb_config._results[test_id] == "Failed":
sb_config.item_count_failed += 1
sb_config.item_count_untested -= 1
elif sb_config._results[test_id] == "Passed":
sb_config.item_count_passed += 1
sb_config.item_count_untested -= 1
else: # Mark "Skipped" if unknown
sb_config.item_count_skipped += 1
sb_config.item_count_untested -= 1
sb_config._extra_dash_entries = [] # Reset the list to empty
# Process new entries
test_id = self.__get_test_id_2()
dud = "seleniumbase/plugins/pytest_plugin.py::BaseClass::base_method"
if not init:
duration_ms = int(time.time() * 1000) - sb_config.start_time_ms
duration = float(duration_ms) / 1000.0
sb_config._duration[test_id] = duration
if test_id not in sb_config._display_id.keys():
sb_config._display_id[test_id] = self.__get_display_id()
if sb_config._display_id[test_id] == dud:
return
if hasattr(self, "_using_sb_fixture") and (
test_id not in sb_config._results.keys()):
cwd = os.getcwd()
if '\\' in cwd:
cwd = cwd.split('\\')[-1]
else:
cwd = cwd.split('/')[-1]
if test_id.count('.') > 1:
alt_test_id = '.'.join(test_id.split('.')[1:])
if alt_test_id in sb_config._results.keys():
sb_config._results.pop(alt_test_id)
if test_id in sb_config._results.keys() and (
sb_config._results[test_id] == "Skipped"):
sb_config.item_count_skipped += 1
sb_config.item_count_untested -= 1
sb_config._results[test_id] = "Skipped"
elif has_exception:
sb_config._results[test_id] = "Failed"
sb_config.item_count_failed += 1
sb_config.item_count_untested -= 1
else:
sb_config._results[test_id] = "Passed"
sb_config.item_count_passed += 1
sb_config.item_count_untested -= 1
num_passed = sb_config.item_count_passed
num_failed = sb_config.item_count_failed
num_skipped = sb_config.item_count_skipped
num_untested = sb_config.item_count_untested
self.create_pie_chart(title=constants.Dashboard.TITLE)
self.add_data_point("Passed", num_passed, color="#84d474")
self.add_data_point("Untested", num_untested, color="#eaeaea")
self.add_data_point("Skipped", num_skipped, color="#efd8b4")
self.add_data_point("Failed", num_failed, color="#f17476")
style = (
'<link rel="stylesheet" '
'href="%s">' % constants.Dashboard.STYLE_CSS)
auto_refresh_html = ''
if num_untested > 0:
# Refresh every X seconds when waiting for more test results
auto_refresh_html = constants.Dashboard.META_REFRESH_HTML
head = (
'<head><meta charset="utf-8" />'
'<meta property="og:image" '
'content="https://seleniumbase.io/img/dash_pie.png">'
'<link rel="shortcut icon" '
'href="https://seleniumbase.io/img/dash_pie_2.png">'
'%s'
'<title>Dashboard</title>'
'%s</head>' % (auto_refresh_html, style))
table_html = (
'<div></div>'
'<table border="1px solid #e6e6e6;" width="100%;" padding: 5px;'
' font-size="12px;" text-align="left;" id="results-table">'
'<thead id="results-table-head"><tr>'
'<th col="result">Result</th><th col="name">Test</th>'
'<th col="duration">Duration</th><th col="links">Links</th>'
'</tr></thead>')
the_failed = []
the_skipped = []
the_passed = []
the_untested = []
for key in sb_config._results.keys():
t_res = sb_config._results[key]
t_dur = sb_config._duration[key]
t_d_id = sb_config._display_id[key]
res_low = t_res.lower()
if sb_config._results[key] == "Failed":
the_failed.append([res_low, t_res, t_d_id, t_dur])
if sb_config._results[key] == "Skipped":
the_skipped.append([res_low, t_res, t_d_id, t_dur])
if sb_config._results[key] == "Passed":
the_passed.append([res_low, t_res, t_d_id, t_dur])
if sb_config._results[key] == "Untested":
the_untested.append([res_low, t_res, t_d_id, t_dur])
for row in the_failed:
row = (
'<tbody class="%s results-table-row"><tr>'
'<td class="col-result">%s</td><td>%s</td><td>%s</td>'
'<td><a href="latest_logs/">latest_logs/</a></td>'
'</tr></tbody>' % (row[0], row[1], row[2], row[3]))
table_html += row
for row in the_skipped:
row = (
'<tbody class="%s results-table-row"><tr>'
'<td class="col-result">%s</td><td>%s</td><td>%s</td>'
'<td></td></tr></tbody>' % (row[0], row[1], row[2], row[3]))
table_html += row
for row in the_passed:
row = (
'<tbody class="%s results-table-row"><tr>'
'<td class="col-result">%s</td><td>%s</td><td>%s</td>'
'<td></td></tr></tbody>' % (row[0], row[1], row[2], row[3]))
table_html += row
for row in the_untested:
row = (
'<tbody class="%s results-table-row"><tr>'
'<td class="col-result">%s</td><td>%s</td><td>%s</td>'
'<td></td></tr></tbody>' % (row[0], row[1], row[2], row[3]))
table_html += row
table_html += "</table>"
add_more = "<br /><b>Last updated:</b> "
timestamp, the_date, the_time = log_helper.get_master_time()
last_updated = "%s at %s" % (the_date, the_time)
add_more = add_more + "%s" % last_updated
status = "<p></p><div><b>Status:</b> Awaiting results..."
status += " (Refresh the page for updates)"
if num_untested == 0:
status = "<p></p><div><b>Status:</b> Test Run Complete:"
if num_failed == 0:
if num_passed > 0:
if num_skipped == 0:
status += " <b>Success!</b> (All tests passed)"
else:
status += " <b>Success!</b> (No failing tests)"
else:
status += " All tests were skipped!"
else:
latest_logs_dir = "latest_logs/"
log_msg = "See latest logs for details"
if num_failed == 1:
status += (
' <b>1 test failed!</b> (<a href="%s">%s</a>)'
'' % (latest_logs_dir, log_msg))
else:
status += (
' <b>%s tests failed!</b> (<a href="%s">%s</a>)'
'' % (num_failed, latest_logs_dir, log_msg))
status += "</div><p></p>"
add_more = add_more + status
gen_by = (
'<p><div>Generated by: <b><a href="https://seleniumbase.io/">'
'SeleniumBase</a></b></div></p><p></p>')
add_more = add_more + gen_by
# Have dashboard auto-refresh on updates when using an http server
refresh_line = (
'<script type="text/javascript" src="%s">'
'</script>' % constants.Dashboard.LIVE_JS)
add_more = add_more + refresh_line
the_html = head + self.extract_chart() + table_html + add_more
abs_path = os.path.abspath('.')
file_path = os.path.join(abs_path, "dashboard.html")
out_file = codecs.open(file_path, "w+", encoding="utf-8")
out_file.writelines(the_html)
out_file.close()
time.sleep(0.05) # Add time for dashboard server to process updates
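Note on live updates: the meta-refresh tag is only included while results are still pending, and live.js (per the comment above) only auto-refreshes the page when it is served over HTTP. One way to watch the dashboard update live (port number illustrative) is to run "python -m http.server 8000" from the project directory and open http://localhost:8000/dashboard.html.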
def has_exception(self):
""" (This method should ONLY be used in custom tearDown() methods.)
This method returns True if the test failed or raised an exception.
@@ -6691,6 +6927,7 @@ class BaseCase(unittest.TestCase):
# Save a screenshot if logging is on when an exception occurs
if has_exception:
self.__add_pytest_html_extra()
sb_config._has_exception = True
if self.with_testing_base and not has_exception and (
self.save_screenshot_after_test):
test_logpath = self.log_path + "/" + test_id
@@ -6742,6 +6979,8 @@ class BaseCase(unittest.TestCase):
log_helper.log_page_source(
test_logpath, self.driver,
self.__last_page_source)
if self.dashboard:
self.__process_dashboard(has_exception)
# (Pytest) Finally close all open browser windows
self.__quit_all_drivers()
if self.headless:

View File

@@ -27,6 +27,14 @@ class Charts:
SAVED_FOLDER = "saved_charts"
class Dashboard:
TITLE = "SeleniumBase Test Results Dashboard"
STYLE_CSS = 'https://seleniumbase.io/cdn/css/pytest_style.css'
META_REFRESH_HTML = '<meta http-equiv="refresh" content="10">'
# LIVE_JS = 'https://livejs.com/live.js#html'
LIVE_JS = 'https://seleniumbase.io/cdn/js/live.js#html'
class SavedCookies:
STORAGE_FOLDER = "saved_cookies"
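Note that STYLE_CSS and LIVE_JS are fetched from the seleniumbase.io CDN, so the dashboard's styling and script-based auto-refresh presumably require internet access; the results table itself still renders without them.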

View File

@@ -2,8 +2,11 @@
""" This is the pytest configuration file """
import colorama
import os
import pytest
import re
import sys
import time
from seleniumbase import config as sb_config
from seleniumbase.core import log_helper
from seleniumbase.core import proxy_helper
from seleniumbase.fixtures import constants
@@ -423,6 +426,11 @@ def pytest_addoption(parser):
To access the remote debugging interface, go to:
http://localhost:9222 while Chromedriver is running.
Info: chromedevtools.github.io/devtools-protocol/""")
parser.addoption('--dashboard',
action="store_true",
dest='dashboard',
default=False,
help="""...""")
parser.addoption('--swiftshader',
action="store_true",
dest='swiftshader',
@@ -492,9 +500,22 @@ def pytest_addoption(parser):
"\n It's not thread-safe for WebDriver processes! "
"\n Use --time-limit=s from SeleniumBase instead!\n")
if "--dashboard" in sys.argv:
arg_join = " ".join(sys.argv)
if ("-n" in sys.argv) or ("-n=" in arg_join):
raise Exception(
"\n\n Multi-threading is not yet supported using --dashboard"
"\n (You can speed up tests using --reuse-session / --rs)\n")
def pytest_configure(config):
""" This runs after command line options have been parsed """
sb_config.item_count = 0
sb_config.item_count_passed = 0
sb_config.item_count_failed = 0
sb_config.item_count_skipped = 0
sb_config.item_count_untested = 0
sb_config.item_count_finalized = False
sb_config.is_pytest = True
sb_config.browser = config.getoption('browser')
sb_config.data = config.getoption('data')
@@ -549,6 +570,7 @@ def pytest_configure(config):
sb_config.no_sandbox = config.getoption('no_sandbox')
sb_config.disable_gpu = config.getoption('disable_gpu')
sb_config.remote_debug = config.getoption('remote_debug')
sb_config.dashboard = config.getoption('dashboard')
sb_config.swiftshader = config.getoption('swiftshader')
sb_config.incognito = config.getoption('incognito')
sb_config.guest_mode = config.getoption('guest_mode')
@@ -562,12 +584,20 @@ def pytest_configure(config):
sb_config.timeout_multiplier = config.getoption('timeout_multiplier')
sb_config.pytest_html_report = config.getoption('htmlpath') # --html=FILE
sb_config._sb_node = {} # sb node dictionary (Used with the sb fixture)
# Dashboard-specific variables
sb_config._results = {} # SBase Dashboard test results
sb_config._duration = {} # SBase Dashboard test duration
sb_config._display_id = {} # SBase Dashboard display ID
sb_config._dashboard_initialized = False # Becomes True after init
sb_config._has_exception = False # This becomes True if any test fails
sb_config._multithreaded = False # This becomes True if multithreading
sb_config._only_unittest = True # If any test uses BaseCase, becomes False
sb_config._sbase_detected = False # Becomes True during SeleniumBase tests
sb_config._extra_dash_entries = [] # Dashboard entries for non-SBase tests
if sb_config.reuse_session:
arg_join = " ".join(sys.argv)
if ("-n" in sys.argv) or ("-n=" in arg_join) or (arg_join == "-c"):
# sb_config.reuse_session = False
pass # Allow multithreaded browser sessions to be reused now
arg_join = " ".join(sys.argv)
if ("-n" in sys.argv) or ("-n=" in arg_join):
sb_config._multithreaded = True
if "linux" in sys.platform and (
not sb_config.headed and not sb_config.headless):
@@ -583,25 +613,51 @@ def pytest_configure(config):
proxy_helper.remove_proxy_zip_if_present()
def pytest_unconfigure():
""" This runs after all tests have completed with pytest. """
proxy_helper.remove_proxy_zip_if_present()
if sb_config.reuse_session:
# Close the shared browser session
if sb_config.shared_driver:
try:
sb_config.shared_driver.quit()
except AttributeError:
pass
except Exception:
pass
sb_config.shared_driver = None
log_helper.archive_logs_if_set(sb_config.log_path, sb_config.archive_logs)
def pytest_sessionstart(session):
pass
def _get_test_ids_(the_item):
test_id = the_item.nodeid.split('/')[-1]
if not test_id:
test_id = "unidentified_TestCase"
test_id = test_id.replace(' ', '_')
if '[' in test_id:
test_id_intro = test_id.split('[')[0]
parameter = test_id.split('[')[1]
parameter = re.sub(re.compile(r'\W'), '', parameter)
test_id = test_id_intro + "__" + parameter
display_id = test_id
test_id = test_id.replace('/', '.').replace('\\', '.')
test_id = test_id.replace('::', '.').replace('.py', '')
return test_id, display_id
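# Illustrative example (nodeid hypothetical): for a collected item with nodeid
# "tests/test_suite.py::MyTests::test_basics[chrome]", this returns
# test_id "test_suite.MyTests.test_basics__chrome" and
# display_id "test_suite.py::MyTests::test_basics__chrome".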
def pytest_itemcollected(item):
if sb_config.dashboard:
sb_config.item_count += 1
test_id, display_id = _get_test_ids_(item)
sb_config._results[test_id] = "Untested"
sb_config._duration[test_id] = "-"
sb_config._display_id[test_id] = display_id
def pytest_deselected(items):
if sb_config.dashboard:
sb_config.item_count -= len(items)
def pytest_runtest_setup():
""" This runs before every test with pytest """
pass
if sb_config.dashboard:
sb_config._sbase_detected = False
if not sb_config.item_count_finalized:
sb_config.item_count_finalized = True
sb_config.item_count_untested = sb_config.item_count
dashboard_html = "file://" + os.getcwd() + "/dashboard.html"
star_len = len("Dashboard: ") + len(dashboard_html)
stars = "*" * star_len
print("\nDashboard: %s\n%s" % (dashboard_html, stars))
def pytest_runtest_teardown(item):
@@ -627,6 +683,61 @@ def pytest_runtest_teardown(item):
pass
def pytest_sessionfinish(session):
pass
def pytest_unconfigure():
""" This runs after all tests have completed with pytest. """
proxy_helper.remove_proxy_zip_if_present()
if sb_config.reuse_session:
# Close the shared browser session
if sb_config.shared_driver:
try:
sb_config.shared_driver.quit()
except AttributeError:
pass
except Exception:
pass
sb_config.shared_driver = None
log_helper.archive_logs_if_set(sb_config.log_path, sb_config.archive_logs)
# Dashboard post-processing: Disable auto-refresh and stamp complete
if sb_config.dashboard and not sb_config._only_unittest:
stamp = "\n<!--Test Run Complete-->"
find_it = constants.Dashboard.META_REFRESH_HTML
swap_with = '' # Stop refreshing the page after the run is done
try:
time.sleep(0.3) # Add time for "livejs" to detect changes
abs_path = os.path.abspath('.')
dashboard_path = os.path.join(abs_path, "dashboard.html")
if os.path.exists(dashboard_path):
with open(dashboard_path, 'r', encoding='utf-8') as f:
the_html = f.read()
# If the test run doesn't complete by itself, stop refresh
the_html = the_html.replace(find_it, swap_with)
the_html += stamp
time.sleep(0.25)
with open(dashboard_path, "w", encoding='utf-8') as f:
f.write(the_html)
except Exception:
pass
def pytest_terminal_summary(terminalreporter):
if sb_config._has_exception and (
sb_config.dashboard and not sb_config._only_unittest):
# Print link a second time because the first one may be off-screen
dashboard_file = "file://" + os.getcwd() + "/dashboard.html"
terminalreporter.write_sep(
"-", "Dashboard: %s" % dashboard_file)
if sb_config._has_exception or sb_config.save_screenshot:
# Log files are generated during test failures and Screenshot Mode
latest_logs_dir = os.getcwd() + "/latest_logs/"
terminalreporter.write_sep(
"-", "Log files: %s" % latest_logs_dir)
@pytest.fixture()
def sb(request):
""" SeleniumBase as a pytest fixture.
@@ -650,6 +761,8 @@ def sb(request):
request.cls.sb = BaseClass("base_method")
request.cls.sb.setUp()
request.cls.sb._needs_tearDown = True
request.cls.sb._using_sb_fixture = True
request.cls.sb._using_sb_fixture_class = True
sb_config._sb_node[request.node.nodeid] = request.cls.sb
yield request.cls.sb
if request.cls.sb._needs_tearDown:
@@ -659,6 +772,8 @@ def sb(request):
sb = BaseClass("base_method")
sb.setUp()
sb._needs_tearDown = True
sb._using_sb_fixture = True
sb._using_sb_fixture_no_class = True
sb_config._sb_node[request.node.nodeid] = sb
yield sb
if sb._needs_tearDown:
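A minimal usage sketch of the classless form, which sets _using_sb_fixture_no_class above (URL and selector illustrative):

def test_with_sb_fixture(sb):
    sb.open("https://seleniumbase.io/")
    sb.assert_element("h1")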
@@ -672,6 +787,15 @@ def pytest_runtest_makereport(item, call):
outcome = yield
report = outcome.get_result()
if pytest_html and report.when == 'call':
if sb_config.dashboard and not sb_config._sbase_detected:
test_id, display_id = _get_test_ids_(item)
r_outcome = report.outcome
if len(r_outcome) > 1:
r_outcome = r_outcome[0].upper() + r_outcome[1:]
sb_config._results[test_id] = r_outcome
sb_config._duration[test_id] = "*****"
sb_config._display_id[test_id] = display_id
sb_config._extra_dash_entries.append(test_id)
try:
extra_report = None
if hasattr(item, "_testcase"):
@@ -687,7 +811,6 @@ def pytest_runtest_makereport(item, call):
test_id = "unidentified_TestCase"
test_id = test_id.replace(' ', '_')
if '[' in test_id:
import re
test_id_intro = test_id.split('[')[0]
parameter = test_id.split('[')[1]
parameter = re.sub(re.compile(r'\W'), '', parameter)

View File

@@ -443,6 +443,7 @@ class SeleniumBrowser(Plugin):
test.test.visual_baseline = self.options.visual_baseline
test.test.timeout_multiplier = self.options.timeout_multiplier
test.test.use_grid = False
test.test.dashboard = False
test.test._reuse_session = False
if test.test.servername != "localhost":
# Use Selenium Grid (Use --server="127.0.0.1" for localhost Grid)