black: re-format
black and isort are enabled in the openbmc-build-scripts on Python files
to ensure consistent formatting. Re-run the formatters on the whole
repository.
Signed-off-by: Patrick Williams <patrick@stwcx.xyz>
Change-Id: I944f1915ece753f72a3fa654902d445a9749d0f9
diff --git a/bin/auto_status_file.py b/bin/auto_status_file.py
index d1614f3..c3f2544 100755
--- a/bin/auto_status_file.py
+++ b/bin/auto_status_file.py
@@ -4,86 +4,101 @@
See help text for details.
"""
-import sys
-import subprocess
import re
+import subprocess
+import sys
save_dir_path = sys.path.pop(0)
-modules = ['gen_arg', 'gen_print', 'gen_valid', 'gen_misc', 'gen_cmd', 'var_funcs']
+modules = [
+ "gen_arg",
+ "gen_print",
+ "gen_valid",
+ "gen_misc",
+ "gen_cmd",
+ "var_funcs",
+]
for module in modules:
exec("from " + module + " import *")
sys.path.insert(0, save_dir_path)
parser = argparse.ArgumentParser(
- usage='%(prog)s [OPTIONS]',
+ usage="%(prog)s [OPTIONS]",
description="%(prog)s will create a status file path name adhering to the"
- + " following pattern: <status dir path>/<prefix>.yymmdd."
- + "hhmmss.status. It will then run the command string and"
- + " direct its stdout/stderr to the status file and optionally"
- + " to stdout. This dual output streaming will be"
- + " accomplished using either the \"script\" or the \"tee\""
- + " program. %(prog)s will also set and export environment"
- + " variable \"AUTO_STATUS_FILE_PATH\" for the benefit of"
- + " child programs.",
+ + " following pattern: <status dir path>/<prefix>.yymmdd."
+ + "hhmmss.status. It will then run the command string and"
+ + " direct its stdout/stderr to the status file and optionally"
+ + " to stdout. This dual output streaming will be"
+ + ' accomplished using either the "script" or the "tee"'
+ + " program. %(prog)s will also set and export environment"
+ + ' variable "AUTO_STATUS_FILE_PATH" for the benefit of'
+ + " child programs.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- prefix_chars='-+')
+ prefix_chars="-+",
+)
parser.add_argument(
- '--status_dir_path',
- default='',
+ "--status_dir_path",
+ default="",
help="The path to the directory where the status file will be created."
- + "%(default)s The default value is obtained from environment"
- + " variable \"${STATUS_DIR_PATH}\", if set or from \"${HOME}/"
- + "status/\".")
+ + "%(default)s The default value is obtained from environment"
+ + ' variable "${STATUS_DIR_PATH}", if set or from "${HOME}/'
+ + 'status/".',
+)
parser.add_argument(
- '--prefix',
- default='',
+ "--prefix",
+ default="",
help="The prefix for the generated file name.%(default)s The default value"
- + " is the command portion (i.e. the first token) of the command"
- + " string.")
+ + " is the command portion (i.e. the first token) of the command"
+ + " string.",
+)
parser.add_argument(
- '--status_file_name',
- default='',
+ "--status_file_name",
+ default="",
help="This allows the user to explicitly specify the status file name. If"
- + " this argument is not used, %(prog)s composes a status file name."
- + " If this argument is specified, the \"--prefix\" argument is"
- + " ignored.")
+ + " this argument is not used, %(prog)s composes a status file name."
+ + ' If this argument is specified, the "--prefix" argument is'
+ + " ignored.",
+)
parser.add_argument(
- '--stdout',
+ "--stdout",
default=1,
type=int,
choices=[1, 0],
help="Indicates that stdout/stderr from the command string execution"
- + " should be written to stdout as well as to the status file.")
+ + " should be written to stdout as well as to the status file.",
+)
parser.add_argument(
- '--tee',
+ "--tee",
default=1,
type=int,
choices=[1, 0],
- help="Indicates that \"tee\" rather than \"script\" should be used.")
+ help='Indicates that "tee" rather than "script" should be used.',
+)
parser.add_argument(
- '--show_url',
+ "--show_url",
default=0,
type=int,
choices=[1, 0],
help="Indicates that the status file path shown should be shown in the"
- + " form of a url. If the output is to be viewed from a browser,"
- + " this may well become a clickable link. Note that the"
- + " get_file_path_url.py program must be found in the \"PATH\""
- + " environment variable for this argument to be effective.")
+ + " form of a url. If the output is to be viewed from a browser,"
+ + " this may well become a clickable link. Note that the"
+ + ' get_file_path_url.py program must be found in the "PATH"'
+ + " environment variable for this argument to be effective.",
+)
parser.add_argument(
- 'command_string',
- default='',
- nargs='*',
- help="The command string to be run.%(default)s")
+ "command_string",
+ default="",
+ nargs="*",
+ help="The command string to be run.%(default)s",
+)
# Populate stock_list with options we want.
stock_list = [("test_mode", 0), ("quiet", 1), ("debug", 0)]
@@ -103,9 +118,9 @@
valid_value(command_string)
if status_dir_path == "":
- status_dir_path = \
- os.environ.get("STATUS_DIR_PATH",
- os.environ.get("HOME") + "/status/")
+ status_dir_path = os.environ.get(
+ "STATUS_DIR_PATH", os.environ.get("HOME") + "/status/"
+ )
status_dir_path = add_trailing_slash(status_dir_path)
set_pgm_arg(status_dir_path)
valid_dir_path(status_dir_path)
@@ -126,9 +141,9 @@
status_file_path = status_dir_path + status_file_name
# Set environment variable for the benefit of child programs.
- os.environ['AUTO_STATUS_FILE_PATH'] = status_file_path
+ os.environ["AUTO_STATUS_FILE_PATH"] = status_file_path
# Set deprecated but still used AUTOSCRIPT_STATUS_FILE_PATH value.
- os.environ['AUTOSCRIPT_STATUS_FILE_PATH'] = status_file_path
+ os.environ["AUTOSCRIPT_STATUS_FILE_PATH"] = status_file_path
def script_func(command_string, status_file_path):
@@ -141,9 +156,15 @@
status_file_path The path to the status file which is to contain a copy of all stdout.
"""
- cmd_buf = "script -a -q -f " + status_file_path + " -c '" \
- + escape_bash_quotes(command_string) + " ; printf \"\\n" \
- + sprint_varx(ret_code_str, "${?}").rstrip("\n") + "\\n\"'"
+ cmd_buf = (
+ "script -a -q -f "
+ + status_file_path
+ + " -c '"
+ + escape_bash_quotes(command_string)
+ + ' ; printf "\\n'
+ + sprint_varx(ret_code_str, "${?}").rstrip("\n")
+ + "\\n\"'"
+ )
qprint_issuing(cmd_buf)
sub_proc = subprocess.Popen(cmd_buf, shell=True)
sub_proc.communicate()
@@ -152,8 +173,13 @@
# Retrieve return code by examining ret_code_str output statement from status file.
# Example text to be analyzed.
# auto_status_file_ret_code: 127
- cmd_buf = "tail -n 10 " + status_file_path + " | egrep -a \"" \
- + ret_code_str + ":[ ]+\""
+ cmd_buf = (
+ "tail -n 10 "
+ + status_file_path
+ + ' | egrep -a "'
+ + ret_code_str
+ + ':[ ]+"'
+ )
rc, output = shell_cmd(cmd_buf)
key, value = parse_key_value(output)
shell_rc = int(value)
@@ -170,8 +196,12 @@
status_file_path The path to the status file which is to contain a copy of all stdout.
"""
- cmd_buf = "set -o pipefail ; " + command_string + " 2>&1 | tee -a " \
+ cmd_buf = (
+ "set -o pipefail ; "
+ + command_string
+ + " 2>&1 | tee -a "
+ status_file_path
+ )
qprint_issuing(cmd_buf)
sub_proc = subprocess.Popen(cmd_buf, shell=True)
sub_proc.communicate()
@@ -187,10 +217,11 @@
def main():
-
gen_setup()
- set_term_options(term_requests={'pgm_names': [command_string.split(" ")[0]]})
+ set_term_options(
+ term_requests={"pgm_names": [command_string.split(" ")[0]]}
+ )
global ret_code_str
ret_code_str = re.sub("\\.py$", "", pgm_name) + "_ret_code"
@@ -202,8 +233,9 @@
show_url = 0
set_pgm_arg(show_url)
else:
- shell_rc, status_file_url = shell_cmd("get_file_path_url.py "
- + status_file_path)
+ shell_rc, status_file_url = shell_cmd(
+ "get_file_path_url.py " + status_file_path
+ )
status_file_url = status_file_url.rstrip("\n")
# Print status file path/url to stdout and to status file.
diff --git a/bin/event_notification_util.py b/bin/event_notification_util.py
index 380a120..9c52176 100755
--- a/bin/event_notification_util.py
+++ b/bin/event_notification_util.py
@@ -8,39 +8,41 @@
save_dir_path = sys.path.pop(0)
-modules = ['gen_arg', 'gen_print', 'gen_valid', 'event_notification']
+modules = ["gen_arg", "gen_print", "gen_valid", "event_notification"]
for module in modules:
exec("from " + module + " import *")
sys.path.insert(0, save_dir_path)
parser = argparse.ArgumentParser(
- usage='%(prog)s [OPTIONS]',
+ usage="%(prog)s [OPTIONS]",
description="%(prog)s will subscribe and receive event notifications when "
- + "properties change for the given dbus path.",
+ + "properties change for the given dbus path.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- prefix_chars='-+')
+ prefix_chars="-+",
+)
parser.add_argument(
- '--host',
- default='',
- help='The host name or IP of the system to subscribe to.')
+ "--host",
+ default="",
+ help="The host name or IP of the system to subscribe to.",
+)
parser.add_argument(
- '--username',
- default='root',
- help='The username for the host system.')
+ "--username", default="root", help="The username for the host system."
+)
parser.add_argument(
- '--password',
- default='',
- help='The password for the host system.')
+ "--password", default="", help="The password for the host system."
+)
parser.add_argument(
- '--dbus_path',
- default='',
- help='The path to be monitored (e.g. "/xyz/openbmc_project/sensors").')
+ "--dbus_path",
+ default="",
+ help='The path to be monitored (e.g. "/xyz/openbmc_project/sensors").',
+)
parser.add_argument(
- '--enable_trace',
+ "--enable_trace",
choices=[0, 1],
default=0,
- help='Indicates that trace needs to be enabled.')
+ help="Indicates that trace needs to be enabled.",
+)
# Populate stock_list with options we want.
diff --git a/bin/generate_inventory b/bin/generate_inventory
index 0d90343..c6a7d57 100755
--- a/bin/generate_inventory
+++ b/bin/generate_inventory
@@ -3,56 +3,67 @@
Generate an inventory variable file containing a list of properties
fields from the YAML phosphor-dbus-interfaces repository.
"""
-import sys
-import os
-import yaml
import json
+import os
+import sys
+
+import yaml
lib_path = sys.path[0] + "/../lib"
sys.path.insert(0, lib_path)
-from gen_print import * # NOQA
+from gen_print import * # NOQA
# This list will be longer when more development codes are available.
-inventory_items = ['fru', 'core', 'fan', 'fan_wc', 'gpu']
+inventory_items = ["fru", "core", "fan", "fan_wc", "gpu"]
print_var(inventory_items)
-fru_inventory_file_path = 'inventory.py'
+fru_inventory_file_path = "inventory.py"
print_var(fru_inventory_file_path)
# Properties inventory list
yaml_inventory_list = []
# Clone the phosphor-dbus-interfaces repository
-cmd_buf = 'git clone https://github.com/openbmc/phosphor-dbus-interfaces'
+cmd_buf = "git clone https://github.com/openbmc/phosphor-dbus-interfaces"
os.system(cmd_buf)
-repo_subdir_path = '/phosphor-dbus-interfaces/xyz/openbmc_project/'
+repo_subdir_path = "/phosphor-dbus-interfaces/xyz/openbmc_project/"
base_dir_path = os.getcwd() + repo_subdir_path
# yaml file paths for FRU
-yaml_fru_list = ['Inventory/Item.interface.yaml',
- 'Inventory/Decorator/Asset.interface.yaml',
- 'Inventory/Decorator/Revision.interface.yaml',
- 'Inventory/Decorator/Replaceable.interface.yaml',
- 'Inventory/Decorator/Cacheable.interface.yaml',
- 'State/Decorator/OperationalStatus.interface.yaml', ]
+yaml_fru_list = [
+ "Inventory/Item.interface.yaml",
+ "Inventory/Decorator/Asset.interface.yaml",
+ "Inventory/Decorator/Revision.interface.yaml",
+ "Inventory/Decorator/Replaceable.interface.yaml",
+ "Inventory/Decorator/Cacheable.interface.yaml",
+ "State/Decorator/OperationalStatus.interface.yaml",
+]
# yaml file paths for CORE.
-yaml_core_list = ['Inventory/Item.interface.yaml',
- 'State/Decorator/OperationalStatus.interface.yaml', ]
+yaml_core_list = [
+ "Inventory/Item.interface.yaml",
+ "State/Decorator/OperationalStatus.interface.yaml",
+]
# yaml file paths for fan.
-yaml_fan_list = ['Inventory/Item.interface.yaml',
- 'Inventory/Decorator/MeetsMinimumShipLevel.interface.yaml',
- 'State/Decorator/OperationalStatus.interface.yaml', ]
+yaml_fan_list = [
+ "Inventory/Item.interface.yaml",
+ "Inventory/Decorator/MeetsMinimumShipLevel.interface.yaml",
+ "State/Decorator/OperationalStatus.interface.yaml",
+]
# yaml file paths for fan_wc (fans in water-cooled system).
-yaml_fan_wc_list = ['Inventory/Item.interface.yaml',
- 'Inventory/Decorator/MeetsMinimumShipLevel.interface.yaml', ]
+yaml_fan_wc_list = [
+ "Inventory/Item.interface.yaml",
+ "Inventory/Decorator/MeetsMinimumShipLevel.interface.yaml",
+]
# yaml file paths for GPU.
-yaml_gpu_list = ['Inventory/Item.interface.yaml',
- 'Inventory/Decorator/Replaceable.interface.yaml',
- 'State/Decorator/OperationalStatus.interface.yaml', ]
+yaml_gpu_list = [
+ "Inventory/Item.interface.yaml",
+ "Inventory/Decorator/Replaceable.interface.yaml",
+ "State/Decorator/OperationalStatus.interface.yaml",
+]
# Append to inventory list
yaml_inventory_list.append(yaml_fru_list)
@@ -77,16 +88,18 @@
f = open(yaml_file_path)
yaml_data = yaml.safe_load(f)
f.close()
- for item in range(0, len(yaml_data['properties'])):
- tmp_data = yaml_data['properties'][item]['name']
+ for item in range(0, len(yaml_data["properties"])):
+ tmp_data = yaml_data["properties"][item]["name"]
inventory_dict[str(inventory_items[inv_index])].append(tmp_data)
# Pretty print json formatter
-data = json.dumps(inventory_dict,
- indent=4,
- sort_keys=True,
- default=str,
- separators=(',', ':'))
+data = json.dumps(
+ inventory_dict,
+ indent=4,
+ sort_keys=True,
+ default=str,
+ separators=(",", ":"),
+)
# Check if there is mismatch in data vs expect list
if len(inventory_dict) != len(inventory_items):
@@ -98,8 +111,8 @@
# Write dictionary data to inventory file
print("\nGenerated Inventory item json format\n")
print(data)
-out = open(fru_inventory_file_path, 'w')
-out.write('inventory_dict = ')
+out = open(fru_inventory_file_path, "w")
+out.write("inventory_dict = ")
out.write(data)
out.close()
diff --git a/bin/generate_testsuite_info.py b/bin/generate_testsuite_info.py
index 4a2f115..1847308 100755
--- a/bin/generate_testsuite_info.py
+++ b/bin/generate_testsuite_info.py
@@ -5,53 +5,56 @@
Refer to https://robot-framework.readthedocs.io/en/3.0.1/autodoc/robot.parsing.html
"""
-import sys
import os
+import sys
+
from robot.parsing.model import TestData
+
sys.path.append(os.path.join(os.path.dirname(__file__), "../lib"))
-from gen_arg import * # NOQA
-from gen_print import * # NOQA
-from gen_valid import * # NOQA
+from gen_arg import * # NOQA
+from gen_print import * # NOQA
+from gen_valid import * # NOQA
# Set exit_on_error for gen_valid functions.
set_exit_on_error(True)
-valid_options = ['name', 'tags', 'doc', 'all']
+valid_options = ["name", "tags", "doc", "all"]
parser = argparse.ArgumentParser(
- usage='%(prog)s [OPTIONS]',
- description=";%(prog)s will print test suite information to stdout. This \
- information consists of any and/or all of the following: \
- the suite name, test case names, tag names and doc strings. \
- Example for generated test case names \
- tests/test_basic_poweron.robot \
- Verify Front And Rear LED At Standby \
- Power On Test \
- Check For Application Failures \
- Verify Uptime Average Against Threshold \
- Test SSH And IPMI Connections",
+ usage="%(prog)s [OPTIONS]",
+ description=(
+ ";%(prog)s will print test suite information to stdout. This "
+ " information consists of any and/or all of the following: "
+ " the suite name, test case names, tag names and doc"
+ " strings. Example for generated test case names "
+ " tests/test_basic_poweron.robot "
+ " Verify Front And Rear LED At Standby Power On Test"
+ " Check For Application Failures "
+ " Verify Uptime Average Against Threshold Test SSH"
+ " And IPMI Connections"
+ ),
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- prefix_chars='-+')
+ prefix_chars="-+",
+)
parser.add_argument(
- '--source_path',
- '-s',
- help='The robot test file or directory path.')
+ "--source_path", "-s", help="The robot test file or directory path."
+)
parser.add_argument(
- '--option',
- '-o',
+ "--option",
+ "-o",
default="name",
- help='Test case attribute name. This may be any one of the following:\n'
- + sprint_var(valid_options))
+ help="Test case attribute name. This may be any one of the following:\n"
+ + sprint_var(valid_options),
+)
# Populate stock_list with options we want.
stock_list = [("test_mode", 0), ("quiet", 0), ("debug", 0)]
-def exit_function(signal_number=0,
- frame=None):
+def exit_function(signal_number=0, frame=None):
r"""
Execute whenever the program ends normally or with the signals that we
catch (i.e. TERM, INT).
@@ -64,8 +67,7 @@
qprint_pgm_footer()
-def signal_handler(signal_number,
- frame):
+def signal_handler(signal_number, frame):
r"""
Handle signals. Without a function to catch a SIGTERM or SIGINT, the
program would terminate immediately with return code 143 and without
@@ -107,9 +109,11 @@
if os.path.isfile(source_path):
file_paths = [source_path]
else:
- file_paths = [os.path.join(path, file)
- for (path, dirs, files) in os.walk(source_path)
- for file in files]
+ file_paths = [
+ os.path.join(path, file)
+ for (path, dirs, files) in os.walk(source_path)
+ for file in files
+ ]
for file_path in file_paths:
print(file_path)
@@ -148,7 +152,6 @@
def main():
-
gen_get_options(parser, stock_list)
validate_parms()
diff --git a/bin/obmc_ser_num b/bin/obmc_ser_num
index 4057316..e56458c 100755
--- a/bin/obmc_ser_num
+++ b/bin/obmc_ser_num
@@ -4,16 +4,17 @@
This program will get the system serial number from an OBMC machine and print it to stdout.
"""
-import sys
import os
+import sys
+
import requests
save_path_0 = sys.path[0]
del sys.path[0]
-from gen_arg import * # NOQA
-from gen_print import * # NOQA
-from gen_valid import * # NOQA
+from gen_arg import * # NOQA
+from gen_print import * # NOQA
+from gen_valid import * # NOQA
# Restore sys.path[0].
sys.path.insert(0, save_path_0)
@@ -21,33 +22,35 @@
logging.captureWarnings(True)
parser = argparse.ArgumentParser(
- usage='%(prog)s [OPTIONS]',
+ usage="%(prog)s [OPTIONS]",
description="%(prog)s will get the system serial number from an OBMC"
+ " machine and print it to stdout as follows:\n\n"
+ "mch_ser_num:<ser num>",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- prefix_chars='-+')
+ prefix_chars="-+",
+)
parser.add_argument(
- '--openbmc_username',
+ "--openbmc_username",
default="root",
- help='The username for communicating with the OpenBMC machine.')
+ help="The username for communicating with the OpenBMC machine.",
+)
parser.add_argument(
- '--openbmc_password',
+ "--openbmc_password",
default="0penBmc",
- help='The password for communicating with the OpenBMC machine.')
+ help="The password for communicating with the OpenBMC machine.",
+)
parser.add_argument(
- 'openbmc_host',
- help='The host name or IP address of the OpenBMC machine.')
+ "openbmc_host", help="The host name or IP address of the OpenBMC machine."
+)
# Populate stock_list with options we want.
stock_list = [("test_mode", 0), ("quiet", 1)]
-def exit_function(signal_number=0,
- frame=None):
+def exit_function(signal_number=0, frame=None):
r"""
Execute whenever the program ends normally or with the signals that we catch (i.e. TERM, INT).
"""
@@ -58,8 +61,7 @@
qprint_pgm_footer()
-def signal_handler(signal_number,
- frame):
+def signal_handler(signal_number, frame):
r"""
Handle signals. Without a function to catch a SIGTERM or SIGINT, our program would terminate immediately
with return code 143 and without calling our exit_function.
@@ -96,7 +98,6 @@
def main():
-
if not gen_get_options(parser, stock_list):
return False
@@ -109,12 +110,14 @@
http_prefix = create_http_prefix(openbmc_host)
- command = http_prefix + 'login'
+ command = http_prefix + "login"
qprint_issuing(command)
- resp = session.post(command,
- json={'data': [openbmc_username, openbmc_password]},
- verify=False)
- if resp.json()['status'] != 'ok':
+ resp = session.post(
+ command,
+ json={"data": [openbmc_username, openbmc_password]},
+ verify=False,
+ )
+ if resp.json()["status"] != "ok":
json = resp.json()
print_error_report("http request failed:\n" + sprint_var(command))
raise Exception("Login failed.\n")
@@ -123,15 +126,18 @@
qprint_issuing(command)
resp = session.get(command, verify=False)
json = resp.json()
- if json['status'] != 'ok':
+ if json["status"] != "ok":
print_error_report("http request failed:\n" + sprint_var(command))
raise Exception("http request failed.\n")
try:
- mch_ser_num = json['data']['SerialNumber']
+ mch_ser_num = json["data"]["SerialNumber"]
except KeyError:
- print_error_report("Failed to find 'SerialNumber' key in the"
- + " following data:\n" + sprint_var(json))
+ print_error_report(
+ "Failed to find 'SerialNumber' key in the"
+ + " following data:\n"
+ + sprint_var(json)
+ )
return False
print_var(mch_ser_num, 0, 0, 0)
diff --git a/bin/plug_ins/Auto_reboot/cp_master b/bin/plug_ins/Auto_reboot/cp_master
index 7e1b274..8f12588 100755
--- a/bin/plug_ins/Auto_reboot/cp_master
+++ b/bin/plug_ins/Auto_reboot/cp_master
@@ -10,7 +10,15 @@
save_dir_path = sys.path.pop(0)
-modules = ['gen_arg', 'gen_print', 'gen_valid', 'gen_misc', 'gen_cmd', 'gen_plug_in_utils', 'gen_call_robot']
+modules = [
+ "gen_arg",
+ "gen_print",
+ "gen_valid",
+ "gen_misc",
+ "gen_cmd",
+ "gen_plug_in_utils",
+ "gen_call_robot",
+]
for module in modules:
exec("from " + module + " import *")
@@ -21,22 +29,27 @@
set_exit_on_error(True)
parser = argparse.ArgumentParser(
- usage='%(prog)s [OPTIONS]',
- description="%(prog)s will set the auto_boot policy according to the user's wishes.",
+ usage="%(prog)s [OPTIONS]",
+ description=(
+ "%(prog)s will set the auto_boot policy according to the user's"
+ " wishes."
+ ),
formatter_class=argparse.RawTextHelpFormatter,
- prefix_chars='-+')
+ prefix_chars="-+",
+)
# Populate stock_list with options we want.
-stock_list = [("test_mode", get_plug_default("test_mode", 0)),
- ("quiet", get_plug_default("quiet", 0)),
- ("debug", get_plug_default("debug", 0))]
+stock_list = [
+ ("test_mode", get_plug_default("test_mode", 0)),
+ ("quiet", get_plug_default("quiet", 0)),
+ ("debug", get_plug_default("debug", 0)),
+]
AUTO_REBOOT_DISABLE = "1"
def validate_parms():
-
r"""
Validate program parameters, etc. Return True or False (i.e. pass/fail) accordingly.
"""
@@ -53,10 +66,9 @@
def main():
-
gen_setup()
- set_term_options(term_requests='children')
+ set_term_options(term_requests="children")
print_plug_in_header()
@@ -67,18 +79,36 @@
lib_file_path = init_robot_file_path("lib/utils.robot")
- REDFISH_SUPPORT_TRANS_STATE = int(os.environ.get('REDFISH_SUPPORT_TRANS_STATE', 0)) or \
- int(os.environ.get('AUTOBOOT_REDFISH_SUPPORT_TRANS_STATE', 0))
+ REDFISH_SUPPORT_TRANS_STATE = int(
+ os.environ.get("REDFISH_SUPPORT_TRANS_STATE", 0)
+ ) or int(os.environ.get("AUTOBOOT_REDFISH_SUPPORT_TRANS_STATE", 0))
enable_auto_reboot = 1 - AUTO_REBOOT_DISABLE
print_var(enable_auto_reboot)
keyword_string = "Set Auto Reboot Setting ${%i}" % enable_auto_reboot
- cmd_buf = create_robot_cmd_string("extended/run_keyword.robot", OPENBMC_HOST, SSH_PORT, HTTPS_PORT,
- REST_USERNAME, REST_PASSWORD, OPENBMC_USERNAME, OPENBMC_PASSWORD,
- IPMI_USERNAME, IPMI_PASSWORD, REDFISH_SUPPORT_TRANS_STATE,
- keyword_string, lib_file_path, quiet, test_mode, debug, outputdir,
- output, log, report)
+ cmd_buf = create_robot_cmd_string(
+ "extended/run_keyword.robot",
+ OPENBMC_HOST,
+ SSH_PORT,
+ HTTPS_PORT,
+ REST_USERNAME,
+ REST_PASSWORD,
+ OPENBMC_USERNAME,
+ OPENBMC_PASSWORD,
+ IPMI_USERNAME,
+ IPMI_PASSWORD,
+ REDFISH_SUPPORT_TRANS_STATE,
+ keyword_string,
+ lib_file_path,
+ quiet,
+ test_mode,
+ debug,
+ outputdir,
+ output,
+ log,
+ report,
+ )
retry_count = 3
while not robot_cmd_fnc(cmd_buf):
diff --git a/bin/plug_ins/FFDC/cp_ffdc_check b/bin/plug_ins/FFDC/cp_ffdc_check
index 8d51271..b928658 100755
--- a/bin/plug_ins/FFDC/cp_ffdc_check
+++ b/bin/plug_ins/FFDC/cp_ffdc_check
@@ -1,35 +1,41 @@
#!/usr/bin/env python3
+from gen_arg import *
+from gen_call_robot import *
+from gen_cmd import *
+from gen_misc import *
+from gen_plug_in_utils import *
from gen_print import *
from gen_valid import *
-from gen_arg import *
-from gen_misc import *
-from gen_cmd import *
-from gen_plug_in_utils import *
-from gen_call_robot import *
# Set exit_on_error for gen_valid functions.
set_exit_on_error(True)
parser = argparse.ArgumentParser(
- usage='%(prog)s [OPTIONS]',
- description="%(prog)s will determine whether FFDC should be collected. If so, it will return "
- + repr(dump_ffdc_rc()) + ".",
+ usage="%(prog)s [OPTIONS]",
+ description=(
+ "%(prog)s will determine whether FFDC should be collected. If so, it"
+ " will return "
+ )
+ + repr(dump_ffdc_rc())
+ + ".",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- prefix_chars='-+')
+ prefix_chars="-+",
+)
# The stock_list will be passed to gen_get_options. We populate it with the names of stock parm options we
# want. These stock parms are pre-defined by gen_get_options.
-stock_list = [("test_mode", get_plug_default("test_mode", 0)),
- ("quiet", get_plug_default("quiet", 0)),
- ("debug", get_plug_default("debug", 0))]
+stock_list = [
+ ("test_mode", get_plug_default("test_mode", 0)),
+ ("quiet", get_plug_default("quiet", 0)),
+ ("debug", get_plug_default("debug", 0)),
+]
# For now we are hard-coding this value vs adding a soft_errors=boolean entry in the parm_def file.
FFDC_SOFT_ERRORS = 1
-def exit_function(signal_number=0,
- frame=None):
+def exit_function(signal_number=0, frame=None):
r"""
Execute whenever the program ends normally or with the signals that we catch (i.e. TERM, INT).
@@ -52,14 +58,15 @@
def main():
-
gen_setup()
print_plug_in_header()
if FFDC_COMMAND.upper() == "FAIL":
if AUTOBOOT_BOOT_SUCCESS == "0":
- print_timen("The caller wishes to dump FFDC after each boot failure.")
+ print_timen(
+ "The caller wishes to dump FFDC after each boot failure."
+ )
exit(dump_ffdc_rc())
elif FFDC_COMMAND.upper() == "ALL":
print_timen("The caller wishes to dump FFDC after each boot test.")
@@ -73,8 +80,10 @@
# Check the num_error_logs value left by the Soft_errors plug-in.
num_error_logs = int(restore_plug_in_value(0, "Soft_errors"))
if num_error_logs > 0:
- print_timen("The \"Soft_errors\" plug-in found soft_errors and the"
- + " caller wishes to dump FFDC on soft errors.")
+ print_timen(
+ 'The "Soft_errors" plug-in found soft_errors and the'
+ + " caller wishes to dump FFDC on soft errors."
+ )
exit(dump_ffdc_rc())
print_timen("The caller does not wish for any FFDC to be collected.")
diff --git a/bin/plug_ins/Soft_errors/cp_post_boot b/bin/plug_ins/Soft_errors/cp_post_boot
index 9f9d515..649fd31 100755
--- a/bin/plug_ins/Soft_errors/cp_post_boot
+++ b/bin/plug_ins/Soft_errors/cp_post_boot
@@ -1,13 +1,13 @@
#!/usr/bin/env python3
+from gen_arg import *
+from gen_call_robot import *
+from gen_cmd import *
+from gen_misc import *
+from gen_plug_in_utils import *
from gen_print import *
from gen_valid import *
-from gen_arg import *
-from gen_misc import *
-from gen_cmd import *
from var_funcs import *
-from gen_plug_in_utils import *
-from gen_call_robot import *
# Set exit_on_error for gen_valid functions.
set_exit_on_error(True)
@@ -15,23 +15,25 @@
parser = argparse.ArgumentParser(
- usage='%(prog)s [OPTIONS]',
+ usage="%(prog)s [OPTIONS]",
description="%(prog)s will calculate the value of num_err_logs and"
+ " save it as a plug-in value for the benefit of the FFDC plug-in."
+ " The FFDC plug-in can use that data to decide whether to collect"
+ " FFDC data.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- prefix_chars='-+')
+ prefix_chars="-+",
+)
# The stock_list will be passed to gen_get_options. We populate it with the names of stock parm options we
# want. These stock parms are pre-defined by gen_get_options.
-stock_list = [("test_mode", 0),
- ("quiet", get_plug_default("quiet", 0)),
- ("debug", get_plug_default("debug", 0))]
+stock_list = [
+ ("test_mode", 0),
+ ("quiet", get_plug_default("quiet", 0)),
+ ("debug", get_plug_default("debug", 0)),
+]
-def exit_function(signal_number=0,
- frame=None):
+def exit_function(signal_number=0, frame=None):
r"""
Execute whenever the program ends normally or with the signals that we catch (i.e. TERM, INT).
@@ -53,73 +55,116 @@
global AUTOSCRIPT_STATUS_FILE_PATH
# AUTOSCRIPT_STATUS_FILE_PATH is set when we're called by autoscript. For this program to work
# correctly, it must be called with autoscript.
- AUTOSCRIPT_STATUS_FILE_PATH = os.environ.get("AUTOSCRIPT_STATUS_FILE_PATH", "")
+ AUTOSCRIPT_STATUS_FILE_PATH = os.environ.get(
+ "AUTOSCRIPT_STATUS_FILE_PATH", ""
+ )
valid_value(AUTOSCRIPT_STATUS_FILE_PATH)
valid_value(AUTOBOOT_OPENBMC_HOST)
def main():
-
gen_setup()
print_plug_in_header()
# Get the number of error logs from the BMC.
init_robot_out_parms(get_plug_in_package_name() + "." + pgm_name + ".")
- high_sev_elogs_file_path = AUTOBOOT_FFDC_DIR_PATH + AUTOBOOT_FFDC_PREFIX + "high_severity_errorlog.json"
+ high_sev_elogs_file_path = (
+ AUTOBOOT_FFDC_DIR_PATH
+ + AUTOBOOT_FFDC_PREFIX
+ + "high_severity_errorlog.json"
+ )
lib_file_path = init_robot_file_path("lib/logging_utils.robot")
lib_file_path += ":" + init_robot_file_path("lib/gen_robot_print.py")
set_mod_global(lib_file_path)
- REDFISH_SUPPORT_TRANS_STATE = int(os.environ.get('REDFISH_SUPPORT_TRANS_STATE', 0))
+ REDFISH_SUPPORT_TRANS_STATE = int(
+ os.environ.get("REDFISH_SUPPORT_TRANS_STATE", 0)
+ )
if not REDFISH_SUPPORT_TRANS_STATE:
try:
from robot.libraries.BuiltIn import BuiltIn
- REDFISH_SUPPORT_TRANS_STATE = \
- int(BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0))
+
+ REDFISH_SUPPORT_TRANS_STATE = int(
+ BuiltIn().get_variable_value(
+ "${REDFISH_SUPPORT_TRANS_STATE}", default=0
+ )
+ )
except Exception:
pass
- keyword_redfish_strings = \
- [
- "${error_logs}= Get Redfish Event Logs &{filter_low_severity_errlogs}",
- "${num_error_logs}= Get Length ${error_logs}",
- "Rprint Vars num_error_logs",
- "${json_string}= Evaluate json.dumps($error_logs, indent=4) modules=json",
- "Append To File " + high_sev_elogs_file_path + " ${json_string}"
- ]
+ keyword_redfish_strings = [
+ (
+ "${error_logs}= Get Redfish Event Logs "
+ " &{filter_low_severity_errlogs}"
+ ),
+ "${num_error_logs}= Get Length ${error_logs}",
+ "Rprint Vars num_error_logs",
+ (
+ "${json_string}= Evaluate json.dumps($error_logs, indent=4) "
+ " modules=json"
+ ),
+ "Append To File " + high_sev_elogs_file_path + " ${json_string}",
+ ]
- keyword_strings = \
- [
- "${error_logs}= Get Error Logs &{filter_low_severity_errlogs}",
- "${num_error_logs}= Get Length ${error_logs}",
- "Rprint Vars num_error_logs",
- "${json_string}= Evaluate json.dumps($error_logs, indent=4) modules=json",
- "Append To File " + high_sev_elogs_file_path + " ${json_string}"
- ]
+ keyword_strings = [
+ "${error_logs}= Get Error Logs &{filter_low_severity_errlogs}",
+ "${num_error_logs}= Get Length ${error_logs}",
+ "Rprint Vars num_error_logs",
+ (
+ "${json_string}= Evaluate json.dumps($error_logs, indent=4) "
+ " modules=json"
+ ),
+ "Append To File " + high_sev_elogs_file_path + " ${json_string}",
+ ]
if REDFISH_SUPPORT_TRANS_STATE:
- keyword_string = ' ; '.join(keyword_redfish_strings)
+ keyword_string = " ; ".join(keyword_redfish_strings)
else:
- keyword_string = ' ; '.join(keyword_strings)
+ keyword_string = " ; ".join(keyword_strings)
set_mod_global(keyword_string)
- cmd_buf = create_robot_cmd_string("extended/run_keyword.robot", OPENBMC_HOST, SSH_PORT, HTTPS_PORT,
- REST_USERNAME, REST_PASSWORD, OPENBMC_USERNAME, OPENBMC_PASSWORD,
- keyword_string, lib_file_path, quiet,
- test_mode, debug, outputdir, output, log, report)
+ cmd_buf = create_robot_cmd_string(
+ "extended/run_keyword.robot",
+ OPENBMC_HOST,
+ SSH_PORT,
+ HTTPS_PORT,
+ REST_USERNAME,
+ REST_PASSWORD,
+ OPENBMC_USERNAME,
+ OPENBMC_PASSWORD,
+ keyword_string,
+ lib_file_path,
+ quiet,
+ test_mode,
+ debug,
+ outputdir,
+ output,
+ log,
+ report,
+ )
if not robot_cmd_fnc(cmd_buf):
exit(1)
# The output contains the num_error_logs value which we will isolate with egrep.
- rc, out_buf = shell_cmd("egrep '^num_error_logs:[ ]' " + AUTOSCRIPT_STATUS_FILE_PATH, quiet=1,
- print_output=0)
+ rc, out_buf = shell_cmd(
+ "egrep '^num_error_logs:[ ]' " + AUTOSCRIPT_STATUS_FILE_PATH,
+ quiet=1,
+ print_output=0,
+ )
result = key_value_outbuf_to_dict(out_buf)
- num_error_logs = int(result['num_error_logs'])
+ num_error_logs = int(result["num_error_logs"])
save_plug_in_value(num_error_logs)
if num_error_logs > 0:
- qprint_timen("Adding the name of our high severity error logs FFDC file "
- + "to the appropriate file list.")
- shell_cmd("echo " + high_sev_elogs_file_path + " > " + AUTOBOOT_FFDC_LIST_FILE_PATH)
+ qprint_timen(
+ "Adding the name of our high severity error logs FFDC file "
+ + "to the appropriate file list."
+ )
+ shell_cmd(
+ "echo "
+ + high_sev_elogs_file_path
+ + " > "
+ + AUTOBOOT_FFDC_LIST_FILE_PATH
+ )
else:
os.remove(high_sev_elogs_file_path)
diff --git a/bin/plug_ins/Stop/cp_stop_check b/bin/plug_ins/Stop/cp_stop_check
index 8eb8e05..994e2d9 100755
--- a/bin/plug_ins/Stop/cp_stop_check
+++ b/bin/plug_ins/Stop/cp_stop_check
@@ -5,56 +5,61 @@
"""
import argparse
-import sys
-import subprocess
import os
import re
+import subprocess
+import sys
+from gen_arg import *
+from gen_call_robot import *
+from gen_cmd import *
+from gen_misc import *
+from gen_plug_in_utils import *
from gen_print import *
from gen_valid import *
-from gen_arg import *
-from gen_misc import *
-from gen_cmd import *
-from gen_plug_in_utils import *
-from gen_call_robot import *
# Set exit_on_error for gen_valid functions.
set_exit_on_error(True)
# Initialize default plug-in parms..
STOP_REST_FAIL = 0
-STOP_COMMAND = ''
+STOP_COMMAND = ""
stop_test_rc = 2
STOP_VERIFY_HARDWARE_FAIL = 0
# Create parser object to process command line parameters and args.
parser = argparse.ArgumentParser(
- usage='%(prog)s [OPTIONS]',
- description="If the \"Stop\" plug-in is selected by the user, %(prog)s"
+ usage="%(prog)s [OPTIONS]",
+ description='If the "Stop" plug-in is selected by the user, %(prog)s'
+ " is called by OBMC Boot Test after each boot test. If %(prog)s returns"
- + " " + str(stop_test_rc) + ", then OBMC Boot Test will stop. The user"
+ + " "
+ + str(stop_test_rc)
+ + ", then OBMC Boot Test will stop. The user"
+ " may set environment variable STOP_COMMAND to contain any valid bash"
+ " command or program. %(prog)s will run this stop command. If the stop"
+ " command returns non-zero, then %(prog)s will return "
- + str(stop_test_rc) + ". %(prog)s recognizes some special values for"
- + " STOP_COMMAND: 1) \"FAIL\" means that OBMC Boot Test should stop"
- + " whenever a boot test fails. 2) \"ALL\" means that OBMC Boot Test"
+ + str(stop_test_rc)
+ + ". %(prog)s recognizes some special values for"
+ + ' STOP_COMMAND: 1) "FAIL" means that OBMC Boot Test should stop'
+ + ' whenever a boot test fails. 2) "ALL" means that OBMC Boot Test'
+ " should stop after any boot test. If environment variable"
+ " STOP_REST_FAIL is set, OBMC Boot Test will stop if REST commands are"
+ " no longer working.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- prefix_chars='-+')
+ prefix_chars="-+",
+)
# The stock_list will be passed to gen_get_options. We populate it with the names of stock parm options we
# want. These stock parms are pre-defined by gen_get_options.
-stock_list = [("test_mode", get_plug_default("test_mode", 0)),
- ("quiet", get_plug_default("quiet", 0)),
- ("debug", get_plug_default("debug", 0))]
+stock_list = [
+ ("test_mode", get_plug_default("test_mode", 0)),
+ ("quiet", get_plug_default("quiet", 0)),
+ ("debug", get_plug_default("debug", 0)),
+]
-def exit_function(signal_number=0,
- frame=None):
+def exit_function(signal_number=0, frame=None):
r"""
Execute whenever the program ends normally or with the signals that we catch (i.e. TERM, INT).
@@ -93,8 +98,9 @@
if not STOP_REST_FAIL:
return
- REDFISH_SUPPORT_TRANS_STATE = int(os.environ.get('REDFISH_SUPPORT_TRANS_STATE', 0)) or \
- int(os.environ.get('AUTOBOOT_REDFISH_SUPPORT_TRANS_STATE', 0))
+ REDFISH_SUPPORT_TRANS_STATE = int(
+ os.environ.get("REDFISH_SUPPORT_TRANS_STATE", 0)
+ ) or int(os.environ.get("AUTOBOOT_REDFISH_SUPPORT_TRANS_STATE", 0))
if REDFISH_SUPPORT_TRANS_STATE:
interface = "redfish"
@@ -103,23 +109,54 @@
print_timen("Checking to see whether %s commands are working." % interface)
init_robot_out_parms(get_plug_in_package_name() + "." + pgm_name + ".")
- lib_file_path = init_robot_file_path("lib/utils.robot") + ":" \
+ lib_file_path = (
+ init_robot_file_path("lib/utils.robot")
+ + ":"
+ init_robot_file_path("lib/gen_robot_print.py")
+ )
set_mod_global(lib_file_path)
- timeout = '0 seconds'
- interval = '1 second'
- keyword_string = "${match_state}= Create Dictionary %s=1 ;" % interface +\
- " ${state}= Wait State ${match_state} " + timeout + " " +\
- interval + " quiet=${1} ; Rpvar state"
+ timeout = "0 seconds"
+ interval = "1 second"
+ keyword_string = (
+ "${match_state}= Create Dictionary %s=1 ;" % interface
+ + " ${state}= Wait State ${match_state} "
+ + timeout
+ + " "
+ + interval
+ + " quiet=${1} ; Rpvar state"
+ )
set_mod_global(keyword_string)
- cmd_buf = create_robot_cmd_string("extended/run_keyword.robot", OPENBMC_HOST, SSH_PORT, HTTPS_PORT,
- REST_USERNAME, REST_PASSWORD, OPENBMC_USERNAME, OPENBMC_PASSWORD,
- REDFISH_SUPPORT_TRANS_STATE, keyword_string, lib_file_path, quiet,
- test_mode, debug, outputdir, output, log, report, loglevel)
+ cmd_buf = create_robot_cmd_string(
+ "extended/run_keyword.robot",
+ OPENBMC_HOST,
+ SSH_PORT,
+ HTTPS_PORT,
+ REST_USERNAME,
+ REST_PASSWORD,
+ OPENBMC_USERNAME,
+ OPENBMC_PASSWORD,
+ REDFISH_SUPPORT_TRANS_STATE,
+ keyword_string,
+ lib_file_path,
+ quiet,
+ test_mode,
+ debug,
+ outputdir,
+ output,
+ log,
+ report,
+ loglevel,
+ )
if not robot_cmd_fnc(cmd_buf):
- print_timen("The caller wishes to stop test execution if %s commands are failing." % interface)
+ print_timen(
+ "The caller wishes to stop test execution if %s commands are"
+ " failing." % interface
+ )
stop_check()
- print_timen("%s commands are working so no reason as of yet to stop the test." % interface)
+ print_timen(
+ "%s commands are working so no reason as of yet to stop the test."
+ % interface
+ )
def esel_stop_check():
@@ -131,10 +168,15 @@
if STOP_ESEL_STOP_FILE_PATH == "":
return
- cmd_buf = "esel_stop_check --esel_stop_file_path=" + STOP_ESEL_STOP_FILE_PATH
+ cmd_buf = (
+ "esel_stop_check --esel_stop_file_path=" + STOP_ESEL_STOP_FILE_PATH
+ )
shell_rc, out_buf = shell_cmd(cmd_buf, show_err=0)
if shell_rc == stop_test_rc:
- print_timen("The caller wishes to stop test execution based on the presence of certain esel entries.")
+ print_timen(
+ "The caller wishes to stop test execution based on the presence of"
+ " certain esel entries."
+ )
stop_check()
@@ -147,12 +189,17 @@
if STOP_PEL_STOP_FILE_PATH == "":
return
- pel_txt_file_path = os.environ.get("AUTOBOOT_FFDC_DIR_PATH", "") + \
- os.environ.get("AUTOBOOT_FFDC_PREFIX", "") + "PEL_logs_list.json"
+ pel_txt_file_path = (
+ os.environ.get("AUTOBOOT_FFDC_DIR_PATH", "")
+ + os.environ.get("AUTOBOOT_FFDC_PREFIX", "")
+ + "PEL_logs_list.json"
+ )
if not os.path.isfile(pel_txt_file_path):
- qprint_timen("The following file was not present so no further"
- + " action will be taken.")
+ qprint_timen(
+ "The following file was not present so no further"
+ + " action will be taken."
+ )
qprint_var(pel_txt_file_path)
return
@@ -161,8 +208,9 @@
# If pel_stop_file_path is unqualified and cannot be found, pre-pend
# default_stop_dir_path for the user.
pel_stop_file_path = os.environ.get("STOP_PEL_STOP_FILE_PATH", "")
- if not os.path.isfile(pel_stop_file_path) and \
- os.path.isfile(default_stop_dir_path + pel_stop_file_path):
+ if not os.path.isfile(pel_stop_file_path) and os.path.isfile(
+ default_stop_dir_path + pel_stop_file_path
+ ):
pel_stop_file_path = default_stop_dir_path + pel_stop_file_path
qprint_timen("Using default stop file path.")
qprint_var(pel_stop_file_path)
@@ -171,28 +219,31 @@
pel_stop_list = file_to_list(pel_stop_file_path, newlines=0, comments=0)
if len(pel_stop_list) == 0:
- print_timen("There are no records to process in "
- + pel_stop_file_path + ".")
+ print_timen(
+ "There are no records to process in " + pel_stop_file_path + "."
+ )
return
pel_all_list = file_to_list(pel_txt_file_path, newlines=0, comments=0)
if len(pel_all_list) == 0:
- print_timen("There are no records to process in "
- + pel_txt_file_path + ".")
+ print_timen(
+ "There are no records to process in " + pel_txt_file_path + "."
+ )
return
for stop_pel in pel_stop_list:
for pel_all in pel_all_list:
pel_match = re.search(".*SRC.*" + stop_pel + ".*", pel_all)
if pel_match:
- print_timen("The caller wishes to stop test execution based on "
- + "the presence of certain PEL entries.")
+ print_timen(
+ "The caller wishes to stop test execution based on "
+ + "the presence of certain PEL entries."
+ )
stop_check()
def main():
-
gen_setup()
print_plug_in_header()
@@ -217,10 +268,13 @@
pel_stop_check()
if STOP_VERIFY_HARDWARE_FAIL:
- hardware_error_found = restore_plug_in_value(0, 'Verify_hardware')
+ hardware_error_found = restore_plug_in_value(0, "Verify_hardware")
if hardware_error_found:
- print_timen("The caller wishes to stop test execution when the Verify_hardware plug-in detects a"
- + " hardware error.")
+ print_timen(
+ "The caller wishes to stop test execution when the"
+ " Verify_hardware plug-in detects a"
+ + " hardware error."
+ )
stop_check()
qprint_timen("The caller does not wish to stop the test run.")
diff --git a/bin/print_ffdc_functions b/bin/print_ffdc_functions
index 7988195..632841c 100755
--- a/bin/print_ffdc_functions
+++ b/bin/print_ffdc_functions
@@ -9,10 +9,10 @@
save_path_0 = sys.path[0]
del sys.path[0]
-from gen_arg import * # NOQA
-from gen_print import * # NOQA
-from gen_valid import * # NOQA
-from openbmc_ffdc_list import * # NOQA
+from gen_arg import * # NOQA
+from gen_print import * # NOQA
+from gen_valid import * # NOQA
+from openbmc_ffdc_list import * # NOQA
# Restore sys.path[0].
sys.path.insert(0, save_path_0)
@@ -22,8 +22,12 @@
parser = argparse.ArgumentParser(
usage="%(prog)s [OPTIONS]",
- description="%(prog)s will print a colon-delimited list of all valid OBMC FFDC functions.\n\nExample:"
- + "\n\n\nDump Log:FFDC Generic Report:Get Request FFDC:SEL Log:BMC Specific Files:Sys Inventory Files"
+ description=(
+ "%(prog)s will print a colon-delimited list of all valid OBMC FFDC"
+ " functions.\n\nExample:"
+ )
+ + "\n\n\nDump Log:FFDC Generic Report:Get Request FFDC:SEL Log:BMC"
+ " Specific Files:Sys Inventory Files"
+ ":Core Files:OS FFDC:Dump Files",
formatter_class=argparse.RawDescriptionHelpFormatter,
prefix_chars="-+",
@@ -68,7 +72,6 @@
def main():
-
gen_get_options(parser, stock_list)
validate_parms()
diff --git a/bin/process_plug_in_packages.py b/bin/process_plug_in_packages.py
index bd389c8..febbd53 100755
--- a/bin/process_plug_in_packages.py
+++ b/bin/process_plug_in_packages.py
@@ -4,13 +4,20 @@
See help text for details.
"""
-import sys
-import subprocess
import os
+import subprocess
+import sys
save_dir_path = sys.path.pop(0)
-modules = ['gen_arg', 'gen_print', 'gen_valid', 'gen_plug_in', 'gen_cmd', 'gen_misc']
+modules = [
+ "gen_arg",
+ "gen_print",
+ "gen_valid",
+ "gen_plug_in",
+ "gen_cmd",
+ "gen_misc",
+]
for module in modules:
exec("from " + module + " import *")
@@ -18,98 +25,105 @@
# Create parser object.
parser = argparse.ArgumentParser(
- usage='%(prog)s [OPTIONS]',
+ usage="%(prog)s [OPTIONS]",
description="%(prog)s will process the plug-in packages passed to it."
- + " A plug-in package is essentially a directory containing"
- + " one or more call point programs. Each of these call point"
- + " programs must have a prefix of \"cp_\". When calling"
- + " %(prog)s, a user must provide a call_point parameter"
- + " (described below). For each plug-in package passed,"
- + " %(prog)s will check for the presence of the specified call"
- + " point program in the plug-in directory. If it is found,"
- + " %(prog)s will run it. It is the responsibility of the"
- + " caller to set any environment variables needed by the call"
- + " point programs.\n\nAfter each call point program"
- + " has been run, %(prog)s will print the following values in"
- + " the following formats for use by the calling program:\n"
- + " failed_plug_in_name: <failed plug-in value,"
- + " if any>\n shell_rc: "
- + "<shell return code value of last call point program - this"
- + " will be printed in hexadecimal format. Also, be aware"
- + " that if a call point program returns a value it will be"
- + " shifted left 2 bytes (e.g. rc of 2 will be printed as"
- + " 0x00000200). That is because the rightmost byte is"
- + " reserved for errors in calling the call point program"
- + " rather than errors generated by the call point program.>",
+ + " A plug-in package is essentially a directory containing"
+ + " one or more call point programs. Each of these call point"
+ + ' programs must have a prefix of "cp_". When calling'
+ + " %(prog)s, a user must provide a call_point parameter"
+ + " (described below). For each plug-in package passed,"
+ + " %(prog)s will check for the presence of the specified call"
+ + " point program in the plug-in directory. If it is found,"
+ + " %(prog)s will run it. It is the responsibility of the"
+ + " caller to set any environment variables needed by the call"
+ + " point programs.\n\nAfter each call point program"
+ + " has been run, %(prog)s will print the following values in"
+ + " the following formats for use by the calling program:\n"
+ + " failed_plug_in_name: <failed plug-in value,"
+ + " if any>\n shell_rc: "
+ + "<shell return code value of last call point program - this"
+ + " will be printed in hexadecimal format. Also, be aware"
+ + " that if a call point program returns a value it will be"
+ + " shifted left 2 bytes (e.g. rc of 2 will be printed as"
+ + " 0x00000200). That is because the rightmost byte is"
+ + " reserved for errors in calling the call point program"
+ + " rather than errors generated by the call point program.>",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- prefix_chars='-+')
+ prefix_chars="-+",
+)
# Create arguments.
parser.add_argument(
- 'plug_in_dir_paths',
- nargs='?',
+ "plug_in_dir_paths",
+ nargs="?",
default="",
- help=plug_in_dir_paths_help_text + default_string)
+ help=plug_in_dir_paths_help_text + default_string,
+)
parser.add_argument(
- '--call_point',
+ "--call_point",
default="setup",
required=True,
- help='The call point program name. This value must not include the'
- + ' "cp_" prefix. For each plug-in package passed to this program,'
- + ' the specified call_point program will be called if it exists in'
- + ' the plug-in directory.' + default_string)
+ help="The call point program name. This value must not include the"
+ + ' "cp_" prefix. For each plug-in package passed to this program,'
+ + " the specified call_point program will be called if it exists in"
+ + " the plug-in directory."
+ + default_string,
+)
parser.add_argument(
- '--allow_shell_rc',
+ "--allow_shell_rc",
default="0x00000000",
- help='The user may supply a value other than zero to indicate an'
- + ' acceptable non-zero return code. For example, if this value'
- + ' equals 0x00000200, it means that for each plug-in call point that'
- + ' runs, a 0x00000200 will not be counted as a failure. See note'
- + ' above regarding left-shifting of return codes.' + default_string)
+ help="The user may supply a value other than zero to indicate an"
+ + " acceptable non-zero return code. For example, if this value"
+ + " equals 0x00000200, it means that for each plug-in call point that"
+ + " runs, a 0x00000200 will not be counted as a failure. See note"
+ + " above regarding left-shifting of return codes."
+ + default_string,
+)
parser.add_argument(
- '--stop_on_plug_in_failure',
+ "--stop_on_plug_in_failure",
default=1,
type=int,
choices=[1, 0],
- help='If this parameter is set to 1, this program will stop and return '
- + 'non-zero if the call point program from any plug-in directory '
- + 'fails. Conversely, if it is set to false, this program will run '
- + 'the call point program from each and every plug-in directory '
- + 'regardless of their return values. Typical example cases where '
- + 'you\'d want to run all plug-in call points regardless of success '
- + 'or failure would be "cleanup" or "ffdc" call points.')
+ help="If this parameter is set to 1, this program will stop and return "
+ + "non-zero if the call point program from any plug-in directory "
+ + "fails. Conversely, if it is set to false, this program will run "
+ + "the call point program from each and every plug-in directory "
+ + "regardless of their return values. Typical example cases where "
+ + "you'd want to run all plug-in call points regardless of success "
+ + 'or failure would be "cleanup" or "ffdc" call points.',
+)
parser.add_argument(
- '--stop_on_non_zero_rc',
+ "--stop_on_non_zero_rc",
default=0,
type=int,
choices=[1, 0],
- help='If this parm is set to 1 and a plug-in call point program returns '
- + 'a valid non-zero return code (see "allow_shell_rc" parm above),'
- + ' this program will stop processing and return 0 (success). Since'
- + ' this constitutes a successful exit, this would normally be used'
- + ' where the caller wishes to stop processing if one of the plug-in'
- + ' directory call point programs returns a special value indicating'
- + ' that some special case has been found. An example might be in'
- + ' calling some kind of "check_errl" call point program. Such a'
- + ' call point program might return a 2 (i.e. 0x00000200) to indicate'
- + ' that a given error log entry was found in an "ignore" list and is'
- + ' therefore to be ignored. That being the case, no other'
- + ' "check_errl" call point program would need to be called.'
- + default_string)
+ help="If this parm is set to 1 and a plug-in call point program returns "
+ + 'a valid non-zero return code (see "allow_shell_rc" parm above),'
+ + " this program will stop processing and return 0 (success). Since"
+ + " this constitutes a successful exit, this would normally be used"
+ + " where the caller wishes to stop processing if one of the plug-in"
+ + " directory call point programs returns a special value indicating"
+ + " that some special case has been found. An example might be in"
+ + ' calling some kind of "check_errl" call point program. Such a'
+ + " call point program might return a 2 (i.e. 0x00000200) to indicate"
+ + ' that a given error log entry was found in an "ignore" list and is'
+ + " therefore to be ignored. That being the case, no other"
+ + ' "check_errl" call point program would need to be called.'
+ + default_string,
+)
parser.add_argument(
- '--mch_class',
- default="obmc",
- help=mch_class_help_text + default_string)
+ "--mch_class", default="obmc", help=mch_class_help_text + default_string
+)
# Populate stock_list with options we want.
stock_list = [("test_mode", 0), ("quiet", 1), ("debug", 0)]
-original_path = os.environ.get('PATH')
+original_path = os.environ.get("PATH")
def validate_parms():
@@ -127,9 +141,7 @@
set_pgm_arg(allow_shell_rc)
-def run_pgm(plug_in_dir_path,
- call_point,
- allow_shell_rc):
+def run_pgm(plug_in_dir_path, call_point, allow_shell_rc):
r"""
Run the call point program in the given plug_in_dir_path. Return the following:
rc The return code - 0 = PASS, 1 = FAIL.
@@ -159,8 +171,10 @@
# No such call point in this plug in dir path. This is legal so we return 0, etc.
return rc, shell_rc, failed_plug_in_name
- print("------------------------------------------------- Starting plug-"
- + "in -----------------------------------------------")
+ print(
+ "------------------------------------------------- Starting plug-"
+ + "in -----------------------------------------------"
+ )
print_timen("Running " + plug_in_name + "/" + cp_prefix + call_point + ".")
@@ -170,26 +184,36 @@
else:
auto_status_file_prefix = ""
auto_status_file_prefix += plug_in_name + ".cp_" + call_point
- status_dir_path =\
- add_trailing_slash(os.environ.get("STATUS_DIR_PATH",
- os.environ['HOME']
- + "/status/"))
+ status_dir_path = add_trailing_slash(
+ os.environ.get("STATUS_DIR_PATH", os.environ["HOME"] + "/status/")
+ )
if not os.path.isdir(status_dir_path):
- AUTOBOOT_EXECDIR = \
- add_trailing_slash(os.environ.get("AUTOBOOT_EXECDIR", ""))
+ AUTOBOOT_EXECDIR = add_trailing_slash(
+ os.environ.get("AUTOBOOT_EXECDIR", "")
+ )
status_dir_path = AUTOBOOT_EXECDIR + "logs/"
if not os.path.exists(status_dir_path):
os.makedirs(status_dir_path)
- status_file_name = auto_status_file_prefix + "." + file_date_time_stamp() \
- + ".status"
- auto_status_file_subcmd = "auto_status_file.py --status_dir_path=" \
- + status_dir_path + " --status_file_name=" + status_file_name \
- + " --quiet=1 --show_url=1 --prefix=" \
- + auto_status_file_prefix + " --stdout=" + str(stdout) + " "
+ status_file_name = (
+ auto_status_file_prefix + "." + file_date_time_stamp() + ".status"
+ )
+ auto_status_file_subcmd = (
+ "auto_status_file.py --status_dir_path="
+ + status_dir_path
+ + " --status_file_name="
+ + status_file_name
+ + " --quiet=1 --show_url=1 --prefix="
+ + auto_status_file_prefix
+ + " --stdout="
+ + str(stdout)
+ + " "
+ )
cmd_buf = "PATH=" + plug_in_dir_path.rstrip("/") + ":${PATH}"
print_issuing(cmd_buf)
- os.environ['PATH'] = plug_in_dir_path.rstrip("/") + os.pathsep + original_path
+ os.environ["PATH"] = (
+ plug_in_dir_path.rstrip("/") + os.pathsep + original_path
+ )
cmd_buf = auto_status_file_subcmd + cp_prefix + call_point
print_issuing(cmd_buf)
@@ -205,11 +229,16 @@
failed_plug_in_name = plug_in_name + "/" + cp_prefix + call_point
if failed_plug_in_name != "" and not stdout:
# Use tail to avoid double-printing of status_file_url.
- shell_cmd("tail -n +2 " + status_dir_path + status_file_name, quiet=1,
- print_output=1)
+ shell_cmd(
+ "tail -n +2 " + status_dir_path + status_file_name,
+ quiet=1,
+ print_output=1,
+ )
- print("------------------------------------------------- Ending plug-in"
- + " -------------------------------------------------")
+ print(
+ "------------------------------------------------- Ending plug-in"
+ + " -------------------------------------------------"
+ )
if failed_plug_in_name != "":
print_var(failed_plug_in_name)
print_var(shell_rc, hexa())
@@ -218,10 +247,9 @@
def main():
-
gen_setup()
- set_term_options(term_requests='children')
+ set_term_options(term_requests="children")
# Access program parameter globals.
global plug_in_dir_paths
@@ -230,8 +258,9 @@
global stop_on_plug_in_failure
global stop_on_non_zero_rc
- plug_in_packages_list = return_plug_in_packages_list(plug_in_dir_paths,
- mch_class)
+ plug_in_packages_list = return_plug_in_packages_list(
+ plug_in_dir_paths, mch_class
+ )
qprint_var(plug_in_packages_list)
qprint("\n")
@@ -245,15 +274,18 @@
ret_code = 0
for plug_in_dir_path in plug_in_packages_list:
- rc, shell_rc, failed_plug_in_name = \
- run_pgm(plug_in_dir_path, call_point, allow_shell_rc)
+ rc, shell_rc, failed_plug_in_name = run_pgm(
+ plug_in_dir_path, call_point, allow_shell_rc
+ )
if rc != 0:
ret_code = 1
if stop_on_plug_in_failure:
break
if shell_rc != 0 and stop_on_non_zero_rc:
- qprint_time("Stopping on non-zero shell return code as requested"
- + " by caller.\n")
+ qprint_time(
+ "Stopping on non-zero shell return code as requested"
+ + " by caller.\n"
+ )
break
if ret_code != 0:
diff --git a/bin/prop_call.py b/bin/prop_call.py
index e352d55..9e18f71 100755
--- a/bin/prop_call.py
+++ b/bin/prop_call.py
@@ -16,56 +16,56 @@
my_program --test_mode=y --quiet=n file1 file2 file3
"""
-import sys
import os
+import sys
save_path_0 = sys.path[0]
del sys.path[0]
-from gen_arg import * # NOQA
-from gen_print import * # NOQA
-from gen_valid import * # NOQA
-from gen_misc import * # NOQA
-from gen_cmd import * # NOQA
+from gen_arg import * # NOQA
+from gen_cmd import * # NOQA
+from gen_misc import * # NOQA
+from gen_print import * # NOQA
+from gen_valid import * # NOQA
# Restore sys.path[0].
sys.path.insert(0, save_path_0)
parser = argparse.ArgumentParser(
- usage='%(prog)s [OPTIONS]',
+ usage="%(prog)s [OPTIONS]",
description="%(prog)s will call a program using parameters retrieved"
+ " from the given properties file.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- prefix_chars='-+')
+ prefix_chars="-+",
+)
parser.add_argument(
- '--prop_dir_path',
+ "--prop_dir_path",
default=os.environ.get("PROP_DIR_PATH", os.getcwd()),
- help='The path to the directory that contains the properties file.'
+ help="The path to the directory that contains the properties file."
+ ' The default value is environment variable "PROP_DIR_PATH", if'
- + ' set. Otherwise, it is the current working directory.')
+ + " set. Otherwise, it is the current working directory.",
+)
parser.add_argument(
- '--prop_file_name',
- help='The path to a properties file that contains the parameters to'
+ "--prop_file_name",
+ help="The path to a properties file that contains the parameters to"
+ ' pass to the program. If the properties file has a ".properties"'
- + ' extension, the caller need not specify the extension. The format'
- + ' of each line in the properties file should be as follows:'
- + ' <parm_name=parm_value>. Do not quote the parm value. To specify'
+ + " extension, the caller need not specify the extension. The format"
+ + " of each line in the properties file should be as follows:"
+ + " <parm_name=parm_value>. Do not quote the parm value. To specify"
+ ' positional parms, use a parm name of "pos". For example: pos=this'
- ' value')
+ " value",
+)
-parser.add_argument(
- 'program_name',
- help='The name of the program to be run.')
+parser.add_argument("program_name", help="The name of the program to be run.")
# Populate stock_list with options we want.
stock_list = [("test_mode", 0), ("quiet", 1), ("debug", 0)]
-def exit_function(signal_number=0,
- frame=None):
+def exit_function(signal_number=0, frame=None):
r"""
Execute whenever the program ends normally or with the signals that we catch (i.e. TERM, INT).
"""
@@ -76,8 +76,7 @@
qprint_pgm_footer()
-def signal_handler(signal_number,
- frame):
+def signal_handler(signal_number, frame):
r"""
Handle signals. Without a function to catch a SIGTERM or SIGINT, our program would terminate immediately
with return code 143 and without calling our exit_function.
@@ -127,7 +126,6 @@
def main():
-
if not gen_get_options(parser, stock_list):
return False
diff --git a/bin/validate_plug_ins.py b/bin/validate_plug_ins.py
index 8e3ed1d..f5fa18a 100755
--- a/bin/validate_plug_ins.py
+++ b/bin/validate_plug_ins.py
@@ -2,6 +2,7 @@
import os
import sys
+
try:
import __builtin__
except ImportError:
@@ -17,9 +18,9 @@
save_path_0 = sys.path[0]
del sys.path[0]
-from gen_print import * # NOQA
-from gen_arg import * # NOQA
-from gen_plug_in import * # NOQA
+from gen_arg import * # NOQA
+from gen_plug_in import * # NOQA
+from gen_print import * # NOQA
# Restore sys.path[0].
sys.path.insert(0, save_path_0)
@@ -29,32 +30,32 @@
# Create parser object.
parser = argparse.ArgumentParser(
- usage='%(prog)s [OPTIONS] [PLUG_IN_DIR_PATHS]',
+ usage="%(prog)s [OPTIONS] [PLUG_IN_DIR_PATHS]",
description="%(prog)s will validate the plug-in packages passed to it."
- + " It will also print a list of the absolute plug-in"
- + " directory paths for use by the calling program.",
+ + " It will also print a list of the absolute plug-in"
+ + " directory paths for use by the calling program.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- prefix_chars='-+')
+ prefix_chars="-+",
+)
# Create arguments.
parser.add_argument(
- 'plug_in_dir_paths',
- nargs='?',
+ "plug_in_dir_paths",
+ nargs="?",
default="",
- help=plug_in_dir_paths_help_text + default_string)
+ help=plug_in_dir_paths_help_text + default_string,
+)
parser.add_argument(
- '--mch_class',
- default="obmc",
- help=mch_class_help_text + default_string)
+ "--mch_class", default="obmc", help=mch_class_help_text + default_string
+)
# The stock_list will be passed to gen_get_options. We populate it with the names of stock parm options we
# want. These stock parms are pre-defined by gen_get_options.
stock_list = [("test_mode", 0), ("quiet", 1), ("debug", 0)]
-def exit_function(signal_number=0,
- frame=None):
+def exit_function(signal_number=0, frame=None):
r"""
Execute whenever the program ends normally or with the signals that we catch (i.e. TERM, INT).
"""
@@ -112,8 +113,9 @@
global plug_in_dir_paths
global mch_class
- plug_in_packages_list = return_plug_in_packages_list(plug_in_dir_paths,
- mch_class)
+ plug_in_packages_list = return_plug_in_packages_list(
+ plug_in_dir_paths, mch_class
+ )
qprint_var(plug_in_packages_list)
# As stated in the help text, this program must print the full paths of each selected plug in.
diff --git a/bin/websocket_monitor.py b/bin/websocket_monitor.py
index 3425057..0f0a5a4 100755
--- a/bin/websocket_monitor.py
+++ b/bin/websocket_monitor.py
@@ -5,18 +5,19 @@
"""
import json
-import sys
-import websocket
import ssl
+import sys
+
import requests
+import websocket
from retrying import retry
save_path_0 = sys.path[0]
del sys.path[0]
-from gen_print import * # NOQA
-from gen_arg import * # NOQA
-from gen_valid import * # NOQA
+from gen_arg import * # NOQA
+from gen_print import * # NOQA
+from gen_valid import * # NOQA
# Restore sys.path[0].
sys.path.insert(0, save_path_0)
@@ -26,37 +27,39 @@
parser = argparse.ArgumentParser(
- usage='%(prog)s [OPTIONS]',
+ usage="%(prog)s [OPTIONS]",
description="%(prog)s will open a websocket session on a remote OpenBMC. "
- + "When an eSEL is created on that BMC, the monitor will receive "
- + "notice over websocket that the eSEL was created "
- + "and it will print a message.",
+ + "When an eSEL is created on that BMC, the monitor will receive "
+ + "notice over websocket that the eSEL was created "
+ + "and it will print a message.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- prefix_chars='-+')
+ prefix_chars="-+",
+)
parser.add_argument(
- 'openbmc_host',
- default='',
- help='The BMC host name or IP address.')
+ "openbmc_host", default="", help="The BMC host name or IP address."
+)
parser.add_argument(
- '--openbmc_username',
- default='root',
- help='The userid for the open BMC system.')
+ "--openbmc_username",
+ default="root",
+ help="The userid for the open BMC system.",
+)
parser.add_argument(
- '--openbmc_password',
- default='',
- help='The password for the open BMC system.')
+ "--openbmc_password",
+ default="",
+ help="The password for the open BMC system.",
+)
parser.add_argument(
- '--monitor_type',
- choices=['logging', 'dump'],
- default='logging',
- help='The type of notifications from websocket to monitor.')
+ "--monitor_type",
+ choices=["logging", "dump"],
+ default="logging",
+ help="The type of notifications from websocket to monitor.",
+)
stock_list = [("test_mode", 0), ("quiet", 0), ("debug", 0)]
-def exit_function(signal_number=0,
- frame=None):
+def exit_function(signal_number=0, frame=None):
r"""
Execute whenever the program ends normally or with the signals that we
catch (i.e. TERM, INT).
@@ -67,8 +70,7 @@
qprint_pgm_footer()
-def signal_handler(signal_number,
- frame):
+def signal_handler(signal_number, frame):
r"""
Handle signals. Without a function to catch a SIGTERM or SIGINT, the
program would terminate immediately with return code 143 and without
@@ -95,14 +97,12 @@
valid_value(openbmc_username)
valid_value(openbmc_password)
global monitoring_uri
- monitoring_uri = '/xyz/openbmc_project/' + monitor_type
+ monitoring_uri = "/xyz/openbmc_project/" + monitor_type
gen_post_validation(exit_function, signal_handler)
@retry(stop_max_attempt_number=3, wait_fixed=1000)
-def login(openbmc_host,
- openbmc_username,
- openbmc_password):
+def login(openbmc_host, openbmc_username, openbmc_password):
r"""
Log into the BMC and return the session object.
@@ -114,15 +114,19 @@
qprint_executing()
- http_header = {'Content-Type': 'application/json'}
+ http_header = {"Content-Type": "application/json"}
session = requests.session()
- response = session.post('https://' + openbmc_host + '/login', headers=http_header,
- json={"data": [openbmc_username, openbmc_password]},
- verify=False, timeout=30)
+ response = session.post(
+ "https://" + openbmc_host + "/login",
+ headers=http_header,
+ json={"data": [openbmc_username, openbmc_password]},
+ verify=False,
+ timeout=30,
+ )
valid_value(response.status_code, valid_values=[200])
login_response = json.loads(response.text)
qprint_var(login_response)
- valid_value(login_response['status'], valid_values=['ok'])
+ valid_value(login_response["status"], valid_values=["ok"])
return session
@@ -145,12 +149,14 @@
# or
# /xyz/openbmc_project/dump/entry/1","properties":{"Size":186180}}').
- if monitoring_uri + '/entry' in message:
- if 'Id' in message:
- qprint_timen('eSEL received over websocket interface.')
+ if monitoring_uri + "/entry" in message:
+ if "Id" in message:
+ qprint_timen("eSEL received over websocket interface.")
websocket_obj.close()
- elif 'Size' in message:
- qprint_timen('Dump notification received over websocket interface.')
+ elif "Size" in message:
+ qprint_timen(
+ "Dump notification received over websocket interface."
+ )
websocket_obj.close()
@@ -212,17 +218,23 @@
session = login(openbmc_host, openbmc_username, openbmc_password)
qprint_timen("Registering websocket handlers.")
cookies = session.cookies.get_dict()
- cookies = sprint_var(cookies, fmt=no_header() | strip_brackets(),
- col1_width=0, trailing_char="",
- delim="=").replace("\n", ";")
+ cookies = sprint_var(
+ cookies,
+ fmt=no_header() | strip_brackets(),
+ col1_width=0,
+ trailing_char="",
+ delim="=",
+ ).replace("\n", ";")
# Register the event handlers. When an ESEL is created by the system
# under test, the on_message() handler will be called.
- websocket_obj = websocket.WebSocketApp("wss://" + openbmc_host + "/subscribe",
- on_message=on_message,
- on_error=on_error,
- on_close=on_close,
- on_open=on_open,
- cookie=cookies)
+ websocket_obj = websocket.WebSocketApp(
+ "wss://" + openbmc_host + "/subscribe",
+ on_message=on_message,
+ on_error=on_error,
+ on_close=on_close,
+ on_open=on_open,
+ cookie=cookies,
+ )
qprint_timen("Completed registering of websocket handlers.")
websocket_obj.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
diff --git a/data/Palmetto.py b/data/Palmetto.py
index 24842de..02c9a44 100755
--- a/data/Palmetto.py
+++ b/data/Palmetto.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
-HOME_PATH = './'
-CACHE_PATH = '/var/cache/obmc/'
+HOME_PATH = "./"
+CACHE_PATH = "/var/cache/obmc/"
FLASH_DOWNLOAD_PATH = "/tmp"
GPIO_BASE = 320
SYSTEM_NAME = "Palmetto"
@@ -12,298 +12,375 @@
# - a process emits a GotoSystemState signal with state name to goto
# - objects specified in EXIT_STATE_DEPEND have started
SYSTEM_STATES = [
- 'BASE_APPS',
- 'BMC_STARTING',
- 'BMC_READY',
- 'HOST_POWERING_ON',
- 'HOST_POWERED_ON',
- 'HOST_BOOTING',
- 'HOST_BOOTED',
- 'HOST_POWERED_OFF',
+ "BASE_APPS",
+ "BMC_STARTING",
+ "BMC_READY",
+ "HOST_POWERING_ON",
+ "HOST_POWERED_ON",
+ "HOST_BOOTING",
+ "HOST_BOOTED",
+ "HOST_POWERED_OFF",
]
EXIT_STATE_DEPEND = {
- 'BASE_APPS': {
- '/org/openbmc/sensors': 0,
+ "BASE_APPS": {
+ "/org/openbmc/sensors": 0,
},
- 'BMC_STARTING': {
- '/org/openbmc/control/chassis0': 0,
- '/org/openbmc/control/power0': 0,
- '/org/openbmc/control/led/identify': 0,
- '/org/openbmc/control/host0': 0,
- '/org/openbmc/control/flash/bios': 0,
- }
+ "BMC_STARTING": {
+ "/org/openbmc/control/chassis0": 0,
+ "/org/openbmc/control/power0": 0,
+ "/org/openbmc/control/led/identify": 0,
+ "/org/openbmc/control/host0": 0,
+ "/org/openbmc/control/flash/bios": 0,
+ },
}
# method will be called when state is entered
ENTER_STATE_CALLBACK = {
- 'HOST_POWERED_ON': {
- 'boot': {
- 'bus_name': 'org.openbmc.control.Host',
- 'obj_name': '/org/openbmc/control/host0',
- 'interface_name': 'org.openbmc.control.Host',
+ "HOST_POWERED_ON": {
+ "boot": {
+ "bus_name": "org.openbmc.control.Host",
+ "obj_name": "/org/openbmc/control/host0",
+ "interface_name": "org.openbmc.control.Host",
}
},
- 'BMC_READY': {
- 'setOn': {
- 'bus_name': 'org.openbmc.control.led',
- 'obj_name': '/org/openbmc/control/led/identify',
- 'interface_name': 'org.openbmc.Led',
+ "BMC_READY": {
+ "setOn": {
+ "bus_name": "org.openbmc.control.led",
+ "obj_name": "/org/openbmc/control/led/identify",
+ "interface_name": "org.openbmc.Led",
},
- 'init': {
- 'bus_name': 'org.openbmc.control.Flash',
- 'obj_name': '/org/openbmc/control/flash/bios',
- 'interface_name': 'org.openbmc.Flash',
+ "init": {
+ "bus_name": "org.openbmc.control.Flash",
+ "obj_name": "/org/openbmc/control/flash/bios",
+ "interface_name": "org.openbmc.Flash",
},
- }
+ },
}
APPS = {
- 'startup_hacks': {
- 'system_state': 'BASE_APPS',
- 'start_process': True,
- 'monitor_process': False,
- 'process_name': 'startup_hacks.sh',
+ "startup_hacks": {
+ "system_state": "BASE_APPS",
+ "start_process": True,
+ "monitor_process": False,
+ "process_name": "startup_hacks.sh",
},
- 'inventory': {
- 'system_state': 'BMC_STARTING',
- 'start_process': True,
- 'monitor_process': True,
- 'process_name': 'inventory_items.py',
- 'args': [SYSTEM_NAME]
+ "inventory": {
+ "system_state": "BMC_STARTING",
+ "start_process": True,
+ "monitor_process": True,
+ "process_name": "inventory_items.py",
+ "args": [SYSTEM_NAME],
},
- 'pcie_present': {
- 'system_state': 'HOST_POWERED_ON',
- 'start_process': False,
- 'monitor_process': False,
- 'process_name': 'pcie_slot_present.exe',
+ "pcie_present": {
+ "system_state": "HOST_POWERED_ON",
+ "start_process": False,
+ "monitor_process": False,
+ "process_name": "pcie_slot_present.exe",
},
- 'virtual_sensors': {
- 'system_state': 'BMC_STARTING',
- 'start_process': True,
- 'monitor_process': True,
- 'process_name': 'hwmon.py',
- 'args': [SYSTEM_NAME]
+ "virtual_sensors": {
+ "system_state": "BMC_STARTING",
+ "start_process": True,
+ "monitor_process": True,
+ "process_name": "hwmon.py",
+ "args": [SYSTEM_NAME],
},
- 'sensor_manager': {
- 'system_state': 'BASE_APPS',
- 'start_process': True,
- 'monitor_process': True,
- 'process_name': 'sensor_manager2.py',
- 'args': [SYSTEM_NAME]
+ "sensor_manager": {
+ "system_state": "BASE_APPS",
+ "start_process": True,
+ "monitor_process": True,
+ "process_name": "sensor_manager2.py",
+ "args": [SYSTEM_NAME],
},
- 'host_watchdog': {
- 'system_state': 'BMC_STARTING',
- 'start_process': True,
- 'monitor_process': True,
- 'process_name': 'host_watchdog.exe',
+ "host_watchdog": {
+ "system_state": "BMC_STARTING",
+ "start_process": True,
+ "monitor_process": True,
+ "process_name": "host_watchdog.exe",
},
- 'power_control': {
- 'system_state': 'BMC_STARTING',
- 'start_process': True,
- 'monitor_process': True,
- 'process_name': 'power_control.exe',
- 'args': ['3000', '10']
+ "power_control": {
+ "system_state": "BMC_STARTING",
+ "start_process": True,
+ "monitor_process": True,
+ "process_name": "power_control.exe",
+ "args": ["3000", "10"],
},
- 'power_button': {
- 'system_state': 'BMC_STARTING',
- 'start_process': False,
- 'monitor_process': False,
- 'process_name': 'button_power.exe',
+ "power_button": {
+ "system_state": "BMC_STARTING",
+ "start_process": False,
+ "monitor_process": False,
+ "process_name": "button_power.exe",
},
- 'led_control': {
- 'system_state': 'BMC_STARTING',
- 'start_process': True,
- 'monitor_process': True,
- 'process_name': 'led_controller.exe',
+ "led_control": {
+ "system_state": "BMC_STARTING",
+ "start_process": True,
+ "monitor_process": True,
+ "process_name": "led_controller.exe",
},
- 'flash_control': {
- 'system_state': 'BMC_STARTING',
- 'start_process': True,
- 'monitor_process': True,
- 'process_name': 'flash_bios.exe',
+ "flash_control": {
+ "system_state": "BMC_STARTING",
+ "start_process": True,
+ "monitor_process": True,
+ "process_name": "flash_bios.exe",
},
- 'bmc_flash_control': {
- 'system_state': 'BMC_STARTING',
- 'start_process': True,
- 'monitor_process': True,
- 'process_name': 'bmc_update.py',
+ "bmc_flash_control": {
+ "system_state": "BMC_STARTING",
+ "start_process": True,
+ "monitor_process": True,
+ "process_name": "bmc_update.py",
},
- 'download_manager': {
- 'system_state': 'BMC_STARTING',
- 'start_process': True,
- 'monitor_process': True,
- 'process_name': 'download_manager.py',
- 'args': [SYSTEM_NAME]
+ "download_manager": {
+ "system_state": "BMC_STARTING",
+ "start_process": True,
+ "monitor_process": True,
+ "process_name": "download_manager.py",
+ "args": [SYSTEM_NAME],
},
- 'host_control': {
- 'system_state': 'BMC_STARTING',
- 'start_process': True,
- 'monitor_process': True,
- 'process_name': 'control_host.exe',
+ "host_control": {
+ "system_state": "BMC_STARTING",
+ "start_process": True,
+ "monitor_process": True,
+ "process_name": "control_host.exe",
},
- 'chassis_control': {
- 'system_state': 'BMC_STARTING',
- 'start_process': True,
- 'monitor_process': True,
- 'process_name': 'chassis_control.py',
+ "chassis_control": {
+ "system_state": "BMC_STARTING",
+ "start_process": True,
+ "monitor_process": True,
+ "process_name": "chassis_control.py",
},
- 'bmc_control': {
- 'system_state': 'BMC_STARTING',
- 'start_process': True,
- 'monitor_process': True,
- 'process_name': 'control_bmc.exe',
- }
+ "bmc_control": {
+ "system_state": "BMC_STARTING",
+ "start_process": True,
+ "monitor_process": True,
+ "process_name": "control_bmc.exe",
+ },
}
CACHED_INTERFACES = {
"org.openbmc.InventoryItem": True,
"org.openbmc.control.Chassis": True,
}
-INVENTORY_ROOT = '/org/openbmc/inventory'
+INVENTORY_ROOT = "/org/openbmc/inventory"
FRU_INSTANCES = {
- '<inventory_root>/system': {'fru_type': 'SYSTEM', 'is_fru': True, },
- '<inventory_root>/system/chassis': {'fru_type': 'SYSTEM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard': {'fru_type': 'MAIN_PLANAR', 'is_fru': True, },
-
- '<inventory_root>/system/chassis/fan0': {'fru_type': 'FAN', 'is_fru': True, },
- '<inventory_root>/system/chassis/fan1': {'fru_type': 'FAN', 'is_fru': True, },
- '<inventory_root>/system/chassis/fan2': {'fru_type': 'FAN', 'is_fru': True, },
- '<inventory_root>/system/chassis/fan3': {'fru_type': 'FAN', 'is_fru': True, },
- '<inventory_root>/system/chassis/fan4': {'fru_type': 'FAN', 'is_fru': True, },
-
- '<inventory_root>/system/chassis/motherboard/bmc': {'fru_type': 'BMC', 'is_fru': False,
- 'manufacturer': 'ASPEED'},
- '<inventory_root>/system/chassis/motherboard/cpu': {'fru_type': 'CPU', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/cpu/core0': {'fru_type': 'CORE',
- 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu/core1': {'fru_type': 'CORE',
- 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu/core2': {'fru_type': 'CORE',
- 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu/core3': {'fru_type': 'CORE',
- 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu/core4': {'fru_type': 'CORE',
- 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu/core5': {'fru_type': 'CORE',
- 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu/core6': {'fru_type': 'CORE',
- 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu/core7': {'fru_type': 'CORE',
- 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu/core8': {'fru_type': 'CORE',
- 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu/core9': {'fru_type': 'CORE',
- 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu/core10': {'fru_type': 'CORE',
- 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu/core11': {'fru_type': 'CORE',
- 'is_fru': False, },
-
- '<inventory_root>/system/chassis/motherboard/membuf': {'fru_type': 'MEMORY_BUFFER',
- 'is_fru': False, },
-
- '<inventory_root>/system/chassis/motherboard/dimm0': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm1': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm2': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm3': {'fru_type': 'DIMM', 'is_fru': True, },
-
- '<inventory_root>/system/chassis/io_board/pcie_slot0': {'fru_type': 'PCIE_CARD',
- 'is_fru': True, },
- '<inventory_root>/system/chassis/io_board/pcie_slot1': {'fru_type': 'PCIE_CARD',
- 'is_fru': True, },
-
- '<inventory_root>/system/systemevent': {'fru_type': 'SYSTEM_EVENT', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/refclock': {'fru_type': 'MAIN_PLANAR',
- 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/pcieclock': {'fru_type': 'MAIN_PLANAR',
- 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/todclock': {'fru_type': 'MAIN_PLANAR',
- 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/apss': {'fru_type': 'MAIN_PLANAR',
- 'is_fru': False, },
+ "<inventory_root>/system": {
+ "fru_type": "SYSTEM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis": {
+ "fru_type": "SYSTEM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard": {
+ "fru_type": "MAIN_PLANAR",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/fan0": {
+ "fru_type": "FAN",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/fan1": {
+ "fru_type": "FAN",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/fan2": {
+ "fru_type": "FAN",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/fan3": {
+ "fru_type": "FAN",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/fan4": {
+ "fru_type": "FAN",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/bmc": {
+ "fru_type": "BMC",
+ "is_fru": False,
+ "manufacturer": "ASPEED",
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu": {
+ "fru_type": "CPU",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu/core0": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu/core1": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu/core2": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu/core3": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu/core4": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu/core5": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu/core6": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu/core7": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu/core8": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu/core9": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu/core10": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu/core11": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/membuf": {
+ "fru_type": "MEMORY_BUFFER",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm0": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm1": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm2": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm3": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/io_board/pcie_slot0": {
+ "fru_type": "PCIE_CARD",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/io_board/pcie_slot1": {
+ "fru_type": "PCIE_CARD",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/systemevent": {
+ "fru_type": "SYSTEM_EVENT",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/refclock": {
+ "fru_type": "MAIN_PLANAR",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/pcieclock": {
+ "fru_type": "MAIN_PLANAR",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/todclock": {
+ "fru_type": "MAIN_PLANAR",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/apss": {
+ "fru_type": "MAIN_PLANAR",
+ "is_fru": False,
+ },
}
ID_LOOKUP = {
- 'FRU': {
- 0x0d: '<inventory_root>/system/chassis',
- 0x34: '<inventory_root>/system/chassis/motherboard',
- 0x01: '<inventory_root>/system/chassis/motherboard/cpu',
- 0x02: '<inventory_root>/system/chassis/motherboard/membuf',
- 0x03: '<inventory_root>/system/chassis/motherboard/dimm0',
- 0x04: '<inventory_root>/system/chassis/motherboard/dimm1',
- 0x05: '<inventory_root>/system/chassis/motherboard/dimm2',
- 0x06: '<inventory_root>/system/chassis/motherboard/dimm3',
- 0x35: '<inventory_root>/system',
+ "FRU": {
+ 0x0D: "<inventory_root>/system/chassis",
+ 0x34: "<inventory_root>/system/chassis/motherboard",
+ 0x01: "<inventory_root>/system/chassis/motherboard/cpu",
+ 0x02: "<inventory_root>/system/chassis/motherboard/membuf",
+ 0x03: "<inventory_root>/system/chassis/motherboard/dimm0",
+ 0x04: "<inventory_root>/system/chassis/motherboard/dimm1",
+ 0x05: "<inventory_root>/system/chassis/motherboard/dimm2",
+ 0x06: "<inventory_root>/system/chassis/motherboard/dimm3",
+ 0x35: "<inventory_root>/system",
},
- 'FRU_STR': {
- 'PRODUCT_15': '<inventory_root>/system',
- 'CHASSIS_2': '<inventory_root>/system/chassis',
- 'BOARD_1': '<inventory_root>/system/chassis/motherboard/cpu',
- 'BOARD_2': '<inventory_root>/system/chassis/motherboard/membuf',
- 'BOARD_14': '<inventory_root>/system/chassis/motherboard',
- 'PRODUCT_3': '<inventory_root>/system/chassis/motherboard/dimm0',
- 'PRODUCT_4': '<inventory_root>/system/chassis/motherboard/dimm1',
- 'PRODUCT_5': '<inventory_root>/system/chassis/motherboard/dimm2',
- 'PRODUCT_6': '<inventory_root>/system/chassis/motherboard/dimm3',
+ "FRU_STR": {
+ "PRODUCT_15": "<inventory_root>/system",
+ "CHASSIS_2": "<inventory_root>/system/chassis",
+ "BOARD_1": "<inventory_root>/system/chassis/motherboard/cpu",
+ "BOARD_2": "<inventory_root>/system/chassis/motherboard/membuf",
+ "BOARD_14": "<inventory_root>/system/chassis/motherboard",
+ "PRODUCT_3": "<inventory_root>/system/chassis/motherboard/dimm0",
+ "PRODUCT_4": "<inventory_root>/system/chassis/motherboard/dimm1",
+ "PRODUCT_5": "<inventory_root>/system/chassis/motherboard/dimm2",
+ "PRODUCT_6": "<inventory_root>/system/chassis/motherboard/dimm3",
},
- 'SENSOR': {
- 0x34: '<inventory_root>/system/chassis/motherboard',
- 0x37: '<inventory_root>/system/chassis/motherboard/refclock',
- 0x38: '<inventory_root>/system/chassis/motherboard/pcieclock',
- 0x39: '<inventory_root>/system/chassis/motherboard/todclock',
- 0x3A: '<inventory_root>/system/chassis/apss',
- 0x2f: '<inventory_root>/system/chassis/motherboard/cpu',
- 0x22: '<inventory_root>/system/chassis/motherboard/cpu/core1',
- 0x23: '<inventory_root>/system/chassis/motherboard/cpu/core2',
- 0x24: '<inventory_root>/system/chassis/motherboard/cpu/core3',
- 0x25: '<inventory_root>/system/chassis/motherboard/cpu/core4',
- 0x26: '<inventory_root>/system/chassis/motherboard/cpu/core5',
- 0x27: '<inventory_root>/system/chassis/motherboard/cpu/core6',
- 0x28: '<inventory_root>/system/chassis/motherboard/cpu/core9',
- 0x29: '<inventory_root>/system/chassis/motherboard/cpu/core10',
- 0x2a: '<inventory_root>/system/chassis/motherboard/cpu/core11',
- 0x2b: '<inventory_root>/system/chassis/motherboard/cpu/core12',
- 0x2c: '<inventory_root>/system/chassis/motherboard/cpu/core13',
- 0x2d: '<inventory_root>/system/chassis/motherboard/cpu/core14',
- 0x2e: '<inventory_root>/system/chassis/motherboard/membuf',
- 0x1e: '<inventory_root>/system/chassis/motherboard/dimm0',
- 0x1f: '<inventory_root>/system/chassis/motherboard/dimm1',
- 0x20: '<inventory_root>/system/chassis/motherboard/dimm2',
- 0x21: '<inventory_root>/system/chassis/motherboard/dimm3',
- 0x09: '/org/openbmc/sensors/host/BootCount',
- 0x05: '/org/openbmc/sensors/host/BootProgress',
- 0x08: '/org/openbmc/sensors/host/cpu0/OccStatus',
- 0x32: '/org/openbmc/sensors/host/OperatingSystemStatus',
- 0x33: '/org/openbmc/sensors/host/PowerCap',
+ "SENSOR": {
+ 0x34: "<inventory_root>/system/chassis/motherboard",
+ 0x37: "<inventory_root>/system/chassis/motherboard/refclock",
+ 0x38: "<inventory_root>/system/chassis/motherboard/pcieclock",
+ 0x39: "<inventory_root>/system/chassis/motherboard/todclock",
+ 0x3A: "<inventory_root>/system/chassis/apss",
+ 0x2F: "<inventory_root>/system/chassis/motherboard/cpu",
+ 0x22: "<inventory_root>/system/chassis/motherboard/cpu/core1",
+ 0x23: "<inventory_root>/system/chassis/motherboard/cpu/core2",
+ 0x24: "<inventory_root>/system/chassis/motherboard/cpu/core3",
+ 0x25: "<inventory_root>/system/chassis/motherboard/cpu/core4",
+ 0x26: "<inventory_root>/system/chassis/motherboard/cpu/core5",
+ 0x27: "<inventory_root>/system/chassis/motherboard/cpu/core6",
+ 0x28: "<inventory_root>/system/chassis/motherboard/cpu/core9",
+ 0x29: "<inventory_root>/system/chassis/motherboard/cpu/core10",
+ 0x2A: "<inventory_root>/system/chassis/motherboard/cpu/core11",
+ 0x2B: "<inventory_root>/system/chassis/motherboard/cpu/core12",
+ 0x2C: "<inventory_root>/system/chassis/motherboard/cpu/core13",
+ 0x2D: "<inventory_root>/system/chassis/motherboard/cpu/core14",
+ 0x2E: "<inventory_root>/system/chassis/motherboard/membuf",
+ 0x1E: "<inventory_root>/system/chassis/motherboard/dimm0",
+ 0x1F: "<inventory_root>/system/chassis/motherboard/dimm1",
+ 0x20: "<inventory_root>/system/chassis/motherboard/dimm2",
+ 0x21: "<inventory_root>/system/chassis/motherboard/dimm3",
+ 0x09: "/org/openbmc/sensors/host/BootCount",
+ 0x05: "/org/openbmc/sensors/host/BootProgress",
+ 0x08: "/org/openbmc/sensors/host/cpu0/OccStatus",
+ 0x32: "/org/openbmc/sensors/host/OperatingSystemStatus",
+ 0x33: "/org/openbmc/sensors/host/PowerCap",
},
- 'GPIO_PRESENT': {
- 'SLOT0_PRESENT': '<inventory_root>/system/chassis/io_board/pcie_slot0',
- 'SLOT1_PRESENT': '<inventory_root>/system/chassis/io_board/pcie_slot1',
- }
+ "GPIO_PRESENT": {
+ "SLOT0_PRESENT": "<inventory_root>/system/chassis/io_board/pcie_slot0",
+ "SLOT1_PRESENT": "<inventory_root>/system/chassis/io_board/pcie_slot1",
+ },
}
GPIO_CONFIG = {}
-GPIO_CONFIG['FSI_CLK'] = {'gpio_pin': 'A4', 'direction': 'out'}
-GPIO_CONFIG['FSI_DATA'] = {'gpio_pin': 'A5', 'direction': 'out'}
-GPIO_CONFIG['FSI_ENABLE'] = {'gpio_pin': 'D0', 'direction': 'out'}
-GPIO_CONFIG['POWER_PIN'] = {'gpio_pin': 'E1', 'direction': 'out'}
-GPIO_CONFIG['CRONUS_SEL'] = {'gpio_pin': 'A6', 'direction': 'out'}
-GPIO_CONFIG['PGOOD'] = {'gpio_pin': 'C7', 'direction': 'in'}
-GPIO_CONFIG['BMC_THROTTLE'] = {'gpio_pin': 'J3', 'direction': 'out'}
-GPIO_CONFIG['IDBTN'] = {'gpio_pin': 'Q7', 'direction': 'out'}
-GPIO_CONFIG['POWER_BUTTON'] = {'gpio_pin': 'E0', 'direction': 'both'}
-GPIO_CONFIG['PCIE_RESET'] = {'gpio_pin': 'B5', 'direction': 'out'}
-GPIO_CONFIG['USB_RESET'] = {'gpio_pin': 'B6', 'direction': 'out'}
-GPIO_CONFIG['SLOT0_RISER_PRESENT'] = {'gpio_pin': 'N0', 'direction': 'in'}
-GPIO_CONFIG['SLOT1_RISER_PRESENT'] = {'gpio_pin': 'N1', 'direction': 'in'}
-GPIO_CONFIG['SLOT2_RISER_PRESENT'] = {'gpio_pin': 'N2', 'direction': 'in'}
-GPIO_CONFIG['SLOT0_PRESENT'] = {'gpio_pin': 'N3', 'direction': 'in'}
-GPIO_CONFIG['SLOT1_PRESENT'] = {'gpio_pin': 'N4', 'direction': 'in'}
-GPIO_CONFIG['SLOT2_PRESENT'] = {'gpio_pin': 'N5', 'direction': 'in'}
-GPIO_CONFIG['MEZZ0_PRESENT'] = {'gpio_pin': 'O0', 'direction': 'in'}
-GPIO_CONFIG['MEZZ1_PRESENT'] = {'gpio_pin': 'O1', 'direction': 'in'}
+GPIO_CONFIG["FSI_CLK"] = {"gpio_pin": "A4", "direction": "out"}
+GPIO_CONFIG["FSI_DATA"] = {"gpio_pin": "A5", "direction": "out"}
+GPIO_CONFIG["FSI_ENABLE"] = {"gpio_pin": "D0", "direction": "out"}
+GPIO_CONFIG["POWER_PIN"] = {"gpio_pin": "E1", "direction": "out"}
+GPIO_CONFIG["CRONUS_SEL"] = {"gpio_pin": "A6", "direction": "out"}
+GPIO_CONFIG["PGOOD"] = {"gpio_pin": "C7", "direction": "in"}
+GPIO_CONFIG["BMC_THROTTLE"] = {"gpio_pin": "J3", "direction": "out"}
+GPIO_CONFIG["IDBTN"] = {"gpio_pin": "Q7", "direction": "out"}
+GPIO_CONFIG["POWER_BUTTON"] = {"gpio_pin": "E0", "direction": "both"}
+GPIO_CONFIG["PCIE_RESET"] = {"gpio_pin": "B5", "direction": "out"}
+GPIO_CONFIG["USB_RESET"] = {"gpio_pin": "B6", "direction": "out"}
+GPIO_CONFIG["SLOT0_RISER_PRESENT"] = {"gpio_pin": "N0", "direction": "in"}
+GPIO_CONFIG["SLOT1_RISER_PRESENT"] = {"gpio_pin": "N1", "direction": "in"}
+GPIO_CONFIG["SLOT2_RISER_PRESENT"] = {"gpio_pin": "N2", "direction": "in"}
+GPIO_CONFIG["SLOT0_PRESENT"] = {"gpio_pin": "N3", "direction": "in"}
+GPIO_CONFIG["SLOT1_PRESENT"] = {"gpio_pin": "N4", "direction": "in"}
+GPIO_CONFIG["SLOT2_PRESENT"] = {"gpio_pin": "N5", "direction": "in"}
+GPIO_CONFIG["MEZZ0_PRESENT"] = {"gpio_pin": "O0", "direction": "in"}
+GPIO_CONFIG["MEZZ1_PRESENT"] = {"gpio_pin": "O1", "direction": "in"}
def convertGpio(name):
@@ -316,38 +393,70 @@
HWMON_CONFIG = {
- '2-004c': {
- 'names': {
- 'temp1_input': {'object_path': 'temperature/ambient', 'poll_interval': 5000,
- 'scale': 1000, 'units': 'C'},
+ "2-004c": {
+ "names": {
+ "temp1_input": {
+ "object_path": "temperature/ambient",
+ "poll_interval": 5000,
+ "scale": 1000,
+ "units": "C",
+ },
}
},
- '3-0050': {
- 'names': {
- 'caps_curr_powercap': {'object_path': 'powercap/curr_cap', 'poll_interval': 10000,
- 'scale': 1, 'units': 'W'},
- 'caps_curr_powerreading': {'object_path': 'powercap/system_power',
- 'poll_interval': 10000, 'scale': 1, 'units': 'W'},
- 'caps_max_powercap': {'object_path': 'powercap/max_cap', 'poll_interval': 10000,
- 'scale': 1, 'units': 'W'},
- 'caps_min_powercap': {'object_path': 'powercap/min_cap', 'poll_interval': 10000,
- 'scale': 1, 'units': 'W'},
- 'caps_norm_powercap': {'object_path': 'powercap/n_cap', 'poll_interval': 10000,
- 'scale': 1, 'units': 'W'},
- 'caps_user_powerlimit': {'object_path': 'powercap/user_cap', 'poll_interval': 10000,
- 'scale': 1, 'units': 'W'},
+ "3-0050": {
+ "names": {
+ "caps_curr_powercap": {
+ "object_path": "powercap/curr_cap",
+ "poll_interval": 10000,
+ "scale": 1,
+ "units": "W",
+ },
+ "caps_curr_powerreading": {
+ "object_path": "powercap/system_power",
+ "poll_interval": 10000,
+ "scale": 1,
+ "units": "W",
+ },
+ "caps_max_powercap": {
+ "object_path": "powercap/max_cap",
+ "poll_interval": 10000,
+ "scale": 1,
+ "units": "W",
+ },
+ "caps_min_powercap": {
+ "object_path": "powercap/min_cap",
+ "poll_interval": 10000,
+ "scale": 1,
+ "units": "W",
+ },
+ "caps_norm_powercap": {
+ "object_path": "powercap/n_cap",
+ "poll_interval": 10000,
+ "scale": 1,
+ "units": "W",
+ },
+ "caps_user_powerlimit": {
+ "object_path": "powercap/user_cap",
+ "poll_interval": 10000,
+ "scale": 1,
+ "units": "W",
+ },
}
- }
+ },
}
# Miscellaneous non-poll sensor with system specific properties.
# The sensor id is the same as those defined in ID_LOOKUP['SENSOR'].
MISC_SENSORS = {
- 0x09: {'class': 'BootCountSensor'},
- 0x05: {'class': 'BootProgressSensor'},
- 0x08: {'class': 'OccStatusSensor',
- 'os_path': '/sys/class/i2c-adapter/i2c-3/3-0050/online'},
- 0x32: {'class': 'OperatingSystemStatusSensor'},
- 0x33: {'class': 'PowerCap',
- 'os_path': '/sys/class/hwmon/hwmon1/user_powercap'},
+ 0x09: {"class": "BootCountSensor"},
+ 0x05: {"class": "BootProgressSensor"},
+ 0x08: {
+ "class": "OccStatusSensor",
+ "os_path": "/sys/class/i2c-adapter/i2c-3/3-0050/online",
+ },
+ 0x32: {"class": "OperatingSystemStatusSensor"},
+ 0x33: {
+ "class": "PowerCap",
+ "os_path": "/sys/class/hwmon/hwmon1/user_powercap",
+ },
}
diff --git a/data/Romulus.py b/data/Romulus.py
index 13c405c..6b753d6 100644
--- a/data/Romulus.py
+++ b/data/Romulus.py
@@ -2,470 +2,943 @@
#
SYSTEM_STATES = [
- 'BASE_APPS',
- 'BMC_STARTING',
- 'BMC_READY',
- 'HOST_POWERING_ON',
- 'HOST_POWERED_ON',
- 'HOST_BOOTING',
- 'HOST_BOOTED',
- 'HOST_POWERED_OFF',
+ "BASE_APPS",
+ "BMC_STARTING",
+ "BMC_READY",
+ "HOST_POWERING_ON",
+ "HOST_POWERED_ON",
+ "HOST_BOOTING",
+ "HOST_BOOTED",
+ "HOST_POWERED_OFF",
]
EXIT_STATE_DEPEND = {
- 'BASE_APPS': {
- '/org/openbmc/sensors': 0,
+ "BASE_APPS": {
+ "/org/openbmc/sensors": 0,
},
- 'BMC_STARTING': {
- '/org/openbmc/control/chassis0': 0,
- '/org/openbmc/control/power0': 0,
- '/org/openbmc/control/flash/bios': 0,
+ "BMC_STARTING": {
+ "/org/openbmc/control/chassis0": 0,
+ "/org/openbmc/control/power0": 0,
+ "/org/openbmc/control/flash/bios": 0,
},
}
-INVENTORY_ROOT = '/org/openbmc/inventory'
+INVENTORY_ROOT = "/org/openbmc/inventory"
FRU_INSTANCES = {
- '<inventory_root>/system': {'fru_type': 'SYSTEM', 'is_fru': True, 'present': "True"},
- '<inventory_root>/system/bios': {'fru_type': 'SYSTEM', 'is_fru': True, 'present': "True"},
- '<inventory_root>/system/misc': {'fru_type': 'SYSTEM', 'is_fru': False, },
-
- '<inventory_root>/system/chassis': {'fru_type': 'SYSTEM', 'is_fru': True, 'present': "True"},
-
- '<inventory_root>/system/chassis/motherboard': {'fru_type': 'MAIN_PLANAR', 'is_fru': True, },
-
- '<inventory_root>/system/systemevent': {'fru_type': 'SYSTEM_EVENT', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/refclock': {'fru_type': 'MAIN_PLANAR',
- 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/pcieclock': {'fru_type': 'MAIN_PLANAR',
- 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/todclock': {'fru_type': 'MAIN_PLANAR',
- 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/apss': {'fru_type': 'MAIN_PLANAR',
- 'is_fru': False, },
-
- '<inventory_root>/system/chassis/fan0': {'fru_type': 'FAN', 'is_fru': True, },
- '<inventory_root>/system/chassis/fan1': {'fru_type': 'FAN', 'is_fru': True, },
- '<inventory_root>/system/chassis/fan2': {'fru_type': 'FAN', 'is_fru': True, },
- '<inventory_root>/system/chassis/fan3': {'fru_type': 'FAN', 'is_fru': True, },
-
- '<inventory_root>/system/chassis/motherboard/bmc': {'fru_type': 'BMC', 'is_fru': False,
- 'manufacturer': 'ASPEED'},
-
- '<inventory_root>/system/chassis/motherboard/cpu0': {'fru_type': 'CPU', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/cpu1': {'fru_type': 'CPU', 'is_fru': True, },
-
- '<inventory_root>/system/chassis/motherboard/cpu0/core0': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core1': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core2': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core3': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core4': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core5': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core6': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core7': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core8': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core9': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core10': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core11': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core12': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core13': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core14': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core15': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core16': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core17': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core18': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core19': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core20': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core21': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core22': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core23': {'fru_type': 'CORE', 'is_fru': False, },
-
- '<inventory_root>/system/chassis/motherboard/cpu1/core0': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core1': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core2': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core3': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core4': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core5': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core6': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core7': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core8': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core9': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core10': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core11': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core12': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core13': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core14': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core15': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core16': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core17': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core18': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core19': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core20': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core21': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core22': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core23': {'fru_type': 'CORE', 'is_fru': False, },
-
- '<inventory_root>/system/chassis/motherboard/dimm0': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm1': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm2': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm3': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm4': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm5': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm6': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm7': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm8': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm9': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm10': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm11': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm12': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm13': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm14': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm15': {'fru_type': 'DIMM', 'is_fru': True, },
+ "<inventory_root>/system": {
+ "fru_type": "SYSTEM",
+ "is_fru": True,
+ "present": "True",
+ },
+ "<inventory_root>/system/bios": {
+ "fru_type": "SYSTEM",
+ "is_fru": True,
+ "present": "True",
+ },
+ "<inventory_root>/system/misc": {
+ "fru_type": "SYSTEM",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis": {
+ "fru_type": "SYSTEM",
+ "is_fru": True,
+ "present": "True",
+ },
+ "<inventory_root>/system/chassis/motherboard": {
+ "fru_type": "MAIN_PLANAR",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/systemevent": {
+ "fru_type": "SYSTEM_EVENT",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/refclock": {
+ "fru_type": "MAIN_PLANAR",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/pcieclock": {
+ "fru_type": "MAIN_PLANAR",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/todclock": {
+ "fru_type": "MAIN_PLANAR",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/apss": {
+ "fru_type": "MAIN_PLANAR",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/fan0": {
+ "fru_type": "FAN",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/fan1": {
+ "fru_type": "FAN",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/fan2": {
+ "fru_type": "FAN",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/fan3": {
+ "fru_type": "FAN",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/bmc": {
+ "fru_type": "BMC",
+ "is_fru": False,
+ "manufacturer": "ASPEED",
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0": {
+ "fru_type": "CPU",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1": {
+ "fru_type": "CPU",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core0": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core1": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core2": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core3": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core4": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core5": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core6": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core7": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core8": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core9": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core10": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core11": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core12": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core13": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core14": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core15": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core16": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core17": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core18": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core19": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core20": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core21": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core22": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core23": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core0": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core1": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core2": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core3": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core4": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core5": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core6": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core7": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core8": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core9": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core10": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core11": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core12": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core13": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core14": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core15": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core16": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core17": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core18": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core19": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core20": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core21": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core22": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core23": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm0": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm1": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm2": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm3": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm4": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm5": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm6": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm7": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm8": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm9": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm10": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm11": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm12": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm13": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm14": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm15": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
}
ID_LOOKUP = {
- 'FRU': {
- 0x01: '<inventory_root>/system/chassis/motherboard/cpu0',
- 0x02: '<inventory_root>/system/chassis/motherboard/cpu1',
- 0x03: '<inventory_root>/system/chassis/motherboard',
- 0x04: '<inventory_root>/system/chassis/motherboard/dimm0',
- 0x05: '<inventory_root>/system/chassis/motherboard/dimm1',
- 0x06: '<inventory_root>/system/chassis/motherboard/dimm2',
- 0x07: '<inventory_root>/system/chassis/motherboard/dimm3',
- 0x08: '<inventory_root>/system/chassis/motherboard/dimm4',
- 0x09: '<inventory_root>/system/chassis/motherboard/dimm5',
- 0x0a: '<inventory_root>/system/chassis/motherboard/dimm6',
- 0x0b: '<inventory_root>/system/chassis/motherboard/dimm7',
- 0x0c: '<inventory_root>/system/chassis/motherboard/dimm8',
- 0x0d: '<inventory_root>/system/chassis/motherboard/dimm9',
- 0x0e: '<inventory_root>/system/chassis/motherboard/dimm10',
- 0x0f: '<inventory_root>/system/chassis/motherboard/dimm11',
- 0x10: '<inventory_root>/system/chassis/motherboard/dimm12',
- 0x11: '<inventory_root>/system/chassis/motherboard/dimm13',
- 0x12: '<inventory_root>/system/chassis/motherboard/dimm14',
- 0x13: '<inventory_root>/system/chassis/motherboard/dimm15',
+ "FRU": {
+ 0x01: "<inventory_root>/system/chassis/motherboard/cpu0",
+ 0x02: "<inventory_root>/system/chassis/motherboard/cpu1",
+ 0x03: "<inventory_root>/system/chassis/motherboard",
+ 0x04: "<inventory_root>/system/chassis/motherboard/dimm0",
+ 0x05: "<inventory_root>/system/chassis/motherboard/dimm1",
+ 0x06: "<inventory_root>/system/chassis/motherboard/dimm2",
+ 0x07: "<inventory_root>/system/chassis/motherboard/dimm3",
+ 0x08: "<inventory_root>/system/chassis/motherboard/dimm4",
+ 0x09: "<inventory_root>/system/chassis/motherboard/dimm5",
+ 0x0A: "<inventory_root>/system/chassis/motherboard/dimm6",
+ 0x0B: "<inventory_root>/system/chassis/motherboard/dimm7",
+ 0x0C: "<inventory_root>/system/chassis/motherboard/dimm8",
+ 0x0D: "<inventory_root>/system/chassis/motherboard/dimm9",
+ 0x0E: "<inventory_root>/system/chassis/motherboard/dimm10",
+ 0x0F: "<inventory_root>/system/chassis/motherboard/dimm11",
+ 0x10: "<inventory_root>/system/chassis/motherboard/dimm12",
+ 0x11: "<inventory_root>/system/chassis/motherboard/dimm13",
+ 0x12: "<inventory_root>/system/chassis/motherboard/dimm14",
+ 0x13: "<inventory_root>/system/chassis/motherboard/dimm15",
},
- 'FRU_STR': {
- 'PRODUCT_0': '<inventory_root>/system/bios',
- 'BOARD_1': '<inventory_root>/system/chassis/motherboard/cpu0',
- 'BOARD_2': '<inventory_root>/system/chassis/motherboard/cpu1',
- 'CHASSIS_3': '<inventory_root>/system/chassis/motherboard',
- 'BOARD_3': '<inventory_root>/system/misc',
- 'PRODUCT_12': '<inventory_root>/system/chassis/motherboard/dimm0',
- 'PRODUCT_13': '<inventory_root>/system/chassis/motherboard/dimm1',
- 'PRODUCT_14': '<inventory_root>/system/chassis/motherboard/dimm2',
- 'PRODUCT_15': '<inventory_root>/system/chassis/motherboard/dimm3',
- 'PRODUCT_16': '<inventory_root>/system/chassis/motherboard/dimm4',
- 'PRODUCT_17': '<inventory_root>/system/chassis/motherboard/dimm5',
- 'PRODUCT_18': '<inventory_root>/system/chassis/motherboard/dimm6',
- 'PRODUCT_19': '<inventory_root>/system/chassis/motherboard/dimm7',
- 'PRODUCT_20': '<inventory_root>/system/chassis/motherboard/dimm8',
- 'PRODUCT_21': '<inventory_root>/system/chassis/motherboard/dimm9',
- 'PRODUCT_22': '<inventory_root>/system/chassis/motherboard/dimm10',
- 'PRODUCT_23': '<inventory_root>/system/chassis/motherboard/dimm11',
- 'PRODUCT_24': '<inventory_root>/system/chassis/motherboard/dimm12',
- 'PRODUCT_25': '<inventory_root>/system/chassis/motherboard/dimm13',
- 'PRODUCT_26': '<inventory_root>/system/chassis/motherboard/dimm14',
- 'PRODUCT_27': '<inventory_root>/system/chassis/motherboard/dimm15',
- 'PRODUCT_47': '<inventory_root>/system/misc',
+ "FRU_STR": {
+ "PRODUCT_0": "<inventory_root>/system/bios",
+ "BOARD_1": "<inventory_root>/system/chassis/motherboard/cpu0",
+ "BOARD_2": "<inventory_root>/system/chassis/motherboard/cpu1",
+ "CHASSIS_3": "<inventory_root>/system/chassis/motherboard",
+ "BOARD_3": "<inventory_root>/system/misc",
+ "PRODUCT_12": "<inventory_root>/system/chassis/motherboard/dimm0",
+ "PRODUCT_13": "<inventory_root>/system/chassis/motherboard/dimm1",
+ "PRODUCT_14": "<inventory_root>/system/chassis/motherboard/dimm2",
+ "PRODUCT_15": "<inventory_root>/system/chassis/motherboard/dimm3",
+ "PRODUCT_16": "<inventory_root>/system/chassis/motherboard/dimm4",
+ "PRODUCT_17": "<inventory_root>/system/chassis/motherboard/dimm5",
+ "PRODUCT_18": "<inventory_root>/system/chassis/motherboard/dimm6",
+ "PRODUCT_19": "<inventory_root>/system/chassis/motherboard/dimm7",
+ "PRODUCT_20": "<inventory_root>/system/chassis/motherboard/dimm8",
+ "PRODUCT_21": "<inventory_root>/system/chassis/motherboard/dimm9",
+ "PRODUCT_22": "<inventory_root>/system/chassis/motherboard/dimm10",
+ "PRODUCT_23": "<inventory_root>/system/chassis/motherboard/dimm11",
+ "PRODUCT_24": "<inventory_root>/system/chassis/motherboard/dimm12",
+ "PRODUCT_25": "<inventory_root>/system/chassis/motherboard/dimm13",
+ "PRODUCT_26": "<inventory_root>/system/chassis/motherboard/dimm14",
+ "PRODUCT_27": "<inventory_root>/system/chassis/motherboard/dimm15",
+ "PRODUCT_47": "<inventory_root>/system/misc",
},
- 'SENSOR': {
- 0x01: '/org/openbmc/sensors/host/HostStatus',
- 0x02: '/org/openbmc/sensors/host/BootProgress',
- 0x03: '/org/openbmc/sensors/host/cpu0/OccStatus',
- 0x04: '/org/openbmc/sensors/host/cpu1/OccStatus',
- 0x08: '<inventory_root>/system/chassis/motherboard/cpu0',
- 0x09: '<inventory_root>/system/chassis/motherboard/cpu1',
- 0x0b: '<inventory_root>/system/chassis/motherboard/dimm0',
- 0x0c: '<inventory_root>/system/chassis/motherboard/dimm1',
- 0x0d: '<inventory_root>/system/chassis/motherboard/dimm2',
- 0x0e: '<inventory_root>/system/chassis/motherboard/dimm3',
- 0x0f: '<inventory_root>/system/chassis/motherboard/dimm4',
- 0x10: '<inventory_root>/system/chassis/motherboard/dimm5',
- 0x11: '<inventory_root>/system/chassis/motherboard/dimm6',
- 0x12: '<inventory_root>/system/chassis/motherboard/dimm7',
- 0x13: '<inventory_root>/system/chassis/motherboard/dimm8',
- 0x14: '<inventory_root>/system/chassis/motherboard/dimm9',
- 0x15: '<inventory_root>/system/chassis/motherboard/dimm10',
- 0x16: '<inventory_root>/system/chassis/motherboard/dimm11',
- 0x17: '<inventory_root>/system/chassis/motherboard/dimm12',
- 0x18: '<inventory_root>/system/chassis/motherboard/dimm13',
- 0x19: '<inventory_root>/system/chassis/motherboard/dimm14',
- 0x1a: '<inventory_root>/system/chassis/motherboard/dimm15',
- 0x2b: '<inventory_root>/system/chassis/motherboard/cpu0/core0',
- 0x2c: '<inventory_root>/system/chassis/motherboard/cpu0/core1',
- 0x2d: '<inventory_root>/system/chassis/motherboard/cpu0/core2',
- 0x2e: '<inventory_root>/system/chassis/motherboard/cpu0/core3',
- 0x2f: '<inventory_root>/system/chassis/motherboard/cpu0/core4',
- 0x30: '<inventory_root>/system/chassis/motherboard/cpu0/core5',
- 0x31: '<inventory_root>/system/chassis/motherboard/cpu0/core6',
- 0x32: '<inventory_root>/system/chassis/motherboard/cpu0/core7',
- 0x33: '<inventory_root>/system/chassis/motherboard/cpu0/core8',
- 0x34: '<inventory_root>/system/chassis/motherboard/cpu0/core9',
- 0x35: '<inventory_root>/system/chassis/motherboard/cpu0/core10',
- 0x36: '<inventory_root>/system/chassis/motherboard/cpu0/core11',
- 0x37: '<inventory_root>/system/chassis/motherboard/cpu0/core12',
- 0x38: '<inventory_root>/system/chassis/motherboard/cpu0/core13',
- 0x39: '<inventory_root>/system/chassis/motherboard/cpu0/core14',
- 0x3a: '<inventory_root>/system/chassis/motherboard/cpu0/core15',
- 0x3b: '<inventory_root>/system/chassis/motherboard/cpu0/core16',
- 0x3c: '<inventory_root>/system/chassis/motherboard/cpu0/core17',
- 0x3d: '<inventory_root>/system/chassis/motherboard/cpu0/core18',
- 0x3e: '<inventory_root>/system/chassis/motherboard/cpu0/core19',
- 0x3f: '<inventory_root>/system/chassis/motherboard/cpu0/core20',
- 0x40: '<inventory_root>/system/chassis/motherboard/cpu0/core21',
- 0x41: '<inventory_root>/system/chassis/motherboard/cpu0/core22',
- 0x42: '<inventory_root>/system/chassis/motherboard/cpu0/core23',
- 0x43: '<inventory_root>/system/chassis/motherboard/cpu1/core0',
- 0x44: '<inventory_root>/system/chassis/motherboard/cpu1/core1',
- 0x45: '<inventory_root>/system/chassis/motherboard/cpu1/core2',
- 0x46: '<inventory_root>/system/chassis/motherboard/cpu1/core3',
- 0x47: '<inventory_root>/system/chassis/motherboard/cpu1/core4',
- 0x48: '<inventory_root>/system/chassis/motherboard/cpu1/core5',
- 0x49: '<inventory_root>/system/chassis/motherboard/cpu1/core6',
- 0x4a: '<inventory_root>/system/chassis/motherboard/cpu1/core7',
- 0x4b: '<inventory_root>/system/chassis/motherboard/cpu1/core8',
- 0x4c: '<inventory_root>/system/chassis/motherboard/cpu1/core9',
- 0x4d: '<inventory_root>/system/chassis/motherboard/cpu1/core10',
- 0x4e: '<inventory_root>/system/chassis/motherboard/cpu1/core11',
- 0x4f: '<inventory_root>/system/chassis/motherboard/cpu1/core12',
- 0x50: '<inventory_root>/system/chassis/motherboard/cpu1/core13',
- 0x51: '<inventory_root>/system/chassis/motherboard/cpu1/core14',
- 0x52: '<inventory_root>/system/chassis/motherboard/cpu1/core15',
- 0x53: '<inventory_root>/system/chassis/motherboard/cpu1/core16',
- 0x54: '<inventory_root>/system/chassis/motherboard/cpu1/core17',
- 0x55: '<inventory_root>/system/chassis/motherboard/cpu1/core18',
- 0x56: '<inventory_root>/system/chassis/motherboard/cpu1/core19',
- 0x57: '<inventory_root>/system/chassis/motherboard/cpu1/core20',
- 0x58: '<inventory_root>/system/chassis/motherboard/cpu1/core21',
- 0x59: '<inventory_root>/system/chassis/motherboard/cpu1/core22',
- 0x5a: '<inventory_root>/system/chassis/motherboard/cpu1/core23',
- 0x8b: '/org/openbmc/sensors/host/BootCount',
- 0x8c: '<inventory_root>/system/chassis/motherboard',
- 0x8d: '<inventory_root>/system/chassis/motherboard/refclock',
- 0x8e: '<inventory_root>/system/chassis/motherboard/pcieclock',
- 0x8f: '<inventory_root>/system/chassis/motherboard/todclock',
- 0x90: '<inventory_root>/system/systemevent',
- 0x91: '/org/openbmc/sensors/host/OperatingSystemStatus',
- 0x92: '<inventory_root>/system/chassis/motherboard/pcielink',
+ "SENSOR": {
+ 0x01: "/org/openbmc/sensors/host/HostStatus",
+ 0x02: "/org/openbmc/sensors/host/BootProgress",
+ 0x03: "/org/openbmc/sensors/host/cpu0/OccStatus",
+ 0x04: "/org/openbmc/sensors/host/cpu1/OccStatus",
+ 0x08: "<inventory_root>/system/chassis/motherboard/cpu0",
+ 0x09: "<inventory_root>/system/chassis/motherboard/cpu1",
+ 0x0B: "<inventory_root>/system/chassis/motherboard/dimm0",
+ 0x0C: "<inventory_root>/system/chassis/motherboard/dimm1",
+ 0x0D: "<inventory_root>/system/chassis/motherboard/dimm2",
+ 0x0E: "<inventory_root>/system/chassis/motherboard/dimm3",
+ 0x0F: "<inventory_root>/system/chassis/motherboard/dimm4",
+ 0x10: "<inventory_root>/system/chassis/motherboard/dimm5",
+ 0x11: "<inventory_root>/system/chassis/motherboard/dimm6",
+ 0x12: "<inventory_root>/system/chassis/motherboard/dimm7",
+ 0x13: "<inventory_root>/system/chassis/motherboard/dimm8",
+ 0x14: "<inventory_root>/system/chassis/motherboard/dimm9",
+ 0x15: "<inventory_root>/system/chassis/motherboard/dimm10",
+ 0x16: "<inventory_root>/system/chassis/motherboard/dimm11",
+ 0x17: "<inventory_root>/system/chassis/motherboard/dimm12",
+ 0x18: "<inventory_root>/system/chassis/motherboard/dimm13",
+ 0x19: "<inventory_root>/system/chassis/motherboard/dimm14",
+ 0x1A: "<inventory_root>/system/chassis/motherboard/dimm15",
+ 0x2B: "<inventory_root>/system/chassis/motherboard/cpu0/core0",
+ 0x2C: "<inventory_root>/system/chassis/motherboard/cpu0/core1",
+ 0x2D: "<inventory_root>/system/chassis/motherboard/cpu0/core2",
+ 0x2E: "<inventory_root>/system/chassis/motherboard/cpu0/core3",
+ 0x2F: "<inventory_root>/system/chassis/motherboard/cpu0/core4",
+ 0x30: "<inventory_root>/system/chassis/motherboard/cpu0/core5",
+ 0x31: "<inventory_root>/system/chassis/motherboard/cpu0/core6",
+ 0x32: "<inventory_root>/system/chassis/motherboard/cpu0/core7",
+ 0x33: "<inventory_root>/system/chassis/motherboard/cpu0/core8",
+ 0x34: "<inventory_root>/system/chassis/motherboard/cpu0/core9",
+ 0x35: "<inventory_root>/system/chassis/motherboard/cpu0/core10",
+ 0x36: "<inventory_root>/system/chassis/motherboard/cpu0/core11",
+ 0x37: "<inventory_root>/system/chassis/motherboard/cpu0/core12",
+ 0x38: "<inventory_root>/system/chassis/motherboard/cpu0/core13",
+ 0x39: "<inventory_root>/system/chassis/motherboard/cpu0/core14",
+ 0x3A: "<inventory_root>/system/chassis/motherboard/cpu0/core15",
+ 0x3B: "<inventory_root>/system/chassis/motherboard/cpu0/core16",
+ 0x3C: "<inventory_root>/system/chassis/motherboard/cpu0/core17",
+ 0x3D: "<inventory_root>/system/chassis/motherboard/cpu0/core18",
+ 0x3E: "<inventory_root>/system/chassis/motherboard/cpu0/core19",
+ 0x3F: "<inventory_root>/system/chassis/motherboard/cpu0/core20",
+ 0x40: "<inventory_root>/system/chassis/motherboard/cpu0/core21",
+ 0x41: "<inventory_root>/system/chassis/motherboard/cpu0/core22",
+ 0x42: "<inventory_root>/system/chassis/motherboard/cpu0/core23",
+ 0x43: "<inventory_root>/system/chassis/motherboard/cpu1/core0",
+ 0x44: "<inventory_root>/system/chassis/motherboard/cpu1/core1",
+ 0x45: "<inventory_root>/system/chassis/motherboard/cpu1/core2",
+ 0x46: "<inventory_root>/system/chassis/motherboard/cpu1/core3",
+ 0x47: "<inventory_root>/system/chassis/motherboard/cpu1/core4",
+ 0x48: "<inventory_root>/system/chassis/motherboard/cpu1/core5",
+ 0x49: "<inventory_root>/system/chassis/motherboard/cpu1/core6",
+ 0x4A: "<inventory_root>/system/chassis/motherboard/cpu1/core7",
+ 0x4B: "<inventory_root>/system/chassis/motherboard/cpu1/core8",
+ 0x4C: "<inventory_root>/system/chassis/motherboard/cpu1/core9",
+ 0x4D: "<inventory_root>/system/chassis/motherboard/cpu1/core10",
+ 0x4E: "<inventory_root>/system/chassis/motherboard/cpu1/core11",
+ 0x4F: "<inventory_root>/system/chassis/motherboard/cpu1/core12",
+ 0x50: "<inventory_root>/system/chassis/motherboard/cpu1/core13",
+ 0x51: "<inventory_root>/system/chassis/motherboard/cpu1/core14",
+ 0x52: "<inventory_root>/system/chassis/motherboard/cpu1/core15",
+ 0x53: "<inventory_root>/system/chassis/motherboard/cpu1/core16",
+ 0x54: "<inventory_root>/system/chassis/motherboard/cpu1/core17",
+ 0x55: "<inventory_root>/system/chassis/motherboard/cpu1/core18",
+ 0x56: "<inventory_root>/system/chassis/motherboard/cpu1/core19",
+ 0x57: "<inventory_root>/system/chassis/motherboard/cpu1/core20",
+ 0x58: "<inventory_root>/system/chassis/motherboard/cpu1/core21",
+ 0x59: "<inventory_root>/system/chassis/motherboard/cpu1/core22",
+ 0x5A: "<inventory_root>/system/chassis/motherboard/cpu1/core23",
+ 0x8B: "/org/openbmc/sensors/host/BootCount",
+ 0x8C: "<inventory_root>/system/chassis/motherboard",
+ 0x8D: "<inventory_root>/system/chassis/motherboard/refclock",
+ 0x8E: "<inventory_root>/system/chassis/motherboard/pcieclock",
+ 0x8F: "<inventory_root>/system/chassis/motherboard/todclock",
+ 0x90: "<inventory_root>/system/systemevent",
+ 0x91: "/org/openbmc/sensors/host/OperatingSystemStatus",
+ 0x92: "<inventory_root>/system/chassis/motherboard/pcielink",
# 0x08 : '<inventory_root>/system/powerlimit',
# 0x10 : '<inventory_root>/system/chassis/motherboard/apss',
# 0x06 : '/org/openbmc/sensors/host/powercap',
},
- 'GPIO_PRESENT': {}
+ "GPIO_PRESENT": {},
}
GPIO_CONFIG = {}
-GPIO_CONFIG['SOFTWARE_PGOOD'] = \
- {'gpio_pin': 'R1', 'direction': 'out'}
-GPIO_CONFIG['BMC_POWER_UP'] = \
- {'gpio_pin': 'D1', 'direction': 'out'}
-GPIO_CONFIG['SYS_PWROK_BUFF'] = \
- {'gpio_pin': 'D2', 'direction': 'in'}
-GPIO_CONFIG['BMC_WD_CLEAR_PULSE_N'] = \
- {'gpio_pin': 'N5', 'direction': 'out'}
-GPIO_CONFIG['CHECKSTOP'] = \
- {'gpio_pin': 'J2', 'direction': 'falling'}
-GPIO_CONFIG['BMC_CP0_RESET_N'] = \
- {'gpio_pin': 'A1', 'direction': 'out'}
-GPIO_CONFIG['BMC_CP0_PERST_ENABLE_R'] = \
- {'gpio_pin': 'A3', 'direction': 'out'}
-GPIO_CONFIG['FSI_DATA'] = \
- {'gpio_pin': 'AA2', 'direction': 'out'}
-GPIO_CONFIG['FSI_CLK'] = \
- {'gpio_pin': 'AA0', 'direction': 'out'}
-GPIO_CONFIG['FSI_ENABLE'] = \
- {'gpio_pin': 'D0', 'direction': 'out'}
+GPIO_CONFIG["SOFTWARE_PGOOD"] = {"gpio_pin": "R1", "direction": "out"}
+GPIO_CONFIG["BMC_POWER_UP"] = {"gpio_pin": "D1", "direction": "out"}
+GPIO_CONFIG["SYS_PWROK_BUFF"] = {"gpio_pin": "D2", "direction": "in"}
+GPIO_CONFIG["BMC_WD_CLEAR_PULSE_N"] = {"gpio_pin": "N5", "direction": "out"}
+GPIO_CONFIG["CHECKSTOP"] = {"gpio_pin": "J2", "direction": "falling"}
+GPIO_CONFIG["BMC_CP0_RESET_N"] = {"gpio_pin": "A1", "direction": "out"}
+GPIO_CONFIG["BMC_CP0_PERST_ENABLE_R"] = {"gpio_pin": "A3", "direction": "out"}
+GPIO_CONFIG["FSI_DATA"] = {"gpio_pin": "AA2", "direction": "out"}
+GPIO_CONFIG["FSI_CLK"] = {"gpio_pin": "AA0", "direction": "out"}
+GPIO_CONFIG["FSI_ENABLE"] = {"gpio_pin": "D0", "direction": "out"}
# DBG_CP0_MUX_SEL
-GPIO_CONFIG['CRONUS_SEL'] = \
- {'gpio_pin': 'A6', 'direction': 'out'}
-GPIO_CONFIG['BMC_THROTTLE'] = \
- {'gpio_pin': 'J3', 'direction': 'out'}
-GPIO_CONFIG['IDBTN'] = \
- {'gpio_pin': 'Q7', 'direction': 'out'}
+GPIO_CONFIG["CRONUS_SEL"] = {"gpio_pin": "A6", "direction": "out"}
+GPIO_CONFIG["BMC_THROTTLE"] = {"gpio_pin": "J3", "direction": "out"}
+GPIO_CONFIG["IDBTN"] = {"gpio_pin": "Q7", "direction": "out"}
# PM_FP_PWRBTN_IN_L
-GPIO_CONFIG['POWER_BUTTON'] = \
- {'gpio_pin': 'I3', 'direction': 'both'}
+GPIO_CONFIG["POWER_BUTTON"] = {"gpio_pin": "I3", "direction": "both"}
# PM_NMIBTN_IN_L
-GPIO_CONFIG['RESET_BUTTON'] = \
- {'gpio_pin': 'J1', 'direction': 'both'}
+GPIO_CONFIG["RESET_BUTTON"] = {"gpio_pin": "J1", "direction": "both"}
HWMON_CONFIG = {
- '4-0050': {
- 'names': {
- 'caps_curr_powercap': {'object_path': 'powercap/curr_cap', 'poll_interval': 10000,
- 'scale': 1, 'units': 'W'},
- 'caps_curr_powerreading': {'object_path': 'powercap/system_power',
- 'poll_interval': 10000, 'scale': 1, 'units': 'W'},
- 'caps_max_powercap': {'object_path': 'powercap/max_cap', 'poll_interval': 10000,
- 'scale': 1, 'units': 'W'},
- 'caps_min_powercap': {'object_path': 'powercap/min_cap', 'poll_interval': 10000,
- 'scale': 1, 'units': 'W'},
- 'caps_norm_powercap': {'object_path': 'powercap/n_cap', 'poll_interval': 10000,
- 'scale': 1, 'units': 'W'},
- 'caps_user_powerlimit': {'object_path': 'powercap/user_cap', 'poll_interval': 10000,
- 'scale': 1, 'units': 'W'},
+ "4-0050": {
+ "names": {
+ "caps_curr_powercap": {
+ "object_path": "powercap/curr_cap",
+ "poll_interval": 10000,
+ "scale": 1,
+ "units": "W",
+ },
+ "caps_curr_powerreading": {
+ "object_path": "powercap/system_power",
+ "poll_interval": 10000,
+ "scale": 1,
+ "units": "W",
+ },
+ "caps_max_powercap": {
+ "object_path": "powercap/max_cap",
+ "poll_interval": 10000,
+ "scale": 1,
+ "units": "W",
+ },
+ "caps_min_powercap": {
+ "object_path": "powercap/min_cap",
+ "poll_interval": 10000,
+ "scale": 1,
+ "units": "W",
+ },
+ "caps_norm_powercap": {
+ "object_path": "powercap/n_cap",
+ "poll_interval": 10000,
+ "scale": 1,
+ "units": "W",
+ },
+ "caps_user_powerlimit": {
+ "object_path": "powercap/user_cap",
+ "poll_interval": 10000,
+ "scale": 1,
+ "units": "W",
+ },
},
- 'labels': {
- '176': {'object_path': 'temperature/cpu0/core0', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '177': {'object_path': 'temperature/cpu0/core1', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '178': {'object_path': 'temperature/cpu0/core2', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '179': {'object_path': 'temperature/cpu0/core3', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '180': {'object_path': 'temperature/cpu0/core4', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '181': {'object_path': 'temperature/cpu0/core5', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '182': {'object_path': 'temperature/cpu0/core6', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '183': {'object_path': 'temperature/cpu0/core7', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '184': {'object_path': 'temperature/cpu0/core8', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '185': {'object_path': 'temperature/cpu0/core9', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '186': {'object_path': 'temperature/cpu0/core10', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '187': {'object_path': 'temperature/cpu0/core11', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '102': {'object_path': 'temperature/dimm0', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C'},
- '103': {'object_path': 'temperature/dimm1', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C'},
- '104': {'object_path': 'temperature/dimm2', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C'},
- '105': {'object_path': 'temperature/dimm3', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C'},
- '106': {'object_path': 'temperature/dimm4', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C'},
- '107': {'object_path': 'temperature/dimm5', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C'},
- '108': {'object_path': 'temperature/dimm6', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C'},
- '109': {'object_path': 'temperature/dimm7', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C'},
- }
+ "labels": {
+ "176": {
+ "object_path": "temperature/cpu0/core0",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "177": {
+ "object_path": "temperature/cpu0/core1",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "178": {
+ "object_path": "temperature/cpu0/core2",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "179": {
+ "object_path": "temperature/cpu0/core3",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "180": {
+ "object_path": "temperature/cpu0/core4",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "181": {
+ "object_path": "temperature/cpu0/core5",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "182": {
+ "object_path": "temperature/cpu0/core6",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "183": {
+ "object_path": "temperature/cpu0/core7",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "184": {
+ "object_path": "temperature/cpu0/core8",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "185": {
+ "object_path": "temperature/cpu0/core9",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "186": {
+ "object_path": "temperature/cpu0/core10",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "187": {
+ "object_path": "temperature/cpu0/core11",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "102": {
+ "object_path": "temperature/dimm0",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ },
+ "103": {
+ "object_path": "temperature/dimm1",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ },
+ "104": {
+ "object_path": "temperature/dimm2",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ },
+ "105": {
+ "object_path": "temperature/dimm3",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ },
+ "106": {
+ "object_path": "temperature/dimm4",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ },
+ "107": {
+ "object_path": "temperature/dimm5",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ },
+ "108": {
+ "object_path": "temperature/dimm6",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ },
+ "109": {
+ "object_path": "temperature/dimm7",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ },
+ },
},
- '5-0050': {
- 'labels': {
- '188': {'object_path': 'temperature/cpu1/core0', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '189': {'object_path': 'temperature/cpu1/core1', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '190': {'object_path': 'temperature/cpu1/core2', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '191': {'object_path': 'temperature/cpu1/core3', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '192': {'object_path': 'temperature/cpu1/core4', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '193': {'object_path': 'temperature/cpu1/core5', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '194': {'object_path': 'temperature/cpu1/core6', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '195': {'object_path': 'temperature/cpu1/core7', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '196': {'object_path': 'temperature/cpu1/core8', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '197': {'object_path': 'temperature/cpu1/core9', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '198': {'object_path': 'temperature/cpu1/core10', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '199': {'object_path': 'temperature/cpu1/core11', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C',
- 'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
- 'warning_lower': -99, 'emergency_enabled': True},
- '110': {'object_path': 'temperature/dimm8', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C'},
- '111': {'object_path': 'temperature/dimm9', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C'},
- '112': {'object_path': 'temperature/dimm10', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C'},
- '113': {'object_path': 'temperature/dimm11', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C'},
- '114': {'object_path': 'temperature/dimm12', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C'},
- '115': {'object_path': 'temperature/dimm13', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C'},
- '116': {'object_path': 'temperature/dimm14', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C'},
- '117': {'object_path': 'temperature/dimm15', 'poll_interval': 5000, 'scale': -3,
- 'units': 'C'},
+ "5-0050": {
+ "labels": {
+ "188": {
+ "object_path": "temperature/cpu1/core0",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "189": {
+ "object_path": "temperature/cpu1/core1",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "190": {
+ "object_path": "temperature/cpu1/core2",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "191": {
+ "object_path": "temperature/cpu1/core3",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "192": {
+ "object_path": "temperature/cpu1/core4",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "193": {
+ "object_path": "temperature/cpu1/core5",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "194": {
+ "object_path": "temperature/cpu1/core6",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "195": {
+ "object_path": "temperature/cpu1/core7",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "196": {
+ "object_path": "temperature/cpu1/core8",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "197": {
+ "object_path": "temperature/cpu1/core9",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "198": {
+ "object_path": "temperature/cpu1/core10",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "199": {
+ "object_path": "temperature/cpu1/core11",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ "critical_upper": 100,
+ "critical_lower": -100,
+ "warning_upper": 90,
+ "warning_lower": -99,
+ "emergency_enabled": True,
+ },
+ "110": {
+ "object_path": "temperature/dimm8",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ },
+ "111": {
+ "object_path": "temperature/dimm9",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ },
+ "112": {
+ "object_path": "temperature/dimm10",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ },
+ "113": {
+ "object_path": "temperature/dimm11",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ },
+ "114": {
+ "object_path": "temperature/dimm12",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ },
+ "115": {
+ "object_path": "temperature/dimm13",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ },
+ "116": {
+ "object_path": "temperature/dimm14",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ },
+ "117": {
+ "object_path": "temperature/dimm15",
+ "poll_interval": 5000,
+ "scale": -3,
+ "units": "C",
+ },
}
},
}
GPIO_CONFIGS = {
- 'power_config': {
- 'power_good_in': 'SYS_PWROK_BUFF',
- 'power_up_outs': [
- ('SOFTWARE_PGOOD', True),
- ('BMC_POWER_UP', True),
+ "power_config": {
+ "power_good_in": "SYS_PWROK_BUFF",
+ "power_up_outs": [
+ ("SOFTWARE_PGOOD", True),
+ ("BMC_POWER_UP", True),
],
- 'reset_outs': [
- ('BMC_CP0_RESET_N', False),
- ('BMC_CP0_PERST_ENABLE_R', False),
+ "reset_outs": [
+ ("BMC_CP0_RESET_N", False),
+ ("BMC_CP0_PERST_ENABLE_R", False),
],
},
- 'hostctl_config': {
- 'fsi_data': 'FSI_DATA',
- 'fsi_clk': 'FSI_CLK',
- 'fsi_enable': 'FSI_ENABLE',
- 'cronus_sel': 'CRONUS_SEL',
- 'optionals': [
- ],
+ "hostctl_config": {
+ "fsi_data": "FSI_DATA",
+ "fsi_clk": "FSI_CLK",
+ "fsi_enable": "FSI_ENABLE",
+ "cronus_sel": "CRONUS_SEL",
+ "optionals": [],
},
}
@@ -473,14 +946,18 @@
# Miscellaneous non-poll sensor with system specific properties.
# The sensor id is the same as those defined in ID_LOOKUP['SENSOR'].
MISC_SENSORS = {
- 0x8b: {'class': 'BootCountSensor'},
- 0x02: {'class': 'BootProgressSensor'},
+ 0x8B: {"class": "BootCountSensor"},
+ 0x02: {"class": "BootProgressSensor"},
# OCC active sensors aren't in the P9 XML yet. These are wrong.
- 0x03: {'class': 'OccStatusSensor',
- 'os_path': '/sys/bus/i2c/devices/3-0050/online'},
- 0x04: {'class': 'OccStatusSensor',
- 'os_path': '/sys/bus/i2c/devices/3-0051/online'},
- 0x91: {'class': 'OperatingSystemStatusSensor'},
+ 0x03: {
+ "class": "OccStatusSensor",
+ "os_path": "/sys/bus/i2c/devices/3-0050/online",
+ },
+ 0x04: {
+ "class": "OccStatusSensor",
+ "os_path": "/sys/bus/i2c/devices/3-0051/online",
+ },
+ 0x91: {"class": "OperatingSystemStatusSensor"},
# 0x06 : { 'class' : 'PowerCap',
# 'os_path' : '/sys/class/hwmon/hwmon3/user_powercap' },
}
diff --git a/data/Witherspoon.py b/data/Witherspoon.py
index e106eb9..a33293c 100755
--- a/data/Witherspoon.py
+++ b/data/Witherspoon.py
@@ -5,363 +5,584 @@
# - a process emits a GotoSystemState signal with state name to goto
# - objects specified in EXIT_STATE_DEPEND have started
SYSTEM_STATES = [
- 'BASE_APPS',
- 'BMC_STARTING',
- 'BMC_READY',
- 'HOST_POWERING_ON',
- 'HOST_POWERED_ON',
- 'HOST_BOOTING',
- 'HOST_BOOTED',
- 'HOST_POWERED_OFF',
+ "BASE_APPS",
+ "BMC_STARTING",
+ "BMC_READY",
+ "HOST_POWERING_ON",
+ "HOST_POWERED_ON",
+ "HOST_BOOTING",
+ "HOST_BOOTED",
+ "HOST_POWERED_OFF",
]
EXIT_STATE_DEPEND = {
- 'BASE_APPS': {
- '/org/openbmc/sensors': 0,
+ "BASE_APPS": {
+ "/org/openbmc/sensors": 0,
},
- 'BMC_STARTING': {
- '/org/openbmc/control/chassis0': 0,
- '/org/openbmc/control/power0': 0,
- '/org/openbmc/control/flash/bios': 0,
+ "BMC_STARTING": {
+ "/org/openbmc/control/chassis0": 0,
+ "/org/openbmc/control/power0": 0,
+ "/org/openbmc/control/flash/bios": 0,
},
}
FRU_INSTANCES = {
- '<inventory_root>/system': {'fru_type': 'SYSTEM', 'is_fru': True, 'present': "True"},
- '<inventory_root>/system/bios': {'fru_type': 'SYSTEM', 'is_fru': True, 'present': "True"},
- '<inventory_root>/system/misc': {'fru_type': 'SYSTEM', 'is_fru': False, },
-
- '<inventory_root>/system/chassis': {'fru_type': 'SYSTEM', 'is_fru': True, 'present': "True"},
-
- '<inventory_root>/system/chassis/motherboard': {'fru_type': 'MAIN_PLANAR', 'is_fru': True, },
-
- '<inventory_root>/system/systemevent': {'fru_type': 'SYSTEM_EVENT', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/refclock': {'fru_type': 'MAIN_PLANAR', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/pcieclock': {'fru_type': 'MAIN_PLANAR', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/todclock': {'fru_type': 'MAIN_PLANAR', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/apss': {'fru_type': 'MAIN_PLANAR', 'is_fru': False, },
-
- '<inventory_root>/system/chassis/fan0': {'fru_type': 'FAN', 'is_fru': True, },
- '<inventory_root>/system/chassis/fan1': {'fru_type': 'FAN', 'is_fru': True, },
- '<inventory_root>/system/chassis/fan2': {'fru_type': 'FAN', 'is_fru': True, },
- '<inventory_root>/system/chassis/fan3': {'fru_type': 'FAN', 'is_fru': True, },
-
- '<inventory_root>/system/chassis/motherboard/bmc': {'fru_type': 'BMC', 'is_fru': False,
- 'manufacturer': 'ASPEED'},
-
- '<inventory_root>/system/chassis/motherboard/cpu0': {'fru_type': 'CPU', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/cpu1': {'fru_type': 'CPU', 'is_fru': True, },
-
- '<inventory_root>/system/chassis/motherboard/cpu0/core0': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core1': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core2': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core3': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core4': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core5': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core6': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core7': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core8': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core9': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core10': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu0/core11': {'fru_type': 'CORE', 'is_fru': False, },
-
- '<inventory_root>/system/chassis/motherboard/cpu1/core0': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core1': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core2': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core3': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core4': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core5': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core6': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core7': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core8': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core9': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core10': {'fru_type': 'CORE', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/cpu1/core11': {'fru_type': 'CORE', 'is_fru': False, },
-
- '<inventory_root>/system/chassis/motherboard/membuf0': {'fru_type': 'MEMORY_BUFFER', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/membuf1': {'fru_type': 'MEMORY_BUFFER', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/membuf2': {'fru_type': 'MEMORY_BUFFER', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/membuf3': {'fru_type': 'MEMORY_BUFFER', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/membuf4': {'fru_type': 'MEMORY_BUFFER', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/membuf5': {'fru_type': 'MEMORY_BUFFER', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/membuf6': {'fru_type': 'MEMORY_BUFFER', 'is_fru': False, },
- '<inventory_root>/system/chassis/motherboard/membuf7': {'fru_type': 'MEMORY_BUFFER', 'is_fru': False, },
-
- '<inventory_root>/system/chassis/motherboard/dimm0': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm1': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm2': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm3': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm4': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm5': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm6': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm7': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm8': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm9': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm10': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm11': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm12': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm13': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm14': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm15': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm16': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm17': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm18': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm19': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm20': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm21': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm22': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm23': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm24': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm25': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm26': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm27': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm28': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm29': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm30': {'fru_type': 'DIMM', 'is_fru': True, },
- '<inventory_root>/system/chassis/motherboard/dimm31': {'fru_type': 'DIMM', 'is_fru': True, },
+ "<inventory_root>/system": {
+ "fru_type": "SYSTEM",
+ "is_fru": True,
+ "present": "True",
+ },
+ "<inventory_root>/system/bios": {
+ "fru_type": "SYSTEM",
+ "is_fru": True,
+ "present": "True",
+ },
+ "<inventory_root>/system/misc": {
+ "fru_type": "SYSTEM",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis": {
+ "fru_type": "SYSTEM",
+ "is_fru": True,
+ "present": "True",
+ },
+ "<inventory_root>/system/chassis/motherboard": {
+ "fru_type": "MAIN_PLANAR",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/systemevent": {
+ "fru_type": "SYSTEM_EVENT",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/refclock": {
+ "fru_type": "MAIN_PLANAR",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/pcieclock": {
+ "fru_type": "MAIN_PLANAR",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/todclock": {
+ "fru_type": "MAIN_PLANAR",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/apss": {
+ "fru_type": "MAIN_PLANAR",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/fan0": {
+ "fru_type": "FAN",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/fan1": {
+ "fru_type": "FAN",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/fan2": {
+ "fru_type": "FAN",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/fan3": {
+ "fru_type": "FAN",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/bmc": {
+ "fru_type": "BMC",
+ "is_fru": False,
+ "manufacturer": "ASPEED",
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0": {
+ "fru_type": "CPU",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1": {
+ "fru_type": "CPU",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core0": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core1": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core2": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core3": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core4": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core5": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core6": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core7": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core8": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core9": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core10": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu0/core11": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core0": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core1": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core2": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core3": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core4": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core5": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core6": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core7": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core8": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core9": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core10": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/cpu1/core11": {
+ "fru_type": "CORE",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/membuf0": {
+ "fru_type": "MEMORY_BUFFER",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/membuf1": {
+ "fru_type": "MEMORY_BUFFER",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/membuf2": {
+ "fru_type": "MEMORY_BUFFER",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/membuf3": {
+ "fru_type": "MEMORY_BUFFER",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/membuf4": {
+ "fru_type": "MEMORY_BUFFER",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/membuf5": {
+ "fru_type": "MEMORY_BUFFER",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/membuf6": {
+ "fru_type": "MEMORY_BUFFER",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/membuf7": {
+ "fru_type": "MEMORY_BUFFER",
+ "is_fru": False,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm0": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm1": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm2": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm3": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm4": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm5": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm6": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm7": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm8": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm9": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm10": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm11": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm12": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm13": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm14": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm15": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm16": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm17": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm18": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm19": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm20": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm21": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm22": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm23": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm24": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm25": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm26": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm27": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm28": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm29": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm30": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
+ "<inventory_root>/system/chassis/motherboard/dimm31": {
+ "fru_type": "DIMM",
+ "is_fru": True,
+ },
}
ID_LOOKUP = {
- 'FRU': {
- 0x01: '<inventory_root>/system/chassis/motherboard/cpu0',
- 0x02: '<inventory_root>/system/chassis/motherboard/cpu1',
- 0x03: '<inventory_root>/system/chassis/motherboard',
- 0x04: '<inventory_root>/system/chassis/motherboard/membuf0',
- 0x05: '<inventory_root>/system/chassis/motherboard/membuf1',
- 0x06: '<inventory_root>/system/chassis/motherboard/membuf2',
- 0x07: '<inventory_root>/system/chassis/motherboard/membuf3',
- 0x08: '<inventory_root>/system/chassis/motherboard/membuf4',
- 0x09: '<inventory_root>/system/chassis/motherboard/membuf5',
- 0x0c: '<inventory_root>/system/chassis/motherboard/dimm0',
- 0x0d: '<inventory_root>/system/chassis/motherboard/dimm1',
- 0x0e: '<inventory_root>/system/chassis/motherboard/dimm2',
- 0x0f: '<inventory_root>/system/chassis/motherboard/dimm3',
- 0x10: '<inventory_root>/system/chassis/motherboard/dimm4',
- 0x11: '<inventory_root>/system/chassis/motherboard/dimm5',
- 0x12: '<inventory_root>/system/chassis/motherboard/dimm6',
- 0x13: '<inventory_root>/system/chassis/motherboard/dimm7',
- 0x14: '<inventory_root>/system/chassis/motherboard/dimm8',
- 0x15: '<inventory_root>/system/chassis/motherboard/dimm9',
- 0x16: '<inventory_root>/system/chassis/motherboard/dimm10',
- 0x17: '<inventory_root>/system/chassis/motherboard/dimm11',
- 0x18: '<inventory_root>/system/chassis/motherboard/dimm12',
- 0x19: '<inventory_root>/system/chassis/motherboard/dimm13',
- 0x1a: '<inventory_root>/system/chassis/motherboard/dimm14',
- 0x1b: '<inventory_root>/system/chassis/motherboard/dimm15',
- 0x1c: '<inventory_root>/system/chassis/motherboard/dimm16',
- 0x1d: '<inventory_root>/system/chassis/motherboard/dimm17',
- 0x1e: '<inventory_root>/system/chassis/motherboard/dimm18',
- 0x1f: '<inventory_root>/system/chassis/motherboard/dimm19',
- 0x20: '<inventory_root>/system/chassis/motherboard/dimm20',
- 0x21: '<inventory_root>/system/chassis/motherboard/dimm21',
- 0x22: '<inventory_root>/system/chassis/motherboard/dimm22',
- 0x23: '<inventory_root>/system/chassis/motherboard/dimm23',
- 0x24: '<inventory_root>/system/chassis/motherboard/dimm24',
- 0x25: '<inventory_root>/system/chassis/motherboard/dimm25',
- 0x26: '<inventory_root>/system/chassis/motherboard/dimm26',
- 0x27: '<inventory_root>/system/chassis/motherboard/dimm27',
- 0x28: '<inventory_root>/system/chassis/motherboard/dimm28',
- 0x29: '<inventory_root>/system/chassis/motherboard/dimm29',
- 0x2a: '<inventory_root>/system/chassis/motherboard/dimm30',
- 0x2b: '<inventory_root>/system/chassis/motherboard/dimm31',
+ "FRU": {
+ 0x01: "<inventory_root>/system/chassis/motherboard/cpu0",
+ 0x02: "<inventory_root>/system/chassis/motherboard/cpu1",
+ 0x03: "<inventory_root>/system/chassis/motherboard",
+ 0x04: "<inventory_root>/system/chassis/motherboard/membuf0",
+ 0x05: "<inventory_root>/system/chassis/motherboard/membuf1",
+ 0x06: "<inventory_root>/system/chassis/motherboard/membuf2",
+ 0x07: "<inventory_root>/system/chassis/motherboard/membuf3",
+ 0x08: "<inventory_root>/system/chassis/motherboard/membuf4",
+ 0x09: "<inventory_root>/system/chassis/motherboard/membuf5",
+ 0x0C: "<inventory_root>/system/chassis/motherboard/dimm0",
+ 0x0D: "<inventory_root>/system/chassis/motherboard/dimm1",
+ 0x0E: "<inventory_root>/system/chassis/motherboard/dimm2",
+ 0x0F: "<inventory_root>/system/chassis/motherboard/dimm3",
+ 0x10: "<inventory_root>/system/chassis/motherboard/dimm4",
+ 0x11: "<inventory_root>/system/chassis/motherboard/dimm5",
+ 0x12: "<inventory_root>/system/chassis/motherboard/dimm6",
+ 0x13: "<inventory_root>/system/chassis/motherboard/dimm7",
+ 0x14: "<inventory_root>/system/chassis/motherboard/dimm8",
+ 0x15: "<inventory_root>/system/chassis/motherboard/dimm9",
+ 0x16: "<inventory_root>/system/chassis/motherboard/dimm10",
+ 0x17: "<inventory_root>/system/chassis/motherboard/dimm11",
+ 0x18: "<inventory_root>/system/chassis/motherboard/dimm12",
+ 0x19: "<inventory_root>/system/chassis/motherboard/dimm13",
+ 0x1A: "<inventory_root>/system/chassis/motherboard/dimm14",
+ 0x1B: "<inventory_root>/system/chassis/motherboard/dimm15",
+ 0x1C: "<inventory_root>/system/chassis/motherboard/dimm16",
+ 0x1D: "<inventory_root>/system/chassis/motherboard/dimm17",
+ 0x1E: "<inventory_root>/system/chassis/motherboard/dimm18",
+ 0x1F: "<inventory_root>/system/chassis/motherboard/dimm19",
+ 0x20: "<inventory_root>/system/chassis/motherboard/dimm20",
+ 0x21: "<inventory_root>/system/chassis/motherboard/dimm21",
+ 0x22: "<inventory_root>/system/chassis/motherboard/dimm22",
+ 0x23: "<inventory_root>/system/chassis/motherboard/dimm23",
+ 0x24: "<inventory_root>/system/chassis/motherboard/dimm24",
+ 0x25: "<inventory_root>/system/chassis/motherboard/dimm25",
+ 0x26: "<inventory_root>/system/chassis/motherboard/dimm26",
+ 0x27: "<inventory_root>/system/chassis/motherboard/dimm27",
+ 0x28: "<inventory_root>/system/chassis/motherboard/dimm28",
+ 0x29: "<inventory_root>/system/chassis/motherboard/dimm29",
+ 0x2A: "<inventory_root>/system/chassis/motherboard/dimm30",
+ 0x2B: "<inventory_root>/system/chassis/motherboard/dimm31",
},
- 'FRU_STR': {
- 'PRODUCT_0': '<inventory_root>/system/bios',
- 'BOARD_1': '<inventory_root>/system/chassis/motherboard/cpu0',
- 'BOARD_2': '<inventory_root>/system/chassis/motherboard/cpu1',
- 'CHASSIS_3': '<inventory_root>/system/chassis/motherboard',
- 'BOARD_3': '<inventory_root>/system/misc',
- 'BOARD_4': '<inventory_root>/system/chassis/motherboard/membuf0',
- 'BOARD_5': '<inventory_root>/system/chassis/motherboard/membuf1',
- 'BOARD_6': '<inventory_root>/system/chassis/motherboard/membuf2',
- 'BOARD_7': '<inventory_root>/system/chassis/motherboard/membuf3',
- 'BOARD_8': '<inventory_root>/system/chassis/motherboard/membuf4',
- 'BOARD_9': '<inventory_root>/system/chassis/motherboard/membuf5',
- 'BOARD_10': '<inventory_root>/system/chassis/motherboard/membuf6',
- 'BOARD_11': '<inventory_root>/system/chassis/motherboard/membuf7',
- 'PRODUCT_12': '<inventory_root>/system/chassis/motherboard/dimm0',
- 'PRODUCT_13': '<inventory_root>/system/chassis/motherboard/dimm1',
- 'PRODUCT_14': '<inventory_root>/system/chassis/motherboard/dimm2',
- 'PRODUCT_15': '<inventory_root>/system/chassis/motherboard/dimm3',
- 'PRODUCT_16': '<inventory_root>/system/chassis/motherboard/dimm4',
- 'PRODUCT_17': '<inventory_root>/system/chassis/motherboard/dimm5',
- 'PRODUCT_18': '<inventory_root>/system/chassis/motherboard/dimm6',
- 'PRODUCT_19': '<inventory_root>/system/chassis/motherboard/dimm7',
- 'PRODUCT_20': '<inventory_root>/system/chassis/motherboard/dimm8',
- 'PRODUCT_21': '<inventory_root>/system/chassis/motherboard/dimm9',
- 'PRODUCT_22': '<inventory_root>/system/chassis/motherboard/dimm10',
- 'PRODUCT_23': '<inventory_root>/system/chassis/motherboard/dimm11',
- 'PRODUCT_24': '<inventory_root>/system/chassis/motherboard/dimm12',
- 'PRODUCT_25': '<inventory_root>/system/chassis/motherboard/dimm13',
- 'PRODUCT_26': '<inventory_root>/system/chassis/motherboard/dimm14',
- 'PRODUCT_27': '<inventory_root>/system/chassis/motherboard/dimm15',
- 'PRODUCT_28': '<inventory_root>/system/chassis/motherboard/dimm16',
- 'PRODUCT_29': '<inventory_root>/system/chassis/motherboard/dimm17',
- 'PRODUCT_30': '<inventory_root>/system/chassis/motherboard/dimm18',
- 'PRODUCT_31': '<inventory_root>/system/chassis/motherboard/dimm19',
- 'PRODUCT_32': '<inventory_root>/system/chassis/motherboard/dimm20',
- 'PRODUCT_33': '<inventory_root>/system/chassis/motherboard/dimm21',
- 'PRODUCT_34': '<inventory_root>/system/chassis/motherboard/dimm22',
- 'PRODUCT_35': '<inventory_root>/system/chassis/motherboard/dimm23',
- 'PRODUCT_36': '<inventory_root>/system/chassis/motherboard/dimm24',
- 'PRODUCT_37': '<inventory_root>/system/chassis/motherboard/dimm25',
- 'PRODUCT_38': '<inventory_root>/system/chassis/motherboard/dimm26',
- 'PRODUCT_39': '<inventory_root>/system/chassis/motherboard/dimm27',
- 'PRODUCT_40': '<inventory_root>/system/chassis/motherboard/dimm28',
- 'PRODUCT_41': '<inventory_root>/system/chassis/motherboard/dimm29',
- 'PRODUCT_42': '<inventory_root>/system/chassis/motherboard/dimm30',
- 'PRODUCT_43': '<inventory_root>/system/chassis/motherboard/dimm31',
- 'PRODUCT_47': '<inventory_root>/system/misc',
+ "FRU_STR": {
+ "PRODUCT_0": "<inventory_root>/system/bios",
+ "BOARD_1": "<inventory_root>/system/chassis/motherboard/cpu0",
+ "BOARD_2": "<inventory_root>/system/chassis/motherboard/cpu1",
+ "CHASSIS_3": "<inventory_root>/system/chassis/motherboard",
+ "BOARD_3": "<inventory_root>/system/misc",
+ "BOARD_4": "<inventory_root>/system/chassis/motherboard/membuf0",
+ "BOARD_5": "<inventory_root>/system/chassis/motherboard/membuf1",
+ "BOARD_6": "<inventory_root>/system/chassis/motherboard/membuf2",
+ "BOARD_7": "<inventory_root>/system/chassis/motherboard/membuf3",
+ "BOARD_8": "<inventory_root>/system/chassis/motherboard/membuf4",
+ "BOARD_9": "<inventory_root>/system/chassis/motherboard/membuf5",
+ "BOARD_10": "<inventory_root>/system/chassis/motherboard/membuf6",
+ "BOARD_11": "<inventory_root>/system/chassis/motherboard/membuf7",
+ "PRODUCT_12": "<inventory_root>/system/chassis/motherboard/dimm0",
+ "PRODUCT_13": "<inventory_root>/system/chassis/motherboard/dimm1",
+ "PRODUCT_14": "<inventory_root>/system/chassis/motherboard/dimm2",
+ "PRODUCT_15": "<inventory_root>/system/chassis/motherboard/dimm3",
+ "PRODUCT_16": "<inventory_root>/system/chassis/motherboard/dimm4",
+ "PRODUCT_17": "<inventory_root>/system/chassis/motherboard/dimm5",
+ "PRODUCT_18": "<inventory_root>/system/chassis/motherboard/dimm6",
+ "PRODUCT_19": "<inventory_root>/system/chassis/motherboard/dimm7",
+ "PRODUCT_20": "<inventory_root>/system/chassis/motherboard/dimm8",
+ "PRODUCT_21": "<inventory_root>/system/chassis/motherboard/dimm9",
+ "PRODUCT_22": "<inventory_root>/system/chassis/motherboard/dimm10",
+ "PRODUCT_23": "<inventory_root>/system/chassis/motherboard/dimm11",
+ "PRODUCT_24": "<inventory_root>/system/chassis/motherboard/dimm12",
+ "PRODUCT_25": "<inventory_root>/system/chassis/motherboard/dimm13",
+ "PRODUCT_26": "<inventory_root>/system/chassis/motherboard/dimm14",
+ "PRODUCT_27": "<inventory_root>/system/chassis/motherboard/dimm15",
+ "PRODUCT_28": "<inventory_root>/system/chassis/motherboard/dimm16",
+ "PRODUCT_29": "<inventory_root>/system/chassis/motherboard/dimm17",
+ "PRODUCT_30": "<inventory_root>/system/chassis/motherboard/dimm18",
+ "PRODUCT_31": "<inventory_root>/system/chassis/motherboard/dimm19",
+ "PRODUCT_32": "<inventory_root>/system/chassis/motherboard/dimm20",
+ "PRODUCT_33": "<inventory_root>/system/chassis/motherboard/dimm21",
+ "PRODUCT_34": "<inventory_root>/system/chassis/motherboard/dimm22",
+ "PRODUCT_35": "<inventory_root>/system/chassis/motherboard/dimm23",
+ "PRODUCT_36": "<inventory_root>/system/chassis/motherboard/dimm24",
+ "PRODUCT_37": "<inventory_root>/system/chassis/motherboard/dimm25",
+ "PRODUCT_38": "<inventory_root>/system/chassis/motherboard/dimm26",
+ "PRODUCT_39": "<inventory_root>/system/chassis/motherboard/dimm27",
+ "PRODUCT_40": "<inventory_root>/system/chassis/motherboard/dimm28",
+ "PRODUCT_41": "<inventory_root>/system/chassis/motherboard/dimm29",
+ "PRODUCT_42": "<inventory_root>/system/chassis/motherboard/dimm30",
+ "PRODUCT_43": "<inventory_root>/system/chassis/motherboard/dimm31",
+ "PRODUCT_47": "<inventory_root>/system/misc",
},
- 'SENSOR': {
- 0x02: '/org/openbmc/sensors/host/HostStatus',
- 0x03: '/org/openbmc/sensors/host/BootProgress',
- 0x5a: '<inventory_root>/system/chassis/motherboard/cpu0',
- 0xa4: '<inventory_root>/system/chassis/motherboard/cpu1',
- 0x1e: '<inventory_root>/system/chassis/motherboard/dimm3',
- 0x1f: '<inventory_root>/system/chassis/motherboard/dimm2',
- 0x20: '<inventory_root>/system/chassis/motherboard/dimm1',
- 0x21: '<inventory_root>/system/chassis/motherboard/dimm0',
- 0x22: '<inventory_root>/system/chassis/motherboard/dimm7',
- 0x23: '<inventory_root>/system/chassis/motherboard/dimm6',
- 0x24: '<inventory_root>/system/chassis/motherboard/dimm5',
- 0x25: '<inventory_root>/system/chassis/motherboard/dimm4',
- 0x26: '<inventory_root>/system/chassis/motherboard/dimm11',
- 0x27: '<inventory_root>/system/chassis/motherboard/dimm10',
- 0x28: '<inventory_root>/system/chassis/motherboard/dimm9',
- 0x29: '<inventory_root>/system/chassis/motherboard/dimm8',
- 0x2a: '<inventory_root>/system/chassis/motherboard/dimm15',
- 0x2b: '<inventory_root>/system/chassis/motherboard/dimm14',
- 0x2c: '<inventory_root>/system/chassis/motherboard/dimm13',
- 0x2d: '<inventory_root>/system/chassis/motherboard/dimm12',
- 0x2e: '<inventory_root>/system/chassis/motherboard/dimm19',
- 0x2f: '<inventory_root>/system/chassis/motherboard/dimm18',
- 0x30: '<inventory_root>/system/chassis/motherboard/dimm17',
- 0x31: '<inventory_root>/system/chassis/motherboard/dimm16',
- 0x32: '<inventory_root>/system/chassis/motherboard/dimm23',
- 0x33: '<inventory_root>/system/chassis/motherboard/dimm22',
- 0x34: '<inventory_root>/system/chassis/motherboard/dimm21',
- 0x35: '<inventory_root>/system/chassis/motherboard/dimm20',
- 0x36: '<inventory_root>/system/chassis/motherboard/dimm27',
- 0x37: '<inventory_root>/system/chassis/motherboard/dimm26',
- 0x38: '<inventory_root>/system/chassis/motherboard/dimm25',
- 0x39: '<inventory_root>/system/chassis/motherboard/dimm24',
- 0x3a: '<inventory_root>/system/chassis/motherboard/dimm31',
- 0x3b: '<inventory_root>/system/chassis/motherboard/dimm30',
- 0x3c: '<inventory_root>/system/chassis/motherboard/dimm29',
- 0x3d: '<inventory_root>/system/chassis/motherboard/dimm28',
- 0x3e: '<inventory_root>/system/chassis/motherboard/cpu0/core0',
- 0x3f: '<inventory_root>/system/chassis/motherboard/cpu0/core1',
- 0x40: '<inventory_root>/system/chassis/motherboard/cpu0/core2',
- 0x41: '<inventory_root>/system/chassis/motherboard/cpu0/core3',
- 0x42: '<inventory_root>/system/chassis/motherboard/cpu0/core4',
- 0x43: '<inventory_root>/system/chassis/motherboard/cpu0/core5',
- 0x44: '<inventory_root>/system/chassis/motherboard/cpu0/core6',
- 0x45: '<inventory_root>/system/chassis/motherboard/cpu0/core7',
- 0x46: '<inventory_root>/system/chassis/motherboard/cpu0/core8',
- 0x47: '<inventory_root>/system/chassis/motherboard/cpu0/core9',
- 0x48: '<inventory_root>/system/chassis/motherboard/cpu0/core10',
- 0x49: '<inventory_root>/system/chassis/motherboard/cpu0/core11',
- 0x4a: '<inventory_root>/system/chassis/motherboard/cpu1/core0',
- 0x4b: '<inventory_root>/system/chassis/motherboard/cpu1/core1',
- 0x4c: '<inventory_root>/system/chassis/motherboard/cpu1/core2',
- 0x4d: '<inventory_root>/system/chassis/motherboard/cpu1/core3',
- 0x4e: '<inventory_root>/system/chassis/motherboard/cpu1/core4',
- 0x4f: '<inventory_root>/system/chassis/motherboard/cpu1/core5',
- 0x50: '<inventory_root>/system/chassis/motherboard/cpu1/core6',
- 0x51: '<inventory_root>/system/chassis/motherboard/cpu1/core7',
- 0x52: '<inventory_root>/system/chassis/motherboard/cpu1/core8',
- 0x53: '<inventory_root>/system/chassis/motherboard/cpu1/core9',
- 0x54: '<inventory_root>/system/chassis/motherboard/cpu1/core10',
- 0x55: '<inventory_root>/system/chassis/motherboard/cpu1/core11',
- 0x56: '<inventory_root>/system/chassis/motherboard/membuf0',
- 0x57: '<inventory_root>/system/chassis/motherboard/membuf1',
- 0x58: '<inventory_root>/system/chassis/motherboard/membuf2',
- 0x59: '<inventory_root>/system/chassis/motherboard/membuf3',
- 0x5a: '<inventory_root>/system/chassis/motherboard/membuf4',
- 0x5b: '<inventory_root>/system/chassis/motherboard/membuf5',
- 0x5c: '<inventory_root>/system/chassis/motherboard/membuf6',
- 0x5d: '<inventory_root>/system/chassis/motherboard/membuf7',
- 0x07: '/org/openbmc/sensors/host/BootCount',
- 0x0c: '<inventory_root>/system/chassis/motherboard',
- 0x01: '<inventory_root>/system/systemevent',
- 0x08: '<inventory_root>/system/powerlimit',
- 0x0d: '<inventory_root>/system/chassis/motherboard/refclock',
- 0x0e: '<inventory_root>/system/chassis/motherboard/pcieclock',
- 0x0f: '<inventory_root>/system/chassis/motherboard/todclock',
- 0x10: '<inventory_root>/system/chassis/motherboard/apss',
- 0x02: '/org/openbmc/sensors/host/OperatingSystemStatus',
- 0x04: '<inventory_root>/system/chassis/motherboard/pcielink',
- 0x0b: '/xyz/openbmc_project/sensors/chassis/PowerSupplyRedundancy',
- 0xda: '/org/openbmc/sensors/host/TurboAllowed',
- 0xD8: '/org/openbmc/sensors/host/PowerSupplyDerating',
+ "SENSOR": {
+ 0x02: "/org/openbmc/sensors/host/HostStatus",
+ 0x03: "/org/openbmc/sensors/host/BootProgress",
+ 0x5A: "<inventory_root>/system/chassis/motherboard/cpu0",
+ 0xA4: "<inventory_root>/system/chassis/motherboard/cpu1",
+ 0x1E: "<inventory_root>/system/chassis/motherboard/dimm3",
+ 0x1F: "<inventory_root>/system/chassis/motherboard/dimm2",
+ 0x20: "<inventory_root>/system/chassis/motherboard/dimm1",
+ 0x21: "<inventory_root>/system/chassis/motherboard/dimm0",
+ 0x22: "<inventory_root>/system/chassis/motherboard/dimm7",
+ 0x23: "<inventory_root>/system/chassis/motherboard/dimm6",
+ 0x24: "<inventory_root>/system/chassis/motherboard/dimm5",
+ 0x25: "<inventory_root>/system/chassis/motherboard/dimm4",
+ 0x26: "<inventory_root>/system/chassis/motherboard/dimm11",
+ 0x27: "<inventory_root>/system/chassis/motherboard/dimm10",
+ 0x28: "<inventory_root>/system/chassis/motherboard/dimm9",
+ 0x29: "<inventory_root>/system/chassis/motherboard/dimm8",
+ 0x2A: "<inventory_root>/system/chassis/motherboard/dimm15",
+ 0x2B: "<inventory_root>/system/chassis/motherboard/dimm14",
+ 0x2C: "<inventory_root>/system/chassis/motherboard/dimm13",
+ 0x2D: "<inventory_root>/system/chassis/motherboard/dimm12",
+ 0x2E: "<inventory_root>/system/chassis/motherboard/dimm19",
+ 0x2F: "<inventory_root>/system/chassis/motherboard/dimm18",
+ 0x30: "<inventory_root>/system/chassis/motherboard/dimm17",
+ 0x31: "<inventory_root>/system/chassis/motherboard/dimm16",
+ 0x32: "<inventory_root>/system/chassis/motherboard/dimm23",
+ 0x33: "<inventory_root>/system/chassis/motherboard/dimm22",
+ 0x34: "<inventory_root>/system/chassis/motherboard/dimm21",
+ 0x35: "<inventory_root>/system/chassis/motherboard/dimm20",
+ 0x36: "<inventory_root>/system/chassis/motherboard/dimm27",
+ 0x37: "<inventory_root>/system/chassis/motherboard/dimm26",
+ 0x38: "<inventory_root>/system/chassis/motherboard/dimm25",
+ 0x39: "<inventory_root>/system/chassis/motherboard/dimm24",
+ 0x3A: "<inventory_root>/system/chassis/motherboard/dimm31",
+ 0x3B: "<inventory_root>/system/chassis/motherboard/dimm30",
+ 0x3C: "<inventory_root>/system/chassis/motherboard/dimm29",
+ 0x3D: "<inventory_root>/system/chassis/motherboard/dimm28",
+ 0x3E: "<inventory_root>/system/chassis/motherboard/cpu0/core0",
+ 0x3F: "<inventory_root>/system/chassis/motherboard/cpu0/core1",
+ 0x40: "<inventory_root>/system/chassis/motherboard/cpu0/core2",
+ 0x41: "<inventory_root>/system/chassis/motherboard/cpu0/core3",
+ 0x42: "<inventory_root>/system/chassis/motherboard/cpu0/core4",
+ 0x43: "<inventory_root>/system/chassis/motherboard/cpu0/core5",
+ 0x44: "<inventory_root>/system/chassis/motherboard/cpu0/core6",
+ 0x45: "<inventory_root>/system/chassis/motherboard/cpu0/core7",
+ 0x46: "<inventory_root>/system/chassis/motherboard/cpu0/core8",
+ 0x47: "<inventory_root>/system/chassis/motherboard/cpu0/core9",
+ 0x48: "<inventory_root>/system/chassis/motherboard/cpu0/core10",
+ 0x49: "<inventory_root>/system/chassis/motherboard/cpu0/core11",
+ 0x4A: "<inventory_root>/system/chassis/motherboard/cpu1/core0",
+ 0x4B: "<inventory_root>/system/chassis/motherboard/cpu1/core1",
+ 0x4C: "<inventory_root>/system/chassis/motherboard/cpu1/core2",
+ 0x4D: "<inventory_root>/system/chassis/motherboard/cpu1/core3",
+ 0x4E: "<inventory_root>/system/chassis/motherboard/cpu1/core4",
+ 0x4F: "<inventory_root>/system/chassis/motherboard/cpu1/core5",
+ 0x50: "<inventory_root>/system/chassis/motherboard/cpu1/core6",
+ 0x51: "<inventory_root>/system/chassis/motherboard/cpu1/core7",
+ 0x52: "<inventory_root>/system/chassis/motherboard/cpu1/core8",
+ 0x53: "<inventory_root>/system/chassis/motherboard/cpu1/core9",
+ 0x54: "<inventory_root>/system/chassis/motherboard/cpu1/core10",
+ 0x55: "<inventory_root>/system/chassis/motherboard/cpu1/core11",
+ 0x56: "<inventory_root>/system/chassis/motherboard/membuf0",
+ 0x57: "<inventory_root>/system/chassis/motherboard/membuf1",
+ 0x58: "<inventory_root>/system/chassis/motherboard/membuf2",
+ 0x59: "<inventory_root>/system/chassis/motherboard/membuf3",
+ 0x5A: "<inventory_root>/system/chassis/motherboard/membuf4",
+ 0x5B: "<inventory_root>/system/chassis/motherboard/membuf5",
+ 0x5C: "<inventory_root>/system/chassis/motherboard/membuf6",
+ 0x5D: "<inventory_root>/system/chassis/motherboard/membuf7",
+ 0x07: "/org/openbmc/sensors/host/BootCount",
+ 0x0C: "<inventory_root>/system/chassis/motherboard",
+ 0x01: "<inventory_root>/system/systemevent",
+ 0x08: "<inventory_root>/system/powerlimit",
+ 0x0D: "<inventory_root>/system/chassis/motherboard/refclock",
+ 0x0E: "<inventory_root>/system/chassis/motherboard/pcieclock",
+ 0x0F: "<inventory_root>/system/chassis/motherboard/todclock",
+ 0x10: "<inventory_root>/system/chassis/motherboard/apss",
+ 0x02: "/org/openbmc/sensors/host/OperatingSystemStatus",
+ 0x04: "<inventory_root>/system/chassis/motherboard/pcielink",
+ 0x0B: "/xyz/openbmc_project/sensors/chassis/PowerSupplyRedundancy",
+ 0xDA: "/org/openbmc/sensors/host/TurboAllowed",
+ 0xD8: "/org/openbmc/sensors/host/PowerSupplyDerating",
},
- 'GPIO_PRESENT': {}
+ "GPIO_PRESENT": {},
}
GPIO_CONFIG = {}
-GPIO_CONFIG['BMC_POWER_UP'] = \
- {'gpio_pin': 'D1', 'direction': 'out'}
-GPIO_CONFIG['SOFTWARE_PGOOD'] = \
- {'gpio_pin': 'R1', 'direction': 'out'}
-GPIO_CONFIG['SYS_PWROK_BUFF'] = \
- {'gpio_pin': 'D2', 'direction': 'in'}
+GPIO_CONFIG["BMC_POWER_UP"] = {"gpio_pin": "D1", "direction": "out"}
+GPIO_CONFIG["SOFTWARE_PGOOD"] = {"gpio_pin": "R1", "direction": "out"}
+GPIO_CONFIG["SYS_PWROK_BUFF"] = {"gpio_pin": "D2", "direction": "in"}
# PV_CP_MD_JTAG_ATTENTION_N
-GPIO_CONFIG['CHECKSTOP'] = \
- {'gpio_pin': 'J2', 'direction': 'falling'}
+GPIO_CONFIG["CHECKSTOP"] = {"gpio_pin": "J2", "direction": "falling"}
-GPIO_CONFIG['BMC_CP0_RESET_N'] = \
- {'gpio_pin': 'A1', 'direction': 'out'}
+GPIO_CONFIG["BMC_CP0_RESET_N"] = {"gpio_pin": "A1", "direction": "out"}
# pcie switch reset
-GPIO_CONFIG['BMC_VS1_PERST_N'] = \
- {'gpio_pin': 'B7', 'direction': 'out'}
+GPIO_CONFIG["BMC_VS1_PERST_N"] = {"gpio_pin": "B7", "direction": "out"}
# pcie slots reset - not connected?
-GPIO_CONFIG['BMC_CP0_PERST_ENABLE_R'] = \
- {'gpio_pin': 'A3', 'direction': 'out'}
+GPIO_CONFIG["BMC_CP0_PERST_ENABLE_R"] = {"gpio_pin": "A3", "direction": "out"}
# SOFT_FSI_DAT
-GPIO_CONFIG['FSI_DATA'] = \
- {'gpio_pin': 'E0', 'direction': 'out'}
+GPIO_CONFIG["FSI_DATA"] = {"gpio_pin": "E0", "direction": "out"}
# SOFT_FSI_CLK
-GPIO_CONFIG['FSI_CLK'] = \
- {'gpio_pin': 'AA0', 'direction': 'out'}
+GPIO_CONFIG["FSI_CLK"] = {"gpio_pin": "AA0", "direction": "out"}
# BMC_FSI_IN_ENA
-GPIO_CONFIG['FSI_ENABLE'] = \
- {'gpio_pin': 'D0', 'direction': 'out'}
+GPIO_CONFIG["FSI_ENABLE"] = {"gpio_pin": "D0", "direction": "out"}
# FSI_JMFG0_PRSNT_N
-GPIO_CONFIG['CRONUS_SEL'] = \
- {'gpio_pin': 'A6', 'direction': 'out'}
+GPIO_CONFIG["CRONUS_SEL"] = {"gpio_pin": "A6", "direction": "out"}
# FP_PWR_BTN_N
-GPIO_CONFIG['POWER_BUTTON'] = \
- {'gpio_pin': 'I3', 'direction': 'both'}
+GPIO_CONFIG["POWER_BUTTON"] = {"gpio_pin": "I3", "direction": "both"}
# BMC_NMIBTN_IN_N
-GPIO_CONFIG['RESET_BUTTON'] = \
- {'gpio_pin': 'J1', 'direction': 'both'}
+GPIO_CONFIG["RESET_BUTTON"] = {"gpio_pin": "J1", "direction": "both"}
# FP_ID_BTN_N
-GPIO_CONFIG['IDBTN'] = \
- {'gpio_pin': 'Q7', 'direction': 'out'}
+GPIO_CONFIG["IDBTN"] = {"gpio_pin": "Q7", "direction": "out"}
GPIO_CONFIGS = {
- 'power_config': {
- 'power_good_in': 'SYS_PWROK_BUFF',
- 'power_up_outs': [
- ('SOFTWARE_PGOOD', True),
- ('BMC_POWER_UP', True),
+ "power_config": {
+ "power_good_in": "SYS_PWROK_BUFF",
+ "power_up_outs": [
+ ("SOFTWARE_PGOOD", True),
+ ("BMC_POWER_UP", True),
],
- 'reset_outs': [
- ('BMC_CP0_RESET_N', False),
+ "reset_outs": [
+ ("BMC_CP0_RESET_N", False),
],
},
- 'hostctl_config': {
- 'fsi_data': 'FSI_DATA',
- 'fsi_clk': 'FSI_CLK',
- 'fsi_enable': 'FSI_ENABLE',
- 'cronus_sel': 'CRONUS_SEL',
- 'optionals': [
- ],
+ "hostctl_config": {
+ "fsi_data": "FSI_DATA",
+ "fsi_clk": "FSI_CLK",
+ "fsi_enable": "FSI_ENABLE",
+ "cronus_sel": "CRONUS_SEL",
+ "optionals": [],
},
}
@@ -369,13 +590,13 @@
# Miscellaneous non-poll sensor with system specific properties.
# The sensor id is the same as those defined in ID_LOOKUP['SENSOR'].
MISC_SENSORS = {
- 0x07: {'class': 'BootCountSensor'},
- 0x03: {'class': 'BootProgressSensor'},
- 0x02: {'class': 'OperatingSystemStatusSensor'},
+ 0x07: {"class": "BootCountSensor"},
+ 0x03: {"class": "BootProgressSensor"},
+ 0x02: {"class": "OperatingSystemStatusSensor"},
# Garrison value is used, Not in P9 XML yet.
- 0x0b: {'class': 'PowerSupplyRedundancySensor'},
- 0xda: {'class': 'TurboAllowedSensor'},
- 0xD8: {'class': 'PowerSupplyDeratingSensor'},
+ 0x0B: {"class": "PowerSupplyRedundancySensor"},
+ 0xDA: {"class": "TurboAllowedSensor"},
+ 0xD8: {"class": "PowerSupplyDeratingSensor"},
}
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
diff --git a/data/inventory.py b/data/inventory.py
index 00de1dd..6e3bc64 100644
--- a/data/inventory.py
+++ b/data/inventory.py
@@ -1,20 +1,7 @@
inventory_dict = {
- "core": [
- "PrettyName",
- "Present",
- "Functional"
- ],
- "fan": [
- "PrettyName",
- "Present",
- "MeetsMinimumShipLevel",
- "Functional"
- ],
- "fan_wc": [
- "PrettyName",
- "Present",
- "MeetsMinimumShipLevel"
- ],
+ "core": ["PrettyName", "Present", "Functional"],
+ "fan": ["PrettyName", "Present", "MeetsMinimumShipLevel", "Functional"],
+ "fan_wc": ["PrettyName", "Present", "MeetsMinimumShipLevel"],
"fru": [
"PrettyName",
"Present",
@@ -26,12 +13,7 @@
"Version",
"FieldReplaceable",
"Cached",
- "Functional"
+ "Functional",
],
- "gpu": [
- "PrettyName",
- "Present",
- "FieldReplaceable",
- "Functional"
- ]
+ "gpu": ["PrettyName", "Present", "FieldReplaceable", "Functional"],
}
diff --git a/data/ipmi_raw_cmd_table.py b/data/ipmi_raw_cmd_table.py
index 729e968..4df9872 100644
--- a/data/ipmi_raw_cmd_table.py
+++ b/data/ipmi_raw_cmd_table.py
@@ -11,16 +11,14 @@
# Refer:
# openbmc/meta-openbmc-machines/meta-openpower/meta-ibm/meta-witherspoon/recipe
# s-phosphor/ipmi/phosphor-ipmi-host/cipher_list.json
-valid_ciphers = ['17']
-unsupported_ciphers = ['1', '2', '15', '16']
+valid_ciphers = ["17"]
+unsupported_ciphers = ["1", "2", "15", "16"]
IPMI_RAW_CMD = {
# Interface name
- 'power_supply_redundancy':
- {
+ "power_supply_redundancy": {
# Command action type
- 'Get':
- [
+ "Get": [
# raw command, expected output(s), comment
"0x04 0x2d 0x0b",
"00 00 01 00",
@@ -30,73 +28,61 @@
"00 40 02 00",
"40 is scanning enabled and 02 indicates redundancy enabled",
],
- 'Enabled':
- [
+ "Enabled": [
# raw command, expected output, comment
"0x04 0x30 0x0b 0x00 0x00 0x02 0x00 0x00 0x00 0x00 0x00 0x00",
"none",
"Enabled nibble position 6th LSB e.g. 0x2",
],
- 'Disabled':
- [
+ "Disabled": [
# raw command, expected output, comment
"0x04 0x30 0x0b 0x00 0x00 0x01 0x00 0x00 0x00 0x00 0x00 0x00",
"none",
"Enabled nibble position 6th LSB e.g. 0x1",
],
},
- 'power_reading':
- {
- 'Get':
- [
+ "power_reading": {
+ "Get": [
# raw command, expected output(s), comment
"0x2c 0x02 0xdc 0x01 0x01 0x00",
"dc d5 00 d5 00 d5 00 d5 00 00 00 00 00 00 00 00 00 00",
"Byte position 2nd LSB e.g. d5 Instantaneous power readings",
],
},
- 'conf_param':
- {
- 'Enabled':
- [
+ "conf_param": {
+ "Enabled": [
# raw command, expected output, comment
"0x2c 0x12 0xdc 0x02 0x00 0x01",
"dc",
"Enabled nibble position 6th LSB e.g. 0x01",
],
- 'Disabled':
- [
+ "Disabled": [
# raw command, expected output, comment
"0x2c 0x12 0xdc 0x02 0x00 0x00",
"dc",
"Disable nibble position 6th LSB e.g. 0x00",
- ]
+ ],
},
- 'SEL_entry':
- {
- 'Reserve':
- [
+ "SEL_entry": {
+ "Reserve": [
# raw command, expected output, comment
"0x0a 0x42",
"27 00",
"27 is Reservation ID, LSB, 00 Reservation ID, MSB ",
],
- 'Get_SEL_Time':
- [
+ "Get_SEL_Time": [
# raw command
- '0x0a 0x48',
+ "0x0a 0x48",
],
- 'Set_SEL_Time':
- [
+ "Set_SEL_Time": [
# raw command, expected output(s)
- '0x0a 0x49',
- 'rsp=0xd5',
- 'not supported in present state',
- 'rsp=0xc7',
- 'Request data length invalid',
+ "0x0a 0x49",
+ "rsp=0xd5",
+ "not supported in present state",
+ "rsp=0xc7",
+ "Request data length invalid",
],
- 'Clear_SEL':
- [
+ "Clear_SEL": [
# raw command, expected output(s)
"0x0a 0x47",
"0x43 0x4c 0x52 0xaa",
@@ -106,56 +92,48 @@
"Reservation cancelled or invalid",
"0x43 0x4c 0x52 0x00",
],
- 'SEL_info':
- [
+ "SEL_info": [
# raw command
"0x0a 0x40"
],
- 'Create_SEL':
- [
+ "Create_SEL": [
# raw command
"0x0a 0x44 0x00 0x00 0x02 0x00 0x00 0x00 0x00 0x00 0x00 0x04",
"0x00 0xa0 0x04 0x07",
],
- 'Get_SEL_Entry':
- [
+ "Get_SEL_Entry": [
# raw command
"0x0a 0x43 0x00 0x00",
"0x00 0xff",
],
},
- 'Self_Test_Results':
- {
- 'Get':
- [
+ "Self_Test_Results": {
+ "Get": [
# raw command, expected output(s), comment
"0x06 0x04",
"56 00",
"56h = Self Test function not implemented in this controller.",
]
},
- 'Device GUID':
- {
- 'Get':
- [
+ "Device GUID": {
+ "Get": [
# raw command, expected output(s), comment
"0x06 0x08",
"01 70 9b ae da 6f dd 9c b4 4c 36 be 66 c8 49 28",
"Get GUID bytes 1 through 16.",
-
]
},
- 'LAN_Config_Params':
- {
- 'Get':
- [
+ "LAN_Config_Params": {
+ "Get": [
# raw command, expected output, comment
"0x0c 0x02",
"11 02",
- "11 is Parameter revision, 02 is Configuration parameter data e.g. Cipher Suite Entry count",
+ (
+ "11 is Parameter revision, 02 is Configuration parameter data"
+ " e.g. Cipher Suite Entry count"
+ ),
],
- 'Set':
- [
+ "Set": [
# raw command, expected output, error response
"0x0c 0x01",
"11 00",
@@ -163,135 +141,116 @@
"Invalid data field in request",
],
},
- 'Payload':
- {
- 'Get_Payload_Activation_Status':
- [
+ "Payload": {
+ "Get_Payload_Activation_Status": [
# raw command, expected output(s), comment
"0x06 0x4a 0x01",
"01 00 00",
- "1st byte is instance capacity, last two bytes is activation status of instances",
+ (
+ "1st byte is instance capacity, last two bytes is activation"
+ " status of instances"
+ ),
],
- 'Activate_Payload':
- [
+ "Activate_Payload": [
# raw command, expected output(s), comment
"0x06 0x48 0x01 0x01 0xc6 0x00 0x00 0x00",
"00 00 00 00 ff 00 ff 00 6f 02 ff ff",
- "Last two bits are payload vlan number, - FFFFh if VLAN addressing is not used",
+ (
+ "Last two bits are payload vlan number, - FFFFh if VLAN"
+ " addressing is not used"
+ ),
],
- 'Deactivate_Payload':
- [
+ "Deactivate_Payload": [
# raw command, expected output(s), comment
"0x06 0x49 0x01 0x01 0x00 0x00 0x00 0x00",
"",
"Line feed only",
],
- 'Get_Payload_Instance_Info':
- [
+ "Get_Payload_Instance_Info": [
# raw command, expected output(s), comment
"0x06 0x4b 0x01 0x01",
"00 00 00 00 00 00 00 00 00 00 00 00",
- "When the payload is activated, the first four bytes are the session ID,"
- "otherwise it should be 00."
+ (
+ "When the payload is activated, the first four bytes are the"
+ " session ID,otherwise it should be 00."
+ ),
],
- 'Get_User_Access_Payload':
- [
+ "Get_User_Access_Payload": [
# raw command,
"0x06 0x4d"
],
- 'Set_User_Access_Payload':
- [
+ "Set_User_Access_Payload": [
# raw command,
"0x06 0x4c"
],
- 'Get_Channel_Payload_Version':
- [
+ "Get_Channel_Payload_Version": [
# raw command,
"0x06 0x4F"
],
- 'Get_Channel_Payload_Support':
- [
+ "Get_Channel_Payload_Support": [
# raw command,
"0x06 0x4E"
],
},
- 'BIOS_POST_Code':
- {
- 'Get':
- [
+ "BIOS_POST_Code": {
+ "Get": [
# raw command, expected output, comment
"0x30 0xe9",
"",
"Response bytes will vary in length depending on state of system",
"0x89",
- "error response byte when host is powered off"
+ "error response byte when host is powered off",
]
},
- 'Device ID':
- {
- 'Get':
- [
+ "Device ID": {
+ "Get": [
# raw command, error response, error code
"0x06 0x01",
"Error: Unable to establish IPMI v2 / RMCP+ session",
"0xc7",
]
},
- 'Cold Reset':
- {
- 'reset':
- [
+ "Cold Reset": {
+ "reset": [
# raw command
"0x06 0x02"
]
},
- 'lan_parameters':
- {
- 'get_ip':
- [
+ "lan_parameters": {
+ "get_ip": [
# raw command
"0x0c 0x02 0x01 0x03 0 0"
],
- 'get_ip_src':
- [
+ "get_ip_src": [
# raw command
"0x0c 0x02 0x01 0x04 0 0"
],
- 'get_dot1q':
- [
+ "get_dot1q": [
# raw command
"0x0c 0x02 0x01 0x14 0 0"
- ]
+ ],
},
- 'SDR_Info':
- {
- 'get':
- [
+ "SDR_Info": {
+ "get": [
# raw command
"0x04 0x20 1"
]
},
- 'Chassis_status':
- {
- 'get':
- [
+ "Chassis_status": {
+ "get": [
# raw command
"0x00 0x01"
]
},
- 'SEL_Info':
- {
- 'get':
- [
+ "SEL_Info": {
+ "get": [
# raw command
"0x0a 0x40"
]
},
- 'Watchdog':
- {
+ "Watchdog": {
# Command action type
- 'Get':
- [
+ "Get": [
# raw command, expected output(s), comment
"0x06 0x25",
"05 00 00 00 64 00",
@@ -341,8 +300,7 @@
"0x06 0x25 0x00",
"Get with one extra byte",
],
- 'Set':
- [
+ "Set": [
# raw command, expected output, comment
"0x06 0x24 0x05 0x00 0x00 0x00 0x64 0x00",
"none",
@@ -435,8 +393,7 @@
"none",
"Set with one extra byte",
],
- 'Reset':
- [
+ "Reset": [
# raw command, expected output, comment
"0x06 0x22",
"none",
@@ -447,21 +404,17 @@
"0x06 0x22",
"none",
"Reset watchdog timer without initialized watchdog",
- ]
+ ],
},
- 'SOL':
- {
- 'Set_SOL':
- [
+ "SOL": {
+ "Set_SOL": [
# raw command, expected output(s), comment
- 'Invalid value',
- 'Valid values are serial, 9.6 19.2, 38.4, 57.6 and 115.2',
+ "Invalid value",
+ "Valid values are serial, 9.6 19.2, 38.4, 57.6 and 115.2",
]
},
- 'Get SDR':
- {
- 'Get':
- [
+ "Get SDR": {
+ "Get": [
# Get SDR raw command without Reservation ID.
"0x0a 0x23 0x00 0x00 0x00 0x00 0x00 0xff",
# Netfunction and cmd.
@@ -469,22 +422,18 @@
# Record ID offset and bytes to read.
"0x01 0x0f",
# Raw command To Get SDR Partial without Reservation ID.
- "0x0a 0x23 0x00 0x00 0x00 0x00 0x01 0x0f"
+ "0x0a 0x23 0x00 0x00 0x00 0x00 0x01 0x0f",
],
},
- 'Get':
- {
- 'POH_Counter':
- [
+ "Get": {
+ "POH_Counter": [
# raw command, error response
- '0x00 0x0f',
- 'Error: Unable to establish IPMI v2 / RMCP+ session'
+ "0x00 0x0f",
+ "Error: Unable to establish IPMI v2 / RMCP+ session",
]
},
- 'Device_SDR':
- {
- 'Get_Info':
- [
+ "Device_SDR": {
+ "Get_Info": [
# raw command, expected output(s), comment
"0x04 0x20 0x00",
"0x04 0x20 0x01",
@@ -493,16 +442,14 @@
"rsp=0xd4",
"Insufficient privilege level",
],
- 'Get':
- [
+ "Get": [
# raw command, expected output(s), comment
"0x04 0x21",
"0x00 0x00 0x00 0xff",
"rsp=0xc7",
"Request data length invalid",
],
- 'Reserve_Repository':
- [
+ "Reserve_Repository": [
# raw command, expected output(s), comment
"0x04 0x22",
"rsp=0xc7",
@@ -510,114 +457,91 @@
"rsp=0xd4",
"Insufficient privilege level",
"Reservation cancelled or invalid",
- ]
+ ],
},
- 'System_Info':
- {
- 'param0_Set_In_Progress':
- {
- 'Get':
- [
+ "System_Info": {
+ "param0_Set_In_Progress": {
+ "Get": [
# raw command, expected output(s)
"0x06 0x59 0x00 0x00 0x00 0x00",
"Request data length invalid",
],
- 'Set':
- [
+ "Set": [
# raw command, expected output(s)
"0x06 0x58 0x00",
"Request data length invalid",
"Invalid data field in request",
],
},
- 'param1_System_Firmware_Version':
- {
- 'Get':
- [
+ "param1_System_Firmware_Version": {
+ "Get": [
# raw command, expected output(s)
"0x06 0x59 0x00 0x01 0x00 0x00",
"Request data length invalid",
],
- 'Set':
- [
+ "Set": [
# raw command, expected output(s)
"0x06 0x58 0x01 0x00 0x00 0x0e",
"Invalid data field in request",
],
},
- 'param2_System_Name':
- {
- 'Get':
- [
+ "param2_System_Name": {
+ "Get": [
# raw command, expected output(s)
"0x06 0x59 0x00 0x02 0x00 0x00",
"Request data length invalid",
],
- 'Set':
- [
+ "Set": [
# raw command, expected output(s)
"0x06 0x58 0x02 0x00 0x00 0x0e",
"Invalid data field in request",
],
},
- 'param3_Primary_Operating_System_Name':
- {
- 'Get':
- [
+ "param3_Primary_Operating_System_Name": {
+ "Get": [
# raw command, expected output(s)
"0x06 0x59 0x00 0x03 0x00 0x00",
"Request data length invalid",
],
- 'Set':
- [
+ "Set": [
# raw command, expected output(s)
"0x06 0x58 0x03 0x00 0x00 0x0e",
"Invalid data field in request",
],
},
- 'param4_Operating_System_Name':
- {
- 'Get':
- [
+ "param4_Operating_System_Name": {
+ "Get": [
# raw command, expected output(s)
"0x06 0x59 0x00 0x04 0x00 0x00",
"Request data length invalid",
],
- 'Set':
- [
+ "Set": [
# raw command, expected output(s)
"0x06 0x58 0x04 0x00 0x00 0x0e",
"Invalid data field in request",
],
},
- 'param5_Present_OS_Version_number':
- {
- 'Get':
- [
+ "param5_Present_OS_Version_number": {
+ "Get": [
# raw command, expected output(s)
"0x06 0x59 0x00 0x05 0x00 0x00",
"Request data length invalid",
],
- 'Set':
- [
+ "Set": [
# raw command, expected output(s)
"0x06 0x58 0x05 0x00 0x00 0x0e",
"Invalid data field in request",
- ]
- }
+ ],
+ },
},
- 'Get Channel Auth Cap':
- {
- 'get':
- [
+ "Get Channel Auth Cap": {
+ "get": [
# raw command
"0x06 0x38",
]
},
- 'Cipher Suite':
- {
- 'get':
- [
+ "Cipher Suite": {
+ "get": [
# raw command, supported algorithm
"0x06 0x54",
"03 44 81",
@@ -626,10 +550,8 @@
# 81 - aes_cbc_128
]
},
- 'SDR':
- {
- 'Get':
- [
+ "SDR": {
+ "Get": [
# Get SDR raw command without Reservation ID.
"0x0a 0x23 0x00 0x00 0x00 0x00 0x00 0xff",
# Netfunction and command.
@@ -637,50 +559,41 @@
# Record ID offset and bytes to read.
"0x00 0x00 0x01 0x0f",
# Raw command To Get SDR Partial without reservation ID.
- "0x0a 0x23 0x00 0x00 0x00 0x00 0x01 0x0f"
+ "0x0a 0x23 0x00 0x00 0x00 0x00 0x01 0x0f",
],
- 'Reserve SDR Repository':
- [
+ "Reserve SDR Repository": [
# raw command, expected output(s), comment
"0x0a 0x22",
],
- 'SDR Repository Info':
- [
+ "SDR Repository Info": [
# raw command.
"0x0a 0x20",
],
- 'Get SDR allocation Info':
- [
+ "Get SDR allocation Info": [
# raw command.
"0x0a 0x21"
],
- 'Delete SDR':
- [
+ "Delete SDR": [
# raw command.
"0x0a 0x26"
],
- 'Partially Add SDR':
- [
+ "Partially Add SDR": [
# raw command.
"0x0a 0x25"
- ]
+ ],
},
- 'FRU':
- {
- 'Inventory_Area_Info':
- [
+ "FRU": {
+ "Inventory_Area_Info": [
# raw command, expected output(s), comment
"0x0a 0x10",
"Invalid data field in request",
- "Request data length invalid"
+ "Request data length invalid",
],
- 'Read':
- [
+ "Read": [
# raw command
"0x0a 0x11",
],
- 'Write':
- [
+ "Write": [
# raw command
"0x0a 0x12",
],
diff --git a/data/ipmi_rest_fru_field_map.py b/data/ipmi_rest_fru_field_map.py
index 0ada176..a0acfd7 100644
--- a/data/ipmi_rest_fru_field_map.py
+++ b/data/ipmi_rest_fru_field_map.py
@@ -13,42 +13,40 @@
"board_mfg": "Manufacturer",
"board_product": "PrettyName",
"board_serial": "SerialNumber",
- "board_part_number": "PartNumber"
+ "board_part_number": "PartNumber",
},
"system": {
"chassis_part_number": "Model",
- "chassis_serial": "SerialNumber"
+ "chassis_serial": "SerialNumber",
},
"motherboard": {
"board_mfg": "Manufacturer",
"board_product": "PrettyName",
"board_serial": "SerialNumber",
- "board_part_number": "PartNumber"
+ "board_part_number": "PartNumber",
},
"dimm": {
"product_manufacturer": "Manufacturer",
"product_name": "PrettyName",
"product_part_number": "Model",
"product_version": "Version",
- "product_serial": "SerialNumber"
+ "product_serial": "SerialNumber",
},
- "fan": {
- "product_name": "PrettyName"
- },
+ "fan": {"product_name": "PrettyName"},
"bmc": {
# "board_mfg_date": "BuildDate",
"board_mfg": "Manufacturer",
"board_product": "PrettyName",
"board_serial": "SerialNumber",
- "board_part_number": "PartNumber"
+ "board_part_number": "PartNumber",
},
"powersupply": {
# "board_mfg_date": "BuildDate",
"board_product": "PrettyName",
"board_serial": "SerialNumber",
- "board_part_number": "PartNumber"
+ "board_part_number": "PartNumber",
},
"gv100card": {
# "board_mfg_date": "BuildDate",
- }
+ },
}
diff --git a/data/ipmi_variable.py b/data/ipmi_variable.py
index b9958ed..a9f2166 100644
--- a/data/ipmi_variable.py
+++ b/data/ipmi_variable.py
@@ -17,7 +17,7 @@
"SMBus v2.0": "smbus-v2.0",
"USB 1.x": "usb-1x",
"USB 2.x": "usb-2x",
- "System Interface": "system-interface"
+ "System Interface": "system-interface",
}
diff --git a/data/model.py b/data/model.py
index de5ca9a..8134c99 100755
--- a/data/model.py
+++ b/data/model.py
@@ -1,55 +1,52 @@
#!/usr/bin/env python3 -u
-import sys
-from robot.libraries.BuiltIn import BuiltIn
import imp
import string
+import sys
+
+from robot.libraries.BuiltIn import BuiltIn
def get_sensor(module_name, value):
- m = imp.load_source('module.name', module_name)
+ m = imp.load_source("module.name", module_name)
- for i in m.ID_LOOKUP['SENSOR']:
-
- if m.ID_LOOKUP['SENSOR'][i] == value:
+ for i in m.ID_LOOKUP["SENSOR"]:
+ if m.ID_LOOKUP["SENSOR"][i] == value:
return i
return 0xFF
def get_inventory_sensor(module_name, value):
- m = imp.load_source('module.name', module_name)
+ m = imp.load_source("module.name", module_name)
- value = string.replace(value, m.INVENTORY_ROOT, '<inventory_root>')
+ value = string.replace(value, m.INVENTORY_ROOT, "<inventory_root>")
- for i in m.ID_LOOKUP['SENSOR']:
-
- if m.ID_LOOKUP['SENSOR'][i] == value:
+ for i in m.ID_LOOKUP["SENSOR"]:
+ if m.ID_LOOKUP["SENSOR"][i] == value:
return i
return 0xFF
def get_inventory_list(module_name):
-
inventory_list = []
- m = imp.load_source('module.name', module_name)
+ m = imp.load_source("module.name", module_name)
- for i in m.ID_LOOKUP['FRU']:
- s = m.ID_LOOKUP['FRU'][i]
- s = s.replace('<inventory_root>', m.INVENTORY_ROOT)
+ for i in m.ID_LOOKUP["FRU"]:
+ s = m.ID_LOOKUP["FRU"][i]
+ s = s.replace("<inventory_root>", m.INVENTORY_ROOT)
inventory_list.append(s)
return inventory_list
def get_inventory_fru_type_list(module_name, fru_type):
-
inventory_list = []
- m = imp.load_source('module.name', module_name)
+ m = imp.load_source("module.name", module_name)
for i in m.FRU_INSTANCES.keys():
- if m.FRU_INSTANCES[i]['fru_type'] == fru_type:
- s = i.replace('<inventory_root>', m.INVENTORY_ROOT)
+ if m.FRU_INSTANCES[i]["fru_type"] == fru_type:
+ s = i.replace("<inventory_root>", m.INVENTORY_ROOT)
inventory_list.append(s)
return inventory_list
@@ -61,7 +58,7 @@
def get_FRU_component_name_list(module_name):
name_list = []
- m = imp.load_source('module.name', module_name)
+ m = imp.load_source("module.name", module_name)
for name in m.FRU_COMPONENT_NAME:
name_list.append(name)
@@ -71,8 +68,7 @@
def get_ipmi_rest_fru_field_map(module_name):
-
- m = imp.load_source('module.name', module_name)
+ m = imp.load_source("module.name", module_name)
ipmi_rest_fru_field_map = dict.copy(m.ipmi_rest_fru_field_map)
diff --git a/data/pel_variables.py b/data/pel_variables.py
index 7c0c823..e5c3d6f 100644
--- a/data/pel_variables.py
+++ b/data/pel_variables.py
@@ -5,41 +5,72 @@
"""
PEL_DETAILS = {
- 'CreatorID': 'BMC',
- 'CompID': '0x1000',
- 'Subsystem': 'Platform Firmware',
- 'Message': 'This is a test error',
- 'SRC': 'BD8D1002',
- 'Sev': 'Unrecoverable Error'}
+ "CreatorID": "BMC",
+ "CompID": "0x1000",
+ "Subsystem": "Platform Firmware",
+ "Message": "This is a test error",
+ "SRC": "BD8D1002",
+ "Sev": "Unrecoverable Error",
+}
-ERROR_LOG_CREATE_BASE_CMD = 'busctl call xyz.openbmc_project.Logging /xyz/openbmc_project/logging \
- xyz.openbmc_project.Logging.Create Create ssa{ss} '
+ERROR_LOG_CREATE_BASE_CMD = (
+ "busctl call xyz.openbmc_project.Logging /xyz/openbmc_project/logging "
+ " xyz.openbmc_project.Logging.Create Create ssa{ss} "
+)
-CMD_INTERNAL_FAILURE = ERROR_LOG_CREATE_BASE_CMD + 'xyz.openbmc_project.Common.Error.InternalFailure \
- xyz.openbmc_project.Logging.Entry.Level.Error 0'
+CMD_INTERNAL_FAILURE = (
+ ERROR_LOG_CREATE_BASE_CMD
+ + "xyz.openbmc_project.Common.Error.InternalFailure "
+ " xyz.openbmc_project.Logging.Entry.Level.Error 0"
+)
-CMD_FRU_CALLOUT = ERROR_LOG_CREATE_BASE_CMD + 'xyz.openbmc_project.Sensor.Device.Error.ReadFailure \
- xyz.openbmc_project.Logging.Entry.Level.Error 2 "TIMEOUT_IN_MSEC" "5" "CALLOUT_INVENTORY_PATH" \
- "/xyz/openbmc_project/inventory/system/chassis/motherboard"'
+CMD_FRU_CALLOUT = (
+ ERROR_LOG_CREATE_BASE_CMD
+ + "xyz.openbmc_project.Sensor.Device.Error.ReadFailure "
+ ' xyz.openbmc_project.Logging.Entry.Level.Error 2 "TIMEOUT_IN_MSEC" "5"'
+ ' "CALLOUT_INVENTORY_PATH" '
+ ' "/xyz/openbmc_project/inventory/system/chassis/motherboard"'
+)
-CMD_PROCEDURAL_SYMBOLIC_FRU_CALLOUT = ERROR_LOG_CREATE_BASE_CMD + 'org.open_power.Logging.Error.TestError1 \
- xyz.openbmc_project.Logging.Entry.Level.Error 0'
+CMD_PROCEDURAL_SYMBOLIC_FRU_CALLOUT = (
+ ERROR_LOG_CREATE_BASE_CMD
+ + "org.open_power.Logging.Error.TestError1 "
+ " xyz.openbmc_project.Logging.Entry.Level.Error 0"
+)
-CMD_INFORMATIONAL_ERROR = ERROR_LOG_CREATE_BASE_CMD + 'xyz.openbmc_project.Common.Error.TestError2 \
- xyz.openbmc_project.Logging.Entry.Level.Informational 0'
+CMD_INFORMATIONAL_ERROR = (
+ ERROR_LOG_CREATE_BASE_CMD
+ + "xyz.openbmc_project.Common.Error.TestError2 "
+ " xyz.openbmc_project.Logging.Entry.Level.Informational 0"
+)
-CMD_INVENTORY_PREFIX = 'busctl get-property xyz.openbmc_project.Inventory.Manager \
- /xyz/openbmc_project/inventory/system/chassis/motherboard'
+CMD_INVENTORY_PREFIX = (
+ "busctl get-property xyz.openbmc_project.Inventory.Manager "
+ " /xyz/openbmc_project/inventory/system/chassis/motherboard"
+)
-CMD_UNRECOVERABLE_ERROR = ERROR_LOG_CREATE_BASE_CMD + 'org.open_power.Logging.Error.TestError1 \
- xyz.openbmc_project.Logging.Entry.Level.Error 0'
+CMD_UNRECOVERABLE_ERROR = (
+ ERROR_LOG_CREATE_BASE_CMD
+ + "org.open_power.Logging.Error.TestError1 "
+ " xyz.openbmc_project.Logging.Entry.Level.Error 0"
+)
-CMD_PREDICTIVE_ERROR = ERROR_LOG_CREATE_BASE_CMD + \
- 'xyz.openbmc_project.Sensor.Threshold.Error.TemperatureWarningLow \
- xyz.openbmc_project.Logging.Entry.Level.Warning 0'
+CMD_PREDICTIVE_ERROR = (
+ ERROR_LOG_CREATE_BASE_CMD
+ + "xyz.openbmc_project.Sensor.Threshold.Error.TemperatureWarningLow "
+ " xyz.openbmc_project.Logging.Entry.Level.Warning 0"
+)
-CMD_UNRECOVERABLE_HOST_ERROR = ERROR_LOG_CREATE_BASE_CMD + 'xyz.openbmc_project.Host.Error.Event \
- xyz.openbmc_project.Logging.Entry.Level.Error 1 RAWPEL /tmp/FILE_NBMC_UNRECOVERABLE'
+CMD_UNRECOVERABLE_HOST_ERROR = (
+ ERROR_LOG_CREATE_BASE_CMD
+ + "xyz.openbmc_project.Host.Error.Event "
+ " xyz.openbmc_project.Logging.Entry.Level.Error 1 RAWPEL"
+ " /tmp/FILE_NBMC_UNRECOVERABLE"
+)
-CMD_INFORMATIONAL_HOST_ERROR = ERROR_LOG_CREATE_BASE_CMD + 'xyz.openbmc_project.Host.Error.Event \
- xyz.openbmc_project.Logging.Entry.Level.Error 1 RAWPEL /tmp/FILE_HOST_INFORMATIONAL'
+CMD_INFORMATIONAL_HOST_ERROR = (
+ ERROR_LOG_CREATE_BASE_CMD
+ + "xyz.openbmc_project.Host.Error.Event "
+ " xyz.openbmc_project.Logging.Entry.Level.Error 1 RAWPEL"
+ " /tmp/FILE_HOST_INFORMATIONAL"
+)
diff --git a/data/platform_variables.py b/data/platform_variables.py
index 94d8a53..512b8ea 100644
--- a/data/platform_variables.py
+++ b/data/platform_variables.py
@@ -4,11 +4,12 @@
Define methods to import platform specific files.
"""
-import sys
-from robot.libraries.BuiltIn import BuiltIn
import imp
-import string
import importlib
+import string
+import sys
+
+from robot.libraries.BuiltIn import BuiltIn
def get_service_restart_policy_services(module_name):
diff --git a/data/pldm_variables.py b/data/pldm_variables.py
index c5407d6..61b0ea1 100755
--- a/data/pldm_variables.py
+++ b/data/pldm_variables.py
@@ -4,38 +4,66 @@
Contains PLDM-related constants.
"""
-PLDM_SUPPORTED_TYPES = ['base', 'platform', 'bios', 'fru', 'oem-ibm']
+PLDM_SUPPORTED_TYPES = ["base", "platform", "bios", "fru", "oem-ibm"]
# PLDM types.
-PLDM_TYPE_BASE = {'VALUE': '00', 'STRING': 'base'}
-PLDM_TYPE_PLATFORM = {'VALUE': '02', 'STRING': 'platform'}
-PLDM_TYPE_BIOS = {'VALUE': '03', 'STRING': 'bios'}
-PLDM_TYPE_FRU = {'VALUE': '04', 'STRING': 'fru'}
-PLDM_TYPE_OEM = {'VALUE': '63', 'STRING': 'oem-ibm'}
-PLDM_SUPPORTED_TYPES = ['0(base)', '2(platform)', '3(bios)', '4(fru)', '63(oem-ibm)']
+PLDM_TYPE_BASE = {"VALUE": "00", "STRING": "base"}
+PLDM_TYPE_PLATFORM = {"VALUE": "02", "STRING": "platform"}
+PLDM_TYPE_BIOS = {"VALUE": "03", "STRING": "bios"}
+PLDM_TYPE_FRU = {"VALUE": "04", "STRING": "fru"}
+PLDM_TYPE_OEM = {"VALUE": "63", "STRING": "oem-ibm"}
+PLDM_SUPPORTED_TYPES = [
+ "0(base)",
+ "2(platform)",
+ "3(bios)",
+ "4(fru)",
+ "63(oem-ibm)",
+]
-VERSION_BASE = {'VALUE': ['f1', 'f0', 'f0', '00'], 'STRING': '1.0.0'}
-VERSION_PLATFORM = {'VALUE': ['f1', 'f2', 'f0', '00'], 'STRING': '1.2.0'}
-VERSION_BIOS = {'VALUE': ['f1', 'f1', 'f1', '00'], 'STRING': '1.0.0'}
-VERSION_FRU = {'VALUE': ['f1', 'f0', 'f0', '00'], 'STRING': '1.0.0'}
-VERSION_OEM = {'VALUE': ['f1', 'f0', 'f0', '00'], 'STRING': '1.0.0'}
+VERSION_BASE = {"VALUE": ["f1", "f0", "f0", "00"], "STRING": "1.0.0"}
+VERSION_PLATFORM = {"VALUE": ["f1", "f2", "f0", "00"], "STRING": "1.2.0"}
+VERSION_BIOS = {"VALUE": ["f1", "f1", "f1", "00"], "STRING": "1.0.0"}
+VERSION_FRU = {"VALUE": ["f1", "f0", "f0", "00"], "STRING": "1.0.0"}
+VERSION_OEM = {"VALUE": ["f1", "f0", "f0", "00"], "STRING": "1.0.0"}
-PLDM_BASE_CMDS = ['2(GetTID)', '3(GetPLDMVersion)', '4(GetPLDMTypes)', '5(GetPLDMCommands)']
-PLDM_PLATFORM_CMDS = ['57(SetStateEffecterStates)', '81(GetPDR)']
-PLDM_BIOS_CMDS = ['1(GetBIOSTable)', '7(SetBIOSAttributeCurrentValue)',
- '8(GetBIOSAttributeCurrentValueByHandle)', '12(GetDateTime)',
- '13(SetDateTime)']
-PLDM_FRU_CMDS = ['1(GetFRURecordTableMetadata)', '2(GetFRURecordTable)', '4(GetFRURecordByOption)']
-PLDM_OEM_CMDS = ['1(GetFileTable)', '4(ReadFile)', '5(WriteFile)', '6(ReadFileInToMemory)',
- '7(WriteFileFromMemory)', '8(ReadFileByTypeIntoMemory)',
- '9(WriteFileByTypeFromMemory)', '10(NewFileAvailable)',
- '11(ReadFileByType)', '12(WriteFileByType)', '13(FileAck)',
- '240(GetAlertStatus)']
+PLDM_BASE_CMDS = [
+ "2(GetTID)",
+ "3(GetPLDMVersion)",
+ "4(GetPLDMTypes)",
+ "5(GetPLDMCommands)",
+]
+PLDM_PLATFORM_CMDS = ["57(SetStateEffecterStates)", "81(GetPDR)"]
+PLDM_BIOS_CMDS = [
+ "1(GetBIOSTable)",
+ "7(SetBIOSAttributeCurrentValue)",
+ "8(GetBIOSAttributeCurrentValueByHandle)",
+ "12(GetDateTime)",
+ "13(SetDateTime)",
+]
+PLDM_FRU_CMDS = [
+ "1(GetFRURecordTableMetadata)",
+ "2(GetFRURecordTable)",
+ "4(GetFRURecordByOption)",
+]
+PLDM_OEM_CMDS = [
+ "1(GetFileTable)",
+ "4(ReadFile)",
+ "5(WriteFile)",
+ "6(ReadFileInToMemory)",
+ "7(WriteFileFromMemory)",
+ "8(ReadFileByTypeIntoMemory)",
+ "9(WriteFileByTypeFromMemory)",
+ "10(NewFileAvailable)",
+ "11(ReadFileByType)",
+ "12(WriteFileByType)",
+ "13(FileAck)",
+ "240(GetAlertStatus)",
+]
# PLDM command format.
-'''
+"""
e.g. : GetPLDMVersion usage
pldmtool base GetPLDMVersion -t <pldm_type>
@@ -44,46 +72,49 @@
base->0,platform->2,bios->3,fru->4
-'''
-CMD_GETPLDMVERSION = 'base GetPLDMVersion -t %s'
+"""
+CMD_GETPLDMVERSION = "base GetPLDMVersion -t %s"
-'''
+"""
e.g. : PLDM raw command usage
pldmtool raw -d 0x80 0x00 0x03 0x00 0x00 0x00 0x00 0x01 0x00
pldm raw -d 0x<header> 0x<pldm_type> 0x<pldm_cmd_type> 0x<payload_data>
-'''
+"""
-CMD_PLDMTOOL_RAW = 'raw -d 0x80' + '0x%s' + ' ' + '0x%s'
+CMD_PLDMTOOL_RAW = "raw -d 0x80" + "0x%s" + " " + "0x%s"
# PLDM command payload data.
-PAYLOAD_GetPLDMVersion = \
- ' 0x00 0x00 0x00 0x00 0x%s 0x%s' # %(TransferOperationFlag, PLDMType)
+PAYLOAD_GetPLDMVersion = ( # %(TransferOperationFlag, PLDMType)
+ " 0x00 0x00 0x00 0x00 0x%s 0x%s"
+)
-'''
+"""
e.g. : SetDateTime usage
pldmtool bios SetDateTime -d <YYYYMMDDHHMMSS>
-'''
-CMD_SETDATETIME = 'bios SetDateTime -d %s'
+"""
+CMD_SETDATETIME = "bios SetDateTime -d %s"
-CMD_GETPDR = 'platform GetPDR -d %s'
+CMD_GETPDR = "platform GetPDR -d %s"
-'''
+"""
e.g. : SetStateEffecterStates usage
pldmtool platform GetPDR -i <effter_handle> -c <count> -d <effecterID, effecterState>
pldmtool platform SetStateEffecterStates -i 1 -c 1 -d 1 1
-'''
+"""
-CMD_SETSTATEEFFECTERSTATES = 'platform SetStateEffecterStates -i %s -c %s -d %s'
+CMD_SETSTATEEFFECTERSTATES = (
+ "platform SetStateEffecterStates -i %s -c %s -d %s"
+)
# GetPDR parsed response message for record handle.
# Dictionary value array holds the expected output for record handle 1, 2.
@@ -93,98 +124,137 @@
# Only record handle 0, 1, 2 are supported as of now.
RESPONSE_DICT_GETPDR_SETSTATEEFFECTER = {
- 'PDRHeaderVersion': [1],
- 'PDRType': ['State Effecter PDR'],
- 'recordChangeNumber': [0],
- 'effecterID': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
- 'entityType': ['Virtual Machine Manager', 'System chassis (main enclosure)',
- 'System Firmware', 'Processor Module', '32801(OEM)',
- 'Management Controller', '24577(OEM)'],
- 'entityInstanceNumber': [0, 1, 2, 3, 4],
- 'containerID': [0, 1],
- 'effecterSemanticID': [0],
- 'effecterInit': ['noInit'],
- 'effecterDescriptionPDR': [False],
- 'compositeEffecterCount': [1]}
+ "PDRHeaderVersion": [1],
+ "PDRType": ["State Effecter PDR"],
+ "recordChangeNumber": [0],
+ "effecterID": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+ "entityType": [
+ "Virtual Machine Manager",
+ "System chassis (main enclosure)",
+ "System Firmware",
+ "Processor Module",
+ "32801(OEM)",
+ "Management Controller",
+ "24577(OEM)",
+ ],
+ "entityInstanceNumber": [0, 1, 2, 3, 4],
+ "containerID": [0, 1],
+ "effecterSemanticID": [0],
+ "effecterInit": ["noInit"],
+ "effecterDescriptionPDR": [False],
+ "compositeEffecterCount": [1],
+}
RESPONSE_DICT_GETPDR_FRURECORDSETIDENTIFIER = {
- 'PDRHeaderVersion': [1],
- 'PDRType': ['FRU Record Set PDR'],
- 'recordChangeNumber': [0],
- 'dataLength': [10],
- 'entityType': ['System Board', 'Chassis front panel board (control panel)',
- 'Management Controller', 'OEM', 'Power converter',
- 'System (logical)', 'System chassis (main enclosure)',
- 'Chassis front panel board (control panel)',
- 'Processor Module', 'Memory Module', 'Power Supply',
- '24576(OEM)', '60(OEM)', 'Processor', '142(OEM)'],
- 'containerID': [0, 1, 2, 3]}
+ "PDRHeaderVersion": [1],
+ "PDRType": ["FRU Record Set PDR"],
+ "recordChangeNumber": [0],
+ "dataLength": [10],
+ "entityType": [
+ "System Board",
+ "Chassis front panel board (control panel)",
+ "Management Controller",
+ "OEM",
+ "Power converter",
+ "System (logical)",
+ "System chassis (main enclosure)",
+ "Chassis front panel board (control panel)",
+ "Processor Module",
+ "Memory Module",
+ "Power Supply",
+ "24576(OEM)",
+ "60(OEM)",
+ "Processor",
+ "142(OEM)",
+ ],
+ "containerID": [0, 1, 2, 3],
+}
RESPONSE_DICT_GETPDR_PDRENTITYASSOCIATION = {
- 'PDRHeaderVersion': [1],
- 'PDRType': ['Entity Association PDR'],
- 'recordChangeNumber': [0],
- 'containerID': [1, 2, 3],
- 'associationtype': ['Physical'],
- 'containerentityType': ['System Board', 'System (logical)',
- 'System chassis (main enclosure)']}
+ "PDRHeaderVersion": [1],
+ "PDRType": ["Entity Association PDR"],
+ "recordChangeNumber": [0],
+ "containerID": [1, 2, 3],
+ "associationtype": ["Physical"],
+ "containerentityType": [
+ "System Board",
+ "System (logical)",
+ "System chassis (main enclosure)",
+ ],
+}
RESPONSE_DICT_GETPDR_STATESENSORPDR = {
- 'entityType': ['Communication Channel', 'Connector', 'Processor Module',
- '32774(OEM)', '57346(OEM)', '57347(OEM)', '32801(OEM)',
- '91(OEM)', '5(OEM)', '24577(OEM)'],
- 'sensorInit': ['noInit'],
- 'sensorAuxiliaryNamesPDR': [False]}
+ "entityType": [
+ "Communication Channel",
+ "Connector",
+ "Processor Module",
+ "32774(OEM)",
+ "57346(OEM)",
+ "57347(OEM)",
+ "32801(OEM)",
+ "91(OEM)",
+ "5(OEM)",
+ "24577(OEM)",
+ ],
+ "sensorInit": ["noInit"],
+ "sensorAuxiliaryNamesPDR": [False],
+}
RESPONSE_DICT_GETPDR_TERMINUSLOCATORPDR = {
- 'PDRHeaderVersion': [1],
- 'PDRType': ['Terminus Locator PDR'],
- 'recordChangeNumber': [0],
- 'validity': ['valid'],
- 'TID': [0, 1, 208],
- 'containerID': [0, 1],
- 'terminusLocatorType': ['MCTP_EID'],
- 'terminusLocatorValueSize': [1]}
+ "PDRHeaderVersion": [1],
+ "PDRType": ["Terminus Locator PDR"],
+ "recordChangeNumber": [0],
+ "validity": ["valid"],
+ "TID": [0, 1, 208],
+ "containerID": [0, 1],
+ "terminusLocatorType": ["MCTP_EID"],
+ "terminusLocatorValueSize": [1],
+}
RESPONSE_DICT_GETPDR_NUMERICEFFECTERPDR = {
- 'PDRHeaderVersion': [1],
- 'PDRType': ['Numeric Effecter PDR'],
- 'recordChangeNumber': [0],
- 'entityInstanceNumber': [0, 1],
- 'containerID': [0],
- 'effecterSemanticID': [0],
- 'effecterInit': [0],
- 'effecterAuxiliaryNames': [False],
- 'baseUnit': [0, 72, 21],
- 'unitModifier': [0],
- 'baseOEMUnitHandle': [0],
- 'auxUnit': [0],
- 'auxUnitModifier': [0],
- 'auxrateUnit': [0],
- 'auxOEMUnitHandle': [0],
- 'resolution': [1, 0],
- 'offset': [0],
- 'accuracy': [0],
- 'plusTolerance': [0],
- 'minusTolerance': [0],
- 'stateTransitionInterval': [0],
- 'TransitionInterval': [0],
- 'minSettable': [0],
- 'rangeFieldSupport': [0],
- 'nominalValue': [0],
- 'normalMax': [0],
- 'normalMin': [0],
- 'ratedMax': [0],
- 'ratedMin': [0]}
+ "PDRHeaderVersion": [1],
+ "PDRType": ["Numeric Effecter PDR"],
+ "recordChangeNumber": [0],
+ "entityInstanceNumber": [0, 1],
+ "containerID": [0],
+ "effecterSemanticID": [0],
+ "effecterInit": [0],
+ "effecterAuxiliaryNames": [False],
+ "baseUnit": [0, 72, 21],
+ "unitModifier": [0],
+ "baseOEMUnitHandle": [0],
+ "auxUnit": [0],
+ "auxUnitModifier": [0],
+ "auxrateUnit": [0],
+ "auxOEMUnitHandle": [0],
+ "resolution": [1, 0],
+ "offset": [0],
+ "accuracy": [0],
+ "plusTolerance": [0],
+ "minusTolerance": [0],
+ "stateTransitionInterval": [0],
+ "TransitionInterval": [0],
+ "minSettable": [0],
+ "rangeFieldSupport": [0],
+ "nominalValue": [0],
+ "normalMax": [0],
+ "normalMin": [0],
+ "ratedMax": [0],
+ "ratedMin": [0],
+}
PLDM_PDR_TYPES = {
- 'PLDM_STATE_EFFECTER_PDR': 'State Effecter PDR',
- 'PLDM_PDR_FRU_RECORD_SET': 'FRU Record Set PDR',
- 'PLDM_PDR_ENTITY_ASSOCIATION': 'Entity Association PDR',
- 'PLDM_STATE_SENSOR_PDR': 'State Sensor PDR',
- 'PLDM_NUMERIC_EFFECTER_PDR': 'Numeric Effecter PDR',
- 'PLDM_TERMINUS_LOCATOR_PDR': 'Terminus Locator PDR',
- 'PLDM_COMPACT_NUMERIC_SENSOR_PDR': '21'}
+ "PLDM_STATE_EFFECTER_PDR": "State Effecter PDR",
+ "PLDM_PDR_FRU_RECORD_SET": "FRU Record Set PDR",
+ "PLDM_PDR_ENTITY_ASSOCIATION": "Entity Association PDR",
+ "PLDM_STATE_SENSOR_PDR": "State Sensor PDR",
+ "PLDM_NUMERIC_EFFECTER_PDR": "Numeric Effecter PDR",
+ "PLDM_TERMINUS_LOCATOR_PDR": "Terminus Locator PDR",
+ "PLDM_COMPACT_NUMERIC_SENSOR_PDR": "21",
+}
RESPONSE_LIST_GETBIOSTABLE_ATTRVALTABLE = [
- 'BIOSString', 'BIOSInteger', 'BIOSEnumeration']
+ "BIOSString",
+ "BIOSInteger",
+ "BIOSEnumeration",
+]
diff --git a/data/variables.py b/data/variables.py
index 2c9933e..55d513f 100755
--- a/data/variables.py
+++ b/data/variables.py
@@ -1,235 +1,245 @@
import os
+
from robot.libraries.BuiltIn import BuiltIn
-OPENBMC_BASE_URI = '/xyz/openbmc_project/'
-OPENBMC_BASE_DBUS = 'xyz.openbmc_project.'
+OPENBMC_BASE_URI = "/xyz/openbmc_project/"
+OPENBMC_BASE_DBUS = "xyz.openbmc_project."
# Generic Dbus commands.
OPENBMC_DBUS_GET_PROPERTY = "busctl get-property "
OPENBMC_DBUS_SET_PROPERTY = "busctl set-property "
# org open power base URI.
-OPENPOWER_BASE_URI = '/org/open_power/'
-OPENPOWER_CONTROL = OPENPOWER_BASE_URI + 'control/'
-OPENPOWER_SENSORS = OPENPOWER_BASE_URI + 'sensors/'
+OPENPOWER_BASE_URI = "/org/open_power/"
+OPENPOWER_CONTROL = OPENPOWER_BASE_URI + "control/"
+OPENPOWER_SENSORS = OPENPOWER_BASE_URI + "sensors/"
# REST URI base endpoint paths.
-CONTROL_URI = OPENBMC_BASE_URI + 'control/'
+CONTROL_URI = OPENBMC_BASE_URI + "control/"
# Continue to keep to support legacy code.
-SETTINGS_URI = '/org/openbmc/settings/'
-WATCHDOG_URI = OPENBMC_BASE_URI + 'watchdog/'
-TIME_MANAGER_URI = OPENBMC_BASE_URI + 'time/'
-NETWORK_MANAGER = OPENBMC_BASE_URI + 'network/'
-NETWORK_RESOURCE = 'xyz.openbmc_project.Network.IP.Protocol.IPv4'
+SETTINGS_URI = "/org/openbmc/settings/"
+WATCHDOG_URI = OPENBMC_BASE_URI + "watchdog/"
+TIME_MANAGER_URI = OPENBMC_BASE_URI + "time/"
+NETWORK_MANAGER = OPENBMC_BASE_URI + "network/"
+NETWORK_RESOURCE = "xyz.openbmc_project.Network.IP.Protocol.IPv4"
# SNMP
-SNMP_MANAGER_URI = NETWORK_MANAGER + 'snmp/manager/'
+SNMP_MANAGER_URI = NETWORK_MANAGER + "snmp/manager/"
# Sensors base variables.
-SENSORS_URI = OPENBMC_BASE_URI + 'sensors/'
+SENSORS_URI = OPENBMC_BASE_URI + "sensors/"
# Thermal Control base variables
-THERMAL_CONTROL_URI = CONTROL_URI + 'thermal/0'
-THERMAL_METRICS = 'ThermalSubsystem/ThermalMetrics'
+THERMAL_CONTROL_URI = CONTROL_URI + "thermal/0"
+THERMAL_METRICS = "ThermalSubsystem/ThermalMetrics"
-COMPONENT_NAME_OF_POWER_SUPPLY = 'powersupply'
+COMPONENT_NAME_OF_POWER_SUPPLY = "powersupply"
# State Manager base variables
-BMC_REBOOT_TRANS = 'xyz.openbmc_project.State.BMC.Transition.Reboot'
+BMC_REBOOT_TRANS = "xyz.openbmc_project.State.BMC.Transition.Reboot"
-HOST_POWEROFF_TRANS = 'xyz.openbmc_project.State.Host.Transition.Off'
-HOST_POWERON_TRANS = 'xyz.openbmc_project.State.Host.Transition.On'
-HOST_REBOOT_TRANS = 'xyz.openbmc_project.State.Host.Transition.Reboot'
-HOST_POWEROFF_STATE = 'xyz.openbmc_project.State.Host.HostState.Off'
-HOST_POWERON_STATE = 'xyz.openbmc_project.State.Host.HostState.Running'
+HOST_POWEROFF_TRANS = "xyz.openbmc_project.State.Host.Transition.Off"
+HOST_POWERON_TRANS = "xyz.openbmc_project.State.Host.Transition.On"
+HOST_REBOOT_TRANS = "xyz.openbmc_project.State.Host.Transition.Reboot"
+HOST_POWEROFF_STATE = "xyz.openbmc_project.State.Host.HostState.Off"
+HOST_POWERON_STATE = "xyz.openbmc_project.State.Host.HostState.Running"
-CHASSIS_POWEROFF_TRANS = 'xyz.openbmc_project.State.Chassis.Transition.Off'
-CHASSIS_POWERON_TRANS = 'xyz.openbmc_project.State.Chassis.Transition.On'
-CHASSIS_POWEROFF_STATE = 'xyz.openbmc_project.State.Chassis.PowerState.Off'
-CHASSIS_POWERON_STATE = 'xyz.openbmc_project.State.Chassis.PowerState.On'
+CHASSIS_POWEROFF_TRANS = "xyz.openbmc_project.State.Chassis.Transition.Off"
+CHASSIS_POWERON_TRANS = "xyz.openbmc_project.State.Chassis.Transition.On"
+CHASSIS_POWEROFF_STATE = "xyz.openbmc_project.State.Chassis.PowerState.Off"
+CHASSIS_POWERON_STATE = "xyz.openbmc_project.State.Chassis.PowerState.On"
# State Manager URI variables.
-SYSTEM_STATE_URI = OPENBMC_BASE_URI + 'state/'
-BMC_STATE_URI = OPENBMC_BASE_URI + 'state/bmc0/'
-HOST_STATE_URI = OPENBMC_BASE_URI + 'state/host0/'
-CHASSIS_STATE_URI = OPENBMC_BASE_URI + 'state/chassis0/'
-HOST_WATCHDOG_URI = OPENBMC_BASE_URI + 'watchdog/host0/'
+SYSTEM_STATE_URI = OPENBMC_BASE_URI + "state/"
+BMC_STATE_URI = OPENBMC_BASE_URI + "state/bmc0/"
+HOST_STATE_URI = OPENBMC_BASE_URI + "state/host0/"
+CHASSIS_STATE_URI = OPENBMC_BASE_URI + "state/chassis0/"
+HOST_WATCHDOG_URI = OPENBMC_BASE_URI + "watchdog/host0/"
# OS state for x86 architecture
-OS_STATE_URI = OPENBMC_BASE_URI + 'state/os/'
+OS_STATE_URI = OPENBMC_BASE_URI + "state/os/"
# Logging URI variables
-BMC_LOGGING_URI = OPENBMC_BASE_URI + 'logging/'
-BMC_LOGGING_ENTRY = BMC_LOGGING_URI + 'entry/'
-REDFISH_BMC_LOGGING_ENTRY = '/redfish/v1/Systems/system/LogServices/EventLog/Entries/'
+BMC_LOGGING_URI = OPENBMC_BASE_URI + "logging/"
+BMC_LOGGING_ENTRY = BMC_LOGGING_URI + "entry/"
+REDFISH_BMC_LOGGING_ENTRY = (
+ "/redfish/v1/Systems/system/LogServices/EventLog/Entries/"
+)
# Software manager version
-SOFTWARE_VERSION_URI = OPENBMC_BASE_URI + 'software/'
-ACTIVE = 'xyz.openbmc_project.Software.Activation.Activations.Active'
-READY = 'xyz.openbmc_project.Software.Activation.Activations.Ready'
-INVALID = 'xyz.openbmc_project.Software.Activation.Activations.Invalid'
-ACTIVATING = 'xyz.openbmc_project.Software.Activation.Activations.Activating'
-NOTREADY = 'xyz.openbmc_project.Software.Activation.Activations.NotReady'
-FAILED = 'xyz.openbmc_project.Software.Activation.Activations.Failed'
+SOFTWARE_VERSION_URI = OPENBMC_BASE_URI + "software/"
+ACTIVE = "xyz.openbmc_project.Software.Activation.Activations.Active"
+READY = "xyz.openbmc_project.Software.Activation.Activations.Ready"
+INVALID = "xyz.openbmc_project.Software.Activation.Activations.Invalid"
+ACTIVATING = "xyz.openbmc_project.Software.Activation.Activations.Activating"
+NOTREADY = "xyz.openbmc_project.Software.Activation.Activations.NotReady"
+FAILED = "xyz.openbmc_project.Software.Activation.Activations.Failed"
-SOFTWARE_ACTIVATION = 'xyz.openbmc_project.Software.Activation'
-REQUESTED_ACTIVATION = SOFTWARE_ACTIVATION + '.RequestedActivations'
-REQUESTED_ACTIVE = REQUESTED_ACTIVATION + '.Active'
-REQUESTED_NONE = REQUESTED_ACTIVATION + '.None'
+SOFTWARE_ACTIVATION = "xyz.openbmc_project.Software.Activation"
+REQUESTED_ACTIVATION = SOFTWARE_ACTIVATION + ".RequestedActivations"
+REQUESTED_ACTIVE = REQUESTED_ACTIVATION + ".Active"
+REQUESTED_NONE = REQUESTED_ACTIVATION + ".None"
-SOFTWARE_PURPOSE = 'xyz.openbmc_project.Software.Version.VersionPurpose'
-VERSION_PURPOSE_HOST = SOFTWARE_PURPOSE + '.Host'
-VERSION_PURPOSE_BMC = SOFTWARE_PURPOSE + '.BMC'
-VERSION_PURPOSE_SYSTEM = SOFTWARE_PURPOSE + '.System'
+SOFTWARE_PURPOSE = "xyz.openbmc_project.Software.Version.VersionPurpose"
+VERSION_PURPOSE_HOST = SOFTWARE_PURPOSE + ".Host"
+VERSION_PURPOSE_BMC = SOFTWARE_PURPOSE + ".BMC"
+VERSION_PURPOSE_SYSTEM = SOFTWARE_PURPOSE + ".System"
# Image Upload Directory Path
-IMAGE_UPLOAD_DIR_PATH = '/tmp/images/'
+IMAGE_UPLOAD_DIR_PATH = "/tmp/images/"
# Inventory URI variables
-HOST_INVENTORY_URI = OPENBMC_BASE_URI + 'inventory/'
-CHASSIS_INVENTORY_URI = HOST_INVENTORY_URI + 'system/chassis/'
-MOTHERBOARD_INVENTORY_URI = CHASSIS_INVENTORY_URI + 'motherboard/'
+HOST_INVENTORY_URI = OPENBMC_BASE_URI + "inventory/"
+CHASSIS_INVENTORY_URI = HOST_INVENTORY_URI + "system/chassis/"
+MOTHERBOARD_INVENTORY_URI = CHASSIS_INVENTORY_URI + "motherboard/"
# Led URI variable
-LED_GROUPS_URI = OPENBMC_BASE_URI + 'led/groups/'
-LED_PHYSICAL_URI = OPENBMC_BASE_URI + 'led/physical/'
-LED_LAMP_TEST_ASSERTED_URI = LED_GROUPS_URI + 'lamp_test/'
-LED_PHYSICAL_PS0_URI = LED_PHYSICAL_URI + 'cffps1_69/'
-LED_PHYSICAL_PS1_URI = LED_PHYSICAL_URI + 'cffps1_68/'
-LED_PHYSICAL_FAN0_URI = LED_PHYSICAL_URI + 'fan0/'
-LED_PHYSICAL_FAN2_URI = LED_PHYSICAL_URI + 'fan2/'
-LED_PHYSICAL_FAN3_URI = LED_PHYSICAL_URI + 'fan3/'
+LED_GROUPS_URI = OPENBMC_BASE_URI + "led/groups/"
+LED_PHYSICAL_URI = OPENBMC_BASE_URI + "led/physical/"
+LED_LAMP_TEST_ASSERTED_URI = LED_GROUPS_URI + "lamp_test/"
+LED_PHYSICAL_PS0_URI = LED_PHYSICAL_URI + "cffps1_69/"
+LED_PHYSICAL_PS1_URI = LED_PHYSICAL_URI + "cffps1_68/"
+LED_PHYSICAL_FAN0_URI = LED_PHYSICAL_URI + "fan0/"
+LED_PHYSICAL_FAN2_URI = LED_PHYSICAL_URI + "fan2/"
+LED_PHYSICAL_FAN3_URI = LED_PHYSICAL_URI + "fan3/"
# Host control URI variables.
-CONTROL_HOST_URI = OPENBMC_BASE_URI + 'control/host0/'
+CONTROL_HOST_URI = OPENBMC_BASE_URI + "control/host0/"
# Power restore variables.
-POWER_RESTORE_URI = CONTROL_HOST_URI + 'power_restore_policy'
-CONTROL_DBUS_BASE = 'xyz.openbmc_project.Control.'
+POWER_RESTORE_URI = CONTROL_HOST_URI + "power_restore_policy"
+CONTROL_DBUS_BASE = "xyz.openbmc_project.Control."
-RESTORE_LAST_STATE = CONTROL_DBUS_BASE + 'Power.RestorePolicy.Policy.Restore'
-ALWAYS_POWER_ON = CONTROL_DBUS_BASE + 'Power.RestorePolicy.Policy.AlwaysOn'
-ALWAYS_POWER_OFF = CONTROL_DBUS_BASE + 'Power.RestorePolicy.Policy.AlwaysOff'
+RESTORE_LAST_STATE = CONTROL_DBUS_BASE + "Power.RestorePolicy.Policy.Restore"
+ALWAYS_POWER_ON = CONTROL_DBUS_BASE + "Power.RestorePolicy.Policy.AlwaysOn"
+ALWAYS_POWER_OFF = CONTROL_DBUS_BASE + "Power.RestorePolicy.Policy.AlwaysOff"
# Dump URI variables.
-REST_DUMP_URI = OPENBMC_BASE_URI + 'dump/bmc/'
-DUMP_ENTRY_URI = REST_DUMP_URI + 'entry/'
+REST_DUMP_URI = OPENBMC_BASE_URI + "dump/bmc/"
+DUMP_ENTRY_URI = REST_DUMP_URI + "entry/"
DUMP_DOWNLOAD_URI = "/download/dump/"
# The path on the BMC where dumps are stored.
DUMP_DIR_PATH = "/var/lib/phosphor-debug-collector/"
# Boot progress variables.
-STATE_DBUS_BASE = 'xyz.openbmc_project.State.'
-OS_BOOT_START = STATE_DBUS_BASE + 'Boot.Progress.ProgressStages.OSStart'
-OS_BOOT_OFF = STATE_DBUS_BASE + 'Boot.Progress.ProgressStages.Unspecified'
-OS_BOOT_PCI = STATE_DBUS_BASE + 'Boot.Progress.ProgressStages.PCIInit'
-OS_BOOT_SECPCI = STATE_DBUS_BASE + \
- 'Boot.Progress.ProgressStages.SecondaryProcInit'
-OS_BOOT_MEM = STATE_DBUS_BASE + 'Boot.Progress.ProgressStages.MemoryInit'
-OS_BOOT_MOTHERBOARD = STATE_DBUS_BASE + \
- 'Boot.Progress.ProgressStages.MotherboardInit'
+STATE_DBUS_BASE = "xyz.openbmc_project.State."
+OS_BOOT_START = STATE_DBUS_BASE + "Boot.Progress.ProgressStages.OSStart"
+OS_BOOT_OFF = STATE_DBUS_BASE + "Boot.Progress.ProgressStages.Unspecified"
+OS_BOOT_PCI = STATE_DBUS_BASE + "Boot.Progress.ProgressStages.PCIInit"
+OS_BOOT_SECPCI = (
+ STATE_DBUS_BASE + "Boot.Progress.ProgressStages.SecondaryProcInit"
+)
+OS_BOOT_MEM = STATE_DBUS_BASE + "Boot.Progress.ProgressStages.MemoryInit"
+OS_BOOT_MOTHERBOARD = (
+ STATE_DBUS_BASE + "Boot.Progress.ProgressStages.MotherboardInit"
+)
OPENBMC_DBUS_BMC_STATE = STATE_DBUS_BASE + "BMC"
# OperatingSystem status variables.
-OS_BOOT_COMPLETE = STATE_DBUS_BASE + \
- 'OperatingSystem.Status.OSStatus.BootComplete'
-OS_BOOT_CDROM = STATE_DBUS_BASE + 'OperatingSystem.Status.OSStatus.CDROMBoot'
-OS_BOOT_ROM = STATE_DBUS_BASE + 'OperatingSystem.Status.OSStatus.ROMBoot'
-OS_BOOT_PXE = STATE_DBUS_BASE + 'OperatingSystem.Status.OSStatus.PXEBoot'
-OS_BOOT_CBoot = STATE_DBUS_BASE + 'OperatingSystem.Status.OSStatus.CBoot'
-OS_BOOT_DiagBoot = STATE_DBUS_BASE + 'OperatingSystem.Status.OSStatus.DiagBoot'
+OS_BOOT_COMPLETE = (
+ STATE_DBUS_BASE + "OperatingSystem.Status.OSStatus.BootComplete"
+)
+OS_BOOT_CDROM = STATE_DBUS_BASE + "OperatingSystem.Status.OSStatus.CDROMBoot"
+OS_BOOT_ROM = STATE_DBUS_BASE + "OperatingSystem.Status.OSStatus.ROMBoot"
+OS_BOOT_PXE = STATE_DBUS_BASE + "OperatingSystem.Status.OSStatus.PXEBoot"
+OS_BOOT_CBoot = STATE_DBUS_BASE + "OperatingSystem.Status.OSStatus.CBoot"
+OS_BOOT_DiagBoot = STATE_DBUS_BASE + "OperatingSystem.Status.OSStatus.DiagBoot"
# Boot variables.
-BOOT_SOURCE_DEFAULT = 'xyz.openbmc_project.Control.Boot.Source.Sources.Default'
-BOOT_SOURCE_NETWORK = 'xyz.openbmc_project.Control.Boot.Source.Sources.Network'
-BOOT_SOURCE_DISK = 'xyz.openbmc_project.Control.Boot.Source.Sources.Disk'
-BOOT_SOURCE_CDROM = 'xyz.openbmc_project.Control.Boot.Source.Sources.ExternalMedia'
-BOOT_MODE_SAFE = 'xyz.openbmc_project.Control.Boot.Mode.Modes.Safe'
-BOOT_MODE_SETUP = 'xyz.openbmc_project.Control.Boot.Mode.Modes.Setup'
-BOOT_MODE_REGULAR = 'xyz.openbmc_project.Control.Boot.Mode.Modes.Regular'
-BOOT_TYPE_LEGACY = 'xyz.openbmc_project.Control.Boot.Type.Types.Legacy'
-BOOT_TYPE_EFI = 'xyz.openbmc_project.Control.Boot.Type.Types.EFI'
+BOOT_SOURCE_DEFAULT = "xyz.openbmc_project.Control.Boot.Source.Sources.Default"
+BOOT_SOURCE_NETWORK = "xyz.openbmc_project.Control.Boot.Source.Sources.Network"
+BOOT_SOURCE_DISK = "xyz.openbmc_project.Control.Boot.Source.Sources.Disk"
+BOOT_SOURCE_CDROM = (
+ "xyz.openbmc_project.Control.Boot.Source.Sources.ExternalMedia"
+)
+BOOT_MODE_SAFE = "xyz.openbmc_project.Control.Boot.Mode.Modes.Safe"
+BOOT_MODE_SETUP = "xyz.openbmc_project.Control.Boot.Mode.Modes.Setup"
+BOOT_MODE_REGULAR = "xyz.openbmc_project.Control.Boot.Mode.Modes.Regular"
+BOOT_TYPE_LEGACY = "xyz.openbmc_project.Control.Boot.Type.Types.Legacy"
+BOOT_TYPE_EFI = "xyz.openbmc_project.Control.Boot.Type.Types.EFI"
# Time variables.
-TIME_DBUS_BASE = 'xyz.openbmc_project.Time.'
-BMC_OWNER = TIME_DBUS_BASE + 'Owner.Owners.BMC'
-HOST_OWNER = TIME_DBUS_BASE + 'Owner.Owners.Host'
-SPLIT_OWNER = TIME_DBUS_BASE + 'Owner.Owners.Split'
-BOTH_OWNER = TIME_DBUS_BASE + 'Owner.Owners.Both'
-NTP_MODE = TIME_DBUS_BASE + 'Synchronization.Method.NTP'
-MANUAL_MODE = TIME_DBUS_BASE + 'Synchronization.Method.Manual'
+TIME_DBUS_BASE = "xyz.openbmc_project.Time."
+BMC_OWNER = TIME_DBUS_BASE + "Owner.Owners.BMC"
+HOST_OWNER = TIME_DBUS_BASE + "Owner.Owners.Host"
+SPLIT_OWNER = TIME_DBUS_BASE + "Owner.Owners.Split"
+BOTH_OWNER = TIME_DBUS_BASE + "Owner.Owners.Both"
+NTP_MODE = TIME_DBUS_BASE + "Synchronization.Method.NTP"
+MANUAL_MODE = TIME_DBUS_BASE + "Synchronization.Method.Manual"
# User manager variable.
-BMC_USER_URI = OPENBMC_BASE_URI + 'user/'
+BMC_USER_URI = OPENBMC_BASE_URI + "user/"
# LDAP User manager variable.
-BMC_LDAP_URI = BMC_USER_URI + 'ldap'
+BMC_LDAP_URI = BMC_USER_URI + "ldap"
# The path on the BMC where signed keys are stored.
ACTIVATION_DIR_PATH = "/etc/activationdata/"
# Redfish variables.
-REDFISH_BASE_URI = '/redfish/v1/'
-REDFISH_SESSION = REDFISH_BASE_URI + 'SessionService/Sessions'
-REDFISH_SESSION_URI = 'SessionService/Sessions/'
-REDFISH_NW_ETH0 = 'Managers/bmc/EthernetInterfaces/eth0/'
+REDFISH_BASE_URI = "/redfish/v1/"
+REDFISH_SESSION = REDFISH_BASE_URI + "SessionService/Sessions"
+REDFISH_SESSION_URI = "SessionService/Sessions/"
+REDFISH_NW_ETH0 = "Managers/bmc/EthernetInterfaces/eth0/"
REDFISH_NW_ETH0_URI = REDFISH_BASE_URI + REDFISH_NW_ETH0
-REDFISH_NW_ETH_IFACE = REDFISH_BASE_URI + 'Managers/bmc/EthernetInterfaces/'
-REDFISH_NW_PROTOCOL = 'Managers/bmc/NetworkProtocol'
+REDFISH_NW_ETH_IFACE = REDFISH_BASE_URI + "Managers/bmc/EthernetInterfaces/"
+REDFISH_NW_PROTOCOL = "Managers/bmc/NetworkProtocol"
REDFISH_NW_PROTOCOL_URI = REDFISH_BASE_URI + REDFISH_NW_PROTOCOL
-REDFISH_ACCOUNTS_SERVICE = 'AccountService/'
+REDFISH_ACCOUNTS_SERVICE = "AccountService/"
REDFISH_ACCOUNTS_SERVICE_URI = REDFISH_BASE_URI + REDFISH_ACCOUNTS_SERVICE
-REDFISH_ACCOUNTS = 'AccountService/Accounts/'
+REDFISH_ACCOUNTS = "AccountService/Accounts/"
REDFISH_ACCOUNTS_URI = REDFISH_BASE_URI + REDFISH_ACCOUNTS
-REDFISH_HTTPS_CERTIFICATE = 'Managers/bmc/NetworkProtocol/HTTPS/Certificates'
+REDFISH_HTTPS_CERTIFICATE = "Managers/bmc/NetworkProtocol/HTTPS/Certificates"
REDFISH_HTTPS_CERTIFICATE_URI = REDFISH_BASE_URI + REDFISH_HTTPS_CERTIFICATE
-REDFISH_LDAP_CERTIFICATE = 'AccountService/LDAP/Certificates'
+REDFISH_LDAP_CERTIFICATE = "AccountService/LDAP/Certificates"
REDFISH_LDAP_CERTIFICATE_URI = REDFISH_BASE_URI + REDFISH_LDAP_CERTIFICATE
-REDFISH_CA_CERTIFICATE = 'Managers/bmc/Truststore/Certificates'
+REDFISH_CA_CERTIFICATE = "Managers/bmc/Truststore/Certificates"
REDFISH_CA_CERTIFICATE_URI = REDFISH_BASE_URI + REDFISH_CA_CERTIFICATE
-REDFISH_CHASSIS_URI = REDFISH_BASE_URI + 'Chassis/'
-REDFISH_CHASSIS_THERMAL = 'chassis/Thermal/'
+REDFISH_CHASSIS_URI = REDFISH_BASE_URI + "Chassis/"
+REDFISH_CHASSIS_THERMAL = "chassis/Thermal/"
REDFISH_CHASSIS_THERMAL_URI = REDFISH_CHASSIS_URI + REDFISH_CHASSIS_THERMAL
-REDFISH_CHASSIS_POWER = 'chassis/Power/'
+REDFISH_CHASSIS_POWER = "chassis/Power/"
REDFISH_CHASSIS_POWER_URI = REDFISH_CHASSIS_URI + REDFISH_CHASSIS_POWER
-REDFISH_CHASSIS_SENSORS = 'chassis/Sensors'
+REDFISH_CHASSIS_SENSORS = "chassis/Sensors"
REDFISH_CHASSIS_SENSORS_URI = REDFISH_CHASSIS_URI + REDFISH_CHASSIS_SENSORS
-REDFISH_BMC_DUMP = 'Managers/bmc/LogServices/Dump/Entries'
+REDFISH_BMC_DUMP = "Managers/bmc/LogServices/Dump/Entries"
REDFISH_DUMP_URI = REDFISH_BASE_URI + REDFISH_BMC_DUMP
# Boot options and URI variables.
-POWER_ON = 'On'
+POWER_ON = "On"
POWER_GRACEFUL_OFF = "GracefulShutdown"
POWER_GRACEFUL_RESTART = "GracefulRestart"
-POWER_FORCE_OFF = 'ForceOff'
+POWER_FORCE_OFF = "ForceOff"
-REDFISH_POWER = 'Systems/system/Actions/ComputerSystem.Reset'
+REDFISH_POWER = "Systems/system/Actions/ComputerSystem.Reset"
REDFISH_POWER_URI = REDFISH_BASE_URI + REDFISH_POWER
# rsyslog variables.
-REMOTE_LOGGING_URI = OPENBMC_BASE_URI + 'logging/config/remote/'
+REMOTE_LOGGING_URI = OPENBMC_BASE_URI + "logging/config/remote/"
# Certificate variables.
-SERVER_CERTIFICATE_URI = OPENBMC_BASE_URI + 'certs/server/https'
-CLIENT_CERTIFICATE_URI = OPENBMC_BASE_URI + 'certs/client/ldap'
-CA_CERTIFICATE_URI = OPENBMC_BASE_URI + 'certs/authority/ldap'
+SERVER_CERTIFICATE_URI = OPENBMC_BASE_URI + "certs/server/https"
+CLIENT_CERTIFICATE_URI = OPENBMC_BASE_URI + "certs/client/ldap"
+CA_CERTIFICATE_URI = OPENBMC_BASE_URI + "certs/authority/ldap"
# EventLog variables.
-SYSTEM_BASE_URI = REDFISH_BASE_URI + 'Systems/system/'
-EVENT_LOG_URI = SYSTEM_BASE_URI + 'LogServices/EventLog/'
-DUMP_URI = SYSTEM_BASE_URI + 'LogServices/Dump/'
+SYSTEM_BASE_URI = REDFISH_BASE_URI + "Systems/system/"
+EVENT_LOG_URI = SYSTEM_BASE_URI + "LogServices/EventLog/"
+DUMP_URI = SYSTEM_BASE_URI + "LogServices/Dump/"
-BIOS_ATTR_URI = SYSTEM_BASE_URI + 'Bios'
-BIOS_ATTR_SETTINGS_URI = BIOS_ATTR_URI + '/Settings'
+BIOS_ATTR_URI = SYSTEM_BASE_URI + "Bios"
+BIOS_ATTR_SETTINGS_URI = BIOS_ATTR_URI + "/Settings"
-'''
+"""
QEMU HTTPS variable:
By default lib/resource.robot AUTH URI construct is as
${AUTH_URI} https://${OPENBMC_HOST}${AUTH_SUFFIX}
${AUTH_SUFFIX} is populated here by default EMPTY else
the port from the OS environment
-'''
+"""
-AUTH_SUFFIX = ":" + BuiltIn().get_variable_value("${HTTPS_PORT}", os.getenv('HTTPS_PORT', '443'))
+AUTH_SUFFIX = ":" + BuiltIn().get_variable_value(
+ "${HTTPS_PORT}", os.getenv("HTTPS_PORT", "443")
+)
# Here contains a list of valid Properties bases on fru_type after a boot.
INVENTORY_ITEMS = {
@@ -253,7 +263,6 @@
"present",
"version",
],
-
"DIMM": [
"Asset Tag",
"Custom Field 1",
diff --git a/data/vpd_variables.py b/data/vpd_variables.py
index dc310c8..3392b6a 100644
--- a/data/vpd_variables.py
+++ b/data/vpd_variables.py
@@ -25,5 +25,5 @@
},
"/system/chassis/motherboard/vdd_vrm1": {
"type": "xyz.openbmc_project.Inventory.Item.Vrm"
- }
+ },
}
diff --git a/extended/run_keyword.py b/extended/run_keyword.py
index 2d1ab51..dcab7ab 100644
--- a/extended/run_keyword.py
+++ b/extended/run_keyword.py
@@ -4,13 +4,13 @@
This module is the python counterpart to run_keyword.robot.
"""
-import gen_print as gp
-import gen_robot_valid as grv
-import gen_robot_utils as gru
-
-from robot.libraries.BuiltIn import BuiltIn
import re
+import gen_print as gp
+import gen_robot_utils as gru
+import gen_robot_valid as grv
+from robot.libraries.BuiltIn import BuiltIn
+
def setup():
r"""
@@ -42,10 +42,7 @@
gp.qprint_pgm_footer()
-def my_run_keywords(lib_file_path,
- keyword_string,
- quiet=0,
- test_mode=0):
+def my_run_keywords(lib_file_path, keyword_string, quiet=0, test_mode=0):
r"""
Run the keywords in the keyword string.
@@ -92,10 +89,10 @@
del lib_file_path_list[0]
for lib_file_path in lib_file_path_list:
if lib_file_path.endswith(".py"):
- gp.dprint_issuing("import_library(\"" + lib_file_path + "\")")
+ gp.dprint_issuing('import_library("' + lib_file_path + '")')
BuiltIn().import_library(lib_file_path)
else:
- gp.dprint_issuing("my_import_resource(\"" + lib_file_path + "\")")
+ gp.dprint_issuing('my_import_resource("' + lib_file_path + '")')
gru.my_import_resource(lib_file_path)
# The user can pass multiple keyword strings by separating them with " ; ".
diff --git a/ffdc/collect_ffdc.py b/ffdc/collect_ffdc.py
index d709a96..ef96c6b 100644
--- a/ffdc/collect_ffdc.py
+++ b/ffdc/collect_ffdc.py
@@ -6,12 +6,13 @@
import os
import sys
+
import click
# ---------Set sys.path for cli command execution---------------------------------------
# Absolute path to openbmc-test-automation/ffdc
abs_path = os.path.abspath(os.path.dirname(sys.argv[0]))
-full_path = abs_path.split('ffdc')[0]
+full_path = abs_path.split("ffdc")[0]
sys.path.append(full_path)
# Walk path and append to sys.path
for root, dirs, files in os.walk(full_path):
@@ -21,63 +22,105 @@
from ffdc_collector import ffdc_collector # NOQA
-@click.command(context_settings=dict(help_option_names=['-h', '--help']))
-@click.option('-r', '--remote',
- help="Hostname/IP of the remote host")
-@click.option('-u', '--username',
- help="Username of the remote host.")
-@click.option('-p', '--password',
- help="Password of the remote host.")
-@click.option('-c', '--config', default=abs_path + "/ffdc_config.yaml",
- show_default=True, help="YAML Configuration file for log collection.")
-@click.option('-l', '--location', default="/tmp",
- show_default=True, help="Location to save logs")
-@click.option('-t', '--type',
- help="OS type of the remote (targeting) host. OPENBMC, RHEL, UBUNTU, SLES, AIX")
-@click.option('-rp', '--protocol', default="ALL",
- show_default=True,
- help="Select protocol to communicate with remote host.")
-@click.option('-e', '--env_vars', show_default=True,
- help="Environment variables e.g: {'var':value}")
-@click.option('-ec', '--econfig', show_default=True,
- help="Predefine environment variables, refer en_vars_template.yaml ")
-@click.option('--log_level', default="INFO",
- show_default=True,
- help="Log level (CRITICAL, ERROR, WARNING, INFO, DEBUG)")
-def cli_ffdc(remote,
- username,
- password,
- config,
- location,
- type,
- protocol,
- env_vars,
- econfig,
- log_level):
+@click.command(context_settings=dict(help_option_names=["-h", "--help"]))
+@click.option("-r", "--remote", help="Hostname/IP of the remote host")
+@click.option("-u", "--username", help="Username of the remote host.")
+@click.option("-p", "--password", help="Password of the remote host.")
+@click.option(
+ "-c",
+ "--config",
+ default=abs_path + "/ffdc_config.yaml",
+ show_default=True,
+ help="YAML Configuration file for log collection.",
+)
+@click.option(
+ "-l",
+ "--location",
+ default="/tmp",
+ show_default=True,
+ help="Location to save logs",
+)
+@click.option(
+ "-t",
+ "--type",
+ help=(
+ "OS type of the remote (targeting) host. OPENBMC, RHEL, UBUNTU,"
+ " SLES, AIX"
+ ),
+)
+@click.option(
+ "-rp",
+ "--protocol",
+ default="ALL",
+ show_default=True,
+ help="Select protocol to communicate with remote host.",
+)
+@click.option(
+ "-e",
+ "--env_vars",
+ show_default=True,
+ help="Environment variables e.g: {'var':value}",
+)
+@click.option(
+ "-ec",
+ "--econfig",
+ show_default=True,
+ help="Predefine environment variables, refer en_vars_template.yaml ",
+)
+@click.option(
+ "--log_level",
+ default="INFO",
+ show_default=True,
+ help="Log level (CRITICAL, ERROR, WARNING, INFO, DEBUG)",
+)
+def cli_ffdc(
+ remote,
+ username,
+ password,
+ config,
+ location,
+ type,
+ protocol,
+ env_vars,
+ econfig,
+ log_level,
+):
r"""
Stand alone CLI to generate and collect FFDC from the selected target.
"""
- click.echo("\n********** FFDC (First Failure Data Collection) Starts **********")
+ click.echo(
+ "\n********** FFDC (First Failure Data Collection) Starts **********"
+ )
if input_options_ok(remote, username, password, config, type):
- this_ffdc = ffdc_collector(remote,
- username,
- password,
- config,
- location,
- type,
- protocol,
- env_vars,
- econfig,
- log_level)
+ this_ffdc = ffdc_collector(
+ remote,
+ username,
+ password,
+ config,
+ location,
+ type,
+ protocol,
+ env_vars,
+ econfig,
+ log_level,
+ )
this_ffdc.collect_ffdc()
if len(os.listdir(this_ffdc.ffdc_dir_path)) == 0:
- click.echo("\n\tFFDC Collection from " + remote + " has failed.\n\n")
+ click.echo(
+ "\n\tFFDC Collection from " + remote + " has failed.\n\n"
+ )
else:
- click.echo(str("\n\t" + str(len(os.listdir(this_ffdc.ffdc_dir_path)))
- + " files were retrieved from " + remote))
+ click.echo(
+ str(
+ "\n\t"
+ + str(len(os.listdir(this_ffdc.ffdc_dir_path)))
+ + " files were retrieved from "
+ + remote
+ )
+ )
click.echo("\tFiles are stored in " + this_ffdc.ffdc_dir_path)
click.echo("\tTotal elapsed time " + this_ffdc.elapsed_time + "\n\n")
@@ -93,27 +136,37 @@
if not remote:
all_options_ok = False
- print("\
- \n\tERROR: Name/IP of the remote host is not specified in CLI options.")
+ print(
+ " \n\tERROR: Name/IP of the remote host is not specified in"
+ " CLI options."
+ )
if not username:
all_options_ok = False
- print("\
- \n\tERROR: User of the remote host is not specified in CLI options.")
+ print(
+ " \n\tERROR: User of the remote host is not specified in"
+ " CLI options."
+ )
if not password:
all_options_ok = False
- print("\
- \n\tERROR: Password of the user remote host is not specified in CLI options.")
+ print(
+ " \n\tERROR: Password of the user remote host is not"
+ " specified in CLI options."
+ )
if not type:
all_options_ok = False
- print("\
- \n\tERROR: Remote host os type is not specified in CLI options.")
+ print(
+ " \n\tERROR: Remote host os type is not specified in CLI"
+ " options."
+ )
if not os.path.isfile(config):
all_options_ok = False
- print("\
- \n\tERROR: Config file %s is not found. Please verify path and filename." % config)
+ print(
+ " \n\tERROR: Config file %s is not found. Please verify"
+ " path and filename." % config
+ )
return all_options_ok
-if __name__ == '__main__':
+if __name__ == "__main__":
cli_ffdc()
diff --git a/ffdc/ffdc_collector.py b/ffdc/ffdc_collector.py
index 91fe261..b21044c 100644
--- a/ffdc/ffdc_collector.py
+++ b/ffdc/ffdc_collector.py
@@ -4,18 +4,17 @@
See class prolog below for details.
"""
+import json
+import logging
+import os
+import platform
+import re
+import subprocess
+import sys
+import time
from errno import EACCES, EPERM
-import os
-import re
-import sys
import yaml
-import json
-import time
-import logging
-import platform
-from errno import EACCES, EPERM
-import subprocess
script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(script_dir)
@@ -24,8 +23,8 @@
for dir in dirs:
sys.path.append(os.path.join(root, dir))
-from ssh_utility import SSHRemoteclient # NOQA
-from telnet_utility import TelnetRemoteclient # NOQA
+from ssh_utility import SSHRemoteclient # NOQA
+from telnet_utility import TelnetRemoteclient # NOQA
r"""
User define plugins python functions.
@@ -43,11 +42,11 @@
- arg1
- arg2
"""
-plugin_dir = __file__.split(__file__.split("/")[-1])[0] + '/plugins'
+plugin_dir = __file__.split(__file__.split("/")[-1])[0] + "/plugins"
sys.path.append(plugin_dir)
try:
for module in os.listdir(plugin_dir):
- if module == '__init__.py' or module[-3:] != '.py':
+ if module == "__init__.py" or module[-3:] != ".py":
continue
plugin_module = "plugins." + module[:-3]
# To access the module plugin.<module name>.<function>
@@ -99,34 +98,35 @@
global_plugin_type_list = []
# Path where logs are to be stored or written.
-global_log_store_path = ''
+global_log_store_path = ""
# Plugin error state defaults.
plugin_error_dict = {
- 'exit_on_error': False,
- 'continue_on_error': False,
+ "exit_on_error": False,
+ "continue_on_error": False,
}
class ffdc_collector:
-
r"""
Execute commands from configuration file to collect log files.
Fetch and store generated files at the specified location.
"""
- def __init__(self,
- hostname,
- username,
- password,
- ffdc_config,
- location,
- remote_type,
- remote_protocol,
- env_vars,
- econfig,
- log_level):
+ def __init__(
+ self,
+ hostname,
+ username,
+ password,
+ ffdc_config,
+ location,
+ remote_type,
+ remote_protocol,
+ env_vars,
+ econfig,
+ log_level,
+ ):
r"""
Description of argument(s):
@@ -156,7 +156,7 @@
self.env_vars = env_vars
self.econfig = econfig
self.start_time = 0
- self.elapsed_time = ''
+ self.elapsed_time = ""
self.logger = None
# Set prefix values for scp files and directory.
@@ -174,7 +174,7 @@
if self.verify_script_env():
# Load default or user define YAML configuration file.
- with open(self.ffdc_config, 'r') as file:
+ with open(self.ffdc_config, "r") as file:
try:
self.ffdc_actions = yaml.load(file, Loader=yaml.SafeLoader)
except yaml.YAMLError as e:
@@ -183,7 +183,9 @@
if self.target_type not in self.ffdc_actions.keys():
self.logger.error(
- "\n\tERROR: %s is not listed in %s.\n\n" % (self.target_type, self.ffdc_config))
+ "\n\tERROR: %s is not listed in %s.\n\n"
+ % (self.target_type, self.ffdc_config)
+ )
sys.exit(-1)
else:
sys.exit(-1)
@@ -194,43 +196,62 @@
self.load_env()
def verify_script_env(self):
-
# Import to log version
import click
import paramiko
run_env_ok = True
- redfishtool_version = self.run_tool_cmd('redfishtool -V').split(' ')[2].strip('\n')
- ipmitool_version = self.run_tool_cmd('ipmitool -V').split(' ')[2]
+ redfishtool_version = (
+ self.run_tool_cmd("redfishtool -V").split(" ")[2].strip("\n")
+ )
+ ipmitool_version = self.run_tool_cmd("ipmitool -V").split(" ")[2]
self.logger.info("\n\t---- Script host environment ----")
- self.logger.info("\t{:<10} {:<10}".format('Script hostname', os.uname()[1]))
- self.logger.info("\t{:<10} {:<10}".format('Script host os', platform.platform()))
- self.logger.info("\t{:<10} {:>10}".format('Python', platform.python_version()))
- self.logger.info("\t{:<10} {:>10}".format('PyYAML', yaml.__version__))
- self.logger.info("\t{:<10} {:>10}".format('click', click.__version__))
- self.logger.info("\t{:<10} {:>10}".format('paramiko', paramiko.__version__))
- self.logger.info("\t{:<10} {:>9}".format('redfishtool', redfishtool_version))
- self.logger.info("\t{:<10} {:>12}".format('ipmitool', ipmitool_version))
+ self.logger.info(
+ "\t{:<10} {:<10}".format("Script hostname", os.uname()[1])
+ )
+ self.logger.info(
+ "\t{:<10} {:<10}".format("Script host os", platform.platform())
+ )
+ self.logger.info(
+ "\t{:<10} {:>10}".format("Python", platform.python_version())
+ )
+ self.logger.info("\t{:<10} {:>10}".format("PyYAML", yaml.__version__))
+ self.logger.info("\t{:<10} {:>10}".format("click", click.__version__))
+ self.logger.info(
+ "\t{:<10} {:>10}".format("paramiko", paramiko.__version__)
+ )
+ self.logger.info(
+ "\t{:<10} {:>9}".format("redfishtool", redfishtool_version)
+ )
+ self.logger.info(
+ "\t{:<10} {:>12}".format("ipmitool", ipmitool_version)
+ )
- if eval(yaml.__version__.replace('.', ',')) < (5, 3, 0):
- self.logger.error("\n\tERROR: Python or python packages do not meet minimum version requirement.")
- self.logger.error("\tERROR: PyYAML version 5.3.0 or higher is needed.\n")
+ if eval(yaml.__version__.replace(".", ",")) < (5, 3, 0):
+ self.logger.error(
+ "\n\tERROR: Python or python packages do not meet minimum"
+ " version requirement."
+ )
+ self.logger.error(
+ "\tERROR: PyYAML version 5.3.0 or higher is needed.\n"
+ )
run_env_ok = False
self.logger.info("\t---- End script host environment ----")
return run_env_ok
- def script_logging(self,
- log_level_attr):
+ def script_logging(self, log_level_attr):
r"""
Create logger
"""
self.logger = logging.getLogger()
self.logger.setLevel(log_level_attr)
- log_file_handler = logging.FileHandler(self.ffdc_dir_path + "collector.log")
+ log_file_handler = logging.FileHandler(
+ self.ffdc_dir_path + "collector.log"
+ )
stdout_handler = logging.StreamHandler(sys.stdout)
self.logger.addHandler(log_file_handler)
@@ -246,11 +267,15 @@
"""
response = os.system("ping -c 1 %s 2>&1 >/dev/null" % self.hostname)
if response == 0:
- self.logger.info("\n\t[Check] %s is ping-able.\t\t [OK]" % self.hostname)
+ self.logger.info(
+ "\n\t[Check] %s is ping-able.\t\t [OK]" % self.hostname
+ )
return True
else:
self.logger.error(
- "\n\tERROR: %s is not ping-able. FFDC collection aborted.\n" % self.hostname)
+ "\n\tERROR: %s is not ping-able. FFDC collection aborted.\n"
+ % self.hostname
+ )
sys.exit(-1)
def collect_ffdc(self):
@@ -259,7 +284,9 @@
"""
- self.logger.info("\n\t---- Start communicating with %s ----" % self.hostname)
+ self.logger.info(
+ "\n\t---- Start communicating with %s ----" % self.hostname
+ )
self.start_time = time.time()
# Find the list of target and protocol supported.
@@ -271,24 +298,40 @@
continue
for k, v in config_dict[target_type].items():
- if config_dict[target_type][k]['PROTOCOL'][0] not in check_protocol_list:
- check_protocol_list.append(config_dict[target_type][k]['PROTOCOL'][0])
+ if (
+ config_dict[target_type][k]["PROTOCOL"][0]
+ not in check_protocol_list
+ ):
+ check_protocol_list.append(
+ config_dict[target_type][k]["PROTOCOL"][0]
+ )
- self.logger.info("\n\t %s protocol type: %s" % (self.target_type, check_protocol_list))
+ self.logger.info(
+ "\n\t %s protocol type: %s"
+ % (self.target_type, check_protocol_list)
+ )
verified_working_protocol = self.verify_protocol(check_protocol_list)
if verified_working_protocol:
- self.logger.info("\n\t---- Completed protocol pre-requisite check ----\n")
+ self.logger.info(
+ "\n\t---- Completed protocol pre-requisite check ----\n"
+ )
# Verify top level directory exists for storage
self.validate_local_store(self.location)
- if ((self.remote_protocol not in verified_working_protocol) and (self.remote_protocol != 'ALL')):
- self.logger.info("\n\tWorking protocol list: %s" % verified_working_protocol)
+ if (self.remote_protocol not in verified_working_protocol) and (
+ self.remote_protocol != "ALL"
+ ):
+ self.logger.info(
+ "\n\tWorking protocol list: %s" % verified_working_protocol
+ )
self.logger.error(
- '\tERROR: Requested protocol %s is not in working protocol list.\n'
- % self.remote_protocol)
+ "\tERROR: Requested protocol %s is not in working protocol"
+ " list.\n"
+ % self.remote_protocol
+ )
sys.exit(-1)
else:
self.generate_ffdc(verified_working_protocol)
@@ -299,12 +342,15 @@
"""
- self.ssh_remoteclient = SSHRemoteclient(self.hostname,
- self.username,
- self.password)
+ self.ssh_remoteclient = SSHRemoteclient(
+ self.hostname, self.username, self.password
+ )
if self.ssh_remoteclient.ssh_remoteclient_login():
- self.logger.info("\n\t[Check] %s SSH connection established.\t [OK]" % self.hostname)
+ self.logger.info(
+ "\n\t[Check] %s SSH connection established.\t [OK]"
+ % self.hostname
+ )
# Check scp connection.
# If scp connection fails,
@@ -312,21 +358,30 @@
self.ssh_remoteclient.scp_connection()
return True
else:
- self.logger.info("\n\t[Check] %s SSH connection.\t [NOT AVAILABLE]" % self.hostname)
+ self.logger.info(
+ "\n\t[Check] %s SSH connection.\t [NOT AVAILABLE]"
+ % self.hostname
+ )
return False
def telnet_to_target_system(self):
r"""
Open a telnet connection to targeted system.
"""
- self.telnet_remoteclient = TelnetRemoteclient(self.hostname,
- self.username,
- self.password)
+ self.telnet_remoteclient = TelnetRemoteclient(
+ self.hostname, self.username, self.password
+ )
if self.telnet_remoteclient.tn_remoteclient_login():
- self.logger.info("\n\t[Check] %s Telnet connection established.\t [OK]" % self.hostname)
+ self.logger.info(
+ "\n\t[Check] %s Telnet connection established.\t [OK]"
+ % self.hostname
+ )
return True
else:
- self.logger.info("\n\t[Check] %s Telnet connection.\t [NOT AVAILABLE]" % self.hostname)
+ self.logger.info(
+ "\n\t[Check] %s Telnet connection.\t [NOT AVAILABLE]"
+ % self.hostname
+ )
return False
def generate_ffdc(self, working_protocol_list):
@@ -337,8 +392,12 @@
working_protocol_list list of confirmed working protocols to connect to remote host.
"""
- self.logger.info("\n\t---- Executing commands on " + self.hostname + " ----")
- self.logger.info("\n\tWorking protocol list: %s" % working_protocol_list)
+ self.logger.info(
+ "\n\t---- Executing commands on " + self.hostname + " ----"
+ )
+ self.logger.info(
+ "\n\tWorking protocol list: %s" % working_protocol_list
+ )
config_dict = self.ffdc_actions
for target_type in config_dict.keys():
@@ -346,40 +405,47 @@
continue
self.logger.info("\n\tFFDC Path: %s " % self.ffdc_dir_path)
- global_plugin_dict['global_log_store_path'] = self.ffdc_dir_path
+ global_plugin_dict["global_log_store_path"] = self.ffdc_dir_path
self.logger.info("\tSystem Type: %s" % target_type)
for k, v in config_dict[target_type].items():
-
- if self.remote_protocol not in working_protocol_list \
- and self.remote_protocol != 'ALL':
+ if (
+ self.remote_protocol not in working_protocol_list
+ and self.remote_protocol != "ALL"
+ ):
continue
- protocol = config_dict[target_type][k]['PROTOCOL'][0]
+ protocol = config_dict[target_type][k]["PROTOCOL"][0]
if protocol not in working_protocol_list:
continue
if protocol in working_protocol_list:
- if protocol == 'SSH' or protocol == 'SCP':
+ if protocol == "SSH" or protocol == "SCP":
self.protocol_ssh(protocol, target_type, k)
- elif protocol == 'TELNET':
+ elif protocol == "TELNET":
self.protocol_telnet(target_type, k)
- elif protocol == 'REDFISH' or protocol == 'IPMI' or protocol == 'SHELL':
+ elif (
+ protocol == "REDFISH"
+ or protocol == "IPMI"
+ or protocol == "SHELL"
+ ):
self.protocol_execute(protocol, target_type, k)
else:
- self.logger.error("\n\tERROR: %s is not available for %s." % (protocol, self.hostname))
+ self.logger.error(
+ "\n\tERROR: %s is not available for %s."
+ % (protocol, self.hostname)
+ )
# Close network connection after collecting all files
- self.elapsed_time = time.strftime("%H:%M:%S", time.gmtime(time.time() - self.start_time))
+ self.elapsed_time = time.strftime(
+ "%H:%M:%S", time.gmtime(time.time() - self.start_time)
+ )
if self.ssh_remoteclient:
self.ssh_remoteclient.ssh_remoteclient_disconnect()
if self.telnet_remoteclient:
self.telnet_remoteclient.tn_remoteclient_disconnect()
- def protocol_ssh(self,
- protocol,
- target_type,
- sub_type):
+ def protocol_ssh(self, protocol, target_type, sub_type):
r"""
Perform actions using SSH and SCP protocols.
@@ -389,39 +455,50 @@
sub_type Group type of commands.
"""
- if protocol == 'SCP':
+ if protocol == "SCP":
self.group_copy(self.ffdc_actions[target_type][sub_type])
else:
- self.collect_and_copy_ffdc(self.ffdc_actions[target_type][sub_type])
+ self.collect_and_copy_ffdc(
+ self.ffdc_actions[target_type][sub_type]
+ )
- def protocol_telnet(self,
- target_type,
- sub_type):
+ def protocol_telnet(self, target_type, sub_type):
r"""
Perform actions using telnet protocol.
Description of argument(s):
target_type OS Type of remote host.
"""
- self.logger.info("\n\t[Run] Executing commands on %s using %s" % (self.hostname, 'TELNET'))
+ self.logger.info(
+ "\n\t[Run] Executing commands on %s using %s"
+ % (self.hostname, "TELNET")
+ )
telnet_files_saved = []
progress_counter = 0
- list_of_commands = self.ffdc_actions[target_type][sub_type]['COMMANDS']
+ list_of_commands = self.ffdc_actions[target_type][sub_type]["COMMANDS"]
for index, each_cmd in enumerate(list_of_commands, start=0):
command_txt, command_timeout = self.unpack_command(each_cmd)
- result = self.telnet_remoteclient.execute_command(command_txt, command_timeout)
+ result = self.telnet_remoteclient.execute_command(
+ command_txt, command_timeout
+ )
if result:
try:
- targ_file = self.ffdc_actions[target_type][sub_type]['FILES'][index]
+ targ_file = self.ffdc_actions[target_type][sub_type][
+ "FILES"
+ ][index]
except IndexError:
targ_file = command_txt
self.logger.warning(
- "\n\t[WARN] Missing filename to store data from telnet %s." % each_cmd)
- self.logger.warning("\t[WARN] Data will be stored in %s." % targ_file)
- targ_file_with_path = (self.ffdc_dir_path
- + self.ffdc_prefix
- + targ_file)
+ "\n\t[WARN] Missing filename to store data from"
+ " telnet %s." % each_cmd
+ )
+ self.logger.warning(
+ "\t[WARN] Data will be stored in %s." % targ_file
+ )
+ targ_file_with_path = (
+ self.ffdc_dir_path + self.ffdc_prefix + targ_file
+ )
# Creates a new file
- with open(targ_file_with_path, 'w') as fp:
+ with open(targ_file_with_path, "w") as fp:
fp.write(result)
fp.close
telnet_files_saved.append(targ_file)
@@ -431,10 +508,7 @@
for file in telnet_files_saved:
self.logger.info("\n\t\tSuccessfully save file " + file + ".")
- def protocol_execute(self,
- protocol,
- target_type,
- sub_type):
+ def protocol_execute(self, protocol, target_type, sub_type):
r"""
Perform actions for a given protocol.
@@ -444,27 +518,36 @@
sub_type Group type of commands.
"""
- self.logger.info("\n\t[Run] Executing commands to %s using %s" % (self.hostname, protocol))
+ self.logger.info(
+ "\n\t[Run] Executing commands to %s using %s"
+ % (self.hostname, protocol)
+ )
executed_files_saved = []
progress_counter = 0
- list_of_cmd = self.get_command_list(self.ffdc_actions[target_type][sub_type])
+ list_of_cmd = self.get_command_list(
+ self.ffdc_actions[target_type][sub_type]
+ )
for index, each_cmd in enumerate(list_of_cmd, start=0):
plugin_call = False
if isinstance(each_cmd, dict):
- if 'plugin' in each_cmd:
+ if "plugin" in each_cmd:
# If the error is set and plugin explicitly
# requested to skip execution on error..
- if plugin_error_dict['exit_on_error'] and \
- self.plugin_error_check(each_cmd['plugin']):
- self.logger.info("\n\t[PLUGIN-ERROR] exit_on_error: %s" %
- plugin_error_dict['exit_on_error'])
- self.logger.info("\t[PLUGIN-SKIP] %s" %
- each_cmd['plugin'][0])
+ if plugin_error_dict[
+ "exit_on_error"
+ ] and self.plugin_error_check(each_cmd["plugin"]):
+ self.logger.info(
+ "\n\t[PLUGIN-ERROR] exit_on_error: %s"
+ % plugin_error_dict["exit_on_error"]
+ )
+ self.logger.info(
+ "\t[PLUGIN-SKIP] %s" % each_cmd["plugin"][0]
+ )
continue
plugin_call = True
# call the plugin
self.logger.info("\n\t[PLUGIN-START]")
- result = self.execute_plugin_block(each_cmd['plugin'])
+ result = self.execute_plugin_block(each_cmd["plugin"])
self.logger.info("\t[PLUGIN-END]\n")
else:
each_cmd = self.yaml_env_and_plugin_vars_populate(each_cmd)
@@ -473,23 +556,31 @@
result = self.run_tool_cmd(each_cmd)
if result:
try:
- file_name = self.get_file_list(self.ffdc_actions[target_type][sub_type])[index]
+ file_name = self.get_file_list(
+ self.ffdc_actions[target_type][sub_type]
+ )[index]
# If file is specified as None.
if file_name == "None":
continue
- targ_file = self.yaml_env_and_plugin_vars_populate(file_name)
+ targ_file = self.yaml_env_and_plugin_vars_populate(
+ file_name
+ )
except IndexError:
- targ_file = each_cmd.split('/')[-1]
+ targ_file = each_cmd.split("/")[-1]
self.logger.warning(
- "\n\t[WARN] Missing filename to store data from %s." % each_cmd)
- self.logger.warning("\t[WARN] Data will be stored in %s." % targ_file)
+ "\n\t[WARN] Missing filename to store data from %s."
+ % each_cmd
+ )
+ self.logger.warning(
+ "\t[WARN] Data will be stored in %s." % targ_file
+ )
- targ_file_with_path = (self.ffdc_dir_path
- + self.ffdc_prefix
- + targ_file)
+ targ_file_with_path = (
+ self.ffdc_dir_path + self.ffdc_prefix + targ_file
+ )
# Creates a new file
- with open(targ_file_with_path, 'w') as fp:
+ with open(targ_file_with_path, "w") as fp:
if isinstance(result, dict):
fp.write(json.dumps(result))
else:
@@ -505,9 +596,9 @@
for file in executed_files_saved:
self.logger.info("\n\t\tSuccessfully save file " + file + ".")
- def collect_and_copy_ffdc(self,
- ffdc_actions_for_target_type,
- form_filename=False):
+ def collect_and_copy_ffdc(
+ self, ffdc_actions_for_target_type, form_filename=False
+ ):
r"""
Send commands in ffdc_config file to targeted system.
@@ -517,21 +608,32 @@
"""
# Executing commands, if any
- self.ssh_execute_ffdc_commands(ffdc_actions_for_target_type,
- form_filename)
+ self.ssh_execute_ffdc_commands(
+ ffdc_actions_for_target_type, form_filename
+ )
# Copying files
if self.ssh_remoteclient.scpclient:
- self.logger.info("\n\n\tCopying FFDC files from remote system %s.\n" % self.hostname)
+ self.logger.info(
+ "\n\n\tCopying FFDC files from remote system %s.\n"
+ % self.hostname
+ )
# Retrieving files from target system
list_of_files = self.get_file_list(ffdc_actions_for_target_type)
- self.scp_ffdc(self.ffdc_dir_path, self.ffdc_prefix, form_filename, list_of_files)
+ self.scp_ffdc(
+ self.ffdc_dir_path,
+ self.ffdc_prefix,
+ form_filename,
+ list_of_files,
+ )
else:
- self.logger.info("\n\n\tSkip copying FFDC files from remote system %s.\n" % self.hostname)
+ self.logger.info(
+ "\n\n\tSkip copying FFDC files from remote system %s.\n"
+ % self.hostname
+ )
- def get_command_list(self,
- ffdc_actions_for_target_type):
+ def get_command_list(self, ffdc_actions_for_target_type):
r"""
Fetch list of commands from configuration file
@@ -539,13 +641,12 @@
ffdc_actions_for_target_type commands and files for the selected remote host type.
"""
try:
- list_of_commands = ffdc_actions_for_target_type['COMMANDS']
+ list_of_commands = ffdc_actions_for_target_type["COMMANDS"]
except KeyError:
list_of_commands = []
return list_of_commands
- def get_file_list(self,
- ffdc_actions_for_target_type):
+ def get_file_list(self, ffdc_actions_for_target_type):
r"""
Fetch list of commands from configuration file
@@ -553,13 +654,12 @@
ffdc_actions_for_target_type commands and files for the selected remote host type.
"""
try:
- list_of_files = ffdc_actions_for_target_type['FILES']
+ list_of_files = ffdc_actions_for_target_type["FILES"]
except KeyError:
list_of_files = []
return list_of_files
- def unpack_command(self,
- command):
+ def unpack_command(self, command):
r"""
Unpack command from config file
@@ -576,9 +676,9 @@
return command_txt, command_timeout
- def ssh_execute_ffdc_commands(self,
- ffdc_actions_for_target_type,
- form_filename=False):
+ def ssh_execute_ffdc_commands(
+ self, ffdc_actions_for_target_type, form_filename=False
+ ):
r"""
Send commands in ffdc_config file to targeted system.
@@ -586,8 +686,10 @@
ffdc_actions_for_target_type commands and files for the selected remote host type.
form_filename if true, pre-pend self.target_type to filename
"""
- self.logger.info("\n\t[Run] Executing commands on %s using %s"
- % (self.hostname, ffdc_actions_for_target_type['PROTOCOL'][0]))
+ self.logger.info(
+ "\n\t[Run] Executing commands on %s using %s"
+ % (self.hostname, ffdc_actions_for_target_type["PROTOCOL"][0])
+ )
list_of_commands = self.get_command_list(ffdc_actions_for_target_type)
# If command list is empty, returns
@@ -601,12 +703,19 @@
if form_filename:
command_txt = str(command_txt % self.target_type)
- cmd_exit_code, err, response = \
- self.ssh_remoteclient.execute_command(command_txt, command_timeout)
+ (
+ cmd_exit_code,
+ err,
+ response,
+ ) = self.ssh_remoteclient.execute_command(
+ command_txt, command_timeout
+ )
if cmd_exit_code:
self.logger.warning(
- "\n\t\t[WARN] %s exits with code %s." % (command_txt, str(cmd_exit_code)))
+ "\n\t\t[WARN] %s exits with code %s."
+ % (command_txt, str(cmd_exit_code))
+ )
self.logger.warning("\t\t[WARN] %s " % err)
progress_counter += 1
@@ -614,8 +723,7 @@
self.logger.info("\n\t[Run] Commands execution completed.\t\t [OK]")
- def group_copy(self,
- ffdc_actions_for_target_type):
+ def group_copy(self, ffdc_actions_for_target_type):
r"""
scp group of files (wild card) from remote host.
@@ -624,9 +732,14 @@
"""
if self.ssh_remoteclient.scpclient:
- self.logger.info("\n\tCopying files from remote system %s via SCP.\n" % self.hostname)
+ self.logger.info(
+ "\n\tCopying files from remote system %s via SCP.\n"
+ % self.hostname
+ )
- list_of_commands = self.get_command_list(ffdc_actions_for_target_type)
+ list_of_commands = self.get_command_list(
+ ffdc_actions_for_target_type
+ )
# If command list is empty, returns
if not list_of_commands:
return
@@ -638,29 +751,42 @@
self.logger.error("\t\tInvalid command %s" % command)
continue
- cmd_exit_code, err, response = \
- self.ssh_remoteclient.execute_command(command)
+ (
+ cmd_exit_code,
+ err,
+ response,
+ ) = self.ssh_remoteclient.execute_command(command)
# If file does not exist, code take no action.
# cmd_exit_code is ignored for this scenario.
if response:
- scp_result = \
- self.ssh_remoteclient.scp_file_from_remote(response.split('\n'),
- self.ffdc_dir_path)
+ scp_result = self.ssh_remoteclient.scp_file_from_remote(
+ response.split("\n"), self.ffdc_dir_path
+ )
if scp_result:
- self.logger.info("\t\tSuccessfully copied from " + self.hostname + ':' + command)
+ self.logger.info(
+ "\t\tSuccessfully copied from "
+ + self.hostname
+ + ":"
+ + command
+ )
else:
self.logger.info("\t\t%s has no result" % command)
else:
- self.logger.info("\n\n\tSkip copying files from remote system %s.\n" % self.hostname)
+ self.logger.info(
+ "\n\n\tSkip copying files from remote system %s.\n"
+ % self.hostname
+ )
- def scp_ffdc(self,
- targ_dir_path,
- targ_file_prefix,
- form_filename,
- file_list=None,
- quiet=None):
+ def scp_ffdc(
+ self,
+ targ_dir_path,
+ targ_file_prefix,
+ form_filename,
+ file_list=None,
+ quiet=None,
+ ):
r"""
SCP all files in file_dict to the indicated directory on the local system.
@@ -677,21 +803,37 @@
if form_filename:
filename = str(filename % self.target_type)
source_file_path = filename
- targ_file_path = targ_dir_path + targ_file_prefix + filename.split('/')[-1]
+ targ_file_path = (
+ targ_dir_path + targ_file_prefix + filename.split("/")[-1]
+ )
# If source file name contains wild card, copy filename as is.
- if '*' in source_file_path:
- scp_result = self.ssh_remoteclient.scp_file_from_remote(source_file_path, self.ffdc_dir_path)
+ if "*" in source_file_path:
+ scp_result = self.ssh_remoteclient.scp_file_from_remote(
+ source_file_path, self.ffdc_dir_path
+ )
else:
- scp_result = self.ssh_remoteclient.scp_file_from_remote(source_file_path, targ_file_path)
+ scp_result = self.ssh_remoteclient.scp_file_from_remote(
+ source_file_path, targ_file_path
+ )
if not quiet:
if scp_result:
self.logger.info(
- "\t\tSuccessfully copied from " + self.hostname + ':' + source_file_path + ".\n")
+ "\t\tSuccessfully copied from "
+ + self.hostname
+ + ":"
+ + source_file_path
+ + ".\n"
+ )
else:
self.logger.info(
- "\t\tFail to copy from " + self.hostname + ':' + source_file_path + ".\n")
+ "\t\tFail to copy from "
+ + self.hostname
+ + ":"
+ + source_file_path
+ + ".\n"
+ )
else:
progress_counter += 1
self.print_progress(progress_counter)
@@ -710,7 +852,9 @@
"""
timestr = time.strftime("%Y%m%d-%H%M%S")
- self.ffdc_dir_path = self.location + "/" + self.hostname + "_" + timestr + "/"
+ self.ffdc_dir_path = (
+ self.location + "/" + self.hostname + "_" + timestr + "/"
+ )
self.ffdc_prefix = timestr + "_"
self.validate_local_store(self.ffdc_dir_path)
@@ -734,10 +878,14 @@
# PermissionError
if e.errno == EPERM or e.errno == EACCES:
self.logger.error(
- '\tERROR: os.makedirs %s failed with PermissionError.\n' % dir_path)
+ "\tERROR: os.makedirs %s failed with"
+ " PermissionError.\n" % dir_path
+ )
else:
self.logger.error(
- '\tERROR: os.makedirs %s failed with %s.\n' % (dir_path, e.strerror))
+ "\tERROR: os.makedirs %s failed with %s.\n"
+ % (dir_path, e.strerror)
+ )
sys.exit(-1)
def print_progress(self, progress):
@@ -751,34 +899,47 @@
sys.stdout.write("\r\t" + "+" * progress)
sys.stdout.flush()
- time.sleep(.1)
+ time.sleep(0.1)
def verify_redfish(self):
r"""
Verify remote host has redfish service active
"""
- redfish_parm = 'redfishtool -r ' \
- + self.hostname + ' -S Always raw GET /redfish/v1/'
- return (self.run_tool_cmd(redfish_parm, True))
+ redfish_parm = (
+ "redfishtool -r "
+ + self.hostname
+ + " -S Always raw GET /redfish/v1/"
+ )
+ return self.run_tool_cmd(redfish_parm, True)
def verify_ipmi(self):
r"""
Verify remote host has IPMI LAN service active
"""
- if self.target_type == 'OPENBMC':
- ipmi_parm = 'ipmitool -I lanplus -C 17 -U ' + self.username + ' -P ' \
- + self.password + ' -H ' + self.hostname + ' power status'
+ if self.target_type == "OPENBMC":
+ ipmi_parm = (
+ "ipmitool -I lanplus -C 17 -U "
+ + self.username
+ + " -P "
+ + self.password
+ + " -H "
+ + self.hostname
+ + " power status"
+ )
else:
- ipmi_parm = 'ipmitool -I lanplus -P ' \
- + self.password + ' -H ' + self.hostname + ' power status'
+ ipmi_parm = (
+ "ipmitool -I lanplus -P "
+ + self.password
+ + " -H "
+ + self.hostname
+ + " power status"
+ )
- return (self.run_tool_cmd(ipmi_parm, True))
+ return self.run_tool_cmd(ipmi_parm, True)
- def run_tool_cmd(self,
- parms_string,
- quiet=False):
+ def run_tool_cmd(self, parms_string, quiet=False):
r"""
Run CLI standard tool or scripts.
@@ -787,15 +948,17 @@
quiet do not print tool error message if True
"""
- result = subprocess.run([parms_string],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True,
- universal_newlines=True)
+ result = subprocess.run(
+ [parms_string],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True,
+ universal_newlines=True,
+ )
if result.stderr and not quiet:
- self.logger.error('\n\t\tERROR with %s ' % parms_string)
- self.logger.error('\t\t' + result.stderr)
+ self.logger.error("\n\t\tERROR with %s " % parms_string)
+ self.logger.error("\t\t" + result.stderr)
return result.stdout
@@ -812,37 +975,53 @@
tmp_list.append("SHELL")
for protocol in protocol_list:
- if self.remote_protocol != 'ALL':
+ if self.remote_protocol != "ALL":
if self.remote_protocol != protocol:
continue
# Only check SSH/SCP once for both protocols
- if protocol == 'SSH' or protocol == 'SCP' and protocol not in tmp_list:
+ if (
+ protocol == "SSH"
+ or protocol == "SCP"
+ and protocol not in tmp_list
+ ):
if self.ssh_to_target_system():
# Add only what user asked.
- if self.remote_protocol != 'ALL':
+ if self.remote_protocol != "ALL":
tmp_list.append(self.remote_protocol)
else:
- tmp_list.append('SSH')
- tmp_list.append('SCP')
+ tmp_list.append("SSH")
+ tmp_list.append("SCP")
- if protocol == 'TELNET':
+ if protocol == "TELNET":
if self.telnet_to_target_system():
tmp_list.append(protocol)
- if protocol == 'REDFISH':
+ if protocol == "REDFISH":
if self.verify_redfish():
tmp_list.append(protocol)
- self.logger.info("\n\t[Check] %s Redfish Service.\t\t [OK]" % self.hostname)
+ self.logger.info(
+ "\n\t[Check] %s Redfish Service.\t\t [OK]"
+ % self.hostname
+ )
else:
- self.logger.info("\n\t[Check] %s Redfish Service.\t\t [NOT AVAILABLE]" % self.hostname)
+ self.logger.info(
+ "\n\t[Check] %s Redfish Service.\t\t [NOT AVAILABLE]"
+ % self.hostname
+ )
- if protocol == 'IPMI':
+ if protocol == "IPMI":
if self.verify_ipmi():
tmp_list.append(protocol)
- self.logger.info("\n\t[Check] %s IPMI LAN Service.\t\t [OK]" % self.hostname)
+ self.logger.info(
+ "\n\t[Check] %s IPMI LAN Service.\t\t [OK]"
+ % self.hostname
+ )
else:
- self.logger.info("\n\t[Check] %s IPMI LAN Service.\t\t [NOT AVAILABLE]" % self.hostname)
+ self.logger.info(
+ "\n\t[Check] %s IPMI LAN Service.\t\t [NOT AVAILABLE]"
+ % self.hostname
+ )
return tmp_list
@@ -855,14 +1034,14 @@
# Example YAML:
# -COMMANDS:
# - my_command ${hostname} ${username} ${password}
- os.environ['hostname'] = self.hostname
- os.environ['username'] = self.username
- os.environ['password'] = self.password
+ os.environ["hostname"] = self.hostname
+ os.environ["username"] = self.username
+ os.environ["password"] = self.password
# Append default Env.
- self.env_dict['hostname'] = self.hostname
- self.env_dict['username'] = self.username
- self.env_dict['password'] = self.password
+ self.env_dict["hostname"] = self.hostname
+ self.env_dict["username"] = self.username
+ self.env_dict["password"] = self.password
try:
tmp_env_dict = {}
@@ -874,14 +1053,14 @@
self.env_dict[key] = str(value)
if self.econfig:
- with open(self.econfig, 'r') as file:
+ with open(self.econfig, "r") as file:
try:
tmp_env_dict = yaml.load(file, Loader=yaml.SafeLoader)
except yaml.YAMLError as e:
self.logger.error(e)
sys.exit(-1)
# Export ENV vars.
- for key, value in tmp_env_dict['env_params'].items():
+ for key, value in tmp_env_dict["env_params"].items():
os.environ[key] = str(value)
self.env_dict[key] = str(value)
except json.decoder.JSONDecodeError as e:
@@ -894,8 +1073,9 @@
if k.lower().find("password") != -1:
hidden_text = []
hidden_text.append(v)
- password_regex = '(' +\
- '|'.join([re.escape(x) for x in hidden_text]) + ')'
+ password_regex = (
+ "(" + "|".join([re.escape(x) for x in hidden_text]) + ")"
+ )
mask_dict[k] = re.sub(password_regex, "********", v)
self.logger.info(json.dumps(mask_dict, indent=8, sort_keys=False))
@@ -915,16 +1095,18 @@
self.logger.debug("\tCall func: %s" % eval_string)
result = eval(eval_string)
self.logger.info("\treturn: %s" % str(result))
- except (ValueError,
- SyntaxError,
- NameError,
- AttributeError,
- TypeError) as e:
+ except (
+ ValueError,
+ SyntaxError,
+ NameError,
+ AttributeError,
+ TypeError,
+ ) as e:
self.logger.error("\tERROR: execute_python_eval: %s" % e)
# Set the plugin error state.
- plugin_error_dict['exit_on_error'] = True
+ plugin_error_dict["exit_on_error"] = True
self.logger.info("\treturn: PLUGIN_EVAL_ERROR")
- return 'PLUGIN_EVAL_ERROR'
+ return "PLUGIN_EVAL_ERROR"
return result
@@ -957,18 +1139,18 @@
- arg2
"""
try:
- idx = self.key_index_list_dict('plugin_name', plugin_cmd_list)
- plugin_name = plugin_cmd_list[idx]['plugin_name']
+ idx = self.key_index_list_dict("plugin_name", plugin_cmd_list)
+ plugin_name = plugin_cmd_list[idx]["plugin_name"]
# Equal separator means plugin function returns result.
- if ' = ' in plugin_name:
+ if " = " in plugin_name:
# Ex. ['result', 'plugin.foo_func.my_func']
- plugin_name_args = plugin_name.split(' = ')
+ plugin_name_args = plugin_name.split(" = ")
# plugin func return data.
for arg in plugin_name_args:
if arg == plugin_name_args[-1]:
plugin_name = arg
else:
- plugin_resp = arg.split(',')
+ plugin_resp = arg.split(",")
# ['result1','result2']
for x in plugin_resp:
global_plugin_list.append(x)
@@ -976,9 +1158,9 @@
# Walk the plugin args ['arg1,'arg2']
# If the YAML plugin statement 'plugin_args' is not declared.
- if any('plugin_args' in d for d in plugin_cmd_list):
- idx = self.key_index_list_dict('plugin_args', plugin_cmd_list)
- plugin_args = plugin_cmd_list[idx]['plugin_args']
+ if any("plugin_args" in d for d in plugin_cmd_list):
+ idx = self.key_index_list_dict("plugin_args", plugin_cmd_list)
+ plugin_args = plugin_cmd_list[idx]["plugin_args"]
if plugin_args:
plugin_args = self.yaml_args_populate(plugin_args)
else:
@@ -990,43 +1172,52 @@
# "arg1","arg2","argn" string as params for function.
parm_args_str = self.yaml_args_string(plugin_args)
if parm_args_str:
- plugin_func = plugin_name + '(' + parm_args_str + ')'
+ plugin_func = plugin_name + "(" + parm_args_str + ")"
else:
- plugin_func = plugin_name + '()'
+ plugin_func = plugin_name + "()"
# Execute plugin function.
if global_plugin_dict:
resp = self.execute_python_eval(plugin_func)
# Update plugin vars dict if there is any.
- if resp != 'PLUGIN_EVAL_ERROR':
+ if resp != "PLUGIN_EVAL_ERROR":
self.response_args_data(resp)
else:
resp = self.execute_python_eval(plugin_func)
except Exception as e:
# Set the plugin error state.
- plugin_error_dict['exit_on_error'] = True
+ plugin_error_dict["exit_on_error"] = True
self.logger.error("\tERROR: execute_plugin_block: %s" % e)
pass
# There is a real error executing the plugin function.
- if resp == 'PLUGIN_EVAL_ERROR':
+ if resp == "PLUGIN_EVAL_ERROR":
return resp
# Check if plugin_expects_return (int, string, list,dict etc)
- if any('plugin_expects_return' in d for d in plugin_cmd_list):
- idx = self.key_index_list_dict('plugin_expects_return', plugin_cmd_list)
- plugin_expects = plugin_cmd_list[idx]['plugin_expects_return']
+ if any("plugin_expects_return" in d for d in plugin_cmd_list):
+ idx = self.key_index_list_dict(
+ "plugin_expects_return", plugin_cmd_list
+ )
+ plugin_expects = plugin_cmd_list[idx]["plugin_expects_return"]
if plugin_expects:
if resp:
- if self.plugin_expect_type(plugin_expects, resp) == 'INVALID':
+ if (
+ self.plugin_expect_type(plugin_expects, resp)
+ == "INVALID"
+ ):
self.logger.error("\tWARN: Plugin error check skipped")
elif not self.plugin_expect_type(plugin_expects, resp):
- self.logger.error("\tERROR: Plugin expects return data: %s"
- % plugin_expects)
- plugin_error_dict['exit_on_error'] = True
+ self.logger.error(
+ "\tERROR: Plugin expects return data: %s"
+ % plugin_expects
+ )
+ plugin_error_dict["exit_on_error"] = True
elif not resp:
- self.logger.error("\tERROR: Plugin func failed to return data")
- plugin_error_dict['exit_on_error'] = True
+ self.logger.error(
+ "\tERROR: Plugin func failed to return data"
+ )
+ plugin_error_dict["exit_on_error"] = True
return resp
@@ -1040,26 +1231,26 @@
resp_data = ""
# There is nothing to update the plugin response.
- if len(global_plugin_list) == 0 or plugin_resp == 'None':
+ if len(global_plugin_list) == 0 or plugin_resp == "None":
return
if isinstance(plugin_resp, str):
- resp_data = plugin_resp.strip('\r\n\t')
+ resp_data = plugin_resp.strip("\r\n\t")
resp_list.append(resp_data)
elif isinstance(plugin_resp, bytes):
- resp_data = str(plugin_resp, 'UTF-8').strip('\r\n\t')
+ resp_data = str(plugin_resp, "UTF-8").strip("\r\n\t")
resp_list.append(resp_data)
elif isinstance(plugin_resp, tuple):
if len(global_plugin_list) == 1:
resp_list.append(plugin_resp)
else:
resp_list = list(plugin_resp)
- resp_list = [x.strip('\r\n\t') for x in resp_list]
+ resp_list = [x.strip("\r\n\t") for x in resp_list]
elif isinstance(plugin_resp, list):
if len(global_plugin_list) == 1:
- resp_list.append([x.strip('\r\n\t') for x in plugin_resp])
+ resp_list.append([x.strip("\r\n\t") for x in plugin_resp])
else:
- resp_list = [x.strip('\r\n\t') for x in plugin_resp]
+ resp_list = [x.strip("\r\n\t") for x in plugin_resp]
elif isinstance(plugin_resp, int) or isinstance(plugin_resp, float):
resp_list.append(plugin_resp)
@@ -1087,7 +1278,7 @@
plugin_args arg list ['arg1','arg2,'argn']
"""
- args_str = ''
+ args_str = ""
for args in plugin_args:
if args:
if isinstance(args, (int, float)):
@@ -1095,7 +1286,7 @@
elif args in global_plugin_type_list:
args_str += str(global_plugin_dict[args])
else:
- args_str += '"' + str(args.strip('\r\n\t')) + '"'
+ args_str += '"' + str(args.strip("\r\n\t")) + '"'
# Skip last list element.
if args != plugin_args[-1]:
args_str += ","
@@ -1148,11 +1339,11 @@
try:
# Example, list of matching env vars ['username', 'password', 'hostname']
# Extra escape \ for special symbols. '\$\{([^\}]+)\}' works good.
- var_name_regex = '\\$\\{([^\\}]+)\\}'
+ var_name_regex = "\\$\\{([^\\}]+)\\}"
env_var_names_list = re.findall(var_name_regex, yaml_arg_str)
for var in env_var_names_list:
env_var = os.environ[var]
- env_replace = '${' + var + '}'
+ env_replace = "${" + var + "}"
yaml_arg_str = yaml_arg_str.replace(env_replace, env_var)
except Exception as e:
self.logger.error("\tERROR:yaml_env_vars_populate: %s" % e)
@@ -1175,10 +1366,14 @@
# in eval function call.
global_plugin_type_list.append(var)
else:
- yaml_arg_str = yaml_arg_str.replace(str(var), str(global_plugin_dict[var]))
+ yaml_arg_str = yaml_arg_str.replace(
+ str(var), str(global_plugin_dict[var])
+ )
# Just a string like filename or command.
else:
- yaml_arg_str = yaml_arg_str.replace(str(var), str(global_plugin_dict[var]))
+ yaml_arg_str = yaml_arg_str.replace(
+ str(var), str(global_plugin_dict[var])
+ )
except (IndexError, ValueError) as e:
self.logger.error("\tERROR: yaml_plugin_vars_populate: %s" % e)
pass
@@ -1192,10 +1387,10 @@
Description of argument(s):
plugin_dict Dictionary of plugin error.
"""
- if any('plugin_error' in d for d in plugin_dict):
+ if any("plugin_error" in d for d in plugin_dict):
for d in plugin_dict:
- if 'plugin_error' in d:
- value = d['plugin_error']
+ if "plugin_error" in d:
+ value = d["plugin_error"]
# Reference if the error is set or not by plugin.
return plugin_error_dict[value]
@@ -1215,18 +1410,18 @@
r"""
Plugin expect directive type check.
"""
- if type == 'int':
+ if type == "int":
return isinstance(data, int)
- elif type == 'float':
+ elif type == "float":
return isinstance(data, float)
- elif type == 'str':
+ elif type == "str":
return isinstance(data, str)
- elif type == 'list':
+ elif type == "list":
return isinstance(data, list)
- elif type == 'dict':
+ elif type == "dict":
return isinstance(data, dict)
- elif type == 'tuple':
+ elif type == "tuple":
return isinstance(data, tuple)
else:
self.logger.info("\tInvalid data type requested: %s" % type)
- return 'INVALID'
+ return "INVALID"
diff --git a/ffdc/lib/ssh_utility.py b/ffdc/lib/ssh_utility.py
index 01b39dd..fb44121 100644
--- a/ffdc/lib/ssh_utility.py
+++ b/ffdc/lib/ssh_utility.py
@@ -1,17 +1,20 @@
#!/usr/bin/env python3
-import paramiko
-from paramiko.ssh_exception import AuthenticationException
-from paramiko.ssh_exception import NoValidConnectionsError
-from paramiko.ssh_exception import SSHException
-from paramiko.ssh_exception import BadHostKeyException
-from paramiko.buffered_pipe import PipeTimeout as PipeTimeout
-from scp import SCPClient, SCPException
-import time
-import socket
import logging
+import socket
+import time
from socket import timeout as SocketTimeout
+import paramiko
+from paramiko.buffered_pipe import PipeTimeout as PipeTimeout
+from paramiko.ssh_exception import (
+ AuthenticationException,
+ BadHostKeyException,
+ NoValidConnectionsError,
+ SSHException,
+)
+from scp import SCPClient, SCPException
+
class SSHRemoteclient:
r"""
@@ -20,7 +23,6 @@
"""
def __init__(self, hostname, username, password):
-
r"""
Description of argument(s):
@@ -38,7 +40,6 @@
self.password = password
def ssh_remoteclient_login(self):
-
r"""
Method to create a ssh connection to remote host.
"""
@@ -48,23 +49,31 @@
# SSHClient to make connections to the remote server
self.sshclient = paramiko.SSHClient()
# setting set_missing_host_key_policy() to allow any host
- self.sshclient.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ self.sshclient.set_missing_host_key_policy(
+ paramiko.AutoAddPolicy()
+ )
# Connect to the server
- self.sshclient.connect(hostname=self.hostname,
- username=self.username,
- password=self.password,
- banner_timeout=120,
- timeout=60,
- look_for_keys=False)
+ self.sshclient.connect(
+ hostname=self.hostname,
+ username=self.username,
+ password=self.password,
+ banner_timeout=120,
+ timeout=60,
+ look_for_keys=False,
+ )
- except (BadHostKeyException, AuthenticationException,
- SSHException, NoValidConnectionsError, socket.error) as e:
+ except (
+ BadHostKeyException,
+ AuthenticationException,
+ SSHException,
+ NoValidConnectionsError,
+ socket.error,
+ ) as e:
is_ssh_login = False
return is_ssh_login
def ssh_remoteclient_disconnect(self):
-
r"""
Clean up.
"""
@@ -75,8 +84,7 @@
if self.scpclient:
self.scpclient.close()
- def execute_command(self, command,
- default_timeout=60):
+ def execute_command(self, command, default_timeout=60):
"""
Execute command on the remote host.
@@ -85,25 +93,26 @@
"""
- empty = ''
+ empty = ""
cmd_start = time.time()
try:
- stdin, stdout, stderr = \
- self.sshclient.exec_command(command, timeout=default_timeout)
+ stdin, stdout, stderr = self.sshclient.exec_command(
+ command, timeout=default_timeout
+ )
start = time.time()
while time.time() < start + default_timeout:
# Need to do read/write operation to trigger
# paramiko exec_command timeout mechanism.
xresults = stderr.readlines()
- results = ''.join(xresults)
+ results = "".join(xresults)
time.sleep(1)
if stdout.channel.exit_status_ready():
break
cmd_exit_code = stdout.channel.recv_exit_status()
# Convert list of string to one string
- err = ''
- out = ''
+ err = ""
+ out = ""
for item in results:
err += item
for item in stdout.readlines():
@@ -111,30 +120,53 @@
return cmd_exit_code, err, out
- except (paramiko.AuthenticationException, paramiko.SSHException,
- paramiko.ChannelException, SocketTimeout) as e:
+ except (
+ paramiko.AuthenticationException,
+ paramiko.SSHException,
+ paramiko.ChannelException,
+ SocketTimeout,
+ ) as e:
# Log command with error. Return to caller for next command, if any.
- logging.error("\n\tERROR: Fail remote command %s %s" % (e.__class__, e))
- logging.error("\tCommand '%s' Elapsed Time %s" %
- (command, time.strftime("%H:%M:%S", time.gmtime(time.time() - cmd_start))))
+ logging.error(
+ "\n\tERROR: Fail remote command %s %s" % (e.__class__, e)
+ )
+ logging.error(
+ "\tCommand '%s' Elapsed Time %s"
+ % (
+ command,
+ time.strftime(
+ "%H:%M:%S", time.gmtime(time.time() - cmd_start)
+ ),
+ )
+ )
return 0, empty, empty
def scp_connection(self):
-
r"""
Create a scp connection for file transfer.
"""
try:
- self.scpclient = SCPClient(self.sshclient.get_transport(), sanitize=lambda x: x)
- logging.info("\n\t[Check] %s SCP transport established.\t [OK]" % self.hostname)
+ self.scpclient = SCPClient(
+ self.sshclient.get_transport(), sanitize=lambda x: x
+ )
+ logging.info(
+ "\n\t[Check] %s SCP transport established.\t [OK]"
+ % self.hostname
+ )
except (SCPException, SocketTimeout, PipeTimeout) as e:
self.scpclient = None
- logging.error("\n\tERROR: SCP get_transport has failed. %s %s" % (e.__class__, e))
- logging.info("\tScript continues generating FFDC on %s." % self.hostname)
- logging.info("\tCollected data will need to be manually offloaded.")
+ logging.error(
+ "\n\tERROR: SCP get_transport has failed. %s %s"
+ % (e.__class__, e)
+ )
+ logging.info(
+ "\tScript continues generating FFDC on %s." % self.hostname
+ )
+ logging.info(
+ "\tCollected data will need to be manually offloaded."
+ )
def scp_file_from_remote(self, remote_file, local_file):
-
r"""
scp file in remote system to local with date-prefixed filename.
@@ -151,7 +183,9 @@
except (SCPException, SocketTimeout, PipeTimeout, SSHException) as e:
# Log command with error. Return to caller for next file, if any.
logging.error(
- "\n\tERROR: Fail scp %s from remotehost %s %s\n\n" % (remote_file, e.__class__, e))
+ "\n\tERROR: Fail scp %s from remotehost %s %s\n\n"
+ % (remote_file, e.__class__, e)
+ )
# Pause for 2 seconds allowing Paramiko to finish error processing before next fetch.
# Without the delay after SCPException,
# next fetch will get 'paramiko.ssh_exception.SSHException'> Channel closed Error.
diff --git a/ffdc/lib/telnet_utility.py b/ffdc/lib/telnet_utility.py
index 08e4071..03f7983 100644
--- a/ffdc/lib/telnet_utility.py
+++ b/ffdc/lib/telnet_utility.py
@@ -1,21 +1,21 @@
#!/usr/bin/env python3
-import time
-import socket
import logging
+import socket
import telnetlib
+import time
from collections import deque
class TelnetRemoteclient:
-
r"""
Class to create telnet connection to remote host for command execution.
"""
- def __init__(self, hostname, username, password, port=23, read_timeout=None):
-
+ def __init__(
+ self, hostname, username, password, port=23, read_timeout=None
+ ):
r"""
Description of argument(s):
@@ -33,23 +33,35 @@
self.read_timeout = read_timeout
def tn_remoteclient_login(self):
-
is_telnet = True
try:
- self.tnclient = telnetlib.Telnet(self.hostname, self.port, timeout=15)
- if b'login:' in self.tnclient.read_until(b'login:', timeout=self.read_timeout):
- self.tnclient.write(self.username.encode('utf-8') + b"\n")
+ self.tnclient = telnetlib.Telnet(
+ self.hostname, self.port, timeout=15
+ )
+ if b"login:" in self.tnclient.read_until(
+ b"login:", timeout=self.read_timeout
+ ):
+ self.tnclient.write(self.username.encode("utf-8") + b"\n")
- if b'Password:' in self.tnclient.read_until(b'Password:', timeout=self.read_timeout):
- self.tnclient.write(self.password.encode('utf-8') + b"\n")
+ if b"Password:" in self.tnclient.read_until(
+ b"Password:", timeout=self.read_timeout
+ ):
+ self.tnclient.write(self.password.encode("utf-8") + b"\n")
- n, match, pre_match = \
- self.tnclient.expect(
- [b'Login incorrect', b'invalid login name or password.', br'\#', br'\$'],
- timeout=self.read_timeout)
+ n, match, pre_match = self.tnclient.expect(
+ [
+ b"Login incorrect",
+ b"invalid login name or password.",
+ rb"\#",
+ rb"\$",
+ ],
+ timeout=self.read_timeout,
+ )
if n == 0 or n == 1:
logging.error(
- "\n\tERROR: Telnet Authentication Failed. Check userid and password.\n\n")
+ "\n\tERROR: Telnet Authentication Failed. Check"
+ " userid and password.\n\n"
+ )
is_telnet = False
else:
# login successful
@@ -76,17 +88,15 @@
# the telnet object might not exist yet, so ignore this one
pass
- def execute_command(self, cmd,
- i_timeout=120):
+ def execute_command(self, cmd, i_timeout=120):
+ r"""
+ Executes commands on the remote host
- r'''
- Executes commands on the remote host
-
- Description of argument(s):
- cmd Command to run on remote host
- i_timeout Timeout for command output
- default is 120 seconds
- '''
+ Description of argument(s):
+ cmd Command to run on remote host
+ i_timeout Timeout for command output
+ default is 120 seconds
+ """
# Wait time for command execution before reading the output.
# Use user input wait time for command execution if one exists.
@@ -97,23 +107,22 @@
execution_time = 120
# Execute the command and read the command output.
- return_buffer = b''
+ return_buffer = b""
try:
-
# Do at least one non-blocking read.
# to flush whatever data is in the read buffer.
while self.tnclient.read_very_eager():
continue
# Execute the command
- self.tnclient.write(cmd.encode('utf-8') + b'\n')
+ self.tnclient.write(cmd.encode("utf-8") + b"\n")
time.sleep(execution_time)
- local_buffer = b''
+ local_buffer = b""
# Read the command output one block at a time.
return_buffer = self.tnclient.read_very_eager()
while return_buffer:
- local_buffer = b''.join([local_buffer, return_buffer])
+ local_buffer = b"".join([local_buffer, return_buffer])
time.sleep(3) # let the buffer fill up a bit
return_buffer = self.tnclient.read_very_eager()
except (socket.error, EOFError) as e:
@@ -129,4 +138,4 @@
logging.error("\t\t ERROR %s " % msg)
# Return ASCII string data with ending PROMPT stripped
- return local_buffer.decode('ascii', 'ignore').replace('$ ', '\n')
+ return local_buffer.decode("ascii", "ignore").replace("$ ", "\n")
diff --git a/ffdc/plugins/date_time_utils.py b/ffdc/plugins/date_time_utils.py
index 65bcb88..f3787bd 100644
--- a/ffdc/plugins/date_time_utils.py
+++ b/ffdc/plugins/date_time_utils.py
@@ -22,7 +22,11 @@
if isinstance(date_str, list):
tmp_date = []
for date in date_str:
- tmp_date.append(datetime.strptime(date, date_format).strftime(desired_format))
+ tmp_date.append(
+ datetime.strptime(date, date_format).strftime(desired_format)
+ )
return tmp_date
else:
- return datetime.strptime(date_str, date_format).strftime(desired_format)
+ return datetime.strptime(date_str, date_format).strftime(
+ desired_format
+ )
diff --git a/ffdc/plugins/redfish.py b/ffdc/plugins/redfish.py
index 0ea7a00..74f1370 100644
--- a/ffdc/plugins/redfish.py
+++ b/ffdc/plugins/redfish.py
@@ -4,13 +4,13 @@
This module contains functions having to do with redfish path walking.
"""
+import json
import os
import subprocess
-import json
ERROR_RESPONSE = {
- "404": 'Response Error: status_code: 404 -- Not Found',
- "500": 'Response Error: status_code: 500 -- Internal Server Error',
+ "404": "Response Error: status_code: 404 -- Not Found",
+ "500": "Response Error: status_code: 500 -- Internal Server Error",
}
# Variable to hold enumerated data.
@@ -29,15 +29,17 @@
parms_string Command to execute from the current SHELL.
quiet do not print tool error message if True
"""
- resp = subprocess.run([parms],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True,
- universal_newlines=True)
+ resp = subprocess.run(
+ [parms],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True,
+ universal_newlines=True,
+ )
if resp.stderr:
- print('\n\t\tERROR with %s ' % parms)
- print('\t\t' + resp.stderr)
+ print("\n\t\tERROR with %s " % parms)
+ print("\t\t" + resp.stderr)
return resp.stderr
elif json_type == "json":
json_data = json.loads(resp.stdout)
@@ -57,8 +59,15 @@
returned as a json string or as a
dictionary.
"""
- parms = 'redfishtool -u ' + username + ' -p ' + password + ' -r ' + \
- hostname + ' -S Always raw GET '
+ parms = (
+ "redfishtool -u "
+ + username
+ + " -p "
+ + password
+ + " -r "
+ + hostname
+ + " -S Always raw GET "
+ )
pending_enumeration.add(url)
@@ -74,31 +83,37 @@
# Example: '/redfish/v1/JsonSchemas/' and sub resources.
# '/redfish/v1/SessionService'
# '/redfish/v1/Managers/bmc#/Oem'
- if ('JsonSchemas' in resource) or ('SessionService' in resource)\
- or ('PostCodes' in resource) or ('Registries' in resource)\
- or ('#' in resource):
+ if (
+ ("JsonSchemas" in resource)
+ or ("SessionService" in resource)
+ or ("PostCodes" in resource)
+ or ("Registries" in resource)
+ or ("#" in resource)
+ ):
continue
response = execute_redfish_cmd(parms + resource)
# Enumeration is done for available resources ignoring the
# ones for which response is not obtained.
- if 'Error getting response' in response:
+ if "Error getting response" in response:
continue
walk_nested_dict(response, url=resource)
enumerated_resources.update(set(resources_to_be_enumerated))
- resources_to_be_enumerated = \
- tuple(pending_enumeration - enumerated_resources)
+ resources_to_be_enumerated = tuple(
+ pending_enumeration - enumerated_resources
+ )
if return_json == "json":
- return json.dumps(result, sort_keys=True,
- indent=4, separators=(',', ': '))
+ return json.dumps(
+ result, sort_keys=True, indent=4, separators=(",", ": ")
+ )
else:
return result
-def walk_nested_dict(data, url=''):
+def walk_nested_dict(data, url=""):
r"""
Parse through the nested dictionary and get the resource id paths.
@@ -106,25 +121,24 @@
data Nested dictionary data from response message.
url Resource for which the response is obtained in data.
"""
- url = url.rstrip('/')
+ url = url.rstrip("/")
for key, value in data.items():
-
# Recursion if nested dictionary found.
if isinstance(value, dict):
walk_nested_dict(value)
else:
# Value contains a list of dictionaries having member data.
- if 'Members' == key:
+ if "Members" == key:
if isinstance(value, list):
for memberDict in value:
if isinstance(memberDict, str):
pending_enumeration.add(memberDict)
else:
- pending_enumeration.add(memberDict['@odata.id'])
+ pending_enumeration.add(memberDict["@odata.id"])
- if '@odata.id' == key:
- value = value.rstrip('/')
+ if "@odata.id" == key:
+ value = value.rstrip("/")
# Data for the given url.
if value == url:
result[url] = data
diff --git a/ffdc/plugins/scp_execution.py b/ffdc/plugins/scp_execution.py
index 3b4767a..f44735a 100644
--- a/ffdc/plugins/scp_execution.py
+++ b/ffdc/plugins/scp_execution.py
@@ -8,7 +8,7 @@
# Absolute path to this plugin
abs_path = os.path.abspath(os.path.dirname(sys.argv[0]))
# full_path to plugins parent directory
-full_path = abs_path.split('plugins')[0]
+full_path = abs_path.split("plugins")[0]
sys.path.append(full_path)
# Walk path and append to sys.path
for root, dirs, files in os.walk(full_path):
@@ -19,33 +19,28 @@
from ssh_utility import SSHRemoteclient # NOQA
-def scp_remote_file(hostname,
- username,
- password,
- filename,
- local_dir_path):
+def scp_remote_file(hostname, username, password, filename, local_dir_path):
r"""
- Description of argument(s):
+ Description of argument(s):
- hostname Name/IP of the remote (targeting) host
- username User on the remote host with access to files
- password Password for user on remote host
- filename Filename with full path on remote host
- Filename can contain wild cards for multiple files
- local_dir_path Location to store file on local host
+ hostname Name/IP of the remote (targeting) host
+ username User on the remote host with access to files
+ password Password for user on remote host
+ filename Filename with full path on remote host
+ Filename can contain wild cards for multiple files
+ local_dir_path Location to store file on local host
"""
- ssh_remoteclient = SSHRemoteclient(hostname,
- username,
- password)
+ ssh_remoteclient = SSHRemoteclient(hostname, username, password)
if ssh_remoteclient.ssh_remoteclient_login():
-
# Obtain scp connection.
ssh_remoteclient.scp_connection()
if ssh_remoteclient.scpclient:
if isinstance(filename, list):
for each_file in filename:
- ssh_remoteclient.scp_file_from_remote(each_file, local_dir_path)
+ ssh_remoteclient.scp_file_from_remote(
+ each_file, local_dir_path
+ )
else:
ssh_remoteclient.scp_file_from_remote(filename, local_dir_path)
diff --git a/ffdc/plugins/shell_execution.py b/ffdc/plugins/shell_execution.py
index 817dc1e..91a42b2 100644
--- a/ffdc/plugins/shell_execution.py
+++ b/ffdc/plugins/shell_execution.py
@@ -10,14 +10,16 @@
quiet do not print tool error message if True
"""
- result = subprocess.run([parms_string],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True,
- universal_newlines=True)
+ result = subprocess.run(
+ [parms_string],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True,
+ universal_newlines=True,
+ )
if result.stderr and not quiet:
- print('\n\t\tERROR with %s ' % parms_string)
- print('\t\t' + result.stderr)
+ print("\n\t\tERROR with %s " % parms_string)
+ print("\t\t" + result.stderr)
return result.stdout
diff --git a/ffdc/plugins/ssh_execution.py b/ffdc/plugins/ssh_execution.py
index 8623918..fd76583 100644
--- a/ffdc/plugins/ssh_execution.py
+++ b/ffdc/plugins/ssh_execution.py
@@ -8,7 +8,7 @@
# Absolute path to this plugin
abs_path = os.path.abspath(os.path.dirname(sys.argv[0]))
# full_path to plugins parent directory
-full_path = abs_path.split('plugins')[0]
+full_path = abs_path.split("plugins")[0]
sys.path.append(full_path)
# Walk path and append to sys.path
for root, dirs, files in os.walk(full_path):
@@ -19,44 +19,39 @@
from ssh_utility import SSHRemoteclient # NOQA
-def ssh_execute_cmd(hostname,
- username,
- password,
- command,
- timeout=60,
- type=None):
+def ssh_execute_cmd(
+ hostname, username, password, command, timeout=60, type=None
+):
r"""
- Description of argument(s):
+ Description of argument(s):
- hostname Name/IP of the remote (targeting) host
- username User on the remote host with access to FFCD files
- password Password for user on remote host
- command Command to run on remote host
- timeout Time, in second, to wait for command completion
- type Data type return as list or others.
+ hostname Name/IP of the remote (targeting) host
+ username User on the remote host with access to FFCD files
+ password Password for user on remote host
+ command Command to run on remote host
+ timeout Time, in second, to wait for command completion
+ type Data type return as list or others.
"""
- ssh_remoteclient = SSHRemoteclient(hostname,
- username,
- password)
+ ssh_remoteclient = SSHRemoteclient(hostname, username, password)
cmd_exit_code = 0
- err = ''
- response = ''
+ err = ""
+ response = ""
if ssh_remoteclient.ssh_remoteclient_login():
-
"""
cmd_exit_code: command exit status from remote host
err: stderr from remote host
response: stdout from remote host
"""
- cmd_exit_code, err, response = \
- ssh_remoteclient.execute_command(command, int(timeout))
+ cmd_exit_code, err, response = ssh_remoteclient.execute_command(
+ command, int(timeout)
+ )
# Close ssh session
if ssh_remoteclient:
ssh_remoteclient.ssh_remoteclient_disconnect()
if type == "list":
- return response.split('\n')
+ return response.split("\n")
else:
return response
diff --git a/ffdc/plugins/telnet_execution.py b/ffdc/plugins/telnet_execution.py
index d55d18a..08aaf02 100644
--- a/ffdc/plugins/telnet_execution.py
+++ b/ffdc/plugins/telnet_execution.py
@@ -8,7 +8,7 @@
# Absolute path to this plugin
abs_path = os.path.abspath(os.path.dirname(sys.argv[0]))
# full_path to plugins parent directory
-full_path = abs_path.split('plugins')[0]
+full_path = abs_path.split("plugins")[0]
sys.path.append(full_path)
# Walk path and append to sys.path
for root, dirs, files in os.walk(full_path):
@@ -18,28 +18,21 @@
from telnet_utility import TelnetRemoteclient # NOQA
-def telnet_execute_cmd(hostname,
- username,
- password,
- command,
- timeout=60):
+def telnet_execute_cmd(hostname, username, password, command, timeout=60):
r"""
- Description of argument(s):
+ Description of argument(s):
- hostname Name/IP of the remote (targeting) host
- username User on the remote host with access to FFCD files
- password Password for user on remote host
- command Command to run on remote host
- timeout Time, in second, to wait for command completion
+ hostname Name/IP of the remote (targeting) host
+ username User on the remote host with access to FFCD files
+ password Password for user on remote host
+ command Command to run on remote host
+ timeout Time, in second, to wait for command completion
"""
- telnet_remoteclient = TelnetRemoteclient(hostname,
- username,
- password)
- result = ''
+ telnet_remoteclient = TelnetRemoteclient(hostname, username, password)
+ result = ""
if telnet_remoteclient.tn_remoteclient_login():
# result: stdout from remote host
- result = \
- telnet_remoteclient.execute_command(command, timeout)
+ result = telnet_remoteclient.execute_command(command, timeout)
# Close telnet session
if telnet_remoteclient:
diff --git a/ffdc/setup.py b/ffdc/setup.py
index f8c6b18..ef360af 100644
--- a/ffdc/setup.py
+++ b/ffdc/setup.py
@@ -1,16 +1,12 @@
from setuptools import setup
+
setup(
- name='ffdc',
- version='0.1',
- description=("A standalone script to collect logs from a given system."),
- py_modules=['install'],
- install_requires=[
- 'click',
- 'PyYAML',
- 'paramiko',
- 'redfishtool'
- ],
+ name="ffdc",
+ version="0.1",
+ description="A standalone script to collect logs from a given system.",
+ py_modules=["install"],
+ install_requires=["click", "PyYAML", "paramiko", "redfishtool"],
entry_points={
- 'console_scripts': ['collectFFDC=commands.install_cmd:main']
- }
+ "console_scripts": ["collectFFDC=commands.install_cmd:main"]
+ },
)
diff --git a/gui/data/gui_variables.py b/gui/data/gui_variables.py
index 0fe686a..663f9b0 100644
--- a/gui/data/gui_variables.py
+++ b/gui/data/gui_variables.py
@@ -5,8 +5,7 @@
"""
-class gui_variables():
-
+class gui_variables:
# Login page
xpath_textbox_hostname = "//input[@id='host']"
xpath_textbox_username = "//*[@data-test-id='login-input-username']"
@@ -20,8 +19,12 @@
# GUI header
xpath_root_button_menu = "//*[@data-test-id='appHeader-container-user']"
xpath_profile_settings = "//*[@data-test-id='appHeader-link-profile']"
- xpath_server_health_header = "//*[@data-test-id='appHeader-container-health']"
- xpath_server_power_header = "//*[@data-test-id='appHeader-container-power']"
+ xpath_server_health_header = (
+ "//*[@data-test-id='appHeader-container-health']"
+ )
+ xpath_server_power_header = (
+ "//*[@data-test-id='appHeader-container-power']"
+ )
xpath_refresh_button = "//*[@data-test-id='appHeader-button-refresh']"
# Logs menu
@@ -30,21 +33,33 @@
xpath_dumps_header = "//h1[text()='Dumps']"
xpath_event_logs_sub_menu = "//*[@data-test-id='nav-item-event-logs']"
xpath_event_logs_heading = "//h1[contains(text(), 'Event logs')]"
- xpath_progress_logs_sub_menu = "//*[@data-test-id='nav-item-post-code-logs']"
+ xpath_progress_logs_sub_menu = (
+ "//*[@data-test-id='nav-item-post-code-logs']"
+ )
# Hardware status menu
- xpath_hardware_status_menu = "//*[@data-test-id='nav-button-hardware-status']"
- xpath_inventory_and_leds_sub_menu = "//*[@data-test-id='nav-item-inventory']"
+ xpath_hardware_status_menu = (
+ "//*[@data-test-id='nav-button-hardware-status']"
+ )
+ xpath_inventory_and_leds_sub_menu = (
+ "//*[@data-test-id='nav-item-inventory']"
+ )
xpath_sensor_sub_menu = "//*[@data-test-id='nav-item-sensors']"
- xpath_inventory_and_leds_heading = "//h1[contains(text(), 'Inventory and LEDs')]"
+ xpath_inventory_and_leds_heading = (
+ "//h1[contains(text(), 'Inventory and LEDs')]"
+ )
# Operations menu
xpath_operations_menu = "//*[@data-test-id='nav-button-operations']"
- xpath_factory_reset_sub_menu = "//*[@data-test-id='nav-item-factory-reset']"
+ xpath_factory_reset_sub_menu = (
+ "//*[@data-test-id='nav-item-factory-reset']"
+ )
xpath_firmware_update_sub_menu = "//*[@data-test-id='nav-item-firmware']"
xpath_reboot_bmc_sub_menu = "//*[@data-test-id='nav-item-reboot-bmc']"
xpath_host_console_sub_menu = "//*[@data-test-id='nav-item-host-console']"
- xpath_server_power_operations_sub_menu = "//*[@data-test-id='nav-item-server-power-operations']"
+ xpath_server_power_operations_sub_menu = (
+ "//*[@data-test-id='nav-item-server-power-operations']"
+ )
xpath_host_console_heading = "//h1[text()='Host console']"
xpath_firmware_heading = "//h1[contains(text(), 'Firmware')]"
@@ -53,33 +68,49 @@
xpath_network_heading = "//h1[text()='Network']"
xpath_date_time_sub_menu = "//*[@data-test-id='nav-item-date-time']"
xpath_network_sub_menu = "//*[@data-test-id='nav-item-network']"
- xpath_power_restore_policy_sub_menu = "//*[@data-test-id='nav-item-power-restore-policy']"
+ xpath_power_restore_policy_sub_menu = (
+ "//*[@data-test-id='nav-item-power-restore-policy']"
+ )
# Security and access menu
- xpath_secuity_and_accesss_menu = "//*[@data-test-id='nav-button-security-and-access']"
+ xpath_secuity_and_accesss_menu = (
+ "//*[@data-test-id='nav-button-security-and-access']"
+ )
xpath_sessions_sub_menu = "//*[@data-test-id='nav-item-sessions']"
xpath_ldap_sub_menu = "//*[@data-test-id='nav-item-ldap']"
- xpath_user_management_sub_menu = "//*[@data-test-id='nav-item-user-management']"
+ xpath_user_management_sub_menu = (
+ "//*[@data-test-id='nav-item-user-management']"
+ )
xpath_policies_sub_menu = "//*[@data-test-id='nav-item-policies']"
xpath_certificates_sub_menu = "//*[@data-test-id='nav-item-certificates']"
# Resource management menu
- xpath_resource_management_menu = "//*[@data-test-id='nav-button-resource-management']"
+ xpath_resource_management_menu = (
+ "//*[@data-test-id='nav-button-resource-management']"
+ )
xpath_power_sub_menu = "//*[@data-test-id='nav-item-power']"
xpath_power_heading = "//h1[contains(text(), 'Power')]"
# Profile settings
xpath_default_UTC = "//*[@data-test-id='profileSettings-radio-defaultUTC']"
- xpath_profile_save_button = "//*[@data-test-id='profileSettings-button-saveSettings']"
- xpath_input_password = "//*[@data-test-id='profileSettings-input-newPassword']"
- xpath_input_confirm_password = "//*[@data-test-id='profileSettings-input-confirmPassword']"
+ xpath_profile_save_button = (
+ "//*[@data-test-id='profileSettings-button-saveSettings']"
+ )
+ xpath_input_password = (
+ "//*[@data-test-id='profileSettings-input-newPassword']"
+ )
+ xpath_input_confirm_password = (
+ "//*[@data-test-id='profileSettings-input-confirmPassword']"
+ )
# Common variables
xpath_save_settings_button = "//button[contains(text(),'Save')]"
xpath_confirm_button = "//button[contains(text(),'Confirm')]"
xpath_cancel_button = "//button[contains(text(),'Cancel')]"
xpath_add_button = "//button[normalize-space(text())='Add']"
- xpath_page_loading_progress_bar = "//*[@aria-label='Page loading progress bar']"
+ xpath_page_loading_progress_bar = (
+ "//*[@aria-label='Page loading progress bar']"
+ )
# Reboot sub menu
xpath_reboot_bmc_heading = "//h1[text()='Reboot BMC']"
diff --git a/gui/data/resource_variables.py b/gui/data/resource_variables.py
index 07413d7..e70a324 100644
--- a/gui/data/resource_variables.py
+++ b/gui/data/resource_variables.py
@@ -6,8 +6,7 @@
"""
-class resource_variables():
-
+class resource_variables:
xpath_textbox_hostname = "//input[@id='host']"
xpath_textbox_username = "//input[@id='username']"
xpath_textbox_password = "//input[@id='password']"
@@ -27,38 +26,38 @@
xpath_remove_button = "//button[contains(text(),'Remove')]"
xpath_add_button = "//button[@type='submit']"
- xpath_select_button_warm_reboot = \
- "//*[@id='power__warm-boot']"
- xpath_operation_warning_message = \
- "//*[@class='inline__confirm active']"
+ xpath_select_button_warm_reboot = "//*[@id='power__warm-boot']"
+ xpath_operation_warning_message = "//*[@class='inline__confirm active']"
text_warm_reboot_warning_message = "warm reboot?"
- xpath_select_button_warm_reboot_yes = \
- "//*[@id='power-operations']" \
+ xpath_select_button_warm_reboot_yes = (
+ "//*[@id='power-operations']"
"/div[3]/div[3]/confirm/div/div[2]/button[1]"
+ )
- xpath_select_button_cold_reboot = \
- "//*[@id='power__cold-boot']"
+ xpath_select_button_cold_reboot = "//*[@id='power__cold-boot']"
text_cold_reboot_warning_message = "cold reboot?"
- xpath_select_button_cold_reboot_yes = \
- "//*[@id='power-operations']" \
+ xpath_select_button_cold_reboot_yes = (
+ "//*[@id='power-operations']"
"/div[3]/div[4]/confirm/div/div[2]/button[2]"
+ )
- xpath_select_button_orderly_shutdown = \
- "//*[@id='power__soft-shutdown']"
- xpath_select_button_orderly_shutdown_button_no = \
- "//*[@id='power-operations']/div[3]/div[5]"\
+ xpath_select_button_orderly_shutdown = "//*[@id='power__soft-shutdown']"
+ xpath_select_button_orderly_shutdown_button_no = (
+ "//*[@id='power-operations']/div[3]/div[5]"
"/confirm/div/div[2]/button[2]"
+ )
text_orderly_shutdown_warning_message = "orderly shutdown?"
- xpath_select_button_orderly_shutdown_yes = \
- "//*[@id='power-operations']/div[3]/div[5]" \
+ xpath_select_button_orderly_shutdown_yes = (
+ "//*[@id='power-operations']/div[3]/div[5]"
"/confirm/div/div[2]/button[1]"
+ )
- xpath_select_button_immediate_shutdown = \
- "//*[@id='power__hard-shutdown']"
+ xpath_select_button_immediate_shutdown = "//*[@id='power__hard-shutdown']"
text_immediate_shutdown_warning_message = "immediate shutdown?"
- xpath_select_button_immediate_shutdown_yes = \
- "//*[@id='power-operations']/div[3]/div[6]" \
+ xpath_select_button_immediate_shutdown_yes = (
+ "//*[@id='power-operations']/div[3]/div[6]"
"/confirm/div/div[2]/button[1]"
+ )
obmc_off_state = "Off"
obmc_standby_state = "Standby"
@@ -66,13 +65,21 @@
# xpath for main menu.
xpath_select_server_control = "//button[contains(@class,'btn-control')]"
- xpath_select_server_configuration = "//button[contains(@class,'btn-config')]"
- xpath_select_access_control = "//button[contains(@class,'btn-access-control')]"
+ xpath_select_server_configuration = (
+ "//button[contains(@class,'btn-config')]"
+ )
+ xpath_select_access_control = (
+ "//button[contains(@class,'btn-access-control')]"
+ )
# xpath for sub main menu.
- xpath_select_server_power_operations = "//a[@href='#/server-control/power-operations']"
+ xpath_select_server_power_operations = (
+ "//a[@href='#/server-control/power-operations']"
+ )
xpath_select_snmp_settings = "//a[@href='#/configuration/snmp']"
- xpath_select_manage_power_usage = "//a[@href='#/server-control/power-usage']"
+ xpath_select_manage_power_usage = (
+ "//a[@href='#/server-control/power-usage']"
+ )
xpath_select_virtual_media = "//a[@href='#/server-control/virtual-media']"
xpath_select_sol_console = "//a[@href='#/server-control/remote-console']"
xpath_select_reboot_bmc = "//a[@href='#/server-control/bmc-reboot']"
@@ -83,40 +90,53 @@
xpath_select_local_users = "//a[@href='#/access-control/local-users']"
# GUI header elements locators.
- xpath_select_server_power = "//a[@href='#/server-control/power-operations']"
+ xpath_select_server_power = (
+ "//a[@href='#/server-control/power-operations']"
+ )
# Server health elements locators.
- xpath_select_refresh_button = \
- "//*[contains(text(),'Refresh')]"
- xpath_event_severity_all = "//*[text()='Filter by severity']/following-sibling::button[1]"
- xpath_event_severity_high = "//*[text()='Filter by severity']/following-sibling::button[2]"
- xpath_event_severity_medium = "//*[text()='Filter by severity']/following-sibling::button[3]"
- xpath_event_severity_low = "//*[text()='Filter by severity']/following-sibling::button[4]"
- xpath_drop_down_timezone_edt = \
+ xpath_select_refresh_button = "//*[contains(text(),'Refresh')]"
+ xpath_event_severity_all = (
+ "//*[text()='Filter by severity']/following-sibling::button[1]"
+ )
+ xpath_event_severity_high = (
+ "//*[text()='Filter by severity']/following-sibling::button[2]"
+ )
+ xpath_event_severity_medium = (
+ "//*[text()='Filter by severity']/following-sibling::button[3]"
+ )
+ xpath_event_severity_low = (
+ "//*[text()='Filter by severity']/following-sibling::button[4]"
+ )
+ xpath_drop_down_timezone_edt = (
"//*[@id='event-log']/section[1]/div/div/button"
+ )
xpath_refresh_circle = "/html/body/main/loader/div[1]/svg/circle"
- xpath_drop_down_timezone_utc = \
+ xpath_drop_down_timezone_utc = (
"//*[@id='event-log']/section[1]/div/div/ul/li[2]/button"
+ )
xpath_event_filter_all = "//*[text()='All events']"
xpath_event_filter_resolved = "//*[text()='Resolved events']"
xpath_event_filter_unresolved = "//*[text()='Unresolved events']"
- xpath_event_action_bars = \
- "//*[@id='event__actions-bar']/div[1]/label/span"
- xpath_event_action_delete = \
+ xpath_event_action_bars = "//*[@id='event__actions-bar']/div[1]/label/span"
+ xpath_event_action_delete = (
"//*[@id='event__actions-bar']/div[2]/div[2]/button[1]"
- xpath_event_action_export = \
- "//*[@id='event__actions-bar']/div[2]/div[2]/a"
- xpath_number_of_events = \
- "//*[@id='event__actions-bar']/div[2]/p[2]/span"
- xpath_mark_as_resolved = \
+ )
+ xpath_event_action_export = "//*[@id='event__actions-bar']/div[2]/div[2]/a"
+ xpath_number_of_events = "//*[@id='event__actions-bar']/div[2]/p[2]/span"
+ xpath_mark_as_resolved = (
"//*[@id='event__actions-bar']/div[2]/div[2]/button[2]"
+ )
xpath_events_export = "//*[@id='event__actions-bar']/div[2]/div[2]/a"
xpath_individual_event_select = "(//*[@class='control__indicator'])[2]"
- xpath_individual_event_delete = \
+ xpath_individual_event_delete = (
"//*[@id='event__actions-bar']/div[2]/div[2]/button[1]"
+ )
xpath_second_event_select = "(//*[@class='control__indicator'])[3]"
- xpath_individual_event_resolved = \
+ xpath_individual_event_resolved = (
"//*[@id='event__actions-bar']/div[2]/div[2]/button[2]"
- xpath_individual_event_export = \
+ )
+ xpath_individual_event_export = (
"//*[@id='event__actions-bar']/div[2]/div[2]/a"
+ )
xpath_select_all_events = "(//*[@class='control__indicator'])[1]"
diff --git a/gui/lib/supporting_libs.py b/gui/lib/supporting_libs.py
index 4cc3044..ae7d005 100644
--- a/gui/lib/supporting_libs.py
+++ b/gui/lib/supporting_libs.py
@@ -9,7 +9,6 @@
import socket
-class supporting_libs():
-
+class supporting_libs:
def get_hostname_from_ip_address(self, ip):
return socket.gethostbyaddr(ip)[0]
diff --git a/gui/lib/utils_get_version.py b/gui/lib/utils_get_version.py
index 0c7f279..3f1d331 100755
--- a/gui/lib/utils_get_version.py
+++ b/gui/lib/utils_get_version.py
@@ -1,11 +1,19 @@
#!/usr/bin/env python3
-import gen_print as gp
-import gen_cmd as gc
import collections
-module_names = ['Selenium2Library', 'SeleniumLibrary', 'SSHLibrary', 'requests',
- 'XvfbRobot', 'robotremoteserver', 'redfish']
+import gen_cmd as gc
+import gen_print as gp
+
+module_names = [
+ "Selenium2Library",
+ "SeleniumLibrary",
+ "SSHLibrary",
+ "requests",
+ "XvfbRobot",
+ "robotremoteserver",
+ "redfish",
+]
import_versions = collections.OrderedDict()
@@ -13,8 +21,13 @@
try:
cmd_buf = "import " + module_name
exec(cmd_buf)
- cmd_buf = "import_versions['" + module_name + "'] = " + module_name \
- + ".__version__"
+ cmd_buf = (
+ "import_versions['"
+ + module_name
+ + "'] = "
+ + module_name
+ + ".__version__"
+ )
exec(cmd_buf)
except ImportError:
import_versions[module_name] = "Not installed"
@@ -50,21 +63,30 @@
quiet = 1
versions = collections.OrderedDict()
- for package in ['python', 'python3', 'robot', 'firefox', 'google-chrome']:
+ for package in ["python", "python3", "robot", "firefox", "google-chrome"]:
# Note: "robot --version" returns 0x00000000000000fb.
# Note: If package does not exist, 0x7f is returned.
- rc, version = gc.shell_cmd(package + " --version",
- valid_rcs=[0, 0x7f, 0xfb])
- versions[package] = "Not installed" if rc == 0x7f else version.rstrip('\n')
+ rc, version = gc.shell_cmd(
+ package + " --version", valid_rcs=[0, 0x7F, 0xFB]
+ )
+ versions[package] = (
+ "Not installed" if rc == 0x7F else version.rstrip("\n")
+ )
versions.update(import_versions)
- for package in ['robotframework-angularjs', 'robotframework-scplibrary',
- 'robotframework-extendedselenium2library']:
- rc, version = gc.shell_cmd("pip3 show " + package
- + " | grep Version | sed -re 's/.*: //g'")
- versions[package] = "Not installed" if not version else version.rstrip('\n')
+ for package in [
+ "robotframework-angularjs",
+ "robotframework-scplibrary",
+ "robotframework-extendedselenium2library",
+ ]:
+ rc, version = gc.shell_cmd(
+ "pip3 show " + package + " | grep Version | sed -re 's/.*: //g'"
+ )
+ versions[package] = (
+ "Not installed" if not version else version.rstrip("\n")
+ )
rc, version = gc.shell_cmd("lsb_release -d -s")
- versions["host OS"] = "Failed" if not version else version.rstrip('\n')
+ versions["host OS"] = "Failed" if not version else version.rstrip("\n")
return versions
diff --git a/lib/bmc_network_utils.py b/lib/bmc_network_utils.py
index 9a4a1dc..82b10fd 100644
--- a/lib/bmc_network_utils.py
+++ b/lib/bmc_network_utils.py
@@ -5,18 +5,19 @@
"""
-import gen_print as gp
+import collections
+import ipaddress
+import json
+import re
+import socket
+import subprocess
+
+import bmc_ssh_utils as bsu
import gen_cmd as gc
import gen_misc as gm
+import gen_print as gp
import var_funcs as vf
-import collections
-import re
-import ipaddress
-import subprocess
-import socket
from robot.libraries.BuiltIn import BuiltIn
-import json
-import bmc_ssh_utils as bsu
ip_regex = r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}"
@@ -28,7 +29,7 @@
"""
ip_list = list()
- stdout = subprocess.check_output(['hostname', '--all-fqdns'], shell=True)
+ stdout = subprocess.check_output(["hostname", "--all-fqdns"], shell=True)
host_fqdns = stdout.decode("utf-8").strip()
ip_address = socket.gethostbyname(str(host_fqdns))
ip_list.append(ip_address)
@@ -46,7 +47,7 @@
"""
# IP address netmask format: '0.0.0.0/255.255.252.0'
- return ipaddress.ip_network('0.0.0.0/' + netmask).prefixlen
+ return ipaddress.ip_network("0.0.0.0/" + netmask).prefixlen
def get_netmask_address(prefix_len):
@@ -58,7 +59,7 @@
"""
# IP address netmask format: '0.0.0.0/24'
- return ipaddress.ip_network('0.0.0.0/' + prefix_len).netmask
+ return ipaddress.ip_network("0.0.0.0/" + prefix_len).netmask
def parse_nping_output(output):
@@ -101,20 +102,26 @@
lines = output.split("\n")
# Obtain only the lines of interest.
- lines = list(filter(lambda x: re.match(r"(Max rtt|Raw packets|TCP connection)", x),
- lines))
+ lines = list(
+ filter(
+ lambda x: re.match(r"(Max rtt|Raw packets|TCP connection)", x),
+ lines,
+ )
+ )
key_value_list = []
for line in lines:
key_value_list += line.split("|")
nping_result = vf.key_value_list_to_dict(key_value_list)
# Extract percent_lost/percent_failed value from lost/failed field.
- if 'lost' in nping_result:
- nping_result['percent_lost'] = \
- float(nping_result['lost'].split(" ")[-1].strip("()%"))
+ if "lost" in nping_result:
+ nping_result["percent_lost"] = float(
+ nping_result["lost"].split(" ")[-1].strip("()%")
+ )
else:
- nping_result['percent_failed'] = \
- float(nping_result['failed'].split(" ")[-1].strip("()%"))
+ nping_result["percent_failed"] = float(
+ nping_result["failed"].split(" ")[-1].strip("()%")
+ )
return nping_result
@@ -158,7 +165,7 @@
command. Do a 'man nping' for details.
"""
- command_string = gc.create_command_string('nping', host, options)
+ command_string = gc.create_command_string("nping", host, options)
rc, output = gc.shell_cmd(command_string, print_output=0, ignore_err=0)
if parse_results:
return parse_nping_output(output)
@@ -205,7 +212,9 @@
(etc.)
"""
- stdout, stderr, rc = bsu.bmc_execute_command("cat /usr/share/ipmi-providers/channel_config.json")
+ stdout, stderr, rc = bsu.bmc_execute_command(
+ "cat /usr/share/ipmi-providers/channel_config.json"
+ )
return json.loads(stdout)
@@ -215,7 +224,11 @@
this function.
"""
- return vf.filter_struct(get_channel_config(), "[('medium_type', 'other-lan|lan-802.3')]", regex=1)
+ return vf.filter_struct(
+ get_channel_config(),
+ "[('medium_type', 'other-lan|lan-802.3')]",
+ regex=1,
+ )
def get_channel_access_config(file_name):
diff --git a/lib/bmc_redfish.py b/lib/bmc_redfish.py
index 2ae405a..0e97af7 100644
--- a/lib/bmc_redfish.py
+++ b/lib/bmc_redfish.py
@@ -4,17 +4,16 @@
See class prolog below for details.
"""
-import sys
-import re
import json
-from redfish_plus import redfish_plus
-from robot.libraries.BuiltIn import BuiltIn
+import re
+import sys
from json.decoder import JSONDecodeError
-from redfish.rest.v1 import InvalidCredentialsError
import func_args as fa
import gen_print as gp
-
+from redfish.rest.v1 import InvalidCredentialsError
+from redfish_plus import redfish_plus
+from robot.libraries.BuiltIn import BuiltIn
MTLS_ENABLED = BuiltIn().get_variable_value("${MTLS_ENABLED}")
@@ -44,7 +43,7 @@
"""
self.__inited__ = False
try:
- if MTLS_ENABLED == 'True':
+ if MTLS_ENABLED == "True":
self.__inited__ = True
else:
super(bmc_redfish, self).__init__(*args, **kwargs)
@@ -70,7 +69,7 @@
kwargs See parent class method prolog for details.
"""
- if MTLS_ENABLED == 'True':
+ if MTLS_ENABLED == "True":
return None
if not self.__inited__:
message = "bmc_redfish.__init__() was never successfully run. It "
@@ -82,11 +81,12 @@
openbmc_password = BuiltIn().get_variable_value("${OPENBMC_PASSWORD}")
username, args, kwargs = fa.pop_arg(openbmc_username, *args, **kwargs)
password, args, kwargs = fa.pop_arg(openbmc_password, *args, **kwargs)
- auth, args, kwargs = fa.pop_arg('session', *args, **kwargs)
+ auth, args, kwargs = fa.pop_arg("session", *args, **kwargs)
try:
- super(bmc_redfish, self).login(username, password, auth,
- *args, **kwargs)
+ super(bmc_redfish, self).login(
+ username, password, auth, *args, **kwargs
+ )
# Handle InvalidCredentialsError.
# (raise redfish.rest.v1.InvalidCredentialsError if not [200, 201, 202, 204])
except InvalidCredentialsError:
@@ -96,8 +96,9 @@
e_message = "Re-try login due to exception and "
e_message += "it is likely error response from server side."
BuiltIn().log_to_console(e_message)
- super(bmc_redfish, self).login(username, password, auth,
- *args, **kwargs)
+ super(bmc_redfish, self).login(
+ username, password, auth, *args, **kwargs
+ )
# Handle JSONDecodeError and others.
except JSONDecodeError:
except_type, except_value, except_traceback = sys.exc_info()
@@ -106,8 +107,9 @@
e_message = "Re-try login due to JSONDecodeError exception and "
e_message += "it is likely error response from server side."
BuiltIn().log_to_console(e_message)
- super(bmc_redfish, self).login(username, password, auth,
- *args, **kwargs)
+ super(bmc_redfish, self).login(
+ username, password, auth, *args, **kwargs
+ )
except ValueError:
except_type, except_value, except_traceback = sys.exc_info()
BuiltIn().log_to_console(str(except_type))
@@ -116,8 +118,7 @@
BuiltIn().log_to_console(e_message)
def logout(self):
-
- if MTLS_ENABLED == 'True':
+ if MTLS_ENABLED == "True":
return None
else:
super(bmc_redfish, self).logout()
@@ -150,7 +151,7 @@
"""
resp = self.get(*args, **kwargs)
- return resp.dict if hasattr(resp, 'dict') else {}
+ return resp.dict if hasattr(resp, "dict") else {}
def get_attribute(self, path, attribute, default=None, *args, **kwargs):
r"""
@@ -183,8 +184,9 @@
kwargs See parent class get() prolog for details.
"""
- return self.get_properties(path, *args, **kwargs).get(attribute,
- default)
+ return self.get_properties(path, *args, **kwargs).get(
+ attribute, default
+ )
def get_session_info(self):
r"""
@@ -194,7 +196,9 @@
return self.get_session_key(), self.get_session_location()
- def enumerate(self, resource_path, return_json=1, include_dead_resources=False):
+ def enumerate(
+ self, resource_path, return_json=1, include_dead_resources=False
+ ):
r"""
Perform a GET enumerate request and return available resource paths.
@@ -224,38 +228,61 @@
# Example: '/redfish/v1/JsonSchemas/' and sub resources.
# '/redfish/v1/SessionService'
# '/redfish/v1/Managers/bmc#/Oem'
- if ('JsonSchemas' in resource) or ('SessionService' in resource) or ('#' in resource):
+ if (
+ ("JsonSchemas" in resource)
+ or ("SessionService" in resource)
+ or ("#" in resource)
+ ):
continue
- self._rest_response_ = self.get(resource, valid_status_codes=[200, 404, 500])
+ self._rest_response_ = self.get(
+ resource, valid_status_codes=[200, 404, 500]
+ )
# Enumeration is done for available resources ignoring the ones for which response is not
# obtained.
if self._rest_response_.status != 200:
if include_dead_resources:
try:
- dead_resources[self._rest_response_.status].append(resource)
+ dead_resources[self._rest_response_.status].append(
+ resource
+ )
except KeyError:
- dead_resources[self._rest_response_.status] = [resource]
+ dead_resources[self._rest_response_.status] = [
+ resource
+ ]
continue
self.walk_nested_dict(self._rest_response_.dict, url=resource)
enumerated_resources.update(set(resources_to_be_enumerated))
- resources_to_be_enumerated = tuple(self.__pending_enumeration - enumerated_resources)
+ resources_to_be_enumerated = tuple(
+ self.__pending_enumeration - enumerated_resources
+ )
if return_json:
if include_dead_resources:
- return json.dumps(self.__result, sort_keys=True,
- indent=4, separators=(',', ': ')), dead_resources
+ return (
+ json.dumps(
+ self.__result,
+ sort_keys=True,
+ indent=4,
+ separators=(",", ": "),
+ ),
+ dead_resources,
+ )
else:
- return json.dumps(self.__result, sort_keys=True,
- indent=4, separators=(',', ': '))
+ return json.dumps(
+ self.__result,
+ sort_keys=True,
+ indent=4,
+ separators=(",", ": "),
+ )
else:
if include_dead_resources:
return self.__result, dead_resources
else:
return self.__result
- def walk_nested_dict(self, data, url=''):
+ def walk_nested_dict(self, data, url=""):
r"""
Parse through the nested dictionary and get the resource id paths.
@@ -263,21 +290,22 @@
data Nested dictionary data from response message.
url Resource for which the response is obtained in data.
"""
- url = url.rstrip('/')
+ url = url.rstrip("/")
for key, value in data.items():
-
# Recursion if nested dictionary found.
if isinstance(value, dict):
self.walk_nested_dict(value)
else:
# Value contains a list of dictionaries having member data.
- if 'Members' == key:
+ if "Members" == key:
if isinstance(value, list):
for memberDict in value:
- self.__pending_enumeration.add(memberDict['@odata.id'])
- if '@odata.id' == key:
- value = value.rstrip('/')
+ self.__pending_enumeration.add(
+ memberDict["@odata.id"]
+ )
+ if "@odata.id" == key:
+ value = value.rstrip("/")
# Data for the given url.
if value == url:
self.__result[url] = data
@@ -323,7 +351,9 @@
"""
member_list = []
- self._rest_response_ = self.get(resource_path, valid_status_codes=[200])
+ self._rest_response_ = self.get(
+ resource_path, valid_status_codes=[200]
+ )
try:
for member in self._rest_response_.dict["Members"]:
@@ -334,7 +364,7 @@
# Filter elements in the list and return matched elements.
if filter is not None:
- regex = '.*/' + filter + '[^/]*$'
+ regex = ".*/" + filter + "[^/]*$"
return [x for x in member_list if re.match(regex, x)]
return member_list
diff --git a/lib/bmc_redfish_utils.py b/lib/bmc_redfish_utils.py
index 8eade5f..e93eee3 100644
--- a/lib/bmc_redfish_utils.py
+++ b/lib/bmc_redfish_utils.py
@@ -6,15 +6,15 @@
import json
import re
-from robot.libraries.BuiltIn import BuiltIn
+
import gen_print as gp
+from robot.libraries.BuiltIn import BuiltIn
MTLS_ENABLED = BuiltIn().get_variable_value("${MTLS_ENABLED}")
class bmc_redfish_utils(object):
-
- ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
+ ROBOT_LIBRARY_SCOPE = "TEST SUITE"
def __init__(self):
r"""
@@ -22,22 +22,25 @@
"""
# Obtain a reference to the global redfish object.
self.__inited__ = False
- self._redfish_ = BuiltIn().get_library_instance('redfish')
+ self._redfish_ = BuiltIn().get_library_instance("redfish")
- if MTLS_ENABLED == 'True':
+ if MTLS_ENABLED == "True":
self.__inited__ = True
else:
# There is a possibility that a given driver support both redfish and
# legacy REST.
self._redfish_.login()
- self._rest_response_ = \
- self._redfish_.get("/xyz/openbmc_project/", valid_status_codes=[200, 404])
+ self._rest_response_ = self._redfish_.get(
+ "/xyz/openbmc_project/", valid_status_codes=[200, 404]
+ )
# If REST URL /xyz/openbmc_project/ is supported.
if self._rest_response_.status == 200:
self.__inited__ = True
- BuiltIn().set_global_variable("${REDFISH_REST_SUPPORTED}", self.__inited__)
+ BuiltIn().set_global_variable(
+ "${REDFISH_REST_SUPPORTED}", self.__inited__
+ )
def get_redfish_session_info(self):
r"""
@@ -50,7 +53,7 @@
"""
session_dict = {
"key": self._redfish_.get_session_key(),
- "location": self._redfish_.get_session_location()
+ "location": self._redfish_.get_session_location(),
}
return session_dict
@@ -115,9 +118,11 @@
# Iterate and check if path object has the attribute.
for child_path_idx in child_path_list:
- if ('JsonSchemas' in child_path_idx)\
- or ('SessionService' in child_path_idx)\
- or ('#' in child_path_idx):
+ if (
+ ("JsonSchemas" in child_path_idx)
+ or ("SessionService" in child_path_idx)
+ or ("#" in child_path_idx)
+ ):
continue
if self.get_attribute(child_path_idx, attribute):
valid_path_list.append(child_path_idx)
@@ -180,7 +185,7 @@
# Return the matching target URL entry.
for target in target_list:
# target "/redfish/v1/Systems/system/Actions/ComputerSystem.Reset"
- attribute_in_uri = target.rsplit('/', 1)[-1]
+ attribute_in_uri = target.rsplit("/", 1)[-1]
# attribute_in_uri "ComputerSystem.Reset"
if target_attribute == attribute_in_uri:
return target
@@ -226,9 +231,9 @@
# Set quiet variable to keep subordinate get() calls quiet.
quiet = 1
self.__pending_enumeration = set()
- self._rest_response_ = \
- self._redfish_.get(resource_path,
- valid_status_codes=[200, 404, 500])
+ self._rest_response_ = self._redfish_.get(
+ resource_path, valid_status_codes=[200, 404, 500]
+ )
# Return empty list.
if self._rest_response_.status != 200:
@@ -237,17 +242,18 @@
if not self.__pending_enumeration:
return resource_path
for resource in self.__pending_enumeration.copy():
- self._rest_response_ = \
- self._redfish_.get(resource,
- valid_status_codes=[200, 404, 500])
+ self._rest_response_ = self._redfish_.get(
+ resource, valid_status_codes=[200, 404, 500]
+ )
if self._rest_response_.status != 200:
continue
self.walk_nested_dict(self._rest_response_.dict)
return list(sorted(self.__pending_enumeration))
- def enumerate_request(self, resource_path, return_json=1,
- include_dead_resources=False):
+ def enumerate_request(
+ self, resource_path, return_json=1, include_dead_resources=False
+ ):
r"""
Perform a GET enumerate request and return available resource paths.
@@ -292,71 +298,91 @@
# Example: '/redfish/v1/JsonSchemas/' and sub resources.
# '/redfish/v1/SessionService'
# '/redfish/v1/Managers/bmc#/Oem'
- if ('JsonSchemas' in resource) or ('SessionService' in resource)\
- or ('PostCodes' in resource) or ('Registries' in resource)\
- or ('Journal' in resource)\
- or ('#' in resource):
+ if (
+ ("JsonSchemas" in resource)
+ or ("SessionService" in resource)
+ or ("PostCodes" in resource)
+ or ("Registries" in resource)
+ or ("Journal" in resource)
+ or ("#" in resource)
+ ):
continue
- self._rest_response_ = \
- self._redfish_.get(resource, valid_status_codes=[200, 404, 405, 500])
+ self._rest_response_ = self._redfish_.get(
+ resource, valid_status_codes=[200, 404, 405, 500]
+ )
# Enumeration is done for available resources ignoring the
# ones for which response is not obtained.
if self._rest_response_.status != 200:
if include_dead_resources:
try:
dead_resources[self._rest_response_.status].append(
- resource)
+ resource
+ )
except KeyError:
- dead_resources[self._rest_response_.status] = \
- [resource]
+ dead_resources[self._rest_response_.status] = [
+ resource
+ ]
continue
self.walk_nested_dict(self._rest_response_.dict, url=resource)
enumerated_resources.update(set(resources_to_be_enumerated))
- resources_to_be_enumerated = \
- tuple(self.__pending_enumeration - enumerated_resources)
+ resources_to_be_enumerated = tuple(
+ self.__pending_enumeration - enumerated_resources
+ )
if return_json:
if include_dead_resources:
- return json.dumps(self.__result, sort_keys=True,
- indent=4, separators=(',', ': ')), dead_resources
+ return (
+ json.dumps(
+ self.__result,
+ sort_keys=True,
+ indent=4,
+ separators=(",", ": "),
+ ),
+ dead_resources,
+ )
else:
- return json.dumps(self.__result, sort_keys=True,
- indent=4, separators=(',', ': '))
+ return json.dumps(
+ self.__result,
+ sort_keys=True,
+ indent=4,
+ separators=(",", ": "),
+ )
else:
if include_dead_resources:
return self.__result, dead_resources
else:
return self.__result
- def walk_nested_dict(self, data, url=''):
+ def walk_nested_dict(self, data, url=""):
r"""
Parse through the nested dictionary and get the resource id paths.
Description of argument(s):
data Nested dictionary data from response message.
url Resource for which the response is obtained in data.
"""
- url = url.rstrip('/')
+ url = url.rstrip("/")
for key, value in data.items():
-
# Recursion if nested dictionary found.
if isinstance(value, dict):
self.walk_nested_dict(value)
else:
# Value contains a list of dictionaries having member data.
- if 'Members' == key:
+ if "Members" == key:
if isinstance(value, list):
for memberDict in value:
if isinstance(memberDict, str):
self.__pending_enumeration.add(memberDict)
else:
- self.__pending_enumeration.add(memberDict['@odata.id'])
+ self.__pending_enumeration.add(
+ memberDict["@odata.id"]
+ )
- if '@odata.id' == key:
- value = value.rstrip('/')
+ if "@odata.id" == key:
+ value = value.rstrip("/")
# Data for the given url.
if value == url:
self.__result[url] = data
diff --git a/lib/bmc_ssh_utils.py b/lib/bmc_ssh_utils.py
index fdd376f..b9ead59 100755
--- a/lib/bmc_ssh_utils.py
+++ b/lib/bmc_ssh_utils.py
@@ -5,19 +5,22 @@
"""
import os
-import gen_valid as gv
+
import gen_robot_ssh as grs
+import gen_valid as gv
from robot.libraries.BuiltIn import BuiltIn
-def bmc_execute_command(cmd_buf,
- print_out=0,
- print_err=0,
- ignore_err=0,
- fork=0,
- quiet=None,
- test_mode=None,
- time_out=None):
+def bmc_execute_command(
+ cmd_buf,
+ print_out=0,
+ print_err=0,
+ ignore_err=0,
+ fork=0,
+ quiet=None,
+ test_mode=None,
+ time_out=None,
+):
r"""
Run the given command in an BMC SSH session and return the stdout, stderr and the return code.
@@ -45,10 +48,12 @@
# Get global BMC variable values.
openbmc_host = BuiltIn().get_variable_value("${OPENBMC_HOST}", default="")
ssh_port = BuiltIn().get_variable_value("${SSH_PORT}", default="22")
- openbmc_username = BuiltIn().get_variable_value("${OPENBMC_USERNAME}",
- default="")
- openbmc_password = BuiltIn().get_variable_value("${OPENBMC_PASSWORD}",
- default="")
+ openbmc_username = BuiltIn().get_variable_value(
+ "${OPENBMC_USERNAME}", default=""
+ )
+ openbmc_password = BuiltIn().get_variable_value(
+ "${OPENBMC_PASSWORD}", default=""
+ )
if not gv.valid_value(openbmc_host):
return "", "", 1
@@ -59,30 +64,47 @@
if not gv.valid_value(ssh_port):
return "", "", 1
- open_connection_args = {'host': openbmc_host, 'alias': 'bmc_connection',
- 'timeout': '25.0', 'prompt': '# ', 'port': ssh_port}
- login_args = {'username': openbmc_username, 'password': openbmc_password}
+ open_connection_args = {
+ "host": openbmc_host,
+ "alias": "bmc_connection",
+ "timeout": "25.0",
+ "prompt": "# ",
+ "port": ssh_port,
+ }
+ login_args = {"username": openbmc_username, "password": openbmc_password}
- openbmc_user_type = os.environ.get('USER_TYPE', "") or \
- BuiltIn().get_variable_value("${USER_TYPE}", default="")
- if openbmc_user_type == 'sudo':
- cmd_buf = 'sudo -i ' + cmd_buf
- return grs.execute_ssh_command(cmd_buf, open_connection_args, login_args,
- print_out, print_err, ignore_err, fork,
- quiet, test_mode, time_out)
+ openbmc_user_type = os.environ.get(
+ "USER_TYPE", ""
+ ) or BuiltIn().get_variable_value("${USER_TYPE}", default="")
+ if openbmc_user_type == "sudo":
+ cmd_buf = "sudo -i " + cmd_buf
+ return grs.execute_ssh_command(
+ cmd_buf,
+ open_connection_args,
+ login_args,
+ print_out,
+ print_err,
+ ignore_err,
+ fork,
+ quiet,
+ test_mode,
+ time_out,
+ )
-def os_execute_command(cmd_buf,
- print_out=0,
- print_err=0,
- ignore_err=0,
- fork=0,
- quiet=None,
- test_mode=None,
- time_out=None,
- os_host="",
- os_username="",
- os_password=""):
+def os_execute_command(
+ cmd_buf,
+ print_out=0,
+ print_err=0,
+ ignore_err=0,
+ fork=0,
+ quiet=None,
+ test_mode=None,
+ time_out=None,
+ os_host="",
+ os_username="",
+ os_password="",
+):
r"""
Run the given command in an OS SSH session and return the stdout, stderr and the return code.
@@ -111,9 +133,13 @@
if os_host == "":
os_host = BuiltIn().get_variable_value("${OS_HOST}", default="")
if os_username == "":
- os_username = BuiltIn().get_variable_value("${OS_USERNAME}", default="")
+ os_username = BuiltIn().get_variable_value(
+ "${OS_USERNAME}", default=""
+ )
if os_password == "":
- os_password = BuiltIn().get_variable_value("${OS_PASSWORD}", default="")
+ os_password = BuiltIn().get_variable_value(
+ "${OS_PASSWORD}", default=""
+ )
if not gv.valid_value(os_host):
return "", "", 1
@@ -122,21 +148,32 @@
if not gv.valid_value(os_password):
return "", "", 1
- open_connection_args = {'host': os_host, 'alias': 'os_connection'}
- login_args = {'username': os_username, 'password': os_password}
+ open_connection_args = {"host": os_host, "alias": "os_connection"}
+ login_args = {"username": os_username, "password": os_password}
- return grs.execute_ssh_command(cmd_buf, open_connection_args, login_args,
- print_out, print_err, ignore_err, fork,
- quiet, test_mode, time_out)
+ return grs.execute_ssh_command(
+ cmd_buf,
+ open_connection_args,
+ login_args,
+ print_out,
+ print_err,
+ ignore_err,
+ fork,
+ quiet,
+ test_mode,
+ time_out,
+ )
-def xcat_execute_command(cmd_buf,
- print_out=0,
- print_err=0,
- ignore_err=0,
- fork=0,
- quiet=None,
- test_mode=None):
+def xcat_execute_command(
+ cmd_buf,
+ print_out=0,
+ print_err=0,
+ ignore_err=0,
+ fork=0,
+ quiet=None,
+ test_mode=None,
+):
r"""
Run the given command in an XCAT SSH session and return the stdout, stderr and the return code.
@@ -161,12 +198,13 @@
# Get global XCAT variable values.
xcat_host = BuiltIn().get_variable_value("${XCAT_HOST}", default="")
- xcat_username = BuiltIn().get_variable_value("${XCAT_USERNAME}",
- default="")
- xcat_password = BuiltIn().get_variable_value("${XCAT_PASSWORD}",
- default="")
- xcat_port = BuiltIn().get_variable_value("${XCAT_PORT}",
- default="22")
+ xcat_username = BuiltIn().get_variable_value(
+ "${XCAT_USERNAME}", default=""
+ )
+ xcat_password = BuiltIn().get_variable_value(
+ "${XCAT_PASSWORD}", default=""
+ )
+ xcat_port = BuiltIn().get_variable_value("${XCAT_PORT}", default="22")
if not gv.valid_value(xcat_host):
return "", "", 1
@@ -177,19 +215,27 @@
if not gv.valid_value(xcat_port):
return "", "", 1
- open_connection_args = {'host': xcat_host, 'alias': 'xcat_connection',
- 'port': xcat_port}
- login_args = {'username': xcat_username, 'password': xcat_password}
+ open_connection_args = {
+ "host": xcat_host,
+ "alias": "xcat_connection",
+ "port": xcat_port,
+ }
+ login_args = {"username": xcat_username, "password": xcat_password}
- return grs.execute_ssh_command(cmd_buf, open_connection_args, login_args,
- print_out, print_err, ignore_err, fork,
- quiet, test_mode)
+ return grs.execute_ssh_command(
+ cmd_buf,
+ open_connection_args,
+ login_args,
+ print_out,
+ print_err,
+ ignore_err,
+ fork,
+ quiet,
+ test_mode,
+ )
-def device_write(cmd_buf,
- print_out=0,
- quiet=None,
- test_mode=None):
+def device_write(cmd_buf, print_out=0, quiet=None, test_mode=None):
r"""
Write the given command in a device SSH session and return the stdout, stderr and the return code.
@@ -216,12 +262,13 @@
# Get global DEVICE variable values.
device_host = BuiltIn().get_variable_value("${DEVICE_HOST}", default="")
- device_username = BuiltIn().get_variable_value("${DEVICE_USERNAME}",
- default="")
- device_password = BuiltIn().get_variable_value("${DEVICE_PASSWORD}",
- default="")
- device_port = BuiltIn().get_variable_value("${DEVICE_PORT}",
- default="22")
+ device_username = BuiltIn().get_variable_value(
+ "${DEVICE_USERNAME}", default=""
+ )
+ device_password = BuiltIn().get_variable_value(
+ "${DEVICE_PASSWORD}", default=""
+ )
+ device_port = BuiltIn().get_variable_value("${DEVICE_PORT}", default="22")
if not gv.valid_value(device_host):
return "", "", 1
@@ -232,10 +279,21 @@
if not gv.valid_value(device_port):
return "", "", 1
- open_connection_args = {'host': device_host, 'alias': 'device_connection',
- 'port': device_port}
- login_args = {'username': device_username, 'password': device_password}
+ open_connection_args = {
+ "host": device_host,
+ "alias": "device_connection",
+ "port": device_port,
+ }
+ login_args = {"username": device_username, "password": device_password}
- return grs.execute_ssh_command(cmd_buf, open_connection_args, login_args,
- print_out, print_err=0, ignore_err=1,
- fork=0, quiet=quiet, test_mode=test_mode)
+ return grs.execute_ssh_command(
+ cmd_buf,
+ open_connection_args,
+ login_args,
+ print_out,
+ print_err=0,
+ ignore_err=1,
+ fork=0,
+ quiet=quiet,
+ test_mode=test_mode,
+ )
diff --git a/lib/boot_data.py b/lib/boot_data.py
index eaa7a52..b767e21 100755
--- a/lib/boot_data.py
+++ b/lib/boot_data.py
@@ -5,36 +5,40 @@
boot_results_table.
"""
+import glob
+import json
import os
import tempfile
-import json
-import glob
-from tally_sheet import *
from robot.libraries.BuiltIn import BuiltIn
+from tally_sheet import *
+
try:
from robot.utils import DotDict
except ImportError:
import collections
+import gen_cmd as gc
+import gen_misc as gm
import gen_print as gp
import gen_valid as gv
-import gen_misc as gm
-import gen_cmd as gc
import var_funcs as vf
# The code base directory will be one level up from the directory containing this module.
code_base_dir_path = os.path.dirname(os.path.dirname(__file__)) + os.sep
-redfish_support_trans_state = int(os.environ.get('REDFISH_SUPPORT_TRANS_STATE', 0)) or \
- int(BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0))
+redfish_support_trans_state = int(
+ os.environ.get("REDFISH_SUPPORT_TRANS_STATE", 0)
+) or int(
+ BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0)
+)
-platform_arch_type = os.environ.get('PLATFORM_ARCH_TYPE', '') or \
- BuiltIn().get_variable_value("${PLATFORM_ARCH_TYPE}", default="power")
+platform_arch_type = os.environ.get(
+ "PLATFORM_ARCH_TYPE", ""
+) or BuiltIn().get_variable_value("${PLATFORM_ARCH_TYPE}", default="power")
-def create_boot_table(file_path=None,
- os_host=""):
+def create_boot_table(file_path=None, os_host=""):
r"""
Read the boot table JSON file, convert it to an object and return it.
@@ -54,11 +58,17 @@
"""
if file_path is None:
if redfish_support_trans_state and platform_arch_type != "x86":
- file_path = os.environ.get('BOOT_TABLE_PATH', 'data/boot_table_redfish.json')
+ file_path = os.environ.get(
+ "BOOT_TABLE_PATH", "data/boot_table_redfish.json"
+ )
elif platform_arch_type == "x86":
- file_path = os.environ.get('BOOT_TABLE_PATH', 'data/boot_table_x86.json')
+ file_path = os.environ.get(
+ "BOOT_TABLE_PATH", "data/boot_table_x86.json"
+ )
else:
- file_path = os.environ.get('BOOT_TABLE_PATH', 'data/boot_table.json')
+ file_path = os.environ.get(
+ "BOOT_TABLE_PATH", "data/boot_table.json"
+ )
if not file_path.startswith("/"):
file_path = code_base_dir_path + file_path
@@ -77,7 +87,7 @@
# the boot entries.
if os_host == "":
for boot in boot_table:
- state_keys = ['start', 'end']
+ state_keys = ["start", "end"]
for state_key in state_keys:
for sub_state in list(boot_table[boot][state_key]):
if sub_state.startswith("os_"):
@@ -149,8 +159,7 @@
return boot_lists
-def valid_boot_list(boot_list,
- valid_boot_types):
+def valid_boot_list(boot_list, valid_boot_types):
r"""
Verify that each entry in boot_list is a supported boot test.
@@ -162,24 +171,21 @@
for boot_name in boot_list:
boot_name = boot_name.strip(" ")
- error_message = gv.valid_value(boot_name,
- valid_values=valid_boot_types,
- var_name="boot_name")
+ error_message = gv.valid_value(
+ boot_name, valid_values=valid_boot_types, var_name="boot_name"
+ )
if error_message != "":
BuiltIn().fail(gp.sprint_error(error_message))
class boot_results:
-
r"""
This class defines a boot_results table.
"""
- def __init__(self,
- boot_table,
- boot_pass=0,
- boot_fail=0,
- obj_name='boot_results'):
+ def __init__(
+ self, boot_table, boot_pass=0, boot_fail=0, obj_name="boot_results"
+ ):
r"""
Initialize the boot results object.
@@ -202,13 +208,13 @@
self.__initial_boot_fail = boot_fail
# Create boot_results_fields for use in creating boot_results table.
- boot_results_fields = DotDict([('total', 0), ('pass', 0), ('fail', 0)])
+ boot_results_fields = DotDict([("total", 0), ("pass", 0), ("fail", 0)])
# Create boot_results table.
- self.__boot_results = tally_sheet('boot type',
- boot_results_fields,
- 'boot_test_results')
- self.__boot_results.set_sum_fields(['total', 'pass', 'fail'])
- self.__boot_results.set_calc_fields(['total=pass+fail'])
+ self.__boot_results = tally_sheet(
+ "boot type", boot_results_fields, "boot_test_results"
+ )
+ self.__boot_results.set_sum_fields(["total", "pass", "fail"])
+ self.__boot_results.set_calc_fields(["total=pass+fail"])
# Create one row in the result table for each kind of boot test in the boot_table (i.e. for all
# supported boot tests).
for boot_name in list(boot_table.keys()):
@@ -230,12 +236,12 @@
"""
totals_line = self.__boot_results.calc()
- return totals_line['pass'] + self.__initial_boot_pass,\
- totals_line['fail'] + self.__initial_boot_fail
+ return (
+ totals_line["pass"] + self.__initial_boot_pass,
+ totals_line["fail"] + self.__initial_boot_fail,
+ )
- def update(self,
- boot_type,
- boot_status):
+ def update(self, boot_type, boot_status):
r"""
Update our boot_results_table. This includes:
- Updating the record for the given boot_type by incrementing the pass or fail field.
@@ -250,8 +256,7 @@
self.__boot_results.inc_row_field(boot_type, boot_status.lower())
self.__boot_results.calc()
- def sprint_report(self,
- header_footer="\n"):
+ def sprint_report(self, header_footer="\n"):
r"""
String-print the formatted boot_resuls_table and return them.
@@ -268,9 +273,7 @@
return buffer
- def print_report(self,
- header_footer="\n",
- quiet=None):
+ def print_report(self, header_footer="\n", quiet=None):
r"""
Print the formatted boot_resuls_table to the console.
@@ -280,7 +283,7 @@
stack to get the default value.
"""
- quiet = int(gm.dft(quiet, gp.get_stack_var('quiet', 0)))
+ quiet = int(gm.dft(quiet, gp.get_stack_var("quiet", 0)))
gp.qprint(self.sprint_report(header_footer))
@@ -307,9 +310,7 @@
gp.gp_print(self.sprint_obj())
-def create_boot_results_file_path(pgm_name,
- openbmc_nickname,
- master_pid):
+def create_boot_results_file_path(pgm_name, openbmc_nickname, master_pid):
r"""
Create a file path to be used to store a boot_results object.
@@ -318,7 +319,7 @@
openbmc_nickname The name of the system. This could be a nickname, a hostname, an IP,
etc. This will form part of the resulting file name.
master_pid The master process id which will form part of the file name.
- """
+ """
USER = os.environ.get("USER", "")
dir_path = "/tmp/" + USER + "/"
@@ -326,8 +327,9 @@
os.makedirs(dir_path)
file_name_dict = vf.create_var_dict(pgm_name, openbmc_nickname, master_pid)
- return vf.create_file_path(file_name_dict, dir_path=dir_path,
- file_suffix=":boot_results")
+ return vf.create_file_path(
+ file_name_dict, dir_path=dir_path, file_suffix=":boot_results"
+ )
def cleanup_boot_results_file():
@@ -341,7 +343,7 @@
for file_path in file_list:
# Use parse_file_path to extract info from the file path.
file_dict = vf.parse_file_path(file_path)
- if gm.pid_active(file_dict['master_pid']):
+ if gm.pid_active(file_dict["master_pid"]):
gp.qprint_timen("Preserving " + file_path + ".")
else:
gc.cmd_fnc("rm -f " + file_path)
@@ -363,7 +365,7 @@
boot_history.append(boot_start_message)
# Trim list to max number of entries.
- del boot_history[:max(0, len(boot_history) - max_boot_history)]
+ del boot_history[: max(0, len(boot_history) - max_boot_history)]
def print_boot_history(boot_history, quiet=None):
@@ -375,7 +377,7 @@
stack to get the default value.
"""
- quiet = int(gm.dft(quiet, gp.get_stack_var('quiet', 0)))
+ quiet = int(gm.dft(quiet, gp.get_stack_var("quiet", 0)))
# indent 0, 90 chars wide, linefeed, char is "="
gp.qprint_dashes(0, 90)
diff --git a/lib/code_update_utils.py b/lib/code_update_utils.py
index 212dc3d..4c244ca 100644
--- a/lib/code_update_utils.py
+++ b/lib/code_update_utils.py
@@ -4,24 +4,23 @@
This module provides utilities for code updates.
"""
-from robot.libraries.BuiltIn import BuiltIn
-
+import collections
import os
import re
import sys
import tarfile
import time
-import collections
+
from robot.libraries.BuiltIn import BuiltIn
robot_pgm_dir_path = os.path.dirname(__file__) + os.sep
-repo_data_path = re.sub('/lib', '/data', robot_pgm_dir_path)
+repo_data_path = re.sub("/lib", "/data", robot_pgm_dir_path)
sys.path.append(repo_data_path)
-import bmc_ssh_utils as bsu # NOQA
-import gen_robot_keyword as keyword # NOQA
-import gen_print as gp # NOQA
-import variables as var # NOQA
+import bmc_ssh_utils as bsu # NOQA
+import gen_print as gp # NOQA
+import gen_robot_keyword as keyword # NOQA
+import variables as var # NOQA
def get_bmc_firmware(image_type, sw_dict):
@@ -35,7 +34,7 @@
temp_dict = collections.OrderedDict()
for key, value in sw_dict.items():
- if value['image_type'] == image_type:
+ if value["image_type"] == image_type:
temp_dict[key] = value
else:
pass
@@ -52,8 +51,9 @@
"""
taken_priorities = {}
- _, image_names = keyword.run_key("Get Software Objects "
- + "version_type=" + image_purpose)
+ _, image_names = keyword.run_key(
+ "Get Software Objects " + "version_type=" + image_purpose
+ )
for image_name in image_names:
_, image = keyword.run_key("Get Host Software Property " + image_name)
@@ -61,9 +61,10 @@
continue
image_priority = image["Priority"]
if image_priority in taken_priorities:
- BuiltIn().fail("Found active images with the same priority.\n"
- + gp.sprint_vars(image,
- taken_priorities[image_priority]))
+ BuiltIn().fail(
+ "Found active images with the same priority.\n"
+ + gp.sprint_vars(image, taken_priorities[image_priority])
+ )
taken_priorities[image_priority] = image
@@ -75,17 +76,22 @@
# Get the version of the image currently running on the BMC.
_, cur_img_version = keyword.run_key("Get BMC Version")
# Remove the surrounding double quotes from the version.
- cur_img_version = cur_img_version.replace('"', '')
+ cur_img_version = cur_img_version.replace('"', "")
- _, images = keyword.run_key("Read Properties "
- + var.SOFTWARE_VERSION_URI + "enumerate")
+ _, images = keyword.run_key(
+ "Read Properties " + var.SOFTWARE_VERSION_URI + "enumerate"
+ )
for image_name in images:
_, image_properties = keyword.run_key(
- "Get Host Software Property " + image_name)
- if 'Purpose' in image_properties and 'Version' in image_properties \
- and image_properties['Purpose'] != var.VERSION_PURPOSE_HOST \
- and image_properties['Version'] != cur_img_version:
+ "Get Host Software Property " + image_name
+ )
+ if (
+ "Purpose" in image_properties
+ and "Version" in image_properties
+ and image_properties["Purpose"] != var.VERSION_PURPOSE_HOST
+ and image_properties["Version"] != cur_img_version
+ ):
return image_name
BuiltIn().fail("Did not find any non-running BMC images.")
@@ -97,11 +103,16 @@
keyword.run_key("Initiate Host PowerOff")
- status, images = keyword.run_key("Get Software Objects "
- + var.VERSION_PURPOSE_HOST)
+ status, images = keyword.run_key(
+ "Get Software Objects " + var.VERSION_PURPOSE_HOST
+ )
for image_name in images:
- keyword.run_key("Delete Image And Verify " + image_name + " "
- + var.VERSION_PURPOSE_HOST)
+ keyword.run_key(
+ "Delete Image And Verify "
+ + image_name
+ + " "
+ + var.VERSION_PURPOSE_HOST
+ )
def wait_for_activation_state_change(version_id, initial_state):
@@ -119,22 +130,23 @@
retry = 0
num_read_errors = 0
read_fail_threshold = 1
- while (retry < 60):
- status, software_state = keyword.run_key("Read Properties "
- + var.SOFTWARE_VERSION_URI
- + str(version_id),
- ignore=1)
- if status == 'FAIL':
+ while retry < 60:
+ status, software_state = keyword.run_key(
+ "Read Properties " + var.SOFTWARE_VERSION_URI + str(version_id),
+ ignore=1,
+ )
+ if status == "FAIL":
num_read_errors += 1
if num_read_errors > read_fail_threshold:
- message = "Read errors exceeds threshold:\n " \
- + gp.sprint_vars(num_read_errors, read_fail_threshold)
+ message = "Read errors exceeds threshold:\n " + gp.sprint_vars(
+ num_read_errors, read_fail_threshold
+ )
BuiltIn().fail(message)
time.sleep(10)
continue
current_state = (software_state)["Activation"]
- if (initial_state == current_state):
+ if initial_state == current_state:
time.sleep(10)
retry += 1
num_read_errors = 0
@@ -153,10 +165,12 @@
returned to the calling function.
"""
- stdout, stderr, rc = \
- bsu.bmc_execute_command("cd " + dir_path
- + "; stat -c '%Y %n' * |"
- + " sort -k1,1nr | head -n 1")
+ stdout, stderr, rc = bsu.bmc_execute_command(
+ "cd "
+ + dir_path
+ + "; stat -c '%Y %n' * |"
+ + " sort -k1,1nr | head -n 1"
+ )
return stdout.split(" ")[-1]
@@ -197,9 +211,9 @@
version.
"""
- stdout, stderr, rc = \
- bsu.bmc_execute_command("cat " + file_path
- + " | grep \"version=\"", ignore_err=1)
+ stdout, stderr, rc = bsu.bmc_execute_command(
+ "cat " + file_path + ' | grep "version="', ignore_err=1
+ )
return (stdout.split("\n")[0]).split("=")[-1]
@@ -212,9 +226,9 @@
purpose.
"""
- stdout, stderr, rc = \
- bsu.bmc_execute_command("cat " + file_path
- + " | grep \"purpose=\"", ignore_err=1)
+ stdout, stderr, rc = bsu.bmc_execute_command(
+ "cat " + file_path + ' | grep "purpose="', ignore_err=1
+ )
return stdout.split("=")[-1]
@@ -230,22 +244,22 @@
one of the images in the upload dir.
"""
- stdout, stderr, rc = \
- bsu.bmc_execute_command("ls -d " + var.IMAGE_UPLOAD_DIR_PATH + "*/")
+ stdout, stderr, rc = bsu.bmc_execute_command(
+ "ls -d " + var.IMAGE_UPLOAD_DIR_PATH + "*/"
+ )
image_list = stdout.split("\n")
retry = 0
- while (retry < 10):
+ while retry < 10:
for i in range(0, len(image_list)):
version = get_image_version(image_list[i] + "MANIFEST")
- if (version == image_version):
+ if version == image_version:
return image_list[i]
time.sleep(10)
retry += 1
-def verify_image_upload(image_version,
- timeout=3):
+def verify_image_upload(image_version, timeout=3):
r"""
Verify the image was uploaded correctly and that it created
a valid d-bus object. If the first check for the image
@@ -263,16 +277,22 @@
keyword.run_key_u("Open Connection And Log In")
image_purpose = get_image_purpose(image_path + "MANIFEST")
- if (image_purpose == var.VERSION_PURPOSE_BMC
- or image_purpose == var.VERSION_PURPOSE_HOST):
+ if (
+ image_purpose == var.VERSION_PURPOSE_BMC
+ or image_purpose == var.VERSION_PURPOSE_HOST
+ ):
uri = var.SOFTWARE_VERSION_URI + image_version_id
ret_values = ""
for itr in range(timeout * 2):
- status, ret_values = \
- keyword.run_key("Read Attribute " + uri + " Activation")
+ status, ret_values = keyword.run_key(
+ "Read Attribute " + uri + " Activation"
+ )
- if ((ret_values == var.READY) or (ret_values == var.INVALID)
- or (ret_values == var.ACTIVE)):
+ if (
+ (ret_values == var.READY)
+ or (ret_values == var.INVALID)
+ or (ret_values == var.ACTIVE)
+ ):
return True, image_version_id
else:
time.sleep(30)
@@ -300,13 +320,16 @@
"""
for i in range(timeout * 2):
- stdout, stderr, rc = \
- bsu.bmc_execute_command('ls ' + var.IMAGE_UPLOAD_DIR_PATH
- + '*/MANIFEST 2>/dev/null '
- + '| xargs grep -rl "version='
- + image_version + '"')
- image_dir = os.path.dirname(stdout.split('\n')[0])
- if '' != image_dir:
- bsu.bmc_execute_command('rm -rf ' + image_dir)
- BuiltIn().fail('Found invalid BMC Image: ' + image_dir)
+ stdout, stderr, rc = bsu.bmc_execute_command(
+ "ls "
+ + var.IMAGE_UPLOAD_DIR_PATH
+ + "*/MANIFEST 2>/dev/null "
+ + '| xargs grep -rl "version='
+ + image_version
+ + '"'
+ )
+ image_dir = os.path.dirname(stdout.split("\n")[0])
+ if "" != image_dir:
+ bsu.bmc_execute_command("rm -rf " + image_dir)
+ BuiltIn().fail("Found invalid BMC Image: " + image_dir)
time.sleep(30)
diff --git a/lib/disable_warning_urllib.py b/lib/disable_warning_urllib.py
index 4c08a24..8527b6d 100644
--- a/lib/disable_warning_urllib.py
+++ b/lib/disable_warning_urllib.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
import logging
import warnings
+
try:
import httplib
except ImportError:
@@ -14,6 +15,6 @@
requests_log.propagate = False
-class disable_warning_urllib():
+class disable_warning_urllib:
def do_nothing():
return
diff --git a/lib/dump_utils.py b/lib/dump_utils.py
index f3691ed..ee70850 100755
--- a/lib/dump_utils.py
+++ b/lib/dump_utils.py
@@ -4,20 +4,23 @@
This file contains functions which are useful for processing BMC dumps.
"""
-from robot.libraries.BuiltIn import BuiltIn
-import gen_print as gp
-import gen_misc as gm
-import gen_robot_keyword as grk
-import bmc_ssh_utils as bsu
-import var_funcs as vf
+import imp
import os
import sys
-import os
-import imp
-base_path = os.path.dirname(os.path.dirname(
- imp.find_module("gen_robot_print")[1])) + os.sep
+
+import bmc_ssh_utils as bsu
+import gen_misc as gm
+import gen_print as gp
+import gen_robot_keyword as grk
+import var_funcs as vf
+from robot.libraries.BuiltIn import BuiltIn
+
+base_path = (
+ os.path.dirname(os.path.dirname(imp.find_module("gen_robot_print")[1]))
+ + os.sep
+)
sys.path.append(base_path + "data/")
-import variables as var # NOQA
+import variables as var # NOQA
def get_dump_dict(quiet=None):
@@ -55,9 +58,7 @@
return output.split("\n")
-def valid_dump(dump_id,
- dump_dict=None,
- quiet=None):
+def valid_dump(dump_id, dump_dict=None, quiet=None):
r"""
Verify that dump_id is a valid. If it is not valid, issue robot failure
message.
@@ -79,25 +80,26 @@
dump_dict = get_dump_dict(quiet=quiet)
if dump_id not in dump_dict:
- message = "The specified dump ID was not found among the existing" \
+ message = (
+ "The specified dump ID was not found among the existing"
+ " dumps:\n"
+ )
message += gp.sprint_var(dump_id)
message += gp.sprint_var(dump_dict)
BuiltIn().fail(gp.sprint_error(message))
if not dump_dict[dump_id].endswith("tar.xz"):
- message = "There is no \"tar.xz\" file associated with the given" \
+ message = (
+ 'There is no "tar.xz" file associated with the given'
+ " dump_id:\n"
+ )
message += gp.sprint_var(dump_id)
dump_file_path = dump_dict[dump_id]
message += gp.sprint_var(dump_file_path)
BuiltIn().fail(gp.sprint_error(message))
-def scp_dumps(targ_dir_path,
- targ_file_prefix="",
- dump_dict=None,
- quiet=None):
+def scp_dumps(targ_dir_path, targ_file_prefix="", dump_dict=None, quiet=None):
r"""
SCP all dumps from the BMC to the indicated directory on the local system
and return a list of the new files.
@@ -124,10 +126,12 @@
dump_file_list = []
for file_path in dump_list:
- targ_file_path = targ_dir_path + targ_file_prefix \
- + os.path.basename(file_path)
- status, ret_values = grk.run_key("scp.Get File " + file_path
- + " " + targ_file_path, quiet=quiet)
+ targ_file_path = (
+ targ_dir_path + targ_file_prefix + os.path.basename(file_path)
+ )
+ status, ret_values = grk.run_key(
+ "scp.Get File " + file_path + " " + targ_file_path, quiet=quiet
+ )
dump_file_list.append(targ_file_path)
return dump_file_list
diff --git a/lib/event_notification.py b/lib/event_notification.py
index 363caf4..53c010e 100755
--- a/lib/event_notification.py
+++ b/lib/event_notification.py
@@ -1,14 +1,15 @@
#!/usr/bin/env python3
-import requests
-import websocket
import json
import ssl
-import gen_valid as gv
+
import gen_print as gp
+import gen_valid as gv
+import requests
+import websocket
-class event_notification():
+class event_notification:
r"""
Main class to subscribe and receive event notifications.
"""
@@ -36,20 +37,22 @@
r"""
Login and return session object.
"""
- http_header = {'Content-Type': 'application/json'}
+ http_header = {"Content-Type": "application/json"}
session = requests.session()
- response = session.post('https://' + self.__host + '/login',
- headers=http_header,
- json={"data": [self.__user, self.__password]},
- verify=False, timeout=30)
+ response = session.post(
+ "https://" + self.__host + "/login",
+ headers=http_header,
+ json={"data": [self.__user, self.__password]},
+ verify=False,
+ timeout=30,
+ )
gv.valid_value(response.status_code, valid_values=[200])
login_response = json.loads(response.text)
gp.qprint_var(login_response)
- gv.valid_value(login_response['status'], valid_values=['ok'])
+ gv.valid_value(login_response["status"], valid_values=["ok"])
return session
def subscribe(self, dbus_path, enable_trace=False):
-
r"""
Subscribe to the given path and return a list of event notifications.
@@ -79,15 +82,21 @@
cookies = session.cookies.get_dict()
# Convert from dictionary to a string of the following format:
# key=value;key=value...
- cookies = gp.sprint_var(cookies, fmt=gp.no_header() | gp.strip_brackets(),
- col1_width=0, trailing_char="",
- delim="=").replace("\n", ";")
+ cookies = gp.sprint_var(
+ cookies,
+ fmt=gp.no_header() | gp.strip_brackets(),
+ col1_width=0,
+ trailing_char="",
+ delim="=",
+ ).replace("\n", ";")
websocket.enableTrace(enable_trace)
- self.__websocket = websocket.create_connection("wss://{host}/subscribe".format(host=self.__host),
- sslopt={"cert_reqs": ssl.CERT_NONE},
- cookie=cookies)
- dbus_path = [path.strip() for path in dbus_path.split(',')]
+ self.__websocket = websocket.create_connection(
+ "wss://{host}/subscribe".format(host=self.__host),
+ sslopt={"cert_reqs": ssl.CERT_NONE},
+ cookie=cookies,
+ )
+ dbus_path = [path.strip() for path in dbus_path.split(",")]
dbus_path = {"paths": dbus_path}
self.__websocket.send(json.dumps(dbus_path))
diff --git a/lib/external_intf/management_console_utils.py b/lib/external_intf/management_console_utils.py
index d754bea..5f02732 100644
--- a/lib/external_intf/management_console_utils.py
+++ b/lib/external_intf/management_console_utils.py
@@ -1,14 +1,15 @@
#!/usr/bin/env python3
+import json
import os
import re
-import json
-from data import variables
from collections import OrderedDict
-bmc_rec_pattern = '^=(.*)\n(.*)\n(.*)\n(.*)\n(.*)'
-bmc_prop_pattern = [r"\w+", r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}", '443']
-bmc_rec_prop = ['hostname', 'address', 'port', 'txt']
+from data import variables
+
+bmc_rec_pattern = "^=(.*)\n(.*)\n(.*)\n(.*)\n(.*)"
+bmc_prop_pattern = [r"\w+", r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}", "443"]
+bmc_rec_prop = ["hostname", "address", "port", "txt"]
class Exception(Exception):
@@ -30,7 +31,7 @@
try:
for bmc_key, bmc_val in bmc_records.items():
- temp_ip = bmc_val.get('address', None)
+ temp_ip = bmc_val.get("address", None)
if bmc_ip.strip() == temp_ip.strip():
return True
else:
@@ -39,8 +40,9 @@
return exc_obj
-def validate_bmc_properties(bmc_prop_pattern, bmc_prop, bmc_value,
- bmc_rec_valid):
+def validate_bmc_properties(
+ bmc_prop_pattern, bmc_prop, bmc_value, bmc_rec_valid
+):
r"""
This function is to check pattern match in bmc properties.
@@ -52,9 +54,10 @@
"""
try:
- status = \
- [lambda bmc_prop: re.search(bmc_prop_pattern, bmc_prob),
- bmc_value]
+ status = [
+ lambda bmc_prop: re.search(bmc_prop_pattern, bmc_prob),
+ bmc_value,
+ ]
if None in status:
bmc_rec_valid[bmc_prop] = None
except Exception as exc_obj:
@@ -72,14 +75,17 @@
"""
try:
- for bmc_prop_key, bmc_pattern_val in \
- zip(bmc_rec_prop, bmc_prop_pattern):
+ for bmc_prop_key, bmc_pattern_val in zip(
+ bmc_rec_prop, bmc_prop_pattern
+ ):
bmc_prop_value = bmc_rec_valid.get(bmc_prop_key, False)
if bmc_rec_valid[bmc_prop_key] is not False:
- valid_status = validate_bmc_properties(bmc_pattern_val,
- bmc_prop_key,
- bmc_prop_value,
- bmc_rec_valid)
+ valid_status = validate_bmc_properties(
+ bmc_pattern_val,
+ bmc_prop_key,
+ bmc_prop_value,
+ bmc_rec_valid,
+ )
if None not in bmc_rec_valid.values():
return bmc_rec_valid
else:
@@ -112,15 +118,16 @@
exc_obj = None
bmc_inv = OrderedDict()
service_count = 0
- for line in bmc_inv_record.split('\n'):
+ for line in bmc_inv_record.split("\n"):
if line == "":
pass
elif service_type in line:
- bmc_inv['service'] = service_type
+ bmc_inv["service"] = service_type
service_count += 1
- elif not line.startswith('=') and service_count == 1:
- bmc_inv[line.split('=')[0].strip()] = \
- str(line.split('=')[-1].strip())[1:-1]
+ elif not line.startswith("=") and service_count == 1:
+ bmc_inv[line.split("=")[0].strip()] = str(
+ line.split("=")[-1].strip()
+ )[1:-1]
except Exception as exc_obj:
return exc_obj
finally:
@@ -161,10 +168,8 @@
count = 0
exe_obj = None
bmc_inv_list = OrderedDict()
- for match in re.finditer(bmc_rec_pattern, bmc_records,
- re.MULTILINE):
- bmc_record, exc_msg = \
- bmc_inventory(service_type, match.group())
+ for match in re.finditer(bmc_rec_pattern, bmc_records, re.MULTILINE):
+ bmc_record, exc_msg = bmc_inventory(service_type, match.group())
if bmc_record is not None and exc_msg is None:
count += 1
bmc_inv_list[count] = bmc_record
@@ -172,6 +177,6 @@
return exe_obj
finally:
if len(bmc_inv_list) == 0:
- '', exe_obj
+ "", exe_obj
else:
return bmc_inv_list, exe_obj
diff --git a/lib/ffdc_cli_robot_script.py b/lib/ffdc_cli_robot_script.py
index 30fe428..9ff0e99 100644
--- a/lib/ffdc_cli_robot_script.py
+++ b/lib/ffdc_cli_robot_script.py
@@ -3,12 +3,11 @@
import os
import sys
-
from robot.libraries.BuiltIn import BuiltIn as robotBuildIn
sys.path.append(__file__.split(__file__.split("/")[-1])[0] + "../ffdc")
from ffdc_collector import ffdc_collector # NOQA
-from ssh_utility import SSHRemoteclient # NOQA
+from ssh_utility import SSHRemoteclient # NOQA
# (Sub) String constants used for input dictionary key search
HOST = "HOST"
@@ -75,12 +74,15 @@
# When method is invoked with no parm,
# use robot variables
# OPENBMC_HOST, OPENBMC_USERNAME, OPENBMC_PASSWORD, OPENBMC (type)
- dict_of_parms["OPENBMC_HOST"] = \
- robotBuildIn().get_variable_value("${OPENBMC_HOST}", default=None)
- dict_of_parms["OPENBMC_USERNAME"] = \
- robotBuildIn().get_variable_value("${OPENBMC_USERNAME}", default=None)
- dict_of_parms["OPENBMC_PASSWORD"] = \
- robotBuildIn().get_variable_value("${OPENBMC_PASSWORD}", default=None)
+ dict_of_parms["OPENBMC_HOST"] = robotBuildIn().get_variable_value(
+ "${OPENBMC_HOST}", default=None
+ )
+ dict_of_parms["OPENBMC_USERNAME"] = robotBuildIn().get_variable_value(
+ "${OPENBMC_USERNAME}", default=None
+ )
+ dict_of_parms["OPENBMC_PASSWORD"] = robotBuildIn().get_variable_value(
+ "${OPENBMC_PASSWORD}", default=None
+ )
dict_of_parms["REMOTE_TYPE"] = "OPENBMC"
run_ffdc_collector(dict_of_parms)
@@ -153,7 +155,10 @@
# that are not specified with input and have acceptable defaults.
if not location:
# Default FFDC store location
- location = robotBuildIn().get_variable_value("${EXECDIR}", default=None) + "/logs"
+ location = (
+ robotBuildIn().get_variable_value("${EXECDIR}", default=None)
+ + "/logs"
+ )
ffdc_collector.validate_local_store(location)
if not config:
@@ -174,76 +179,83 @@
log_level = "INFO"
# If minimum required inputs are met, go collect.
- if (remote and username and password and remote_type):
+ if remote and username and password and remote_type:
# Execute data collection
- this_ffdc = ffdc_collector(remote,
- username,
- password,
- config,
- location,
- remote_type,
- protocol,
- env_vars,
- econfig,
- log_level)
+ this_ffdc = ffdc_collector(
+ remote,
+ username,
+ password,
+ config,
+ location,
+ remote_type,
+ protocol,
+ env_vars,
+ econfig,
+ log_level,
+ )
this_ffdc.collect_ffdc()
# If original ffdc request is for BMC,
# attempt to also collect ffdc for HOST_OS if possible.
- if remote_type.upper() == 'OPENBMC':
- os_host = \
- robotBuildIn().get_variable_value("${OS_HOST}", default=None)
- os_username = \
- robotBuildIn().get_variable_value("${OS_USERNAME}", default=None)
- os_password = \
- robotBuildIn().get_variable_value("${OS_PASSWORD}", default=None)
+ if remote_type.upper() == "OPENBMC":
+ os_host = robotBuildIn().get_variable_value(
+ "${OS_HOST}", default=None
+ )
+ os_username = robotBuildIn().get_variable_value(
+ "${OS_USERNAME}", default=None
+ )
+ os_password = robotBuildIn().get_variable_value(
+ "${OS_PASSWORD}", default=None
+ )
if os_host and os_username and os_password:
os_type = get_os_type(os_host, os_username, os_password)
if os_type:
- os_ffdc = ffdc_collector(os_host,
- os_username,
- os_password,
- config,
- location,
- os_type,
- protocol,
- env_vars,
- econfig,
- log_level)
+ os_ffdc = ffdc_collector(
+ os_host,
+ os_username,
+ os_password,
+ config,
+ location,
+ os_type,
+ protocol,
+ env_vars,
+ econfig,
+ log_level,
+ )
os_ffdc.collect_ffdc()
def get_os_type(os_host, os_username, os_password):
-
os_type = None
# If HOST_OS is pingable
if os.system("ping -c 1 " + os_host) == 0:
r"""
- Open a ssh connection to targeted system.
+ Open a ssh connection to targeted system.
"""
- ssh_remoteclient = SSHRemoteclient(os_host,
- os_username,
- os_password)
+ ssh_remoteclient = SSHRemoteclient(os_host, os_username, os_password)
if ssh_remoteclient.ssh_remoteclient_login():
-
# Find OS_TYPE
- cmd_exit_code, err, response = \
- ssh_remoteclient.execute_command('uname')
+ cmd_exit_code, err, response = ssh_remoteclient.execute_command(
+ "uname"
+ )
os_type = response.strip()
# If HOST_OS is linux, expands os_type to one of
# the 2 linux distros that have more details in ffdc_config.yaml
- if os_type.upper() == 'LINUX':
- cmd_exit_code, err, response = \
- ssh_remoteclient.execute_command('cat /etc/os-release')
+ if os_type.upper() == "LINUX":
+ (
+ cmd_exit_code,
+ err,
+ response,
+ ) = ssh_remoteclient.execute_command("cat /etc/os-release")
linux_distro = response
- if 'redhat' in linux_distro:
- os_type = 'RHEL'
- elif 'ubuntu' in linux_distro:
- os_type = 'UBUNTU'
+ if "redhat" in linux_distro:
+ os_type = "RHEL"
+ elif "ubuntu" in linux_distro:
+ os_type = "UBUNTU"
if ssh_remoteclient:
ssh_remoteclient.ssh_remoteclient_disconnect()
diff --git a/lib/firmware_utils.py b/lib/firmware_utils.py
index 959693d..61d1e0b 100755
--- a/lib/firmware_utils.py
+++ b/lib/firmware_utils.py
@@ -61,15 +61,17 @@
"""
- cmd_buf = "hdparm -I " + device + " | egrep \":.+\" | sed -re" +\
- " \"s/[ \t]+/ /g\""
+ cmd_buf = (
+ "hdparm -I " + device + ' | egrep ":.+" | sed -re' + ' "s/[ \t]+/ /g"'
+ )
stdout, stderr, rc = bsu.os_execute_command(cmd_buf)
firmware_dict = vf.key_value_outbuf_to_dict(stdout)
cmd_buf = "lsblk -P " + device + " | sed -re 's/\" /\"\\n/g'"
stdout, stderr, rc = bsu.os_execute_command(cmd_buf)
- firmware_dict.update(vf.key_value_outbuf_to_dict(stdout, delim='=',
- strip=" \""))
+ firmware_dict.update(
+ vf.key_value_outbuf_to_dict(stdout, delim="=", strip=' "')
+ )
return firmware_dict
diff --git a/lib/func_args.py b/lib/func_args.py
index 306af38..d779b48 100644
--- a/lib/func_args.py
+++ b/lib/func_args.py
@@ -4,9 +4,10 @@
This module provides argument manipulation functions like pop_arg.
"""
-import gen_print as gp
import collections
+import gen_print as gp
+
def pop_arg(pop_arg_default=None, *args, **kwargs):
r"""
diff --git a/lib/func_timer.py b/lib/func_timer.py
index 852bbf2..fc339c2 100644
--- a/lib/func_timer.py
+++ b/lib/func_timer.py
@@ -5,11 +5,12 @@
"""
import os
-import sys
import signal
+import sys
import time
-import gen_print as gp
+
import gen_misc as gm
+import gen_print as gp
import gen_valid as gv
@@ -30,9 +31,7 @@
second. "sleep 2" is a positional parm for the run_key function.
"""
- def __init__(self,
- obj_name='func_timer_class'):
-
+ def __init__(self, obj_name="func_timer_class"):
# Initialize object variables.
self.__obj_name = obj_name
self.__func = None
@@ -59,9 +58,11 @@
buffer += gp.sprint_var(func_name, indent=indent)
buffer += gp.sprint_varx("time_out", self.__time_out, indent=indent)
buffer += gp.sprint_varx("child_pid", self.__child_pid, indent=indent)
- buffer += gp.sprint_varx("original_SIGUSR1_handler",
- self.__original_SIGUSR1_handler,
- indent=indent)
+ buffer += gp.sprint_varx(
+ "original_SIGUSR1_handler",
+ self.__original_SIGUSR1_handler,
+ indent=indent,
+ )
return buffer
def print_obj(self):
@@ -95,8 +96,7 @@
if self.__original_SIGUSR1_handler != 0:
signal.signal(signal.SIGUSR1, self.__original_SIGUSR1_handler)
try:
- gp.lprint_timen("Killing child pid " + str(self.__child_pid)
- + ".")
+ gp.lprint_timen("Killing child pid " + str(self.__child_pid) + ".")
os.kill(self.__child_pid, signal.SIGKILL)
except OSError:
gp.lprint_timen("Tolerated kill failure.")
@@ -110,9 +110,7 @@
children = gm.get_child_pids()
gp.lprint_var(children)
- def timed_out(self,
- signal_number,
- frame):
+ def timed_out(self, signal_number, frame):
r"""
Handle a SIGUSR1 generated by the child process after the time_out has expired.
@@ -135,7 +133,6 @@
raise ValueError(err_msg)
def run(self, func, *args, **kwargs):
-
r"""
Run the indicated function with the given args and kwargs and return the value that the function
returns. If the time_out value expires, raise a ValueError exception with a detailed error message.
@@ -171,9 +168,9 @@
# Get self.__time_out value from kwargs. If kwargs['time_out'] is not present, self.__time_out will
# default to None.
self.__time_out = None
- if 'time_out' in kwargs:
- self.__time_out = kwargs['time_out']
- del kwargs['time_out']
+ if "time_out" in kwargs:
+ self.__time_out = kwargs["time_out"]
+ del kwargs["time_out"]
# Convert "none" string to None.
try:
if self.__time_out.lower() == "none":
@@ -183,12 +180,13 @@
if self.__time_out is not None:
self.__time_out = int(self.__time_out)
# Ensure that time_out is non-negative.
- message = gv.valid_range(self.__time_out, 0,
- var_name="time_out")
+ message = gv.valid_range(
+ self.__time_out, 0, var_name="time_out"
+ )
if message != "":
- raise ValueError("\n"
- + gp.sprint_error_report(message,
- format='long'))
+ raise ValueError(
+ "\n" + gp.sprint_error_report(message, format="long")
+ )
gp.lprint_varx("time_out", self.__time_out)
self.__child_pid = 0
@@ -200,13 +198,21 @@
parent_pid = os.getpid()
self.__child_pid = os.fork()
if self.__child_pid == 0:
- gp.dprint_timen("Child timer pid " + str(os.getpid())
- + ": Sleeping for " + str(self.__time_out)
- + " seconds.")
+ gp.dprint_timen(
+ "Child timer pid "
+ + str(os.getpid())
+ + ": Sleeping for "
+ + str(self.__time_out)
+ + " seconds."
+ )
time.sleep(self.__time_out)
- gp.dprint_timen("Child timer pid " + str(os.getpid())
- + ": Sending SIGUSR1 to parent pid "
- + str(parent_pid) + ".")
+ gp.dprint_timen(
+ "Child timer pid "
+ + str(os.getpid())
+ + ": Sending SIGUSR1 to parent pid "
+ + str(parent_pid)
+ + "."
+ )
os.kill(parent_pid, signal.SIGUSR1)
os._exit(0)
diff --git a/lib/gen_arg.py b/lib/gen_arg.py
index afa7b57..2d2ed68 100755
--- a/lib/gen_arg.py
+++ b/lib/gen_arg.py
@@ -4,11 +4,13 @@
This module provides valuable argument processing functions like gen_get_options and sprint_args.
"""
-import sys
import os
import re
+import sys
+
try:
import psutil
+
psutil_imported = True
except ImportError:
psutil_imported = False
@@ -16,15 +18,16 @@
import __builtin__
except ImportError:
import builtins as __builtin__
+
+import argparse
import atexit
import signal
-import argparse
import textwrap as textwrap
-import gen_print as gp
-import gen_valid as gv
import gen_cmd as gc
import gen_misc as gm
+import gen_print as gp
+import gen_valid as gv
class MultilineFormatter(argparse.HelpFormatter):
@@ -32,13 +35,20 @@
r"""
Split text into formatted lines for every "%%n" encountered in the text and return the result.
"""
- lines = self._whitespace_matcher.sub(' ', text).strip().split('%n')
- formatted_lines = \
- [textwrap.fill(x, width, initial_indent=indent, subsequent_indent=indent) + '\n' for x in lines]
- return ''.join(formatted_lines)
+ lines = self._whitespace_matcher.sub(" ", text).strip().split("%n")
+ formatted_lines = [
+ textwrap.fill(
+ x, width, initial_indent=indent, subsequent_indent=indent
+ )
+ + "\n"
+ for x in lines
+ ]
+ return "".join(formatted_lines)
-class ArgumentDefaultsHelpMultilineFormatter(MultilineFormatter, argparse.ArgumentDefaultsHelpFormatter):
+class ArgumentDefaultsHelpMultilineFormatter(
+ MultilineFormatter, argparse.ArgumentDefaultsHelpFormatter
+):
pass
@@ -46,8 +56,7 @@
module = sys.modules["__main__"]
-def gen_get_options(parser,
- stock_list=[]):
+def gen_get_options(parser, stock_list=[]):
r"""
Parse the command line arguments using the parser object passed and return True/False (i.e. pass/fail).
However, if gv.exit_on_error is set, simply exit the program on failure. Also set the following built in
@@ -75,11 +84,14 @@
# Process stock_list.
for ix in range(0, len(stock_list)):
if len(stock_list[ix]) < 1:
- error_message = "Programmer error - stock_list[" + str(ix) +\
- "] is supposed to be a tuple containing at" +\
- " least one element which is the name of" +\
- " the desired stock parameter:\n" +\
- gp.sprint_var(stock_list)
+ error_message = (
+ "Programmer error - stock_list["
+ + str(ix)
+ + "] is supposed to be a tuple containing at"
+ + " least one element which is the name of"
+ + " the desired stock parameter:\n"
+ + gp.sprint_var(stock_list)
+ )
return gv.process_error_message(error_message)
if isinstance(stock_list[ix], tuple):
arg_name = stock_list[ix][0]
@@ -89,65 +101,86 @@
default = None
if arg_name not in master_stock_list:
- error_message = "Programmer error - arg_name \"" + arg_name +\
- "\" not found found in stock list:\n" +\
- gp.sprint_var(master_stock_list)
+ error_message = (
+ 'Programmer error - arg_name "'
+ + arg_name
+ + '" not found found in stock list:\n'
+ + gp.sprint_var(master_stock_list)
+ )
return gv.process_error_message(error_message)
if arg_name == "quiet":
if default is None:
default = 0
parser.add_argument(
- '--quiet',
+ "--quiet",
default=default,
type=int,
choices=[1, 0],
help='If this parameter is set to "1", %(prog)s'
- + ' will print only essential information, i.e. it will'
- + ' not echo parameters, echo commands, print the total'
- + ' run time, etc.' + default_string)
+ + " will print only essential information, i.e. it will"
+ + " not echo parameters, echo commands, print the total"
+ + " run time, etc."
+ + default_string,
+ )
elif arg_name == "test_mode":
if default is None:
default = 0
parser.add_argument(
- '--test_mode',
+ "--test_mode",
default=default,
type=int,
choices=[1, 0],
- help='This means that %(prog)s should go through all the'
- + ' motions but not actually do anything substantial.'
- + ' This is mainly to be used by the developer of'
- + ' %(prog)s.' + default_string)
+ help="This means that %(prog)s should go through all the"
+ + " motions but not actually do anything substantial."
+ + " This is mainly to be used by the developer of"
+ + " %(prog)s."
+ + default_string,
+ )
elif arg_name == "debug":
if default is None:
default = 0
parser.add_argument(
- '--debug',
+ "--debug",
default=default,
type=int,
choices=[1, 0],
help='If this parameter is set to "1", %(prog)s will print'
- + ' additional debug information. This is mainly to be'
- + ' used by the developer of %(prog)s.' + default_string)
+ + " additional debug information. This is mainly to be"
+ + " used by the developer of %(prog)s."
+ + default_string,
+ )
elif arg_name == "loglevel":
if default is None:
default = "info"
parser.add_argument(
- '--loglevel',
+ "--loglevel",
default=default,
type=str,
- choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL',
- 'debug', 'info', 'warning', 'error', 'critical'],
+ choices=[
+ "DEBUG",
+ "INFO",
+ "WARNING",
+ "ERROR",
+ "CRITICAL",
+ "debug",
+ "info",
+ "warning",
+ "error",
+ "critical",
+ ],
help='If this parameter is set to "1", %(prog)s will print'
- + ' additional debug information. This is mainly to be'
- + ' used by the developer of %(prog)s.' + default_string)
+ + " additional debug information. This is mainly to be"
+ + " used by the developer of %(prog)s."
+ + default_string,
+ )
arg_obj = parser.parse_args()
__builtin__.quiet = 0
__builtin__.test_mode = 0
__builtin__.debug = 0
- __builtin__.loglevel = 'WARNING'
+ __builtin__.loglevel = "WARNING"
for ix in range(0, len(stock_list)):
if isinstance(stock_list[ix], tuple):
arg_name = stock_list[ix][0]
@@ -170,15 +203,14 @@
# For each command line parameter, create a corresponding global variable and assign it the appropriate
# value. For example, if the command line contained "--last_name='Smith', we'll create a global variable
# named "last_name" with the value "Smith".
- module = sys.modules['__main__']
+ module = sys.modules["__main__"]
for key in arg_obj.__dict__:
setattr(module, key, getattr(__builtin__.arg_obj, key))
return True
-def set_pgm_arg(var_value,
- var_name=None):
+def set_pgm_arg(var_value, var_name=None):
r"""
Set the value of the arg_obj.__dict__ entry named in var_name with the var_value provided. Also, set
corresponding global variable.
@@ -193,7 +225,7 @@
var_name = gp.get_arg_name(None, 1, 2)
arg_obj.__dict__[var_name] = var_value
- module = sys.modules['__main__']
+ module = sys.modules["__main__"]
setattr(module, var_name, var_value)
if var_name == "quiet":
__builtin__.quiet = var_value
@@ -203,8 +235,7 @@
__builtin__.test_mode = var_value
-def sprint_args(arg_obj,
- indent=0):
+def sprint_args(arg_obj, indent=0):
r"""
sprint_var all of the arguments found in arg_obj and return the result as a string.
@@ -218,8 +249,9 @@
buffer = ""
for key in arg_obj.__dict__:
- buffer += gp.sprint_varx(key, getattr(arg_obj, key), 0, indent,
- col1_width)
+ buffer += gp.sprint_varx(
+ key, getattr(arg_obj, key), 0, indent, col1_width
+ )
return buffer
@@ -238,7 +270,7 @@
# Set a default value for dir_path argument.
dir_path = gm.add_trailing_slash(gm.dft(dir_path, os.getcwd()))
"""
- module = sys.modules['__main__']
+ module = sys.modules["__main__"]
for key in arg_obj.__dict__:
arg_obj.__dict__[key] = getattr(module, key)
@@ -266,16 +298,17 @@
global term_options
# Validation:
arg_names = list(kwargs.keys())
- gv.valid_list(arg_names, ['term_requests'])
- if type(kwargs['term_requests']) is dict:
- keys = list(kwargs['term_requests'].keys())
- gv.valid_list(keys, ['pgm_names'])
+ gv.valid_list(arg_names, ["term_requests"])
+ if type(kwargs["term_requests"]) is dict:
+ keys = list(kwargs["term_requests"].keys())
+ gv.valid_list(keys, ["pgm_names"])
else:
- gv.valid_value(kwargs['term_requests'], ['children', 'descendants'])
+ gv.valid_value(kwargs["term_requests"], ["children", "descendants"])
term_options = kwargs
if psutil_imported:
+
def match_process_by_pgm_name(process, pgm_name):
r"""
Return True or False to indicate whether the process matches the program name.
@@ -325,9 +358,10 @@
# Because "sleep" is a compiled executable, it will appear in entry 0.
optional_dir_path_regex = "(.*/)?"
- cmdline = process.as_dict()['cmdline']
- return re.match(optional_dir_path_regex + pgm_name + '( |$)', cmdline[0]) \
- or re.match(optional_dir_path_regex + pgm_name + '( |$)', cmdline[1])
+ cmdline = process.as_dict()["cmdline"]
+ return re.match(
+ optional_dir_path_regex + pgm_name + "( |$)", cmdline[0]
+ ) or re.match(optional_dir_path_regex + pgm_name + "( |$)", cmdline[1])
def select_processes_by_pgm_name(processes, pgm_name):
r"""
@@ -340,7 +374,11 @@
object.
"""
- return [process for process in processes if match_process_by_pgm_name(process, pgm_name)]
+ return [
+ process
+ for process in processes
+ if match_process_by_pgm_name(process, pgm_name)
+ ]
def sprint_process_report(pids):
r"""
@@ -350,7 +388,10 @@
pids A list of process IDs for processes to be included in the report.
"""
report = "\n"
- cmd_buf = "echo ; ps wwo user,pgrp,pid,ppid,lstart,cmd --forest " + ' '.join(pids)
+ cmd_buf = (
+ "echo ; ps wwo user,pgrp,pid,ppid,lstart,cmd --forest "
+ + " ".join(pids)
+ )
report += gp.sprint_issuing(cmd_buf)
rc, outbuf = gc.shell_cmd(cmd_buf, quiet=1)
report += outbuf + "\n"
@@ -371,7 +412,9 @@
descendants = process.children(recursive=True)
descendant_pids = [str(process.pid) for process in descendants]
if descendants:
- process_report = sprint_process_report([str(process.pid)] + descendant_pids)
+ process_report = sprint_process_report(
+ [str(process.pid)] + descendant_pids
+ )
else:
process_report = ""
return descendants, descendant_pids, process_report
@@ -397,12 +440,15 @@
children it produces.
"""
- message = "\n" + gp.sprint_dashes(width=120) \
- + gp.sprint_executing() + "\n"
+ message = (
+ "\n" + gp.sprint_dashes(width=120) + gp.sprint_executing() + "\n"
+ )
current_process = psutil.Process()
- descendants, descendant_pids, process_report = get_descendant_info(current_process)
+ descendants, descendant_pids, process_report = get_descendant_info(
+ current_process
+ )
if not descendants:
# If there are no descendants, then we have nothing to do.
return
@@ -410,38 +456,50 @@
terminate_descendants_temp_file_path = gm.create_temp_file_path()
gp.print_vars(terminate_descendants_temp_file_path)
- message += gp.sprint_varx("pgm_name", gp.pgm_name) \
- + gp.sprint_vars(term_options) \
+ message += (
+ gp.sprint_varx("pgm_name", gp.pgm_name)
+ + gp.sprint_vars(term_options)
+ process_report
+ )
# Process the termination requests:
- if term_options['term_requests'] == 'children':
+ if term_options["term_requests"] == "children":
term_processes = current_process.children(recursive=False)
term_pids = [str(process.pid) for process in term_processes]
- elif term_options['term_requests'] == 'descendants':
+ elif term_options["term_requests"] == "descendants":
term_processes = descendants
term_pids = descendant_pids
else:
# Process term requests by pgm_names.
term_processes = []
- for pgm_name in term_options['term_requests']['pgm_names']:
- term_processes.extend(select_processes_by_pgm_name(descendants, pgm_name))
+ for pgm_name in term_options["term_requests"]["pgm_names"]:
+ term_processes.extend(
+ select_processes_by_pgm_name(descendants, pgm_name)
+ )
term_pids = [str(process.pid) for process in term_processes]
- message += gp.sprint_timen("Processes to be terminated:") \
- + gp.sprint_var(term_pids)
+ message += gp.sprint_timen(
+ "Processes to be terminated:"
+ ) + gp.sprint_var(term_pids)
for process in term_processes:
process.terminate()
- message += gp.sprint_timen("Waiting on the following pids: " + ' '.join(descendant_pids))
+ message += gp.sprint_timen(
+ "Waiting on the following pids: " + " ".join(descendant_pids)
+ )
gm.append_file(terminate_descendants_temp_file_path, message)
psutil.wait_procs(descendants)
# Checking after the fact to see whether any descendant processes are still alive. If so, a process
# report showing this will be included in the output.
- descendants, descendant_pids, process_report = get_descendant_info(current_process)
+ descendants, descendant_pids, process_report = get_descendant_info(
+ current_process
+ )
if descendants:
- message = "\n" + gp.sprint_timen("Not all of the processes terminated:") \
+ message = (
+ "\n"
+ + gp.sprint_timen("Not all of the processes terminated:")
+ process_report
+ )
gm.append_file(terminate_descendants_temp_file_path, message)
message = gp.sprint_dashes(width=120)
@@ -470,8 +528,7 @@
gp.qprint_pgm_footer()
-def gen_signal_handler(signal_number,
- frame):
+def gen_signal_handler(signal_number, frame):
r"""
Handle signals. Without a function to catch a SIGTERM or SIGINT, the program would terminate immediately
with return code 143 and without calling the exit_function.
@@ -486,8 +543,7 @@
exit(0)
-def gen_post_validation(exit_function=None,
- signal_handler=None):
+def gen_post_validation(exit_function=None, signal_handler=None):
r"""
Do generic post-validation processing. By "post", we mean that this is to be called from a validation
function after the caller has done any validation desired. If the calling program passes exit_function
diff --git a/lib/gen_call_robot.py b/lib/gen_call_robot.py
index 2e51626..e873c52 100755
--- a/lib/gen_call_robot.py
+++ b/lib/gen_call_robot.py
@@ -5,21 +5,22 @@
robot program calls.
"""
-import sys
-import os
-import subprocess
-import re
-import time
import imp
+import os
+import re
+import subprocess
+import sys
+import time
+import gen_cmd as gc
+import gen_misc as gm
import gen_print as gp
import gen_valid as gv
-import gen_misc as gm
-import gen_cmd as gc
-base_path = \
- os.path.dirname(os.path.dirname(imp.find_module("gen_robot_print")[1])) +\
- os.sep
+base_path = (
+ os.path.dirname(os.path.dirname(imp.find_module("gen_robot_print")[1]))
+ + os.sep
+)
def init_robot_out_parms(extra_prefix=""):
@@ -44,17 +45,19 @@
# Environment variable TMP_ROBOT_DIR_PATH can be set by the user to indicate that robot-generated output
# should initially be written to the specified temporary directory and then moved to the normal output
# location after completion.
- outputdir =\
- os.environ.get("TMP_ROBOT_DIR_PATH",
- os.environ.get("STATUS_DIR_PATH",
- os.environ.get("HOME", ".")
- + "/status"))
+ outputdir = os.environ.get(
+ "TMP_ROBOT_DIR_PATH",
+ os.environ.get(
+ "STATUS_DIR_PATH", os.environ.get("HOME", ".") + "/status"
+ ),
+ )
outputdir = gm.add_trailing_slash(outputdir)
seconds = time.time()
loc_time = time.localtime(seconds)
time_string = time.strftime("%y%m%d.%H%M%S", loc_time)
- file_prefix = AUTOBOOT_OPENBMC_NICKNAME + "." + extra_prefix +\
- time_string + "."
+ file_prefix = (
+ AUTOBOOT_OPENBMC_NICKNAME + "." + extra_prefix + time_string + "."
+ )
# Environment variable SAVE_STATUS_POLICY governs when robot-generated output files (e.g. the log.html)
# will be moved from TMP_ROBOT_DIR_PATH to FFDC_DIR_PATH. Valid values are "ALWAYS", "NEVER" and "FAIL".
SAVE_STATUS_POLICY = os.environ.get("SAVE_STATUS_POLICY", "ALWAYS")
@@ -67,8 +70,8 @@
log = file_prefix + "log.html"
report = file_prefix + "report.html"
loglevel = "TRACE"
- consolecolors = 'off'
- consolemarkers = 'off'
+ consolecolors = "off"
+ consolemarkers = "off"
# Make create_robot_cmd_string values global.
gm.set_mod_global(outputdir)
@@ -79,7 +82,15 @@
gm.set_mod_global(consolecolors)
gm.set_mod_global(consolemarkers)
- return outputdir, output, log, report, loglevel, consolecolors, consolemarkers
+ return (
+ outputdir,
+ output,
+ log,
+ report,
+ loglevel,
+ consolecolors,
+ consolemarkers,
+ )
def init_robot_test_base_dir_path():
@@ -100,12 +111,13 @@
# - Not in user sandbox:
# ROBOT_TEST_BASE_DIR_PATH will be set to <program dir path>/git/openbmc-test-automation/
- ROBOT_TEST_BASE_DIR_PATH = os.environ.get('ROBOT_TEST_BASE_DIR_PATH', "")
- ROBOT_TEST_RUNNING_FROM_SB = \
- int(os.environ.get('ROBOT_TEST_RUNNING_FROM_SB', "0"))
+ ROBOT_TEST_BASE_DIR_PATH = os.environ.get("ROBOT_TEST_BASE_DIR_PATH", "")
+ ROBOT_TEST_RUNNING_FROM_SB = int(
+ os.environ.get("ROBOT_TEST_RUNNING_FROM_SB", "0")
+ )
if ROBOT_TEST_BASE_DIR_PATH == "":
# ROBOT_TEST_BASE_DIR_PATH was not set by user/caller.
- AUTOIPL_VERSION = os.environ.get('AUTOIPL_VERSION', '')
+ AUTOIPL_VERSION = os.environ.get("AUTOIPL_VERSION", "")
if AUTOIPL_VERSION == "":
ROBOT_TEST_BASE_DIR_PATH = base_path
else:
@@ -113,17 +125,26 @@
# Determine whether we're running out of a developer sandbox or simply out of an apolloxxx/bin
# path.
- shell_rc, out_buf = gc.shell_cmd('dirname $(which gen_print.py)',
- quiet=(not debug), print_output=0)
+ shell_rc, out_buf = gc.shell_cmd(
+ "dirname $(which gen_print.py)",
+ quiet=(not debug),
+ print_output=0,
+ )
executable_base_dir_path = os.path.realpath(out_buf.rstrip()) + "/"
- apollo_dir_path = os.environ['AUTO_BASE_PATH'] + AUTOIPL_VERSION +\
- "/bin/"
- developer_home_dir_path = re.sub('/sandbox.*', '',
- executable_base_dir_path)
- developer_home_dir_path = \
- gm.add_trailing_slash(developer_home_dir_path)
- gp.dprint_vars(executable_base_dir_path, developer_home_dir_path,
- apollo_dir_path)
+ apollo_dir_path = (
+ os.environ["AUTO_BASE_PATH"] + AUTOIPL_VERSION + "/bin/"
+ )
+ developer_home_dir_path = re.sub(
+ "/sandbox.*", "", executable_base_dir_path
+ )
+ developer_home_dir_path = gm.add_trailing_slash(
+ developer_home_dir_path
+ )
+ gp.dprint_vars(
+ executable_base_dir_path,
+ developer_home_dir_path,
+ apollo_dir_path,
+ )
ROBOT_TEST_RUNNING_FROM_SB = 0
if executable_base_dir_path != apollo_dir_path:
@@ -131,46 +152,61 @@
gp.dprint_vars(ROBOT_TEST_RUNNING_FROM_SB)
ROBOT_TEST_BASE_DIR_PATH = developer_home_dir_path + suffix
if not os.path.isdir(ROBOT_TEST_BASE_DIR_PATH):
- gp.dprint_timen("NOTE: Sandbox directory "
- + ROBOT_TEST_BASE_DIR_PATH + " does not"
- + " exist.")
+ gp.dprint_timen(
+ "NOTE: Sandbox directory "
+ + ROBOT_TEST_BASE_DIR_PATH
+ + " does not"
+ + " exist."
+ )
# Fall back to the apollo dir path.
ROBOT_TEST_BASE_DIR_PATH = apollo_dir_path + suffix
else:
# Use to the apollo dir path.
ROBOT_TEST_BASE_DIR_PATH = apollo_dir_path + suffix
- OBMC_TOOLS_BASE_DIR_PATH = \
- os.path.dirname(ROBOT_TEST_BASE_DIR_PATH.rstrip("/")) \
+ OBMC_TOOLS_BASE_DIR_PATH = (
+ os.path.dirname(ROBOT_TEST_BASE_DIR_PATH.rstrip("/"))
+ "/openbmc-tools/"
+ )
OPENBMCTOOL_DIR_PATH = OBMC_TOOLS_BASE_DIR_PATH + "openbmctool/"
- JSON_CHECKER_TOOLS_DIR_PATH = OBMC_TOOLS_BASE_DIR_PATH + "expectedJsonChecker/"
+ JSON_CHECKER_TOOLS_DIR_PATH = (
+ OBMC_TOOLS_BASE_DIR_PATH + "expectedJsonChecker/"
+ )
gv.valid_value(ROBOT_TEST_BASE_DIR_PATH)
- gp.dprint_vars(ROBOT_TEST_RUNNING_FROM_SB, ROBOT_TEST_BASE_DIR_PATH, OBMC_TOOLS_BASE_DIR_PATH,
- OPENBMCTOOL_DIR_PATH, JSON_CHECKER_TOOLS_DIR_PATH)
+ gp.dprint_vars(
+ ROBOT_TEST_RUNNING_FROM_SB,
+ ROBOT_TEST_BASE_DIR_PATH,
+ OBMC_TOOLS_BASE_DIR_PATH,
+ OPENBMCTOOL_DIR_PATH,
+ JSON_CHECKER_TOOLS_DIR_PATH,
+ )
gv.valid_dir_path(ROBOT_TEST_BASE_DIR_PATH)
ROBOT_TEST_BASE_DIR_PATH = gm.add_trailing_slash(ROBOT_TEST_BASE_DIR_PATH)
gm.set_mod_global(ROBOT_TEST_BASE_DIR_PATH)
- os.environ['ROBOT_TEST_BASE_DIR_PATH'] = ROBOT_TEST_BASE_DIR_PATH
+ os.environ["ROBOT_TEST_BASE_DIR_PATH"] = ROBOT_TEST_BASE_DIR_PATH
gm.set_mod_global(ROBOT_TEST_RUNNING_FROM_SB)
- os.environ['ROBOT_TEST_RUNNING_FROM_SB'] = str(ROBOT_TEST_RUNNING_FROM_SB)
+ os.environ["ROBOT_TEST_RUNNING_FROM_SB"] = str(ROBOT_TEST_RUNNING_FROM_SB)
gm.set_mod_global(OBMC_TOOLS_BASE_DIR_PATH)
- os.environ['OBMC_TOOLS_BASE_DIR_PATH'] = str(OBMC_TOOLS_BASE_DIR_PATH)
+ os.environ["OBMC_TOOLS_BASE_DIR_PATH"] = str(OBMC_TOOLS_BASE_DIR_PATH)
gm.set_mod_global(OPENBMCTOOL_DIR_PATH)
- os.environ['OPENBMCTOOL_DIR_PATH'] = str(OPENBMCTOOL_DIR_PATH)
+ os.environ["OPENBMCTOOL_DIR_PATH"] = str(OPENBMCTOOL_DIR_PATH)
gm.set_mod_global(JSON_CHECKER_TOOLS_DIR_PATH)
- os.environ['JSON_CHECKER_TOOLS_DIR_PATH'] = str(JSON_CHECKER_TOOLS_DIR_PATH)
+ os.environ["JSON_CHECKER_TOOLS_DIR_PATH"] = str(
+ JSON_CHECKER_TOOLS_DIR_PATH
+ )
-raw_robot_file_search_path = "${ROBOT_TEST_BASE_DIR_PATH}:" +\
- "${ROBOT_TEST_BASE_DIR_PATH}tests:${ROBOT_TEST_BASE_DIR_PATH}extended:" +\
- "${ROBOT_TEST_BASE_DIR_PATH}scratch:${PATH}"
+raw_robot_file_search_path = (
+ "${ROBOT_TEST_BASE_DIR_PATH}:"
+ + "${ROBOT_TEST_BASE_DIR_PATH}tests:${ROBOT_TEST_BASE_DIR_PATH}extended:"
+ + "${ROBOT_TEST_BASE_DIR_PATH}scratch:${PATH}"
+)
def init_robot_file_path(robot_file_path):
@@ -206,12 +242,13 @@
gp.dprint_vars(abs_path, robot_file_path)
if not abs_path:
- cmd_buf = "echo -n \"" + raw_robot_file_search_path + "\""
- shell_rc, out_buf = gc.shell_cmd(cmd_buf, quiet=(not debug),
- print_output=0)
+ cmd_buf = 'echo -n "' + raw_robot_file_search_path + '"'
+ shell_rc, out_buf = gc.shell_cmd(
+ cmd_buf, quiet=(not debug), print_output=0
+ )
robot_file_search_paths = out_buf
gp.dprint_var(robot_file_search_paths)
- robot_file_search_paths_list = robot_file_search_paths.split(':')
+ robot_file_search_paths_list = robot_file_search_paths.split(":")
for search_path in robot_file_search_paths_list:
search_path = gm.add_trailing_slash(search_path)
candidate_file_path = search_path + robot_file_path
@@ -233,9 +270,11 @@
Double dashes are not included in the names returned.
"""
- cmd_buf = "robot -h | egrep " +\
- "'^([ ]\\-[a-zA-Z0-9])?[ ]+--[a-zA-Z0-9]+[ ]+' | sed -re" +\
- " s'/.*\\-\\-//g' -e s'/ .*//g' | sort -u"
+ cmd_buf = (
+ "robot -h | egrep "
+ + "'^([ ]\\-[a-zA-Z0-9])?[ ]+--[a-zA-Z0-9]+[ ]+' | sed -re"
+ + " s'/.*\\-\\-//g' -e s'/ .*//g' | sort -u"
+ )
shell_rc, out_buf = gc.shell_cmd(cmd_buf, quiet=1, print_output=0)
return out_buf.split("\n")
@@ -285,8 +324,9 @@
robot_parm_list.append(p_string)
ix += 1
- robot_cmd_buf = "robot " + ' '.join(robot_parm_list) + " " +\
- robot_file_path
+ robot_cmd_buf = (
+ "robot " + " ".join(robot_parm_list) + " " + robot_file_path
+ )
return robot_cmd_buf
@@ -296,9 +336,7 @@
gcr_last_robot_rc = 0
-def process_robot_output_files(robot_cmd_buf=None,
- robot_rc=None,
- gzip=None):
+def process_robot_output_files(robot_cmd_buf=None, robot_rc=None, gzip=None):
r"""
Process robot output files which can involve several operations:
- If the files are in a temporary location, using SAVE_STATUS_POLICY to decide whether to move them to a
@@ -329,25 +367,36 @@
# Compose file_list based on robot command buffer passed in.
robot_cmd_buf_dict = gc.parse_command_string(robot_cmd_buf)
- outputdir = robot_cmd_buf_dict['outputdir']
+ outputdir = robot_cmd_buf_dict["outputdir"]
outputdir = gm.add_trailing_slash(outputdir)
- file_list = outputdir + robot_cmd_buf_dict['output'] + " " + outputdir\
- + robot_cmd_buf_dict['log'] + " " + outputdir\
- + robot_cmd_buf_dict['report']
+ file_list = (
+ outputdir
+ + robot_cmd_buf_dict["output"]
+ + " "
+ + outputdir
+ + robot_cmd_buf_dict["log"]
+ + " "
+ + outputdir
+ + robot_cmd_buf_dict["report"]
+ )
# Double checking that files are present.
- shell_rc, out_buf = gc.shell_cmd("ls -1 " + file_list + " 2>/dev/null",
- show_err=0)
+ shell_rc, out_buf = gc.shell_cmd(
+ "ls -1 " + file_list + " 2>/dev/null", show_err=0
+ )
file_list = re.sub("\n", " ", out_buf.rstrip("\n"))
if file_list == "":
- gp.qprint_timen("No robot output files were found in " + outputdir
- + ".")
+ gp.qprint_timen(
+ "No robot output files were found in " + outputdir + "."
+ )
return
gp.qprint_var(robot_rc, gp.hexa())
if SAVE_STATUS_POLICY == "FAIL" and robot_rc == 0:
- gp.qprint_timen("The call to robot produced no failures."
- + " Deleting robot output files.")
+ gp.qprint_timen(
+ "The call to robot produced no failures."
+ + " Deleting robot output files."
+ )
gc.shell_cmd("rm -rf " + file_list)
return
@@ -363,23 +412,29 @@
return
# We're directing these to the FFDC dir path so that they'll be subjected to FFDC cleanup.
- target_dir_path = os.environ.get("FFDC_DIR_PATH",
- os.environ.get("HOME", ".")
- + "/ffdc")
+ target_dir_path = os.environ.get(
+ "FFDC_DIR_PATH", os.environ.get("HOME", ".") + "/ffdc"
+ )
target_dir_path = gm.add_trailing_slash(target_dir_path)
- targ_file_list = [re.sub(".*/", target_dir_path, x)
- for x in file_list.split(" ")]
+ targ_file_list = [
+ re.sub(".*/", target_dir_path, x) for x in file_list.split(" ")
+ ]
- gc.shell_cmd("mv " + file_list + " " + target_dir_path + " >/dev/null",
- time_out=600)
+ gc.shell_cmd(
+ "mv " + file_list + " " + target_dir_path + " >/dev/null", time_out=600
+ )
gp.qprint_timen("New robot log file locations:")
- gp.qprintn('\n'.join(targ_file_list))
+ gp.qprintn("\n".join(targ_file_list))
-def robot_cmd_fnc(robot_cmd_buf,
- robot_jail=os.environ.get('ROBOT_JAIL', ''), quiet=None, test_mode=0):
+def robot_cmd_fnc(
+ robot_cmd_buf,
+ robot_jail=os.environ.get("ROBOT_JAIL", ""),
+ quiet=None,
+ test_mode=0,
+):
r"""
Run the robot command string.
@@ -393,7 +448,7 @@
test_mode If test_mode is set, this function will not actually run the command.
"""
- quiet = int(gm.dft(quiet, gp.get_stack_var('quiet', 0)))
+ quiet = int(gm.dft(quiet, gp.get_stack_var("quiet", 0)))
gv.valid_value(robot_cmd_buf)
# Set global variables to aid in cleanup with process_robot_output_files.
@@ -409,7 +464,9 @@
init_robot_test_base_dir_path()
ROBOT_TEST_BASE_DIR_PATH = getattr(module, "ROBOT_TEST_BASE_DIR_PATH")
- ROBOT_TEST_RUNNING_FROM_SB = gm.get_mod_global("ROBOT_TEST_RUNNING_FROM_SB")
+ ROBOT_TEST_RUNNING_FROM_SB = gm.get_mod_global(
+ "ROBOT_TEST_RUNNING_FROM_SB"
+ )
OPENBMCTOOL_DIR_PATH = gm.get_mod_global("OPENBMCTOOL_DIR_PATH")
if robot_jail == "":
@@ -419,9 +476,13 @@
robot_jail = 1
robot_jail = int(robot_jail)
- ROBOT_JAIL = os.environ.get('ROBOT_JAIL', '')
- gp.dprint_vars(ROBOT_TEST_BASE_DIR_PATH, ROBOT_TEST_RUNNING_FROM_SB,
- ROBOT_JAIL, robot_jail)
+ ROBOT_JAIL = os.environ.get("ROBOT_JAIL", "")
+ gp.dprint_vars(
+ ROBOT_TEST_BASE_DIR_PATH,
+ ROBOT_TEST_RUNNING_FROM_SB,
+ ROBOT_JAIL,
+ robot_jail,
+ )
# Save PATH and PYTHONPATH to be restored later.
os.environ["SAVED_PYTHONPATH"] = os.environ.get("PYTHONPATH", "")
@@ -433,28 +494,50 @@
# It is expected that there will be a "python" program in the tool base bin path which is really a
# link to select_version. Ditto for "robot". Call each with the --print_only option to get the
# paths to the "real" programs.
- cmd_buf = "for program in " + required_programs \
+ cmd_buf = (
+ "for program in "
+ + required_programs
+ " ; do dirname $(${program} --print_only) ; done 2>/dev/null"
+ )
rc, out_buf = gc.shell_cmd(cmd_buf, quiet=1, print_output=0)
PYTHONPATH = ROBOT_TEST_BASE_DIR_PATH + "lib"
NEW_PATH_LIST = [ROBOT_TEST_BASE_DIR_PATH + "bin"]
NEW_PATH_LIST.extend(list(set(out_buf.rstrip("\n").split("\n"))))
- NEW_PATH_LIST.extend(["/usr/local/sbin", "/usr/local/bin", "/usr/sbin",
- "/usr/bin", "/sbin", "/bin",
- OPENBMCTOOL_DIR_PATH.rstrip('/')])
+ NEW_PATH_LIST.extend(
+ [
+ "/usr/local/sbin",
+ "/usr/local/bin",
+ "/usr/sbin",
+ "/usr/bin",
+ "/sbin",
+ "/bin",
+ OPENBMCTOOL_DIR_PATH.rstrip("/"),
+ ]
+ )
PATH = ":".join(NEW_PATH_LIST)
else:
- PYTHONPATH = os.environ.get('PYTHONPATH', '') + ":" +\
- ROBOT_TEST_BASE_DIR_PATH + "lib"
- PATH = os.environ.get('PATH', '') + ":" + ROBOT_TEST_BASE_DIR_PATH +\
- "bin" + ":" + OPENBMCTOOL_DIR_PATH.rstrip('/')
+ PYTHONPATH = (
+ os.environ.get("PYTHONPATH", "")
+ + ":"
+ + ROBOT_TEST_BASE_DIR_PATH
+ + "lib"
+ )
+ PATH = (
+ os.environ.get("PATH", "")
+ + ":"
+ + ROBOT_TEST_BASE_DIR_PATH
+ + "bin"
+ + ":"
+ + OPENBMCTOOL_DIR_PATH.rstrip("/")
+ )
- os.environ['PYTHONPATH'] = PYTHONPATH
- os.environ['PATH'] = PATH
+ os.environ["PYTHONPATH"] = PYTHONPATH
+ os.environ["PATH"] = PATH
gp.dprint_vars(PATH, PYTHONPATH)
- os.environ['FFDC_DIR_PATH_STYLE'] = os.environ.get('FFDC_DIR_PATH_STYLE',
- '1')
+ os.environ["FFDC_DIR_PATH_STYLE"] = os.environ.get(
+ "FFDC_DIR_PATH_STYLE", "1"
+ )
gp.qpissuing(robot_cmd_buf, test_mode)
if test_mode:
os.environ["PATH"] = os.environ.get("SAVED_PATH", "")
@@ -462,7 +545,7 @@
return True
if quiet:
- DEVNULL = open(os.devnull, 'wb')
+ DEVNULL = open(os.devnull, "wb")
stdout = DEVNULL
else:
stdout = None
diff --git a/lib/gen_cmd.py b/lib/gen_cmd.py
index cac5ba4..03e8a11 100644
--- a/lib/gen_cmd.py
+++ b/lib/gen_cmd.py
@@ -4,19 +4,19 @@
This module provides command execution functions such as cmd_fnc and cmd_fnc_u.
"""
-import os
-import sys
-import subprocess
import collections
-import signal
-import time
-import re
import inspect
+import os
+import re
+import signal
+import subprocess
+import sys
+import time
+import func_args as fa
+import gen_misc as gm
import gen_print as gp
import gen_valid as gv
-import gen_misc as gm
-import func_args as fa
robot_env = gp.robot_env
@@ -26,14 +26,16 @@
# cmd_fnc and cmd_fnc_u should now be considered deprecated. shell_cmd and t_shell_cmd should be used
# instead.
-def cmd_fnc(cmd_buf,
- quiet=None,
- test_mode=None,
- debug=0,
- print_output=1,
- show_err=1,
- return_stderr=0,
- ignore_err=1):
+def cmd_fnc(
+ cmd_buf,
+ quiet=None,
+ test_mode=None,
+ debug=0,
+ print_output=1,
+ show_err=1,
+ return_stderr=0,
+ ignore_err=1,
+):
r"""
Run the given command in a shell and return the shell return code and the output.
@@ -80,13 +82,15 @@
else:
stderr = subprocess.STDOUT
- sub_proc = subprocess.Popen(cmd_buf,
- bufsize=1,
- shell=True,
- universal_newlines=True,
- executable='/bin/bash',
- stdout=subprocess.PIPE,
- stderr=stderr)
+ sub_proc = subprocess.Popen(
+ cmd_buf,
+ bufsize=1,
+ shell=True,
+ universal_newlines=True,
+ executable="/bin/bash",
+ stdout=subprocess.PIPE,
+ stderr=stderr,
+ )
out_buf = ""
if return_stderr:
for line in sub_proc.stderr:
@@ -131,22 +135,31 @@
return shell_rc, out_buf
-def cmd_fnc_u(cmd_buf,
- quiet=None,
- debug=None,
- print_output=1,
- show_err=1,
- return_stderr=0,
- ignore_err=1):
+def cmd_fnc_u(
+ cmd_buf,
+ quiet=None,
+ debug=None,
+ print_output=1,
+ show_err=1,
+ return_stderr=0,
+ ignore_err=1,
+):
r"""
Call cmd_fnc with test_mode=0. See cmd_fnc (above) for details.
Note the "u" in "cmd_fnc_u" stands for "unconditional".
"""
- return cmd_fnc(cmd_buf, test_mode=0, quiet=quiet, debug=debug,
- print_output=print_output, show_err=show_err,
- return_stderr=return_stderr, ignore_err=ignore_err)
+ return cmd_fnc(
+ cmd_buf,
+ test_mode=0,
+ quiet=quiet,
+ debug=debug,
+ print_output=print_output,
+ show_err=show_err,
+ return_stderr=return_stderr,
+ ignore_err=ignore_err,
+ )
def parse_command_string(command_string):
@@ -200,16 +213,18 @@
# We want the parms in the string broken down the way bash would do it, so we'll call upon bash to do
# that by creating a simple inline bash function.
- bash_func_def = "function parse { for parm in \"${@}\" ; do" +\
- " echo $parm ; done ; }"
+ bash_func_def = (
+ 'function parse { for parm in "${@}" ; do' + " echo $parm ; done ; }"
+ )
- rc, outbuf = cmd_fnc_u(bash_func_def + " ; parse " + command_string,
- quiet=1, print_output=0)
+ rc, outbuf = cmd_fnc_u(
+ bash_func_def + " ; parse " + command_string, quiet=1, print_output=0
+ )
command_string_list = outbuf.rstrip("\n").split("\n")
command_string_dict = collections.OrderedDict()
ix = 1
- command_string_dict['command'] = command_string_list[0]
+ command_string_dict["command"] = command_string_list[0]
while ix < len(command_string_list):
if command_string_list[ix].startswith("--"):
key, value = command_string_list[ix].split("=")
@@ -222,7 +237,7 @@
except IndexError:
value = ""
else:
- key = 'positional'
+ key = "positional"
value = command_string_list[ix]
if key in command_string_dict:
if isinstance(command_string_dict[key], str):
@@ -239,8 +254,7 @@
original_sigalrm_handler = signal.getsignal(signal.SIGALRM)
-def shell_cmd_timed_out(signal_number,
- frame):
+def shell_cmd_timed_out(signal_number, frame):
r"""
Handle an alarm signal generated during the shell_cmd function.
"""
@@ -249,7 +263,7 @@
global command_timed_out
command_timed_out = True
# Get subprocess pid from shell_cmd's call stack.
- sub_proc = gp.get_stack_var('sub_proc', 0)
+ sub_proc = gp.get_stack_var("sub_proc", 0)
pid = sub_proc.pid
gp.dprint_var(pid)
# Terminate the child process group.
@@ -260,19 +274,21 @@
return
-def shell_cmd(command_string,
- quiet=None,
- print_output=None,
- show_err=1,
- test_mode=0,
- time_out=None,
- max_attempts=1,
- retry_sleep_time=5,
- valid_rcs=[0],
- ignore_err=None,
- return_stderr=0,
- fork=0,
- error_regexes=None):
+def shell_cmd(
+ command_string,
+ quiet=None,
+ print_output=None,
+ show_err=1,
+ test_mode=0,
+ time_out=None,
+ max_attempts=1,
+ retry_sleep_time=5,
+ valid_rcs=[0],
+ ignore_err=None,
+ return_stderr=0,
+ fork=0,
+ error_regexes=None,
+):
r"""
Run the given command string in a shell and return a tuple consisting of the shell return code and the
output.
@@ -330,10 +346,10 @@
raise ValueError(err_msg)
# Assign default values to some of the arguments to this function.
- quiet = int(gm.dft(quiet, gp.get_stack_var('quiet', 0)))
+ quiet = int(gm.dft(quiet, gp.get_stack_var("quiet", 0)))
print_output = int(gm.dft(print_output, not quiet))
show_err = int(show_err)
- ignore_err = int(gm.dft(ignore_err, gp.get_stack_var('ignore_err', 1)))
+ ignore_err = int(gm.dft(ignore_err, gp.get_stack_var("ignore_err", 1)))
gp.qprint_issuing(command_string, test_mode)
if test_mode:
@@ -353,14 +369,16 @@
command_timed_out = False
func_out_history_buf = ""
for attempt_num in range(1, max_attempts + 1):
- sub_proc = subprocess.Popen(command_string,
- bufsize=1,
- shell=True,
- universal_newlines=True,
- executable='/bin/bash',
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=stderr)
+ sub_proc = subprocess.Popen(
+ command_string,
+ bufsize=1,
+ shell=True,
+ universal_newlines=True,
+ executable="/bin/bash",
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=stderr,
+ )
if fork:
return sub_proc
@@ -387,7 +405,7 @@
shell_rc = sub_proc.returncode
if shell_rc in valid_rcs:
# Check output for text indicating there is an error.
- if error_regexes and re.match('|'.join(error_regexes), stdout_buf):
+ if error_regexes and re.match("|".join(error_regexes), stdout_buf):
shell_rc = -1
else:
break
@@ -415,21 +433,25 @@
gp.gp_print(func_out_buf)
else:
if show_err:
- gp.gp_print(func_out_history_buf, stream='stderr')
+ gp.gp_print(func_out_history_buf, stream="stderr")
else:
# There is no error information to show so just print output from last loop iteration.
gp.gp_print(func_out_buf)
if not ignore_err:
# If the caller has already asked to show error info, avoid repeating that in the failure message.
- err_msg = "The prior shell command failed.\n" if show_err \
- else err_msg
+ err_msg = (
+ "The prior shell command failed.\n" if show_err else err_msg
+ )
if robot_env:
BuiltIn().fail(err_msg)
else:
raise ValueError(err_msg)
- return (shell_rc, stdout_buf, stderr_buf) if return_stderr \
+ return (
+ (shell_rc, stdout_buf, stderr_buf)
+ if return_stderr
else (shell_rc, stdout_buf)
+ )
def t_shell_cmd(command_string, **kwargs):
@@ -440,14 +462,16 @@
See shell_cmd prolog for details on all arguments.
"""
- if 'test_mode' in kwargs:
- error_message = "Programmer error - test_mode is not a valid" +\
- " argument to this function."
+ if "test_mode" in kwargs:
+ error_message = (
+ "Programmer error - test_mode is not a valid"
+ + " argument to this function."
+ )
gp.print_error_report(error_message)
exit(1)
- test_mode = int(gp.get_stack_var('test_mode', 0))
- kwargs['test_mode'] = test_mode
+ test_mode = int(gp.get_stack_var("test_mode", 0))
+ kwargs["test_mode"] = test_mode
return shell_cmd(command_string, **kwargs)
@@ -521,8 +545,9 @@
new_kwargs = collections.OrderedDict()
# Get position number of first keyword on the calling line of code.
- (args, varargs, keywords, locals) =\
- inspect.getargvalues(inspect.stack()[stack_frame_ix][0])
+ (args, varargs, keywords, locals) = inspect.getargvalues(
+ inspect.stack()[stack_frame_ix][0]
+ )
first_kwarg_pos = 1 + len(args)
if varargs is not None:
first_kwarg_pos += len(locals[varargs])
@@ -531,7 +556,7 @@
arg_name = gp.get_arg_name(None, arg_num, stack_frame_ix + 2)
# Continuing with the prior example, the following line will result
# in key being set to 'arg1'.
- key = arg_name.split('=')[0]
+ key = arg_name.split("=")[0]
new_kwargs[key] = kwargs[key]
return new_kwargs
@@ -684,7 +709,7 @@
del pos_parms[-1]
else:
# Either get stack_frame_ix from the caller via options or set it to the default value.
- stack_frame_ix = options.pop('_stack_frame_ix_', 1)
+ stack_frame_ix = options.pop("_stack_frame_ix_", 1)
if gm.python_version < gm.ordered_dict_version:
# Re-establish the original options order as specified on the original line of code. This
# function depends on correct order.
@@ -709,6 +734,6 @@
command_string += gm.quote_bash_parm(str(value))
# Finally, append the pos_parms to the end of the command_string. Use filter to eliminate blank pos
# parms.
- command_string = ' '.join([command_string] + list(filter(None, pos_parms)))
+ command_string = " ".join([command_string] + list(filter(None, pos_parms)))
return command_string
diff --git a/lib/gen_misc.py b/lib/gen_misc.py
index ad5beea..d7a6373 100755
--- a/lib/gen_misc.py
+++ b/lib/gen_misc.py
@@ -4,16 +4,18 @@
This module provides many valuable functions such as my_parm_file.
"""
+import collections
+import errno
+import inspect
+import json
+import os
+import random
+import shutil
+
# sys and os are needed to get the program dir path and program name.
import sys
-import errno
-import os
-import shutil
-import collections
-import json
import time
-import inspect
-import random
+
try:
import ConfigParser
except ImportError:
@@ -22,17 +24,20 @@
import StringIO
except ImportError:
import io
+
import re
import socket
import tempfile
+
try:
import psutil
+
psutil_imported = True
except ImportError:
psutil_imported = False
-import gen_print as gp
import gen_cmd as gc
+import gen_print as gp
robot_env = gp.robot_env
if robot_env:
@@ -65,7 +70,7 @@
mode The mode or permissions to be granted to the created directories.
quiet Indicates whether this function should run the print_issuing() function.
"""
- quiet = int(dft(quiet, gp.get_stack_var('quiet', 0)))
+ quiet = int(dft(quiet, gp.get_stack_var("quiet", 0)))
gp.qprint_issuing("os.makedirs('" + path + "', mode=" + oct(mode) + ")")
try:
os.makedirs(path, mode)
@@ -84,9 +89,11 @@
(All parms are passed directly to shutil.rmtree. See its prolog for details)
quiet Indicates whether this function should run the print_issuing() function.
"""
- quiet = int(dft(quiet, gp.get_stack_var('quiet', 0)))
+ quiet = int(dft(quiet, gp.get_stack_var("quiet", 0)))
print_string = gp.sprint_executing(max_width=2000)
- print_string = re.sub(r"Executing: ", "Issuing: shutil.", print_string.rstrip("\n"))
+ print_string = re.sub(
+ r"Executing: ", "Issuing: shutil.", print_string.rstrip("\n")
+ )
gp.qprintn(re.sub(r", quiet[ ]?=.*", ")", print_string))
shutil.rmtree(path, ignore_errors, onerror)
@@ -102,7 +109,7 @@
path The path of the directory to change to.
quiet Indicates whether this function should run the print_issuing() function.
"""
- quiet = int(dft(quiet, gp.get_stack_var('quiet', 0)))
+ quiet = int(dft(quiet, gp.get_stack_var("quiet", 0)))
gp.qprint_issuing("os.chdir('" + path + "')")
os.chdir(path)
@@ -117,11 +124,13 @@
file_path The relative file path (e.g. "my_file" or "lib/my_file").
"""
- shell_rc, out_buf = gc.cmd_fnc_u("which " + file_path, quiet=1,
- print_output=0, show_err=0)
+ shell_rc, out_buf = gc.cmd_fnc_u(
+ "which " + file_path, quiet=1, print_output=0, show_err=0
+ )
if shell_rc != 0:
- error_message = "Failed to find complete path for file \"" +\
- file_path + "\".\n"
+ error_message = (
+ 'Failed to find complete path for file "' + file_path + '".\n'
+ )
error_message += gp.sprint_var(shell_rc, gp.hexa())
error_message += out_buf
if robot_env:
@@ -135,9 +144,7 @@
return file_path
-def add_path(new_path,
- path,
- position=0):
+def add_path(new_path, path, position=0):
r"""
Add new_path to path, provided that path doesn't already contain new_path, and return the result.
@@ -183,9 +190,7 @@
return default if value is None else value
-def get_mod_global(var_name,
- default=None,
- mod_name="__main__"):
+def get_mod_global(var_name, default=None, mod_name="__main__"):
r"""
Get module global variable value and return it.
@@ -204,10 +209,12 @@
try:
module = sys.modules[mod_name]
except KeyError:
- gp.print_error_report("Programmer error - The mod_name passed to"
- + " this function is invalid:\n"
- + gp.sprint_var(mod_name))
- raise ValueError('Programmer error.')
+ gp.print_error_report(
+ "Programmer error - The mod_name passed to"
+ + " this function is invalid:\n"
+ + gp.sprint_var(mod_name)
+ )
+ raise ValueError("Programmer error.")
if default is None:
return getattr(module, var_name)
@@ -215,8 +222,7 @@
return getattr(module, var_name, default)
-def global_default(var_value,
- default=0):
+def global_default(var_value, default=0):
r"""
If var_value is not None, return it. Otherwise, return the global
variable of the same name, if it exists. If not, return default.
@@ -239,9 +245,7 @@
return dft(var_value, get_mod_global(var_name, 0))
-def set_mod_global(var_value,
- mod_name="__main__",
- var_name=None):
+def set_mod_global(var_value, mod_name="__main__", var_name=None):
r"""
Set a global variable for a given module.
@@ -255,10 +259,12 @@
try:
module = sys.modules[mod_name]
except KeyError:
- gp.print_error_report("Programmer error - The mod_name passed to"
- + " this function is invalid:\n"
- + gp.sprint_var(mod_name))
- raise ValueError('Programmer error.')
+ gp.print_error_report(
+ "Programmer error - The mod_name passed to"
+ + " this function is invalid:\n"
+ + gp.sprint_var(mod_name)
+ )
+ raise ValueError("Programmer error.")
if var_name is None:
var_name = gp.get_arg_name(None, 1, 2)
@@ -292,7 +298,7 @@
string_file = io.StringIO()
# Write the dummy section header to the string file.
- string_file.write('[dummysection]\n')
+ string_file.write("[dummysection]\n")
# Write the entire contents of the properties file to the string file.
string_file.write(open(prop_file_path).read())
# Rewind the string file.
@@ -309,15 +315,12 @@
config_parser.readfp(string_file)
# Return the properties as a dictionary.
if robot_env:
- return DotDict(config_parser.items('dummysection'))
+ return DotDict(config_parser.items("dummysection"))
else:
- return collections.OrderedDict(config_parser.items('dummysection'))
+ return collections.OrderedDict(config_parser.items("dummysection"))
-def file_to_list(file_path,
- newlines=0,
- comments=1,
- trim=0):
+def file_to_list(file_path, newlines=0, comments=1, trim=0):
r"""
Return the contents of a file as a list. Each element of the resulting
list is one line from the file.
@@ -354,7 +357,7 @@
See file_to_list defined above for description of arguments.
"""
- return '\n'.join(file_to_list(*args, **kwargs))
+ return "\n".join(file_to_list(*args, **kwargs))
def append_file(file_path, buffer):
@@ -376,7 +379,7 @@
the list will be normalized and have a trailing slash added.
"""
- PATH_LIST = os.environ['PATH'].split(":")
+ PATH_LIST = os.environ["PATH"].split(":")
PATH_LIST = [os.path.normpath(path) + os.sep for path in PATH_LIST]
return PATH_LIST
@@ -402,7 +405,7 @@
buffer The string whose quotes are to be escaped.
"""
- return re.sub("\'", "\'\\\'\'", buffer)
+ return re.sub("'", "'\\''", buffer)
def quote_bash_parm(parm):
@@ -424,19 +427,18 @@
# Tilde expansion: ~
# Piped commands: |
# Bash re-direction: >, <
- bash_special_chars = set(' \'"$*?[]+@!{}~|><')
+ bash_special_chars = set(" '\"$*?[]+@!{}~|><")
if any((char in bash_special_chars) for char in parm):
return "'" + escape_bash_quotes(parm) + "'"
- if parm == '':
+ if parm == "":
parm = "''"
return parm
-def get_host_name_ip(host=None,
- short_name=0):
+def get_host_name_ip(host=None, short_name=0):
r"""
Get the host name and the IP address for the given host and return them as a tuple.
@@ -451,8 +453,11 @@
try:
host_ip = socket.gethostbyname(host)
except socket.gaierror as my_gaierror:
- message = "Unable to obtain the host name for the following host:" +\
- "\n" + gp.sprint_var(host)
+ message = (
+ "Unable to obtain the host name for the following host:"
+ + "\n"
+ + gp.sprint_var(host)
+ )
gp.print_error_report(message)
raise my_gaierror
@@ -488,8 +493,7 @@
return True
-def to_signed(number,
- bit_width=None):
+def to_signed(number, bit_width=None):
r"""
Convert number to a signed number and return the result.
@@ -530,7 +534,7 @@
if number < 0:
return number
- neg_bit_mask = 2**(bit_width - 1)
+ neg_bit_mask = 2 ** (bit_width - 1)
if number & neg_bit_mask:
return ((2**bit_width) - number) * -1
else:
@@ -538,7 +542,6 @@
def get_child_pids(quiet=1):
-
r"""
Get and return a list of pids representing all first-generation processes that are the children of the
current process.
@@ -564,24 +567,30 @@
# Otherwise, find child pids using shell commands.
print_output = not quiet
- ps_cmd_buf = "ps --no-headers --ppid " + str(os.getpid()) +\
- " -o pid,args"
+ ps_cmd_buf = (
+ "ps --no-headers --ppid " + str(os.getpid()) + " -o pid,args"
+ )
# Route the output of ps to a temporary file for later grepping. Avoid using " | grep" in the ps
# command string because it creates yet another process which is of no interest to the caller.
temp = tempfile.NamedTemporaryFile()
temp_file_path = temp.name
- gc.shell_cmd(ps_cmd_buf + " > " + temp_file_path,
- print_output=print_output)
+ gc.shell_cmd(
+ ps_cmd_buf + " > " + temp_file_path, print_output=print_output
+ )
# Sample contents of the temporary file:
# 30703 sleep 2
# 30795 /bin/bash -c ps --no-headers --ppid 30672 -o pid,args > /tmp/tmpqqorWY
# Use egrep to exclude the "ps" process itself from the results collected with the prior shell_cmd
# invocation. Only the other children are of interest to the caller. Use cut on the grep results to
# obtain only the pid column.
- rc, output = \
- gc.shell_cmd("egrep -v '" + re.escape(ps_cmd_buf) + "' "
- + temp_file_path + " | cut -c1-5",
- print_output=print_output)
+ rc, output = gc.shell_cmd(
+ "egrep -v '"
+ + re.escape(ps_cmd_buf)
+ + "' "
+ + temp_file_path
+ + " | cut -c1-5",
+ print_output=print_output,
+ )
# Split the output buffer by line into a list. Strip each element of extra spaces and convert each
# element to an integer.
return map(int, map(str.strip, filter(None, output.split("\n"))))
@@ -671,8 +680,7 @@
return re.sub("[^0-9\\.]", "", sys_version)
-python_version = \
- version_tuple(get_python_version())
+python_version = version_tuple(get_python_version())
ordered_dict_version = version_tuple("3.6")
@@ -714,11 +722,14 @@
pass
callers_stack_frame = inspect.stack()[1]
- file_name_elements = \
- [
- gp.pgm_name, callers_stack_frame.function, "line_" + str(callers_stack_frame.lineno),
- "pid_" + str(os.getpid()), str(random.randint(0, 1000000)), suffix
- ]
+ file_name_elements = [
+ gp.pgm_name,
+ callers_stack_frame.function,
+ "line_" + str(callers_stack_frame.lineno),
+ "pid_" + str(os.getpid()),
+ str(random.randint(0, 1000000)),
+ suffix,
+ ]
temp_file_name = delim.join(file_name_elements)
temp_file_path = temp_dir_path + temp_file_name
diff --git a/lib/gen_plug_in.py b/lib/gen_plug_in.py
index fc57cef..d6ab0a6 100755
--- a/lib/gen_plug_in.py
+++ b/lib/gen_plug_in.py
@@ -4,27 +4,29 @@
This module provides functions which are useful for running plug-ins.
"""
-import sys
-import os
import glob
+import os
+import sys
-import gen_print as gp
import gen_misc as gm
+import gen_print as gp
# Some help text that is common to more than one program.
-plug_in_dir_paths_help_text = \
- 'This is a colon-separated list of plug-in directory paths. If one' +\
- ' of the entries in the list is a plain directory name (i.e. no' +\
- ' path info), it will be taken to be a native plug-in. In that case,' +\
- ' %(prog)s will search for the native plug-in in the "plug-ins"' +\
- ' subdirectory of each path in the PATH environment variable until it' +\
- ' is found. Also, integrated plug-ins will automatically be appended' +\
- ' to your plug_in_dir_paths list. An integrated plug-in is any plug-in' +\
- ' found using the PATH variable that contains a file named "integrated".'
+plug_in_dir_paths_help_text = (
+ "This is a colon-separated list of plug-in directory paths. If one"
+ + " of the entries in the list is a plain directory name (i.e. no"
+ + " path info), it will be taken to be a native plug-in. In that case,"
+ + ' %(prog)s will search for the native plug-in in the "plug-ins"'
+ + " subdirectory of each path in the PATH environment variable until it"
+ + " is found. Also, integrated plug-ins will automatically be appended"
+ + " to your plug_in_dir_paths list. An integrated plug-in is any plug-in"
+ + ' found using the PATH variable that contains a file named "integrated".'
+)
-mch_class_help_text = \
- 'The class of machine that we are testing (e.g. "op" = "open power",' +\
- ' "obmc" = "open bmc", etc).'
+mch_class_help_text = (
+ 'The class of machine that we are testing (e.g. "op" = "open power",'
+ + ' "obmc" = "open bmc", etc).'
+)
PATH_LIST = gm.return_path_list()
@@ -64,17 +66,16 @@
global plug_in_base_path_list
for plug_in_base_dir_path in plug_in_base_path_list:
- candidate_plug_in_dir_path = os.path.normpath(plug_in_base_dir_path
- + plug_in_name) + \
- os.sep
+ candidate_plug_in_dir_path = (
+ os.path.normpath(plug_in_base_dir_path + plug_in_name) + os.sep
+ )
if os.path.isdir(candidate_plug_in_dir_path):
return candidate_plug_in_dir_path
return ""
-def validate_plug_in_package(plug_in_dir_path,
- mch_class="obmc"):
+def validate_plug_in_package(plug_in_dir_path, mch_class="obmc"):
r"""
Validate the plug in package and return the normalized plug-in directory path.
@@ -88,33 +89,42 @@
if os.path.isabs(plug_in_dir_path):
# plug_in_dir_path begins with a slash so it is an absolute path.
- candidate_plug_in_dir_path = os.path.normpath(plug_in_dir_path) +\
- os.sep
+ candidate_plug_in_dir_path = (
+ os.path.normpath(plug_in_dir_path) + os.sep
+ )
if not os.path.isdir(candidate_plug_in_dir_path):
- gp.print_error_report("Plug-in directory path \""
- + plug_in_dir_path + "\" does not exist.\n")
+ gp.print_error_report(
+ 'Plug-in directory path "'
+ + plug_in_dir_path
+ + '" does not exist.\n'
+ )
exit(1)
else:
# The plug_in_dir_path is actually a simple name (e.g. "OBMC_Sample")...
candidate_plug_in_dir_path = find_plug_in_package(plug_in_dir_path)
if candidate_plug_in_dir_path == "":
global PATH_LIST
- gp.print_error_report("Plug-in directory path \""
- + plug_in_dir_path + "\" could not be found"
- + " in any of the following directories:\n"
- + gp.sprint_var(PATH_LIST))
+ gp.print_error_report(
+ 'Plug-in directory path "'
+ + plug_in_dir_path
+ + '" could not be found'
+ + " in any of the following directories:\n"
+ + gp.sprint_var(PATH_LIST)
+ )
exit(1)
# Make sure that this plug-in supports us...
supports_file_path = candidate_plug_in_dir_path + "supports_" + mch_class
if not os.path.exists(supports_file_path):
- gp.print_error_report("The following file path could not be"
- + " found:\n"
- + gp.sprint_varx("supports_file_path",
- supports_file_path)
- + "\nThis file is necessary to indicate that"
- + " the given plug-in supports the class of"
- + " machine we are testing, namely \""
- + mch_class + "\".\n")
+ gp.print_error_report(
+ "The following file path could not be"
+ + " found:\n"
+ + gp.sprint_varx("supports_file_path", supports_file_path)
+ + "\nThis file is necessary to indicate that"
+ + " the given plug-in supports the class of"
+ + ' machine we are testing, namely "'
+ + mch_class
+ + '".\n'
+ )
exit(1)
return candidate_plug_in_dir_path
@@ -136,22 +146,25 @@
integrated_plug_ins_list = []
- DEBUG_SKIP_INTEGRATED = int(os.getenv('DEBUG_SKIP_INTEGRATED', '0'))
+ DEBUG_SKIP_INTEGRATED = int(os.getenv("DEBUG_SKIP_INTEGRATED", "0"))
if DEBUG_SKIP_INTEGRATED:
return integrated_plug_ins_list
for plug_in_base_path in plug_in_base_path_list:
# Get a list of all plug-in paths that support our mch_class.
- mch_class_candidate_list = glob.glob(plug_in_base_path
- + "*/supports_" + mch_class)
+ mch_class_candidate_list = glob.glob(
+ plug_in_base_path + "*/supports_" + mch_class
+ )
for candidate_path in mch_class_candidate_list:
- integrated_plug_in_dir_path = os.path.dirname(candidate_path) +\
- os.sep
+ integrated_plug_in_dir_path = (
+ os.path.dirname(candidate_path) + os.sep
+ )
integrated_file_path = integrated_plug_in_dir_path + "integrated"
if os.path.exists(integrated_file_path):
- plug_in_name = \
- os.path.basename(os.path.dirname(candidate_path))
+ plug_in_name = os.path.basename(
+ os.path.dirname(candidate_path)
+ )
if plug_in_name not in integrated_plug_ins_list:
# If this plug-in has not already been added to the list...
integrated_plug_ins_list.append(plug_in_name)
@@ -159,8 +172,7 @@
return integrated_plug_ins_list
-def return_plug_in_packages_list(plug_in_dir_paths,
- mch_class="obmc"):
+def return_plug_in_packages_list(plug_in_dir_paths, mch_class="obmc"):
r"""
Return a list of plug-in packages given the plug_in_dir_paths string. This function calls
validate_plug_in_package so it will fail if plug_in_dir_paths contains any invalid plug-ins.
@@ -184,8 +196,13 @@
plug_in_packages_list = plug_in_packages_list + integrated_plug_ins_list
- plug_in_packages_list = \
- list(set([validate_plug_in_package(path, mch_class)
- for path in plug_in_packages_list]))
+ plug_in_packages_list = list(
+ set(
+ [
+ validate_plug_in_package(path, mch_class)
+ for path in plug_in_packages_list
+ ]
+ )
+ )
return plug_in_packages_list
diff --git a/lib/gen_plug_in_utils.py b/lib/gen_plug_in_utils.py
index 0cf3262..dbbafe5 100755
--- a/lib/gen_plug_in_utils.py
+++ b/lib/gen_plug_in_utils.py
@@ -4,16 +4,16 @@
This module provides functions which are useful to plug-in call point programs.
"""
-import sys
+import collections
import os
import re
-import collections
+import sys
+import func_args as fa
+import gen_cmd as gc
+import gen_misc as gm
import gen_print as gp
import gen_valid as gv
-import gen_misc as gm
-import gen_cmd as gc
-import func_args as fa
PLUG_VAR_PREFIX = os.environ.get("PLUG_VAR_PREFIX", "AUTOBOOT")
@@ -36,9 +36,7 @@
return plug_in_package_name
-def return_plug_vars(general=True,
- custom=True,
- plug_in_package_name=None):
+def return_plug_vars(general=True, custom=True, plug_in_package_name=None):
r"""
Return an OrderedDict which is sorted by key and which contains all of the plug-in environment variables.
@@ -82,7 +80,9 @@
regex_list = []
if not (general or custom):
return collections.OrderedDict()
- plug_in_package_name = gm.dft(plug_in_package_name, get_plug_in_package_name())
+ plug_in_package_name = gm.dft(
+ plug_in_package_name, get_plug_in_package_name()
+ )
if general:
regex_list = [PLUG_VAR_PREFIX, "AUTOGUI"]
if custom:
@@ -92,17 +92,23 @@
# Set a default for nickname.
if os.environ.get("AUTOBOOT_OPENBMC_NICKNAME", "") == "":
- os.environ['AUTOBOOT_OPENBMC_NICKNAME'] = \
- os.environ.get("AUTOBOOT_OPENBMC_HOST", "")
+ os.environ["AUTOBOOT_OPENBMC_NICKNAME"] = os.environ.get(
+ "AUTOBOOT_OPENBMC_HOST", ""
+ )
if os.environ.get("AUTOIPL_FSP1_NICKNAME", "") == "":
- os.environ['AUTOIPL_FSP1_NICKNAME'] = \
- os.environ.get("AUTOIPL_FSP1_NAME", "").split(".")[0]
+ os.environ["AUTOIPL_FSP1_NICKNAME"] = os.environ.get(
+ "AUTOIPL_FSP1_NAME", ""
+ ).split(".")[0]
# For all variables specified in the parm_def file, we want them to default to "" rather than being unset.
# Process the parm_def file if it exists.
- parm_def_file_path = os.path.dirname(gp.pgm_dir_path.rstrip("/")) + "/" + plug_in_package_name \
+ parm_def_file_path = (
+ os.path.dirname(gp.pgm_dir_path.rstrip("/"))
+ + "/"
+ + plug_in_package_name
+ "/parm_def"
+ )
if os.path.exists(parm_def_file_path):
parm_defs = gm.my_parm_file(parm_def_file_path)
else:
@@ -115,8 +121,10 @@
# Create a list of plug-in environment variables by pre-pending <all caps plug-in package name>_<all
# caps var name>
- plug_in_parm_names = [plug_in_package_name.upper() + "_" + x for x in
- map(str.upper, parm_defs.keys())]
+ plug_in_parm_names = [
+ plug_in_package_name.upper() + "_" + x
+ for x in map(str.upper, parm_defs.keys())
+ ]
# Example plug_in_parm_names:
# plug_in_parm_names:
# plug_in_parm_names[0]: STOP_REST_FAIL
@@ -137,13 +145,20 @@
if os.environ[var_name] == "":
os.environ[var_name] = str(default_value)
- plug_var_dict = \
- collections.OrderedDict(sorted({k: v for (k, v) in
- os.environ.items()
- if re.match(regex, k)}.items()))
+ plug_var_dict = collections.OrderedDict(
+ sorted(
+ {
+ k: v for (k, v) in os.environ.items() if re.match(regex, k)
+ }.items()
+ )
+ )
# Restore the types of any variables where the caller had defined default values.
for key, value in non_string_defaults.items():
- cmd_buf = "plug_var_dict[key] = " + str(value).split("'")[1] + "(plug_var_dict[key]"
+ cmd_buf = (
+ "plug_var_dict[key] = "
+ + str(value).split("'")[1]
+ + "(plug_var_dict[key]"
+ )
if value is int:
# Use int base argument of 0 to allow it to interpret hex strings.
cmd_buf += ", 0)"
@@ -152,8 +167,11 @@
exec(cmd_buf) in globals(), locals()
# Register password values to prevent printing them out. Any plug var whose name ends in PASSWORD will
# be registered.
- password_vals = {k: v for (k, v) in plug_var_dict.items()
- if re.match(r".*_PASSWORD$", k)}.values()
+ password_vals = {
+ k: v
+ for (k, v) in plug_var_dict.items()
+ if re.match(r".*_PASSWORD$", k)
+ }.values()
map(gp.register_passwords, password_vals)
return plug_var_dict
@@ -231,8 +249,7 @@
setattr(module, re.sub("^" + PLUG_VAR_PREFIX + "_", "", key), value)
-def get_plug_default(var_name,
- default=None):
+def get_plug_default(var_name, default=None):
r"""
Derive and return a default value for the given parm variable.
@@ -288,7 +305,7 @@
default_value = os.environ.get(package_var_name, None)
if default_value is not None:
# A package-name version of the variable was found so return its value.
- return (default_value)
+ return default_value
plug_var_name = PLUG_VAR_PREFIX + "_OVERRIDE_" + var_name
default_value = os.environ.get(plug_var_name, None)
@@ -305,8 +322,7 @@
return default
-def required_plug_in(required_plug_in_names,
- plug_in_dir_paths=None):
+def required_plug_in(required_plug_in_names, plug_in_dir_paths=None):
r"""
Determine whether the required_plug_in_names are in plug_in_dir_paths, construct an error_message and
call gv.process_error_message(error_message).
@@ -327,15 +343,22 @@
"""
# Calculate default value for plug_in_dir_paths.
- plug_in_dir_paths = gm.dft(plug_in_dir_paths,
- os.environ.get('AUTOGUI_PLUG_IN_DIR_PATHS',
- os.environ.get(PLUG_VAR_PREFIX + "_PLUG_IN_DIR_PATHS", "")))
+ plug_in_dir_paths = gm.dft(
+ plug_in_dir_paths,
+ os.environ.get(
+ "AUTOGUI_PLUG_IN_DIR_PATHS",
+ os.environ.get(PLUG_VAR_PREFIX + "_PLUG_IN_DIR_PATHS", ""),
+ ),
+ )
# Convert plug_in_dir_paths to a list of base names.
- plug_in_dir_paths = \
- list(filter(None, map(os.path.basename, plug_in_dir_paths.split(":"))))
+ plug_in_dir_paths = list(
+ filter(None, map(os.path.basename, plug_in_dir_paths.split(":")))
+ )
- error_message = gv.valid_list(plug_in_dir_paths, required_values=required_plug_in_names)
+ error_message = gv.valid_list(
+ plug_in_dir_paths, required_values=required_plug_in_names
+ )
if error_message:
return gv.process_error_message(error_message)
@@ -356,20 +379,31 @@
to retrieve data saved by another plug-in package.
"""
- plug_in_package_name = gm.dft(plug_in_package_name,
- get_plug_in_package_name())
+ plug_in_package_name = gm.dft(
+ plug_in_package_name, get_plug_in_package_name()
+ )
- BASE_TOOL_DIR_PATH = \
- gm.add_trailing_slash(os.environ.get(PLUG_VAR_PREFIX
- + "_BASE_TOOL_DIR_PATH",
- "/tmp/"))
+ BASE_TOOL_DIR_PATH = gm.add_trailing_slash(
+ os.environ.get(PLUG_VAR_PREFIX + "_BASE_TOOL_DIR_PATH", "/tmp/")
+ )
NICKNAME = os.environ.get("AUTOBOOT_OPENBMC_NICKNAME", "")
if NICKNAME == "":
NICKNAME = os.environ["AUTOIPL_FSP1_NICKNAME"]
MASTER_PID = os.environ[PLUG_VAR_PREFIX + "_MASTER_PID"]
- gp.dprint_vars(BASE_TOOL_DIR_PATH, NICKNAME, plug_in_package_name, MASTER_PID)
- return BASE_TOOL_DIR_PATH + gm.username() + "/" + NICKNAME + "/" +\
- plug_in_package_name + "/" + str(MASTER_PID) + "/"
+ gp.dprint_vars(
+ BASE_TOOL_DIR_PATH, NICKNAME, plug_in_package_name, MASTER_PID
+ )
+ return (
+ BASE_TOOL_DIR_PATH
+ + gm.username()
+ + "/"
+ + NICKNAME
+ + "/"
+ + plug_in_package_name
+ + "/"
+ + str(MASTER_PID)
+ + "/"
+ )
def create_plug_in_save_dir(plug_in_package_name=None):
@@ -397,8 +431,9 @@
plug_in_package_name See compose_plug_in_save_dir_path for details.
"""
- gc.shell_cmd("rm -rf "
- + compose_plug_in_save_dir_path(plug_in_package_name))
+ gc.shell_cmd(
+ "rm -rf " + compose_plug_in_save_dir_path(plug_in_package_name)
+ )
def save_plug_in_value(var_value=None, plug_in_package_name=None, **kwargs):
@@ -439,7 +474,7 @@
var_name = gp.get_arg_name(0, 1, stack_frame_ix=2)
plug_in_save_dir_path = create_plug_in_save_dir(plug_in_package_name)
save_file_path = plug_in_save_dir_path + var_name
- gp.qprint_timen("Saving \"" + var_name + "\" value.")
+ gp.qprint_timen('Saving "' + var_name + '" value.')
gp.qprint_varx(var_name, var_value)
gc.shell_cmd("echo '" + str(var_value) + "' > " + save_file_path)
@@ -485,22 +520,32 @@
default, args, kwargs = fa.pop_arg("", *args, **kwargs)
plug_in_package_name, args, kwargs = fa.pop_arg(None, *args, **kwargs)
if args or kwargs:
- error_message = "Programmer error - Too many arguments passed for this function."
+ error_message = (
+ "Programmer error - Too many arguments passed for this function."
+ )
raise ValueError(error_message)
plug_in_save_dir_path = create_plug_in_save_dir(plug_in_package_name)
save_file_path = plug_in_save_dir_path + var_name
if os.path.isfile(save_file_path):
- gp.qprint_timen("Restoring " + var_name + " value from " + save_file_path + ".")
- var_value = gm.file_to_list(save_file_path, newlines=0, comments=0, trim=1)[0]
+ gp.qprint_timen(
+ "Restoring " + var_name + " value from " + save_file_path + "."
+ )
+ var_value = gm.file_to_list(
+ save_file_path, newlines=0, comments=0, trim=1
+ )[0]
if type(default) is bool:
# Convert from string to bool.
- var_value = (var_value == 'True')
+ var_value = var_value == "True"
if type(default) is int:
# Convert from string to int.
var_value = int(var_value)
else:
var_value = default
- gp.qprint_timen("Save file " + save_file_path + " does not exist so returning default value.")
+ gp.qprint_timen(
+ "Save file "
+ + save_file_path
+ + " does not exist so returning default value."
+ )
gp.qprint_varx(var_name, var_value)
return var_value
@@ -527,9 +572,14 @@
AUTOBOOT_PROGRAM_PID = gm.get_mod_global("AUTOBOOT_PROGRAM_PID")
if AUTOBOOT_MASTER_PID != AUTOBOOT_PROGRAM_PID:
- message = get_plug_in_package_name() + "/" + gp.pgm_name + " is not" \
- + " being called by the master program in the stack so no action" \
+ message = (
+ get_plug_in_package_name()
+ + "/"
+ + gp.pgm_name
+ + " is not"
+ + " being called by the master program in the stack so no action"
+ " will be taken."
+ )
gp.qprint_timen(message)
gp.qprint_vars(AUTOBOOT_MASTER_PID, AUTOBOOT_PROGRAM_PID)
exit(0)
@@ -541,13 +591,22 @@
The calling program is responsible for making sure that the tarball has been unpacked.
"""
- AUTOBOOT_BASE_TOOL_DIR_PATH = gm.get_mod_global("AUTOBOOT_BASE_TOOL_DIR_PATH")
+ AUTOBOOT_BASE_TOOL_DIR_PATH = gm.get_mod_global(
+ "AUTOBOOT_BASE_TOOL_DIR_PATH"
+ )
AUTOBOOT_OPENBMC_NICKNAME = gm.get_mod_global("AUTOBOOT_OPENBMC_NICKNAME")
- tool_dir_path = AUTOBOOT_BASE_TOOL_DIR_PATH + os.environ.get('USER') + os.sep \
- + AUTOBOOT_OPENBMC_NICKNAME + os.sep
- tarball_tools_dir_path = tool_dir_path + 'tarball/x86/bin'
- os.environ['PATH'] = gm.add_path(tarball_tools_dir_path, os.environ.get('PATH', ''))
+ tool_dir_path = (
+ AUTOBOOT_BASE_TOOL_DIR_PATH
+ + os.environ.get("USER")
+ + os.sep
+ + AUTOBOOT_OPENBMC_NICKNAME
+ + os.sep
+ )
+ tarball_tools_dir_path = tool_dir_path + "tarball/x86/bin"
+ os.environ["PATH"] = gm.add_path(
+ tarball_tools_dir_path, os.environ.get("PATH", "")
+ )
def stop_test_rc():
@@ -573,14 +632,15 @@
# Create print wrapper functions for all sprint functions defined above.
# func_names contains a list of all print functions which should be created from their sprint counterparts.
-func_names = ['print_plug_vars']
+func_names = ["print_plug_vars"]
# stderr_func_names is a list of functions whose output should go to stderr rather than stdout.
stderr_func_names = []
replace_dict = dict(gp.replace_dict)
-replace_dict['mod_qualifier'] = 'gp.'
-func_defs = gp.create_print_wrapper_funcs(func_names, stderr_func_names,
- replace_dict)
+replace_dict["mod_qualifier"] = "gp."
+func_defs = gp.create_print_wrapper_funcs(
+ func_names, stderr_func_names, replace_dict
+)
gp.gp_debug_print(func_defs)
exec(func_defs)
diff --git a/lib/gen_print.py b/lib/gen_print.py
index 5f87343..8372832 100755
--- a/lib/gen_print.py
+++ b/lib/gen_print.py
@@ -4,28 +4,31 @@
This module provides many print functions such as sprint_var, sprint_time, sprint_error, sprint_call_stack.
"""
-import sys
-import os
-import time
-import inspect
-import re
-import grp
-import socket
import argparse
import copy
+import grp
+import inspect
+import os
+import re
+import socket
+import sys
+import time
+
try:
import __builtin__
except ImportError:
import builtins as __builtin__
-import logging
+
import collections
+import logging
+
from wrap_utils import *
try:
robot_env = 1
- from robot.utils import DotDict
- from robot.utils import NormalizedDict
from robot.libraries.BuiltIn import BuiltIn
+ from robot.utils import DotDict, NormalizedDict
+
# Having access to the robot libraries alone does not indicate that we are in a robot environment. The
# following try block should confirm that.
try:
@@ -40,8 +43,9 @@
# Setting these variables for use both inside this module and by programs importing this module.
pgm_file_path = sys.argv[0]
pgm_name = os.path.basename(pgm_file_path)
-pgm_dir_path = os.path.normpath(re.sub("/" + pgm_name, "", pgm_file_path)) +\
- os.path.sep
+pgm_dir_path = (
+ os.path.normpath(re.sub("/" + pgm_name, "", pgm_file_path)) + os.path.sep
+)
# Some functions (e.g. sprint_pgm_header) have need of a program name value that looks more like a valid
@@ -55,12 +59,12 @@
# objective is to make the variable values line up nicely with the time stamps.
dft_col1_width = 29
-NANOSECONDS = os.environ.get('NANOSECONDS', '1')
+NANOSECONDS = os.environ.get("NANOSECONDS", "1")
if NANOSECONDS == "1":
dft_col1_width = dft_col1_width + 7
-SHOW_ELAPSED_TIME = os.environ.get('SHOW_ELAPSED_TIME', '1')
+SHOW_ELAPSED_TIME = os.environ.get("SHOW_ELAPSED_TIME", "1")
if SHOW_ELAPSED_TIME == "1":
if NANOSECONDS == "1":
@@ -107,7 +111,7 @@
# The user can set environment variable "GEN_PRINT_DEBUG" to get debug output from this module.
-gen_print_debug = int(os.environ.get('GEN_PRINT_DEBUG', 0))
+gen_print_debug = int(os.environ.get("GEN_PRINT_DEBUG", 0))
def sprint_func_name(stack_frame_ix=None):
@@ -156,14 +160,12 @@
Return the number of spaces at the beginning of the line.
"""
- return len(line) - len(line.lstrip(' '))
+ return len(line) - len(line.lstrip(" "))
# get_arg_name is not a print function per se. It has been included in this module because it is used by
# sprint_var which is defined in this module.
-def get_arg_name(var,
- arg_num=1,
- stack_frame_ix=1):
+def get_arg_name(var, arg_num=1, stack_frame_ix=1):
r"""
Return the "name" of an argument passed to a function. This could be a literal or a variable name.
@@ -221,17 +223,22 @@
# sprint_var, valid_value, etc.).
# The user can set environment variable "GET_ARG_NAME_DEBUG" to get debug output from this function.
- local_debug = int(os.environ.get('GET_ARG_NAME_DEBUG', 0))
+ local_debug = int(os.environ.get("GET_ARG_NAME_DEBUG", 0))
# In addition to GET_ARG_NAME_DEBUG, the user can set environment variable "GET_ARG_NAME_SHOW_SOURCE" to
# have this function include source code in the debug output.
local_debug_show_source = int(
- os.environ.get('GET_ARG_NAME_SHOW_SOURCE', 0))
+ os.environ.get("GET_ARG_NAME_SHOW_SOURCE", 0)
+ )
if stack_frame_ix < 1:
- print_error("Programmer error - Variable \"stack_frame_ix\" has an"
- + " invalid value of \"" + str(stack_frame_ix) + "\". The"
- + " value must be an integer that is greater than or equal"
- + " to 1.\n")
+ print_error(
+ 'Programmer error - Variable "stack_frame_ix" has an'
+ + ' invalid value of "'
+ + str(stack_frame_ix)
+ + '". The'
+ + " value must be an integer that is greater than or equal"
+ + " to 1.\n"
+ )
return
if local_debug:
@@ -248,15 +255,25 @@
work_around_inspect_stack_cwd_failure()
for count in range(0, 2):
try:
- frame, filename, cur_line_no, function_name, lines, index = \
- inspect.stack()[stack_frame_ix]
+ (
+ frame,
+ filename,
+ cur_line_no,
+ function_name,
+ lines,
+ index,
+ ) = inspect.stack()[stack_frame_ix]
except IndexError:
- print_error("Programmer error - The caller has asked for"
- + " information about the stack frame at index \""
- + str(stack_frame_ix) + "\". However, the stack"
- + " only contains " + str(len(inspect.stack()))
- + " entries. Therefore the stack frame index is out"
- + " of range.\n")
+ print_error(
+ "Programmer error - The caller has asked for"
+ + ' information about the stack frame at index "'
+ + str(stack_frame_ix)
+ + '". However, the stack'
+ + " only contains "
+ + str(len(inspect.stack()))
+ + " entries. Therefore the stack frame index is out"
+ + " of range.\n"
+ )
return
if filename != "<string>":
break
@@ -275,12 +292,10 @@
# Though one would expect inspect.getsourcelines(frame) to get all module source lines if the frame is
# "<module>", it doesn't do that. Therefore, for this special case, do inspect.getsourcelines(module).
if function_name == "<module>":
- source_lines, source_line_num =\
- inspect.getsourcelines(module)
+ source_lines, source_line_num = inspect.getsourcelines(module)
line_ix = cur_line_no - source_line_num - 1
else:
- source_lines, source_line_num =\
- inspect.getsourcelines(frame)
+ source_lines, source_line_num = inspect.getsourcelines(frame)
line_ix = cur_line_no - source_line_num
if local_debug:
@@ -295,8 +310,9 @@
print_varx("line_ix", line_ix, indent=debug_indent)
if local_debug_show_source:
print_varx("source_lines", source_lines, indent=debug_indent)
- print_varx("real_called_func_name", real_called_func_name,
- indent=debug_indent)
+ print_varx(
+ "real_called_func_name", real_called_func_name, indent=debug_indent
+ )
# Get a list of all functions defined for the module. Note that this doesn't work consistently when
# _run_exitfuncs is at the top of the stack (i.e. if we're running an exit function). I've coded a
@@ -330,8 +346,9 @@
# The call to the function could be encased in a recast (e.g. int(func_name())).
recast_regex = "([^ ]+\\([ ]*)?"
import_name_regex = "([a-zA-Z0-9_]+\\.)?"
- func_name_regex = recast_regex + import_name_regex + "(" +\
- '|'.join(aliases) + ")"
+ func_name_regex = (
+ recast_regex + import_name_regex + "(" + "|".join(aliases) + ")"
+ )
pre_args_regex = ".*" + func_name_regex + "[ ]*\\("
# Search backward through source lines looking for the calling function name.
@@ -344,9 +361,12 @@
found = True
break
if not found:
- print_error("Programmer error - Could not find the source line with"
- + " a reference to function \"" + real_called_func_name
- + "\".\n")
+ print_error(
+ "Programmer error - Could not find the source line with"
+ + ' a reference to function "'
+ + real_called_func_name
+ + '".\n'
+ )
return
# Search forward through the source lines looking for a line whose indentation is the same or less than
@@ -365,15 +385,18 @@
prior_line = source_lines[start_line_ix - 1]
prior_line_stripped = re.sub(r"[ ]*\\([\r\n]$)", " \\1", prior_line)
prior_line_indent = get_line_indent(prior_line)
- if prior_line != prior_line_stripped and\
- prior_line_indent < start_indent:
+ if (
+ prior_line != prior_line_stripped
+ and prior_line_indent < start_indent
+ ):
start_line_ix -= 1
# Remove the backslash (continuation char) from prior line.
source_lines[start_line_ix] = prior_line_stripped
# Join the start line through the end line into a composite line.
- composite_line = ''.join(map(str.strip,
- source_lines[start_line_ix:end_line_ix + 1]))
+ composite_line = "".join(
+ map(str.strip, source_lines[start_line_ix : end_line_ix + 1])
+ )
# Insert one space after first "=" if there isn't one already.
composite_line = re.sub("=[ ]*([^ ])", "= \\1", composite_line, 1)
@@ -393,8 +416,9 @@
lvalues[ix] = lvalue
ix += 1
lvalue_prefix_regex = "(.*=[ ]+)?"
- called_func_name_regex = lvalue_prefix_regex + func_name_regex +\
- "[ ]*\\(.*"
+ called_func_name_regex = (
+ lvalue_prefix_regex + func_name_regex + "[ ]*\\(.*"
+ )
called_func_name = re.sub(called_func_name_regex, "\\4", composite_line)
arg_list_etc = "(" + re.sub(pre_args_regex, "", composite_line)
if local_debug:
@@ -408,8 +432,11 @@
print_varx("lvalue_regex", lvalue_regex, indent=debug_indent)
print_varx("lvalue_string", lvalue_string, indent=debug_indent)
print_varx("lvalues", lvalues, indent=debug_indent)
- print_varx("called_func_name_regex", called_func_name_regex,
- indent=debug_indent)
+ print_varx(
+ "called_func_name_regex",
+ called_func_name_regex,
+ indent=debug_indent,
+ )
print_varx("called_func_name", called_func_name, indent=debug_indent)
print_varx("arg_list_etc", arg_list_etc, indent=debug_indent)
@@ -530,8 +557,11 @@
if SHOW_ELAPSED_TIME == "1":
cur_time_seconds = seconds
- math_string = "%9.9f" % cur_time_seconds + " - " + "%9.9f" % \
- sprint_time_last_seconds[last_seconds_ix]
+ math_string = (
+ "%9.9f" % cur_time_seconds
+ + " - "
+ + "%9.9f" % sprint_time_last_seconds[last_seconds_ix]
+ )
elapsed_seconds = eval(math_string)
if NANOSECONDS == "1":
elapsed_seconds = "%11.6f" % elapsed_seconds
@@ -648,8 +678,9 @@
return word_length_in_digits()
num_length_in_bits = bit_length(working_number)
- num_hex_digits, remainder = divmod(num_length_in_bits,
- digit_length_in_bits())
+ num_hex_digits, remainder = divmod(
+ num_length_in_bits, digit_length_in_bits()
+ )
if remainder > 0:
# Example: the number 7 requires 3 bits. The divmod above produces, 0 with remainder of 3. So
# because we have a remainder, we increment num_hex_digits from 0 to 1.
@@ -780,16 +811,17 @@
"""
return [
- 'hexa',
- 'octal',
- 'binary',
- 'blank',
- 'verbose',
- 'quote_keys',
- 'show_type',
- 'strip_brackets',
- 'no_header',
- 'quote_values']
+ "hexa",
+ "octal",
+ "binary",
+ "blank",
+ "verbose",
+ "quote_keys",
+ "show_type",
+ "strip_brackets",
+ "no_header",
+ "quote_values",
+ ]
def create_fmt_definition():
@@ -960,14 +992,16 @@
return fmt, fmt
-def sprint_varx(var_name,
- var_value,
- fmt=0,
- indent=dft_indent,
- col1_width=dft_col1_width,
- trailing_char="\n",
- key_list=None,
- delim=":"):
+def sprint_varx(
+ var_name,
+ var_value,
+ fmt=0,
+ indent=dft_indent,
+ col1_width=dft_col1_width,
+ trailing_char="\n",
+ key_list=None,
+ delim=":",
+):
r"""
Print the var name/value passed to it. If the caller lets col1_width default, the printing lines up
nicely with output generated by the print_time functions.
@@ -1071,8 +1105,9 @@
if type(var_value) in int_types:
# Process format values pertaining to int types.
if fmt & hexa():
- num_hex_digits = max(dft_num_hex_digits(),
- get_req_num_hex_digits(var_value))
+ num_hex_digits = max(
+ dft_num_hex_digits(), get_req_num_hex_digits(var_value)
+ )
# Convert a negative number to its positive twos complement for proper printing. For
# example, instead of printing -1 as "0x-000000000000001" it will be printed as
# "0xffffffffffffffff".
@@ -1081,13 +1116,14 @@
elif fmt & octal():
value_format = "0o%016o"
elif fmt & binary():
- num_digits, remainder = \
- divmod(max(bit_length(var_value), 1), 8)
+ num_digits, remainder = divmod(
+ max(bit_length(var_value), 1), 8
+ )
num_digits *= 8
if remainder:
num_digits += 8
num_digits += 2
- value_format = '#0' + str(num_digits) + 'b'
+ value_format = "#0" + str(num_digits) + "b"
var_value = format(var_value, value_format)
value_format = "%s"
elif type(var_value) in string_types:
@@ -1097,8 +1133,9 @@
var_value = "<blank>"
elif type(var_value) is type:
var_value = str(var_value).split("'")[1]
- format_string = "%" + str(indent) + "s%-" + str(col1_width) + "s" \
- + value_format
+ format_string = (
+ "%" + str(indent) + "s%-" + str(col1_width) + "s" + value_format
+ )
if fmt & show_type():
if var_value != "":
format_string += " "
@@ -1109,16 +1146,19 @@
if not (fmt & verbose()):
# Strip everything leading up to the first left square brace.
var_name = re.sub(r".*\[", "[", var_name)
- if (fmt & strip_brackets()):
+ if fmt & strip_brackets():
var_name = re.sub(r"[\[\]]", "", var_name)
if value_format == "0x%08x":
- return format_string % ("", str(var_name) + delim,
- var_value & 0xffffffff)
+ return format_string % (
+ "",
+ str(var_name) + delim,
+ var_value & 0xFFFFFFFF,
+ )
else:
return format_string % ("", str(var_name) + delim, var_value)
else:
# The data type is complex in the sense that it has subordinate parts.
- if (fmt & no_header()):
+ if fmt & no_header():
buffer = ""
else:
# Create header line.
@@ -1127,7 +1167,7 @@
loc_var_name = re.sub(r".*\[", "[", var_name)
else:
loc_var_name = var_name
- if (fmt & strip_brackets()):
+ if fmt & strip_brackets():
loc_var_name = re.sub(r"[\[\]]", "", loc_var_name)
format_string = "%" + str(indent) + "s%s\n"
buffer = format_string % ("", loc_var_name + ":")
@@ -1142,9 +1182,9 @@
loc_trailing_char = "\n"
if is_dict(var_value):
if type(child_fmt) is list:
- child_quote_keys = (child_fmt[0] & quote_keys())
+ child_quote_keys = child_fmt[0] & quote_keys()
else:
- child_quote_keys = (child_fmt & quote_keys())
+ child_quote_keys = child_fmt & quote_keys()
for key, value in var_value.items():
if key_list is not None:
key_list_regex = "^" + "|".join(key_list) + "$"
@@ -1156,39 +1196,65 @@
if child_quote_keys:
key = "'" + key + "'"
key = "[" + str(key) + "]"
- buffer += sprint_varx(var_name + key, value, child_fmt, indent,
- col1_width, loc_trailing_char, key_list,
- delim)
+ buffer += sprint_varx(
+ var_name + key,
+ value,
+ child_fmt,
+ indent,
+ col1_width,
+ loc_trailing_char,
+ key_list,
+ delim,
+ )
elif type(var_value) in (list, tuple, set):
for key, value in enumerate(var_value):
ix += 1
if ix == length:
loc_trailing_char = trailing_char
key = "[" + str(key) + "]"
- buffer += sprint_varx(var_name + key, value, child_fmt, indent,
- col1_width, loc_trailing_char, key_list,
- delim)
+ buffer += sprint_varx(
+ var_name + key,
+ value,
+ child_fmt,
+ indent,
+ col1_width,
+ loc_trailing_char,
+ key_list,
+ delim,
+ )
elif isinstance(var_value, argparse.Namespace):
for key in var_value.__dict__:
ix += 1
if ix == length:
loc_trailing_char = trailing_char
- cmd_buf = "buffer += sprint_varx(var_name + \".\" + str(key)" \
- + ", var_value." + key + ", child_fmt, indent," \
- + " col1_width, loc_trailing_char, key_list," \
- + " delim)"
+ cmd_buf = (
+ 'buffer += sprint_varx(var_name + "." + str(key)'
+ + ", var_value."
+ + key
+ + ", child_fmt, indent,"
+ + " col1_width, loc_trailing_char, key_list,"
+ + " delim)"
+ )
exec(cmd_buf)
else:
var_type = type(var_value).__name__
func_name = sys._getframe().f_code.co_name
- var_value = "<" + var_type + " type not supported by " + \
- func_name + "()>"
+ var_value = (
+ "<" + var_type + " type not supported by " + func_name + "()>"
+ )
value_format = "%s"
indent -= 2
# Adjust col1_width.
col1_width = col1_width - indent
- format_string = "%" + str(indent) + "s%-" \
- + str(col1_width) + "s" + value_format + trailing_char
+ format_string = (
+ "%"
+ + str(indent)
+ + "s%-"
+ + str(col1_width)
+ + "s"
+ + value_format
+ + trailing_char
+ )
return format_string % ("", str(var_name) + ":", var_value)
return buffer
@@ -1239,10 +1305,7 @@
return buffer
-def sprint_dashes(indent=dft_indent,
- width=80,
- line_feed=1,
- char="-"):
+def sprint_dashes(indent=dft_indent, width=80, line_feed=1, char="-"):
r"""
Return a string of dashes to the caller.
@@ -1261,8 +1324,7 @@
return buffer
-def sindent(text="",
- indent=0):
+def sindent(text="", indent=0):
r"""
Pre-pend the specified number of characters to the text string (i.e. indent it) and return it.
@@ -1310,36 +1372,41 @@
if func_name == "<module>":
# If the func_name is the "main" program, we simply get the command line call string.
- func_and_args = ' '.join(sys.argv)
+ func_and_args = " ".join(sys.argv)
else:
# Get the program arguments.
- (args, varargs, keywords, locals) =\
- inspect.getargvalues(stack_frame[0])
+ (args, varargs, keywords, locals) = inspect.getargvalues(
+ stack_frame[0]
+ )
args_list = []
for arg_name in filter(None, args + [varargs, keywords]):
# Get the arg value from frame locals.
arg_value = locals[arg_name]
- if arg_name == 'self':
+ if arg_name == "self":
if style == func_line_style_short:
continue
# Manipulations to improve output for class methods.
func_name = arg_value.__class__.__name__ + "." + func_name
args_list.append(arg_name + " = <self>")
- elif (style == func_line_style_short
- and arg_name == 'args'
- and type(arg_value) in (list, tuple)):
+ elif (
+ style == func_line_style_short
+ and arg_name == "args"
+ and type(arg_value) in (list, tuple)
+ ):
if len(arg_value) == 0:
continue
- args_list.append(repr(', '.join(arg_value)))
- elif (style == func_line_style_short
- and arg_name == 'kwargs'
- and type(arg_value) is dict):
+ args_list.append(repr(", ".join(arg_value)))
+ elif (
+ style == func_line_style_short
+ and arg_name == "kwargs"
+ and type(arg_value) is dict
+ ):
for key, value in arg_value.items():
args_list.append(key + "=" + repr(value))
else:
args_list.append(arg_name + " = " + repr(arg_value))
- args_str = "(" + ', '.join(map(str, args_list)) + ")"
+ args_str = "(" + ", ".join(map(str, args_list)) + ")"
# Now we need to print this in a nicely-wrapped way.
func_and_args = func_name + args_str
@@ -1349,9 +1416,7 @@
return func_and_args
-def sprint_call_stack(indent=0,
- stack_frame_ix=0,
- style=None):
+def sprint_call_stack(indent=0, stack_frame_ix=0, style=None):
r"""
Return a call stack report for the given point in the program with line numbers, function names and
function parameters and arguments.
@@ -1444,8 +1509,7 @@
return sprint_time() + "Executing: " + func_and_args + "\n"
-def sprint_pgm_header(indent=0,
- linefeed=1):
+def sprint_pgm_header(indent=0, linefeed=1):
r"""
Return a standardized header that programs should print at the beginning of the run. It includes useful
information like command line, pid, userid, program parameters, etc.
@@ -1464,20 +1528,25 @@
if robot_env:
suite_name = BuiltIn().get_variable_value("${suite_name}")
- buffer += sindent(sprint_time("Running test suite \"" + suite_name
- + "\".\n"), indent)
+ buffer += sindent(
+ sprint_time('Running test suite "' + suite_name + '".\n'), indent
+ )
buffer += sindent(sprint_time() + "Running " + pgm_name + ".\n", indent)
- buffer += sindent(sprint_time() + "Program parameter values, etc.:\n\n",
- indent)
- buffer += sprint_varx("command_line", ' '.join(sys.argv), 0, indent,
- col1_width)
+ buffer += sindent(
+ sprint_time() + "Program parameter values, etc.:\n\n", indent
+ )
+ buffer += sprint_varx(
+ "command_line", " ".join(sys.argv), 0, indent, col1_width
+ )
# We want the output to show a customized name for the pid and pgid but we want it to look like a valid
# variable name. Therefore, we'll use pgm_name_var_name which was set when this module was imported.
- buffer += sprint_varx(pgm_name_var_name + "_pid", os.getpid(), 0, indent,
- col1_width)
- buffer += sprint_varx(pgm_name_var_name + "_pgid", os.getpgrp(), 0, indent,
- col1_width)
+ buffer += sprint_varx(
+ pgm_name_var_name + "_pid", os.getpid(), 0, indent, col1_width
+ )
+ buffer += sprint_varx(
+ pgm_name_var_name + "_pgid", os.getpgrp(), 0, indent, col1_width
+ )
userid_num = str(os.geteuid())
try:
username = os.getlogin()
@@ -1486,30 +1555,36 @@
username = "root"
else:
username = "?"
- buffer += sprint_varx("uid", userid_num + " (" + username
- + ")", 0, indent, col1_width)
- buffer += sprint_varx("gid", str(os.getgid()) + " ("
- + str(grp.getgrgid(os.getgid()).gr_name) + ")", 0,
- indent, col1_width)
- buffer += sprint_varx("host_name", socket.gethostname(), 0, indent,
- col1_width)
+ buffer += sprint_varx(
+ "uid", userid_num + " (" + username + ")", 0, indent, col1_width
+ )
+ buffer += sprint_varx(
+ "gid",
+ str(os.getgid()) + " (" + str(grp.getgrgid(os.getgid()).gr_name) + ")",
+ 0,
+ indent,
+ col1_width,
+ )
+ buffer += sprint_varx(
+ "host_name", socket.gethostname(), 0, indent, col1_width
+ )
try:
- DISPLAY = os.environ['DISPLAY']
+ DISPLAY = os.environ["DISPLAY"]
except KeyError:
DISPLAY = ""
buffer += sprint_var(DISPLAY, 0, indent, col1_width)
- PYTHON_VERSION = os.environ.get('PYTHON_VERSION', None)
+ PYTHON_VERSION = os.environ.get("PYTHON_VERSION", None)
if PYTHON_VERSION is not None:
buffer += sprint_var(PYTHON_VERSION, 0, indent, col1_width)
- PYTHON_PGM_PATH = os.environ.get('PYTHON_PGM_PATH', None)
+ PYTHON_PGM_PATH = os.environ.get("PYTHON_PGM_PATH", None)
if PYTHON_PGM_PATH is not None:
buffer += sprint_var(PYTHON_PGM_PATH, 0, indent, col1_width)
python_version = sys.version.replace("\n", "")
buffer += sprint_var(python_version, 0, indent, col1_width)
- ROBOT_VERSION = os.environ.get('ROBOT_VERSION', None)
+ ROBOT_VERSION = os.environ.get("ROBOT_VERSION", None)
if ROBOT_VERSION is not None:
buffer += sprint_var(ROBOT_VERSION, 0, indent, col1_width)
- ROBOT_PGM_PATH = os.environ.get('ROBOT_PGM_PATH', None)
+ ROBOT_PGM_PATH = os.environ.get("ROBOT_PGM_PATH", None)
if ROBOT_PGM_PATH is not None:
buffer += sprint_var(ROBOT_PGM_PATH, 0, indent, col1_width)
@@ -1536,10 +1611,9 @@
return buffer
-def sprint_error_report(error_text="\n",
- indent=2,
- format=None,
- stack_frame_ix=None):
+def sprint_error_report(
+ error_text="\n", indent=2, format=None, stack_frame_ix=None
+):
r"""
Return a string with a standardized report which includes the caller's error text, the call stack and the
program header.
@@ -1558,12 +1632,12 @@
indent = int(indent)
if format is None:
if robot_env:
- format = 'short'
+ format = "short"
else:
- format = 'long'
- error_text = error_text.rstrip('\n') + '\n'
+ format = "long"
+ error_text = error_text.rstrip("\n") + "\n"
- if format == 'short':
+ if format == "short":
return sprint_error(error_text)
buffer = ""
@@ -1588,8 +1662,7 @@
return buffer
-def sprint_issuing(cmd_buf,
- test_mode=0):
+def sprint_issuing(cmd_buf, test_mode=0):
r"""
Return a line indicating a command that the program is about to execute.
@@ -1610,7 +1683,7 @@
buffer += "(test_mode) "
if type(cmd_buf) is list:
# Assume this is a robot command in the form of a list.
- cmd_buf = ' '.join([str(element) for element in cmd_buf])
+ cmd_buf = " ".join([str(element) for element in cmd_buf])
buffer += "Issuing: " + cmd_buf + "\n"
return buffer
@@ -1641,7 +1714,7 @@
file_path The path to a file (e.g. "/tmp/file1").
"""
- with open(file_path, 'r') as file:
+ with open(file_path, "r") as file:
buffer = file.read()
return buffer
@@ -1678,8 +1751,7 @@
return buffer
-def gp_print(buffer,
- stream='stdout'):
+def gp_print(buffer, stream="stdout"):
r"""
Print the buffer using either sys.stdout.write or BuiltIn().log_to_console depending on whether we are
running in a robot environment.
@@ -1735,9 +1807,7 @@
gp_print(buffer)
-def get_var_value(var_value=None,
- default=1,
- var_name=None):
+def get_var_value(var_value=None, default=1, var_name=None):
r"""
Return either var_value, the corresponding global value or default.
@@ -1787,17 +1857,16 @@
var_name = get_arg_name(None, 1, 2)
if robot_env:
- var_value = BuiltIn().get_variable_value("${" + var_name + "}",
- default)
+ var_value = BuiltIn().get_variable_value(
+ "${" + var_name + "}", default
+ )
else:
var_value = getattr(__builtin__, var_name, default)
return var_value
-def get_stack_var(var_name,
- default="",
- init_stack_ix=2):
+def get_stack_var(var_name, default="", init_stack_ix=2):
r"""
Starting with the caller's stack level, search upward in the call stack for a variable named var_name and
return its value. If the variable cannot be found in the stack, attempt to get the global value. If the
@@ -1825,9 +1894,14 @@
work_around_inspect_stack_cwd_failure()
default = get_var_value(var_name=var_name, default=default)
- return next((frame[0].f_locals[var_name]
- for frame in inspect.stack()[init_stack_ix:]
- if var_name in frame[0].f_locals), default)
+ return next(
+ (
+ frame[0].f_locals[var_name]
+ for frame in inspect.stack()[init_stack_ix:]
+ if var_name in frame[0].f_locals
+ ),
+ default,
+ )
# hidden_text is a list of passwords which are to be replaced with asterisks by print functions defined in
@@ -1861,8 +1935,9 @@
# Place the password into the hidden_text list.
hidden_text.append(password)
# Create a corresponding password regular expression. Escape regex special characters too.
- password_regex = '(' +\
- '|'.join([re.escape(x) for x in hidden_text]) + ')'
+ password_regex = (
+ "(" + "|".join([re.escape(x) for x in hidden_text]) + ")"
+ )
def replace_passwords(buffer):
@@ -1886,10 +1961,9 @@
return re.sub(password_regex, "********", buffer)
-def create_print_wrapper_funcs(func_names,
- stderr_func_names,
- replace_dict,
- func_prefix=""):
+def create_print_wrapper_funcs(
+ func_names, stderr_func_names, replace_dict, func_prefix=""
+):
r"""
Generate code for print wrapper functions and return the generated code as a string.
@@ -1919,9 +1993,9 @@
for func_name in func_names:
if func_name in stderr_func_names:
- replace_dict['output_stream'] = "stderr"
+ replace_dict["output_stream"] = "stderr"
else:
- replace_dict['output_stream'] = "stdout"
+ replace_dict["output_stream"] = "stdout"
s_func_name = "s" + func_name
q_func_name = "q" + func_name
@@ -1929,32 +2003,48 @@
# We don't want to try to redefine the "print" function, thus the following if statement.
if func_name != "print":
- func_def = create_func_def_string(s_func_name,
- func_prefix + func_name,
- print_func_template,
- replace_dict)
+ func_def = create_func_def_string(
+ s_func_name,
+ func_prefix + func_name,
+ print_func_template,
+ replace_dict,
+ )
buffer += func_def
- func_def = create_func_def_string(s_func_name,
- func_prefix + "q" + func_name,
- qprint_func_template, replace_dict)
+ func_def = create_func_def_string(
+ s_func_name,
+ func_prefix + "q" + func_name,
+ qprint_func_template,
+ replace_dict,
+ )
buffer += func_def
- func_def = create_func_def_string(s_func_name,
- func_prefix + "d" + func_name,
- dprint_func_template, replace_dict)
+ func_def = create_func_def_string(
+ s_func_name,
+ func_prefix + "d" + func_name,
+ dprint_func_template,
+ replace_dict,
+ )
buffer += func_def
- func_def = create_func_def_string(s_func_name,
- func_prefix + "l" + func_name,
- lprint_func_template, replace_dict)
+ func_def = create_func_def_string(
+ s_func_name,
+ func_prefix + "l" + func_name,
+ lprint_func_template,
+ replace_dict,
+ )
buffer += func_def
# Create abbreviated aliases (e.g. spvar is an alias for sprint_var).
alias = re.sub("print_", "p", func_name)
alias = re.sub("print", "p", alias)
- prefixes = [func_prefix + "", "s", func_prefix + "q",
- func_prefix + "d", func_prefix + "l"]
+ prefixes = [
+ func_prefix + "",
+ "s",
+ func_prefix + "q",
+ func_prefix + "d",
+ func_prefix + "l",
+ ]
for prefix in prefixes:
if alias == "p":
continue
@@ -1984,49 +2074,61 @@
# means use of the logging module. For robot programs it means use of the BuiltIn().log() function.
# Templates for the various print wrapper functions.
-print_func_template = \
- [
- " <mod_qualifier>gp_print(<mod_qualifier>replace_passwords("
- + "<call_line>), stream='<output_stream>')"
- ]
+print_func_template = [
+ " <mod_qualifier>gp_print(<mod_qualifier>replace_passwords("
+ + "<call_line>), stream='<output_stream>')"
+]
-qprint_func_template = \
- [
- " quiet = <mod_qualifier>get_stack_var(\"quiet\", 0)",
- " if int(quiet): return"
- ] + print_func_template
+qprint_func_template = [
+ ' quiet = <mod_qualifier>get_stack_var("quiet", 0)',
+ " if int(quiet): return",
+] + print_func_template
-dprint_func_template = \
- [
- " debug = <mod_qualifier>get_stack_var(\"debug\", 0)",
- " if not int(debug): return"
- ] + print_func_template
+dprint_func_template = [
+ ' debug = <mod_qualifier>get_stack_var("debug", 0)',
+ " if not int(debug): return",
+] + print_func_template
-lprint_func_template = \
- [
- " <mod_qualifier>set_last_seconds_ix(<mod_qualifier>"
- + "lprint_last_seconds_ix())",
- " <mod_qualifier>gp_log(<mod_qualifier>replace_passwords"
- + "(<call_line>))",
- " <mod_qualifier>set_last_seconds_ix(<mod_qualifier>"
- + "standard_print_last_seconds_ix())"
- ]
+lprint_func_template = [
+ " <mod_qualifier>set_last_seconds_ix(<mod_qualifier>"
+ + "lprint_last_seconds_ix())",
+ " <mod_qualifier>gp_log(<mod_qualifier>replace_passwords"
+ + "(<call_line>))",
+ " <mod_qualifier>set_last_seconds_ix(<mod_qualifier>"
+ + "standard_print_last_seconds_ix())",
+]
-replace_dict = {'output_stream': 'stdout', 'mod_qualifier': ''}
+replace_dict = {"output_stream": "stdout", "mod_qualifier": ""}
gp_debug_print("robot_env: " + str(robot_env) + "\n")
# func_names contains a list of all print functions which should be created from their sprint counterparts.
-func_names = ['print_time', 'print_timen', 'print_error', 'print_varx',
- 'print_var', 'print_vars', 'print_dashes', 'indent',
- 'print_call_stack', 'print_func_name', 'print_executing',
- 'print_pgm_header', 'print_issuing', 'print_pgm_footer',
- 'print_file', 'print_error_report', 'print', 'printn']
+func_names = [
+ "print_time",
+ "print_timen",
+ "print_error",
+ "print_varx",
+ "print_var",
+ "print_vars",
+ "print_dashes",
+ "indent",
+ "print_call_stack",
+ "print_func_name",
+ "print_executing",
+ "print_pgm_header",
+ "print_issuing",
+ "print_pgm_footer",
+ "print_file",
+ "print_error_report",
+ "print",
+ "printn",
+]
# stderr_func_names is a list of functions whose output should go to stderr rather than stdout.
-stderr_func_names = ['print_error', 'print_error_report']
+stderr_func_names = ["print_error", "print_error_report"]
-func_defs = create_print_wrapper_funcs(func_names, stderr_func_names,
- replace_dict)
+func_defs = create_print_wrapper_funcs(
+ func_names, stderr_func_names, replace_dict
+)
gp_debug_print(func_defs)
exec(func_defs)
diff --git a/lib/gen_robot_keyword.py b/lib/gen_robot_keyword.py
index f4b2e73..304a836 100755
--- a/lib/gen_robot_keyword.py
+++ b/lib/gen_robot_keyword.py
@@ -8,10 +8,7 @@
from robot.libraries.BuiltIn import BuiltIn
-def run_key(keyword_buf,
- quiet=None,
- test_mode=None,
- ignore=0):
+def run_key(keyword_buf, quiet=None, test_mode=None, ignore=0):
r"""
Run the given keyword, return the status and the keyword return values.
@@ -47,28 +44,29 @@
ignore = int(ignore)
# Convert the keyword_buf into a list split wherever 2 or more spaces are found.
- keyword_list = keyword_buf.split(' ')
+ keyword_list = keyword_buf.split(" ")
# Strip spaces from each argument to make the output look clean and uniform.
- keyword_list = [item.strip(' ') for item in keyword_list]
+ keyword_list = [item.strip(" ") for item in keyword_list]
if not quiet:
# Join the list back into keyword_buf for the sake of output.
- keyword_buf = ' '.join(keyword_list)
+ keyword_buf = " ".join(keyword_list)
gp.pissuing(keyword_buf, test_mode)
if test_mode:
- return 'PASS', ""
+ return "PASS", ""
try:
- status, ret_values = \
- BuiltIn().run_keyword_and_ignore_error(*keyword_list)
+ status, ret_values = BuiltIn().run_keyword_and_ignore_error(
+ *keyword_list
+ )
except Exception as my_assertion_error:
status = "FAIL"
ret_values = my_assertion_error.args[0]
- if status != 'PASS':
+ if status != "PASS":
# Output the error message to stderr.
- BuiltIn().log_to_console(ret_values, stream='STDERR')
+ BuiltIn().log_to_console(ret_values, stream="STDERR")
if not ignore:
# Fail with the given error message.
BuiltIn().fail(ret_values)
@@ -76,9 +74,7 @@
return status, ret_values
-def run_key_u(keyword_buf,
- quiet=None,
- ignore=0):
+def run_key_u(keyword_buf, quiet=None, ignore=0):
r"""
Run keyword unconditionally (i.e. without regard to global test_mode setting).
diff --git a/lib/gen_robot_plug_in.py b/lib/gen_robot_plug_in.py
index 0f6deda..77a1f35 100755
--- a/lib/gen_robot_plug_in.py
+++ b/lib/gen_robot_plug_in.py
@@ -4,19 +4,18 @@
This module provides functions which are useful for running plug-ins from a robot program.
"""
-import sys
-import subprocess
-from robot.libraries.BuiltIn import BuiltIn
import os
+import subprocess
+import sys
import tempfile
-import gen_print as gp
-import gen_misc as gm
import gen_cmd as gc
+import gen_misc as gm
+import gen_print as gp
+from robot.libraries.BuiltIn import BuiltIn
-def rvalidate_plug_ins(plug_in_dir_paths,
- quiet=1):
+def rvalidate_plug_ins(plug_in_dir_paths, quiet=1):
r"""
Call the external validate_plug_ins.py program which validates the plug-in dir paths given to it. Return
a list containing a normalized path for each plug-in selected.
@@ -27,11 +26,15 @@
stdout.
"""
- cmd_buf = "validate_plug_ins.py \"" + plug_in_dir_paths + "\""
+ cmd_buf = 'validate_plug_ins.py "' + plug_in_dir_paths + '"'
rc, out_buf = gc.shell_cmd(cmd_buf, print_output=0)
if rc != 0:
- BuiltIn().fail(gp.sprint_error("Validate plug ins call failed. See"
- + " stderr text for details.\n"))
+ BuiltIn().fail(
+ gp.sprint_error(
+ "Validate plug ins call failed. See"
+ + " stderr text for details.\n"
+ )
+ )
# plug_in_packages_list = out_buf.split("\n")
plug_in_packages_list = list(filter(None, out_buf.split("\n")))
@@ -41,15 +44,17 @@
return plug_in_packages_list
-def rprocess_plug_in_packages(plug_in_packages_list=None,
- call_point="setup",
- shell_rc="0x00000000",
- stop_on_plug_in_failure=1,
- stop_on_non_zero_rc=0,
- release_type="obmc",
- quiet=None,
- debug=None,
- return_history=False):
+def rprocess_plug_in_packages(
+ plug_in_packages_list=None,
+ call_point="setup",
+ shell_rc="0x00000000",
+ stop_on_plug_in_failure=1,
+ stop_on_non_zero_rc=0,
+ release_type="obmc",
+ quiet=None,
+ debug=None,
+ return_history=False,
+):
r"""
Call the external process_plug_in_packages.py to process the plug-in packages. Return the following:
rc The return code - 0 = PASS, 1 = FAIL.
@@ -110,7 +115,7 @@
debug = int(gp.get_var_value(debug, 0))
# Create string from list.
- plug_in_dir_paths = ':'.join(plug_in_packages_list)
+ plug_in_dir_paths = ":".join(plug_in_packages_list)
temp = tempfile.NamedTemporaryFile()
temp_file_path = temp.name
@@ -125,23 +130,37 @@
loc_shell_rc = 0
- sub_cmd_buf = "process_plug_in_packages.py" + debug_string +\
- " --call_point=" + call_point + " --allow_shell_rc=" +\
- str(shell_rc) + " --stop_on_plug_in_failure=" +\
- str(stop_on_plug_in_failure) + " --stop_on_non_zero_rc=" +\
- str(stop_on_non_zero_rc) + " " + plug_in_dir_paths
+ sub_cmd_buf = (
+ "process_plug_in_packages.py"
+ + debug_string
+ + " --call_point="
+ + call_point
+ + " --allow_shell_rc="
+ + str(shell_rc)
+ + " --stop_on_plug_in_failure="
+ + str(stop_on_plug_in_failure)
+ + " --stop_on_non_zero_rc="
+ + str(stop_on_non_zero_rc)
+ + " "
+ + plug_in_dir_paths
+ )
if quiet:
cmd_buf = sub_cmd_buf + " > " + temp_file_path + " 2>&1"
else:
- cmd_buf = "set -o pipefail ; " + sub_cmd_buf + " 2>&1 | tee " +\
- temp_file_path
+ cmd_buf = (
+ "set -o pipefail ; "
+ + sub_cmd_buf
+ + " 2>&1 | tee "
+ + temp_file_path
+ )
if debug:
gp.print_issuing(cmd_buf)
else:
- gp.print_timen("Processing " + call_point
- + " call point programs.")
+ gp.print_timen(
+ "Processing " + call_point + " call point programs."
+ )
- sub_proc = subprocess.Popen(cmd_buf, shell=True, executable='/bin/bash')
+ sub_proc = subprocess.Popen(cmd_buf, shell=True, executable="/bin/bash")
sub_proc.communicate()
proc_plug_pkg_rc = sub_proc.returncode
@@ -149,8 +168,13 @@
# Get the "Running" statements from the output.
regex = " Running [^/]+/cp_"
cmd_buf = "egrep '" + regex + "' " + temp_file_path
- _, history = gc.shell_cmd(cmd_buf, quiet=(not debug), print_output=0,
- show_err=0, ignore_err=1)
+ _, history = gc.shell_cmd(
+ cmd_buf,
+ quiet=(not debug),
+ print_output=0,
+ show_err=0,
+ ignore_err=1,
+ )
history = [x + "\n" for x in filter(None, history.split("\n"))]
else:
history = []
@@ -167,8 +191,14 @@
# - Zero or more spaces
bash_var_regex = "[_[:alpha:]][_[:alnum:]]*"
regex = "^" + bash_var_regex + ":[ ]*"
- cmd_buf = "egrep '" + regex + "' " + temp_file_path + " > " +\
- temp_properties_file_path
+ cmd_buf = (
+ "egrep '"
+ + regex
+ + "' "
+ + temp_file_path
+ + " > "
+ + temp_properties_file_path
+ )
gp.dprint_issuing(cmd_buf)
grep_rc = os.system(cmd_buf)
@@ -176,8 +206,8 @@
properties = gm.my_parm_file(temp_properties_file_path)
# Finally, we access the 2 values that we need.
- shell_rc = int(properties.get('shell_rc', '0x0000000000000000'), 16)
- failed_plug_in_name = properties.get('failed_plug_in_name', '')
+ shell_rc = int(properties.get("shell_rc", "0x0000000000000000"), 16)
+ failed_plug_in_name = properties.get("failed_plug_in_name", "")
if proc_plug_pkg_rc != 0:
if quiet:
@@ -186,9 +216,13 @@
gp.print_var(grep_rc, gp.hexa())
gp.print_var(proc_plug_pkg_rc, gp.hexa())
gp.print_timen("Re-cap of plug-in failures:")
- gc.cmd_fnc_u("egrep -A 1 '^failed_plug_in_name:[ ]+' "
- + temp_properties_file_path + " | egrep -v '^\\--'",
- quiet=1, show_err=0)
+ gc.cmd_fnc_u(
+ "egrep -A 1 '^failed_plug_in_name:[ ]+' "
+ + temp_properties_file_path
+ + " | egrep -v '^\\--'",
+ quiet=1,
+ show_err=0,
+ )
rc = 1
if return_history:
diff --git a/lib/gen_robot_print.py b/lib/gen_robot_print.py
index fb958e0..b0e6a94 100755
--- a/lib/gen_robot_print.py
+++ b/lib/gen_robot_print.py
@@ -4,15 +4,14 @@
This file contains functions useful for printing to stdout from robot programs.
"""
-import re
import os
+import re
-import gen_print as gp
import func_args as fa
-
+import gen_print as gp
from robot.libraries.BuiltIn import BuiltIn
-gen_robot_print_debug = int(os.environ.get('GEN_ROBOT_PRINT_DEBUG', '0'))
+gen_robot_print_debug = int(os.environ.get("GEN_ROBOT_PRINT_DEBUG", "0"))
def sprint_vars(*args, **kwargs):
@@ -28,15 +27,14 @@
kwargs See sprint_varx in gen_print.py for descriptions of all other arguments.
"""
- if 'fmt' in kwargs:
+ if "fmt" in kwargs:
# Find format option names in kwargs['fmt'] and wrap them with "gp." and "()" to make them into
# function calls. For example, verbose would be converted to "gp.verbose()". This allows the user
# to simply specify "fmt=verbose" (vs. fmt=gp.verbose()).
# Note "terse" has been explicitly added for backward compatibility. Once the repo has been purged
# of its use, this code can return to its original form.
regex = "(" + "|".join(gp.valid_fmts()) + "|terse)"
- kwargs['fmt'] = \
- re.sub(regex, "gp.\\1()", kwargs['fmt'])
+ kwargs["fmt"] = re.sub(regex, "gp.\\1()", kwargs["fmt"])
kwargs = fa.args_to_objects(kwargs)
buffer = ""
for var_name in args:
@@ -61,15 +59,32 @@
buffer += gp.sprint_dashes()
buffer += "Automatic Variables:"
- buffer += \
- sprint_vars(
- "TEST_NAME", "TEST_TAGS", "TEST_DOCUMENTATION", "TEST_STATUS",
- "TEST_DOCUMENTATION", "TEST_STATUS", "TEST_MESSAGE",
- "PREV_TEST_NAME", "PREV_TEST_STATUS", "PREV_TEST_MESSAGE",
- "SUITE_NAME", "SUITE_SOURCE", "SUITE_DOCUMENTATION",
- "SUITE_METADATA", "SUITE_STATUS", "SUITE_MESSAGE",
- "KEYWORD_STATUS", "KEYWORD_MESSAGE", "LOG_LEVEL", "OUTPUT_FILE",
- "LOG_FILE", "REPORT_FILE", "DEBUG_FILE", "OUTPUT_DIR")
+ buffer += sprint_vars(
+ "TEST_NAME",
+ "TEST_TAGS",
+ "TEST_DOCUMENTATION",
+ "TEST_STATUS",
+ "TEST_DOCUMENTATION",
+ "TEST_STATUS",
+ "TEST_MESSAGE",
+ "PREV_TEST_NAME",
+ "PREV_TEST_STATUS",
+ "PREV_TEST_MESSAGE",
+ "SUITE_NAME",
+ "SUITE_SOURCE",
+ "SUITE_DOCUMENTATION",
+ "SUITE_METADATA",
+ "SUITE_STATUS",
+ "SUITE_MESSAGE",
+ "KEYWORD_STATUS",
+ "KEYWORD_MESSAGE",
+ "LOG_LEVEL",
+ "OUTPUT_FILE",
+ "LOG_FILE",
+ "REPORT_FILE",
+ "DEBUG_FILE",
+ "OUTPUT_DIR",
+ )
if int(headers) == 1:
buffer += gp.sprint_dashes()
@@ -120,20 +135,19 @@
# full names.
# Rprint Vars (instead of Rpvars)
-replace_dict = {'output_stream': 'stdout', 'mod_qualifier': 'gp.'}
+replace_dict = {"output_stream": "stdout", "mod_qualifier": "gp."}
gp_debug_print("gp.robot_env: " + str(gp.robot_env) + "\n")
# func_names contains a list of all rprint functions which should be created from their sprint counterparts.
-func_names = [
- 'print_vars', 'print_auto_vars'
-]
+func_names = ["print_vars", "print_auto_vars"]
# stderr_func_names is a list of functions whose output should go to stderr rather than stdout.
stderr_func_names = []
-func_defs = gp.create_print_wrapper_funcs(func_names, stderr_func_names,
- replace_dict, "r")
+func_defs = gp.create_print_wrapper_funcs(
+ func_names, stderr_func_names, replace_dict, "r"
+)
gp_debug_print(func_defs)
exec(func_defs)
diff --git a/lib/gen_robot_ssh.py b/lib/gen_robot_ssh.py
index b08d470..5cc2913 100755
--- a/lib/gen_robot_ssh.py
+++ b/lib/gen_robot_ssh.py
@@ -4,28 +4,29 @@
This module provides many valuable ssh functions such as sprint_connection, execute_ssh_command, etc.
"""
+import re
+import socket
+import sys
+import traceback
+
+import paramiko
from robot.libraries.BuiltIn import BuiltIn
from SSHLibrary import SSHLibrary
-import sys
-import traceback
-import re
-import socket
-import paramiko
try:
import exceptions
except ImportError:
import builtins as exceptions
-import gen_print as gp
import func_timer as ft
+import gen_print as gp
+
func_timer = ft.func_timer_class()
sshlib = SSHLibrary()
-def sprint_connection(connection,
- indent=0):
+def sprint_connection(connection, indent=0):
r"""
sprint data from the connection object to a string and return it.
@@ -47,15 +48,15 @@
buffer += gp.sprint_varx("term_type", connection.term_type, 0, indent)
buffer += gp.sprint_varx("width", connection.width, 0, indent)
buffer += gp.sprint_varx("height", connection.height, 0, indent)
- buffer += gp.sprint_varx("path_separator", connection.path_separator, 0,
- indent)
+ buffer += gp.sprint_varx(
+ "path_separator", connection.path_separator, 0, indent
+ )
buffer += gp.sprint_varx("encoding", connection.encoding, 0, indent)
return buffer
-def sprint_connections(connections=None,
- indent=0):
+def sprint_connections(connections=None, indent=0):
r"""
sprint data from the connections list to a string and return it.
@@ -91,16 +92,16 @@
for connection in sshlib.get_connections():
# Create connection_dict from connection object.
- connection_dict = dict((key, str(value)) for key, value in
- connection._config.items())
+ connection_dict = dict(
+ (key, str(value)) for key, value in connection._config.items()
+ )
if dict(connection_dict, **open_connection_args) == connection_dict:
return connection
return False
-def login_ssh(login_args={},
- max_login_attempts=5):
+def login_ssh(login_args={}, max_login_attempts=5):
r"""
Login on the latest open SSH connection. Retry on failure up to max_login_attempts.
@@ -132,8 +133,10 @@
except_type, except_value, except_traceback = sys.exc_info()
gp.lprint_var(except_type)
gp.lprint_varx("except_value", str(except_value))
- if except_type is paramiko.ssh_exception.SSHException and\
- re.match(r"No existing session", str(except_value)):
+ if (
+ except_type is paramiko.ssh_exception.SSHException
+ and re.match(r"No existing session", str(except_value))
+ ):
continue
else:
# We don't tolerate any other error so break from loop and re-raise exception.
@@ -146,16 +149,18 @@
raise (except_value)
-def execute_ssh_command(cmd_buf,
- open_connection_args={},
- login_args={},
- print_out=0,
- print_err=0,
- ignore_err=1,
- fork=0,
- quiet=None,
- test_mode=None,
- time_out=None):
+def execute_ssh_command(
+ cmd_buf,
+ open_connection_args={},
+ login_args={},
+ print_out=0,
+ print_err=0,
+ ignore_err=1,
+ fork=0,
+ quiet=None,
+ test_mode=None,
+ time_out=None,
+):
r"""
Run the given command in an SSH session and return the stdout, stderr and the return code.
@@ -221,11 +226,12 @@
index_or_alias = connection.index
else:
index_or_alias = connection.alias
- gp.lprint_timen("Switching to existing connection: \""
- + str(index_or_alias) + "\".")
+ gp.lprint_timen(
+ 'Switching to existing connection: "' + str(index_or_alias) + '".'
+ )
sshlib.switch_connection(index_or_alias)
else:
- gp.lprint_timen("Connecting to " + open_connection_args['host'] + ".")
+ gp.lprint_timen("Connecting to " + open_connection_args["host"] + ".")
cix = sshlib.open_connection(**open_connection_args)
try:
login_ssh(login_args)
@@ -242,18 +248,19 @@
if fork:
sshlib.start_command(cmd_buf)
else:
- if open_connection_args['alias'] == "device_connection":
+ if open_connection_args["alias"] == "device_connection":
stdout = sshlib.write(cmd_buf)
stderr = ""
rc = 0
else:
- stdout, stderr, rc = \
- func_timer.run(sshlib.execute_command,
- cmd_buf,
- return_stdout=True,
- return_stderr=True,
- return_rc=True,
- time_out=time_out)
+ stdout, stderr, rc = func_timer.run(
+ sshlib.execute_command,
+ cmd_buf,
+ return_stdout=True,
+ return_stderr=True,
+ return_rc=True,
+ time_out=time_out,
+ )
BuiltIn().log_to_console(stdout)
except Exception:
except_type, except_value, except_traceback = sys.exc_info()
@@ -265,30 +272,47 @@
stderr = str(except_value)
stdout = ""
- if except_type is exceptions.AssertionError and\
- re.match(r"Connection not open", str(except_value)):
+ if except_type is exceptions.AssertionError and re.match(
+ r"Connection not open", str(except_value)
+ ):
try:
login_ssh(login_args)
# Now we must continue to next loop iteration to retry the
# execute_command.
continue
except Exception:
- except_type, except_value, except_traceback =\
- sys.exc_info()
+ (
+ except_type,
+ except_value,
+ except_traceback,
+ ) = sys.exc_info()
rc = 1
stderr = str(except_value)
stdout = ""
break
- if (except_type is paramiko.ssh_exception.SSHException
- and re.match(r"SSH session not active", str(except_value))) or\
- ((except_type is socket.error
- or except_type is ConnectionResetError)
- and re.match(r"\[Errno 104\] Connection reset by peer",
- str(except_value))) or\
- (except_type is paramiko.ssh_exception.SSHException
- and re.match(r"Timeout opening channel\.",
- str(except_value))):
+ if (
+ (
+ except_type is paramiko.ssh_exception.SSHException
+ and re.match(r"SSH session not active", str(except_value))
+ )
+ or (
+ (
+ except_type is socket.error
+ or except_type is ConnectionResetError
+ )
+ and re.match(
+ r"\[Errno 104\] Connection reset by peer",
+ str(except_value),
+ )
+ )
+ or (
+ except_type is paramiko.ssh_exception.SSHException
+ and re.match(
+ r"Timeout opening channel\.", str(except_value)
+ )
+ )
+ ):
# Close and re-open a connection.
# Note: close_connection() doesn't appear to get rid of the
# connection. It merely closes it. Since there is a concern
@@ -297,8 +321,9 @@
# connections.
gp.lprint_timen("Closing all connections.")
sshlib.close_all_connections()
- gp.lprint_timen("Connecting to "
- + open_connection_args['host'] + ".")
+ gp.lprint_timen(
+ "Connecting to " + open_connection_args["host"] + "."
+ )
cix = sshlib.open_connection(**open_connection_args)
login_ssh(login_args)
continue
@@ -324,13 +349,16 @@
gp.printn(stderr + stdout)
if not ignore_err:
- message = gp.sprint_error("The prior SSH"
- + " command returned a non-zero return"
- + " code:\n"
- + gp.sprint_var(rc, gp.hexa()) + stderr
- + "\n")
+ message = gp.sprint_error(
+ "The prior SSH"
+ + " command returned a non-zero return"
+ + " code:\n"
+ + gp.sprint_var(rc, gp.hexa())
+ + stderr
+ + "\n"
+ )
BuiltIn().should_be_equal(rc, 0, message)
- if open_connection_args['alias'] == "device_connection":
+ if open_connection_args["alias"] == "device_connection":
return stdout
return stdout, stderr, rc
diff --git a/lib/gen_robot_utils.py b/lib/gen_robot_utils.py
index bd61a87..07ff1b4 100644
--- a/lib/gen_robot_utils.py
+++ b/lib/gen_robot_utils.py
@@ -6,6 +6,7 @@
"""
import re
+
from robot.libraries.BuiltIn import BuiltIn
@@ -70,5 +71,6 @@
if key in pre_var_dict:
if value != pre_var_dict[key]:
global_var_name = re.sub("[@&]", "$", key)
- BuiltIn().set_global_variable(global_var_name,
- pre_var_dict[key])
+ BuiltIn().set_global_variable(
+ global_var_name, pre_var_dict[key]
+ )
diff --git a/lib/gen_robot_valid.py b/lib/gen_robot_valid.py
index 5580a5e..d1e8d23 100755
--- a/lib/gen_robot_valid.py
+++ b/lib/gen_robot_valid.py
@@ -5,10 +5,10 @@
"""
import re
+
+import func_args as fa
import gen_print as gp
import gen_valid as gv
-import func_args as fa
-
from robot.libraries.BuiltIn import BuiltIn
@@ -27,8 +27,9 @@
var_value = BuiltIn().get_variable_value("${" + var_name + "}")
if var_value is None:
var_value = "<undefined>"
- error_message = gv.valid_value(var_value, invalid_values=[var_value],
- var_name=var_name)
+ error_message = gv.valid_value(
+ var_value, invalid_values=[var_value], var_name=var_name
+ )
BuiltIn().fail(error_message)
return var_value
@@ -73,8 +74,7 @@
# The docstring header will be pre-pended to each validation function's existing docstring.
-docstring_header = \
- r"""
+docstring_header = r"""
Fail if the variable named by var_name is invalid.
"""
@@ -105,12 +105,19 @@
start_ix = 0
# Find the "var_value" line.
- start_ix = next((index for index, value in
- enumerate(doc_string[start_ix:], start_ix)
- if re.match("[ ]+var_value ", value)), None)
+ start_ix = next(
+ (
+ index
+ for index, value in enumerate(doc_string[start_ix:], start_ix)
+ if re.match("[ ]+var_value ", value)
+ ),
+ None,
+ )
# Replace the "var_value" line with our "var_name" line.
- doc_string[start_ix] = " var_name " \
+ doc_string[start_ix] = (
+ " var_name "
+ "The name of the variable to be validated."
+ )
return "\n".join(doc_string)
@@ -120,121 +127,134 @@
# the gv.<function name> which they call. Also, note that the docstring for each is created by modifying the
# docstring from the supporting gen_valid.py function.
-def valid_type(var_name, *args, **kwargs):
+def valid_type(var_name, *args, **kwargs):
var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
- error_message = \
- gv.valid_type(var_value, *args, var_name=var_name, **kwargs)
+ error_message = gv.valid_type(
+ var_value, *args, var_name=var_name, **kwargs
+ )
process_error_message(error_message)
def valid_value(var_name, *args, **kwargs):
-
var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
- error_message = \
- gv.valid_value(var_value, *args, var_name=var_name, **kwargs)
+ error_message = gv.valid_value(
+ var_value, *args, var_name=var_name, **kwargs
+ )
process_error_message(error_message)
def valid_range(var_name, *args, **kwargs):
-
var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
- error_message = \
- gv.valid_range(var_value, *args, var_name=var_name, **kwargs)
+ error_message = gv.valid_range(
+ var_value, *args, var_name=var_name, **kwargs
+ )
process_error_message(error_message)
def valid_integer(var_name, *args, **kwargs):
-
var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
- error_message = \
- gv.valid_integer(var_value, *args, var_name=var_name, **kwargs)
+ error_message = gv.valid_integer(
+ var_value, *args, var_name=var_name, **kwargs
+ )
process_error_message(error_message)
def valid_float(var_name, *args, **kwargs):
-
var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
- error_message = \
- gv.valid_float(var_value, *args, var_name=var_name, **kwargs)
+ error_message = gv.valid_float(
+ var_value, *args, var_name=var_name, **kwargs
+ )
process_error_message(error_message)
def valid_date_time(var_name, *args, **kwargs):
-
var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
- error_message = \
- gv.valid_date_time(var_value, *args, var_name=var_name, **kwargs)
+ error_message = gv.valid_date_time(
+ var_value, *args, var_name=var_name, **kwargs
+ )
process_error_message(error_message)
def valid_dir_path(var_name, *args, **kwargs):
-
var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
- error_message = \
- gv.valid_dir_path(var_value, *args, var_name=var_name, **kwargs)
+ error_message = gv.valid_dir_path(
+ var_value, *args, var_name=var_name, **kwargs
+ )
process_error_message(error_message)
def valid_file_path(var_name, *args, **kwargs):
-
var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
- error_message = \
- gv.valid_file_path(var_value, *args, var_name=var_name, **kwargs)
+ error_message = gv.valid_file_path(
+ var_value, *args, var_name=var_name, **kwargs
+ )
process_error_message(error_message)
def valid_path(var_name, *args, **kwargs):
-
var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
- error_message = \
- gv.valid_path(var_value, *args, var_name=var_name, **kwargs)
+ error_message = gv.valid_path(
+ var_value, *args, var_name=var_name, **kwargs
+ )
process_error_message(error_message)
def valid_list(var_name, *args, **kwargs):
-
var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
- error_message = \
- gv.valid_list(var_value, *args, var_name=var_name, **kwargs)
+ error_message = gv.valid_list(
+ var_value, *args, var_name=var_name, **kwargs
+ )
process_error_message(error_message)
def valid_dict(var_name, *args, **kwargs):
-
var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
- error_message = \
- gv.valid_dict(var_value, *args, var_name=var_name, **kwargs)
+ error_message = gv.valid_dict(
+ var_value, *args, var_name=var_name, **kwargs
+ )
process_error_message(error_message)
def valid_program(var_name, *args, **kwargs):
-
var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
- error_message = \
- gv.valid_program(var_value, *args, var_name=var_name, **kwargs)
+ error_message = gv.valid_program(
+ var_value, *args, var_name=var_name, **kwargs
+ )
process_error_message(error_message)
def valid_length(var_name, *args, **kwargs):
-
var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
- error_message = \
- gv.valid_length(var_value, *args, var_name=var_name, **kwargs)
+ error_message = gv.valid_length(
+ var_value, *args, var_name=var_name, **kwargs
+ )
process_error_message(error_message)
# Modify the validation function docstrings by calling customize_doc_string for each function in the
# func_names list.
func_names = [
- "valid_type", "valid_value", "valid_range", "valid_integer",
- "valid_dir_path", "valid_file_path", "valid_path", "valid_list",
- "valid_dict", "valid_program", "valid_length", "valid_float",
- "valid_date_time"
+ "valid_type",
+ "valid_value",
+ "valid_range",
+ "valid_integer",
+ "valid_dir_path",
+ "valid_file_path",
+ "valid_path",
+ "valid_list",
+ "valid_dict",
+ "valid_program",
+ "valid_length",
+ "valid_float",
+ "valid_date_time",
]
for func_name in func_names:
- cmd_buf = func_name \
- + ".__doc__ = customize_doc_string(gv.raw_doc_strings['" \
- + func_name + "'])"
+ cmd_buf = (
+ func_name
+ + ".__doc__ = customize_doc_string(gv.raw_doc_strings['"
+ + func_name
+ + "'])"
+ )
exec(cmd_buf)
diff --git a/lib/gen_valid.py b/lib/gen_valid.py
index 57c1a1d..a422e0f 100755
--- a/lib/gen_valid.py
+++ b/lib/gen_valid.py
@@ -4,11 +4,12 @@
This module provides validation functions like valid_value(), valid_integer(), etc.
"""
-import os
-import gen_print as gp
-import gen_cmd as gc
-import func_args as fa
import datetime
+import os
+
+import func_args as fa
+import gen_cmd as gc
+import gen_print as gp
exit_on_error = False
@@ -134,8 +135,7 @@
# The docstring header and footer will be added to each validation function's existing docstring.
-docstring_header = \
- r"""
+docstring_header = r"""
Determine whether var_value is valid, construct an error_message and call
process_error_message(error_message).
@@ -143,8 +143,7 @@
are processed.
"""
-additional_args_docstring_footer = \
- r"""
+additional_args_docstring_footer = r"""
var_name The name of the variable whose value is passed in var_value. For the
general case, this argument is unnecessary as this function can figure
out the var_name. This is provided for Robot callers in which case, this
@@ -178,8 +177,9 @@
# If we get to this point, the validation has failed.
var_name = get_var_name(var_name)
error_message += "Invalid variable type:\n"
- error_message += gp.sprint_varx(var_name, var_value,
- gp.blank() | gp.show_type())
+ error_message += gp.sprint_varx(
+ var_name, var_value, gp.blank() | gp.show_type()
+ )
error_message += "\n"
error_message += gp.sprint_var(required_type)
@@ -187,7 +187,6 @@
def valid_value(var_value, valid_values=[], invalid_values=[], var_name=None):
-
r"""
The variable value is valid if it is either contained in the valid_values list or if it is NOT contained
in the invalid_values list. If the caller specifies nothing for either of these 2 arguments,
@@ -230,11 +229,11 @@
error_message += gp.sprint_var(valid_values)
return process_error_message(error_message)
- error_message = valid_type(valid_values, list, var_name='valid_values')
+ error_message = valid_type(valid_values, list, var_name="valid_values")
if error_message:
return process_error_message(error_message)
- error_message = valid_type(invalid_values, list, var_name='invalid_values')
+ error_message = valid_type(invalid_values, list, var_name="invalid_values")
if error_message:
return process_error_message(error_message)
@@ -244,14 +243,15 @@
return process_error_message(error_message)
var_name = get_var_name(var_name)
error_message += "Invalid variable value:\n"
- error_message += gp.sprint_varx(var_name, var_value,
- gp.blank() | gp.verbose()
- | gp.show_type())
+ error_message += gp.sprint_varx(
+ var_name, var_value, gp.blank() | gp.verbose() | gp.show_type()
+ )
error_message += "\n"
error_message += "It must be one of the following values:\n"
error_message += "\n"
- error_message += gp.sprint_var(valid_values,
- gp.blank() | gp.show_type())
+ error_message += gp.sprint_var(
+ valid_values, gp.blank() | gp.show_type()
+ )
return process_error_message(error_message)
if len_invalid_values == 0:
@@ -264,14 +264,13 @@
var_name = get_var_name(var_name)
error_message += "Invalid variable value:\n"
- error_message += gp.sprint_varx(var_name, var_value,
- gp.blank() | gp.verbose()
- | gp.show_type())
+ error_message += gp.sprint_varx(
+ var_name, var_value, gp.blank() | gp.verbose() | gp.show_type()
+ )
error_message += "\n"
error_message += "It must NOT be any of the following values:\n"
error_message += "\n"
- error_message += gp.sprint_var(invalid_values,
- gp.blank() | gp.show_type())
+ error_message += gp.sprint_var(invalid_values, gp.blank() | gp.show_type())
return process_error_message(error_message)
@@ -338,8 +337,9 @@
var_value = int(str(var_value), 0)
except ValueError:
error_message += "Invalid integer value:\n"
- error_message += gp.sprint_varx(var_name, var_value,
- gp.blank() | gp.show_type())
+ error_message += gp.sprint_varx(
+ var_name, var_value, gp.blank() | gp.show_type()
+ )
return process_error_message(error_message)
# Check the range (if any).
@@ -373,8 +373,9 @@
var_value = float(str(var_value))
except ValueError:
error_message += "Invalid float value:\n"
- error_message += gp.sprint_varx(var_name, var_value,
- gp.blank() | gp.show_type())
+ error_message += gp.sprint_varx(
+ var_name, var_value, gp.blank() | gp.show_type()
+ )
return process_error_message(error_message)
# Check the range (if any).
@@ -397,12 +398,15 @@
"""
error_message = ""
- rc, out_buf = gc.shell_cmd("date -d '" + str(var_value) + "'", quiet=1, show_err=0, ignore_err=1)
+ rc, out_buf = gc.shell_cmd(
+ "date -d '" + str(var_value) + "'", quiet=1, show_err=0, ignore_err=1
+ )
if rc:
var_name = get_var_name(var_name)
error_message += "Invalid date/time value:\n"
- error_message += gp.sprint_varx(var_name, var_value,
- gp.blank() | gp.show_type())
+ error_message += gp.sprint_varx(
+ var_name, var_value, gp.blank() | gp.show_type()
+ )
return process_error_message(error_message)
return process_error_message(error_message)
@@ -459,8 +463,14 @@
return process_error_message(error_message)
-def valid_list(var_value, valid_values=[], invalid_values=[],
- required_values=[], fail_on_empty=False, var_name=None):
+def valid_list(
+ var_value,
+ valid_values=[],
+ invalid_values=[],
+ required_values=[],
+ fail_on_empty=False,
+ var_name=None,
+):
r"""
The variable value is valid if it is a list where each entry can be found in the valid_values list or if
none of its values can be found in the invalid_values list or if all of the values in the required_values
@@ -483,7 +493,11 @@
error_message = ""
# Validate this function's arguments.
- if not (bool(len(valid_values)) ^ bool(len(invalid_values)) ^ bool(len(required_values))):
+ if not (
+ bool(len(valid_values))
+ ^ bool(len(invalid_values))
+ ^ bool(len(required_values))
+ ):
error_message += "Programmer error - You must provide only one of the"
error_message += " following: valid_values, invalid_values,"
error_message += " required_values.\n"
@@ -510,21 +524,25 @@
for ix in range(0, len(required_values)):
if required_values[ix] not in var_value:
found_error = 1
- display_required_values[ix] = \
+ display_required_values[ix] = (
str(display_required_values[ix]) + "*"
+ )
if found_error:
var_name = get_var_name(var_name)
error_message += "The following list is invalid:\n"
- error_message += gp.sprint_varx(var_name, var_value,
- gp.blank() | gp.show_type())
+ error_message += gp.sprint_varx(
+ var_name, var_value, gp.blank() | gp.show_type()
+ )
error_message += "\n"
error_message += "Because some of the values in the "
error_message += "required_values list are not present (see"
- error_message += " entries marked with \"*\"):\n"
+ error_message += ' entries marked with "*"):\n'
error_message += "\n"
- error_message += gp.sprint_varx('required_values',
- display_required_values,
- gp.blank() | gp.show_type())
+ error_message += gp.sprint_varx(
+ "required_values",
+ display_required_values,
+ gp.blank() | gp.show_type(),
+ )
error_message += "\n"
return process_error_message(error_message)
@@ -540,9 +558,10 @@
if found_error:
var_name = get_var_name(var_name)
error_message += "The following list is invalid (see entries"
- error_message += " marked with \"*\"):\n"
- error_message += gp.sprint_varx(var_name, display_var_value,
- gp.blank() | gp.show_type())
+ error_message += ' marked with "*"):\n'
+ error_message += gp.sprint_varx(
+ var_name, display_var_value, gp.blank() | gp.show_type()
+ )
error_message += "\n"
error_message += gp.sprint_var(invalid_values, gp.show_type())
return process_error_message(error_message)
@@ -557,9 +576,10 @@
if found_error:
var_name = get_var_name(var_name)
error_message += "The following list is invalid (see entries marked"
- error_message += " with \"*\"):\n"
- error_message += gp.sprint_varx(var_name, display_var_value,
- gp.blank() | gp.show_type())
+ error_message += ' with "*"):\n'
+ error_message += gp.sprint_varx(
+ var_name, display_var_value, gp.blank() | gp.show_type()
+ )
error_message += "\n"
error_message += gp.sprint_var(valid_values, gp.show_type())
return process_error_message(error_message)
@@ -567,7 +587,13 @@
return process_error_message(error_message)
-def valid_dict(var_value, required_keys=[], valid_values={}, invalid_values={}, var_name=None):
+def valid_dict(
+ var_value,
+ required_keys=[],
+ valid_values={},
+ invalid_values={},
+ var_name=None,
+):
r"""
The dictionary variable value is valid if it contains all required keys and each entry passes the
valid_value() call.
@@ -601,7 +627,9 @@
var_name = get_var_name(var_name)
error_message += "The following dictionary is invalid because it is"
error_message += " missing required keys:\n"
- error_message += gp.sprint_varx(var_name, var_value, gp.blank() | gp.show_type())
+ error_message += gp.sprint_varx(
+ var_name, var_value, gp.blank() | gp.show_type()
+ )
error_message += "\n"
error_message += gp.sprint_var(missing_keys, gp.show_type())
return process_error_message(error_message)
@@ -609,15 +637,24 @@
var_name = get_var_name(var_name)
if len(valid_values):
keys = valid_values.keys()
- error_message = valid_dict(var_value, required_keys=keys, var_name=var_name)
+ error_message = valid_dict(
+ var_value, required_keys=keys, var_name=var_name
+ )
if error_message:
return process_error_message(error_message)
for key, value in valid_values.items():
key_name = " [" + key + "]"
- sub_error_message = valid_value(var_value[key], valid_values=value, var_name=key_name)
+ sub_error_message = valid_value(
+ var_value[key], valid_values=value, var_name=key_name
+ )
if sub_error_message:
- error_message += "The following dictionary is invalid because one of its entries is invalid:\n"
- error_message += gp.sprint_varx(var_name, var_value, gp.blank() | gp.show_type())
+ error_message += (
+ "The following dictionary is invalid because one of its"
+ " entries is invalid:\n"
+ )
+ error_message += gp.sprint_varx(
+ var_name, var_value, gp.blank() | gp.show_type()
+ )
error_message += "\n"
error_message += sub_error_message
return process_error_message(error_message)
@@ -626,10 +663,17 @@
if key not in var_value:
continue
key_name = " [" + key + "]"
- sub_error_message = valid_value(var_value[key], invalid_values=value, var_name=key_name)
+ sub_error_message = valid_value(
+ var_value[key], invalid_values=value, var_name=key_name
+ )
if sub_error_message:
- error_message += "The following dictionary is invalid because one of its entries is invalid:\n"
- error_message += gp.sprint_varx(var_name, var_value, gp.blank() | gp.show_type())
+ error_message += (
+ "The following dictionary is invalid because one of its"
+ " entries is invalid:\n"
+ )
+ error_message += gp.sprint_varx(
+ var_name, var_value, gp.blank() | gp.show_type()
+ )
error_message += "\n"
error_message += sub_error_message
return process_error_message(error_message)
@@ -647,8 +691,9 @@
"""
error_message = ""
- rc, out_buf = gc.shell_cmd("which " + var_value, quiet=1, show_err=0,
- ignore_err=1)
+ rc, out_buf = gc.shell_cmd(
+ "which " + var_value, quiet=1, show_err=0, ignore_err=1
+ )
if rc:
var_name = get_var_name(var_name)
error_message += "The following required program could not be found"
@@ -692,10 +737,19 @@
# Modify selected function docstrings by adding headers/footers.
func_names = [
- "valid_type", "valid_value", "valid_range", "valid_integer",
- "valid_dir_path", "valid_file_path", "valid_path", "valid_list",
- "valid_dict", "valid_program", "valid_length", "valid_float",
- "valid_date_time"
+ "valid_type",
+ "valid_value",
+ "valid_range",
+ "valid_integer",
+ "valid_dir_path",
+ "valid_file_path",
+ "valid_path",
+ "valid_list",
+ "valid_dict",
+ "valid_program",
+ "valid_length",
+ "valid_float",
+ "valid_date_time",
]
raw_doc_strings = {}
@@ -705,5 +759,5 @@
cmd_buf += ".__doc__"
exec(cmd_buf)
cmd_buf = func_name + ".__doc__ = docstring_header + " + func_name
- cmd_buf += ".__doc__.rstrip(\" \\n\") + additional_args_docstring_footer"
+ cmd_buf += '.__doc__.rstrip(" \\n") + additional_args_docstring_footer'
exec(cmd_buf)
diff --git a/lib/ipmi_client.py b/lib/ipmi_client.py
index 7eb8f08..7d4e582 100644
--- a/lib/ipmi_client.py
+++ b/lib/ipmi_client.py
@@ -5,32 +5,32 @@
"""
import collections
-import gen_print as gp
+
import gen_cmd as gc
+import gen_print as gp
from robot.libraries.BuiltIn import BuiltIn
-
# Set default values for required IPMI options.
-ipmi_interface = 'lanplus'
-ipmi_cipher_suite = BuiltIn().get_variable_value("${IPMI_CIPHER_LEVEL}", '17')
-ipmi_timeout = BuiltIn().get_variable_value("${IPMI_TIMEOUT}", '3')
-ipmi_port = BuiltIn().get_variable_value("${IPMI_PORT}", '623')
+ipmi_interface = "lanplus"
+ipmi_cipher_suite = BuiltIn().get_variable_value("${IPMI_CIPHER_LEVEL}", "17")
+ipmi_timeout = BuiltIn().get_variable_value("${IPMI_TIMEOUT}", "3")
+ipmi_port = BuiltIn().get_variable_value("${IPMI_PORT}", "623")
ipmi_username = BuiltIn().get_variable_value("${IPMI_USERNAME}", "root")
ipmi_password = BuiltIn().get_variable_value("${IPMI_PASSWORD}", "0penBmc")
ipmi_host = BuiltIn().get_variable_value("${OPENBMC_HOST}")
# Create a list of the required IPMI options.
-ipmi_required_options = ['I', 'C', 'N', 'p', 'U', 'P', 'H']
+ipmi_required_options = ["I", "C", "N", "p", "U", "P", "H"]
# The following dictionary maps the ipmitool option names (e.g. "I") to our
# more descriptive names (e.g. "interface") for the required options.
ipmi_option_name_map = {
- 'I': 'interface',
- 'C': 'cipher_suite',
- 'N': 'timeout',
- 'p': 'port',
- 'U': 'username',
- 'P': 'password',
- 'H': 'host',
+ "I": "interface",
+ "C": "cipher_suite",
+ "N": "timeout",
+ "p": "port",
+ "U": "username",
+ "P": "password",
+ "H": "host",
}
@@ -78,7 +78,7 @@
else:
# The caller hasn't specified this required option so specify it
# for them using the global value.
- var_name = 'ipmi_' + ipmi_option_name_map[option]
+ var_name = "ipmi_" + ipmi_option_name_map[option]
value = eval(var_name)
new_options[option] = value
# Include the remainder of the caller's options in the new options
@@ -86,7 +86,7 @@
for key, value in options.items():
new_options[key] = value
- return gc.create_command_string('ipmitool', command, new_options)
+ return gc.create_command_string("ipmitool", command, new_options)
def verify_ipmi_user_parm_accepted():
@@ -99,11 +99,10 @@
global ipmi_required_options
print_output = 0
- command_string = create_ipmi_ext_command_string('power status')
- rc, stdout = gc.shell_cmd(command_string,
- print_output=print_output,
- show_err=0,
- ignore_err=1)
+ command_string = create_ipmi_ext_command_string("power status")
+ rc, stdout = gc.shell_cmd(
+ command_string, print_output=print_output, show_err=0, ignore_err=1
+ )
gp.qprint_var(rc, 1)
if rc == 0:
# The OBMC accepts the ipmitool "-U" option so new further work needs
@@ -112,13 +111,12 @@
# Remove the "U" option from ipmi_required_options to allow us to create a
# command string without the "U" option.
- if 'U' in ipmi_required_options:
- del ipmi_required_options[ipmi_required_options.index('U')]
- command_string = create_ipmi_ext_command_string('power status')
- rc, stdout = gc.shell_cmd(command_string,
- print_output=print_output,
- show_err=0,
- ignore_err=1)
+ if "U" in ipmi_required_options:
+ del ipmi_required_options[ipmi_required_options.index("U")]
+ command_string = create_ipmi_ext_command_string("power status")
+ rc, stdout = gc.shell_cmd(
+ command_string, print_output=print_output, show_err=0, ignore_err=1
+ )
gp.qprint_var(rc, 1)
if rc == 0:
# The "U" option has been removed from the ipmi_required_options
@@ -130,7 +128,7 @@
# Revert to original ipmi_required_options by inserting 'U' right before
# 'P'.
- ipmi_required_options.insert(ipmi_required_options.index('P'), 'U')
+ ipmi_required_options.insert(ipmi_required_options.index("P"), "U")
def ipmi_setup():
@@ -152,7 +150,9 @@
command An IPMI command (e.g. "power status").
"""
- ipmi_user_options = BuiltIn().get_variable_value("${IPMI_USER_OPTIONS}", '')
+ ipmi_user_options = BuiltIn().get_variable_value(
+ "${IPMI_USER_OPTIONS}", ""
+ )
if ipmi_user_options == "":
return command
return ipmi_user_options + " " + command
diff --git a/lib/ipmi_utils.py b/lib/ipmi_utils.py
index 38045f5..5d1598c 100644
--- a/lib/ipmi_utils.py
+++ b/lib/ipmi_utils.py
@@ -4,18 +4,20 @@
Provide useful ipmi functions.
"""
-from robot.libraries.BuiltIn import BuiltIn
+import json
import re
-import gen_print as gp
-import gen_misc as gm
+import tempfile
+
+import bmc_ssh_utils as bsu
import gen_cmd as gc
+import gen_misc as gm
+import gen_print as gp
import gen_robot_keyword as grk
import gen_robot_utils as gru
-import bmc_ssh_utils as bsu
-import var_funcs as vf
import ipmi_client as ic
-import tempfile
-import json
+import var_funcs as vf
+from robot.libraries.BuiltIn import BuiltIn
+
gru.my_import_resource("ipmi_client.robot")
@@ -75,17 +77,19 @@
# setting_value Value which needs to be set (e.g. "7").
"""
- status, ret_values = grk.run_key_u("Run IPMI Standard Command sol set "
- + setting_name + " " + setting_value)
+ status, ret_values = grk.run_key_u(
+ "Run IPMI Standard Command sol set "
+ + setting_name
+ + " "
+ + setting_value
+ )
return status
-def execute_ipmi_cmd(cmd_string,
- ipmi_cmd_type='inband',
- print_output=1,
- ignore_err=0,
- **options):
+def execute_ipmi_cmd(
+ cmd_string, ipmi_cmd_type="inband", print_output=1, ignore_err=0, **options
+):
r"""
Run the given command string as an IPMI command and return the stdout,
stderr and the return code.
@@ -106,23 +110,25 @@
See that function's prolog for details.
"""
- if ipmi_cmd_type == 'inband':
+ if ipmi_cmd_type == "inband":
IPMI_INBAND_CMD = BuiltIn().get_variable_value("${IPMI_INBAND_CMD}")
cmd_buf = IPMI_INBAND_CMD + " " + cmd_string
- return bsu.os_execute_command(cmd_buf,
- print_out=print_output,
- ignore_err=ignore_err)
+ return bsu.os_execute_command(
+ cmd_buf, print_out=print_output, ignore_err=ignore_err
+ )
- if ipmi_cmd_type == 'external':
+ if ipmi_cmd_type == "external":
cmd_buf = ic.create_ipmi_ext_command_string(cmd_string, **options)
- rc, stdout, stderr = gc.shell_cmd(cmd_buf,
- print_output=print_output,
- ignore_err=ignore_err,
- return_stderr=1)
+ rc, stdout, stderr = gc.shell_cmd(
+ cmd_buf,
+ print_output=print_output,
+ ignore_err=ignore_err,
+ return_stderr=1,
+ )
return stdout, stderr, rc
-def get_lan_print_dict(channel_number='', ipmi_cmd_type='external'):
+def get_lan_print_dict(channel_number="", ipmi_cmd_type="external"):
r"""
Get IPMI 'lan print' output and return it as a dictionary.
@@ -174,26 +180,34 @@
# special processing. We essentially want to isolate its data and remove
# the 'Auth Type Enable' string so that key_value_outbuf_to_dict can
# process it as a sub-dictionary.
- cmd_buf = "lan print " + channel_number + " | grep -E '^(Auth Type Enable)" +\
- "?[ ]+: ' | sed -re 's/^(Auth Type Enable)?[ ]+: //g'"
- stdout1, stderr, rc = execute_ipmi_cmd(cmd_buf, ipmi_cmd_type,
- print_output=0)
+ cmd_buf = (
+ "lan print "
+ + channel_number
+ + " | grep -E '^(Auth Type Enable)"
+ + "?[ ]+: ' | sed -re 's/^(Auth Type Enable)?[ ]+: //g'"
+ )
+ stdout1, stderr, rc = execute_ipmi_cmd(
+ cmd_buf, ipmi_cmd_type, print_output=0
+ )
# Now get the remainder of the data and exclude the lines with no field
# names (i.e. the 'Auth Type Enable' sub-fields).
cmd_buf = "lan print " + channel_number + " | grep -E -v '^[ ]+: '"
- stdout2, stderr, rc = execute_ipmi_cmd(cmd_buf, ipmi_cmd_type,
- print_output=0)
+ stdout2, stderr, rc = execute_ipmi_cmd(
+ cmd_buf, ipmi_cmd_type, print_output=0
+ )
# Make auth_type_enable_dict sub-dictionary...
- auth_type_enable_dict = vf.key_value_outbuf_to_dict(stdout1, to_lower=0,
- underscores=0)
+ auth_type_enable_dict = vf.key_value_outbuf_to_dict(
+ stdout1, to_lower=0, underscores=0
+ )
# Create the lan_print_dict...
- lan_print_dict = vf.key_value_outbuf_to_dict(stdout2, to_lower=0,
- underscores=0)
+ lan_print_dict = vf.key_value_outbuf_to_dict(
+ stdout2, to_lower=0, underscores=0
+ )
# Re-assign 'Auth Type Enable' to contain the auth_type_enable_dict.
- lan_print_dict['Auth Type Enable'] = auth_type_enable_dict
+ lan_print_dict["Auth Type Enable"] = auth_type_enable_dict
return lan_print_dict
@@ -229,12 +243,13 @@
trailing " Watts" substring.
"""
- status, ret_values = \
- grk.run_key_u("Run IPMI Standard Command dcmi power reading")
+ status, ret_values = grk.run_key_u(
+ "Run IPMI Standard Command dcmi power reading"
+ )
result = vf.key_value_outbuf_to_dict(ret_values)
if strip_watts:
- result.update((k, re.sub(' Watts$', '', v)) for k, v in result.items())
+ result.update((k, re.sub(" Watts$", "", v)) for k, v in result.items())
return result
@@ -291,8 +306,7 @@
[aux_firmware_rev_info][3]: 0x00
"""
- status, ret_values = \
- grk.run_key_u("Run IPMI Standard Command mc info")
+ status, ret_values = grk.run_key_u("Run IPMI Standard Command mc info")
result = vf.key_value_outbuf_to_dict(ret_values, process_indent=1)
return result
@@ -333,8 +347,7 @@
[sdr_repository_alloc_info_supported]: no
"""
- status, ret_values = \
- grk.run_key_u("Run IPMI Standard Command sdr info")
+ status, ret_values = grk.run_key_u("Run IPMI Standard Command sdr info")
result = vf.key_value_outbuf_to_dict(ret_values, process_indent=1)
return result
@@ -415,19 +428,21 @@
[board_part_number]: 02CY209
"""
- status, ret_values = \
- grk.run_key_u("Run IPMI Standard Command fru print -N 50")
+ status, ret_values = grk.run_key_u(
+ "Run IPMI Standard Command fru print -N 50"
+ )
# Manipulate the "Device not present" line to create a "state" key.
- ret_values = re.sub("Device not present", "state : Device not present",
- ret_values)
+ ret_values = re.sub(
+ "Device not present", "state : Device not present", ret_values
+ )
- return [vf.key_value_outbuf_to_dict(x) for x in re.split("\n\n",
- ret_values)]
+ return [
+ vf.key_value_outbuf_to_dict(x) for x in re.split("\n\n", ret_values)
+ ]
-def get_component_fru_info(component='cpu',
- fru_objs=None):
+def get_component_fru_info(component="cpu", fru_objs=None):
r"""
Get fru info for the given component and return it as a list of
dictionaries.
@@ -448,9 +463,11 @@
if fru_objs is None:
fru_objs = get_fru_info()
- return\
- [x for x in fru_objs
- if re.match(component + '([0-9]+)? ', x['fru_device_description'])]
+ return [
+ x
+ for x in fru_objs
+ if re.match(component + "([0-9]+)? ", x["fru_device_description"])
+ ]
def get_user_info(userid, channel_number=1):
@@ -493,8 +510,12 @@
[enable_status] enabled
"""
- status, ret_values = grk.run_key_u("Run IPMI Standard Command channel getaccess "
- + str(channel_number) + " " + str(userid))
+ status, ret_values = grk.run_key_u(
+ "Run IPMI Standard Command channel getaccess "
+ + str(channel_number)
+ + " "
+ + str(userid)
+ )
if userid == "":
return vf.key_value_outbuf_to_dicts(ret_values, process_indent=1)
@@ -503,7 +524,6 @@
def channel_getciphers_ipmi():
-
r"""
Run 'channel getciphers ipmi' command and return the result as a list of dictionaries.
@@ -550,7 +570,9 @@
[revision]: 129
[device_revision]: 1
"""
- stdout, stderr, rc = bsu.bmc_execute_command("cat /usr/share/ipmi-providers/dev_id.json")
+ stdout, stderr, rc = bsu.bmc_execute_command(
+ "cat /usr/share/ipmi-providers/dev_id.json"
+ )
result = json.loads(stdout)
@@ -561,7 +583,7 @@
# [6:4] reserved. Return as 0.
# [3:0] Device Revision, binary encoded.
- result['device_revision'] = result['revision'] & 0x0F
+ result["device_revision"] = result["revision"] & 0x0F
return result
@@ -617,8 +639,9 @@
[power_button_disabled]: false
"""
- status, ret_values = \
- grk.run_key_u("Run IPMI Standard Command chassis status")
+ status, ret_values = grk.run_key_u(
+ "Run IPMI Standard Command chassis status"
+ )
result = vf.key_value_outbuf_to_dict(ret_values, process_indent=1)
return result
@@ -648,19 +671,19 @@
[access_mode]: always available
"""
- status, ret_values = \
- grk.run_key_u("Run IPMI Standard Command channel info " + str(channel_number))
+ status, ret_values = grk.run_key_u(
+ "Run IPMI Standard Command channel info " + str(channel_number)
+ )
key_var_list = list(filter(None, ret_values.split("\n")))
# To match the dict format, add a colon after 'Volatile(active) Settings' and 'Non-Volatile Settings'
# respectively.
- key_var_list[6] = 'Volatile(active) Settings:'
- key_var_list[11] = 'Non-Volatile Settings:'
+ key_var_list[6] = "Volatile(active) Settings:"
+ key_var_list[11] = "Non-Volatile Settings:"
result = vf.key_value_list_to_dict(key_var_list, process_indent=1)
return result
def get_user_access_ipmi(channel_number=1):
-
r"""
Run 'user list [<channel number>]' command and return the result as a list of dictionaries.
@@ -710,9 +733,12 @@
[channel_supports_ipmi_v2.0]: yes
"""
- status, ret_values = \
- grk.run_key_u("Run IPMI Standard Command channel authcap " + str(channel_number) + " "
- + str(privilege_level))
+ status, ret_values = grk.run_key_u(
+ "Run IPMI Standard Command channel authcap "
+ + str(channel_number)
+ + " "
+ + str(privilege_level)
+ )
result = vf.key_value_outbuf_to_dict(ret_values, process_indent=1)
return result
@@ -789,8 +815,8 @@
newthreshold_list = []
for th in old_threshold:
th = th.strip()
- if th == 'na':
- newthreshold_list.append('na')
+ if th == "na":
+ newthreshold_list.append("na")
else:
x = int(float(th)) + n
newthreshold_list.append(x)
diff --git a/lib/jobs_processing.py b/lib/jobs_processing.py
index 5555b62..2f698c9 100644
--- a/lib/jobs_processing.py
+++ b/lib/jobs_processing.py
@@ -6,11 +6,12 @@
"""
-from robot.libraries.BuiltIn import BuiltIn
-from multiprocessing import Process, Manager
-import os
import datetime
+import os
+from multiprocessing import Manager, Process
+
import gen_print as gp
+from robot.libraries.BuiltIn import BuiltIn
def execute_keyword(keyword_name, return_dict):
@@ -48,8 +49,9 @@
# Append user-defined times process needed to execute.
for ix in range(int(num_process)):
- task = Process(target=execute_keyword,
- args=(keyword_name, return_dict))
+ task = Process(
+ target=execute_keyword, args=(keyword_name, return_dict)
+ )
process_list.append(task)
task.start()
@@ -98,8 +100,10 @@
for keywords_data in keyword_names:
keyword_args = tuple(keywords_data.split(" ")[-number_args:])
keyword_name = " ".join(keywords_data.split(" ")[:-number_args])
- task = Process(target=execute_keyword_args,
- args=(keyword_name, keyword_args, return_dict))
+ task = Process(
+ target=execute_keyword_args,
+ args=(keyword_name, keyword_args, return_dict),
+ )
process_list.append(task)
task.start()
diff --git a/lib/logging_utils.py b/lib/logging_utils.py
index 3e0d079..4cd1898 100644
--- a/lib/logging_utils.py
+++ b/lib/logging_utils.py
@@ -4,22 +4,29 @@
Provide useful error log utility keywords.
"""
-from robot.libraries.BuiltIn import BuiltIn
+import imp
+import os
+import sys
import gen_print as gp
-import sys
-import os
-import imp
-base_path = os.path.dirname(os.path.dirname(
- imp.find_module("gen_robot_print")[1])) + os.sep
+from robot.libraries.BuiltIn import BuiltIn
+
+base_path = (
+ os.path.dirname(os.path.dirname(imp.find_module("gen_robot_print")[1]))
+ + os.sep
+)
sys.path.append(base_path + "data/")
-import variables as var # NOQA
-import gen_robot_utils as gru # NOQA
+import gen_robot_utils as gru # NOQA
+import variables as var # NOQA
+
gru.my_import_resource("logging_utils.robot")
-redfish_support_trans_state = int(os.environ.get('REDFISH_SUPPORT_TRANS_STATE', 0)) or \
- int(BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0))
+redfish_support_trans_state = int(
+ os.environ.get("REDFISH_SUPPORT_TRANS_STATE", 0)
+) or int(
+ BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0)
+)
def print_error_logs(error_logs, key_list=None):
@@ -106,7 +113,7 @@
"""
if error_logs is None:
- error_logs = BuiltIn().run_keyword('Get Error Logs')
+ error_logs = BuiltIn().run_keyword("Get Error Logs")
# Look for any error log entries containing the 'AdditionalData' field
# which in turn has an entry starting with "ESEL=". Here is an excerpt of
@@ -118,9 +125,9 @@
# [AdditionalData][1]: ESEL=00 00 df 00 00 00 00 20 00 04...
esels = []
for error_log in error_logs.values():
- if 'AdditionalData' in error_log:
- for additional_data in error_log['AdditionalData']:
- if additional_data.startswith('ESEL='):
+ if "AdditionalData" in error_log:
+ for additional_data in error_log["AdditionalData"]:
+ if additional_data.startswith("ESEL="):
esels.append(additional_data)
return esels
diff --git a/lib/obmc_boot_test.py b/lib/obmc_boot_test.py
index 452d607..8e0c17e 100755
--- a/lib/obmc_boot_test.py
+++ b/lib/obmc_boot_test.py
@@ -4,47 +4,49 @@
This module is the python counterpart to obmc_boot_test.
"""
-import os
-import imp
-import time
import glob
+import imp
+import os
import random
import re
import signal
+import time
+
try:
import cPickle as pickle
except ImportError:
import pickle
+
import socket
-from robot.utils import DotDict
-from robot.libraries.BuiltIn import BuiltIn
-
-from boot_data import *
-import gen_print as gp
-import gen_robot_plug_in as grpi
import gen_arg as ga
-import gen_valid as gv
-import gen_misc as gm
import gen_cmd as gc
+import gen_misc as gm
+import gen_plug_in_utils as gpu
+import gen_print as gp
import gen_robot_keyword as grk
+import gen_robot_plug_in as grpi
+import gen_valid as gv
+import logging_utils as log
+import pel_utils as pel
import state as st
import var_stack as vs
-import gen_plug_in_utils as gpu
-import pel_utils as pel
-import logging_utils as log
+from boot_data import *
+from robot.libraries.BuiltIn import BuiltIn
+from robot.utils import DotDict
-base_path = os.path.dirname(os.path.dirname(
- imp.find_module("gen_robot_print")[1])) +\
- os.sep
+base_path = (
+ os.path.dirname(os.path.dirname(imp.find_module("gen_robot_print")[1]))
+ + os.sep
+)
sys.path.append(base_path + "extended/")
import run_keyword as rk # NOQA
# Setting master_pid correctly influences the behavior of plug-ins like
# DB_Logging
program_pid = os.getpid()
-master_pid = os.environ.get('AUTOBOOT_MASTER_PID', program_pid)
-pgm_name = re.sub('\\.py$', '', os.path.basename(__file__))
+master_pid = os.environ.get("AUTOBOOT_MASTER_PID", program_pid)
+pgm_name = re.sub("\\.py$", "", os.path.basename(__file__))
# Set up boot data structures.
os_host = BuiltIn().get_variable_value("${OS_HOST}", default="")
@@ -55,29 +57,41 @@
max_boot_history = 10
boot_history = []
-state = st.return_state_constant('default_state')
+state = st.return_state_constant("default_state")
cp_setup_called = 0
next_boot = ""
-base_tool_dir_path = os.path.normpath(os.environ.get(
- 'AUTOBOOT_BASE_TOOL_DIR_PATH', "/tmp")) + os.sep
+base_tool_dir_path = (
+ os.path.normpath(os.environ.get("AUTOBOOT_BASE_TOOL_DIR_PATH", "/tmp"))
+ + os.sep
+)
-ffdc_dir_path = os.path.normpath(os.environ.get('FFDC_DIR_PATH', '')) + os.sep
+ffdc_dir_path = os.path.normpath(os.environ.get("FFDC_DIR_PATH", "")) + os.sep
boot_success = 0
-status_dir_path = os.environ.get('STATUS_DIR_PATH', "") or \
- BuiltIn().get_variable_value("${STATUS_DIR_PATH}", default="")
+status_dir_path = os.environ.get(
+ "STATUS_DIR_PATH", ""
+) or BuiltIn().get_variable_value("${STATUS_DIR_PATH}", default="")
if status_dir_path != "":
status_dir_path = os.path.normpath(status_dir_path) + os.sep
# For plugin expecting env gen_call_robot.py
- os.environ['STATUS_DIR_PATH'] = status_dir_path
+ os.environ["STATUS_DIR_PATH"] = status_dir_path
-redfish_support_trans_state = int(os.environ.get('REDFISH_SUPPORT_TRANS_STATE', 0)) or \
- int(BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0))
-redfish_supported = BuiltIn().get_variable_value("${REDFISH_SUPPORTED}", default=False)
-redfish_rest_supported = BuiltIn().get_variable_value("${REDFISH_REST_SUPPORTED}", default=False)
-redfish_delete_sessions = int(BuiltIn().get_variable_value("${REDFISH_DELETE_SESSIONS}", default=1))
+redfish_support_trans_state = int(
+ os.environ.get("REDFISH_SUPPORT_TRANS_STATE", 0)
+) or int(
+ BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0)
+)
+redfish_supported = BuiltIn().get_variable_value(
+ "${REDFISH_SUPPORTED}", default=False
+)
+redfish_rest_supported = BuiltIn().get_variable_value(
+ "${REDFISH_REST_SUPPORTED}", default=False
+)
+redfish_delete_sessions = int(
+ BuiltIn().get_variable_value("${REDFISH_DELETE_SESSIONS}", default=1)
+)
if redfish_supported:
- redfish = BuiltIn().get_library_instance('redfish')
+ redfish = BuiltIn().get_library_instance("redfish")
default_power_on = "Redfish Power On"
default_power_off = "Redfish Power Off"
if not redfish_support_trans_state:
@@ -88,7 +102,9 @@
delete_errlogs_cmd = "Redfish Purge Event Log"
delete_bmcdump_cmd = "Redfish Delete All BMC Dumps"
delete_sysdump_cmd = "Redfish Delete All System Dumps"
- default_set_power_policy = "Redfish Set Power Restore Policy AlwaysOff"
+ default_set_power_policy = (
+ "Redfish Set Power Restore Policy AlwaysOff"
+ )
else:
default_power_on = "REST Power On"
default_power_off = "REST Power Off"
@@ -98,12 +114,12 @@
boot_count = 0
LOG_LEVEL = BuiltIn().get_variable_value("${LOG_LEVEL}")
-AUTOBOOT_FFDC_PREFIX = os.environ.get('AUTOBOOT_FFDC_PREFIX', '')
+AUTOBOOT_FFDC_PREFIX = os.environ.get("AUTOBOOT_FFDC_PREFIX", "")
ffdc_prefix = AUTOBOOT_FFDC_PREFIX
boot_start_time = ""
boot_end_time = ""
-save_stack = vs.var_stack('save_stack')
-main_func_parm_list = ['boot_stack', 'stack_mode', 'quiet']
+save_stack = vs.var_stack("save_stack")
+main_func_parm_list = ["boot_stack", "stack_mode", "quiet"]
def dump_ffdc_rc():
@@ -128,8 +144,7 @@
return 0x00000200
-def process_host(host,
- host_var_name=""):
+def process_host(host, host_var_name=""):
r"""
Process a host by getting the associated host name and IP address and
setting them in global variables.
@@ -162,9 +177,19 @@
host_name_var_name = re.sub("host", "host_name", host_var_name)
ip_var_name = re.sub("host", "ip", host_var_name)
- cmd_buf = "global " + host_name_var_name + ", " + ip_var_name + " ; " +\
- host_name_var_name + ", " + ip_var_name + " = gm.get_host_name_ip('" +\
- host + "')"
+ cmd_buf = (
+ "global "
+ + host_name_var_name
+ + ", "
+ + ip_var_name
+ + " ; "
+ + host_name_var_name
+ + ", "
+ + ip_var_name
+ + " = gm.get_host_name_ip('"
+ + host
+ + "')"
+ )
exec(cmd_buf)
@@ -181,16 +206,26 @@
global parm_list
parm_list = BuiltIn().get_variable_value("${parm_list}")
# The following subset of parms should be processed as integers.
- int_list = ['max_num_tests', 'boot_pass', 'boot_fail', 'ffdc_only',
- 'boot_fail_threshold', 'delete_errlogs',
- 'call_post_stack_plug', 'do_pre_boot_plug_in_setup', 'quiet',
- 'test_mode', 'debug']
+ int_list = [
+ "max_num_tests",
+ "boot_pass",
+ "boot_fail",
+ "ffdc_only",
+ "boot_fail_threshold",
+ "delete_errlogs",
+ "call_post_stack_plug",
+ "do_pre_boot_plug_in_setup",
+ "quiet",
+ "test_mode",
+ "debug",
+ ]
for parm in parm_list:
if parm in int_list:
- sub_cmd = "int(BuiltIn().get_variable_value(\"${" + parm +\
- "}\", \"0\"))"
+ sub_cmd = (
+ 'int(BuiltIn().get_variable_value("${' + parm + '}", "0"))'
+ )
else:
- sub_cmd = "BuiltIn().get_variable_value(\"${" + parm + "}\")"
+ sub_cmd = 'BuiltIn().get_variable_value("${' + parm + '}")'
cmd_buf = "global " + parm + " ; " + parm + " = " + sub_cmd
gp.dpissuing(cmd_buf)
exec(cmd_buf)
@@ -217,7 +252,7 @@
global valid_boot_types
if ffdc_dir_path_style == "":
- ffdc_dir_path_style = int(os.environ.get('FFDC_DIR_PATH_STYLE', '0'))
+ ffdc_dir_path_style = int(os.environ.get("FFDC_DIR_PATH_STYLE", "0"))
# Convert these program parms to lists for easier processing..
boot_list = list(filter(None, boot_list.split(":")))
@@ -227,25 +262,29 @@
valid_boot_types = create_valid_boot_list(boot_table)
cleanup_boot_results_file()
- boot_results_file_path = create_boot_results_file_path(pgm_name,
- openbmc_nickname,
- master_pid)
+ boot_results_file_path = create_boot_results_file_path(
+ pgm_name, openbmc_nickname, master_pid
+ )
if os.path.isfile(boot_results_file_path):
# We've been called before in this run so we'll load the saved
# boot_results and boot_history objects.
- boot_results, boot_history =\
- pickle.load(open(boot_results_file_path, 'rb'))
+ boot_results, boot_history = pickle.load(
+ open(boot_results_file_path, "rb")
+ )
else:
boot_results = boot_results(boot_table, boot_pass, boot_fail)
- ffdc_list_file_path = base_tool_dir_path + openbmc_nickname +\
- "/FFDC_FILE_LIST"
- ffdc_report_list_path = base_tool_dir_path + openbmc_nickname +\
- "/FFDC_REPORT_FILE_LIST"
+ ffdc_list_file_path = (
+ base_tool_dir_path + openbmc_nickname + "/FFDC_FILE_LIST"
+ )
+ ffdc_report_list_path = (
+ base_tool_dir_path + openbmc_nickname + "/FFDC_REPORT_FILE_LIST"
+ )
- ffdc_summary_list_path = base_tool_dir_path + openbmc_nickname +\
- "/FFDC_SUMMARY_FILE_LIST"
+ ffdc_summary_list_path = (
+ base_tool_dir_path + openbmc_nickname + "/FFDC_SUMMARY_FILE_LIST"
+ )
def initial_plug_in_setup():
@@ -262,26 +301,38 @@
BuiltIn().set_global_variable("${FFDC_DIR_PATH}", ffdc_dir_path)
BuiltIn().set_global_variable("${STATUS_DIR_PATH}", status_dir_path)
BuiltIn().set_global_variable("${BASE_TOOL_DIR_PATH}", base_tool_dir_path)
- BuiltIn().set_global_variable("${FFDC_LIST_FILE_PATH}",
- ffdc_list_file_path)
- BuiltIn().set_global_variable("${FFDC_REPORT_LIST_PATH}",
- ffdc_report_list_path)
- BuiltIn().set_global_variable("${FFDC_SUMMARY_LIST_PATH}",
- ffdc_summary_list_path)
+ BuiltIn().set_global_variable(
+ "${FFDC_LIST_FILE_PATH}", ffdc_list_file_path
+ )
+ BuiltIn().set_global_variable(
+ "${FFDC_REPORT_LIST_PATH}", ffdc_report_list_path
+ )
+ BuiltIn().set_global_variable(
+ "${FFDC_SUMMARY_LIST_PATH}", ffdc_summary_list_path
+ )
- BuiltIn().set_global_variable("${FFDC_DIR_PATH_STYLE}",
- ffdc_dir_path_style)
- BuiltIn().set_global_variable("${FFDC_CHECK}",
- ffdc_check)
+ BuiltIn().set_global_variable(
+ "${FFDC_DIR_PATH_STYLE}", ffdc_dir_path_style
+ )
+ BuiltIn().set_global_variable("${FFDC_CHECK}", ffdc_check)
# For each program parameter, set the corresponding AUTOBOOT_ environment
# variable value. Also, set an AUTOBOOT_ environment variable for every
# element in additional_values.
- additional_values = ["program_pid", "master_pid", "ffdc_dir_path",
- "status_dir_path", "base_tool_dir_path",
- "ffdc_list_file_path", "ffdc_report_list_path",
- "ffdc_summary_list_path", "execdir", "redfish_supported",
- "redfish_rest_supported", "redfish_support_trans_state"]
+ additional_values = [
+ "program_pid",
+ "master_pid",
+ "ffdc_dir_path",
+ "status_dir_path",
+ "base_tool_dir_path",
+ "ffdc_list_file_path",
+ "ffdc_report_list_path",
+ "ffdc_summary_list_path",
+ "execdir",
+ "redfish_supported",
+ "redfish_rest_supported",
+ "redfish_support_trans_state",
+ ]
plug_in_vars = parm_list + additional_values
@@ -317,8 +368,9 @@
else:
test_really_running = 0
- BuiltIn().set_global_variable("${test_really_running}",
- test_really_running)
+ BuiltIn().set_global_variable(
+ "${test_really_running}", test_really_running
+ )
BuiltIn().set_global_variable("${boot_type_desc}", next_boot)
BuiltIn().set_global_variable("${boot_pass}", boot_pass)
BuiltIn().set_global_variable("${boot_fail}", boot_fail)
@@ -330,9 +382,16 @@
# For each program parameter, set the corresponding AUTOBOOT_ environment
# variable value. Also, set an AUTOBOOT_ environment variable for every
# element in additional_values.
- additional_values = ["boot_type_desc", "boot_success", "boot_pass",
- "boot_fail", "test_really_running", "ffdc_prefix",
- "boot_start_time", "boot_end_time"]
+ additional_values = [
+ "boot_type_desc",
+ "boot_success",
+ "boot_pass",
+ "boot_fail",
+ "test_really_running",
+ "ffdc_prefix",
+ "boot_start_time",
+ "boot_end_time",
+ ]
plug_in_vars = additional_values
@@ -344,14 +403,14 @@
os.environ["AUTOBOOT_" + var_name] = str(var_value)
if debug:
- shell_rc, out_buf = \
- gc.cmd_fnc_u("printenv | egrep AUTOBOOT_ | sort -u")
+ shell_rc, out_buf = gc.cmd_fnc_u(
+ "printenv | egrep AUTOBOOT_ | sort -u"
+ )
BuiltIn().set_log_level(LOG_LEVEL)
def pre_boot_plug_in_setup():
-
# Clear the ffdc_list_file_path file. Plug-ins may now write to it.
try:
os.remove(ffdc_list_file_path)
@@ -379,8 +438,7 @@
ffdc_prefix = openbmc_nickname + "." + time_string
-def default_sigusr1(signal_number=0,
- frame=None):
+def default_sigusr1(signal_number=0, frame=None):
r"""
Handle SIGUSR1 by doing nothing.
@@ -428,19 +486,22 @@
repo_bin_path = robot_pgm_dir_path.replace("/lib/", "/bin/")
# If we can't find process_plug_in_packages.py, ssh_pw or
# validate_plug_ins.py, then we don't have our repo bin in PATH.
- shell_rc, out_buf = gc.cmd_fnc_u("which process_plug_in_packages.py"
- + " ssh_pw validate_plug_ins.py", quiet=1,
- print_output=0, show_err=0)
+ shell_rc, out_buf = gc.cmd_fnc_u(
+ "which process_plug_in_packages.py" + " ssh_pw validate_plug_ins.py",
+ quiet=1,
+ print_output=0,
+ show_err=0,
+ )
if shell_rc != 0:
- os.environ['PATH'] = repo_bin_path + ":" + os.environ.get('PATH', "")
+ os.environ["PATH"] = repo_bin_path + ":" + os.environ.get("PATH", "")
# Likewise, our repo lib subdir needs to be in sys.path and PYTHONPATH.
if robot_pgm_dir_path not in sys.path:
sys.path.append(robot_pgm_dir_path)
PYTHONPATH = os.environ.get("PYTHONPATH", "")
if PYTHONPATH == "":
- os.environ['PYTHONPATH'] = robot_pgm_dir_path
+ os.environ["PYTHONPATH"] = robot_pgm_dir_path
else:
- os.environ['PYTHONPATH'] = robot_pgm_dir_path + ":" + PYTHONPATH
+ os.environ["PYTHONPATH"] = robot_pgm_dir_path + ":" + PYTHONPATH
validate_parms()
@@ -452,7 +513,8 @@
plug_in_setup()
rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
- call_point='setup')
+ call_point="setup"
+ )
if rc != 0:
error_message = "Plug-in setup failed.\n"
gp.print_error_report(error_message)
@@ -487,11 +549,10 @@
global openbmc_model
if openbmc_model == "":
- status, ret_values =\
- grk.run_key_u("Get BMC System Model", ignore=1)
+ status, ret_values = grk.run_key_u("Get BMC System Model", ignore=1)
# Set the model to default "OPENBMC" if getting it from BMC fails.
- if status == 'FAIL':
- openbmc_model = 'OPENBMC'
+ if status == "FAIL":
+ openbmc_model = "OPENBMC"
else:
openbmc_model = ret_values
BuiltIn().set_global_variable("${openbmc_model}", openbmc_model)
@@ -517,21 +578,27 @@
gv.valid_integer(boot_pass)
gv.valid_integer(boot_fail)
plug_in_packages_list = grpi.rvalidate_plug_ins(plug_in_dir_paths)
- BuiltIn().set_global_variable("${plug_in_packages_list}",
- plug_in_packages_list)
- gv.valid_value(stack_mode, valid_values=['normal', 'skip'])
+ BuiltIn().set_global_variable(
+ "${plug_in_packages_list}", plug_in_packages_list
+ )
+ gv.valid_value(stack_mode, valid_values=["normal", "skip"])
gv.set_exit_on_error(False)
if len(boot_list) == 0 and len(boot_stack) == 0 and not ffdc_only:
- error_message = "You must provide either a value for either the" +\
- " boot_list or the boot_stack parm.\n"
+ error_message = (
+ "You must provide either a value for either the"
+ + " boot_list or the boot_stack parm.\n"
+ )
BuiltIn().fail(gp.sprint_error(error_message))
valid_boot_list(boot_list, valid_boot_types)
valid_boot_list(boot_stack, valid_boot_types)
- selected_PDU_boots = list(set(boot_list + boot_stack)
- & set(boot_lists['PDU_reboot']))
+ selected_PDU_boots = list(
+ set(boot_list + boot_stack) & set(boot_lists["PDU_reboot"])
+ )
if len(selected_PDU_boots) > 0 and pdu_host == "":
- error_message = "You have selected the following boots which" +\
- " require a PDU host but no value for pdu_host:\n"
+ error_message = (
+ "You have selected the following boots which"
+ + " require a PDU host but no value for pdu_host:\n"
+ )
error_message += gp.sprint_var(selected_PDU_boots)
error_message += gp.sprint_var(pdu_host, fmt=gp.blank())
BuiltIn().fail(gp.sprint_error(error_message))
@@ -546,11 +613,11 @@
global state
- req_states = ['epoch_seconds'] + st.default_req_states
+ req_states = ["epoch_seconds"] + st.default_req_states
gp.qprint_timen("Getting system state.")
if test_mode:
- state['epoch_seconds'] = int(time.time())
+ state["epoch_seconds"] = int(time.time())
else:
state = st.get_state(req_states=req_states, quiet=quiet)
gp.qprint_var(state)
@@ -562,9 +629,12 @@
valid state data, we cannot continue to work.
"""
- if st.compare_states(state, st.invalid_state_match, 'or'):
- error_message = "The state dictionary contains blank fields which" +\
- " is illegal.\n" + gp.sprint_var(state)
+ if st.compare_states(state, st.invalid_state_match, "or"):
+ error_message = (
+ "The state dictionary contains blank fields which"
+ + " is illegal.\n"
+ + gp.sprint_var(state)
+ )
BuiltIn().fail(gp.sprint_error(error_message))
@@ -585,12 +655,20 @@
if transitional_boot_selected and not boot_success:
prior_boot = next_boot
boot_candidate = boot_stack.pop()
- gp.qprint_timen("The prior '" + next_boot + "' was chosen to"
- + " transition to a valid state for '" + boot_candidate
- + "' which was at the top of the boot_stack. Since"
- + " the '" + next_boot + "' failed, the '"
- + boot_candidate + "' has been removed from the stack"
- + " to avoid and endless failure loop.")
+ gp.qprint_timen(
+ "The prior '"
+ + next_boot
+ + "' was chosen to"
+ + " transition to a valid state for '"
+ + boot_candidate
+ + "' which was at the top of the boot_stack. Since"
+ + " the '"
+ + next_boot
+ + "' failed, the '"
+ + boot_candidate
+ + "' has been removed from the stack"
+ + " to avoid and endless failure loop."
+ )
if len(boot_stack) == 0:
return ""
@@ -607,17 +685,19 @@
skip_boot_printed = 0
while len(boot_stack) > 0:
boot_candidate = boot_stack.pop()
- if stack_mode == 'normal':
+ if stack_mode == "normal":
break
else:
- if st.compare_states(state, boot_table[boot_candidate]['end']):
+ if st.compare_states(state, boot_table[boot_candidate]["end"]):
if not skip_boot_printed:
gp.qprint_var(stack_mode)
gp.qprintn()
- gp.qprint_timen("Skipping the following boot tests"
- + " which are unnecessary since their"
- + " required end states match the"
- + " current machine state:")
+ gp.qprint_timen(
+ "Skipping the following boot tests"
+ + " which are unnecessary since their"
+ + " required end states match the"
+ + " current machine state:"
+ )
skip_boot_printed = 1
gp.qprint_var(boot_candidate)
boot_candidate = ""
@@ -626,19 +706,26 @@
gp.qprint_var(boot_stack)
gp.qprint_dashes()
return boot_candidate
- if st.compare_states(state, boot_table[boot_candidate]['start']):
- gp.qprint_timen("The machine state is valid for a '"
- + boot_candidate + "' boot test.")
+ if st.compare_states(state, boot_table[boot_candidate]["start"]):
+ gp.qprint_timen(
+ "The machine state is valid for a '"
+ + boot_candidate
+ + "' boot test."
+ )
gp.qprint_dashes()
gp.qprint_var(boot_stack)
gp.qprint_dashes()
return boot_candidate
else:
- gp.qprint_timen("The machine state does not match the required"
- + " starting state for a '" + boot_candidate
- + "' boot test:")
- gp.qprint_varx("boot_table_start_entry",
- boot_table[boot_candidate]['start'])
+ gp.qprint_timen(
+ "The machine state does not match the required"
+ + " starting state for a '"
+ + boot_candidate
+ + "' boot test:"
+ )
+ gp.qprint_varx(
+ "boot_table_start_entry", boot_table[boot_candidate]["start"]
+ )
boot_stack.append(boot_candidate)
transitional_boot_selected = True
popped_boot = boot_candidate
@@ -646,23 +733,30 @@
# Loop through your list selecting a boot_candidates
boot_candidates = []
for boot_candidate in boot_list:
- if st.compare_states(state, boot_table[boot_candidate]['start']):
+ if st.compare_states(state, boot_table[boot_candidate]["start"]):
if stack_popped:
- if st.compare_states(boot_table[boot_candidate]['end'],
- boot_table[popped_boot]['start']):
+ if st.compare_states(
+ boot_table[boot_candidate]["end"],
+ boot_table[popped_boot]["start"],
+ ):
boot_candidates.append(boot_candidate)
else:
boot_candidates.append(boot_candidate)
if len(boot_candidates) == 0:
- gp.qprint_timen("The user's boot list contained no boot tests"
- + " which are valid for the current machine state.")
+ gp.qprint_timen(
+ "The user's boot list contained no boot tests"
+ + " which are valid for the current machine state."
+ )
boot_candidate = default_power_on
- if not st.compare_states(state, boot_table[default_power_on]['start']):
+ if not st.compare_states(state, boot_table[default_power_on]["start"]):
boot_candidate = default_power_off
boot_candidates.append(boot_candidate)
- gp.qprint_timen("Using default '" + boot_candidate
- + "' boot type to transition to valid state.")
+ gp.qprint_timen(
+ "Using default '"
+ + boot_candidate
+ + "' boot type to transition to valid state."
+ )
gp.dprint_var(boot_candidates)
@@ -683,29 +777,41 @@
# Making deliberate choice to NOT run plug_in_setup(). We don't want
# ffdc_prefix updated.
rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
- call_point='ffdc_report', stop_on_plug_in_failure=0)
+ call_point="ffdc_report", stop_on_plug_in_failure=0
+ )
# Get additional header data which may have been created by ffdc plug-ins.
# Also, delete the individual header files to cleanup.
- cmd_buf = "file_list=$(cat " + ffdc_report_list_path + " 2>/dev/null)" +\
- " ; [ ! -z \"${file_list}\" ] && cat ${file_list}" +\
- " 2>/dev/null ; rm -rf ${file_list} 2>/dev/null || :"
- shell_rc, more_header_info = gc.cmd_fnc_u(cmd_buf, print_output=0,
- show_err=0)
+ cmd_buf = (
+ "file_list=$(cat "
+ + ffdc_report_list_path
+ + " 2>/dev/null)"
+ + ' ; [ ! -z "${file_list}" ] && cat ${file_list}'
+ + " 2>/dev/null ; rm -rf ${file_list} 2>/dev/null || :"
+ )
+ shell_rc, more_header_info = gc.cmd_fnc_u(
+ cmd_buf, print_output=0, show_err=0
+ )
# Get additional summary data which may have been created by ffdc plug-ins.
# Also, delete the individual header files to cleanup.
- cmd_buf = "file_list=$(cat " + ffdc_summary_list_path + " 2>/dev/null)" +\
- " ; [ ! -z \"${file_list}\" ] && cat ${file_list}" +\
- " 2>/dev/null ; rm -rf ${file_list} 2>/dev/null || :"
- shell_rc, ffdc_summary_info = gc.cmd_fnc_u(cmd_buf, print_output=0,
- show_err=0)
+ cmd_buf = (
+ "file_list=$(cat "
+ + ffdc_summary_list_path
+ + " 2>/dev/null)"
+ + ' ; [ ! -z "${file_list}" ] && cat ${file_list}'
+ + " 2>/dev/null ; rm -rf ${file_list} 2>/dev/null || :"
+ )
+ shell_rc, ffdc_summary_info = gc.cmd_fnc_u(
+ cmd_buf, print_output=0, show_err=0
+ )
# ffdc_list_file_path contains a list of any ffdc files created by plug-
# ins, etc. Read that data into a list.
try:
- plug_in_ffdc_list = \
- open(ffdc_list_file_path, 'r').read().rstrip("\n").split("\n")
+ plug_in_ffdc_list = (
+ open(ffdc_list_file_path, "r").read().rstrip("\n").split("\n")
+ )
plug_in_ffdc_list = list(filter(None, plug_in_ffdc_list))
except IOError:
plug_in_ffdc_list = []
@@ -722,7 +828,7 @@
# Open ffdc_file_list for writing. We will write a complete list of
# FFDC files to it for possible use by plug-ins like cp_stop_check.
- ffdc_list_file = open(ffdc_list_file_path, 'w')
+ ffdc_list_file = open(ffdc_list_file_path, "w")
ffdc_list_file.write(printable_ffdc_file_list + "\n")
ffdc_list_file.close()
@@ -737,13 +843,35 @@
if len(more_header_info) > 0:
gp.qprintn(more_header_info)
- gp.qpvars(host_name, host_ip, openbmc_nickname, openbmc_host,
- openbmc_host_name, openbmc_ip, openbmc_username,
- openbmc_password, rest_username, rest_password, ipmi_username,
- ipmi_password, os_host, os_host_name, os_ip, os_username,
- os_password, pdu_host, pdu_host_name, pdu_ip, pdu_username,
- pdu_password, pdu_slot_no, openbmc_serial_host,
- openbmc_serial_host_name, openbmc_serial_ip, openbmc_serial_port)
+ gp.qpvars(
+ host_name,
+ host_ip,
+ openbmc_nickname,
+ openbmc_host,
+ openbmc_host_name,
+ openbmc_ip,
+ openbmc_username,
+ openbmc_password,
+ rest_username,
+ rest_password,
+ ipmi_username,
+ ipmi_password,
+ os_host,
+ os_host_name,
+ os_ip,
+ os_username,
+ os_password,
+ pdu_host,
+ pdu_host_name,
+ pdu_ip,
+ pdu_username,
+ pdu_password,
+ pdu_slot_no,
+ openbmc_serial_host,
+ openbmc_serial_host_name,
+ openbmc_serial_ip,
+ openbmc_serial_port,
+ )
gp.qprintn()
print_boot_history(boot_history)
@@ -769,14 +897,18 @@
plug_in_setup()
rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
- call_point='ffdc', stop_on_plug_in_failure=0)
+ call_point="ffdc", stop_on_plug_in_failure=0
+ )
- AUTOBOOT_FFDC_PREFIX = os.environ['AUTOBOOT_FFDC_PREFIX']
- status, ffdc_file_list = grk.run_key_u("FFDC ffdc_prefix="
- + AUTOBOOT_FFDC_PREFIX
- + " ffdc_function_list="
- + ffdc_function_list, ignore=1)
- if status != 'PASS':
+ AUTOBOOT_FFDC_PREFIX = os.environ["AUTOBOOT_FFDC_PREFIX"]
+ status, ffdc_file_list = grk.run_key_u(
+ "FFDC ffdc_prefix="
+ + AUTOBOOT_FFDC_PREFIX
+ + " ffdc_function_list="
+ + ffdc_function_list,
+ ignore=1,
+ )
+ if status != "PASS":
gp.qprint_error("Call to ffdc failed.\n")
if type(ffdc_file_list) is not list:
ffdc_file_list = []
@@ -801,7 +933,7 @@
global boot_history
global boot_start_time
- doing_msg = gp.sprint_timen("Doing \"" + boot_keyword + "\".")
+ doing_msg = gp.sprint_timen('Doing "' + boot_keyword + '".')
# Set boot_start_time for use by plug-ins.
boot_start_time = doing_msg[1:33]
@@ -812,8 +944,7 @@
update_boot_history(boot_history, doing_msg, max_boot_history)
-def stop_boot_test(signal_number=0,
- frame=None):
+def stop_boot_test(signal_number=0, frame=None):
r"""
Handle SIGUSR1 by aborting the boot test that is running.
@@ -855,34 +986,40 @@
print_test_start_message(boot)
plug_in_setup()
- rc, shell_rc, failed_plug_in_name = \
- grpi.rprocess_plug_in_packages(call_point="pre_boot")
+ rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
+ call_point="pre_boot"
+ )
if rc != 0:
- error_message = "Plug-in failed with non-zero return code.\n" +\
- gp.sprint_var(rc, fmt=gp.hexa())
+ error_message = (
+ "Plug-in failed with non-zero return code.\n"
+ + gp.sprint_var(rc, fmt=gp.hexa())
+ )
set_default_siguser1()
BuiltIn().fail(gp.sprint_error(error_message))
if test_mode:
# In test mode, we'll pretend the boot worked by assigning its
# required end state to the default state value.
- state = st.strip_anchor_state(boot_table[boot]['end'])
+ state = st.strip_anchor_state(boot_table[boot]["end"])
else:
# Assertion: We trust that the state data was made fresh by the
# caller.
gp.qprintn()
- if boot_table[boot]['method_type'] == "keyword":
- rk.my_run_keywords(boot_table[boot].get('lib_file_path', ''),
- boot_table[boot]['method'],
- quiet=quiet)
+ if boot_table[boot]["method_type"] == "keyword":
+ rk.my_run_keywords(
+ boot_table[boot].get("lib_file_path", ""),
+ boot_table[boot]["method"],
+ quiet=quiet,
+ )
- if boot_table[boot]['bmc_reboot']:
- st.wait_for_comm_cycle(int(state['epoch_seconds']))
+ if boot_table[boot]["bmc_reboot"]:
+ st.wait_for_comm_cycle(int(state["epoch_seconds"]))
plug_in_setup()
- rc, shell_rc, failed_plug_in_name = \
- grpi.rprocess_plug_in_packages(call_point="post_reboot")
+ rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
+ call_point="post_reboot"
+ )
if rc != 0:
error_message = "Plug-in failed with non-zero return code.\n"
error_message += gp.sprint_var(rc, fmt=gp.hexa())
@@ -890,25 +1027,35 @@
BuiltIn().fail(gp.sprint_error(error_message))
else:
match_state = st.anchor_state(state)
- del match_state['epoch_seconds']
+ del match_state["epoch_seconds"]
# Wait for the state to change in any way.
- st.wait_state(match_state, wait_time=state_change_timeout,
- interval="10 seconds", invert=1)
+ st.wait_state(
+ match_state,
+ wait_time=state_change_timeout,
+ interval="10 seconds",
+ invert=1,
+ )
gp.qprintn()
- if boot_table[boot]['end']['chassis'] == "Off":
+ if boot_table[boot]["end"]["chassis"] == "Off":
boot_timeout = power_off_timeout
else:
boot_timeout = power_on_timeout
- st.wait_state(boot_table[boot]['end'], wait_time=boot_timeout,
- interval="10 seconds")
+ st.wait_state(
+ boot_table[boot]["end"],
+ wait_time=boot_timeout,
+ interval="10 seconds",
+ )
plug_in_setup()
- rc, shell_rc, failed_plug_in_name = \
- grpi.rprocess_plug_in_packages(call_point="post_boot")
+ rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
+ call_point="post_boot"
+ )
if rc != 0:
- error_message = "Plug-in failed with non-zero return code.\n" +\
- gp.sprint_var(rc, fmt=gp.hexa())
+ error_message = (
+ "Plug-in failed with non-zero return code.\n"
+ + gp.sprint_var(rc, fmt=gp.hexa())
+ )
set_default_siguser1()
BuiltIn().fail(gp.sprint_error(error_message))
@@ -949,12 +1096,14 @@
gp.qprintn()
if boot_status == "PASS":
boot_success = 1
- completion_msg = gp.sprint_timen("BOOT_SUCCESS: \"" + next_boot
- + "\" succeeded.")
+ completion_msg = gp.sprint_timen(
+ 'BOOT_SUCCESS: "' + next_boot + '" succeeded.'
+ )
else:
boot_success = 0
- completion_msg = gp.sprint_timen("BOOT_FAILED: \"" + next_boot
- + "\" failed.")
+ completion_msg = gp.sprint_timen(
+ 'BOOT_FAILED: "' + next_boot + '" failed.'
+ )
# Set boot_end_time for use by plug-ins.
boot_end_time = completion_msg[1:33]
@@ -968,16 +1117,19 @@
# NOTE: A post_test_case call point failure is NOT counted as a boot
# failure.
rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
- call_point='post_test_case', stop_on_plug_in_failure=0)
+ call_point="post_test_case", stop_on_plug_in_failure=0
+ )
plug_in_setup()
rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
- call_point='ffdc_check', shell_rc=dump_ffdc_rc(),
- stop_on_plug_in_failure=1, stop_on_non_zero_rc=1)
- if ffdc_check == "All" or\
- shell_rc == dump_ffdc_rc():
+ call_point="ffdc_check",
+ shell_rc=dump_ffdc_rc(),
+ stop_on_plug_in_failure=1,
+ stop_on_non_zero_rc=1,
+ )
+ if ffdc_check == "All" or shell_rc == dump_ffdc_rc():
status, ret_values = grk.run_key_u("my_ffdc", ignore=1)
- if status != 'PASS':
+ if status != "PASS":
gp.qprint_error("Call to my_ffdc failed.\n")
# Leave a record for caller that "soft" errors occurred.
soft_errors = 1
@@ -987,7 +1139,9 @@
# print error logs before delete
if redfish_support_trans_state:
status, error_logs = grk.run_key_u("Get Redfish Event Logs")
- log.print_error_logs(error_logs, "AdditionalDataURI Message Severity")
+ log.print_error_logs(
+ error_logs, "AdditionalDataURI Message Severity"
+ )
else:
status, error_logs = grk.run_key_u("Get Error Logs")
log.print_error_logs(error_logs, "AdditionalData Message Severity")
@@ -1005,8 +1159,8 @@
plug_in_setup()
rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
- call_point='stop_check', shell_rc=stop_test_rc(),
- stop_on_non_zero_rc=1)
+ call_point="stop_check", shell_rc=stop_test_rc(), stop_on_non_zero_rc=1
+ )
if shell_rc == stop_test_rc():
message = "Stopping as requested by user.\n"
gp.qprint_time(message)
@@ -1033,16 +1187,19 @@
if cp_setup_called:
plug_in_setup()
rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
- call_point='cleanup', stop_on_plug_in_failure=0)
+ call_point="cleanup", stop_on_plug_in_failure=0
+ )
- if 'boot_results_file_path' in globals():
+ if "boot_results_file_path" in globals():
# Save boot_results and boot_history objects to a file in case they are
# needed again.
gp.qprint_timen("Saving boot_results to the following path.")
gp.qprint_var(boot_results_file_path)
- pickle.dump((boot_results, boot_history),
- open(boot_results_file_path, 'wb'),
- pickle.HIGHEST_PROTOCOL)
+ pickle.dump(
+ (boot_results, boot_history),
+ open(boot_results_file_path, "wb"),
+ pickle.HIGHEST_PROTOCOL,
+ )
global save_stack
# Restore any global values saved on the save_stack.
@@ -1055,8 +1212,9 @@
continue
# Restore the saved value.
- cmd_buf = "BuiltIn().set_global_variable(\"${" + parm_name +\
- "}\", parm_value)"
+ cmd_buf = (
+ 'BuiltIn().set_global_variable("${' + parm_name + '}", parm_value)'
+ )
gp.dpissuing(cmd_buf)
exec(cmd_buf)
@@ -1074,8 +1232,10 @@
if ga.psutil_imported:
ga.terminate_descendants()
- cmd_buf = ["Print Error",
- "A keyword timeout occurred ending this program.\n"]
+ cmd_buf = [
+ "Print Error",
+ "A keyword timeout occurred ending this program.\n",
+ ]
BuiltIn().run_keyword_if_timeout_occurred(*cmd_buf)
if redfish_supported:
@@ -1100,24 +1260,29 @@
# For the purposes of the following plug-ins, mark the "boot" as a success.
boot_success = 1
plug_in_setup()
- rc, shell_rc, failed_plug_in_name, history =\
- grpi.rprocess_plug_in_packages(call_point='post_stack',
- stop_on_plug_in_failure=0,
- return_history=True)
+ (
+ rc,
+ shell_rc,
+ failed_plug_in_name,
+ history,
+ ) = grpi.rprocess_plug_in_packages(
+ call_point="post_stack", stop_on_plug_in_failure=0, return_history=True
+ )
for doing_msg in history:
update_boot_history(boot_history, doing_msg, max_boot_history)
if rc != 0:
boot_success = 0
plug_in_setup()
- rc, shell_rc, failed_plug_in_name =\
- grpi.rprocess_plug_in_packages(call_point='ffdc_check',
- shell_rc=dump_ffdc_rc(),
- stop_on_plug_in_failure=1,
- stop_on_non_zero_rc=1)
+ rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
+ call_point="ffdc_check",
+ shell_rc=dump_ffdc_rc(),
+ stop_on_plug_in_failure=1,
+ stop_on_non_zero_rc=1,
+ )
if shell_rc == dump_ffdc_rc():
status, ret_values = grk.run_key_u("my_ffdc", ignore=1)
- if status != 'PASS':
+ if status != "PASS":
gp.qprint_error("Call to my_ffdc failed.\n")
# Leave a record for caller that "soft" errors occurred.
soft_errors = 1
@@ -1125,24 +1290,26 @@
plug_in_setup()
rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
- call_point='stop_check', shell_rc=stop_test_rc(),
- stop_on_non_zero_rc=1)
+ call_point="stop_check", shell_rc=stop_test_rc(), stop_on_non_zero_rc=1
+ )
if shell_rc == stop_test_rc():
message = "Stopping as requested by user.\n"
gp.qprint_time(message)
BuiltIn().fail(message)
-def obmc_boot_test_py(loc_boot_stack=None,
- loc_stack_mode=None,
- loc_quiet=None):
+def obmc_boot_test_py(
+ loc_boot_stack=None, loc_stack_mode=None, loc_quiet=None
+):
r"""
Do main program processing.
"""
global save_stack
- ga.set_term_options(term_requests={'pgm_names': ['process_plug_in_packages.py']})
+ ga.set_term_options(
+ term_requests={"pgm_names": ["process_plug_in_packages.py"]}
+ )
gp.dprintn()
# Process function parms.
@@ -1153,14 +1320,24 @@
if parm_value is not None:
# Save the global value on a stack.
- cmd_buf = "save_stack.push(BuiltIn().get_variable_value(\"${" +\
- parm_name + "}\"), \"" + parm_name + "\")"
+ cmd_buf = (
+ 'save_stack.push(BuiltIn().get_variable_value("${'
+ + parm_name
+ + '}"), "'
+ + parm_name
+ + '")'
+ )
gp.dpissuing(cmd_buf)
exec(cmd_buf)
# Set the global value to the passed value.
- cmd_buf = "BuiltIn().set_global_variable(\"${" + parm_name +\
- "}\", loc_" + parm_name + ")"
+ cmd_buf = (
+ 'BuiltIn().set_global_variable("${'
+ + parm_name
+ + '}", loc_'
+ + parm_name
+ + ")"
+ )
gp.dpissuing(cmd_buf)
exec(cmd_buf)
@@ -1181,7 +1358,9 @@
# print error logs before delete
if redfish_support_trans_state:
status, error_logs = grk.run_key_u("Get Redfish Event Logs")
- log.print_error_logs(error_logs, "AdditionalDataURI Message Severity")
+ log.print_error_logs(
+ error_logs, "AdditionalDataURI Message Severity"
+ )
else:
status, error_logs = grk.run_key_u("Get Error Logs")
log.print_error_logs(error_logs, "AdditionalData Message Severity")
@@ -1195,7 +1374,7 @@
grk.run_key(delete_sysdump_cmd, ignore=1)
# Process caller's boot_stack.
- while (len(boot_stack) > 0):
+ while len(boot_stack) > 0:
test_loop_body()
gp.qprint_timen("Finished processing stack.")
@@ -1212,8 +1391,10 @@
boot_pass, boot_fail = boot_results.return_total_pass_fail()
new_fail = boot_fail - init_boot_fail
if new_fail > boot_fail_threshold:
- error_message = "Boot failures exceed the boot failure" +\
- " threshold:\n" +\
- gp.sprint_var(new_fail) +\
- gp.sprint_var(boot_fail_threshold)
+ error_message = (
+ "Boot failures exceed the boot failure"
+ + " threshold:\n"
+ + gp.sprint_var(new_fail)
+ + gp.sprint_var(boot_fail_threshold)
+ )
BuiltIn().fail(gp.sprint_error(error_message))
diff --git a/lib/openbmc_ffdc.py b/lib/openbmc_ffdc.py
index 78f596c..b5efa7a 100644
--- a/lib/openbmc_ffdc.py
+++ b/lib/openbmc_ffdc.py
@@ -7,20 +7,24 @@
import os
import gen_print as gp
-import gen_valid as gv
import gen_robot_keyword as grk
+import gen_valid as gv
import state as st
-
from robot.libraries.BuiltIn import BuiltIn
-redfish_support_trans_state = int(os.environ.get('REDFISH_SUPPORT_TRANS_STATE', 0)) or \
- int(BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0))
+redfish_support_trans_state = int(
+ os.environ.get("REDFISH_SUPPORT_TRANS_STATE", 0)
+) or int(
+ BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0)
+)
-def ffdc(ffdc_dir_path=None,
- ffdc_prefix=None,
- ffdc_function_list="",
- comm_check=True):
+def ffdc(
+ ffdc_dir_path=None,
+ ffdc_prefix=None,
+ ffdc_function_list="",
+ comm_check=True,
+):
r"""
Gather First Failure Data Capture (FFDC).
@@ -48,26 +52,31 @@
if comm_check:
if not redfish_support_trans_state:
- interface = 'rest'
+ interface = "rest"
else:
- interface = 'redfish'
+ interface = "redfish"
- state = st.get_state(req_states=['ping', 'uptime', interface])
+ state = st.get_state(req_states=["ping", "uptime", interface])
gp.qprint_var(state)
- if not int(state['ping']):
- gp.print_error("BMC is not ping-able. Terminating FFDC collection.\n")
+ if not int(state["ping"]):
+ gp.print_error(
+ "BMC is not ping-able. Terminating FFDC collection.\n"
+ )
return ffdc_file_list
if not int(state[interface]):
gp.print_error("%s commands to the BMC are failing." % interface)
- if state['uptime'] == "":
+ if state["uptime"] == "":
gp.print_error("BMC is not communicating via ssh.\n")
# If SSH and Redfish connection doesn't works, abort.
- if not int(state[interface]) and state['uptime'] == "":
- gp.print_error("BMC is not communicating via ssh or Redfish. Terminating FFDC"
- + " collection.\n")
+ if not int(state[interface]) and state["uptime"] == "":
+ gp.print_error(
+ "BMC is not communicating via ssh or Redfish. Terminating"
+ " FFDC"
+ + " collection.\n"
+ )
return ffdc_file_list
gp.qprint_timen("Collecting FFDC.")
@@ -85,9 +94,12 @@
gp.qprint_issuing(cmd_buf)
status, output = BuiltIn().run_keyword_and_ignore_error(*cmd_buf)
if status != "PASS":
- error_message = gp.sprint_error_report("Create Directory failed"
- + " with the following"
- + " error:\n" + output)
+ error_message = gp.sprint_error_report(
+ "Create Directory failed"
+ + " with the following"
+ + " error:\n"
+ + output
+ )
BuiltIn().fail(error_message)
# FFDC_FILE_PATH is used by Header Message.
@@ -95,9 +107,9 @@
BuiltIn().set_global_variable("${FFDC_FILE_PATH}", FFDC_FILE_PATH)
status, ffdc_file_list = grk.run_key_u("Header Message")
- status, ffdc_file_sub_list = \
- grk.run_key_u("Call FFDC Methods ffdc_function_list="
- + ffdc_function_list)
+ status, ffdc_file_sub_list = grk.run_key_u(
+ "Call FFDC Methods ffdc_function_list=" + ffdc_function_list
+ )
# Combine lists, remove duplicates and sort.
ffdc_file_list = sorted(set(ffdc_file_list + ffdc_file_sub_list))
@@ -107,8 +119,7 @@
return ffdc_file_list
-def set_ffdc_defaults(ffdc_dir_path=None,
- ffdc_prefix=None):
+def set_ffdc_defaults(ffdc_dir_path=None, ffdc_prefix=None):
r"""
Set a default value for ffdc_dir_path and ffdc_prefix if they don't
already have values. Return both values.
@@ -130,24 +141,34 @@
BuiltIn().set_global_variable("${FFDC_TIME}", FFDC_TIME)
ffdc_dir_path_style = BuiltIn().get_variable_value(
- "${ffdc_dir_path_style}")
+ "${ffdc_dir_path_style}"
+ )
if ffdc_dir_path is None:
if ffdc_dir_path_style:
try:
- ffdc_dir_path = os.environ['FFDC_DIR_PATH']
+ ffdc_dir_path = os.environ["FFDC_DIR_PATH"]
except KeyError:
- ffdc_dir_path = os.path.dirname(
- BuiltIn().get_variable_value("${LOG_FILE}")) + "/"
+ ffdc_dir_path = (
+ os.path.dirname(
+ BuiltIn().get_variable_value("${LOG_FILE}")
+ )
+ + "/"
+ )
else:
FFDC_LOG_PATH = os.getcwd() + "/logs/"
if FFDC_LOG_PATH is None:
FFDC_LOG_PATH = ""
if FFDC_LOG_PATH == "":
- FFDC_LOG_PATH = os.path.dirname(
- BuiltIn().get_variable_value("${LOG_FILE}")) + "/"
- error_message = gv.valid_value(FFDC_LOG_PATH,
- var_name="FFDC_LOG_PATH")
+ FFDC_LOG_PATH = (
+ os.path.dirname(
+ BuiltIn().get_variable_value("${LOG_FILE}")
+ )
+ + "/"
+ )
+ error_message = gv.valid_value(
+ FFDC_LOG_PATH, var_name="FFDC_LOG_PATH"
+ )
if error_message != "":
error_message = gp.sprint_error_report(error_message)
BuiltIn().fail(error_message)
@@ -168,9 +189,16 @@
if ffdc_dir_path_style:
OPENBMC_HOST = BuiltIn().get_variable_value("${OPENBMC_HOST}")
OPENBMC_NICKNAME = BuiltIn().get_variable_value(
- "${OPENBMC_NICKNAME}", default=OPENBMC_HOST)
- ffdc_prefix = OPENBMC_NICKNAME + "." + FFDC_TIME[2:8] + "." +\
- FFDC_TIME[8:14] + "."
+ "${OPENBMC_NICKNAME}", default=OPENBMC_HOST
+ )
+ ffdc_prefix = (
+ OPENBMC_NICKNAME
+ + "."
+ + FFDC_TIME[2:8]
+ + "."
+ + FFDC_TIME[8:14]
+ + "."
+ )
else:
ffdc_prefix = FFDC_TIME + "_"
diff --git a/lib/openbmc_ffdc_list.py b/lib/openbmc_ffdc_list.py
index 9fb882a..ea79a81 100755
--- a/lib/openbmc_ffdc_list.py
+++ b/lib/openbmc_ffdc_list.py
@@ -17,159 +17,203 @@
# -----------------------------------------------------------------
# Add cmd's needed to be part of the ffdc report manifest file
FFDC_BMC_CMD = {
- 'DRIVER INFO':
- {
+ "DRIVER INFO": {
# String Name Command
- 'FW Level': 'cat /etc/os-release',
- 'FW Timestamp': 'cat /etc/timestamp',
+ "FW Level": "cat /etc/os-release",
+ "FW Timestamp": "cat /etc/timestamp",
},
- 'BMC DATA':
- {
- 'BMC OS': 'uname -a',
- 'BMC Uptime': 'uptime;cat /proc/uptime',
- 'BMC File System Disk Space Usage': 'df -hT',
- 'BMC Date Time': 'date;/sbin/hwclock --show;/usr/bin/timedatectl'
+ "BMC DATA": {
+ "BMC OS": "uname -a",
+ "BMC Uptime": "uptime;cat /proc/uptime",
+ "BMC File System Disk Space Usage": "df -hT",
+ "BMC Date Time": "date;/sbin/hwclock --show;/usr/bin/timedatectl",
},
- 'APPLICATION DATA':
- {
- 'BMC state': '/usr/bin/obmcutil state',
+ "APPLICATION DATA": {
+ "BMC state": "/usr/bin/obmcutil state",
},
}
# Add file name and corresponding command needed for BMC
FFDC_BMC_FILE = {
- 'BMC FILES':
- {
+ "BMC FILES": {
# File Name Command
- 'BMC_flash_side.txt': 'cat /sys/class/watchdog/watchdog1/bootstatus >/tmp/BMC_flash_side.txt 2>&1',
- 'BMC_hwmon.txt': 'grep -r . /sys/class/hwmon/* >/tmp/BMC_hwmon.txt 2>&1',
- 'BMC_proc_list.txt': 'top -n 1 -b >/tmp/BMC_proc_list.txt 2>&1',
- 'BMC_proc_fd_active_list.txt': 'ls -Al /proc/*/fd/ >/tmp/BMC_proc_fd_active_list.txt 2>&1',
- 'BMC_journalctl_nopager.txt': 'journalctl --no-pager >/tmp/BMC_journalctl_nopager.txt 2>&1',
- 'BMC_journalctl_pretty.json': 'journalctl -o json-pretty >/tmp/BMC_journalctl_pretty.json 2>&1',
- 'BMC_dmesg.txt': 'dmesg >/tmp/BMC_dmesg.txt 2>&1',
- 'BMC_procinfo.txt': 'cat /proc/cpuinfo >/tmp/BMC_procinfo.txt 2>&1',
- 'BMC_meminfo.txt': 'cat /proc/meminfo >/tmp/BMC_meminfo.txt 2>&1',
- 'BMC_systemd.txt': 'systemctl status --all >/tmp/BMC_systemd.txt 2>&1',
- 'BMC_failed_service.txt': 'systemctl list-units --failed >/tmp/BMC_failed_service.txt 2>&1',
- 'BMC_list_service.txt': 'systemctl list-jobs >/tmp/BMC_list_service.txt 2>&1',
- 'BMC_obmc_console.txt': 'cat /var/log/obmc-console.log >/tmp/BMC_obmc_console.txt 2>&1',
- 'BMC_obmc_console1.txt': 'cat /var/log/obmc-console1.log >/tmp/BMC_obmc_console1.txt 2>&1',
- 'PEL_logs_list.json': 'peltool -l >/tmp/PEL_logs_list.json 2>&1',
- 'PEL_logs_complete_list.json': 'peltool -l -a -f >/tmp/PEL_logs_complete_list.json 2>&1',
- 'PEL_logs_display.json': 'peltool -a >/tmp/PEL_logs_display.json 2>&1',
- 'PEL_logs_complete_display.json': 'peltool -a -f -h>/tmp/PEL_logs_complete_display.json 2>&1',
- 'PEL_logs_badPEL.txt': 'hexdump -C'
- + ' /var/lib/phosphor-logging/extensions/pels/badPEL>/tmp/PEL_logs_badPEL.txt 2>&1',
- 'PLDM_fru_record.txt': 'pldmtool fru getfrurecordtable>/tmp/PLDM_fru_record.txt 2>&1',
- 'BMC_pldm_flight_recorder.txt': 'rm -rf /tmp/pldm_flight_recorder; killall -s SIGUSR1 pldmd;'
- + ' sleep 5; cat /tmp/pldm_flight_recorder > /tmp/BMC_pldm_flight_recorder.txt 2>&1;',
- 'OCC_state.txt': 'echo "OCC state check";for i in {0..3};'
- + ' do (echo /org/open_power/control/occ$i;'
- + ' busctl get-property org.open_power.OCC.Control /org/open_power/control/occ$i'
- + ' org.open_power.OCC.Status OccActive) done > /tmp/OCC_state.txt 2>&1',
- 'bmcweb_persistent_data.json': 'cat /home/root/bmcweb_persistent_data.json'
- + ' > /tmp/bmcweb_persistent_data.json',
- 'GUARD_list.txt': 'guard -l > /tmp/GUARD_list.txt 2>&1',
- 'fan_control_dump.json': 'fanctl dump; sleep 5',
- 'DEVTREE': 'cat /var/lib/phosphor-software-manager/pnor/rw/DEVTREE > /tmp/DEVTREE 2>&1',
+ "BMC_flash_side.txt": (
+ "cat /sys/class/watchdog/watchdog1/bootstatus"
+ " >/tmp/BMC_flash_side.txt 2>&1"
+ ),
+ "BMC_hwmon.txt": (
+ "grep -r . /sys/class/hwmon/* >/tmp/BMC_hwmon.txt 2>&1"
+ ),
+ "BMC_proc_list.txt": "top -n 1 -b >/tmp/BMC_proc_list.txt 2>&1",
+ "BMC_proc_fd_active_list.txt": (
+ "ls -Al /proc/*/fd/ >/tmp/BMC_proc_fd_active_list.txt 2>&1"
+ ),
+ "BMC_journalctl_nopager.txt": (
+ "journalctl --no-pager >/tmp/BMC_journalctl_nopager.txt 2>&1"
+ ),
+ "BMC_journalctl_pretty.json": (
+ "journalctl -o json-pretty >/tmp/BMC_journalctl_pretty.json 2>&1"
+ ),
+ "BMC_dmesg.txt": "dmesg >/tmp/BMC_dmesg.txt 2>&1",
+ "BMC_procinfo.txt": "cat /proc/cpuinfo >/tmp/BMC_procinfo.txt 2>&1",
+ "BMC_meminfo.txt": "cat /proc/meminfo >/tmp/BMC_meminfo.txt 2>&1",
+ "BMC_systemd.txt": "systemctl status --all >/tmp/BMC_systemd.txt 2>&1",
+ "BMC_failed_service.txt": (
+ "systemctl list-units --failed >/tmp/BMC_failed_service.txt 2>&1"
+ ),
+ "BMC_list_service.txt": (
+ "systemctl list-jobs >/tmp/BMC_list_service.txt 2>&1"
+ ),
+ "BMC_obmc_console.txt": (
+ "cat /var/log/obmc-console.log >/tmp/BMC_obmc_console.txt 2>&1"
+ ),
+ "BMC_obmc_console1.txt": (
+ "cat /var/log/obmc-console1.log >/tmp/BMC_obmc_console1.txt 2>&1"
+ ),
+ "PEL_logs_list.json": "peltool -l >/tmp/PEL_logs_list.json 2>&1",
+ "PEL_logs_complete_list.json": (
+ "peltool -l -a -f >/tmp/PEL_logs_complete_list.json 2>&1"
+ ),
+ "PEL_logs_display.json": "peltool -a >/tmp/PEL_logs_display.json 2>&1",
+ "PEL_logs_complete_display.json": (
+ "peltool -a -f -h>/tmp/PEL_logs_complete_display.json 2>&1"
+ ),
+ "PEL_logs_badPEL.txt": "hexdump -C"
+ + " /var/lib/phosphor-logging/extensions/pels/badPEL>/tmp/PEL_logs_badPEL.txt"
+ " 2>&1",
+ "PLDM_fru_record.txt": (
+ "pldmtool fru getfrurecordtable>/tmp/PLDM_fru_record.txt 2>&1"
+ ),
+ "BMC_pldm_flight_recorder.txt": (
+ "rm -rf /tmp/pldm_flight_recorder; killall -s SIGUSR1 pldmd;"
+ )
+ + " sleep 5; cat /tmp/pldm_flight_recorder >"
+ " /tmp/BMC_pldm_flight_recorder.txt 2>&1;",
+ "OCC_state.txt": 'echo "OCC state check";for i in {0..3};'
+ + " do (echo /org/open_power/control/occ$i;"
+ + " busctl get-property org.open_power.OCC.Control"
+ " /org/open_power/control/occ$i"
+ + " org.open_power.OCC.Status OccActive) done > /tmp/OCC_state.txt"
+ " 2>&1",
+ "bmcweb_persistent_data.json": (
+ "cat /home/root/bmcweb_persistent_data.json"
+ )
+ + " > /tmp/bmcweb_persistent_data.json",
+ "GUARD_list.txt": "guard -l > /tmp/GUARD_list.txt 2>&1",
+ "fan_control_dump.json": "fanctl dump; sleep 5",
+ "DEVTREE": (
+ "cat /var/lib/phosphor-software-manager/pnor/rw/DEVTREE >"
+ " /tmp/DEVTREE 2>&1"
+ ),
},
}
# Add file name and corresponding command needed for all Linux distributions
FFDC_OS_ALL_DISTROS_FILE = {
- 'OS FILES':
- {
+ "OS FILES": {
# File Name Command
- 'OS_msglog.txt': 'cat /sys/firmware/opal/msglog >/tmp/OS_msglog.txt 2>&1',
- 'OS_cpufrequency.txt': 'ppc64_cpu --frequency '
- + '>/tmp/OS_cpufrequency.txt 2>&1',
- 'OS_dmesg.txt': 'dmesg >/tmp/OS_dmesg.txt 2>&1',
- 'OS_opal_prd.txt': 'cat /var/log/opal-prd* >/tmp/OS_opal_prd.txt 2>&1',
- 'OS_boot.txt': 'cat /var/log/boot.log >/tmp/OS_boot.txt 2>&1',
- 'OS_procinfo.txt': 'cat /proc/cpuinfo >/tmp/OS_procinfo.txt 2>&1',
- 'OS_meminfo.txt': 'cat /proc/meminfo >/tmp/OS_meminfo.txt 2>&1',
- 'OS_netstat.txt': 'netstat -a >/tmp/OS_netstat.txt 2>&1',
- 'OS_lspci.txt': 'lspci >/tmp/OS_lspci.txt 2>&1',
- 'OS_lscpu.txt': 'lscpu >/tmp/OS_lscpu.txt 2>&1',
- 'OS_lscfg.txt': 'lscfg >/tmp/OS_lscfg.txt 2>&1',
- 'OS_journalctl_nopager.txt': 'journalctl --no-pager -b '
- + '> /tmp/OS_journalctl_nopager.txt 2>&1',
+ "OS_msglog.txt": (
+ "cat /sys/firmware/opal/msglog >/tmp/OS_msglog.txt 2>&1"
+ ),
+ "OS_cpufrequency.txt": "ppc64_cpu --frequency "
+ + ">/tmp/OS_cpufrequency.txt 2>&1",
+ "OS_dmesg.txt": "dmesg >/tmp/OS_dmesg.txt 2>&1",
+ "OS_opal_prd.txt": "cat /var/log/opal-prd* >/tmp/OS_opal_prd.txt 2>&1",
+ "OS_boot.txt": "cat /var/log/boot.log >/tmp/OS_boot.txt 2>&1",
+ "OS_procinfo.txt": "cat /proc/cpuinfo >/tmp/OS_procinfo.txt 2>&1",
+ "OS_meminfo.txt": "cat /proc/meminfo >/tmp/OS_meminfo.txt 2>&1",
+ "OS_netstat.txt": "netstat -a >/tmp/OS_netstat.txt 2>&1",
+ "OS_lspci.txt": "lspci >/tmp/OS_lspci.txt 2>&1",
+ "OS_lscpu.txt": "lscpu >/tmp/OS_lscpu.txt 2>&1",
+ "OS_lscfg.txt": "lscfg >/tmp/OS_lscfg.txt 2>&1",
+ "OS_journalctl_nopager.txt": "journalctl --no-pager -b "
+ + "> /tmp/OS_journalctl_nopager.txt 2>&1",
},
}
# Add file name and corresponding command needed for Ubuntu Linux
FFDC_OS_UBUNTU_FILE = {
- 'OS FILES':
- {
+ "OS FILES": {
# File Name Command
- 'OS_isusb.txt': '{ lsusb -t ; lsusb -v ; } >/tmp/OS_isusb.txt 2>&1',
- 'OS_kern.txt': 'tail -n 50000 /var/log/kern.log >/tmp/OS_kern.txt 2>&1',
- 'OS_authlog.txt': '{ cat /var/log/auth.log; cat /var/log/auth.log.1 ; } '
- + '>/tmp/OS_authlog.txt 2>&1',
- 'OS_syslog.txt': 'tail -n 200000 /var/log/syslog >/tmp/OS_syslog.txt 2>&1',
- 'OS_info.txt': '{ uname -a; dpkg -s opal-prd; dpkg -s ipmitool ; } '
- + '>/tmp/OS_info.txt 2>&1',
- 'OS_sosreport.txt': '{ rm -rf /tmp/sosreport*FFDC* ; sosreport --batch --tmp-dir '
- + '/tmp --ticket-number FFDC ; } >/tmp/OS_sosreport.txt 2>&1',
+ "OS_isusb.txt": "{ lsusb -t ; lsusb -v ; } >/tmp/OS_isusb.txt 2>&1",
+ "OS_kern.txt": (
+ "tail -n 50000 /var/log/kern.log >/tmp/OS_kern.txt 2>&1"
+ ),
+ "OS_authlog.txt": (
+ "{ cat /var/log/auth.log; cat /var/log/auth.log.1 ; } "
+ )
+ + ">/tmp/OS_authlog.txt 2>&1",
+ "OS_syslog.txt": (
+ "tail -n 200000 /var/log/syslog >/tmp/OS_syslog.txt 2>&1"
+ ),
+ "OS_info.txt": "{ uname -a; dpkg -s opal-prd; dpkg -s ipmitool ; } "
+ + ">/tmp/OS_info.txt 2>&1",
+ "OS_sosreport.txt": (
+ "{ rm -rf /tmp/sosreport*FFDC* ; sosreport --batch --tmp-dir "
+ )
+ + "/tmp --ticket-number FFDC ; } >/tmp/OS_sosreport.txt 2>&1",
},
}
# Add file name and corresponding command needed for RHEL Linux
FFDC_OS_RHEL_FILE = {
- 'OS FILES':
- {
+ "OS FILES": {
# File Name Command
- 'OS_rsct.txt': '/usr/bin/ctversion -bv >/tmp/OS_rsct.txt 2>&1',
- 'OS_secure.txt': 'cat /var/log/secure >/tmp/OS_secure.txt 2>&1',
- 'OS_syslog.txt': 'tail -n 200000 /var/log/messages '
- + '>/tmp/OS_syslog.txt 2>&1',
- 'OS_info.txt': '{ lsb_release -a; cat /etc/redhat-release; '
- + 'uname -a; rpm -qa ; } >/tmp/OS_info.txt 2>&1',
- 'OS_sosreport.txt': '{ rm -rf /tmp/sosreport*FFDC* ; sosreport --batch --tmp-dir '
- + '/tmp --label FFDC ; } >/tmp/OS_sosreport.txt 2>&1',
+ "OS_rsct.txt": "/usr/bin/ctversion -bv >/tmp/OS_rsct.txt 2>&1",
+ "OS_secure.txt": "cat /var/log/secure >/tmp/OS_secure.txt 2>&1",
+ "OS_syslog.txt": "tail -n 200000 /var/log/messages "
+ + ">/tmp/OS_syslog.txt 2>&1",
+ "OS_info.txt": "{ lsb_release -a; cat /etc/redhat-release; "
+ + "uname -a; rpm -qa ; } >/tmp/OS_info.txt 2>&1",
+ "OS_sosreport.txt": (
+ "{ rm -rf /tmp/sosreport*FFDC* ; sosreport --batch --tmp-dir "
+ )
+ + "/tmp --label FFDC ; } >/tmp/OS_sosreport.txt 2>&1",
},
}
# Add file name and corresponding command needed for AIX.
FFDC_OS_AIX_FILE = {
- 'OS FILES':
- {
+ "OS FILES": {
# File Name Command
- 'OS_errpt.txt': 'errpt >/tmp/OS_errpt.txt 2>&1 ; errclear 0;',
- 'OS_processors.txt': 'bindprocessor -q >/tmp/OS_processors.txt 2>&1',
+ "OS_errpt.txt": "errpt >/tmp/OS_errpt.txt 2>&1 ; errclear 0;",
+ "OS_processors.txt": "bindprocessor -q >/tmp/OS_processors.txt 2>&1",
},
}
try:
- redfish_support_trans_state = os.environ.get('REDFISH_SUPPORT_TRANS_STATE', 0) or \
- int(BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0))
+ redfish_support_trans_state = os.environ.get(
+ "REDFISH_SUPPORT_TRANS_STATE", 0
+ ) or int(
+ BuiltIn().get_variable_value(
+ "${REDFISH_SUPPORT_TRANS_STATE}", default=0
+ )
+ )
except RobotNotRunningError:
pass
-OPENBMC_BASE = '/xyz/openbmc_project/'
-OPENPOWER_BASE = '/org/open_power/'
-ENUMERATE_SENSORS = OPENBMC_BASE + 'sensors/enumerate'
-ENUMERATE_INVENTORY = OPENBMC_BASE + 'inventory/enumerate'
-ENUMERATE_ELOG = OPENBMC_BASE + 'logging/entry/enumerate'
-ENUMERATE_LED = OPENBMC_BASE + 'led/enumerate'
-ENUMERATE_SW = OPENBMC_BASE + 'software/enumerate'
-ENUMERATE_CONTROL = OPENBMC_BASE + 'control/enumerate'
-ENUMERATE_STATE = OPENBMC_BASE + 'state/enumerate'
-ENUMERATE_OCC = OPENPOWER_BASE + '/enumerate'
-ENUMERATE_DUMPS = OPENBMC_BASE + 'dumps/enumerate'
-ENUMERATE_USER = OPENBMC_BASE + 'user/enumerate'
+OPENBMC_BASE = "/xyz/openbmc_project/"
+OPENPOWER_BASE = "/org/open_power/"
+ENUMERATE_SENSORS = OPENBMC_BASE + "sensors/enumerate"
+ENUMERATE_INVENTORY = OPENBMC_BASE + "inventory/enumerate"
+ENUMERATE_ELOG = OPENBMC_BASE + "logging/entry/enumerate"
+ENUMERATE_LED = OPENBMC_BASE + "led/enumerate"
+ENUMERATE_SW = OPENBMC_BASE + "software/enumerate"
+ENUMERATE_CONTROL = OPENBMC_BASE + "control/enumerate"
+ENUMERATE_STATE = OPENBMC_BASE + "state/enumerate"
+ENUMERATE_OCC = OPENPOWER_BASE + "/enumerate"
+ENUMERATE_DUMPS = OPENBMC_BASE + "dumps/enumerate"
+ENUMERATE_USER = OPENBMC_BASE + "user/enumerate"
# Add file name and corresponding Get Request
FFDC_GET_REQUEST = {
- 'GET REQUESTS':
- {
+ "GET REQUESTS": {
# File Name Command
- 'FIRMWARE_list.txt': ENUMERATE_SW,
- 'BMC_sensor_list.txt': ENUMERATE_SENSORS,
- 'BMC_control_list.txt': ENUMERATE_CONTROL,
- 'BMC_inventory.txt': ENUMERATE_INVENTORY,
- 'BMC_elog.txt': ENUMERATE_ELOG,
- 'BMC_led.txt': ENUMERATE_LED,
- 'BMC_state.txt': ENUMERATE_STATE,
- 'OCC_state.txt': ENUMERATE_OCC,
- 'BMC_dumps.txt': ENUMERATE_DUMPS,
- 'BMC_USER.txt': ENUMERATE_USER,
+ "FIRMWARE_list.txt": ENUMERATE_SW,
+ "BMC_sensor_list.txt": ENUMERATE_SENSORS,
+ "BMC_control_list.txt": ENUMERATE_CONTROL,
+ "BMC_inventory.txt": ENUMERATE_INVENTORY,
+ "BMC_elog.txt": ENUMERATE_ELOG,
+ "BMC_led.txt": ENUMERATE_LED,
+ "BMC_state.txt": ENUMERATE_STATE,
+ "OCC_state.txt": ENUMERATE_OCC,
+ "BMC_dumps.txt": ENUMERATE_DUMPS,
+ "BMC_USER.txt": ENUMERATE_USER,
},
}
@@ -178,61 +222,60 @@
for key in list(FFDC_GET_REQUEST):
del FFDC_GET_REQUEST[key]
-REDFISH_BASE = '/redfish/v1/'
-REDFISH_ELOG = REDFISH_BASE + 'Systems/system/LogServices/EventLog/Entries'
-REDFISH_FIRMWARE = REDFISH_BASE + 'UpdateService/FirmwareInventory'
+REDFISH_BASE = "/redfish/v1/"
+REDFISH_ELOG = REDFISH_BASE + "Systems/system/LogServices/EventLog/Entries"
+REDFISH_FIRMWARE = REDFISH_BASE + "UpdateService/FirmwareInventory"
# Add file name and corresponding Get Request
FFDC_GET_REDFISH_REQUEST = {
- 'GET REQUESTS':
- {
+ "GET REQUESTS": {
# File Name Command
- 'BMC_redfish_elog.txt': REDFISH_ELOG,
+ "BMC_redfish_elog.txt": REDFISH_ELOG,
},
}
# Define your keywords in method/utils and call here
FFDC_METHOD_CALL = {
- 'BMC LOGS':
- {
+ "BMC LOGS": {
# Description Keyword name
- 'Start ffdc cleanup': 'BMC FFDC Cleanup',
- 'FFDC Generic Report': 'BMC FFDC Manifest',
- 'BMC Specific Files': 'BMC FFDC Files',
- 'Get Request FFDC': 'BMC FFDC Get Requests',
- 'Get Redfish Request FFDC': 'BMC FFDC Get Redfish Requests',
- 'OS FFDC': 'OS FFDC Files',
- 'Core Files': 'SCP Coredump Files',
- 'SEL Log': 'Collect eSEL Log',
- 'Sys Inventory Files': 'System Inventory Files',
- 'Dump Files': 'SCP Dump Files',
- 'PEL Files': 'Collect PEL Log',
- 'Redfish Log': 'Enumerate Redfish Resources',
- 'Firmware Log': 'Enumerate Redfish Resources '
- + ' enum_uri=/redfish/v1/UpdateService/FirmwareInventory '
- + ' file_enum_name=redfish_FIRMWARE_list.txt',
- 'Redfish OEM Log': 'Enumerate Redfish OEM Resources',
- 'End ffdc cleanup': 'BMC FFDC Cleanup',
+ "Start ffdc cleanup": "BMC FFDC Cleanup",
+ "FFDC Generic Report": "BMC FFDC Manifest",
+ "BMC Specific Files": "BMC FFDC Files",
+ "Get Request FFDC": "BMC FFDC Get Requests",
+ "Get Redfish Request FFDC": "BMC FFDC Get Redfish Requests",
+ "OS FFDC": "OS FFDC Files",
+ "Core Files": "SCP Coredump Files",
+ "SEL Log": "Collect eSEL Log",
+ "Sys Inventory Files": "System Inventory Files",
+ "Dump Files": "SCP Dump Files",
+ "PEL Files": "Collect PEL Log",
+ "Redfish Log": "Enumerate Redfish Resources",
+ "Firmware Log": "Enumerate Redfish Resources "
+ + " enum_uri=/redfish/v1/UpdateService/FirmwareInventory "
+ + " file_enum_name=redfish_FIRMWARE_list.txt",
+ "Redfish OEM Log": "Enumerate Redfish OEM Resources",
+ "End ffdc cleanup": "BMC FFDC Cleanup",
},
}
try:
- platform_arch_type = os.environ.get('PLATFORM_ARCH_TYPE', '') or \
- BuiltIn().get_variable_value("${PLATFORM_ARCH_TYPE}", default="power")
+ platform_arch_type = os.environ.get(
+ "PLATFORM_ARCH_TYPE", ""
+ ) or BuiltIn().get_variable_value("${PLATFORM_ARCH_TYPE}", default="power")
except RobotNotRunningError:
pass
# Filter the logs based on platform type.
if platform_arch_type == "x86":
- del FFDC_BMC_FILE['BMC FILES']['PEL_logs_list.json']
- del FFDC_BMC_FILE['BMC FILES']['PEL_logs_display.json']
- del FFDC_METHOD_CALL['BMC LOGS']['PEL Files']
+ del FFDC_BMC_FILE["BMC FILES"]["PEL_logs_list.json"]
+ del FFDC_BMC_FILE["BMC FILES"]["PEL_logs_display.json"]
+ del FFDC_METHOD_CALL["BMC LOGS"]["PEL Files"]
# -----------------------------------------------------------------
# base class for FFDC default list
-class openbmc_ffdc_list():
+class openbmc_ffdc_list:
def get_ffdc_bmc_cmd(self, i_type):
r"""
#######################################################################
@@ -318,8 +361,7 @@
"""
return FFDC_METHOD_CALL.keys()
- def get_ffdc_method_desc(self,
- index):
+ def get_ffdc_method_desc(self, index):
r"""
#######################################################################
# @brief This method returns the just the keys from the dictionary.
@@ -383,7 +425,7 @@
# @return Remove all special chars and return the string
#######################################################################
"""
- return ''.join(e for e in i_str if e.isalnum())
+ return "".join(e for e in i_str if e.isalnum())
def get_esel_index(self, esel_list):
r"""
@@ -393,7 +435,7 @@
# @return Index of "ESEL=" in the list.
#######################################################################
"""
- index = [i for i, str in enumerate(esel_list) if 'ESEL=' in str]
+ index = [i for i, str in enumerate(esel_list) if "ESEL=" in str]
return index[0]
def get_dump_index(self, dump_list):
@@ -404,5 +446,5 @@
# @return Index of "ESEL=" in the list.
#######################################################################
"""
- index = [i for i, str in enumerate(dump_list) if 'DUMP=' in str]
+ index = [i for i, str in enumerate(dump_list) if "DUMP=" in str]
return index[0]
diff --git a/lib/openbmctool_utils.py b/lib/openbmctool_utils.py
index a6c94e9..dfe84e7 100755
--- a/lib/openbmctool_utils.py
+++ b/lib/openbmctool_utils.py
@@ -5,22 +5,21 @@
openbmctool_execute_command.
"""
-import gen_print as gp
-import gen_cmd as gc
-import gen_valid as gv
-import gen_misc as gm
-import var_funcs as vf
-import utils as utils
-from robot.libraries.BuiltIn import BuiltIn
-import re
-import tempfile
import collections
import json
+import re
+import tempfile
+
+import gen_cmd as gc
+import gen_misc as gm
+import gen_print as gp
+import gen_valid as gv
+import utils as utils
+import var_funcs as vf
+from robot.libraries.BuiltIn import BuiltIn
-def openbmctool_execute_command(command_string,
- *args,
- **kwargs):
+def openbmctool_execute_command(command_string, *args, **kwargs):
r"""
Run the command string as an argument to the openbmctool.py program and
return the stdout and the return code.
@@ -64,10 +63,12 @@
# Get global BMC variable values.
openbmc_host = BuiltIn().get_variable_value("${OPENBMC_HOST}", default="")
https_port = BuiltIn().get_variable_value("${HTTPS_PORT}", default="443")
- openbmc_username = BuiltIn().get_variable_value("${OPENBMC_USERNAME}",
- default="")
- openbmc_password = BuiltIn().get_variable_value("${OPENBMC_PASSWORD}",
- default="")
+ openbmc_username = BuiltIn().get_variable_value(
+ "${OPENBMC_USERNAME}", default=""
+ )
+ openbmc_password = BuiltIn().get_variable_value(
+ "${OPENBMC_PASSWORD}", default=""
+ )
if not gv.valid_value(openbmc_host):
return "", "", 1
if not gv.valid_value(openbmc_username):
@@ -81,24 +82,35 @@
# example, the user may have specified "fru status | head -n 2" which
# would be broken into 2 list elements. We will also break on ">"
# (re-direct).
- pipeline = list(map(str.strip, re.split(r' ([\|>]) ',
- str(command_string))))
+ pipeline = list(
+ map(str.strip, re.split(r" ([\|>]) ", str(command_string)))
+ )
# The "tail" command below prevents a "egrep: write error: Broken pipe"
# error if the user is piping the output to a sub-process.
# Use "egrep -v" to get rid of editorial output from openbmctool.py.
- pipeline.insert(1, "| tail -n +1 | egrep -v 'Attempting login|User [^ ]+"
- " has been logged out'")
+ pipeline.insert(
+ 1,
+ "| tail -n +1 | egrep -v 'Attempting login|User [^ ]+"
+ " has been logged out'",
+ )
- command_string = "set -o pipefail ; python3 $(which openbmctool.py) -H "\
- + openbmc_host + ":" + https_port + " -U " + openbmc_username + " -P " + openbmc_password\
- + " " + " ".join(pipeline)
+ command_string = (
+ "set -o pipefail ; python3 $(which openbmctool.py) -H "
+ + openbmc_host
+ + ":"
+ + https_port
+ + " -U "
+ + openbmc_username
+ + " -P "
+ + openbmc_password
+ + " "
+ + " ".join(pipeline)
+ )
return gc.shell_cmd(command_string, *args, **kwargs)
-def openbmctool_execute_command_json(command_string,
- *args,
- **kwargs):
+def openbmctool_execute_command_json(command_string, *args, **kwargs):
r"""
Run the command string as an argument to the openbmctool.py program, parse
the JSON output into a dictionary and return the dictionary.
@@ -112,21 +124,19 @@
See openbmctool_execute_command (above) for all field descriptions.
"""
- rc, output = openbmctool_execute_command(command_string,
- *args,
- **kwargs)
+ rc, output = openbmctool_execute_command(command_string, *args, **kwargs)
try:
json_object = utils.to_json_ordered(output)
except json.JSONDecodeError:
BuiltIn().fail(gp.sprint_error(output))
- if json_object['status'] != "ok":
+ if json_object["status"] != "ok":
err_msg = "Error found in JSON data returned by the openbmctool.py "
err_msg += "command. Expected a 'status' field value of \"ok\":\n"
err_msg += gp.sprint_var(json_object, 1)
BuiltIn().fail(gp.sprint_error(err_msg))
- return json_object['data']
+ return json_object["data"]
def get_fru_status():
@@ -155,8 +165,9 @@
[functional]: No
...
"""
- rc, output = openbmctool_execute_command("fru status", print_output=False,
- ignore_err=False)
+ rc, output = openbmctool_execute_command(
+ "fru status", print_output=False, ignore_err=False
+ )
# Example value for output (partial):
# Component | Is a FRU | Present | Functional | Has Logs
# cpu0 | Yes | Yes | Yes | No
@@ -234,8 +245,9 @@
parsed into a list of dictionaries.
"""
- rc, output = openbmctool_execute_command("fru print", print_output=False,
- ignore_err=False)
+ rc, output = openbmctool_execute_command(
+ "fru print", print_output=False, ignore_err=False
+ )
if parse_json:
return gm.json_loads_multiple(output)
else:
@@ -305,8 +317,9 @@
parsed into a list of dictionaries.
"""
- rc, output = openbmctool_execute_command("fru list", print_output=False,
- ignore_err=False)
+ rc, output = openbmctool_execute_command(
+ "fru list", print_output=False, ignore_err=False
+ )
if parse_json:
return gm.json_loads_multiple(output)
else:
@@ -314,7 +327,6 @@
def get_sensors_print():
-
r"""
Get the output of the sensors print command and return as a list of
dictionaries.
@@ -341,9 +353,9 @@
[target]: Active
...
"""
- rc, output = openbmctool_execute_command("sensors print",
- print_output=False,
- ignore_err=False)
+ rc, output = openbmctool_execute_command(
+ "sensors print", print_output=False, ignore_err=False
+ )
# Example value for output (partial):
# sensor | type | units | value | target
# OCC0 | Discrete | N/A | Active | Active
@@ -353,7 +365,6 @@
def get_sensors_list():
-
r"""
Get the output of the sensors list command and return as a list of
dictionaries.
@@ -380,9 +391,9 @@
[target]: Active
...
"""
- rc, output = openbmctool_execute_command("sensors list",
- print_output=False,
- ignore_err=False)
+ rc, output = openbmctool_execute_command(
+ "sensors list", print_output=False, ignore_err=False
+ )
# Example value for output (partial):
# sensor | type | units | value | target
# OCC0 | Discrete | N/A | Active | Active
@@ -402,9 +413,9 @@
Example result (excerpt):
openbmctool_version: 1.06
"""
- rc, output = openbmctool_execute_command("-V | cut -f 2 -d ' '",
- print_output=False,
- ignore_err=False)
+ rc, output = openbmctool_execute_command(
+ "-V | cut -f 2 -d ' '", print_output=False, ignore_err=False
+ )
return output
@@ -414,15 +425,14 @@
the collect_service_data command.
"""
- return\
- [
- "inventory.txt",
- "sensorReadings.txt",
- "ledStatus.txt",
- "SELshortlist.txt",
- "parsedSELs.txt",
- "bmcFullRaw.txt"
- ]
+ return [
+ "inventory.txt",
+ "sensorReadings.txt",
+ "ledStatus.txt",
+ "SELshortlist.txt",
+ "parsedSELs.txt",
+ "bmcFullRaw.txt",
+ ]
def collect_service_data(verify=False):
@@ -439,32 +449,41 @@
# Route the output of collect_service_data to a file for easier parsing.
temp = tempfile.NamedTemporaryFile()
temp_file_path = temp.name
- openbmctool_execute_command("collect_service_data > " + temp_file_path,
- ignore_err=False)
+ openbmctool_execute_command(
+ "collect_service_data > " + temp_file_path, ignore_err=False
+ )
# Isolate the file paths in the collect_service_data output. We're
# looking for output lines like this from which to extract the file paths:
# Inventory collected and stored in /tmp/dummy--2018-09-26_17.59.18/inventory.txt
- rc, file_paths = gc.shell_cmd("egrep 'collected and' " + temp_file_path
- # + " | sed -re 's#.*/tmp#/tmp#g'",
- + " | sed -re 's#[^/]*/#/#'",
- quiet=1, print_output=0)
+ rc, file_paths = gc.shell_cmd(
+ "egrep 'collected and' " + temp_file_path
+ # + " | sed -re 's#.*/tmp#/tmp#g'",
+ + " | sed -re 's#[^/]*/#/#'",
+ quiet=1,
+ print_output=0,
+ )
# Example file_paths value:
# /tmp/dummy--2018-09-26_17.59.18/inventory.txt
# /tmp/dummy--2018-09-26_17.59.18/sensorReadings.txt
# etc.
# Convert from output to list.
- collect_service_data_file_paths =\
- list(filter(None, file_paths.split("\n")))
+ collect_service_data_file_paths = list(
+ filter(None, file_paths.split("\n"))
+ )
if int(verify):
# Create a list of files by stripping the dir names from the elements
# of collect_service_data_file_paths.
- files_obtained = [re.sub(r".*/", "", file_path)
- for file_path in collect_service_data_file_paths]
+ files_obtained = [
+ re.sub(r".*/", "", file_path)
+ for file_path in collect_service_data_file_paths
+ ]
files_expected = service_data_files()
files_missing = list(set(files_expected) - set(files_obtained))
if len(files_missing) > 0:
- gp.printn("collect_service_data output:\n"
- + gm.file_to_str(temp_file_path))
+ gp.printn(
+ "collect_service_data output:\n"
+ + gm.file_to_str(temp_file_path)
+ )
err_msg = "The following files are missing from the list of files"
err_msg += " returned by collect_service_data:\n"
err_msg += gp.sprint_var(files_missing)
@@ -479,11 +498,7 @@
Return a complete list of field names returned by the health_check command.
"""
- return\
- [
- "hardware_status",
- "performance"
- ]
+ return ["hardware_status", "performance"]
def get_health_check(verify=False):
@@ -507,9 +522,9 @@
health_check command.
"""
- rc, output = openbmctool_execute_command("health_check",
- print_output=False,
- ignore_err=False)
+ rc, output = openbmctool_execute_command(
+ "health_check", print_output=False, ignore_err=False
+ )
health_check = vf.key_value_outbuf_to_dict(output, delim=":")
if int(verify):
err_msg = gv.valid_dict(health_check, health_check_fields())
@@ -525,11 +540,7 @@
remote_logging view command.
"""
- return\
- [
- "Address",
- "Port"
- ]
+ return ["Address", "Port"]
def get_remote_logging_view(verify=False):
@@ -554,14 +565,14 @@
remote_logging view' command.
"""
- remote_logging_view =\
- openbmctool_execute_command_json("logging remote_logging view",
- print_output=False,
- ignore_err=False)
+ remote_logging_view = openbmctool_execute_command_json(
+ "logging remote_logging view", print_output=False, ignore_err=False
+ )
if int(verify):
- err_msg = gv.valid_dict(remote_logging_view,
- remote_logging_view_fields())
+ err_msg = gv.valid_dict(
+ remote_logging_view, remote_logging_view_fields()
+ )
if err_msg != "":
BuiltIn().fail(gp.sprint_error(err_msg))
@@ -604,8 +615,9 @@
else:
new_options = options
- command_string = gc.create_command_string('network ' + sub_command,
- new_options)
- return openbmctool_execute_command_json(command_string,
- print_output=False,
- ignore_err=False)
+ command_string = gc.create_command_string(
+ "network " + sub_command, new_options
+ )
+ return openbmctool_execute_command_json(
+ command_string, print_output=False, ignore_err=False
+ )
diff --git a/lib/pel_utils.py b/lib/pel_utils.py
index 334f26d..5f42070 100644
--- a/lib/pel_utils.py
+++ b/lib/pel_utils.py
@@ -4,11 +4,12 @@
PEL functions.
"""
-import func_args as fa
-import bmc_ssh_utils as bsu
import json
import os
import sys
+
+import bmc_ssh_utils as bsu
+import func_args as fa
from robot.libraries.BuiltIn import BuiltIn
base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -56,7 +57,9 @@
"""
bsu_options = fa.args_to_objects(bsu_options)
- out_buf, stderr, rc = bsu.bmc_execute_command('peltool ' + option_string, **bsu_options)
+ out_buf, stderr, rc = bsu.bmc_execute_command(
+ "peltool " + option_string, **bsu_options
+ )
if parse_json:
try:
return json.loads(out_buf)
@@ -65,8 +68,9 @@
return out_buf
-def get_pel_data_from_bmc(include_hidden_pels=False,
- include_informational_pels=False):
+def get_pel_data_from_bmc(
+ include_hidden_pels=False, include_informational_pels=False
+):
r"""
Returns PEL data from BMC else throws exception.
@@ -109,13 +113,19 @@
pel_id_list = pel_data.keys()
for pel_id in pel_id_list:
# Check if required SRC ID with severity is present
- if ((pel_data[pel_id]["SRC"] == src_id) and (pel_data[pel_id]["Sev"] == severity)):
+ if (pel_data[pel_id]["SRC"] == src_id) and (
+ pel_data[pel_id]["Sev"] == severity
+ ):
src_pel_ids.append(pel_id)
if not src_pel_ids:
- raise peltool_exception(src_id + " with severity " + severity + " not present")
+ raise peltool_exception(
+ src_id + " with severity " + severity + " not present"
+ )
except Exception as e:
- raise peltool_exception("Failed to fetch PEL ID for required SRC : " + str(e))
+ raise peltool_exception(
+ "Failed to fetch PEL ID for required SRC : " + str(e)
+ )
return src_pel_ids
@@ -139,7 +149,9 @@
return src_id
-def check_for_unexpected_src(unexpected_src_list=[], include_hidden_pels=False):
+def check_for_unexpected_src(
+ unexpected_src_list=[], include_hidden_pels=False
+):
r"""
From the given unexpected SRC list, check if any unexpected SRC created
on the BMC. Returns 0 if no SRC found else throws exception.
@@ -160,11 +172,13 @@
if src in src_data:
print("Found an unexpected SRC : " + src)
unexpected_src_count = unexpected_src_count + 1
- if (unexpected_src_count >= 1):
+ if unexpected_src_count >= 1:
raise peltool_exception("Unexpected SRC found.")
except Exception as e:
- raise peltool_exception("Failed to verify unexpected SRC list : " + str(e))
+ raise peltool_exception(
+ "Failed to verify unexpected SRC list : " + str(e)
+ )
return unexpected_src_count
diff --git a/lib/pldm_utils.py b/lib/pldm_utils.py
index 35fe929..81e53fe 100755
--- a/lib/pldm_utils.py
+++ b/lib/pldm_utils.py
@@ -4,18 +4,18 @@
PLDM functions.
"""
-import re
-import var_funcs as vf
-import func_args as fa
-import bmc_ssh_utils as bsu
import json
import random
+import re
import string
+
+import bmc_ssh_utils as bsu
+import func_args as fa
+import var_funcs as vf
from robot.api import logger
def pldmtool(option_string, **bsu_options):
-
r"""
Run pldmtool on the BMC with the caller's option string and return the result.
@@ -41,7 +41,9 @@
# This allows callers to specify arguments in python style (e.g. print_out=1 vs. print_out=${1}).
bsu_options = fa.args_to_objects(bsu_options)
- stdout, stderr, rc = bsu.bmc_execute_command('pldmtool ' + option_string, **bsu_options)
+ stdout, stderr, rc = bsu.bmc_execute_command(
+ "pldmtool " + option_string, **bsu_options
+ )
if stderr:
return stderr
try:
@@ -51,7 +53,6 @@
def GetBIOSEnumAttributeOptionalValues(attr_val_table_data):
-
"""
From pldmtool GetBIOSTable of type AttributeValueTable get the dict of
attribute handle and its optional values for BIOS Enumeration type.
@@ -77,27 +78,30 @@
attr_val_data_dict = {}
for item in attr_val_table_data:
for attr in item:
- if (attr == "NumberOfPossibleValues"):
+ if attr == "NumberOfPossibleValues":
value_list = []
for i in range(0, int(item[attr])):
- attr_values = item["PossibleValueStringHandle[" + str(i) + "]"]
- value = re.search(r'\((.*?)\)', attr_values).group(1)
+ attr_values = item[
+ "PossibleValueStringHandle[" + str(i) + "]"
+ ]
+ value = re.search(r"\((.*?)\)", attr_values).group(1)
if value:
# Example:
# value = '"Power Off"'
- if ' ' in value:
+ if " " in value:
value = '"' + value + '"'
value_list.append(value)
else:
- value_list.append('')
+ value_list.append("")
- attr_handle = re.findall(r'\(.*?\)', item["AttributeNameHandle"])
+ attr_handle = re.findall(
+ r"\(.*?\)", item["AttributeNameHandle"]
+ )
attr_val_data_dict[attr_handle[0][1:-1]] = value_list
return attr_val_data_dict
def GetBIOSStrAndIntAttributeHandles(attr_type, attr_val_table_data):
-
"""
From pldmtool GetBIOSTable of type AttributeValueTable get the dict of
attribute handle and its values based on the attribute type.
@@ -113,28 +117,27 @@
attr_val_str_dict = {}
for item in attr_val_table_data:
value_dict = {}
- attr_handle = re.findall(r'\(.*?\)', item["AttributeNameHandle"])
+ attr_handle = re.findall(r"\(.*?\)", item["AttributeNameHandle"])
# Example:
# {'vmi_if0_ipv4_prefix_length': {'UpperBound': 32, 'LowerBound': 0}
- if (item["AttributeType"] == "BIOSInteger"):
+ if item["AttributeType"] == "BIOSInteger":
value_dict["LowerBound"] = item["LowerBound"]
value_dict["UpperBound"] = item["UpperBound"]
attr_val_int_dict[attr_handle[0][1:-1]] = value_dict
# Example:
# {'vmi_if1_ipv4_ipaddr': {'MaximumStringLength': 15, 'MinimumStringLength': 7}}
- elif (item["AttributeType"] == "BIOSString"):
+ elif item["AttributeType"] == "BIOSString":
value_dict["MinimumStringLength"] = item["MinimumStringLength"]
value_dict["MaximumStringLength"] = item["MaximumStringLength"]
attr_val_str_dict[attr_handle[0][1:-1]] = value_dict
- if (attr_type == "BIOSInteger"):
+ if attr_type == "BIOSInteger":
return attr_val_int_dict
- elif (attr_type == "BIOSString"):
+ elif attr_type == "BIOSString":
return attr_val_str_dict
def GetRandomBIOSIntAndStrValues(attr_name, count):
-
"""
Get random integer or string values for BIOS attribute values based on the count.
@@ -146,27 +149,35 @@
or string.
"""
- attr_random_value = ''
+ attr_random_value = ""
# Example
# 12.13.14.15
- if 'gateway' in attr_name:
- attr_random_value = ".".join(map(str, (random.randint(0, 255) for _ in range(4))))
+ if "gateway" in attr_name:
+ attr_random_value = ".".join(
+ map(str, (random.randint(0, 255) for _ in range(4)))
+ )
# Example
# 11.11.11.11
- elif 'ipaddr' in attr_name:
- attr_random_value = ".".join(map(str, (random.randint(0, 255) for _ in range(4))))
+ elif "ipaddr" in attr_name:
+ attr_random_value = ".".join(
+ map(str, (random.randint(0, 255) for _ in range(4)))
+ )
# Example
# E5YWEDWJJ
- elif 'name' in attr_name:
+ elif "name" in attr_name:
data = string.ascii_uppercase + string.digits
- attr_random_value = ''.join(random.choice(data) for _ in range(int(count)))
+ attr_random_value = "".join(
+ random.choice(data) for _ in range(int(count))
+ )
- elif 'mfg_flags' in attr_name:
+ elif "mfg_flags" in attr_name:
data = string.ascii_uppercase + string.digits
- attr_random_value = ''.join(random.choice(data) for _ in range(int(count)))
+ attr_random_value = "".join(
+ random.choice(data) for _ in range(int(count))
+ )
- elif 'hb_lid_ids' in attr_name:
+ elif "hb_lid_ids" in attr_name:
attr_random_value = str(random.randint(0, int(count)))
else:
@@ -175,7 +186,6 @@
def GetBIOSAttrOriginalValues(attr_val_table_data):
-
"""
From pldmtool GetBIOSTable of type AttributeValueTable get the dict of
attribute handle and its values.
@@ -188,23 +198,22 @@
"""
attr_val_data_dict = {}
for item in attr_val_table_data:
- attr_handle = re.findall(r'\(.*?\)', item["AttributeNameHandle"])
+ attr_handle = re.findall(r"\(.*?\)", item["AttributeNameHandle"])
attr_name = attr_handle[0][1:-1]
command = "bios GetBIOSAttributeCurrentValueByHandle -a " + attr_name
value = pldmtool(command)
attr_val_data_dict[attr_name] = value["CurrentValue"]
if not value["CurrentValue"]:
- if 'name' in attr_name:
+ if "name" in attr_name:
attr_val_data_dict[attr_name] = '""'
- elif 'hb_lid_ids' in attr_name:
+ elif "hb_lid_ids" in attr_name:
attr_val_data_dict[attr_name] = '""'
return attr_val_data_dict
def GetBIOSAttrDefaultValues(attr_val_table_data):
-
"""
From pldmtool GetBIOSTable of type AttributeValueTable get the dict of
attribute handle and its default attribute values.
@@ -217,27 +226,26 @@
"""
attr_val_data_dict = {}
for item in attr_val_table_data:
- attr_handle = re.findall(r'\(.*?\)', item["AttributeNameHandle"])
+ attr_handle = re.findall(r"\(.*?\)", item["AttributeNameHandle"])
attr_name = attr_handle[0][1:-1]
if "DefaultString" in item:
attr_val_data_dict[attr_name] = item["DefaultString"]
if not item["DefaultString"]:
- if 'name' in attr_name:
+ if "name" in attr_name:
attr_val_data_dict[attr_name] = '""'
- elif 'hb_lid_ids' in attr_name:
+ elif "hb_lid_ids" in attr_name:
attr_val_data_dict[attr_name] = '""'
elif "DefaultValue" in item:
attr_val_data_dict[attr_name] = item["DefaultValue"]
elif "StringHandle" in item:
- attr_default_value = re.findall(r'\(.*?\)', item["StringHandle"])
+ attr_default_value = re.findall(r"\(.*?\)", item["StringHandle"])
attr_val_data_dict[attr_name] = attr_default_value[0][1:-1]
return attr_val_data_dict
def GetNewValuesForAllBIOSAttrs(attr_table_data):
-
"""
Get a new set of values for all attributes in Attribute Table.
@@ -249,9 +257,13 @@
"""
existing_data = GetBIOSAttrOriginalValues(attr_table_data)
logger.info(existing_data)
- string_attr_data = GetBIOSStrAndIntAttributeHandles("BIOSString", attr_table_data)
+ string_attr_data = GetBIOSStrAndIntAttributeHandles(
+ "BIOSString", attr_table_data
+ )
logger.info(string_attr_data)
- int_attr_data = GetBIOSStrAndIntAttributeHandles("BIOSInteger", attr_table_data)
+ int_attr_data = GetBIOSStrAndIntAttributeHandles(
+ "BIOSInteger", attr_table_data
+ )
logger.info(int_attr_data)
enum_attr_data = GetBIOSEnumAttributeOptionalValues(attr_table_data)
logger.info(enum_attr_data)
@@ -268,8 +280,12 @@
data = '"' + str(existing_data[attr]) + '"'
temp_list[attr].remove(data)
except ValueError:
- logger.info("Unable to remove the existing value "
- + str(data) + " from list " + str(temp_list[attr]))
+ logger.info(
+ "Unable to remove the existing value "
+ + str(data)
+ + " from list "
+ + str(temp_list[attr])
+ )
valid_values = temp_list[attr][:]
value = random.choice(valid_values)
attr_random_data[attr] = value.strip('"')
@@ -279,7 +295,9 @@
# Iterating to make sure we have a different value
# other than the existing value.
for iter in range(5):
- random_val = GetRandomBIOSIntAndStrValues(attr, string_attr_data[attr]["MaximumStringLength"])
+ random_val = GetRandomBIOSIntAndStrValues(
+ attr, string_attr_data[attr]["MaximumStringLength"]
+ )
if random_val != existing_data[attr]:
break
attr_random_data[attr] = random_val.strip('"')
@@ -287,7 +305,9 @@
for attr in int_attr_data:
for iter in range(5):
- random_val = GetRandomBIOSIntAndStrValues(attr, int_attr_data[attr]["UpperBound"])
+ random_val = GetRandomBIOSIntAndStrValues(
+ attr, int_attr_data[attr]["UpperBound"]
+ )
if random_val != existing_data[attr]:
break
attr_random_data[attr] = random_val
diff --git a/lib/pythonutil.py b/lib/pythonutil.py
index 3fd6ffb..bf5e1a4 100644
--- a/lib/pythonutil.py
+++ b/lib/pythonutil.py
@@ -5,7 +5,7 @@
def calcDottedNetmask(mask):
bits = 0
for i in xrange(32 - mask, 32):
- bits |= (1 << i)
- packed_value = pack('!I', bits)
+ bits |= 1 << i
+ packed_value = pack("!I", bits)
addr = inet_ntoa(packed_value)
return addr
diff --git a/lib/ras/variables.py b/lib/ras/variables.py
index 4525ef3..e0369e7 100644
--- a/lib/ras/variables.py
+++ b/lib/ras/variables.py
@@ -1,4 +1,3 @@
-
r"""
Signature description in error log corresponding to error injection.
"""
@@ -48,41 +47,39 @@
# - field2: chip address.
# - field3: Error log signature description.
-ERROR_INJECT_DICT = {'MCACALIFIR_RECV1': ['07010900', '8000000000000000',
- DES_MCA_RECV1],
- 'MCACALIFIR_RECV32': ['07010900', '2000000000000000',
- DES_MCA_RECV32],
- 'MCACALIFIR_UE': ['07010900', '0020000000000000', DES_MCA_UE],
- 'MCI_RECV1': ['05010800', '8000000000000000', DES_MCI_RECV1],
- 'MCI_UE': ['05010800', '4000000000000000', DES_MCI_UE],
- 'NX_RECV1': ['02011100', '0004000000000000', DES_NX_RECV1],
- 'NX_UE': ['02011100', '0400000000000000', DES_NX_UE],
- 'NX_RECV32': ['02011100', '0800000000000000', DES_NX_RECV32],
- 'CXA_RECV5': ['02010800', '0000000020000000', DES_CXA_RECV5],
- 'CXA_RECV32': ['02010800', '2000000000000000', DES_CXA_RECV32],
- 'CXA_UE': ['02010800', '4000000000000000', DES_CXA_UE],
- 'OBUS_RECV32': ['0904000a', '8000000000000000', DES_OBUS_RECV32],
- 'NPU0_RECV32': ['05013C00', '0004000000000000', DES_NPU0_RECV32],
- 'L2FIR_RECV1': ['10010800', '0080000000000000', DES_L2_RECV1],
- 'L2FIR_RECV32': ['10010800', '0200000000000000', DES_L2_RECV32],
- 'L2FIR_UE': ['10010800', '0040000000000000', DES_L2_UE],
- 'L3FIR_RECV1': ['10011800', '0000400000000000', DES_L3_RECV1],
- 'L3FIR_RECV32': ['10011800', '0100000000000000', DES_L3_RECV32],
- 'L3FIR_UE': ['10011800', '0000800000000000', DES_L3_UE],
- 'OCCFIR_RECV1': ['01010800', '0000000000040000', DES_OCC_RECV1],
- 'CMEFIR_RECV1': ['10012000', '0100000000000000', DES_CME_RECV1],
- 'EQFIR_RECV32': ['1004000A', '8000000000000000', DES_EQ_RECV32],
- 'NCUFIR_RECV1': ['10011400', '0080000000000000', DES_NCU_RECV1],
- 'NCUFIR_UE': ['10011400', '8000000000000000', DES_NCU_UE],
- 'COREFIR_RECV5': ['20010A40', '8000000000000000', DES_CORE_RECV5],
- 'COREFIR_RECV1': ['20010A40', '0000000200000000', DES_CORE_RECV1],
- 'COREFIR_UE': ['20010A40', '4000000000000000', DES_CORE_UE],
-
- }
+ERROR_INJECT_DICT = {
+ "MCACALIFIR_RECV1": ["07010900", "8000000000000000", DES_MCA_RECV1],
+ "MCACALIFIR_RECV32": ["07010900", "2000000000000000", DES_MCA_RECV32],
+ "MCACALIFIR_UE": ["07010900", "0020000000000000", DES_MCA_UE],
+ "MCI_RECV1": ["05010800", "8000000000000000", DES_MCI_RECV1],
+ "MCI_UE": ["05010800", "4000000000000000", DES_MCI_UE],
+ "NX_RECV1": ["02011100", "0004000000000000", DES_NX_RECV1],
+ "NX_UE": ["02011100", "0400000000000000", DES_NX_UE],
+ "NX_RECV32": ["02011100", "0800000000000000", DES_NX_RECV32],
+ "CXA_RECV5": ["02010800", "0000000020000000", DES_CXA_RECV5],
+ "CXA_RECV32": ["02010800", "2000000000000000", DES_CXA_RECV32],
+ "CXA_UE": ["02010800", "4000000000000000", DES_CXA_UE],
+ "OBUS_RECV32": ["0904000a", "8000000000000000", DES_OBUS_RECV32],
+ "NPU0_RECV32": ["05013C00", "0004000000000000", DES_NPU0_RECV32],
+ "L2FIR_RECV1": ["10010800", "0080000000000000", DES_L2_RECV1],
+ "L2FIR_RECV32": ["10010800", "0200000000000000", DES_L2_RECV32],
+ "L2FIR_UE": ["10010800", "0040000000000000", DES_L2_UE],
+ "L3FIR_RECV1": ["10011800", "0000400000000000", DES_L3_RECV1],
+ "L3FIR_RECV32": ["10011800", "0100000000000000", DES_L3_RECV32],
+ "L3FIR_UE": ["10011800", "0000800000000000", DES_L3_UE],
+ "OCCFIR_RECV1": ["01010800", "0000000000040000", DES_OCC_RECV1],
+ "CMEFIR_RECV1": ["10012000", "0100000000000000", DES_CME_RECV1],
+ "EQFIR_RECV32": ["1004000A", "8000000000000000", DES_EQ_RECV32],
+ "NCUFIR_RECV1": ["10011400", "0080000000000000", DES_NCU_RECV1],
+ "NCUFIR_UE": ["10011400", "8000000000000000", DES_NCU_UE],
+ "COREFIR_RECV5": ["20010A40", "8000000000000000", DES_CORE_RECV5],
+ "COREFIR_RECV1": ["20010A40", "0000000200000000", DES_CORE_RECV1],
+ "COREFIR_UE": ["20010A40", "4000000000000000", DES_CORE_UE],
+}
# Address translation files
-probe_cpu_file_path = '/root/probe_cpus.sh'
-addr_translation_file_path = '/root/scom_addr_p9.sh'
+probe_cpu_file_path = "/root/probe_cpus.sh"
+addr_translation_file_path = "/root/scom_addr_p9.sh"
cfam_address = "2801"
mem_address = "8208000"
diff --git a/lib/redfish_plus.py b/lib/redfish_plus.py
index e2125ca..2cc7531 100755
--- a/lib/redfish_plus.py
+++ b/lib/redfish_plus.py
@@ -4,13 +4,13 @@
See redfish_plus class prolog below for details.
"""
-from redfish.rest.v1 import HttpClient
-import gen_print as gp
-import func_args as fa
-import requests
import json
-from robot.libraries.BuiltIn import BuiltIn
+import func_args as fa
+import gen_print as gp
+import requests
+from redfish.rest.v1 import HttpClient
+from robot.libraries.BuiltIn import BuiltIn
host = BuiltIn().get_variable_value("${OPENBMC_HOST}")
MTLS_ENABLED = BuiltIn().get_variable_value("${MTLS_ENABLED}")
@@ -55,7 +55,7 @@
- Easily used from robot programs.
"""
- ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
+ ROBOT_LIBRARY_SCOPE = "TEST SUITE"
def rest_request(self, func, *args, **kwargs):
r"""
@@ -115,74 +115,75 @@
# Convert python string object definitions to objects (mostly useful for robot callers).
args = fa.args_to_objects(args)
kwargs = fa.args_to_objects(kwargs)
- timeout = kwargs.pop('timeout', 30)
+ timeout = kwargs.pop("timeout", 30)
self._timeout = timeout
- max_retry = kwargs.pop('max_retry', 10)
+ max_retry = kwargs.pop("max_retry", 10)
self._max_retry = max_retry
- valid_status_codes = kwargs.pop('valid_status_codes', [200])
+ valid_status_codes = kwargs.pop("valid_status_codes", [200])
response = func(*args, **kwargs)
valid_http_status_code(response.status, valid_status_codes)
return response
# Define rest function wrappers.
def get(self, *args, **kwargs):
-
- if MTLS_ENABLED == 'True':
+ if MTLS_ENABLED == "True":
return self.rest_request(self.get_with_mtls, *args, **kwargs)
else:
- return self.rest_request(super(redfish_plus, self).get, *args,
- **kwargs)
+ return self.rest_request(
+ super(redfish_plus, self).get, *args, **kwargs
+ )
def head(self, *args, **kwargs):
-
- if MTLS_ENABLED == 'True':
+ if MTLS_ENABLED == "True":
return self.rest_request(self.head_with_mtls, *args, **kwargs)
else:
- return self.rest_request(super(redfish_plus, self).head, *args,
- **kwargs)
+ return self.rest_request(
+ super(redfish_plus, self).head, *args, **kwargs
+ )
def post(self, *args, **kwargs):
-
- if MTLS_ENABLED == 'True':
+ if MTLS_ENABLED == "True":
return self.rest_request(self.post_with_mtls, *args, **kwargs)
else:
- return self.rest_request(super(redfish_plus, self).post, *args,
- **kwargs)
+ return self.rest_request(
+ super(redfish_plus, self).post, *args, **kwargs
+ )
def put(self, *args, **kwargs):
-
- if MTLS_ENABLED == 'True':
+ if MTLS_ENABLED == "True":
return self.rest_request(self.put_with_mtls, *args, **kwargs)
else:
- return self.rest_request(super(redfish_plus, self).put, *args,
- **kwargs)
+ return self.rest_request(
+ super(redfish_plus, self).put, *args, **kwargs
+ )
def patch(self, *args, **kwargs):
-
- if MTLS_ENABLED == 'True':
+ if MTLS_ENABLED == "True":
return self.rest_request(self.patch_with_mtls, *args, **kwargs)
else:
- return self.rest_request(super(redfish_plus, self).patch, *args,
- **kwargs)
+ return self.rest_request(
+ super(redfish_plus, self).patch, *args, **kwargs
+ )
def delete(self, *args, **kwargs):
-
- if MTLS_ENABLED == 'True':
+ if MTLS_ENABLED == "True":
return self.rest_request(self.delete_with_mtls, *args, **kwargs)
else:
- return self.rest_request(super(redfish_plus, self).delete, *args,
- **kwargs)
+ return self.rest_request(
+ super(redfish_plus, self).delete, *args, **kwargs
+ )
def __del__(self):
del self
def get_with_mtls(self, *args, **kwargs):
-
- cert_dict = kwargs.pop('certificate', {"certificate_name": VALID_CERT})
- response = requests.get(url='https://' + host + args[0],
- cert=CERT_DIR_PATH + '/' + cert_dict['certificate_name'],
- verify=False,
- headers={"Cache-Control": "no-cache"})
+ cert_dict = kwargs.pop("certificate", {"certificate_name": VALID_CERT})
+ response = requests.get(
+ url="https://" + host + args[0],
+ cert=CERT_DIR_PATH + "/" + cert_dict["certificate_name"],
+ verify=False,
+ headers={"Cache-Control": "no-cache"},
+ )
response.status = response.status_code
if response.status == 200:
@@ -191,68 +192,73 @@
return response
def post_with_mtls(self, *args, **kwargs):
-
- cert_dict = kwargs.pop('certificate', {"certificate_name": VALID_CERT})
- body = kwargs.pop('body', {})
- response = requests.post(url='https://' + host + args[0],
- json=body,
- cert=CERT_DIR_PATH + '/' + cert_dict['certificate_name'],
- verify=False,
- headers={"Content-Type": "application/json"})
+ cert_dict = kwargs.pop("certificate", {"certificate_name": VALID_CERT})
+ body = kwargs.pop("body", {})
+ response = requests.post(
+ url="https://" + host + args[0],
+ json=body,
+ cert=CERT_DIR_PATH + "/" + cert_dict["certificate_name"],
+ verify=False,
+ headers={"Content-Type": "application/json"},
+ )
response.status = response.status_code
return response
def patch_with_mtls(self, *args, **kwargs):
-
- cert_dict = kwargs.pop('certificate', {"certificate_name": VALID_CERT})
- body = kwargs.pop('body', {})
- response = requests.patch(url='https://' + host + args[0],
- json=body,
- cert=CERT_DIR_PATH + '/' + cert_dict['certificate_name'],
- verify=False,
- headers={"Content-Type": "application/json"})
+ cert_dict = kwargs.pop("certificate", {"certificate_name": VALID_CERT})
+ body = kwargs.pop("body", {})
+ response = requests.patch(
+ url="https://" + host + args[0],
+ json=body,
+ cert=CERT_DIR_PATH + "/" + cert_dict["certificate_name"],
+ verify=False,
+ headers={"Content-Type": "application/json"},
+ )
response.status = response.status_code
return response
def delete_with_mtls(self, *args, **kwargs):
-
- cert_dict = kwargs.pop('certificate', {"certificate_name": VALID_CERT})
- response = requests.delete(url='https://' + host + args[0],
- cert=CERT_DIR_PATH + '/' + cert_dict['certificate_name'],
- verify=False,
- headers={"Content-Type": "application/json"})
+ cert_dict = kwargs.pop("certificate", {"certificate_name": VALID_CERT})
+ response = requests.delete(
+ url="https://" + host + args[0],
+ cert=CERT_DIR_PATH + "/" + cert_dict["certificate_name"],
+ verify=False,
+ headers={"Content-Type": "application/json"},
+ )
response.status = response.status_code
return response
def put_with_mtls(self, *args, **kwargs):
-
- cert_dict = kwargs.pop('certificate', {"certificate_name": VALID_CERT})
- body = kwargs.pop('body', {})
- response = requests.put(url='https://' + host + args[0],
- json=body,
- cert=CERT_DIR_PATH + '/' + cert_dict['certificate_name'],
- verify=False,
- headers={"Content-Type": "application/json"})
+ cert_dict = kwargs.pop("certificate", {"certificate_name": VALID_CERT})
+ body = kwargs.pop("body", {})
+ response = requests.put(
+ url="https://" + host + args[0],
+ json=body,
+ cert=CERT_DIR_PATH + "/" + cert_dict["certificate_name"],
+ verify=False,
+ headers={"Content-Type": "application/json"},
+ )
response.status = response.status_code
return response
def head_with_mtls(self, *args, **kwargs):
-
- cert_dict = kwargs.pop('certificate', {"certificate_name": VALID_CERT})
- body = kwargs.pop('body', {})
- response = requests.head(url='https://' + host + args[0],
- json=body,
- cert=CERT_DIR_PATH + '/' + cert_dict['certificate_name'],
- verify=False,
- headers={"Content-Type": "application/json"})
+ cert_dict = kwargs.pop("certificate", {"certificate_name": VALID_CERT})
+ body = kwargs.pop("body", {})
+ response = requests.head(
+ url="https://" + host + args[0],
+ json=body,
+ cert=CERT_DIR_PATH + "/" + cert_dict["certificate_name"],
+ verify=False,
+ headers={"Content-Type": "application/json"},
+ )
response.status = response.status_code
diff --git a/lib/redfish_request.py b/lib/redfish_request.py
index 6add29f..da455a3 100644
--- a/lib/redfish_request.py
+++ b/lib/redfish_request.py
@@ -1,19 +1,18 @@
#!/usr/bin/env python3
-import requests
-import urllib.request
-from urllib3.exceptions import InsecureRequestWarning
import json
import secrets
import string
+import urllib.request
+import requests
from robot.api import logger
-from robot.libraries.BuiltIn import BuiltIn
from robot.api.deco import keyword
+from robot.libraries.BuiltIn import BuiltIn
+from urllib3.exceptions import InsecureRequestWarning
class redfish_request(object):
-
@staticmethod
def generate_clientid():
r"""
@@ -23,9 +22,11 @@
"""
- clientid = ''.join(secrets.choice(
- string.ascii_letters + string.digits) for i in range(10))
- clientid = ''.join(str(i) for i in clientid)
+ clientid = "".join(
+ secrets.choice(string.ascii_letters + string.digits)
+ for i in range(10)
+ )
+ clientid = "".join(str(i) for i in clientid)
return clientid
@@ -38,11 +39,13 @@
url Url passed by user e.g. /redfish/v1/Systems/system.
"""
- openbmc_host = \
- BuiltIn().get_variable_value("${OPENBMC_HOST}", default="")
+ openbmc_host = BuiltIn().get_variable_value(
+ "${OPENBMC_HOST}", default=""
+ )
https_port = BuiltIn().get_variable_value("${HTTPS_PORT}", default="")
- form_url = \
+ form_url = (
"https://" + str(openbmc_host) + ":" + str(https_port) + str(url)
+ )
return form_url
@@ -55,10 +58,11 @@
response Response from requests.
"""
- logger.console(msg='', newline=True)
- logger.info("Response : [%s]" % response.status_code,
- also_console=True)
- logger.console(msg='', newline=True)
+ logger.console(msg="", newline=True)
+ logger.info(
+ "Response : [%s]" % response.status_code, also_console=True
+ )
+ logger.console(msg="", newline=True)
def request_login(self, headers, url, credential, timeout=10):
r"""
@@ -81,19 +85,21 @@
if headers == "None":
headers = dict()
- headers['Content-Type'] = 'application/json'
+ headers["Content-Type"] = "application/json"
- client_id = credential['Oem']['OpenBMC'].get('ClientID', "None")
+ client_id = credential["Oem"]["OpenBMC"].get("ClientID", "None")
if "None" == client_id:
self.clientid = redfish_request.generate_clientid()
- credential['Oem']['OpenBMC']['ClientID'] = self.clientid
+ credential["Oem"]["OpenBMC"]["ClientID"] = self.clientid
- logger.console(msg='', newline=True)
- requests.packages.urllib3.\
- disable_warnings(category=InsecureRequestWarning)
- response = redfish_request.request_post(self, headers=headers,
- url=url, data=credential)
+ logger.console(msg="", newline=True)
+ requests.packages.urllib3.disable_warnings(
+ category=InsecureRequestWarning
+ )
+ response = redfish_request.request_post(
+ self, headers=headers, url=url, data=credential
+ )
return response
@@ -116,19 +122,27 @@
is not considered.
"""
- if headers.get('Content-Type', None) is None:
- headers['Content-Type'] = 'application/json'
+ if headers.get("Content-Type", None) is None:
+ headers["Content-Type"] = "application/json"
url = redfish_request.form_url(url)
- logger.console(msg='', newline=True)
- msg = "Request Method : GET ,headers = " + \
- json.dumps(headers) + " ,uri = " + str(url) + " ,timeout = " + \
- str(timeout) + " ,verify = " + str(verify)
+ logger.console(msg="", newline=True)
+ msg = (
+ "Request Method : GET ,headers = "
+ + json.dumps(headers)
+ + " ,uri = "
+ + str(url)
+ + " ,timeout = "
+ + str(timeout)
+ + " ,verify = "
+ + str(verify)
+ )
logger.info(msg, also_console=True)
- response = requests.get(url, headers=headers,
- timeout=timeout, verify=verify)
+ response = requests.get(
+ url, headers=headers, timeout=timeout, verify=verify
+ )
redfish_request.log_console(response)
return response
@@ -155,20 +169,29 @@
is not considered.
"""
- if headers.get('Content-Type', None) is None:
- headers['Content-Type'] = 'application/json'
+ if headers.get("Content-Type", None) is None:
+ headers["Content-Type"] = "application/json"
url = redfish_request.form_url(url)
- logger.console(msg='', newline=True)
- msg = "Request Method : PATCH ,headers = " + \
- json.dumps(headers) + " ,uri = " + str(url) + " ,data = " + \
- json.dumps(data) + " ,timeout = " + str(timeout) + \
- " ,verify = " + str(verify)
+ logger.console(msg="", newline=True)
+ msg = (
+ "Request Method : PATCH ,headers = "
+ + json.dumps(headers)
+ + " ,uri = "
+ + str(url)
+ + " ,data = "
+ + json.dumps(data)
+ + " ,timeout = "
+ + str(timeout)
+ + " ,verify = "
+ + str(verify)
+ )
logger.info(msg, also_console=True)
- response = requests.patch(url, headers=headers, data=data,
- timeout=timeout, verify=verify)
+ response = requests.patch(
+ url, headers=headers, data=data, timeout=timeout, verify=verify
+ )
redfish_request.log_console(response)
return response
@@ -195,26 +218,40 @@
is not considered.
"""
- if headers.get('Content-Type', None) is None:
- headers['Content-Type'] = 'application/json'
+ if headers.get("Content-Type", None) is None:
+ headers["Content-Type"] = "application/json"
url = redfish_request.form_url(url)
- logger.console(msg='', newline=True)
- msg = "Request Method : POST ,headers = " + \
- json.dumps(headers) + " ,uri = " + str(url) + " ,data = " + \
- json.dumps(data) + " ,timeout = " + str(timeout) + \
- " ,verify = " + str(verify)
+ logger.console(msg="", newline=True)
+ msg = (
+ "Request Method : POST ,headers = "
+ + json.dumps(headers)
+ + " ,uri = "
+ + str(url)
+ + " ,data = "
+ + json.dumps(data)
+ + " ,timeout = "
+ + str(timeout)
+ + " ,verify = "
+ + str(verify)
+ )
logger.info(msg, also_console=True)
- response = requests.post(url, headers=headers, data=json.dumps(data),
- timeout=timeout, verify=verify)
+ response = requests.post(
+ url,
+ headers=headers,
+ data=json.dumps(data),
+ timeout=timeout,
+ verify=verify,
+ )
redfish_request.log_console(response)
return response
- def request_put(self, headers, url, files=None, data=None,
- timeout=10, verify=False):
+ def request_put(
+ self, headers, url, files=None, data=None, timeout=10, verify=False
+ ):
r"""
Redfish put request.
@@ -239,25 +276,41 @@
is not considered.
"""
- if headers.get('Content-Type', None) is None:
- headers['Content-Type'] = 'application/json'
+ if headers.get("Content-Type", None) is None:
+ headers["Content-Type"] = "application/json"
url = redfish_request.form_url(url)
- logger.console(msg='', newline=True)
- msg = "Request Method : PUT ,headers = " + \
- json.dumps(headers) + " ,uri = " + str(url) + " ,data = " + \
- json.dumps(data) + " ,timeout = " + str(timeout) + \
- " ,verify = " + str(verify)
+ logger.console(msg="", newline=True)
+ msg = (
+ "Request Method : PUT ,headers = "
+ + json.dumps(headers)
+ + " ,uri = "
+ + str(url)
+ + " ,data = "
+ + json.dumps(data)
+ + " ,timeout = "
+ + str(timeout)
+ + " ,verify = "
+ + str(verify)
+ )
logger.info(msg, also_console=True)
- response = requests.put(url, headers=headers, files=files, data=data,
- timeout=timeout, verify=verify)
+ response = requests.put(
+ url,
+ headers=headers,
+ files=files,
+ data=data,
+ timeout=timeout,
+ verify=verify,
+ )
redfish_request.log_console(response)
return response
- def request_delete(self, headers, url, data=None, timeout=10, verify=False):
+ def request_delete(
+ self, headers, url, data=None, timeout=10, verify=False
+ ):
r"""
Redfish delete request.
@@ -279,20 +332,29 @@
is not considered.
"""
- if headers.get('Content-Type', None) is None:
- headers['Content-Type'] = 'application/json'
+ if headers.get("Content-Type", None) is None:
+ headers["Content-Type"] = "application/json"
url = redfish_request.form_url(url)
- logger.console(msg='', newline=True)
- msg = "Request Method : DELETE ,headers = " + \
- json.dumps(headers) + " ,uri = " + str(url) + " ,data = " + \
- json.dumps(data) + " ,timeout = " + str(timeout) + \
- " ,verify = " + str(verify)
- logger.console(msg='', newline=True)
+ logger.console(msg="", newline=True)
+ msg = (
+ "Request Method : DELETE ,headers = "
+ + json.dumps(headers)
+ + " ,uri = "
+ + str(url)
+ + " ,data = "
+ + json.dumps(data)
+ + " ,timeout = "
+ + str(timeout)
+ + " ,verify = "
+ + str(verify)
+ )
+ logger.console(msg="", newline=True)
- response = requests.delete(url, headers=headers, data=data,
- timeout=timeout, verify=verify)
+ response = requests.delete(
+ url, headers=headers, data=data, timeout=timeout, verify=verify
+ )
redfish_request.log_console(response)
return response
diff --git a/lib/secureboot/secureboot.py b/lib/secureboot/secureboot.py
index 1aa5f06..f38458e 100644
--- a/lib/secureboot/secureboot.py
+++ b/lib/secureboot/secureboot.py
@@ -11,17 +11,14 @@
# Define 'constant' functions.
def secure_boot_mask():
-
return 0x08000000
def jumper_mask():
-
return 0x04000000
class secureboot(object):
-
def get_secure_boot_info(self, quiet=None):
r"""
Get secure-boot information and return it as a tuple consisting of
diff --git a/lib/state.py b/lib/state.py
index 26c3f79..00fa124 100755
--- a/lib/state.py
+++ b/lib/state.py
@@ -27,27 +27,27 @@
compared with the expected state.
"""
-import gen_print as gp
-import gen_valid as gv
-import gen_robot_utils as gru
-import gen_cmd as gc
-import bmc_ssh_utils as bsu
+import imp
+import os
+import re
+import sys
+import bmc_ssh_utils as bsu
+import gen_cmd as gc
+import gen_print as gp
+import gen_robot_utils as gru
+import gen_valid as gv
from robot.libraries.BuiltIn import BuiltIn
from robot.utils import DotDict
-import re
-import os
-import sys
-import imp
-
-
# NOTE: Avoid importing utils.robot because utils.robot imports state.py
# (indirectly) which will cause failures.
gru.my_import_resource("rest_client.robot")
-base_path = os.path.dirname(os.path.dirname(
- imp.find_module("gen_robot_print")[1])) + os.sep
+base_path = (
+ os.path.dirname(os.path.dirname(imp.find_module("gen_robot_print")[1]))
+ + os.sep
+)
sys.path.append(base_path + "data/")
# Previously, I had this coded:
@@ -76,192 +76,243 @@
# is being removed but the OBMC_STATES_VERSION value will stay for now in the
# event that it is needed in the future.
-OBMC_STATES_VERSION = int(os.environ.get('OBMC_STATES_VERSION', 1))
+OBMC_STATES_VERSION = int(os.environ.get("OBMC_STATES_VERSION", 1))
-redfish_support_trans_state = int(os.environ.get('REDFISH_SUPPORT_TRANS_STATE', 0)) or \
- int(BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0))
+redfish_support_trans_state = int(
+ os.environ.get("REDFISH_SUPPORT_TRANS_STATE", 0)
+) or int(
+ BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0)
+)
-platform_arch_type = os.environ.get('PLATFORM_ARCH_TYPE', '') or \
- BuiltIn().get_variable_value("${PLATFORM_ARCH_TYPE}", default="power")
+platform_arch_type = os.environ.get(
+ "PLATFORM_ARCH_TYPE", ""
+) or BuiltIn().get_variable_value("${PLATFORM_ARCH_TYPE}", default="power")
# valid_os_req_states and default_os_req_states are used by the os_get_state
# function.
# valid_os_req_states is a list of state information supported by the
# get_os_state function.
-valid_os_req_states = ['os_ping',
- 'os_login',
- 'os_run_cmd']
+valid_os_req_states = ["os_ping", "os_login", "os_run_cmd"]
# When a user calls get_os_state w/o specifying req_states,
# default_os_req_states is used as its value.
-default_os_req_states = ['os_ping',
- 'os_login',
- 'os_run_cmd']
+default_os_req_states = ["os_ping", "os_login", "os_run_cmd"]
# Presently, some BMCs appear to not keep time very well. This environment
# variable directs the get_state function to use either the BMC's epoch time
# or the local epoch time.
-USE_BMC_EPOCH_TIME = int(os.environ.get('USE_BMC_EPOCH_TIME', 0))
+USE_BMC_EPOCH_TIME = int(os.environ.get("USE_BMC_EPOCH_TIME", 0))
# Useful state constant definition(s).
if not redfish_support_trans_state:
# When a user calls get_state w/o specifying req_states, default_req_states
# is used as its value.
- default_req_states = ['rest',
- 'chassis',
- 'bmc',
- 'boot_progress',
- 'operating_system',
- 'host',
- 'os_ping',
- 'os_login',
- 'os_run_cmd']
+ default_req_states = [
+ "rest",
+ "chassis",
+ "bmc",
+ "boot_progress",
+ "operating_system",
+ "host",
+ "os_ping",
+ "os_login",
+ "os_run_cmd",
+ ]
# valid_req_states is a list of sub states supported by the get_state function.
# valid_req_states, default_req_states and master_os_up_match are used by the
# get_state function.
- valid_req_states = ['ping',
- 'packet_loss',
- 'uptime',
- 'epoch_seconds',
- 'elapsed_boot_time',
- 'rest',
- 'chassis',
- 'requested_chassis',
- 'bmc',
- 'requested_bmc',
- 'boot_progress',
- 'operating_system',
- 'host',
- 'requested_host',
- 'attempts_left',
- 'os_ping',
- 'os_login',
- 'os_run_cmd']
+ valid_req_states = [
+ "ping",
+ "packet_loss",
+ "uptime",
+ "epoch_seconds",
+ "elapsed_boot_time",
+ "rest",
+ "chassis",
+ "requested_chassis",
+ "bmc",
+ "requested_bmc",
+ "boot_progress",
+ "operating_system",
+ "host",
+ "requested_host",
+ "attempts_left",
+ "os_ping",
+ "os_login",
+ "os_run_cmd",
+ ]
# default_state is an initial value which may be of use to callers.
- default_state = DotDict([('rest', '1'),
- ('chassis', 'On'),
- ('bmc', 'Ready'),
- ('boot_progress', 'OSStart'),
- ('operating_system', 'BootComplete'),
- ('host', 'Running'),
- ('os_ping', '1'),
- ('os_login', '1'),
- ('os_run_cmd', '1')])
+ default_state = DotDict(
+ [
+ ("rest", "1"),
+ ("chassis", "On"),
+ ("bmc", "Ready"),
+ ("boot_progress", "OSStart"),
+ ("operating_system", "BootComplete"),
+ ("host", "Running"),
+ ("os_ping", "1"),
+ ("os_login", "1"),
+ ("os_run_cmd", "1"),
+ ]
+ )
# A match state for checking that the system is at "standby".
- standby_match_state = DotDict([('rest', '^1$'),
- ('chassis', '^Off$'),
- ('bmc', '^Ready$'),
- ('boot_progress', '^Off|Unspecified$'),
- ('operating_system', '^Inactive$'),
- ('host', '^Off$')])
+ standby_match_state = DotDict(
+ [
+ ("rest", "^1$"),
+ ("chassis", "^Off$"),
+ ("bmc", "^Ready$"),
+ ("boot_progress", "^Off|Unspecified$"),
+ ("operating_system", "^Inactive$"),
+ ("host", "^Off$"),
+ ]
+ )
# A match state for checking that the system is at "os running".
- os_running_match_state = DotDict([('chassis', '^On$'),
- ('bmc', '^Ready$'),
- ('boot_progress',
- 'FW Progress, Starting OS|OSStart'),
- ('operating_system', 'BootComplete'),
- ('host', '^Running$'),
- ('os_ping', '^1$'),
- ('os_login', '^1$'),
- ('os_run_cmd', '^1$')])
+ os_running_match_state = DotDict(
+ [
+ ("chassis", "^On$"),
+ ("bmc", "^Ready$"),
+ ("boot_progress", "FW Progress, Starting OS|OSStart"),
+ ("operating_system", "BootComplete"),
+ ("host", "^Running$"),
+ ("os_ping", "^1$"),
+ ("os_login", "^1$"),
+ ("os_run_cmd", "^1$"),
+ ]
+ )
# A master dictionary to determine whether the os may be up.
- master_os_up_match = DotDict([('chassis', '^On$'),
- ('bmc', '^Ready$'),
- ('boot_progress',
- 'FW Progress, Starting OS|OSStart'),
- ('operating_system', 'BootComplete'),
- ('host', '^Running|Quiesced$')])
+ master_os_up_match = DotDict(
+ [
+ ("chassis", "^On$"),
+ ("bmc", "^Ready$"),
+ ("boot_progress", "FW Progress, Starting OS|OSStart"),
+ ("operating_system", "BootComplete"),
+ ("host", "^Running|Quiesced$"),
+ ]
+ )
- invalid_state_match = DotDict([('rest', '^$'),
- ('chassis', '^$'),
- ('bmc', '^$'),
- ('boot_progress', '^$'),
- ('operating_system', '^$'),
- ('host', '^$')])
+ invalid_state_match = DotDict(
+ [
+ ("rest", "^$"),
+ ("chassis", "^$"),
+ ("bmc", "^$"),
+ ("boot_progress", "^$"),
+ ("operating_system", "^$"),
+ ("host", "^$"),
+ ]
+ )
else:
# When a user calls get_state w/o specifying req_states, default_req_states
# is used as its value.
- default_req_states = ['redfish',
- 'chassis',
- 'bmc',
- 'boot_progress',
- 'host',
- 'os_ping',
- 'os_login',
- 'os_run_cmd']
+ default_req_states = [
+ "redfish",
+ "chassis",
+ "bmc",
+ "boot_progress",
+ "host",
+ "os_ping",
+ "os_login",
+ "os_run_cmd",
+ ]
# valid_req_states is a list of sub states supported by the get_state function.
# valid_req_states, default_req_states and master_os_up_match are used by the
# get_state function.
- valid_req_states = ['ping',
- 'packet_loss',
- 'uptime',
- 'epoch_seconds',
- 'elapsed_boot_time',
- 'redfish',
- 'chassis',
- 'requested_chassis',
- 'bmc',
- 'requested_bmc',
- 'boot_progress',
- 'host',
- 'requested_host',
- 'attempts_left',
- 'os_ping',
- 'os_login',
- 'os_run_cmd']
+ valid_req_states = [
+ "ping",
+ "packet_loss",
+ "uptime",
+ "epoch_seconds",
+ "elapsed_boot_time",
+ "redfish",
+ "chassis",
+ "requested_chassis",
+ "bmc",
+ "requested_bmc",
+ "boot_progress",
+ "host",
+ "requested_host",
+ "attempts_left",
+ "os_ping",
+ "os_login",
+ "os_run_cmd",
+ ]
# default_state is an initial value which may be of use to callers.
- default_state = DotDict([('redfish', '1'),
- ('chassis', 'On'),
- ('bmc', 'Enabled'),
- ('boot_progress',
- 'SystemHardwareInitializationComplete|OSBootStarted|OSRunning'),
- ('host', 'Enabled'),
- ('os_ping', '1'),
- ('os_login', '1'),
- ('os_run_cmd', '1')])
+ default_state = DotDict(
+ [
+ ("redfish", "1"),
+ ("chassis", "On"),
+ ("bmc", "Enabled"),
+ (
+ "boot_progress",
+ "SystemHardwareInitializationComplete|OSBootStarted|OSRunning",
+ ),
+ ("host", "Enabled"),
+ ("os_ping", "1"),
+ ("os_login", "1"),
+ ("os_run_cmd", "1"),
+ ]
+ )
# A match state for checking that the system is at "standby".
- standby_match_state = DotDict([('redfish', '^1$'),
- ('chassis', '^Off$'),
- ('bmc', '^Enabled$'),
- ('boot_progress', '^None$'),
- ('host', '^Disabled$')])
+ standby_match_state = DotDict(
+ [
+ ("redfish", "^1$"),
+ ("chassis", "^Off$"),
+ ("bmc", "^Enabled$"),
+ ("boot_progress", "^None$"),
+ ("host", "^Disabled$"),
+ ]
+ )
# A match state for checking that the system is at "os running".
- os_running_match_state = DotDict([('chassis', '^On$'),
- ('bmc', '^Enabled$'),
- ('boot_progress',
- 'SystemHardwareInitializationComplete|OSBootStarted|OSRunning'),
- ('host', '^Enabled$'),
- ('os_ping', '^1$'),
- ('os_login', '^1$'),
- ('os_run_cmd', '^1$')])
+ os_running_match_state = DotDict(
+ [
+ ("chassis", "^On$"),
+ ("bmc", "^Enabled$"),
+ (
+ "boot_progress",
+ "SystemHardwareInitializationComplete|OSBootStarted|OSRunning",
+ ),
+ ("host", "^Enabled$"),
+ ("os_ping", "^1$"),
+ ("os_login", "^1$"),
+ ("os_run_cmd", "^1$"),
+ ]
+ )
# A master dictionary to determine whether the os may be up.
- master_os_up_match = DotDict([('chassis', '^On$'),
- ('bmc', '^Enabled$'),
- ('boot_progress',
- 'SystemHardwareInitializationComplete|OSBootStarted|OSRunning'),
- ('host', '^Enabled$')])
+ master_os_up_match = DotDict(
+ [
+ ("chassis", "^On$"),
+ ("bmc", "^Enabled$"),
+ (
+ "boot_progress",
+ "SystemHardwareInitializationComplete|OSBootStarted|OSRunning",
+ ),
+ ("host", "^Enabled$"),
+ ]
+ )
- invalid_state_match = DotDict([('redfish', '^$'),
- ('chassis', '^$'),
- ('bmc', '^$'),
- ('boot_progress', '^$'),
- ('host', '^$')])
+ invalid_state_match = DotDict(
+ [
+ ("redfish", "^$"),
+ ("chassis", "^$"),
+ ("bmc", "^$"),
+ ("boot_progress", "^$"),
+ ("host", "^$"),
+ ]
+ )
# Filter the states based on platform type.
if platform_arch_type == "x86":
-
if not redfish_support_trans_state:
default_req_states.remove("operating_system")
valid_req_states.remove("operating_system")
@@ -280,7 +331,7 @@
del invalid_state_match["boot_progress"]
-def return_state_constant(state_name='default_state'):
+def return_state_constant(state_name="default_state"):
r"""
Return the named state dictionary constant.
"""
@@ -328,12 +379,10 @@
r"""
Return expressions key constant.
"""
- return '<expressions>'
+ return "<expressions>"
-def compare_states(state,
- match_state,
- match_type='and'):
+def compare_states(state, match_state, match_type="and"):
r"""
Compare 2 state dictionaries. Return True if they match and False if they
don't. Note that the match_state dictionary does not need to have an entry
@@ -373,7 +422,7 @@
match_type This may be 'and' or 'or'.
"""
- error_message = gv.valid_value(match_type, valid_values=['and', 'or'])
+ error_message = gv.valid_value(match_type, valid_values=["and", "or"])
if error_message != "":
BuiltIn().fail(gp.sprint_error(error_message))
@@ -382,7 +431,7 @@
except TypeError:
pass
- default_match = (match_type == 'and')
+ default_match = match_type == "and"
for key, match_state_value in match_state.items():
# Blank match_state_value means "don't care".
if match_state_value == "":
@@ -395,7 +444,9 @@
return match
else:
try:
- match = (re.match(match_state_value, str(state[key])) is not None)
+ match = (
+ re.match(match_state_value, str(state[key])) is not None
+ )
except KeyError:
match = False
if match != default_match:
@@ -404,12 +455,14 @@
return default_match
-def get_os_state(os_host="",
- os_username="",
- os_password="",
- req_states=default_os_req_states,
- os_up=True,
- quiet=None):
+def get_os_state(
+ os_host="",
+ os_username="",
+ os_password="",
+ req_states=default_os_req_states,
+ os_up=True,
+ quiet=None,
+):
r"""
Get component states for the operating system such as ping, login,
etc, put them into a dictionary and return them to the caller.
@@ -455,11 +508,16 @@
if error_message != "":
BuiltIn().fail(gp.sprint_error(error_message))
- invalid_req_states = [sub_state for sub_state in req_states
- if sub_state not in valid_os_req_states]
+ invalid_req_states = [
+ sub_state
+ for sub_state in req_states
+ if sub_state not in valid_os_req_states
+ ]
if len(invalid_req_states) > 0:
- error_message = "The following req_states are not supported:\n" +\
- gp.sprint_var(invalid_req_states)
+ error_message = (
+ "The following req_states are not supported:\n"
+ + gp.sprint_var(invalid_req_states)
+ )
BuiltIn().fail(gp.sprint_error(error_message))
# Initialize all substate values supported by this function.
@@ -468,28 +526,37 @@
os_run_cmd = 0
if os_up:
- if 'os_ping' in req_states:
+ if "os_ping" in req_states:
# See if the OS pings.
- rc, out_buf = gc.shell_cmd("ping -c 1 -w 2 " + os_host,
- print_output=0, show_err=0,
- ignore_err=1)
+ rc, out_buf = gc.shell_cmd(
+ "ping -c 1 -w 2 " + os_host,
+ print_output=0,
+ show_err=0,
+ ignore_err=1,
+ )
if rc == 0:
os_ping = 1
# Programming note: All attributes which do not require an ssh login
# should have been processed by this point.
- master_req_login = ['os_login', 'os_run_cmd']
- req_login = [sub_state for sub_state in req_states if sub_state in
- master_req_login]
- must_login = (len(req_login) > 0)
+ master_req_login = ["os_login", "os_run_cmd"]
+ req_login = [
+ sub_state
+ for sub_state in req_states
+ if sub_state in master_req_login
+ ]
+ must_login = len(req_login) > 0
if must_login:
- output, stderr, rc = bsu.os_execute_command("uptime", quiet=quiet,
- ignore_err=1,
- time_out=20,
- os_host=os_host,
- os_username=os_username,
- os_password=os_password)
+ output, stderr, rc = bsu.os_execute_command(
+ "uptime",
+ quiet=quiet,
+ ignore_err=1,
+ time_out=20,
+ os_host=os_host,
+ os_username=os_username,
+ os_password=os_password,
+ )
if rc == 0:
os_login = 1
os_run_cmd = 1
@@ -505,14 +572,16 @@
return os_state
-def get_state(openbmc_host="",
- openbmc_username="",
- openbmc_password="",
- os_host="",
- os_username="",
- os_password="",
- req_states=default_req_states,
- quiet=None):
+def get_state(
+ openbmc_host="",
+ openbmc_username="",
+ openbmc_password="",
+ os_host="",
+ os_username="",
+ os_password="",
+ req_states=default_req_states,
+ quiet=None,
+):
r"""
Get component states such as chassis state, bmc state, etc, put them into a
dictionary and return them to the caller.
@@ -581,116 +650,149 @@
if os_password is None:
os_password = ""
- invalid_req_states = [sub_state for sub_state in req_states
- if sub_state not in valid_req_states]
+ invalid_req_states = [
+ sub_state
+ for sub_state in req_states
+ if sub_state not in valid_req_states
+ ]
if len(invalid_req_states) > 0:
- error_message = "The following req_states are not supported:\n" +\
- gp.sprint_var(invalid_req_states)
+ error_message = (
+ "The following req_states are not supported:\n"
+ + gp.sprint_var(invalid_req_states)
+ )
BuiltIn().fail(gp.sprint_error(error_message))
# Initialize all substate values supported by this function.
ping = 0
- packet_loss = ''
- uptime = ''
- epoch_seconds = ''
- elapsed_boot_time = ''
- rest = ''
- redfish = ''
- chassis = ''
- requested_chassis = ''
- bmc = ''
- requested_bmc = ''
+ packet_loss = ""
+ uptime = ""
+ epoch_seconds = ""
+ elapsed_boot_time = ""
+ rest = ""
+ redfish = ""
+ chassis = ""
+ requested_chassis = ""
+ bmc = ""
+ requested_bmc = ""
# BootProgress state will get populated when state logic enumerates the
# state URI. This is to prevent state dictionary boot_progress value
# getting empty when the BootProgress is NOT found, making it optional.
- boot_progress = 'NA'
- operating_system = ''
- host = ''
- requested_host = ''
- attempts_left = ''
+ boot_progress = "NA"
+ operating_system = ""
+ host = ""
+ requested_host = ""
+ attempts_left = ""
# Get the component states.
- if 'ping' in req_states:
+ if "ping" in req_states:
# See if the OS pings.
- rc, out_buf = gc.shell_cmd("ping -c 1 -w 2 " + openbmc_host,
- print_output=0, show_err=0,
- ignore_err=1)
+ rc, out_buf = gc.shell_cmd(
+ "ping -c 1 -w 2 " + openbmc_host,
+ print_output=0,
+ show_err=0,
+ ignore_err=1,
+ )
if rc == 0:
ping = 1
- if 'packet_loss' in req_states:
+ if "packet_loss" in req_states:
# See if the OS pings.
- cmd_buf = "ping -c 5 -w 5 " + openbmc_host +\
- " | egrep 'packet loss' | sed -re 's/.* ([0-9]+)%.*/\\1/g'"
- rc, out_buf = gc.shell_cmd(cmd_buf,
- print_output=0, show_err=0,
- ignore_err=1)
+ cmd_buf = (
+ "ping -c 5 -w 5 "
+ + openbmc_host
+ + " | egrep 'packet loss' | sed -re 's/.* ([0-9]+)%.*/\\1/g'"
+ )
+ rc, out_buf = gc.shell_cmd(
+ cmd_buf, print_output=0, show_err=0, ignore_err=1
+ )
if rc == 0:
packet_loss = out_buf.rstrip("\n")
- if 'uptime' in req_states:
+ if "uptime" in req_states:
# Sometimes reading uptime results in a blank value. Call with
# wait_until_keyword_succeeds to ensure a non-blank value is obtained.
- remote_cmd_buf = "bash -c 'read uptime filler 2>/dev/null < /proc/uptime" +\
- " && [ ! -z \"${uptime}\" ] && echo ${uptime}'"
- cmd_buf = ["BMC Execute Command",
- re.sub('\\$', '\\$', remote_cmd_buf), 'quiet=1',
- 'test_mode=0', 'time_out=5']
+ remote_cmd_buf = (
+ "bash -c 'read uptime filler 2>/dev/null < /proc/uptime"
+ + ' && [ ! -z "${uptime}" ] && echo ${uptime}\''
+ )
+ cmd_buf = [
+ "BMC Execute Command",
+ re.sub("\\$", "\\$", remote_cmd_buf),
+ "quiet=1",
+ "test_mode=0",
+ "time_out=5",
+ ]
gp.qprint_issuing(cmd_buf, 0)
gp.qprint_issuing(remote_cmd_buf, 0)
try:
- stdout, stderr, rc =\
- BuiltIn().wait_until_keyword_succeeds("10 sec", "5 sec",
- *cmd_buf)
+ stdout, stderr, rc = BuiltIn().wait_until_keyword_succeeds(
+ "10 sec", "5 sec", *cmd_buf
+ )
if rc == 0 and stderr == "":
uptime = stdout
except AssertionError as my_assertion_error:
pass
- if 'epoch_seconds' in req_states or 'elapsed_boot_time' in req_states:
+ if "epoch_seconds" in req_states or "elapsed_boot_time" in req_states:
date_cmd_buf = "date -u +%s"
if USE_BMC_EPOCH_TIME:
- cmd_buf = ["BMC Execute Command", date_cmd_buf, 'quiet=${1}']
+ cmd_buf = ["BMC Execute Command", date_cmd_buf, "quiet=${1}"]
if not quiet:
gp.print_issuing(cmd_buf)
- status, ret_values = \
- BuiltIn().run_keyword_and_ignore_error(*cmd_buf)
+ status, ret_values = BuiltIn().run_keyword_and_ignore_error(
+ *cmd_buf
+ )
if status == "PASS":
stdout, stderr, rc = ret_values
if rc == 0 and stderr == "":
epoch_seconds = stdout.rstrip("\n")
else:
- shell_rc, out_buf = gc.cmd_fnc_u(date_cmd_buf,
- quiet=quiet,
- print_output=0)
+ shell_rc, out_buf = gc.cmd_fnc_u(
+ date_cmd_buf, quiet=quiet, print_output=0
+ )
if shell_rc == 0:
epoch_seconds = out_buf.rstrip("\n")
- if 'elapsed_boot_time' in req_states:
+ if "elapsed_boot_time" in req_states:
global start_boot_seconds
elapsed_boot_time = int(epoch_seconds) - start_boot_seconds
if not redfish_support_trans_state:
- master_req_rest = ['rest', 'host', 'requested_host', 'operating_system',
- 'attempts_left', 'boot_progress', 'chassis',
- 'requested_chassis' 'bmc' 'requested_bmc']
+ master_req_rest = [
+ "rest",
+ "host",
+ "requested_host",
+ "operating_system",
+ "attempts_left",
+ "boot_progress",
+ "chassis",
+ "requested_chassisbmcrequested_bmc",
+ ]
- req_rest = [sub_state for sub_state in req_states if sub_state in
- master_req_rest]
- need_rest = (len(req_rest) > 0)
+ req_rest = [
+ sub_state
+ for sub_state in req_states
+ if sub_state in master_req_rest
+ ]
+ need_rest = len(req_rest) > 0
state = DotDict()
if need_rest:
- cmd_buf = ["Read Properties", SYSTEM_STATE_URI + "enumerate",
- "quiet=${" + str(quiet) + "}", "timeout=30"]
+ cmd_buf = [
+ "Read Properties",
+ SYSTEM_STATE_URI + "enumerate",
+ "quiet=${" + str(quiet) + "}",
+ "timeout=30",
+ ]
gp.dprint_issuing(cmd_buf)
- status, ret_values = \
- BuiltIn().run_keyword_and_ignore_error(*cmd_buf)
+ status, ret_values = BuiltIn().run_keyword_and_ignore_error(
+ *cmd_buf
+ )
if status == "PASS":
- state['rest'] = '1'
+ state["rest"] = "1"
else:
- state['rest'] = '0'
+ state["rest"] = "0"
- if int(state['rest']):
+ if int(state["rest"]):
for url_path in ret_values:
# Skip conflicting "CurrentHostState" URL from the enum
# /xyz/openbmc_project/state/hypervisor0
@@ -706,54 +808,69 @@
for attr_name in ret_values[url_path]:
# Create a state key value based on the attr_name.
try:
- ret_values[url_path][attr_name] = \
- re.sub(r'.*\.', "",
- ret_values[url_path][attr_name])
+ ret_values[url_path][attr_name] = re.sub(
+ r".*\.", "", ret_values[url_path][attr_name]
+ )
except TypeError:
pass
# Do some key name manipulations.
- new_attr_name = re.sub(r'^Current|(State|Transition)$',
- "", attr_name)
- new_attr_name = re.sub(r'BMC', r'Bmc', new_attr_name)
- new_attr_name = re.sub(r'([A-Z][a-z])', r'_\1',
- new_attr_name)
+ new_attr_name = re.sub(
+ r"^Current|(State|Transition)$", "", attr_name
+ )
+ new_attr_name = re.sub(r"BMC", r"Bmc", new_attr_name)
+ new_attr_name = re.sub(
+ r"([A-Z][a-z])", r"_\1", new_attr_name
+ )
new_attr_name = new_attr_name.lower().lstrip("_")
- new_attr_name = re.sub(r'power', r'chassis', new_attr_name)
+ new_attr_name = re.sub(
+ r"power", r"chassis", new_attr_name
+ )
if new_attr_name in req_states:
- state[new_attr_name] = ret_values[url_path][attr_name]
+ state[new_attr_name] = ret_values[url_path][
+ attr_name
+ ]
else:
- master_req_rf = ['redfish', 'host', 'requested_host',
- 'attempts_left', 'boot_progress', 'chassis',
- 'requested_chassis' 'bmc' 'requested_bmc']
+ master_req_rf = [
+ "redfish",
+ "host",
+ "requested_host",
+ "attempts_left",
+ "boot_progress",
+ "chassis",
+ "requested_chassisbmcrequested_bmc",
+ ]
- req_rf = [sub_state for sub_state in req_states if sub_state in
- master_req_rf]
- need_rf = (len(req_rf) > 0)
+ req_rf = [
+ sub_state for sub_state in req_states if sub_state in master_req_rf
+ ]
+ need_rf = len(req_rf) > 0
state = DotDict()
if need_rf:
cmd_buf = ["Redfish Get States"]
gp.dprint_issuing(cmd_buf)
try:
- status, ret_values = \
- BuiltIn().run_keyword_and_ignore_error(*cmd_buf)
+ status, ret_values = BuiltIn().run_keyword_and_ignore_error(
+ *cmd_buf
+ )
except Exception as ex:
# Robot raised UserKeywordExecutionFailed error exception.
gp.dprint_issuing("Retrying Redfish Get States")
- status, ret_values = \
- BuiltIn().run_keyword_and_ignore_error(*cmd_buf)
+ status, ret_values = BuiltIn().run_keyword_and_ignore_error(
+ *cmd_buf
+ )
gp.dprint_vars(status, ret_values)
if status == "PASS":
- state['redfish'] = '1'
+ state["redfish"] = "1"
else:
- state['redfish'] = '0'
+ state["redfish"] = "0"
- if int(state['redfish']):
- state['chassis'] = ret_values['chassis']
- state['host'] = ret_values['host']
- state['bmc'] = ret_values['bmc']
+ if int(state["redfish"]):
+ state["chassis"] = ret_values["chassis"]
+ state["host"] = ret_values["host"]
+ state["bmc"] = ret_values["bmc"]
if platform_arch_type != "x86":
- state['boot_progress'] = ret_values['boot_progress']
+ state["boot_progress"] = ret_values["boot_progress"]
for sub_state in req_states:
if sub_state in state:
@@ -769,8 +886,9 @@
# it doesn't exist.
return state
- os_req_states = [sub_state for sub_state in req_states
- if sub_state.startswith('os_')]
+ os_req_states = [
+ sub_state for sub_state in req_states if sub_state.startswith("os_")
+ ]
if len(os_req_states) > 0:
# The caller has specified an os_host and they have requested
@@ -784,12 +902,14 @@
if sub_state in req_states:
os_up_match[sub_state] = master_os_up_match[sub_state]
os_up = compare_states(state, os_up_match)
- os_state = get_os_state(os_host=os_host,
- os_username=os_username,
- os_password=os_password,
- req_states=os_req_states,
- os_up=os_up,
- quiet=quiet)
+ os_state = get_os_state(
+ os_host=os_host,
+ os_username=os_username,
+ os_password=os_password,
+ req_states=os_req_states,
+ os_up=os_up,
+ quiet=quiet,
+ )
# Append os_state dictionary to ours.
state.update(os_state)
@@ -815,16 +935,18 @@
exit_wait_early_message = value
-def check_state(match_state,
- invert=0,
- print_string="",
- openbmc_host="",
- openbmc_username="",
- openbmc_password="",
- os_host="",
- os_username="",
- os_password="",
- quiet=None):
+def check_state(
+ match_state,
+ invert=0,
+ print_string="",
+ openbmc_host="",
+ openbmc_username="",
+ openbmc_password="",
+ os_host="",
+ os_username="",
+ os_password="",
+ quiet=None,
+):
r"""
Check that the Open BMC machine's composite state matches the specified
state. On success, this keyword returns the machine's composite state as a
@@ -873,14 +995,16 @@
if expressions_key() in req_states:
req_states.remove(expressions_key())
# Initialize state.
- state = get_state(openbmc_host=openbmc_host,
- openbmc_username=openbmc_username,
- openbmc_password=openbmc_password,
- os_host=os_host,
- os_username=os_username,
- os_password=os_password,
- req_states=req_states,
- quiet=quiet)
+ state = get_state(
+ openbmc_host=openbmc_host,
+ openbmc_username=openbmc_username,
+ openbmc_password=openbmc_password,
+ os_host=os_host,
+ os_username=os_username,
+ os_password=os_password,
+ req_states=req_states,
+ quiet=quiet,
+ )
if not quiet:
gp.print_var(state)
@@ -894,29 +1018,36 @@
match = compare_states(state, match_state)
if invert and match:
- fail_msg = "The current state of the machine matches the match" +\
- " state:\n" + gp.sprint_varx("state", state)
+ fail_msg = (
+ "The current state of the machine matches the match"
+ + " state:\n"
+ + gp.sprint_varx("state", state)
+ )
BuiltIn().fail("\n" + gp.sprint_error(fail_msg))
elif not invert and not match:
- fail_msg = "The current state of the machine does NOT match the" +\
- " match state:\n" +\
- gp.sprint_varx("state", state)
+ fail_msg = (
+ "The current state of the machine does NOT match the"
+ + " match state:\n"
+ + gp.sprint_varx("state", state)
+ )
BuiltIn().fail("\n" + gp.sprint_error(fail_msg))
return state
-def wait_state(match_state=(),
- wait_time="1 min",
- interval="1 second",
- invert=0,
- openbmc_host="",
- openbmc_username="",
- openbmc_password="",
- os_host="",
- os_username="",
- os_password="",
- quiet=None):
+def wait_state(
+ match_state=(),
+ wait_time="1 min",
+ interval="1 second",
+ invert=0,
+ openbmc_host="",
+ openbmc_username="",
+ openbmc_password="",
+ os_host="",
+ os_username="",
+ os_password="",
+ quiet=None,
+):
r"""
Wait for the Open BMC machine's composite state to match the specified
state. On success, this keyword returns the machine's composite state as
@@ -967,9 +1098,15 @@
alt_text = "cease to "
else:
alt_text = ""
- gp.print_timen("Checking every " + str(interval) + " for up to "
- + str(wait_time) + " for the state of the machine to "
- + alt_text + "match the state shown below.")
+ gp.print_timen(
+ "Checking every "
+ + str(interval)
+ + " for up to "
+ + str(wait_time)
+ + " for the state of the machine to "
+ + alt_text
+ + "match the state shown below."
+ )
gp.print_var(match_state)
if quiet:
@@ -982,16 +1119,24 @@
# In debug we print state so no need to print the "#".
print_string = ""
check_state_quiet = 1 - debug
- cmd_buf = ["Check State", match_state, "invert=${" + str(invert) + "}",
- "print_string=" + print_string, "openbmc_host=" + openbmc_host,
- "openbmc_username=" + openbmc_username,
- "openbmc_password=" + openbmc_password, "os_host=" + os_host,
- "os_username=" + os_username, "os_password=" + os_password,
- "quiet=${" + str(check_state_quiet) + "}"]
+ cmd_buf = [
+ "Check State",
+ match_state,
+ "invert=${" + str(invert) + "}",
+ "print_string=" + print_string,
+ "openbmc_host=" + openbmc_host,
+ "openbmc_username=" + openbmc_username,
+ "openbmc_password=" + openbmc_password,
+ "os_host=" + os_host,
+ "os_username=" + os_username,
+ "os_password=" + os_password,
+ "quiet=${" + str(check_state_quiet) + "}",
+ ]
gp.dprint_issuing(cmd_buf)
try:
- state = BuiltIn().wait_until_keyword_succeeds(wait_time, interval,
- *cmd_buf)
+ state = BuiltIn().wait_until_keyword_succeeds(
+ wait_time, interval, *cmd_buf
+ )
except AssertionError as my_assertion_error:
gp.printn()
message = my_assertion_error.args[0]
@@ -1024,8 +1169,7 @@
set_start_boot_seconds(0)
-def wait_for_comm_cycle(start_boot_seconds,
- quiet=None):
+def wait_for_comm_cycle(start_boot_seconds, quiet=None):
r"""
Wait for the BMC uptime to be less than elapsed_boot_time.
@@ -1055,15 +1199,19 @@
# Wait for uptime to be less than elapsed_boot_time.
set_start_boot_seconds(start_boot_seconds)
- expr = 'int(float(state[\'uptime\'])) < int(state[\'elapsed_boot_time\'])'
- match_state = DotDict([('uptime', '^[0-9\\.]+$'),
- ('elapsed_boot_time', '^[0-9]+$'),
- (expressions_key(), [expr])])
+ expr = "int(float(state['uptime'])) < int(state['elapsed_boot_time'])"
+ match_state = DotDict(
+ [
+ ("uptime", "^[0-9\\.]+$"),
+ ("elapsed_boot_time", "^[0-9]+$"),
+ (expressions_key(), [expr]),
+ ]
+ )
wait_state(match_state, wait_time="12 mins", interval="5 seconds")
gp.qprint_timen("Verifying that REST/Redfish API interface is working.")
if not redfish_support_trans_state:
- match_state = DotDict([('rest', '^1$')])
+ match_state = DotDict([("rest", "^1$")])
else:
- match_state = DotDict([('redfish', '^1$')])
+ match_state = DotDict([("redfish", "^1$")])
state = wait_state(match_state, wait_time="5 mins", interval="2 seconds")
diff --git a/lib/state_map.py b/lib/state_map.py
index abff002..e2271b7 100644
--- a/lib/state_map.py
+++ b/lib/state_map.py
@@ -13,93 +13,87 @@
from robot.libraries.BuiltIn import BuiltIn
robot_pgm_dir_path = os.path.dirname(__file__) + os.sep
-repo_data_dir_path = re.sub('/lib', '/data', robot_pgm_dir_path)
+repo_data_dir_path = re.sub("/lib", "/data", robot_pgm_dir_path)
sys.path.append(repo_data_dir_path)
import gen_robot_keyword as keyword # NOQA
-import variables as var # NOQA
+import variables as var # NOQA
BuiltIn().import_resource("state_manager.robot")
BuiltIn().import_resource("rest_client.robot")
-platform_arch_type = os.environ.get('PLATFORM_ARCH_TYPE', '') or \
- BuiltIn().get_variable_value("${PLATFORM_ARCH_TYPE}", default="power")
+platform_arch_type = os.environ.get(
+ "PLATFORM_ARCH_TYPE", ""
+) or BuiltIn().get_variable_value("${PLATFORM_ARCH_TYPE}", default="power")
# We will build eventually the mapping for warm, cold reset as well.
VALID_STATES = {
- 'reboot':
- {
+ "reboot": {
# (Power Policy, BMC state, Chassis State, Host State)
- ('LEAVE_OFF', 'Ready', 'Off', 'Off'),
- ('ALWAYS_POWER_ON', 'Ready', 'On', 'Running'),
- ('ALWAYS_POWER_ON', 'Ready', 'On', 'Off'),
- ('RESTORE_LAST_STATE', 'Ready', 'On', 'Running'),
- ('RESTORE_LAST_STATE', 'Ready', 'On', 'Off'),
- ('ALWAYS_POWER_OFF', 'Ready', 'On', 'Running'),
- ('ALWAYS_POWER_OFF', 'Ready', 'Off', 'Off'),
+ ("LEAVE_OFF", "Ready", "Off", "Off"),
+ ("ALWAYS_POWER_ON", "Ready", "On", "Running"),
+ ("ALWAYS_POWER_ON", "Ready", "On", "Off"),
+ ("RESTORE_LAST_STATE", "Ready", "On", "Running"),
+ ("RESTORE_LAST_STATE", "Ready", "On", "Off"),
+ ("ALWAYS_POWER_OFF", "Ready", "On", "Running"),
+ ("ALWAYS_POWER_OFF", "Ready", "Off", "Off"),
},
}
VALID_BOOT_STATES = {
- 'Off': # Valid states when Host is Off.
- {
+ "Off": { # Valid states when Host is Off.
# (BMC , Chassis , Host , BootProgress, OperatingSystemState)
(
"xyz.openbmc_project.State.BMC.BMCState.Ready",
"xyz.openbmc_project.State.Chassis.PowerState.Off",
"xyz.openbmc_project.State.Host.HostState.Off",
"xyz.openbmc_project.State.Boot.Progress.ProgressStages.Unspecified",
- "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.Inactive"
+ "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.Inactive",
),
},
- 'Reboot': # Valid states when BMC reset to standby.
- {
+ "Reboot": { # Valid states when BMC reset to standby.
# (BMC , Chassis , Host , BootProgress, OperatingSystemState)
(
"xyz.openbmc_project.State.BMC.BMCState.Ready",
"xyz.openbmc_project.State.Chassis.PowerState.Off",
"xyz.openbmc_project.State.Host.HostState.Off",
"xyz.openbmc_project.State.Boot.Progress.ProgressStages.Unspecified",
- "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.Inactive"
+ "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.Inactive",
),
},
- 'Running': # Valid states when Host is powering on.
- {
+ "Running": { # Valid states when Host is powering on.
# (BMC , Chassis , Host , BootProgress, OperatingSystemState)
(
"xyz.openbmc_project.State.BMC.BMCState.Ready",
"xyz.openbmc_project.State.Chassis.PowerState.On",
"xyz.openbmc_project.State.Host.HostState.Running",
"xyz.openbmc_project.State.Boot.Progress.ProgressStages.MotherboardInit",
- "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.Inactive"
+ "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.Inactive",
),
},
- 'Booted': # Valid state when Host is booted.
- {
+ "Booted": { # Valid state when Host is booted.
# (BMC , Chassis , Host , BootProgress, OperatingSystemState)
(
"xyz.openbmc_project.State.BMC.BMCState.Ready",
"xyz.openbmc_project.State.Chassis.PowerState.On",
"xyz.openbmc_project.State.Host.HostState.Running",
"xyz.openbmc_project.State.Boot.Progress.ProgressStages.OSStart",
- "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.BootComplete"
+ "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.BootComplete",
),
},
- 'ResetReload': # Valid state BMC reset reload when host is booted.
- {
+ "ResetReload": { # Valid state BMC reset reload when host is booted.
# (BMC , Chassis , Host , BootProgress, OperatingSystemState)
(
"xyz.openbmc_project.State.BMC.BMCState.Ready",
"xyz.openbmc_project.State.Chassis.PowerState.On",
"xyz.openbmc_project.State.Host.HostState.Running",
"xyz.openbmc_project.State.Boot.Progress.ProgressStages.OSStart",
- "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.BootComplete"
+ "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.BootComplete",
),
},
}
REDFISH_VALID_BOOT_STATES = {
- 'Off': # Valid states when Host is Off.
- {
+ "Off": { # Valid states when Host is Off.
# (BMC , Chassis , Host , BootProgress)
(
"Enabled",
@@ -108,8 +102,7 @@
"None",
),
},
- 'Reboot': # Valid states when BMC reset to standby.
- {
+ "Reboot": { # Valid states when BMC reset to standby.
# (BMC , Chassis , Host , BootProgress)
(
"Enabled",
@@ -118,8 +111,7 @@
"None",
),
},
- 'Running': # Valid states when Host is powering on.
- {
+ "Running": { # Valid states when Host is powering on.
# (BMC , Chassis , Host , BootProgress)
(
"Enabled",
@@ -128,8 +120,7 @@
"OSRunning",
),
},
- 'Booted': # Valid state when Host is booted.
- {
+ "Booted": { # Valid state when Host is booted.
# (BMC , Chassis , Host , BootProgress)
(
"Enabled",
@@ -138,8 +129,7 @@
"OSRunning",
),
},
- 'ResetReload': # Valid state BMC reset reload when host is booted.
- {
+ "ResetReload": { # Valid state BMC reset reload when host is booted.
# (BMC , Chassis , Host , BootProgress)
(
"Enabled",
@@ -160,41 +150,45 @@
for x in state_tuple
if not (
x.startswith("xyz.openbmc_project.State.Boot.Progress")
- or x.startswith("xyz.openbmc_project.State.OperatingSystem")
+ or x.startswith(
+ "xyz.openbmc_project.State.OperatingSystem"
+ )
)
)
VALID_BOOT_STATES_X86[state_name].add(state_tuple_new)
VALID_BOOT_STATES = VALID_BOOT_STATES_X86
-class state_map():
-
+class state_map:
def get_boot_state(self):
r"""
Return the system state as a tuple of bmc, chassis, host state,
BootProgress and OperatingSystemState.
"""
- status, state = keyword.run_key("Read Properties "
- + var.SYSTEM_STATE_URI + "enumerate")
- bmc_state = state[var.SYSTEM_STATE_URI + 'bmc0']['CurrentBMCState']
- chassis_state = \
- state[var.SYSTEM_STATE_URI + 'chassis0']['CurrentPowerState']
- host_state = state[var.SYSTEM_STATE_URI + 'host0']['CurrentHostState']
+ status, state = keyword.run_key(
+ "Read Properties " + var.SYSTEM_STATE_URI + "enumerate"
+ )
+ bmc_state = state[var.SYSTEM_STATE_URI + "bmc0"]["CurrentBMCState"]
+ chassis_state = state[var.SYSTEM_STATE_URI + "chassis0"][
+ "CurrentPowerState"
+ ]
+ host_state = state[var.SYSTEM_STATE_URI + "host0"]["CurrentHostState"]
if platform_arch_type == "x86":
- return (str(bmc_state),
- str(chassis_state),
- str(host_state))
+ return (str(bmc_state), str(chassis_state), str(host_state))
else:
- boot_state = state[var.SYSTEM_STATE_URI + 'host0']['BootProgress']
- os_state = \
- state[var.SYSTEM_STATE_URI + 'host0']['OperatingSystemState']
+ boot_state = state[var.SYSTEM_STATE_URI + "host0"]["BootProgress"]
+ os_state = state[var.SYSTEM_STATE_URI + "host0"][
+ "OperatingSystemState"
+ ]
- return (str(bmc_state),
- str(chassis_state),
- str(host_state),
- str(boot_state),
- str(os_state))
+ return (
+ str(bmc_state),
+ str(chassis_state),
+ str(host_state),
+ str(boot_state),
+ str(os_state),
+ )
def valid_boot_state(self, boot_type, state_set):
r"""
@@ -222,7 +216,9 @@
state_dict State dictionary.
"""
- if set(state_dict.values()) in set(REDFISH_VALID_BOOT_STATES[boot_type]):
+ if set(state_dict.values()) in set(
+ REDFISH_VALID_BOOT_STATES[boot_type]
+ ):
return True
else:
return False
diff --git a/lib/tally_sheet.py b/lib/tally_sheet.py
index 03162af..52ed279 100755
--- a/lib/tally_sheet.py
+++ b/lib/tally_sheet.py
@@ -4,10 +4,10 @@
Define the tally_sheet class.
"""
-import sys
import collections
import copy
import re
+import sys
try:
from robot.utils import DotDict
@@ -18,7 +18,6 @@
class tally_sheet:
-
r"""
This class is the implementation of a tally sheet. The sheet can be viewed as rows and columns. Each
row has a unique key field.
@@ -63,10 +62,12 @@
"""
- def __init__(self,
- row_key_field_name='Description',
- init_fields_dict=dict(),
- obj_name='tally_sheet'):
+ def __init__(
+ self,
+ row_key_field_name="Description",
+ init_fields_dict=dict(),
+ obj_name="tally_sheet",
+ ):
r"""
Create a tally sheet object.
@@ -92,13 +93,12 @@
self.__sum_fields = []
self.__calc_fields = []
- def init(self,
- row_key_field_name,
- init_fields_dict,
- obj_name='tally_sheet'):
- self.__init__(row_key_field_name,
- init_fields_dict,
- obj_name='tally_sheet')
+ def init(
+ self, row_key_field_name, init_fields_dict, obj_name="tally_sheet"
+ ):
+ self.__init__(
+ row_key_field_name, init_fields_dict, obj_name="tally_sheet"
+ )
def set_sum_fields(self, sum_fields):
r"""
@@ -137,7 +137,7 @@
if row_key in self.__table:
# If we allow this, the row values get re-initialized.
- message = "An entry for \"" + row_key + "\" already exists in"
+ message = 'An entry for "' + row_key + '" already exists in'
message += " tally sheet."
raise ValueError(message)
if init_fields_dict is None:
@@ -193,7 +193,7 @@
for row_key, value in self.__table.items():
# Walk through the calc fields and process them.
for calc_field in self.__calc_fields:
- tokens = [i for i in re.split(r'(\d+|\W+)', calc_field) if i]
+ tokens = [i for i in re.split(r"(\d+|\W+)", calc_field) if i]
cmd_buf = ""
for token in tokens:
if token in ("=", "+", "-", "*", "/"):
@@ -201,9 +201,15 @@
else:
# Note: Using "mangled" name for the sake of the exec
# statement (below).
- cmd_buf += "self._" + self.__class__.__name__ +\
- "__table['" + row_key + "']['" +\
- token + "'] "
+ cmd_buf += (
+ "self._"
+ + self.__class__.__name__
+ + "__table['"
+ + row_key
+ + "']['"
+ + token
+ + "'] "
+ )
exec(cmd_buf)
for field_key, sub_value in value.items():
@@ -248,8 +254,8 @@
col_names = [self.__row_key_field_name.title()]
report_width = 40
key_width = 40
- format_string = '{0:<' + str(key_width) + '}'
- dash_format_string = '{0:-<' + str(key_width) + '}'
+ format_string = "{0:<" + str(key_width) + "}"
+ dash_format_string = "{0:-<" + str(key_width) + "}"
field_num = 0
try:
@@ -257,28 +263,31 @@
for row_key, value in first_rec[1].items():
field_num += 1
if isinstance(value, int):
- align = ':>'
+ align = ":>"
else:
- align = ':<'
- format_string += ' {' + str(field_num) + align +\
- str(len(row_key)) + '}'
- dash_format_string += ' {' + str(field_num) + ':->' +\
- str(len(row_key)) + '}'
+ align = ":<"
+ format_string += (
+ " {" + str(field_num) + align + str(len(row_key)) + "}"
+ )
+ dash_format_string += (
+ " {" + str(field_num) + ":->" + str(len(row_key)) + "}"
+ )
report_width += 1 + len(row_key)
col_names.append(row_key.title())
except StopIteration:
pass
num_fields = field_num + 1
- totals_line_fmt = '{0:=<' + str(report_width) + '}'
+ totals_line_fmt = "{0:=<" + str(report_width) + "}"
buffer += format_string.format(*col_names) + "\n"
- buffer += dash_format_string.format(*([''] * num_fields)) + "\n"
+ buffer += dash_format_string.format(*([""] * num_fields)) + "\n"
for row_key, value in self.__table.items():
buffer += format_string.format(row_key, *value.values()) + "\n"
- buffer += totals_line_fmt.format('') + "\n"
- buffer += format_string.format('Totals',
- *self.__totals_line.values()) + "\n"
+ buffer += totals_line_fmt.format("") + "\n"
+ buffer += (
+ format_string.format("Totals", *self.__totals_line.values()) + "\n"
+ )
return buffer
diff --git a/lib/tftp_update_utils.py b/lib/tftp_update_utils.py
index 266e774..8e389f8 100644
--- a/lib/tftp_update_utils.py
+++ b/lib/tftp_update_utils.py
@@ -4,10 +4,9 @@
This module contains functions for tftp update.
"""
-from robot.libraries.BuiltIn import BuiltIn
-
-import state as st
import gen_print as gp
+import state as st
+from robot.libraries.BuiltIn import BuiltIn
def get_pre_reboot_state():
@@ -18,7 +17,7 @@
global state
- req_states = ['epoch_seconds'] + st.default_req_states
+ req_states = ["epoch_seconds"] + st.default_req_states
gp.qprint_timen("Get system state.")
state = st.get_state(req_states=req_states, quiet=0)
@@ -49,4 +48,6 @@
gp.qprintn()
if wait_state_check:
- st.wait_state(st.standby_match_state, wait_time="10 mins", interval="10 seconds")
+ st.wait_state(
+ st.standby_match_state, wait_time="10 mins", interval="10 seconds"
+ )
diff --git a/lib/utilities.py b/lib/utilities.py
index 1599ffb..96421e8 100755
--- a/lib/utilities.py
+++ b/lib/utilities.py
@@ -4,9 +4,10 @@
Generic utility functions.
"""
import imp
-import string
import random
+import string
import subprocess
+
from robot.libraries.BuiltIn import BuiltIn
from robot.utils import DotDict
@@ -16,8 +17,12 @@
Return random mac address in the following format.
Example: 00:01:6C:80:02:78
"""
- return ":".join(map(lambda x: "%02x" % x, (random.randint(0x00, 0xff)
- for _ in range(6))))
+ return ":".join(
+ map(
+ lambda x: "%02x" % x,
+ (random.randint(0x00, 0xFF) for _ in range(6)),
+ )
+ )
def random_ip():
@@ -25,19 +30,17 @@
Return random ip address in the following format.
Example: 9.3.128.100
"""
- return ".".join(map(str, (random.randint(0, 255)
- for _ in range(4))))
+ return ".".join(map(str, (random.randint(0, 255) for _ in range(4))))
def get_sensor(module_name, value):
r"""
Return sensor matched ID name.
"""
- m = imp.load_source('module.name', module_name)
+ m = imp.load_source("module.name", module_name)
- for i in m.ID_LOOKUP['SENSOR']:
-
- if m.ID_LOOKUP['SENSOR'][i] == value:
+ for i in m.ID_LOOKUP["SENSOR"]:
+ if m.ID_LOOKUP["SENSOR"][i] == value:
return i
return 0xFF
@@ -47,13 +50,12 @@
r"""
Return sensor matched ID name from inventory.
"""
- m = imp.load_source('module.name', module_name)
+ m = imp.load_source("module.name", module_name)
- value = string.replace(value, m.INVENTORY_ROOT, '<inventory_root>')
+ value = string.replace(value, m.INVENTORY_ROOT, "<inventory_root>")
- for i in m.ID_LOOKUP['SENSOR']:
-
- if m.ID_LOOKUP['SENSOR'][i] == value:
+ for i in m.ID_LOOKUP["SENSOR"]:
+ if m.ID_LOOKUP["SENSOR"][i] == value:
return i
return 0xFF
@@ -73,11 +75,11 @@
"""
inventory_list = []
- m = imp.load_source('module.name', module_name)
+ m = imp.load_source("module.name", module_name)
- for i in m.ID_LOOKUP['FRU']:
- s = m.ID_LOOKUP['FRU'][i]
- s = s.replace('<inventory_root>', m.INVENTORY_ROOT)
+ for i in m.ID_LOOKUP["FRU"]:
+ s = m.ID_LOOKUP["FRU"][i]
+ s = s.replace("<inventory_root>", m.INVENTORY_ROOT)
inventory_list.append(s)
return inventory_list
@@ -96,11 +98,11 @@
Return FRU URI(s) list of a given type from inventory.
"""
inventory_list = []
- m = imp.load_source('module.name', module_name)
+ m = imp.load_source("module.name", module_name)
for i in m.FRU_INSTANCES.keys():
- if m.FRU_INSTANCES[i]['fru_type'] == fru:
- s = i.replace('<inventory_root>', m.INVENTORY_ROOT)
+ if m.FRU_INSTANCES[i]["fru_type"] == fru:
+ s = i.replace("<inventory_root>", m.INVENTORY_ROOT)
inventory_list.append(s)
return inventory_list
@@ -119,13 +121,13 @@
Return VPD URI(s) list of a FRU type from inventory.
"""
inventory_list = []
- m = imp.load_source('module.name', module_name)
+ m = imp.load_source("module.name", module_name)
- for i in m.ID_LOOKUP['FRU_STR']:
- x = m.ID_LOOKUP['FRU_STR'][i]
+ for i in m.ID_LOOKUP["FRU_STR"]:
+ x = m.ID_LOOKUP["FRU_STR"][i]
- if m.FRU_INSTANCES[x]['fru_type'] == fru:
- s = x.replace('<inventory_root>', m.INVENTORY_ROOT)
+ if m.FRU_INSTANCES[x]["fru_type"] == fru:
+ s = x.replace("<inventory_root>", m.INVENTORY_ROOT)
inventory_list.append(s)
return inventory_list
@@ -142,7 +144,7 @@
r"""
Python main func call.
"""
- print(get_vpd_inventory_list('../data/Palmetto.py', 'DIMM'))
+ print(get_vpd_inventory_list("../data/Palmetto.py", "DIMM"))
if __name__ == "__main__":
@@ -187,15 +189,19 @@
# Run the mtr command. Exclude the header line. Trim leading space from
# each line. Change all multiple spaces delims to single space delims.
- cmd_buf = "mtr --report " + host +\
- " | tail -n +2 | sed -r -e 's/^[ ]+//g' -e 's/[ ]+/ /g'"
- sub_proc = subprocess.Popen(cmd_buf, shell=True, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
+ cmd_buf = (
+ "mtr --report "
+ + host
+ + " | tail -n +2 | sed -r -e 's/^[ ]+//g' -e 's/[ ]+/ /g'"
+ )
+ sub_proc = subprocess.Popen(
+ cmd_buf, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+ )
out_buf, err_buf = sub_proc.communicate()
shell_rc = sub_proc.returncode
# Split the output by line.
- rows = out_buf.rstrip('\n').split("\n")
+ rows = out_buf.rstrip("\n").split("\n")
# Initialize report dictionary.
report = DotDict()
@@ -205,16 +211,16 @@
row_list = row.split(" ")
# Create dictionary for the row.
row = DotDict()
- row['row_num'] = row_list[0].rstrip('.')
- row['host'] = row_list[1]
- row['loss'] = row_list[2].rstrip('%')
- row['snt'] = row_list[3]
- row['last'] = row_list[4]
- row['avg'] = row_list[5]
- row['best'] = row_list[6]
- row['wrst'] = row_list[7]
- row['stdev'] = row_list[8]
- report[row['host']] = row
+ row["row_num"] = row_list[0].rstrip(".")
+ row["host"] = row_list[1]
+ row["loss"] = row_list[2].rstrip("%")
+ row["snt"] = row_list[3]
+ row["last"] = row_list[4]
+ row["avg"] = row_list[5]
+ row["best"] = row_list[6]
+ row["wrst"] = row_list[7]
+ row["stdev"] = row_list[8]
+ report[row["host"]] = row
# Return the full report as dictionary of dictionaries.
return report
@@ -296,8 +302,8 @@
Input string 0a 01
Return string 0x0a 0x01
"""
- prefix_string = ''
+ prefix_string = ""
data_list = string.strip().split(" ")
for item in data_list:
- prefix_string += prefix + item + ' '
+ prefix_string += prefix + item + " "
return prefix_string.strip()
diff --git a/lib/utils.py b/lib/utils.py
index 59d63e3..c8971b1 100644
--- a/lib/utils.py
+++ b/lib/utils.py
@@ -4,15 +4,17 @@
Companion file to utils.robot.
"""
-import os
-import json
import collections
+import json
+import os
+
+import bmc_ssh_utils as bsu
import gen_print as gp
import gen_robot_keyword as grk
-import bmc_ssh_utils as bsu
import var_funcs as vf
-from robot.libraries.BuiltIn import BuiltIn
from robot.libraries import DateTime
+from robot.libraries.BuiltIn import BuiltIn
+
try:
from robot.utils import DotDict
except ImportError:
@@ -47,12 +49,12 @@
"""
# Retrieve global variables.
- power_policy_setup = \
- int(BuiltIn().get_variable_value("${power_policy_setup}",
- default=0))
- bmc_power_policy_method = \
- BuiltIn().get_variable_value("${bmc_power_policy_method}",
- default=0)
+ power_policy_setup = int(
+ BuiltIn().get_variable_value("${power_policy_setup}", default=0)
+ )
+ bmc_power_policy_method = BuiltIn().get_variable_value(
+ "${bmc_power_policy_method}", default=0
+ )
gp.dpvar(power_policy_setup)
# If this function has already been run once, we need not continue.
@@ -65,25 +67,27 @@
# determine what it should be.
if bmc_power_policy_method == "":
status, ret_values = grk.run_key_u("New Get Power Policy", ignore=1)
- if status == 'PASS':
- bmc_power_policy_method = 'New'
+ if status == "PASS":
+ bmc_power_policy_method = "New"
else:
- bmc_power_policy_method = 'Old'
+ bmc_power_policy_method = "Old"
gp.qpvar(bmc_power_policy_method)
# For old style, we will rewrite these global variable settings to old
# values.
if bmc_power_policy_method == "Old":
- BuiltIn().set_global_variable("${RESTORE_LAST_STATE}",
- "RESTORE_LAST_STATE")
- BuiltIn().set_global_variable("${ALWAYS_POWER_ON}",
- "ALWAYS_POWER_ON")
- BuiltIn().set_global_variable("${ALWAYS_POWER_OFF}",
- "ALWAYS_POWER_OFF")
+ BuiltIn().set_global_variable(
+ "${RESTORE_LAST_STATE}", "RESTORE_LAST_STATE"
+ )
+ BuiltIn().set_global_variable("${ALWAYS_POWER_ON}", "ALWAYS_POWER_ON")
+ BuiltIn().set_global_variable(
+ "${ALWAYS_POWER_OFF}", "ALWAYS_POWER_OFF"
+ )
# Set global variables to control subsequent calls to this function.
- BuiltIn().set_global_variable("${bmc_power_policy_method}",
- bmc_power_policy_method)
+ BuiltIn().set_global_variable(
+ "${bmc_power_policy_method}", bmc_power_policy_method
+ )
BuiltIn().set_global_variable("${power_policy_setup}", 1)
@@ -103,14 +107,16 @@
method of storing the policy value.
"""
- valid_power_policy_vars = \
- BuiltIn().get_variable_value("${valid_power_policy_vars}")
+ valid_power_policy_vars = BuiltIn().get_variable_value(
+ "${valid_power_policy_vars}"
+ )
if policy not in valid_power_policy_vars:
return policy
- status, ret_values = grk.run_key_u("Get Variable Value ${" + policy + "}",
- quiet=1)
+ status, ret_values = grk.run_key_u(
+ "Get Variable Value ${" + policy + "}", quiet=1
+ )
return ret_values
@@ -132,7 +138,7 @@
[rtc_in_local_tz]: no
"""
- out_buf, stderr, rc = bsu.bmc_execute_command('timedatectl')
+ out_buf, stderr, rc = bsu.bmc_execute_command("timedatectl")
# Example of output returned by call to timedatectl:
# Local time: Fri 2017-11-03 15:27:56 UTC
# Universal time: Fri 2017-11-03 15:27:56 UTC
@@ -157,40 +163,41 @@
result_time_dict[key] = value
if not key.endswith("_time"):
continue
- result_time_dict[key + '_seconds'] = \
- int(DateTime.convert_date(value, result_format='epoch'))
+ result_time_dict[key + "_seconds"] = int(
+ DateTime.convert_date(value, result_format="epoch")
+ )
return result_time_dict
def get_bmc_df(df_parm_string=""):
r"""
- Get df report from BMC and return as a report "object".
+ Get df report from BMC and return as a report "object".
- A df report object is a list where each entry is a dictionary whose keys
- are the field names from the first entry in report_list.
+ A df report object is a list where each entry is a dictionary whose keys
+ are the field names from the first entry in report_list.
- Example df report object:
+ Example df report object:
- df_report:
- df_report[0]:
- [filesystem]: dev
- [1k-blocks]: 247120
- [used]: 0
- [available]: 247120
- [use%]: 0%
- [mounted]: /dev
- df_report[1]:
- [filesystem]: dev
- [1k-blocks]: 247120
- [used]: 0
- [available]: 247120
- [use%]: 0%
- [mounted]: /dev
+ df_report:
+ df_report[0]:
+ [filesystem]: dev
+ [1k-blocks]: 247120
+ [used]: 0
+ [available]: 247120
+ [use%]: 0%
+ [mounted]: /dev
+ df_report[1]:
+ [filesystem]: dev
+ [1k-blocks]: 247120
+ [used]: 0
+ [available]: 247120
+ [use%]: 0%
+ [mounted]: /dev
-. Description of argument(s):
- df_parm_string A string containing valid df command parms (e.g.
- "-h /var").
+ . Description of argument(s):
+ df_parm_string A string containing valid df command parms (e.g.
+ "-h /var").
"""
out_buf, stderr, rc = bsu.bmc_execute_command("df " + df_parm_string)
@@ -210,11 +217,11 @@
def compare_mac_address(sys_mac_addr, user_mac_addr):
r"""
- Return 1 if the MAC value matched, otherwise 0.
+ Return 1 if the MAC value matched, otherwise 0.
-. Description of argument(s):
- sys_mac_addr A valid system MAC string (e.g. "70:e2:84:14:2a:08")
- user_mac_addr A user provided MAC string (e.g. "70:e2:84:14:2a:08")
+ . Description of argument(s):
+ sys_mac_addr A valid system MAC string (e.g. "70:e2:84:14:2a:08")
+ user_mac_addr A user provided MAC string (e.g. "70:e2:84:14:2a:08")
"""
index = 0
@@ -295,8 +302,11 @@
# Using sed and tail to massage the data a bit before running
# key_value_outbuf_to_dict.
- cmd_buf = "ethtool " + interface_name +\
- " | sed -re 's/(.* link modes:)(.*)/\\1\\n\\2/g' | tail -n +2"
+ cmd_buf = (
+ "ethtool "
+ + interface_name
+ + " | sed -re 's/(.* link modes:)(.*)/\\1\\n\\2/g' | tail -n +2"
+ )
stdout, stderr, rc = bsu.os_execute_command(cmd_buf)
result = vf.key_value_outbuf_to_dict(stdout, process_indent=1, strip=" \t")
@@ -338,7 +348,7 @@
[openbmc_target_machine]: witherspoon
"""
- out_buf, stderr, rc = bsu.bmc_execute_command('cat /etc/os-release')
+ out_buf, stderr, rc = bsu.bmc_execute_command("cat /etc/os-release")
return vf.key_value_outbuf_to_dict(out_buf, delim="=", strip='"')
@@ -371,7 +381,7 @@
[redhat_support_product_version]: 7.6
"""
- out_buf, stderr, rc = bsu.os_execute_command('cat /etc/os-release')
+ out_buf, stderr, rc = bsu.os_execute_command("cat /etc/os-release")
return vf.key_value_outbuf_to_dict(out_buf, delim="=", strip='"')
@@ -386,10 +396,12 @@
"""
# Default print_out to 1.
- if 'print_out' not in bsu_options:
- bsu_options['print_out'] = 1
+ if "print_out" not in bsu_options:
+ bsu_options["print_out"] = 1
- stdout, stderr, rc = bsu.bmc_execute_command('pdbg ' + option_string, **bsu_options)
+ stdout, stderr, rc = bsu.bmc_execute_command(
+ "pdbg " + option_string, **bsu_options
+ )
return stdout
@@ -406,8 +418,8 @@
"""
# Default print_out to 1.
- if 'print_out' not in bsu_options:
- bsu_options['print_out'] = 1
+ if "print_out" not in bsu_options:
+ bsu_options["print_out"] = 1
stdout, stderr, rc = bsu.bmc_execute_command(option_string, **bsu_options)
return stdout
@@ -423,7 +435,7 @@
"""
n = int(n)
- data = [stri[index: index + n] for index in range(0, len(stri), n)]
+ data = [stri[index : index + n] for index in range(0, len(stri), n)]
return data
@@ -469,6 +481,6 @@
returns decoded string of encoded byte.
"""
- encoded_string = input.encode('ascii', 'ignore')
+ encoded_string = input.encode("ascii", "ignore")
decoded_string = encoded_string.decode()
return decoded_string
diff --git a/lib/utils_files.py b/lib/utils_files.py
index 0b19432..70e0887 100755
--- a/lib/utils_files.py
+++ b/lib/utils_files.py
@@ -4,22 +4,21 @@
This module contains file functions such as file_diff.
"""
-import time
import os
import re
+import time
+
from gen_cmd import cmd_fnc_u
+
robot_env = 1
try:
- from robot.libraries.BuiltIn import BuiltIn
from robot.libraries import DateTime
+ from robot.libraries.BuiltIn import BuiltIn
except ImportError:
robot_env = 0
-def file_diff(file1_path,
- file2_path,
- diff_file_path,
- skip_string):
+def file_diff(file1_path, file2_path, diff_file_path, skip_string):
r"""
Compare the contents of two text files. The comparison uses the Unix
'diff' command. Differences can be selectively ignored by use of
@@ -59,12 +58,12 @@
now = time.strftime("%Y-%m-%d %H:%M:%S")
- if (not os.path.exists(file1_path) or (not os.path.exists(file2_path))):
+ if not os.path.exists(file1_path) or (not os.path.exists(file2_path)):
return INPUT_FILE_DOES_NOT_EXIST
try:
- with open(file1_path, 'r') as file:
+ with open(file1_path, "r") as file:
initial = file.readlines()
- with open(file2_path, 'r') as file:
+ with open(file2_path, "r") as file:
final = file.readlines()
except IOError:
file.close()
@@ -79,24 +78,30 @@
if len(initial) < min_file_byte_size:
return INPUT_FILE_MALFORMED
- if (initial == final):
+ if initial == final:
try:
- file = open(diff_file_path, 'w')
+ file = open(diff_file_path, "w")
except IOError:
file.close()
- line_to_print = "Specified skip (ignore) string = " + \
- skip_string + "\n\n"
+ line_to_print = (
+ "Specified skip (ignore) string = " + skip_string + "\n\n"
+ )
file.write(line_to_print)
- line_to_print = now + " found no difference between file " + \
- file1_path + " and " + \
- file2_path + "\n"
+ line_to_print = (
+ now
+ + " found no difference between file "
+ + file1_path
+ + " and "
+ + file2_path
+ + "\n"
+ )
file.write(line_to_print)
file.close()
return FILES_MATCH
# Find the differences and write difference report to diff_file_path file
try:
- file = open(diff_file_path, 'w')
+ file = open(diff_file_path, "w")
except IOError:
file.close()
return IO_EXCEPTION_WRITING_FILE
@@ -105,9 +110,10 @@
# if skip_string="size,capacity", command = 'diff -I "size"
# -I "capacity" file1_path file2_path'.
skip_list = filter(None, re.split(r"[ ]*,[ ]*", skip_string))
- ignore_string = ' '.join([("-I " + '"' + x + '"') for x in skip_list])
- command = ' '.join(filter(None, ["diff", ignore_string, file1_path,
- file2_path]))
+ ignore_string = " ".join([("-I " + '"' + x + '"') for x in skip_list])
+ command = " ".join(
+ filter(None, ["diff", ignore_string, file1_path, file2_path])
+ )
line_to_print = now + " " + command + "\n"
file.write(line_to_print)
diff --git a/lib/var_funcs.py b/lib/var_funcs.py
index fdde68e..0009b54 100644
--- a/lib/var_funcs.py
+++ b/lib/var_funcs.py
@@ -14,9 +14,9 @@
import collections
-import gen_print as gp
-import gen_misc as gm
import func_args as fa
+import gen_misc as gm
+import gen_print as gp
def create_var_dict(*args):
@@ -57,13 +57,15 @@
return result_dict
-default_record_delim = ':'
-default_key_val_delim = '.'
+default_record_delim = ":"
+default_key_val_delim = "."
-def join_dict(dict,
- record_delim=default_record_delim,
- key_val_delim=default_key_val_delim):
+def join_dict(
+ dict,
+ record_delim=default_record_delim,
+ key_val_delim=default_key_val_delim,
+):
r"""
Join a dictionary's keys and values into a string and return the string.
@@ -87,14 +89,17 @@
str1: first_name.Steve:last_name.Smith
"""
- format_str = '%s' + key_val_delim + '%s'
- return record_delim.join([format_str % (key, value) for (key, value) in
- dict.items()])
+ format_str = "%s" + key_val_delim + "%s"
+ return record_delim.join(
+ [format_str % (key, value) for (key, value) in dict.items()]
+ )
-def split_to_dict(string,
- record_delim=default_record_delim,
- key_val_delim=default_key_val_delim):
+def split_to_dict(
+ string,
+ record_delim=default_record_delim,
+ key_val_delim=default_key_val_delim,
+):
r"""
Split a string into a dictionary and return it.
@@ -136,9 +141,7 @@
return result_dict
-def create_file_path(file_name_dict,
- dir_path="/tmp/",
- file_suffix=""):
+def create_file_path(file_name_dict, dir_path="/tmp/", file_suffix=""):
r"""
Create a file path using the given parameters and return it.
@@ -187,18 +190,14 @@
dir_path = os.path.dirname(file_path) + os.sep
file_path = os.path.basename(file_path)
- result_dict['dir_path'] = dir_path
+ result_dict["dir_path"] = dir_path
result_dict.update(split_to_dict(file_path))
return result_dict
-def parse_key_value(string,
- delim=":",
- strip=" ",
- to_lower=1,
- underscores=1):
+def parse_key_value(string, delim=":", strip=" ", to_lower=1, underscores=1):
r"""
Parse a key/value string and return as a key/value tuple.
@@ -252,9 +251,7 @@
return key, value
-def key_value_list_to_dict(key_value_list,
- process_indent=0,
- **args):
+def key_value_list_to_dict(key_value_list, process_indent=0, **args):
r"""
Convert a list containing key/value strings or tuples to a dictionary and return it.
@@ -371,8 +368,9 @@
if len(sub_list) > 0:
if any(delim in word for word in sub_list):
# If delim is found anywhere in the sub_list, we'll process as a sub-dictionary.
- result_dict[parent_key] = key_value_list_to_dict(sub_list,
- **args)
+ result_dict[parent_key] = key_value_list_to_dict(
+ sub_list, **args
+ )
else:
result_dict[parent_key] = list(map(str.strip, sub_list))
del sub_list[:]
@@ -394,8 +392,7 @@
return result_dict
-def key_value_outbuf_to_dict(out_buf,
- **args):
+def key_value_outbuf_to_dict(out_buf, **args):
r"""
Convert a buffer with a key/value string on each line to a dictionary and return it.
@@ -438,8 +435,7 @@
return key_value_list_to_dict(key_var_list, **args)
-def key_value_outbuf_to_dicts(out_buf,
- **args):
+def key_value_outbuf_to_dicts(out_buf, **args):
r"""
Convert a buffer containing multiple sections with key/value strings on each line to a list of
dictionaries and return it.
@@ -507,11 +503,13 @@
**args Arguments to be interpreted by parse_key_value. (See docstring of
parse_key_value function for details).
"""
- return [key_value_outbuf_to_dict(x, **args) for x in re.split('\n[\n]+', out_buf)]
+ return [
+ key_value_outbuf_to_dict(x, **args)
+ for x in re.split("\n[\n]+", out_buf)
+ ]
def create_field_desc_regex(line):
-
r"""
Create a field descriptor regular expression based on the input line and return it.
@@ -567,14 +565,12 @@
regexes.append("(.{" + str(len(descriptor)) + "})")
# Join the regexes list into a regex string.
- field_desc_regex = ' '.join(regexes)
+ field_desc_regex = " ".join(regexes)
return field_desc_regex
-def list_to_report(report_list,
- to_lower=1,
- field_delim=None):
+def list_to_report(report_list, to_lower=1, field_delim=None):
r"""
Convert a list containing report text lines to a report "object" and return it.
@@ -660,8 +656,9 @@
else:
# Pad the line with spaces on the right to facilitate processing with field_desc_regex.
header_line = pad_format_string % header_line
- columns = list(map(str.strip,
- re.findall(field_desc_regex, header_line)[0]))
+ columns = list(
+ map(str.strip, re.findall(field_desc_regex, header_line)[0])
+ )
report_obj = []
for report_line in report_list[1:]:
@@ -670,8 +667,9 @@
else:
# Pad the line with spaces on the right to facilitate processing with field_desc_regex.
report_line = pad_format_string % report_line
- line = list(map(str.strip,
- re.findall(field_desc_regex, report_line)[0]))
+ line = list(
+ map(str.strip, re.findall(field_desc_regex, report_line)[0])
+ )
try:
line_dict = collections.OrderedDict(zip(columns, line))
except AttributeError:
@@ -681,8 +679,7 @@
return report_obj
-def outbuf_to_report(out_buf,
- **args):
+def outbuf_to_report(out_buf, **args):
r"""
Convert a text buffer containing report lines to a report "object" and return it.
@@ -825,8 +822,11 @@
if len(struct_key_values) == 0:
return False
if regex:
- matches = [x for x in struct_key_values
- if re.search(match_value, str(x))]
+ matches = [
+ x
+ for x in struct_key_values
+ if re.search(match_value, str(x))
+ ]
if not matches:
return False
elif match_value not in struct_key_values:
diff --git a/lib/var_stack.py b/lib/var_stack.py
index 3ea3813..77cf4a0 100644
--- a/lib/var_stack.py
+++ b/lib/var_stack.py
@@ -4,9 +4,9 @@
Define the var_stack class.
"""
-import sys
import collections
import copy
+import sys
try:
from robot.utils import DotDict
@@ -17,7 +17,6 @@
class var_stack:
-
r"""
Define the variable stack class.
@@ -65,8 +64,7 @@
[var1][0]: mike
"""
- def __init__(self,
- obj_name='var_stack'):
+ def __init__(self, obj_name="var_stack"):
r"""
Initialize a new object of this class type.
@@ -90,7 +88,7 @@
buffer += self.__obj_name + ":\n"
indent = 2
- buffer += gp.sprint_varx('stack_dict', self.__stack_dict, indent)
+ buffer += gp.sprint_varx("stack_dict", self.__stack_dict, indent)
return buffer
@@ -101,9 +99,7 @@
sys.stdout.write(self.sprint_obj())
- def push(self,
- var_value,
- var_name=""):
+ def push(self, var_value, var_name=""):
r"""
push the var_name/var_value pair onto the stack.
@@ -124,8 +120,7 @@
else:
self.__stack_dict[var_name] = copy.deepcopy([var_value])
- def pop(self,
- var_name=""):
+ def pop(self, var_name=""):
r"""
Pop the value for the given var_name from the stack and return it.
diff --git a/lib/vpd_utils.py b/lib/vpd_utils.py
index 03581bb..754d255 100644
--- a/lib/vpd_utils.py
+++ b/lib/vpd_utils.py
@@ -5,8 +5,9 @@
"""
import json
-import func_args as fa
+
import bmc_ssh_utils as bsu
+import func_args as fa
def vpdtool(option_string, **bsu_options):
@@ -44,12 +45,14 @@
"""
bsu_options = fa.args_to_objects(bsu_options)
- out_buf, stderr, rc = bsu.bmc_execute_command('vpd-tool ' + option_string, **bsu_options)
+ out_buf, stderr, rc = bsu.bmc_execute_command(
+ "vpd-tool " + option_string, **bsu_options
+ )
# Only return output if its not a VPD write command.
- if '-w' not in option_string:
+ if "-w" not in option_string:
out_buf = json.loads(out_buf)
- if '-r' in option_string:
+ if "-r" in option_string:
return out_buf
else:
return out_buf[0]
diff --git a/lib/wrap_utils.py b/lib/wrap_utils.py
index 231dff8..ebf4f61 100755
--- a/lib/wrap_utils.py
+++ b/lib/wrap_utils.py
@@ -7,10 +7,9 @@
"""
-def create_func_def_string(base_func_name,
- wrap_func_name,
- func_body_template,
- replace_dict):
+def create_func_def_string(
+ base_func_name, wrap_func_name, func_body_template, replace_dict
+):
r"""
Create and return a complete function definition as a string. The caller may run "exec" on the resulting
string to create the desired function.
@@ -41,10 +40,10 @@
func_def.insert(0, func_def_line)
# Make sure the replace_dict has a 'call_line'/call_line pair so that any '<call_line>' text gets
# replaced as intended.
- replace_dict['call_line'] = call_line
+ replace_dict["call_line"] = call_line
# Do the replacements.
for key, value in replace_dict.items():
func_def = [w.replace("<" + key + ">", value) for w in func_def]
- return '\n'.join(func_def) + "\n"
+ return "\n".join(func_def) + "\n"
diff --git a/robot_custom_rules.py b/robot_custom_rules.py
index b71b289..7c7ddee 100644
--- a/robot_custom_rules.py
+++ b/robot_custom_rules.py
@@ -3,18 +3,27 @@
# Installation : pip3 install --upgrade robotframework-lint
# Example usage: python3 -m rflint -rA robot_standards -R robot_custom_rules.py .
import re
-from rflint.common import SuiteRule, ERROR
+
+from rflint.common import ERROR, SuiteRule
class ExtendInvalidTable(SuiteRule):
- r'''
+ r"""
Extend robotframework-lint SuiteRule function for InvalidTable to allow a table section if it is
a section of comments. e.g "*** Comments ***"
- '''
+ """
severity = ERROR
def apply(self, suite):
for table in suite.tables:
- if (not re.match(r'^(settings?|metadata|(test )?cases?|(user )?keywords?|variables?|comments?)$',
- table.name, re.IGNORECASE)):
- self.report(suite, "Unknown table name '%s'" % table.name, table.linenumber)
+ if not re.match(
+ r"^(settings?|metadata|(test )?cases?|(user"
+ r" )?keywords?|variables?|comments?)$",
+ table.name,
+ re.IGNORECASE,
+ ):
+ self.report(
+ suite,
+ "Unknown table name '%s'" % table.name,
+ table.linenumber,
+ )
diff --git a/syslib/utils_keywords.py b/syslib/utils_keywords.py
index f2368b6..2a33736 100644
--- a/syslib/utils_keywords.py
+++ b/syslib/utils_keywords.py
@@ -6,18 +6,15 @@
"""
try:
- from robot.libraries.BuiltIn import BuiltIn
from robot.libraries import DateTime
+ from robot.libraries.BuiltIn import BuiltIn
except ImportError:
pass
-import time
import os
+import time
-def run_until_keyword_fails(retry,
- retry_interval,
- name,
- *args):
+def run_until_keyword_fails(retry, retry_interval, name, *args):
r"""
Execute a robot keyword repeatedly until it either fails or the timeout
value is exceeded.
diff --git a/syslib/utils_os.py b/syslib/utils_os.py
index 0c6131a..a1b4223 100755
--- a/syslib/utils_os.py
+++ b/syslib/utils_os.py
@@ -4,68 +4,67 @@
This file contains utilities associated with the host OS.
"""
-import sys
import os
+import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../lib"))
import bmc_ssh_utils # NOQA
-import var_funcs # NOQA
+import var_funcs # NOQA
def get_os_release_info(default_cmd="cat /etc/os-release"):
r"""
- Get os-release info and return it as a dictionary.
+ Get os-release info and return it as a dictionary.
- An example of the contents of /etc/os-release:
+ An example of the contents of /etc/os-release:
- NAME="Red Hat Enterprise Linux Server"
- VERSION="7.5 (Maipo)"
- ID="rhel"
- ID_LIKE="fedora"
- VARIANT="Server"
- VARIANT_ID="server"
- VERSION_ID="7.5"
- PRETTY_NAME="Red Hat Enterprise Linux Server 7.5 Beta (Maipo)"
- ANSI_COLOR="0;31"
- CPE_NAME="cpe:/o:redhat:enterprise_linux:7.5:beta:server"
- HOME_URL="https://www.redhat.com/"
- BUG_REPORT_URL="https://bugzilla.redhat.com/"
+ NAME="Red Hat Enterprise Linux Server"
+ VERSION="7.5 (Maipo)"
+ ID="rhel"
+ ID_LIKE="fedora"
+ VARIANT="Server"
+ VARIANT_ID="server"
+ VERSION_ID="7.5"
+ PRETTY_NAME="Red Hat Enterprise Linux Server 7.5 Beta (Maipo)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:redhat:enterprise_linux:7.5:beta:server"
+ HOME_URL="https://www.redhat.com/"
+ BUG_REPORT_URL="https://bugzilla.redhat.com/"
- REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7"
- REDHAT_BUGZILLA_PRODUCT_VERSION=7.5
- REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux"
- REDHAT_SUPPORT_PRODUCT_VERSION="7.5 Beta"
+ REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7"
+ REDHAT_BUGZILLA_PRODUCT_VERSION=7.5
+ REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux"
+ REDHAT_SUPPORT_PRODUCT_VERSION="7.5 Beta"
- For the data shown above, this function will return the following
- dictionary:
+ For the data shown above, this function will return the following
+ dictionary:
- result:
- [name]: Red Hat Enterprise Linux Server
- [version]: 7.5 (Maipo)
- [id]: rhel
- [id_like]: fedora
- [variant]: Server
- [variant_id]: server
- [version_id]: 7.5
- [pretty_name]: Red Hat Enterprise Linux Server 7.5 Beta (Maipo)
- [ansi_color]: 0;31
- [cpe_name]: cpe:/o:redhat:enterprise_linux:7.5:beta:server
- [home_url]: https://www.redhat.com/
- [bug_report_url]: https://bugzilla.redhat.com/
- [redhat_bugzilla_product]: Red Hat Enterprise Linux 7
- [redhat_bugzilla_product_version]: 7.5
- [redhat_support_product]: Red Hat Enterprise Linux
- [redhat_support_product_version]: 7.5 Beta
+ result:
+ [name]: Red Hat Enterprise Linux Server
+ [version]: 7.5 (Maipo)
+ [id]: rhel
+ [id_like]: fedora
+ [variant]: Server
+ [variant_id]: server
+ [version_id]: 7.5
+ [pretty_name]: Red Hat Enterprise Linux Server 7.5 Beta (Maipo)
+ [ansi_color]: 0;31
+ [cpe_name]: cpe:/o:redhat:enterprise_linux:7.5:beta:server
+ [home_url]: https://www.redhat.com/
+ [bug_report_url]: https://bugzilla.redhat.com/
+ [redhat_bugzilla_product]: Red Hat Enterprise Linux 7
+ [redhat_bugzilla_product_version]: 7.5
+ [redhat_support_product]: Red Hat Enterprise Linux
+ [redhat_support_product_version]: 7.5 Beta
-. Description of argument(s):
- default_cmd A string command to be executed (e.g cat /etc/os-release).
+ . Description of argument(s):
+ default_cmd A string command to be executed (e.g cat /etc/os-release).
"""
- stdout, stderr, rc =\
- bmc_ssh_utils.os_execute_command(default_cmd)
+ stdout, stderr, rc = bmc_ssh_utils.os_execute_command(default_cmd)
return var_funcs.key_value_outbuf_to_dict(stdout, delim="=", strip='"')
diff --git a/templates/python_pgm_template b/templates/python_pgm_template
index fcd0c6f..e2ded37 100644
--- a/templates/python_pgm_template
+++ b/templates/python_pgm_template
@@ -1,19 +1,17 @@
#!/usr/bin/env python3
-from gen_print import *
from gen_arg import *
+from gen_print import *
from gen_valid import *
parser = argparse.ArgumentParser(
- usage='%(prog)s [OPTIONS]',
+ usage="%(prog)s [OPTIONS]",
description="%(prog)s will...",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- prefix_chars='-+')
+ prefix_chars="-+",
+)
-parser.add_argument(
- '--whatever',
- default='',
- help='bla, bla.')
+parser.add_argument("--whatever", default="", help="bla, bla.")
# Populate stock_list with options we want.
stock_list = [("test_mode", 0), ("quiet", 0), ("debug", 0)]
@@ -42,7 +40,6 @@
def main():
-
gen_setup()
# Your code here.
diff --git a/tools/ct_metrics/gen_csv_results.py b/tools/ct_metrics/gen_csv_results.py
index 5f53e86..bbc66d9 100755
--- a/tools/ct_metrics/gen_csv_results.py
+++ b/tools/ct_metrics/gen_csv_results.py
@@ -6,25 +6,25 @@
http://robot-framework.readthedocs.io/en/3.0/autodoc/robot.result.html
"""
-from robot.api import ExecutionResult
-from robot.result.visitor import ResultVisitor
-from xml.etree import ElementTree
-
-import sys
-import os
-import getopt
import csv
-import robot.errors
+import datetime
+import getopt
+import os
import re
import stat
-import datetime
+import sys
+from xml.etree import ElementTree
+
+import robot.errors
+from robot.api import ExecutionResult
+from robot.result.visitor import ResultVisitor
# Remove the python library path to restore with local project path later.
save_path_0 = sys.path[0]
del sys.path[0]
sys.path.append(os.path.join(os.path.dirname(__file__), "../../lib"))
-from gen_arg import * # NOQA
+from gen_arg import * # NOQA
from gen_print import * # NOQA
from gen_valid import * # NOQA
@@ -33,7 +33,7 @@
this_program = sys.argv[0]
-info = " For more information: " + this_program + ' -h'
+info = " For more information: " + this_program + " -h"
if len(sys.argv) == 1:
print(info)
sys.exit(1)
@@ -41,64 +41,88 @@
parser = argparse.ArgumentParser(
usage=info,
- description="%(prog)s uses a robot framework API to extract test result\
- data from output.xml generated by robot tests. For more information on the\
- Robot Framework API, see\
- http://robot-framework.readthedocs.io/en/3.0/autodoc/robot.result.html",
+ description=(
+ "%(prog)s uses a robot framework API to extract test result data"
+ " from output.xml generated by robot tests. For more information on"
+ " the Robot Framework API, see "
+ " http://robot-framework.readthedocs.io/en/3.0/autodoc/robot.result.html"
+ ),
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- prefix_chars='-+')
+ prefix_chars="-+",
+)
parser.add_argument(
- '--source',
- '-s',
- help='The output.xml robot test result file path. This parameter is \
- required.')
+ "--source",
+ "-s",
+ help=(
+ "The output.xml robot test result file path. This parameter is "
+ " required."
+ ),
+)
parser.add_argument(
- '--dest',
- '-d',
- help='The directory path where the generated .csv files will go. This \
- parameter is required.')
+ "--dest",
+ "-d",
+ help=(
+ "The directory path where the generated .csv files will go. This "
+ " parameter is required."
+ ),
+)
parser.add_argument(
- '--version_id',
- help='Driver version of openbmc firmware which was used during test,\
- e.g. "v2.1-215-g6e7eacb". This parameter is required.')
+ "--version_id",
+ help=(
+ "Driver version of openbmc firmware which was used during test, "
+ ' e.g. "v2.1-215-g6e7eacb". This parameter is required.'
+ ),
+)
parser.add_argument(
- '--platform',
- help='OpenBMC platform which was used during test,\
- e.g. "Witherspoon". This parameter is required.')
+ "--platform",
+ help=(
+ "OpenBMC platform which was used during test, e.g."
+ ' "Witherspoon". This parameter is required.'
+ ),
+)
parser.add_argument(
- '--level',
- help='OpenBMC release level which was used during test,\
- e.g. "Master", "OBMC920". This parameter is required.')
+ "--level",
+ help=(
+ "OpenBMC release level which was used during test, e.g."
+ ' "Master", "OBMC920". This parameter is required.'
+ ),
+)
parser.add_argument(
- '--test_phase',
- help='Name of testing phase, e.g. "DVT", "SVT", etc.\
- This parameter is optional.',
- default="FVT")
+ "--test_phase",
+ help=(
+ 'Name of testing phase, e.g. "DVT", "SVT", etc. This'
+ " parameter is optional."
+ ),
+ default="FVT",
+)
parser.add_argument(
- '--subsystem',
- help='Name of the subsystem, e.g. "OPENBMC" etc.\
- This parameter is optional.',
- default="OPENBMC")
+ "--subsystem",
+ help=(
+ 'Name of the subsystem, e.g. "OPENBMC" etc. This parameter is'
+ " optional."
+ ),
+ default="OPENBMC",
+)
parser.add_argument(
- '--processor',
+ "--processor",
help='Name of processor, e.g. "P9". This parameter is optional.',
- default="OPENPOWER")
+ default="OPENPOWER",
+)
# Populate stock_list with options we want.
stock_list = [("test_mode", 0), ("quiet", 0), ("debug", 0)]
-def exit_function(signal_number=0,
- frame=None):
+def exit_function(signal_number=0, frame=None):
r"""
Execute whenever the program ends normally or with the signals that we
catch (i.e. TERM, INT).
@@ -111,8 +135,7 @@
qprint_pgm_footer()
-def signal_handler(signal_number,
- frame):
+def signal_handler(signal_number, frame):
r"""
Handle signals. Without a function to catch a SIGTERM or SIGINT, the
program would terminate immediately with return code 143 and without
@@ -146,8 +169,15 @@
return True
-def parse_output_xml(xml_file_path, csv_dir_path, version_id, platform, level,
- test_phase, processor):
+def parse_output_xml(
+ xml_file_path,
+ csv_dir_path,
+ version_id,
+ platform,
+ level,
+ test_phase,
+ processor,
+):
r"""
Parse the robot-generated output.xml file and extract various test
output data. Put the extracted information into a csv file in the "dest"
@@ -175,13 +205,19 @@
total_non_critical_failed = 0
result = ExecutionResult(xml_file_path)
- result.configure(stat_config={'suite_stat_level': 2,
- 'tag_stat_combine': 'tagANDanother'})
+ result.configure(
+ stat_config={
+ "suite_stat_level": 2,
+ "tag_stat_combine": "tagANDanother",
+ }
+ )
stats = result.statistics
print("--------------------------------------")
try:
- total_critical_tc = stats.total.critical.passed + stats.total.critical.failed
+ total_critical_tc = (
+ stats.total.critical.passed + stats.total.critical.failed
+ )
total_critical_passed = stats.total.critical.passed
total_critical_failed = stats.total.critical.failed
except AttributeError:
@@ -194,7 +230,9 @@
except AttributeError:
pass
- print("Total Test Count:\t %d" % (total_non_critical_tc + total_critical_tc))
+ print(
+ "Total Test Count:\t %d" % (total_non_critical_tc + total_critical_tc)
+ )
print("Total Critical Test Failed:\t %d" % total_critical_failed)
print("Total Critical Test Passed:\t %d" % total_critical_passed)
@@ -221,11 +259,11 @@
# Default Test data
l_test_type = test_phase
- l_pse_rel = 'Master'
+ l_pse_rel = "Master"
if level:
l_pse_rel = level
- l_env = 'HW'
+ l_env = "HW"
l_proc = processor
l_platform_type = ""
l_func_area = ""
@@ -252,14 +290,27 @@
if l_driver and l_platform_type:
print("Driver and system info set.")
else:
- print("Both driver and system info need to be set.\
- CSV file is not generated.")
+ print(
+ "Both driver and system info need to be set. CSV"
+ " file is not generated."
+ )
sys.exit()
# Default header
- l_header = ['test_start', 'test_end', 'subsys', 'test_type',
- 'test_result', 'test_name', 'pse_rel', 'driver',
- 'env', 'proc', 'platform_type', 'test_func_area']
+ l_header = [
+ "test_start",
+ "test_end",
+ "subsys",
+ "test_type",
+ "test_result",
+ "test_name",
+ "pse_rel",
+ "driver",
+ "env",
+ "proc",
+ "platform_type",
+ "test_func_area",
+ ]
l_csvlist.append(l_header)
@@ -274,11 +325,11 @@
for testcase in collectDataObj.testData:
# Functional Area: Suite Name
# Test Name: Test Case Name
- l_func_area = str(testcase.parent).split(' ', 1)[1]
+ l_func_area = str(testcase.parent).split(" ", 1)[1]
l_test_name = str(testcase)
# Test Result pass=0 fail=1
- if testcase.status == 'PASS':
+ if testcase.status == "PASS":
l_test_result = 0
else:
l_test_result = 1
@@ -289,18 +340,36 @@
# Data Sequence: test_start,test_end,subsys,test_type,
# test_result,test_name,pse_rel,driver,
# env,proc,platform_type,test_func_area,
- l_data = [l_stime, l_etime, subsystem, l_test_type, l_test_result,
- l_test_name, l_pse_rel, l_driver, l_env, l_proc,
- l_platform_type, l_func_area]
+ l_data = [
+ l_stime,
+ l_etime,
+ subsystem,
+ l_test_type,
+ l_test_result,
+ l_test_name,
+ l_pse_rel,
+ l_driver,
+ l_env,
+ l_proc,
+ l_platform_type,
+ l_func_area,
+ ]
l_csvlist.append(l_data)
# Open the file and write to the CSV file
l_file = open(l_csvfile, "w")
- l_writer = csv.writer(l_file, lineterminator='\n')
+ l_writer = csv.writer(l_file, lineterminator="\n")
l_writer.writerows(l_csvlist)
l_file.close()
# Set file permissions 666.
- perm = stat.S_IRUSR + stat.S_IWUSR + stat.S_IRGRP + stat.S_IWGRP + stat.S_IROTH + stat.S_IWOTH
+ perm = (
+ stat.S_IRUSR
+ + stat.S_IWUSR
+ + stat.S_IRGRP
+ + stat.S_IWGRP
+ + stat.S_IROTH
+ + stat.S_IWOTH
+ )
os.chmod(l_csvfile, perm)
@@ -336,19 +405,19 @@
bmc_version_id = ""
bmc_platform = ""
- with open(xml_file_path, 'rt') as output:
+ with open(xml_file_path, "rt") as output:
tree = ElementTree.parse(output)
- for node in tree.iter('msg'):
+ for node in tree.iter("msg"):
# /etc/os-release output is logged in the XML as msg
# Example: ${output} = VERSION_ID="v1.99.2-71-gbc49f79"
- if '${output} = VERSION_ID=' in node.text:
+ if "${output} = VERSION_ID=" in node.text:
# Get BMC version (e.g. v1.99.1-96-g2a46570)
bmc_version_id = str(node.text.split("VERSION_ID=")[1])[1:-1]
# Platform is logged in the XML as msg.
# Example: ${bmc_model} = Witherspoon BMC
- if '${bmc_model} = ' in node.text:
+ if "${bmc_model} = " in node.text:
bmc_platform = node.text.split(" = ")[1]
print_vars(bmc_version_id, bmc_platform)
@@ -356,7 +425,6 @@
def main():
-
if not gen_get_options(parser, stock_list):
return False
@@ -365,8 +433,9 @@
qprint_pgm_header()
- parse_output_xml(source, dest, version_id, platform, level,
- test_phase, processor)
+ parse_output_xml(
+ source, dest, version_id, platform, level, test_phase, processor
+ )
return True
diff --git a/tools/github_issues_to_csv b/tools/github_issues_to_csv
index c287f7c..ce6e1e2 100644
--- a/tools/github_issues_to_csv
+++ b/tools/github_issues_to_csv
@@ -8,10 +8,11 @@
import argparse
import csv
import getpass
+
import requests
auth = None
-states = 'all'
+states = "all"
def write_issues(response, csv_out):
@@ -22,36 +23,45 @@
if response.status_code != 200:
raise Exception(response.status_code)
for issue in response.json():
- if 'pull_request' not in issue:
- labels = ', '.join([lable['name'] for lable in issue['labels']])
+ if "pull_request" not in issue:
+ labels = ", ".join([lable["name"] for lable in issue["labels"]])
# Below lines to overcome "TypeError: 'NoneType' object has
# no attribute '__getitem__'"
- close_date = issue.get('closed_at')
+ close_date = issue.get("closed_at")
if close_date:
- close_date = issue.get('closed_at').split('T')[0]
+ close_date = issue.get("closed_at").split("T")[0]
- assignee_resp = issue.get('assignees', 'Not Assigned')
+ assignee_resp = issue.get("assignees", "Not Assigned")
if assignee_resp:
- owners = ','.join([assignee_login['login'] for
- assignee_login in assignee_resp])
+ owners = ",".join(
+ [
+ assignee_login["login"]
+ for assignee_login in assignee_resp
+ ]
+ )
else:
owners = "Not Assigned"
- milestone_resp = issue.get('milestone', 'Not Assigned')
+ milestone_resp = issue.get("milestone", "Not Assigned")
if milestone_resp:
- milestone_resp = milestone_resp['title'].encode('utf-8')
+ milestone_resp = milestone_resp["title"].encode("utf-8")
# Change the following line to write out additional fields
- csv_out.writerow([labels.encode('utf-8'),
- issue.get('title').encode('utf-8'),
- issue.get('state').encode('utf-8'),
- issue.get('created_at').split('T')[0],
- close_date,
- issue.get('html_url').encode('utf-8'),
- issue.get('user').get('login').encode('utf-8'),
- owners, milestone_resp])
+ csv_out.writerow(
+ [
+ labels.encode("utf-8"),
+ issue.get("title").encode("utf-8"),
+ issue.get("state").encode("utf-8"),
+ issue.get("created_at").split("T")[0],
+ close_date,
+ issue.get("html_url").encode("utf-8"),
+ issue.get("user").get("login").encode("utf-8"),
+ owners,
+ milestone_resp,
+ ]
+ )
def get_issues_from_github_to_csv(name, response):
@@ -65,36 +75,49 @@
print(states)
# Multiple requests are required if response is paged
- if 'link' in response.headers:
- pages = {rel[6:-1]: url[url.index('<') + 1:-1] for url, rel in
- (link.split(';') for link in
- response.headers['link'].split(','))}
- while 'last' in pages and 'next' in pages:
- pages = {rel[6:-1]: url[url.index('<') + 1:-1] for url, rel in
- (link.split(';') for link in
- response.headers['link'].split(','))}
- response = requests.get(pages['next'], auth=auth)
+ if "link" in response.headers:
+ pages = {
+ rel[6:-1]: url[url.index("<") + 1 : -1]
+ for url, rel in (
+ link.split(";") for link in response.headers["link"].split(",")
+ )
+ }
+ while "last" in pages and "next" in pages:
+ pages = {
+ rel[6:-1]: url[url.index("<") + 1 : -1]
+ for url, rel in (
+ link.split(";")
+ for link in response.headers["link"].split(",")
+ )
+ }
+ response = requests.get(pages["next"], auth=auth)
write_issues(response, csv_out)
- if pages['next'] == pages['last']:
+ if pages["next"] == pages["last"]:
break
-parser = argparse.ArgumentParser(description="Write GitHub repository issues "
- "to CSV file.")
+parser = argparse.ArgumentParser(
+ description="Write GitHub repository issues to CSV file."
+)
-parser.add_argument('username', nargs='?', help="GitHub user name, "
- "formatted as 'username'")
+parser.add_argument(
+ "username", nargs="?", help="GitHub user name, formatted as 'username'"
+)
-parser.add_argument('repositories', nargs='+', help="Repository names, "
- "formatted as 'basereponame/repo'")
+parser.add_argument(
+ "repositories",
+ nargs="+",
+ help="Repository names, formatted as 'basereponame/repo'",
+)
-parser.add_argument('--all', action='store_true', help="Returns both open "
- "and closed issues.")
+parser.add_argument(
+ "--all", action="store_true", help="Returns both open and closed issues."
+)
args = parser.parse_args()
if args.all:
- state = 'all'
+ state = "all"
username = args.username
@@ -105,16 +128,26 @@
# To set the csv filename
csvfilename = ""
for repository in args.repositories:
- csvfilename_temp = '{}'.format(repository.replace('/', '-'))
+ csvfilename_temp = "{}".format(repository.replace("/", "-"))
csvfilename = csvfilename + csvfilename_temp
-csvfilename = csvfilename + '-issues.csv'
-with open(csvfilename, 'w') as csvfileout:
+csvfilename = csvfilename + "-issues.csv"
+with open(csvfilename, "w") as csvfileout:
csv_out = csv.writer(csvfileout)
- csv_out.writerow(['Labels', 'Title', 'State', 'Open Date',
- 'Close Date', 'URL', 'Author', 'Assignees',
- 'Milestone'])
+ csv_out.writerow(
+ [
+ "Labels",
+ "Title",
+ "State",
+ "Open Date",
+ "Close Date",
+ "URL",
+ "Author",
+ "Assignees",
+ "Milestone",
+ ]
+ )
for repository in args.repositories:
- l_url = 'https://api.github.com/repos/{}/issues?state={}'
+ l_url = "https://api.github.com/repos/{}/issues?state={}"
l_url = l_url.format(repository, states)
response = requests.get(l_url, auth=auth)
write_issues(response, csv_out)