Revert "black: re-format"

This reverts commit 5731818de0ce446ceaafc7e75ae39da1b69942ae.

Signed-off-by: George Keishing <gkeishin@in.ibm.com>
Change-Id: Ie61cdc8c7f2825b0d9d66be87a6a3a058de2b372
diff --git a/lib/bmc_network_utils.py b/lib/bmc_network_utils.py
index 82b10fd..9a4a1dc 100644
--- a/lib/bmc_network_utils.py
+++ b/lib/bmc_network_utils.py
@@ -5,19 +5,18 @@
 
 """
 
-import collections
-import ipaddress
-import json
-import re
-import socket
-import subprocess
-
-import bmc_ssh_utils as bsu
+import gen_print as gp
 import gen_cmd as gc
 import gen_misc as gm
-import gen_print as gp
 import var_funcs as vf
+import collections
+import re
+import ipaddress
+import subprocess
+import socket
 from robot.libraries.BuiltIn import BuiltIn
+import json
+import bmc_ssh_utils as bsu
 
 ip_regex = r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}"
 
@@ -29,7 +28,7 @@
     """
 
     ip_list = list()
-    stdout = subprocess.check_output(["hostname", "--all-fqdns"], shell=True)
+    stdout = subprocess.check_output(['hostname', '--all-fqdns'], shell=True)
     host_fqdns = stdout.decode("utf-8").strip()
     ip_address = socket.gethostbyname(str(host_fqdns))
     ip_list.append(ip_address)
@@ -47,7 +46,7 @@
     """
 
     # IP address netmask format: '0.0.0.0/255.255.252.0'
-    return ipaddress.ip_network("0.0.0.0/" + netmask).prefixlen
+    return ipaddress.ip_network('0.0.0.0/' + netmask).prefixlen
 
 
 def get_netmask_address(prefix_len):
@@ -59,7 +58,7 @@
     """
 
     # IP address netmask format: '0.0.0.0/24'
-    return ipaddress.ip_network("0.0.0.0/" + prefix_len).netmask
+    return ipaddress.ip_network('0.0.0.0/' + prefix_len).netmask
 
 
 def parse_nping_output(output):
@@ -102,26 +101,20 @@
 
     lines = output.split("\n")
     # Obtain only the lines of interest.
-    lines = list(
-        filter(
-            lambda x: re.match(r"(Max rtt|Raw packets|TCP connection)", x),
-            lines,
-        )
-    )
+    lines = list(filter(lambda x: re.match(r"(Max rtt|Raw packets|TCP connection)", x),
+                        lines))
 
     key_value_list = []
     for line in lines:
         key_value_list += line.split("|")
     nping_result = vf.key_value_list_to_dict(key_value_list)
     # Extract percent_lost/percent_failed value from lost/failed field.
-    if "lost" in nping_result:
-        nping_result["percent_lost"] = float(
-            nping_result["lost"].split(" ")[-1].strip("()%")
-        )
+    if 'lost' in nping_result:
+        nping_result['percent_lost'] = \
+            float(nping_result['lost'].split(" ")[-1].strip("()%"))
     else:
-        nping_result["percent_failed"] = float(
-            nping_result["failed"].split(" ")[-1].strip("()%")
-        )
+        nping_result['percent_failed'] = \
+            float(nping_result['failed'].split(" ")[-1].strip("()%"))
     return nping_result
 
 
@@ -165,7 +158,7 @@
                                     command.  Do a 'man nping' for details.
     """
 
-    command_string = gc.create_command_string("nping", host, options)
+    command_string = gc.create_command_string('nping', host, options)
     rc, output = gc.shell_cmd(command_string, print_output=0, ignore_err=0)
     if parse_results:
         return parse_nping_output(output)
@@ -212,9 +205,7 @@
     (etc.)
     """
 
-    stdout, stderr, rc = bsu.bmc_execute_command(
-        "cat /usr/share/ipmi-providers/channel_config.json"
-    )
+    stdout, stderr, rc = bsu.bmc_execute_command("cat /usr/share/ipmi-providers/channel_config.json")
     return json.loads(stdout)
 
 
@@ -224,11 +215,7 @@
      this function.
     """
 
-    return vf.filter_struct(
-        get_channel_config(),
-        "[('medium_type', 'other-lan|lan-802.3')]",
-        regex=1,
-    )
+    return vf.filter_struct(get_channel_config(), "[('medium_type', 'other-lan|lan-802.3')]", regex=1)
 
 
 def get_channel_access_config(file_name):
diff --git a/lib/bmc_redfish.py b/lib/bmc_redfish.py
index 0e97af7..2ae405a 100644
--- a/lib/bmc_redfish.py
+++ b/lib/bmc_redfish.py
@@ -4,16 +4,17 @@
 See class prolog below for details.
 """
 
-import json
-import re
 import sys
+import re
+import json
+from redfish_plus import redfish_plus
+from robot.libraries.BuiltIn import BuiltIn
 from json.decoder import JSONDecodeError
+from redfish.rest.v1 import InvalidCredentialsError
 
 import func_args as fa
 import gen_print as gp
-from redfish.rest.v1 import InvalidCredentialsError
-from redfish_plus import redfish_plus
-from robot.libraries.BuiltIn import BuiltIn
+
 
 MTLS_ENABLED = BuiltIn().get_variable_value("${MTLS_ENABLED}")
 
@@ -43,7 +44,7 @@
         """
         self.__inited__ = False
         try:
-            if MTLS_ENABLED == "True":
+            if MTLS_ENABLED == 'True':
                 self.__inited__ = True
             else:
                 super(bmc_redfish, self).__init__(*args, **kwargs)
@@ -69,7 +70,7 @@
         kwargs                      See parent class method prolog for details.
         """
 
-        if MTLS_ENABLED == "True":
+        if MTLS_ENABLED == 'True':
             return None
         if not self.__inited__:
             message = "bmc_redfish.__init__() was never successfully run.  It "
@@ -81,12 +82,11 @@
         openbmc_password = BuiltIn().get_variable_value("${OPENBMC_PASSWORD}")
         username, args, kwargs = fa.pop_arg(openbmc_username, *args, **kwargs)
         password, args, kwargs = fa.pop_arg(openbmc_password, *args, **kwargs)
-        auth, args, kwargs = fa.pop_arg("session", *args, **kwargs)
+        auth, args, kwargs = fa.pop_arg('session', *args, **kwargs)
 
         try:
-            super(bmc_redfish, self).login(
-                username, password, auth, *args, **kwargs
-            )
+            super(bmc_redfish, self).login(username, password, auth,
+                                           *args, **kwargs)
         # Handle InvalidCredentialsError.
         # (raise redfish.rest.v1.InvalidCredentialsError if not [200, 201, 202, 204])
         except InvalidCredentialsError:
@@ -96,9 +96,8 @@
             e_message = "Re-try login due to exception and "
             e_message += "it is likely error response from server side."
             BuiltIn().log_to_console(e_message)
-            super(bmc_redfish, self).login(
-                username, password, auth, *args, **kwargs
-            )
+            super(bmc_redfish, self).login(username, password, auth,
+                                           *args, **kwargs)
         # Handle JSONDecodeError and others.
         except JSONDecodeError:
             except_type, except_value, except_traceback = sys.exc_info()
@@ -107,9 +106,8 @@
             e_message = "Re-try login due to JSONDecodeError exception and "
             e_message += "it is likely error response from server side."
             BuiltIn().log_to_console(e_message)
-            super(bmc_redfish, self).login(
-                username, password, auth, *args, **kwargs
-            )
+            super(bmc_redfish, self).login(username, password, auth,
+                                           *args, **kwargs)
         except ValueError:
             except_type, except_value, except_traceback = sys.exc_info()
             BuiltIn().log_to_console(str(except_type))
@@ -118,7 +116,8 @@
             BuiltIn().log_to_console(e_message)
 
     def logout(self):
-        if MTLS_ENABLED == "True":
+
+        if MTLS_ENABLED == 'True':
             return None
         else:
             super(bmc_redfish, self).logout()
@@ -151,7 +150,7 @@
         """
 
         resp = self.get(*args, **kwargs)
-        return resp.dict if hasattr(resp, "dict") else {}
+        return resp.dict if hasattr(resp, 'dict') else {}
 
     def get_attribute(self, path, attribute, default=None, *args, **kwargs):
         r"""
@@ -184,9 +183,8 @@
         kwargs                      See parent class get() prolog for details.
         """
 
-        return self.get_properties(path, *args, **kwargs).get(
-            attribute, default
-        )
+        return self.get_properties(path, *args, **kwargs).get(attribute,
+                                                              default)
 
     def get_session_info(self):
         r"""
@@ -196,9 +194,7 @@
 
         return self.get_session_key(), self.get_session_location()
 
-    def enumerate(
-        self, resource_path, return_json=1, include_dead_resources=False
-    ):
+    def enumerate(self, resource_path, return_json=1, include_dead_resources=False):
         r"""
         Perform a GET enumerate request and return available resource paths.
 
@@ -228,61 +224,38 @@
                 # Example: '/redfish/v1/JsonSchemas/' and sub resources.
                 #          '/redfish/v1/SessionService'
                 #          '/redfish/v1/Managers/bmc#/Oem'
-                if (
-                    ("JsonSchemas" in resource)
-                    or ("SessionService" in resource)
-                    or ("#" in resource)
-                ):
+                if ('JsonSchemas' in resource) or ('SessionService' in resource) or ('#' in resource):
                     continue
 
-                self._rest_response_ = self.get(
-                    resource, valid_status_codes=[200, 404, 500]
-                )
+                self._rest_response_ = self.get(resource, valid_status_codes=[200, 404, 500])
                 # Enumeration is done for available resources ignoring the ones for which response is not
                 # obtained.
                 if self._rest_response_.status != 200:
                     if include_dead_resources:
                         try:
-                            dead_resources[self._rest_response_.status].append(
-                                resource
-                            )
+                            dead_resources[self._rest_response_.status].append(resource)
                         except KeyError:
-                            dead_resources[self._rest_response_.status] = [
-                                resource
-                            ]
+                            dead_resources[self._rest_response_.status] = [resource]
                     continue
                 self.walk_nested_dict(self._rest_response_.dict, url=resource)
 
             enumerated_resources.update(set(resources_to_be_enumerated))
-            resources_to_be_enumerated = tuple(
-                self.__pending_enumeration - enumerated_resources
-            )
+            resources_to_be_enumerated = tuple(self.__pending_enumeration - enumerated_resources)
 
         if return_json:
             if include_dead_resources:
-                return (
-                    json.dumps(
-                        self.__result,
-                        sort_keys=True,
-                        indent=4,
-                        separators=(",", ": "),
-                    ),
-                    dead_resources,
-                )
+                return json.dumps(self.__result, sort_keys=True,
+                                  indent=4, separators=(',', ': ')), dead_resources
             else:
-                return json.dumps(
-                    self.__result,
-                    sort_keys=True,
-                    indent=4,
-                    separators=(",", ": "),
-                )
+                return json.dumps(self.__result, sort_keys=True,
+                                  indent=4, separators=(',', ': '))
         else:
             if include_dead_resources:
                 return self.__result, dead_resources
             else:
                 return self.__result
 
-    def walk_nested_dict(self, data, url=""):
+    def walk_nested_dict(self, data, url=''):
         r"""
         Parse through the nested dictionary and get the resource id paths.
 
@@ -290,22 +263,21 @@
         data                        Nested dictionary data from response message.
         url                         Resource for which the response is obtained in data.
         """
-        url = url.rstrip("/")
+        url = url.rstrip('/')
 
         for key, value in data.items():
+
             # Recursion if nested dictionary found.
             if isinstance(value, dict):
                 self.walk_nested_dict(value)
             else:
                 # Value contains a list of dictionaries having member data.
-                if "Members" == key:
+                if 'Members' == key:
                     if isinstance(value, list):
                         for memberDict in value:
-                            self.__pending_enumeration.add(
-                                memberDict["@odata.id"]
-                            )
-                if "@odata.id" == key:
-                    value = value.rstrip("/")
+                            self.__pending_enumeration.add(memberDict['@odata.id'])
+                if '@odata.id' == key:
+                    value = value.rstrip('/')
                     # Data for the given url.
                     if value == url:
                         self.__result[url] = data
@@ -351,9 +323,7 @@
         """
 
         member_list = []
-        self._rest_response_ = self.get(
-            resource_path, valid_status_codes=[200]
-        )
+        self._rest_response_ = self.get(resource_path, valid_status_codes=[200])
 
         try:
             for member in self._rest_response_.dict["Members"]:
@@ -364,7 +334,7 @@
 
         # Filter elements in the list and return matched elements.
         if filter is not None:
-            regex = ".*/" + filter + "[^/]*$"
+            regex = '.*/' + filter + '[^/]*$'
             return [x for x in member_list if re.match(regex, x)]
 
         return member_list
diff --git a/lib/bmc_redfish_utils.py b/lib/bmc_redfish_utils.py
index e93eee3..8eade5f 100644
--- a/lib/bmc_redfish_utils.py
+++ b/lib/bmc_redfish_utils.py
@@ -6,15 +6,15 @@
 
 import json
 import re
-
-import gen_print as gp
 from robot.libraries.BuiltIn import BuiltIn
+import gen_print as gp
 
 MTLS_ENABLED = BuiltIn().get_variable_value("${MTLS_ENABLED}")
 
 
 class bmc_redfish_utils(object):
-    ROBOT_LIBRARY_SCOPE = "TEST SUITE"
+
+    ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
 
     def __init__(self):
         r"""
@@ -22,25 +22,22 @@
         """
         # Obtain a reference to the global redfish object.
         self.__inited__ = False
-        self._redfish_ = BuiltIn().get_library_instance("redfish")
+        self._redfish_ = BuiltIn().get_library_instance('redfish')
 
-        if MTLS_ENABLED == "True":
+        if MTLS_ENABLED == 'True':
             self.__inited__ = True
         else:
             # There is a possibility that a given driver support both redfish and
             # legacy REST.
             self._redfish_.login()
-            self._rest_response_ = self._redfish_.get(
-                "/xyz/openbmc_project/", valid_status_codes=[200, 404]
-            )
+            self._rest_response_ = \
+                self._redfish_.get("/xyz/openbmc_project/", valid_status_codes=[200, 404])
 
             # If REST URL /xyz/openbmc_project/ is supported.
             if self._rest_response_.status == 200:
                 self.__inited__ = True
 
-        BuiltIn().set_global_variable(
-            "${REDFISH_REST_SUPPORTED}", self.__inited__
-        )
+        BuiltIn().set_global_variable("${REDFISH_REST_SUPPORTED}", self.__inited__)
 
     def get_redfish_session_info(self):
         r"""
@@ -53,7 +50,7 @@
         """
         session_dict = {
             "key": self._redfish_.get_session_key(),
-            "location": self._redfish_.get_session_location(),
+            "location": self._redfish_.get_session_location()
         }
         return session_dict
 
@@ -118,11 +115,9 @@
 
             # Iterate and check if path object has the attribute.
             for child_path_idx in child_path_list:
-                if (
-                    ("JsonSchemas" in child_path_idx)
-                    or ("SessionService" in child_path_idx)
-                    or ("#" in child_path_idx)
-                ):
+                if ('JsonSchemas' in child_path_idx)\
+                        or ('SessionService' in child_path_idx)\
+                        or ('#' in child_path_idx):
                     continue
                 if self.get_attribute(child_path_idx, attribute):
                     valid_path_list.append(child_path_idx)
@@ -185,7 +180,7 @@
         # Return the matching target URL entry.
         for target in target_list:
             # target "/redfish/v1/Systems/system/Actions/ComputerSystem.Reset"
-            attribute_in_uri = target.rsplit("/", 1)[-1]
+            attribute_in_uri = target.rsplit('/', 1)[-1]
             # attribute_in_uri "ComputerSystem.Reset"
             if target_attribute == attribute_in_uri:
                 return target
@@ -231,9 +226,9 @@
         # Set quiet variable to keep subordinate get() calls quiet.
         quiet = 1
         self.__pending_enumeration = set()
-        self._rest_response_ = self._redfish_.get(
-            resource_path, valid_status_codes=[200, 404, 500]
-        )
+        self._rest_response_ = \
+            self._redfish_.get(resource_path,
+                               valid_status_codes=[200, 404, 500])
 
         # Return empty list.
         if self._rest_response_.status != 200:
@@ -242,18 +237,17 @@
         if not self.__pending_enumeration:
             return resource_path
         for resource in self.__pending_enumeration.copy():
-            self._rest_response_ = self._redfish_.get(
-                resource, valid_status_codes=[200, 404, 500]
-            )
+            self._rest_response_ = \
+                self._redfish_.get(resource,
+                                   valid_status_codes=[200, 404, 500])
 
             if self._rest_response_.status != 200:
                 continue
             self.walk_nested_dict(self._rest_response_.dict)
         return list(sorted(self.__pending_enumeration))
 
-    def enumerate_request(
-        self, resource_path, return_json=1, include_dead_resources=False
-    ):
+    def enumerate_request(self, resource_path, return_json=1,
+                          include_dead_resources=False):
         r"""
         Perform a GET enumerate request and return available resource paths.
 
@@ -298,91 +292,71 @@
                 # Example: '/redfish/v1/JsonSchemas/' and sub resources.
                 #          '/redfish/v1/SessionService'
                 #          '/redfish/v1/Managers/bmc#/Oem'
-                if (
-                    ("JsonSchemas" in resource)
-                    or ("SessionService" in resource)
-                    or ("PostCodes" in resource)
-                    or ("Registries" in resource)
-                    or ("Journal" in resource)
-                    or ("#" in resource)
-                ):
+                if ('JsonSchemas' in resource) or ('SessionService' in resource)\
+                        or ('PostCodes' in resource) or ('Registries' in resource)\
+                        or ('Journal' in resource)\
+                        or ('#' in resource):
                     continue
 
-                self._rest_response_ = self._redfish_.get(
-                    resource, valid_status_codes=[200, 404, 405, 500]
-                )
+                self._rest_response_ = \
+                    self._redfish_.get(resource, valid_status_codes=[200, 404, 405, 500])
                 # Enumeration is done for available resources ignoring the
                 # ones for which response is not obtained.
                 if self._rest_response_.status != 200:
                     if include_dead_resources:
                         try:
                             dead_resources[self._rest_response_.status].append(
-                                resource
-                            )
+                                resource)
                         except KeyError:
-                            dead_resources[self._rest_response_.status] = [
-                                resource
-                            ]
+                            dead_resources[self._rest_response_.status] = \
+                                [resource]
                     continue
 
                 self.walk_nested_dict(self._rest_response_.dict, url=resource)
 
             enumerated_resources.update(set(resources_to_be_enumerated))
-            resources_to_be_enumerated = tuple(
-                self.__pending_enumeration - enumerated_resources
-            )
+            resources_to_be_enumerated = \
+                tuple(self.__pending_enumeration - enumerated_resources)
 
         if return_json:
             if include_dead_resources:
-                return (
-                    json.dumps(
-                        self.__result,
-                        sort_keys=True,
-                        indent=4,
-                        separators=(",", ": "),
-                    ),
-                    dead_resources,
-                )
+                return json.dumps(self.__result, sort_keys=True,
+                                  indent=4, separators=(',', ': ')), dead_resources
             else:
-                return json.dumps(
-                    self.__result,
-                    sort_keys=True,
-                    indent=4,
-                    separators=(",", ": "),
-                )
+                return json.dumps(self.__result, sort_keys=True,
+                                  indent=4, separators=(',', ': '))
         else:
             if include_dead_resources:
                 return self.__result, dead_resources
             else:
                 return self.__result
 
-    def walk_nested_dict(self, data, url=""):
+    def walk_nested_dict(self, data, url=''):
         r"""
         Parse through the nested dictionary and get the resource id paths.
         Description of argument(s):
         data    Nested dictionary data from response message.
         url     Resource for which the response is obtained in data.
         """
-        url = url.rstrip("/")
+        url = url.rstrip('/')
 
         for key, value in data.items():
+
             # Recursion if nested dictionary found.
             if isinstance(value, dict):
                 self.walk_nested_dict(value)
             else:
                 # Value contains a list of dictionaries having member data.
-                if "Members" == key:
+                if 'Members' == key:
                     if isinstance(value, list):
                         for memberDict in value:
                             if isinstance(memberDict, str):
                                 self.__pending_enumeration.add(memberDict)
                             else:
-                                self.__pending_enumeration.add(
-                                    memberDict["@odata.id"]
-                                )
+                                self.__pending_enumeration.add(memberDict['@odata.id'])
 
-                if "@odata.id" == key:
-                    value = value.rstrip("/")
+                if '@odata.id' == key:
+                    value = value.rstrip('/')
                     # Data for the given url.
                     if value == url:
                         self.__result[url] = data
diff --git a/lib/bmc_ssh_utils.py b/lib/bmc_ssh_utils.py
index b9ead59..fdd376f 100755
--- a/lib/bmc_ssh_utils.py
+++ b/lib/bmc_ssh_utils.py
@@ -5,22 +5,19 @@
 """
 
 import os
-
-import gen_robot_ssh as grs
 import gen_valid as gv
+import gen_robot_ssh as grs
 from robot.libraries.BuiltIn import BuiltIn
 
 
-def bmc_execute_command(
-    cmd_buf,
-    print_out=0,
-    print_err=0,
-    ignore_err=0,
-    fork=0,
-    quiet=None,
-    test_mode=None,
-    time_out=None,
-):
+def bmc_execute_command(cmd_buf,
+                        print_out=0,
+                        print_err=0,
+                        ignore_err=0,
+                        fork=0,
+                        quiet=None,
+                        test_mode=None,
+                        time_out=None):
     r"""
     Run the given command in an BMC SSH session and return the stdout, stderr and the return code.
 
@@ -48,12 +45,10 @@
     # Get global BMC variable values.
     openbmc_host = BuiltIn().get_variable_value("${OPENBMC_HOST}", default="")
     ssh_port = BuiltIn().get_variable_value("${SSH_PORT}", default="22")
-    openbmc_username = BuiltIn().get_variable_value(
-        "${OPENBMC_USERNAME}", default=""
-    )
-    openbmc_password = BuiltIn().get_variable_value(
-        "${OPENBMC_PASSWORD}", default=""
-    )
+    openbmc_username = BuiltIn().get_variable_value("${OPENBMC_USERNAME}",
+                                                    default="")
+    openbmc_password = BuiltIn().get_variable_value("${OPENBMC_PASSWORD}",
+                                                    default="")
 
     if not gv.valid_value(openbmc_host):
         return "", "", 1
@@ -64,47 +59,30 @@
     if not gv.valid_value(ssh_port):
         return "", "", 1
 
-    open_connection_args = {
-        "host": openbmc_host,
-        "alias": "bmc_connection",
-        "timeout": "25.0",
-        "prompt": "# ",
-        "port": ssh_port,
-    }
-    login_args = {"username": openbmc_username, "password": openbmc_password}
+    open_connection_args = {'host': openbmc_host, 'alias': 'bmc_connection',
+                            'timeout': '25.0', 'prompt': '# ', 'port': ssh_port}
+    login_args = {'username': openbmc_username, 'password': openbmc_password}
 
-    openbmc_user_type = os.environ.get(
-        "USER_TYPE", ""
-    ) or BuiltIn().get_variable_value("${USER_TYPE}", default="")
-    if openbmc_user_type == "sudo":
-        cmd_buf = "sudo -i " + cmd_buf
-    return grs.execute_ssh_command(
-        cmd_buf,
-        open_connection_args,
-        login_args,
-        print_out,
-        print_err,
-        ignore_err,
-        fork,
-        quiet,
-        test_mode,
-        time_out,
-    )
+    openbmc_user_type = os.environ.get('USER_TYPE', "") or \
+        BuiltIn().get_variable_value("${USER_TYPE}", default="")
+    if openbmc_user_type == 'sudo':
+        cmd_buf = 'sudo -i ' + cmd_buf
+    return grs.execute_ssh_command(cmd_buf, open_connection_args, login_args,
+                                   print_out, print_err, ignore_err, fork,
+                                   quiet, test_mode, time_out)
 
 
-def os_execute_command(
-    cmd_buf,
-    print_out=0,
-    print_err=0,
-    ignore_err=0,
-    fork=0,
-    quiet=None,
-    test_mode=None,
-    time_out=None,
-    os_host="",
-    os_username="",
-    os_password="",
-):
+def os_execute_command(cmd_buf,
+                       print_out=0,
+                       print_err=0,
+                       ignore_err=0,
+                       fork=0,
+                       quiet=None,
+                       test_mode=None,
+                       time_out=None,
+                       os_host="",
+                       os_username="",
+                       os_password=""):
     r"""
     Run the given command in an OS SSH session and return the stdout, stderr and the return code.
 
@@ -133,13 +111,9 @@
     if os_host == "":
         os_host = BuiltIn().get_variable_value("${OS_HOST}", default="")
     if os_username == "":
-        os_username = BuiltIn().get_variable_value(
-            "${OS_USERNAME}", default=""
-        )
+        os_username = BuiltIn().get_variable_value("${OS_USERNAME}", default="")
     if os_password == "":
-        os_password = BuiltIn().get_variable_value(
-            "${OS_PASSWORD}", default=""
-        )
+        os_password = BuiltIn().get_variable_value("${OS_PASSWORD}", default="")
 
     if not gv.valid_value(os_host):
         return "", "", 1
@@ -148,32 +122,21 @@
     if not gv.valid_value(os_password):
         return "", "", 1
 
-    open_connection_args = {"host": os_host, "alias": "os_connection"}
-    login_args = {"username": os_username, "password": os_password}
+    open_connection_args = {'host': os_host, 'alias': 'os_connection'}
+    login_args = {'username': os_username, 'password': os_password}
 
-    return grs.execute_ssh_command(
-        cmd_buf,
-        open_connection_args,
-        login_args,
-        print_out,
-        print_err,
-        ignore_err,
-        fork,
-        quiet,
-        test_mode,
-        time_out,
-    )
+    return grs.execute_ssh_command(cmd_buf, open_connection_args, login_args,
+                                   print_out, print_err, ignore_err, fork,
+                                   quiet, test_mode, time_out)
 
 
-def xcat_execute_command(
-    cmd_buf,
-    print_out=0,
-    print_err=0,
-    ignore_err=0,
-    fork=0,
-    quiet=None,
-    test_mode=None,
-):
+def xcat_execute_command(cmd_buf,
+                         print_out=0,
+                         print_err=0,
+                         ignore_err=0,
+                         fork=0,
+                         quiet=None,
+                         test_mode=None):
     r"""
     Run the given command in an XCAT SSH session and return the stdout, stderr and the return code.
 
@@ -198,13 +161,12 @@
 
     # Get global XCAT variable values.
     xcat_host = BuiltIn().get_variable_value("${XCAT_HOST}", default="")
-    xcat_username = BuiltIn().get_variable_value(
-        "${XCAT_USERNAME}", default=""
-    )
-    xcat_password = BuiltIn().get_variable_value(
-        "${XCAT_PASSWORD}", default=""
-    )
-    xcat_port = BuiltIn().get_variable_value("${XCAT_PORT}", default="22")
+    xcat_username = BuiltIn().get_variable_value("${XCAT_USERNAME}",
+                                                 default="")
+    xcat_password = BuiltIn().get_variable_value("${XCAT_PASSWORD}",
+                                                 default="")
+    xcat_port = BuiltIn().get_variable_value("${XCAT_PORT}",
+                                             default="22")
 
     if not gv.valid_value(xcat_host):
         return "", "", 1
@@ -215,27 +177,19 @@
     if not gv.valid_value(xcat_port):
         return "", "", 1
 
-    open_connection_args = {
-        "host": xcat_host,
-        "alias": "xcat_connection",
-        "port": xcat_port,
-    }
-    login_args = {"username": xcat_username, "password": xcat_password}
+    open_connection_args = {'host': xcat_host, 'alias': 'xcat_connection',
+                            'port': xcat_port}
+    login_args = {'username': xcat_username, 'password': xcat_password}
 
-    return grs.execute_ssh_command(
-        cmd_buf,
-        open_connection_args,
-        login_args,
-        print_out,
-        print_err,
-        ignore_err,
-        fork,
-        quiet,
-        test_mode,
-    )
+    return grs.execute_ssh_command(cmd_buf, open_connection_args, login_args,
+                                   print_out, print_err, ignore_err, fork,
+                                   quiet, test_mode)
 
 
-def device_write(cmd_buf, print_out=0, quiet=None, test_mode=None):
+def device_write(cmd_buf,
+                 print_out=0,
+                 quiet=None,
+                 test_mode=None):
     r"""
     Write the given command in a device SSH session and return the stdout, stderr and the return code.
 
@@ -262,13 +216,12 @@
 
     # Get global DEVICE variable values.
     device_host = BuiltIn().get_variable_value("${DEVICE_HOST}", default="")
-    device_username = BuiltIn().get_variable_value(
-        "${DEVICE_USERNAME}", default=""
-    )
-    device_password = BuiltIn().get_variable_value(
-        "${DEVICE_PASSWORD}", default=""
-    )
-    device_port = BuiltIn().get_variable_value("${DEVICE_PORT}", default="22")
+    device_username = BuiltIn().get_variable_value("${DEVICE_USERNAME}",
+                                                   default="")
+    device_password = BuiltIn().get_variable_value("${DEVICE_PASSWORD}",
+                                                   default="")
+    device_port = BuiltIn().get_variable_value("${DEVICE_PORT}",
+                                               default="22")
 
     if not gv.valid_value(device_host):
         return "", "", 1
@@ -279,21 +232,10 @@
     if not gv.valid_value(device_port):
         return "", "", 1
 
-    open_connection_args = {
-        "host": device_host,
-        "alias": "device_connection",
-        "port": device_port,
-    }
-    login_args = {"username": device_username, "password": device_password}
+    open_connection_args = {'host': device_host, 'alias': 'device_connection',
+                            'port': device_port}
+    login_args = {'username': device_username, 'password': device_password}
 
-    return grs.execute_ssh_command(
-        cmd_buf,
-        open_connection_args,
-        login_args,
-        print_out,
-        print_err=0,
-        ignore_err=1,
-        fork=0,
-        quiet=quiet,
-        test_mode=test_mode,
-    )
+    return grs.execute_ssh_command(cmd_buf, open_connection_args, login_args,
+                                   print_out, print_err=0, ignore_err=1,
+                                   fork=0, quiet=quiet, test_mode=test_mode)
diff --git a/lib/boot_data.py b/lib/boot_data.py
index b767e21..eaa7a52 100755
--- a/lib/boot_data.py
+++ b/lib/boot_data.py
@@ -5,40 +5,36 @@
 boot_results_table.
 """
 
-import glob
-import json
 import os
 import tempfile
-
-from robot.libraries.BuiltIn import BuiltIn
+import json
+import glob
 from tally_sheet import *
 
+from robot.libraries.BuiltIn import BuiltIn
 try:
     from robot.utils import DotDict
 except ImportError:
     import collections
 
-import gen_cmd as gc
-import gen_misc as gm
 import gen_print as gp
 import gen_valid as gv
+import gen_misc as gm
+import gen_cmd as gc
 import var_funcs as vf
 
 # The code base directory will be one level up from the directory containing this module.
 code_base_dir_path = os.path.dirname(os.path.dirname(__file__)) + os.sep
 
-redfish_support_trans_state = int(
-    os.environ.get("REDFISH_SUPPORT_TRANS_STATE", 0)
-) or int(
-    BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0)
-)
+redfish_support_trans_state = int(os.environ.get('REDFISH_SUPPORT_TRANS_STATE', 0)) or \
+    int(BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0))
 
-platform_arch_type = os.environ.get(
-    "PLATFORM_ARCH_TYPE", ""
-) or BuiltIn().get_variable_value("${PLATFORM_ARCH_TYPE}", default="power")
+platform_arch_type = os.environ.get('PLATFORM_ARCH_TYPE', '') or \
+    BuiltIn().get_variable_value("${PLATFORM_ARCH_TYPE}", default="power")
 
 
-def create_boot_table(file_path=None, os_host=""):
+def create_boot_table(file_path=None,
+                      os_host=""):
     r"""
     Read the boot table JSON file, convert it to an object and return it.
 
@@ -58,17 +54,11 @@
     """
     if file_path is None:
         if redfish_support_trans_state and platform_arch_type != "x86":
-            file_path = os.environ.get(
-                "BOOT_TABLE_PATH", "data/boot_table_redfish.json"
-            )
+            file_path = os.environ.get('BOOT_TABLE_PATH', 'data/boot_table_redfish.json')
         elif platform_arch_type == "x86":
-            file_path = os.environ.get(
-                "BOOT_TABLE_PATH", "data/boot_table_x86.json"
-            )
+            file_path = os.environ.get('BOOT_TABLE_PATH', 'data/boot_table_x86.json')
         else:
-            file_path = os.environ.get(
-                "BOOT_TABLE_PATH", "data/boot_table.json"
-            )
+            file_path = os.environ.get('BOOT_TABLE_PATH', 'data/boot_table.json')
 
     if not file_path.startswith("/"):
         file_path = code_base_dir_path + file_path
@@ -87,7 +77,7 @@
     # the boot entries.
     if os_host == "":
         for boot in boot_table:
-            state_keys = ["start", "end"]
+            state_keys = ['start', 'end']
             for state_key in state_keys:
                 for sub_state in list(boot_table[boot][state_key]):
                     if sub_state.startswith("os_"):
@@ -159,7 +149,8 @@
     return boot_lists
 
 
-def valid_boot_list(boot_list, valid_boot_types):
+def valid_boot_list(boot_list,
+                    valid_boot_types):
     r"""
     Verify that each entry in boot_list is a supported boot test.
 
@@ -171,21 +162,24 @@
 
     for boot_name in boot_list:
         boot_name = boot_name.strip(" ")
-        error_message = gv.valid_value(
-            boot_name, valid_values=valid_boot_types, var_name="boot_name"
-        )
+        error_message = gv.valid_value(boot_name,
+                                       valid_values=valid_boot_types,
+                                       var_name="boot_name")
         if error_message != "":
             BuiltIn().fail(gp.sprint_error(error_message))
 
 
 class boot_results:
+
     r"""
     This class defines a boot_results table.
     """
 
-    def __init__(
-        self, boot_table, boot_pass=0, boot_fail=0, obj_name="boot_results"
-    ):
+    def __init__(self,
+                 boot_table,
+                 boot_pass=0,
+                 boot_fail=0,
+                 obj_name='boot_results'):
         r"""
         Initialize the boot results object.
 
@@ -208,13 +202,13 @@
         self.__initial_boot_fail = boot_fail
 
         # Create boot_results_fields for use in creating boot_results table.
-        boot_results_fields = DotDict([("total", 0), ("pass", 0), ("fail", 0)])
+        boot_results_fields = DotDict([('total', 0), ('pass', 0), ('fail', 0)])
         # Create boot_results table.
-        self.__boot_results = tally_sheet(
-            "boot type", boot_results_fields, "boot_test_results"
-        )
-        self.__boot_results.set_sum_fields(["total", "pass", "fail"])
-        self.__boot_results.set_calc_fields(["total=pass+fail"])
+        self.__boot_results = tally_sheet('boot type',
+                                          boot_results_fields,
+                                          'boot_test_results')
+        self.__boot_results.set_sum_fields(['total', 'pass', 'fail'])
+        self.__boot_results.set_calc_fields(['total=pass+fail'])
         # Create one row in the result table for each kind of boot test in the boot_table (i.e. for all
         # supported boot tests).
         for boot_name in list(boot_table.keys()):
@@ -236,12 +230,12 @@
         """
 
         totals_line = self.__boot_results.calc()
-        return (
-            totals_line["pass"] + self.__initial_boot_pass,
-            totals_line["fail"] + self.__initial_boot_fail,
-        )
+        return totals_line['pass'] + self.__initial_boot_pass,\
+            totals_line['fail'] + self.__initial_boot_fail
 
-    def update(self, boot_type, boot_status):
+    def update(self,
+               boot_type,
+               boot_status):
         r"""
         Update our boot_results_table.  This includes:
         - Updating the record for the given boot_type by incrementing the pass or fail field.
@@ -256,7 +250,8 @@
         self.__boot_results.inc_row_field(boot_type, boot_status.lower())
         self.__boot_results.calc()
 
-    def sprint_report(self, header_footer="\n"):
+    def sprint_report(self,
+                      header_footer="\n"):
         r"""
         String-print the formatted boot_resuls_table and return them.
 
@@ -273,7 +268,9 @@
 
         return buffer
 
-    def print_report(self, header_footer="\n", quiet=None):
+    def print_report(self,
+                     header_footer="\n",
+                     quiet=None):
         r"""
         Print the formatted boot_resuls_table to the console.
 
@@ -283,7 +280,7 @@
                                     stack to get the default value.
         """
 
-        quiet = int(gm.dft(quiet, gp.get_stack_var("quiet", 0)))
+        quiet = int(gm.dft(quiet, gp.get_stack_var('quiet', 0)))
 
         gp.qprint(self.sprint_report(header_footer))
 
@@ -310,7 +307,9 @@
         gp.gp_print(self.sprint_obj())
 
 
-def create_boot_results_file_path(pgm_name, openbmc_nickname, master_pid):
+def create_boot_results_file_path(pgm_name,
+                                  openbmc_nickname,
+                                  master_pid):
     r"""
     Create a file path to be used to store a boot_results object.
 
@@ -319,7 +318,7 @@
     openbmc_nickname                The name of the system.  This could be a nickname, a hostname, an IP,
                                     etc.  This will form part of the resulting file name.
     master_pid                      The master process id which will form part of the file name.
-    """
+    """
 
     USER = os.environ.get("USER", "")
     dir_path = "/tmp/" + USER + "/"
@@ -327,9 +326,8 @@
         os.makedirs(dir_path)
 
     file_name_dict = vf.create_var_dict(pgm_name, openbmc_nickname, master_pid)
-    return vf.create_file_path(
-        file_name_dict, dir_path=dir_path, file_suffix=":boot_results"
-    )
+    return vf.create_file_path(file_name_dict, dir_path=dir_path,
+                               file_suffix=":boot_results")
 
 
 def cleanup_boot_results_file():
@@ -343,7 +341,7 @@
     for file_path in file_list:
         # Use parse_file_path to extract info from the file path.
         file_dict = vf.parse_file_path(file_path)
-        if gm.pid_active(file_dict["master_pid"]):
+        if gm.pid_active(file_dict['master_pid']):
             gp.qprint_timen("Preserving " + file_path + ".")
         else:
             gc.cmd_fnc("rm -f " + file_path)
@@ -365,7 +363,7 @@
     boot_history.append(boot_start_message)
 
     # Trim list to max number of entries.
-    del boot_history[: max(0, len(boot_history) - max_boot_history)]
+    del boot_history[:max(0, len(boot_history) - max_boot_history)]
 
 
 def print_boot_history(boot_history, quiet=None):
@@ -377,7 +375,7 @@
                                     stack to get the default value.
     """
 
-    quiet = int(gm.dft(quiet, gp.get_stack_var("quiet", 0)))
+    quiet = int(gm.dft(quiet, gp.get_stack_var('quiet', 0)))
 
     # indent 0, 90 chars wide, linefeed, char is "="
     gp.qprint_dashes(0, 90)
diff --git a/lib/code_update_utils.py b/lib/code_update_utils.py
index 6b44af9..5d69db6 100644
--- a/lib/code_update_utils.py
+++ b/lib/code_update_utils.py
@@ -4,21 +4,22 @@
 This module provides utilities for code updates.
 """
 
-import collections
+from robot.libraries.BuiltIn import BuiltIn
+
+import bmc_ssh_utils as bsu
+import gen_robot_keyword as keyword
+import gen_print as gp
+import variables as var
+
 import os
 import re
 import sys
 import tarfile
 import time
-
-import bmc_ssh_utils as bsu
-import gen_print as gp
-import gen_robot_keyword as keyword
-import variables as var
-from robot.libraries.BuiltIn import BuiltIn
+import collections
 
 robot_pgm_dir_path = os.path.dirname(__file__) + os.sep
-repo_data_path = re.sub("/lib", "/data", robot_pgm_dir_path)
+repo_data_path = re.sub('/lib', '/data', robot_pgm_dir_path)
 sys.path.append(repo_data_path)
 
 
@@ -33,7 +34,7 @@
 
     temp_dict = collections.OrderedDict()
     for key, value in sw_dict.items():
-        if value["image_type"] == image_type:
+        if value['image_type'] == image_type:
             temp_dict[key] = value
         else:
             pass
@@ -50,9 +51,8 @@
     """
 
     taken_priorities = {}
-    _, image_names = keyword.run_key(
-        "Get Software Objects  " + "version_type=" + image_purpose
-    )
+    _, image_names = keyword.run_key("Get Software Objects  "
+                                     + "version_type=" + image_purpose)
 
     for image_name in image_names:
         _, image = keyword.run_key("Get Host Software Property  " + image_name)
@@ -60,10 +60,9 @@
             continue
         image_priority = image["Priority"]
         if image_priority in taken_priorities:
-            BuiltIn().fail(
-                "Found active images with the same priority.\n"
-                + gp.sprint_vars(image, taken_priorities[image_priority])
-            )
+            BuiltIn().fail("Found active images with the same priority.\n"
+                           + gp.sprint_vars(image,
+                                            taken_priorities[image_priority]))
         taken_priorities[image_priority] = image
 
 
@@ -75,22 +74,17 @@
     # Get the version of the image currently running on the BMC.
     _, cur_img_version = keyword.run_key("Get BMC Version")
     # Remove the surrounding double quotes from the version.
-    cur_img_version = cur_img_version.replace('"', "")
+    cur_img_version = cur_img_version.replace('"', '')
 
-    _, images = keyword.run_key(
-        "Read Properties  " + var.SOFTWARE_VERSION_URI + "enumerate"
-    )
+    _, images = keyword.run_key("Read Properties  "
+                                + var.SOFTWARE_VERSION_URI + "enumerate")
 
     for image_name in images:
         _, image_properties = keyword.run_key(
-            "Get Host Software Property  " + image_name
-        )
-        if (
-            "Purpose" in image_properties
-            and "Version" in image_properties
-            and image_properties["Purpose"] != var.VERSION_PURPOSE_HOST
-            and image_properties["Version"] != cur_img_version
-        ):
+            "Get Host Software Property  " + image_name)
+        if 'Purpose' in image_properties and 'Version' in image_properties \
+                and image_properties['Purpose'] != var.VERSION_PURPOSE_HOST \
+                and image_properties['Version'] != cur_img_version:
             return image_name
     BuiltIn().fail("Did not find any non-running BMC images.")
 
@@ -102,16 +96,11 @@
 
     keyword.run_key("Initiate Host PowerOff")
 
-    status, images = keyword.run_key(
-        "Get Software Objects  " + var.VERSION_PURPOSE_HOST
-    )
+    status, images = keyword.run_key("Get Software Objects  "
+                                     + var.VERSION_PURPOSE_HOST)
     for image_name in images:
-        keyword.run_key(
-            "Delete Image And Verify  "
-            + image_name
-            + "  "
-            + var.VERSION_PURPOSE_HOST
-        )
+        keyword.run_key("Delete Image And Verify  " + image_name + "  "
+                        + var.VERSION_PURPOSE_HOST)
 
 
 def wait_for_activation_state_change(version_id, initial_state):
@@ -129,23 +118,22 @@
     retry = 0
     num_read_errors = 0
     read_fail_threshold = 1
-    while retry < 60:
-        status, software_state = keyword.run_key(
-            "Read Properties  " + var.SOFTWARE_VERSION_URI + str(version_id),
-            ignore=1,
-        )
-        if status == "FAIL":
+    while (retry < 60):
+        status, software_state = keyword.run_key("Read Properties  "
+                                                 + var.SOFTWARE_VERSION_URI
+                                                 + str(version_id),
+                                                 ignore=1)
+        if status == 'FAIL':
             num_read_errors += 1
             if num_read_errors > read_fail_threshold:
-                message = "Read errors exceeds threshold:\n " + gp.sprint_vars(
-                    num_read_errors, read_fail_threshold
-                )
+                message = "Read errors exceeds threshold:\n " \
+                    + gp.sprint_vars(num_read_errors, read_fail_threshold)
                 BuiltIn().fail(message)
             time.sleep(10)
             continue
 
         current_state = (software_state)["Activation"]
-        if initial_state == current_state:
+        if (initial_state == current_state):
             time.sleep(10)
             retry += 1
             num_read_errors = 0
@@ -164,12 +152,10 @@
                                     returned to the calling function.
     """
 
-    stdout, stderr, rc = bsu.bmc_execute_command(
-        "cd "
-        + dir_path
-        + "; stat -c '%Y %n' * |"
-        + " sort -k1,1nr | head -n 1"
-    )
+    stdout, stderr, rc = \
+        bsu.bmc_execute_command("cd " + dir_path
+                                + "; stat -c '%Y %n' * |"
+                                + " sort -k1,1nr | head -n 1")
     return stdout.split(" ")[-1]
 
 
@@ -210,9 +196,9 @@
                                     version.
     """
 
-    stdout, stderr, rc = bsu.bmc_execute_command(
-        "cat " + file_path + ' | grep "version="', ignore_err=1
-    )
+    stdout, stderr, rc = \
+        bsu.bmc_execute_command("cat " + file_path
+                                + " | grep \"version=\"", ignore_err=1)
     return (stdout.split("\n")[0]).split("=")[-1]
 
 
@@ -225,9 +211,9 @@
                                     purpose.
     """
 
-    stdout, stderr, rc = bsu.bmc_execute_command(
-        "cat " + file_path + ' | grep "purpose="', ignore_err=1
-    )
+    stdout, stderr, rc = \
+        bsu.bmc_execute_command("cat " + file_path
+                                + " | grep \"purpose=\"", ignore_err=1)
     return stdout.split("=")[-1]
 
 
@@ -243,22 +229,22 @@
                                     one of the images in the upload dir.
     """
 
-    stdout, stderr, rc = bsu.bmc_execute_command(
-        "ls -d " + var.IMAGE_UPLOAD_DIR_PATH + "*/"
-    )
+    stdout, stderr, rc = \
+        bsu.bmc_execute_command("ls -d " + var.IMAGE_UPLOAD_DIR_PATH + "*/")
 
     image_list = stdout.split("\n")
     retry = 0
-    while retry < 10:
+    while (retry < 10):
         for i in range(0, len(image_list)):
             version = get_image_version(image_list[i] + "MANIFEST")
-            if version == image_version:
+            if (version == image_version):
                 return image_list[i]
         time.sleep(10)
         retry += 1
 
 
-def verify_image_upload(image_version, timeout=3):
+def verify_image_upload(image_version,
+                        timeout=3):
     r"""
     Verify the image was uploaded correctly and that it created
     a valid d-bus object. If the first check for the image
@@ -276,22 +262,16 @@
 
     keyword.run_key_u("Open Connection And Log In")
     image_purpose = get_image_purpose(image_path + "MANIFEST")
-    if (
-        image_purpose == var.VERSION_PURPOSE_BMC
-        or image_purpose == var.VERSION_PURPOSE_HOST
-    ):
+    if (image_purpose == var.VERSION_PURPOSE_BMC
+            or image_purpose == var.VERSION_PURPOSE_HOST):
         uri = var.SOFTWARE_VERSION_URI + image_version_id
         ret_values = ""
         for itr in range(timeout * 2):
-            status, ret_values = keyword.run_key(
-                "Read Attribute  " + uri + "  Activation"
-            )
+            status, ret_values = \
+                keyword.run_key("Read Attribute  " + uri + "  Activation")
 
-            if (
-                (ret_values == var.READY)
-                or (ret_values == var.INVALID)
-                or (ret_values == var.ACTIVE)
-            ):
+            if ((ret_values == var.READY) or (ret_values == var.INVALID)
+                    or (ret_values == var.ACTIVE)):
                 return True, image_version_id
             else:
                 time.sleep(30)
@@ -319,16 +299,13 @@
     """
 
     for i in range(timeout * 2):
-        stdout, stderr, rc = bsu.bmc_execute_command(
-            "ls "
-            + var.IMAGE_UPLOAD_DIR_PATH
-            + "*/MANIFEST 2>/dev/null "
-            + '| xargs grep -rl "version='
-            + image_version
-            + '"'
-        )
-        image_dir = os.path.dirname(stdout.split("\n")[0])
-        if "" != image_dir:
-            bsu.bmc_execute_command("rm -rf " + image_dir)
-            BuiltIn().fail("Found invalid BMC Image: " + image_dir)
+        stdout, stderr, rc = \
+            bsu.bmc_execute_command('ls ' + var.IMAGE_UPLOAD_DIR_PATH
+                                    + '*/MANIFEST 2>/dev/null '
+                                    + '| xargs grep -rl "version='
+                                    + image_version + '"')
+        image_dir = os.path.dirname(stdout.split('\n')[0])
+        if '' != image_dir:
+            bsu.bmc_execute_command('rm -rf ' + image_dir)
+            BuiltIn().fail('Found invalid BMC Image: ' + image_dir)
         time.sleep(30)
diff --git a/lib/disable_warning_urllib.py b/lib/disable_warning_urllib.py
index 8527b6d..4c08a24 100644
--- a/lib/disable_warning_urllib.py
+++ b/lib/disable_warning_urllib.py
@@ -1,7 +1,6 @@
 #!/usr/bin/env python3
 import logging
 import warnings
-
 try:
     import httplib
 except ImportError:
@@ -15,6 +14,6 @@
 requests_log.propagate = False
 
 
-class disable_warning_urllib:
+class disable_warning_urllib():
     def do_nothing():
         return
diff --git a/lib/dump_utils.py b/lib/dump_utils.py
index 819d436..7c65df9 100755
--- a/lib/dump_utils.py
+++ b/lib/dump_utils.py
@@ -4,22 +4,21 @@
 This file contains functions which are useful for processing BMC dumps.
 """
 
-import imp
+import gen_print as gp
+import gen_misc as gm
+import gen_robot_keyword as grk
+import bmc_ssh_utils as bsu
+import var_funcs as vf
 import os
 import sys
-
-import bmc_ssh_utils as bsu
-import gen_misc as gm
-import gen_print as gp
-import gen_robot_keyword as grk
-import var_funcs as vf
+import os
+import imp
 import variables as var
+
 from robot.libraries.BuiltIn import BuiltIn
 
-base_path = (
-    os.path.dirname(os.path.dirname(imp.find_module("gen_robot_print")[1]))
-    + os.sep
-)
+base_path = os.path.dirname(os.path.dirname(
+                            imp.find_module("gen_robot_print")[1])) + os.sep
 sys.path.append(base_path + "data/")
 
 
@@ -58,7 +57,9 @@
     return output.split("\n")
 
 
-def valid_dump(dump_id, dump_dict=None, quiet=None):
+def valid_dump(dump_id,
+               dump_dict=None,
+               quiet=None):
     r"""
     Verify that dump_id is a valid.  If it is not valid, issue robot failure
     message.
@@ -80,26 +81,25 @@
         dump_dict = get_dump_dict(quiet=quiet)
 
     if dump_id not in dump_dict:
-        message = (
-            "The specified dump ID was not found among the existing"
+        message = "The specified dump ID was not found among the existing" \
             + " dumps:\n"
-        )
         message += gp.sprint_var(dump_id)
         message += gp.sprint_var(dump_dict)
         BuiltIn().fail(gp.sprint_error(message))
 
     if not dump_dict[dump_id].endswith("tar.xz"):
-        message = (
-            'There is no "tar.xz" file associated with the given'
+        message = "There is no \"tar.xz\" file associated with the given" \
             + " dump_id:\n"
-        )
         message += gp.sprint_var(dump_id)
         dump_file_path = dump_dict[dump_id]
         message += gp.sprint_var(dump_file_path)
         BuiltIn().fail(gp.sprint_error(message))
 
 
-def scp_dumps(targ_dir_path, targ_file_prefix="", dump_dict=None, quiet=None):
+def scp_dumps(targ_dir_path,
+              targ_file_prefix="",
+              dump_dict=None,
+              quiet=None):
     r"""
     SCP all dumps from the BMC to the indicated directory on the local system
     and return a list of the new files.
@@ -126,12 +126,10 @@
 
     dump_file_list = []
     for file_path in dump_list:
-        targ_file_path = (
-            targ_dir_path + targ_file_prefix + os.path.basename(file_path)
-        )
-        status, ret_values = grk.run_key(
-            "scp.Get File  " + file_path + "  " + targ_file_path, quiet=quiet
-        )
+        targ_file_path = targ_dir_path + targ_file_prefix \
+            + os.path.basename(file_path)
+        status, ret_values = grk.run_key("scp.Get File  " + file_path
+                                         + "  " + targ_file_path, quiet=quiet)
         dump_file_list.append(targ_file_path)
 
     return dump_file_list
diff --git a/lib/event_notification.py b/lib/event_notification.py
index 53c010e..363caf4 100755
--- a/lib/event_notification.py
+++ b/lib/event_notification.py
@@ -1,15 +1,14 @@
 #!/usr/bin/env python3
 
-import json
-import ssl
-
-import gen_print as gp
-import gen_valid as gv
 import requests
 import websocket
+import json
+import ssl
+import gen_valid as gv
+import gen_print as gp
 
 
-class event_notification:
+class event_notification():
     r"""
     Main class to subscribe and receive event notifications.
     """
@@ -37,22 +36,20 @@
         r"""
         Login and return session object.
         """
-        http_header = {"Content-Type": "application/json"}
+        http_header = {'Content-Type': 'application/json'}
         session = requests.session()
-        response = session.post(
-            "https://" + self.__host + "/login",
-            headers=http_header,
-            json={"data": [self.__user, self.__password]},
-            verify=False,
-            timeout=30,
-        )
+        response = session.post('https://' + self.__host + '/login',
+                                headers=http_header,
+                                json={"data": [self.__user, self.__password]},
+                                verify=False, timeout=30)
         gv.valid_value(response.status_code, valid_values=[200])
         login_response = json.loads(response.text)
         gp.qprint_var(login_response)
-        gv.valid_value(login_response["status"], valid_values=["ok"])
+        gv.valid_value(login_response['status'], valid_values=['ok'])
         return session
 
     def subscribe(self, dbus_path, enable_trace=False):
+
         r"""
         Subscribe to the given path and return a list of event notifications.
 
@@ -82,21 +79,15 @@
         cookies = session.cookies.get_dict()
         # Convert from dictionary to a string of the following format:
         # key=value;key=value...
-        cookies = gp.sprint_var(
-            cookies,
-            fmt=gp.no_header() | gp.strip_brackets(),
-            col1_width=0,
-            trailing_char="",
-            delim="=",
-        ).replace("\n", ";")
+        cookies = gp.sprint_var(cookies, fmt=gp.no_header() | gp.strip_brackets(),
+                                col1_width=0, trailing_char="",
+                                delim="=").replace("\n", ";")
 
         websocket.enableTrace(enable_trace)
-        self.__websocket = websocket.create_connection(
-            "wss://{host}/subscribe".format(host=self.__host),
-            sslopt={"cert_reqs": ssl.CERT_NONE},
-            cookie=cookies,
-        )
-        dbus_path = [path.strip() for path in dbus_path.split(",")]
+        self.__websocket = websocket.create_connection("wss://{host}/subscribe".format(host=self.__host),
+                                                       sslopt={"cert_reqs": ssl.CERT_NONE},
+                                                       cookie=cookies)
+        dbus_path = [path.strip() for path in dbus_path.split(',')]
         dbus_path = {"paths": dbus_path}
 
         self.__websocket.send(json.dumps(dbus_path))
diff --git a/lib/external_intf/management_console_utils.py b/lib/external_intf/management_console_utils.py
index 5f02732..d754bea 100644
--- a/lib/external_intf/management_console_utils.py
+++ b/lib/external_intf/management_console_utils.py
@@ -1,15 +1,14 @@
 #!/usr/bin/env python3
 
-import json
 import os
 import re
+import json
+from data import variables
 from collections import OrderedDict
 
-from data import variables
-
-bmc_rec_pattern = "^=(.*)\n(.*)\n(.*)\n(.*)\n(.*)"
-bmc_prop_pattern = [r"\w+", r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}", "443"]
-bmc_rec_prop = ["hostname", "address", "port", "txt"]
+bmc_rec_pattern = '^=(.*)\n(.*)\n(.*)\n(.*)\n(.*)'
+bmc_prop_pattern = [r"\w+", r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}", '443']
+bmc_rec_prop = ['hostname', 'address', 'port', 'txt']
 
 
 class Exception(Exception):
@@ -31,7 +30,7 @@
 
     try:
         for bmc_key, bmc_val in bmc_records.items():
-            temp_ip = bmc_val.get("address", None)
+            temp_ip = bmc_val.get('address', None)
             if bmc_ip.strip() == temp_ip.strip():
                 return True
         else:
@@ -40,9 +39,8 @@
         return exc_obj
 
 
-def validate_bmc_properties(
-    bmc_prop_pattern, bmc_prop, bmc_value, bmc_rec_valid
-):
+def validate_bmc_properties(bmc_prop_pattern, bmc_prop, bmc_value,
+                            bmc_rec_valid):
     r"""
     This function is to check pattern match in bmc properties.
 
@@ -54,10 +52,9 @@
     """
 
     try:
-        status = [
-            lambda bmc_prop: re.search(bmc_prop_pattern, bmc_prob),
-            bmc_value,
-        ]
+        status = \
+            [lambda bmc_prop: re.search(bmc_prop_pattern, bmc_prob),
+                bmc_value]
         if None in status:
             bmc_rec_valid[bmc_prop] = None
     except Exception as exc_obj:
@@ -75,17 +72,14 @@
     """
 
     try:
-        for bmc_prop_key, bmc_pattern_val in zip(
-            bmc_rec_prop, bmc_prop_pattern
-        ):
+        for bmc_prop_key, bmc_pattern_val in \
+                zip(bmc_rec_prop, bmc_prop_pattern):
             bmc_prop_value = bmc_rec_valid.get(bmc_prop_key, False)
             if bmc_rec_valid[bmc_prop_key] is not False:
-                valid_status = validate_bmc_properties(
-                    bmc_pattern_val,
-                    bmc_prop_key,
-                    bmc_prop_value,
-                    bmc_rec_valid,
-                )
+                valid_status = validate_bmc_properties(bmc_pattern_val,
+                                                       bmc_prop_key,
+                                                       bmc_prop_value,
+                                                       bmc_rec_valid)
                 if None not in bmc_rec_valid.values():
                     return bmc_rec_valid
                 else:
@@ -118,16 +112,15 @@
         exc_obj = None
         bmc_inv = OrderedDict()
         service_count = 0
-        for line in bmc_inv_record.split("\n"):
+        for line in bmc_inv_record.split('\n'):
             if line == "":
                 pass
             elif service_type in line:
-                bmc_inv["service"] = service_type
+                bmc_inv['service'] = service_type
                 service_count += 1
-            elif not line.startswith("=") and service_count == 1:
-                bmc_inv[line.split("=")[0].strip()] = str(
-                    line.split("=")[-1].strip()
-                )[1:-1]
+            elif not line.startswith('=') and service_count == 1:
+                bmc_inv[line.split('=')[0].strip()] = \
+                    str(line.split('=')[-1].strip())[1:-1]
     except Exception as exc_obj:
         return exc_obj
     finally:
@@ -168,8 +161,10 @@
         count = 0
         exe_obj = None
         bmc_inv_list = OrderedDict()
-        for match in re.finditer(bmc_rec_pattern, bmc_records, re.MULTILINE):
-            bmc_record, exc_msg = bmc_inventory(service_type, match.group())
+        for match in re.finditer(bmc_rec_pattern, bmc_records,
+                                 re.MULTILINE):
+            bmc_record, exc_msg = \
+                bmc_inventory(service_type, match.group())
             if bmc_record is not None and exc_msg is None:
                 count += 1
                 bmc_inv_list[count] = bmc_record
@@ -177,6 +172,6 @@
         return exe_obj
     finally:
         if len(bmc_inv_list) == 0:
-            "", exe_obj
+            '', exe_obj
         else:
             return bmc_inv_list, exe_obj
diff --git a/lib/ffdc_cli_robot_script.py b/lib/ffdc_cli_robot_script.py
index 5d2cd18..eb2cb2e 100644
--- a/lib/ffdc_cli_robot_script.py
+++ b/lib/ffdc_cli_robot_script.py
@@ -3,10 +3,12 @@
 import os
 import sys
 
+
 from ffdc_collector import ffdc_collector
-from robot.libraries.BuiltIn import BuiltIn as robotBuildIn
 from ssh_utility import SSHRemoteclient
 
+from robot.libraries.BuiltIn import BuiltIn as robotBuildIn
+
 sys.path.append(__file__.split(__file__.split("/")[-1])[0] + "../ffdc")
 
 # (Sub) String constants used for input dictionary key search
@@ -74,15 +76,12 @@
         # When method is invoked with no parm,
         # use robot variables
         # OPENBMC_HOST, OPENBMC_USERNAME, OPENBMC_PASSWORD, OPENBMC (type)
-        dict_of_parms["OPENBMC_HOST"] = robotBuildIn().get_variable_value(
-            "${OPENBMC_HOST}", default=None
-        )
-        dict_of_parms["OPENBMC_USERNAME"] = robotBuildIn().get_variable_value(
-            "${OPENBMC_USERNAME}", default=None
-        )
-        dict_of_parms["OPENBMC_PASSWORD"] = robotBuildIn().get_variable_value(
-            "${OPENBMC_PASSWORD}", default=None
-        )
+        dict_of_parms["OPENBMC_HOST"] = \
+            robotBuildIn().get_variable_value("${OPENBMC_HOST}", default=None)
+        dict_of_parms["OPENBMC_USERNAME"] = \
+            robotBuildIn().get_variable_value("${OPENBMC_USERNAME}", default=None)
+        dict_of_parms["OPENBMC_PASSWORD"] = \
+            robotBuildIn().get_variable_value("${OPENBMC_PASSWORD}", default=None)
         dict_of_parms["REMOTE_TYPE"] = "OPENBMC"
 
         run_ffdc_collector(dict_of_parms)
@@ -155,10 +154,7 @@
     # that are not specified with input and have acceptable defaults.
     if not location:
         # Default FFDC store location
-        location = (
-            robotBuildIn().get_variable_value("${EXECDIR}", default=None)
-            + "/logs"
-        )
+        location = robotBuildIn().get_variable_value("${EXECDIR}", default=None) + "/logs"
         ffdc_collector.validate_local_store(location)
 
     if not config:
@@ -179,83 +175,76 @@
         log_level = "INFO"
 
     # If minimum required inputs are met, go collect.
-    if remote and username and password and remote_type:
+    if (remote and username and password and remote_type):
         # Execute data collection
-        this_ffdc = ffdc_collector(
-            remote,
-            username,
-            password,
-            config,
-            location,
-            remote_type,
-            protocol,
-            env_vars,
-            econfig,
-            log_level,
-        )
+        this_ffdc = ffdc_collector(remote,
+                                   username,
+                                   password,
+                                   config,
+                                   location,
+                                   remote_type,
+                                   protocol,
+                                   env_vars,
+                                   econfig,
+                                   log_level)
         this_ffdc.collect_ffdc()
 
         # If original ffdc request is for BMC,
         #  attempt to also collect ffdc for HOST_OS if possible.
-        if remote_type.upper() == "OPENBMC":
-            os_host = robotBuildIn().get_variable_value(
-                "${OS_HOST}", default=None
-            )
-            os_username = robotBuildIn().get_variable_value(
-                "${OS_USERNAME}", default=None
-            )
-            os_password = robotBuildIn().get_variable_value(
-                "${OS_PASSWORD}", default=None
-            )
+        if remote_type.upper() == 'OPENBMC':
+            os_host = \
+                robotBuildIn().get_variable_value("${OS_HOST}", default=None)
+            os_username = \
+                robotBuildIn().get_variable_value("${OS_USERNAME}", default=None)
+            os_password =  \
+                robotBuildIn().get_variable_value("${OS_PASSWORD}", default=None)
 
             if os_host and os_username and os_password:
                 os_type = get_os_type(os_host, os_username, os_password)
                 if os_type:
-                    os_ffdc = ffdc_collector(
-                        os_host,
-                        os_username,
-                        os_password,
-                        config,
-                        location,
-                        os_type,
-                        protocol,
-                        env_vars,
-                        econfig,
-                        log_level,
-                    )
+                    os_ffdc = ffdc_collector(os_host,
+                                             os_username,
+                                             os_password,
+                                             config,
+                                             location,
+                                             os_type,
+                                             protocol,
+                                             env_vars,
+                                             econfig,
+                                             log_level)
                     os_ffdc.collect_ffdc()
 
 
 def get_os_type(os_host, os_username, os_password):
+
     os_type = None
 
     # If HOST_OS is pingable
     if os.system("ping -c 1 " + os_host) == 0:
         r"""
-        Open a ssh connection to targeted system.
+            Open a ssh connection to targeted system.
         """
-        ssh_remoteclient = SSHRemoteclient(os_host, os_username, os_password)
+        ssh_remoteclient = SSHRemoteclient(os_host,
+                                           os_username,
+                                           os_password)
 
         if ssh_remoteclient.ssh_remoteclient_login():
+
             # Find OS_TYPE
-            cmd_exit_code, err, response = ssh_remoteclient.execute_command(
-                "uname"
-            )
+            cmd_exit_code, err, response = \
+                ssh_remoteclient.execute_command('uname')
             os_type = response.strip()
 
             # If HOST_OS is linux, expands os_type to one of
             # the 2 linux distros that have more details in ffdc_config.yaml
-            if os_type.upper() == "LINUX":
-                (
-                    cmd_exit_code,
-                    err,
-                    response,
-                ) = ssh_remoteclient.execute_command("cat /etc/os-release")
+            if os_type.upper() == 'LINUX':
+                cmd_exit_code, err, response = \
+                    ssh_remoteclient.execute_command('cat /etc/os-release')
                 linux_distro = response
-                if "redhat" in linux_distro:
-                    os_type = "RHEL"
-                elif "ubuntu" in linux_distro:
-                    os_type = "UBUNTU"
+                if 'redhat' in linux_distro:
+                    os_type = 'RHEL'
+                elif 'ubuntu' in linux_distro:
+                    os_type = 'UBUNTU'
 
         if ssh_remoteclient:
             ssh_remoteclient.ssh_remoteclient_disconnect()
diff --git a/lib/firmware_utils.py b/lib/firmware_utils.py
index 61d1e0b..959693d 100755
--- a/lib/firmware_utils.py
+++ b/lib/firmware_utils.py
@@ -61,17 +61,15 @@
 
     """
 
-    cmd_buf = (
-        "hdparm -I " + device + ' | egrep ":.+" | sed -re' + ' "s/[ \t]+/ /g"'
-    )
+    cmd_buf = "hdparm -I " + device + " | egrep \":.+\" | sed -re" +\
+        " \"s/[ \t]+/ /g\""
     stdout, stderr, rc = bsu.os_execute_command(cmd_buf)
 
     firmware_dict = vf.key_value_outbuf_to_dict(stdout)
 
     cmd_buf = "lsblk -P " + device + " | sed -re 's/\" /\"\\n/g'"
     stdout, stderr, rc = bsu.os_execute_command(cmd_buf)
-    firmware_dict.update(
-        vf.key_value_outbuf_to_dict(stdout, delim="=", strip=' "')
-    )
+    firmware_dict.update(vf.key_value_outbuf_to_dict(stdout, delim='=',
+                                                     strip=" \""))
 
     return firmware_dict
diff --git a/lib/func_args.py b/lib/func_args.py
index d779b48..306af38 100644
--- a/lib/func_args.py
+++ b/lib/func_args.py
@@ -4,9 +4,8 @@
 This module provides argument manipulation functions like pop_arg.
 """
 
-import collections
-
 import gen_print as gp
+import collections
 
 
 def pop_arg(pop_arg_default=None, *args, **kwargs):
diff --git a/lib/func_timer.py b/lib/func_timer.py
index fc339c2..852bbf2 100644
--- a/lib/func_timer.py
+++ b/lib/func_timer.py
@@ -5,12 +5,11 @@
 """
 
 import os
-import signal
 import sys
+import signal
 import time
-
-import gen_misc as gm
 import gen_print as gp
+import gen_misc as gm
 import gen_valid as gv
 
 
@@ -31,7 +30,9 @@
     second.  "sleep 2" is a positional parm for the run_key function.
     """
 
-    def __init__(self, obj_name="func_timer_class"):
+    def __init__(self,
+                 obj_name='func_timer_class'):
+
         # Initialize object variables.
         self.__obj_name = obj_name
         self.__func = None
@@ -58,11 +59,9 @@
         buffer += gp.sprint_var(func_name, indent=indent)
         buffer += gp.sprint_varx("time_out", self.__time_out, indent=indent)
         buffer += gp.sprint_varx("child_pid", self.__child_pid, indent=indent)
-        buffer += gp.sprint_varx(
-            "original_SIGUSR1_handler",
-            self.__original_SIGUSR1_handler,
-            indent=indent,
-        )
+        buffer += gp.sprint_varx("original_SIGUSR1_handler",
+                                 self.__original_SIGUSR1_handler,
+                                 indent=indent)
         return buffer
 
     def print_obj(self):
@@ -96,7 +95,8 @@
         if self.__original_SIGUSR1_handler != 0:
             signal.signal(signal.SIGUSR1, self.__original_SIGUSR1_handler)
         try:
-            gp.lprint_timen("Killing child pid " + str(self.__child_pid) + ".")
+            gp.lprint_timen("Killing child pid " + str(self.__child_pid)
+                            + ".")
             os.kill(self.__child_pid, signal.SIGKILL)
         except OSError:
             gp.lprint_timen("Tolerated kill failure.")
@@ -110,7 +110,9 @@
         children = gm.get_child_pids()
         gp.lprint_var(children)
 
-    def timed_out(self, signal_number, frame):
+    def timed_out(self,
+                  signal_number,
+                  frame):
         r"""
         Handle a SIGUSR1 generated by the child process after the time_out has expired.
 
@@ -133,6 +135,7 @@
         raise ValueError(err_msg)
 
     def run(self, func, *args, **kwargs):
+
         r"""
         Run the indicated function with the given args and kwargs and return the value that the function
         returns.  If the time_out value expires, raise a ValueError exception with a detailed error message.
@@ -168,9 +171,9 @@
         # Get self.__time_out value from kwargs.  If kwargs['time_out'] is not present, self.__time_out will
         # default to None.
         self.__time_out = None
-        if "time_out" in kwargs:
-            self.__time_out = kwargs["time_out"]
-            del kwargs["time_out"]
+        if 'time_out' in kwargs:
+            self.__time_out = kwargs['time_out']
+            del kwargs['time_out']
             # Convert "none" string to None.
             try:
                 if self.__time_out.lower() == "none":
@@ -180,13 +183,12 @@
             if self.__time_out is not None:
                 self.__time_out = int(self.__time_out)
                 # Ensure that time_out is non-negative.
-                message = gv.valid_range(
-                    self.__time_out, 0, var_name="time_out"
-                )
+                message = gv.valid_range(self.__time_out, 0,
+                                         var_name="time_out")
                 if message != "":
-                    raise ValueError(
-                        "\n" + gp.sprint_error_report(message, format="long")
-                    )
+                    raise ValueError("\n"
+                                     + gp.sprint_error_report(message,
+                                                              format='long'))
 
         gp.lprint_varx("time_out", self.__time_out)
         self.__child_pid = 0
@@ -198,21 +200,13 @@
             parent_pid = os.getpid()
             self.__child_pid = os.fork()
             if self.__child_pid == 0:
-                gp.dprint_timen(
-                    "Child timer pid "
-                    + str(os.getpid())
-                    + ": Sleeping for "
-                    + str(self.__time_out)
-                    + " seconds."
-                )
+                gp.dprint_timen("Child timer pid " + str(os.getpid())
+                                + ": Sleeping for " + str(self.__time_out)
+                                + " seconds.")
                 time.sleep(self.__time_out)
-                gp.dprint_timen(
-                    "Child timer pid "
-                    + str(os.getpid())
-                    + ": Sending SIGUSR1 to parent pid "
-                    + str(parent_pid)
-                    + "."
-                )
+                gp.dprint_timen("Child timer pid " + str(os.getpid())
+                                + ": Sending SIGUSR1 to parent pid "
+                                + str(parent_pid) + ".")
                 os.kill(parent_pid, signal.SIGUSR1)
                 os._exit(0)
 
diff --git a/lib/gen_arg.py b/lib/gen_arg.py
index 2d2ed68..afa7b57 100755
--- a/lib/gen_arg.py
+++ b/lib/gen_arg.py
@@ -4,13 +4,11 @@
 This module provides valuable argument processing functions like gen_get_options and sprint_args.
 """
 
+import sys
 import os
 import re
-import sys
-
 try:
     import psutil
-
     psutil_imported = True
 except ImportError:
     psutil_imported = False
@@ -18,16 +16,15 @@
     import __builtin__
 except ImportError:
     import builtins as __builtin__
-
-import argparse
 import atexit
 import signal
+import argparse
 import textwrap as textwrap
 
-import gen_cmd as gc
-import gen_misc as gm
 import gen_print as gp
 import gen_valid as gv
+import gen_cmd as gc
+import gen_misc as gm
 
 
 class MultilineFormatter(argparse.HelpFormatter):
@@ -35,20 +32,13 @@
         r"""
         Split text into formatted lines for every "%%n" encountered in the text and return the result.
         """
-        lines = self._whitespace_matcher.sub(" ", text).strip().split("%n")
-        formatted_lines = [
-            textwrap.fill(
-                x, width, initial_indent=indent, subsequent_indent=indent
-            )
-            + "\n"
-            for x in lines
-        ]
-        return "".join(formatted_lines)
+        lines = self._whitespace_matcher.sub(' ', text).strip().split('%n')
+        formatted_lines = \
+            [textwrap.fill(x, width, initial_indent=indent, subsequent_indent=indent) + '\n' for x in lines]
+        return ''.join(formatted_lines)
 
 
-class ArgumentDefaultsHelpMultilineFormatter(
-    MultilineFormatter, argparse.ArgumentDefaultsHelpFormatter
-):
+class ArgumentDefaultsHelpMultilineFormatter(MultilineFormatter, argparse.ArgumentDefaultsHelpFormatter):
     pass
 
 
@@ -56,7 +46,8 @@
 module = sys.modules["__main__"]
 
 
-def gen_get_options(parser, stock_list=[]):
+def gen_get_options(parser,
+                    stock_list=[]):
     r"""
     Parse the command line arguments using the parser object passed and return True/False (i.e. pass/fail).
     However, if gv.exit_on_error is set, simply exit the program on failure.  Also set the following built in
@@ -84,14 +75,11 @@
     # Process stock_list.
     for ix in range(0, len(stock_list)):
         if len(stock_list[ix]) < 1:
-            error_message = (
-                "Programmer error - stock_list["
-                + str(ix)
-                + "] is supposed to be a tuple containing at"
-                + " least one element which is the name of"
-                + " the desired stock parameter:\n"
-                + gp.sprint_var(stock_list)
-            )
+            error_message = "Programmer error - stock_list[" + str(ix) +\
+                            "] is supposed to be a tuple containing at" +\
+                            " least one element which is the name of" +\
+                            " the desired stock parameter:\n" +\
+                            gp.sprint_var(stock_list)
             return gv.process_error_message(error_message)
         if isinstance(stock_list[ix], tuple):
             arg_name = stock_list[ix][0]
@@ -101,86 +89,65 @@
             default = None
 
         if arg_name not in master_stock_list:
-            error_message = (
-                'Programmer error - arg_name "'
-                + arg_name
-                + '" not found found in stock list:\n'
-                + gp.sprint_var(master_stock_list)
-            )
+            error_message = "Programmer error - arg_name \"" + arg_name +\
+                            "\" not found found in stock list:\n" +\
+                            gp.sprint_var(master_stock_list)
             return gv.process_error_message(error_message)
 
         if arg_name == "quiet":
             if default is None:
                 default = 0
             parser.add_argument(
-                "--quiet",
+                '--quiet',
                 default=default,
                 type=int,
                 choices=[1, 0],
                 help='If this parameter is set to "1", %(prog)s'
-                + " will print only essential information, i.e. it will"
-                + " not echo parameters, echo commands, print the total"
-                + " run time, etc."
-                + default_string,
-            )
+                     + ' will print only essential information, i.e. it will'
+                     + ' not echo parameters, echo commands, print the total'
+                     + ' run time, etc.' + default_string)
         elif arg_name == "test_mode":
             if default is None:
                 default = 0
             parser.add_argument(
-                "--test_mode",
+                '--test_mode',
                 default=default,
                 type=int,
                 choices=[1, 0],
-                help="This means that %(prog)s should go through all the"
-                + " motions but not actually do anything substantial."
-                + "  This is mainly to be used by the developer of"
-                + " %(prog)s."
-                + default_string,
-            )
+                help='This means that %(prog)s should go through all the'
+                     + ' motions but not actually do anything substantial.'
+                     + '  This is mainly to be used by the developer of'
+                     + ' %(prog)s.' + default_string)
         elif arg_name == "debug":
             if default is None:
                 default = 0
             parser.add_argument(
-                "--debug",
+                '--debug',
                 default=default,
                 type=int,
                 choices=[1, 0],
                 help='If this parameter is set to "1", %(prog)s will print'
-                + " additional debug information.  This is mainly to be"
-                + " used by the developer of %(prog)s."
-                + default_string,
-            )
+                     + ' additional debug information.  This is mainly to be'
+                     + ' used by the developer of %(prog)s.' + default_string)
         elif arg_name == "loglevel":
             if default is None:
                 default = "info"
             parser.add_argument(
-                "--loglevel",
+                '--loglevel',
                 default=default,
                 type=str,
-                choices=[
-                    "DEBUG",
-                    "INFO",
-                    "WARNING",
-                    "ERROR",
-                    "CRITICAL",
-                    "debug",
-                    "info",
-                    "warning",
-                    "error",
-                    "critical",
-                ],
+                choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL',
+                         'debug', 'info', 'warning', 'error', 'critical'],
                 help='If this parameter is set to "1", %(prog)s will print'
-                + " additional debug information.  This is mainly to be"
-                + " used by the developer of %(prog)s."
-                + default_string,
-            )
+                     + ' additional debug information.  This is mainly to be'
+                     + ' used by the developer of %(prog)s.' + default_string)
 
     arg_obj = parser.parse_args()
 
     __builtin__.quiet = 0
     __builtin__.test_mode = 0
     __builtin__.debug = 0
-    __builtin__.loglevel = "WARNING"
+    __builtin__.loglevel = 'WARNING'
     for ix in range(0, len(stock_list)):
         if isinstance(stock_list[ix], tuple):
             arg_name = stock_list[ix][0]
@@ -203,14 +170,15 @@
     # For each command line parameter, create a corresponding global variable and assign it the appropriate
     # value.  For example, if the command line contained "--last_name='Smith', we'll create a global variable
     # named "last_name" with the value "Smith".
-    module = sys.modules["__main__"]
+    module = sys.modules['__main__']
     for key in arg_obj.__dict__:
         setattr(module, key, getattr(__builtin__.arg_obj, key))
 
     return True
 
 
-def set_pgm_arg(var_value, var_name=None):
+def set_pgm_arg(var_value,
+                var_name=None):
     r"""
     Set the value of the arg_obj.__dict__ entry named in var_name with the var_value provided.  Also, set
     corresponding global variable.
@@ -225,7 +193,7 @@
         var_name = gp.get_arg_name(None, 1, 2)
 
     arg_obj.__dict__[var_name] = var_value
-    module = sys.modules["__main__"]
+    module = sys.modules['__main__']
     setattr(module, var_name, var_value)
     if var_name == "quiet":
         __builtin__.quiet = var_value
@@ -235,7 +203,8 @@
         __builtin__.test_mode = var_value
 
 
-def sprint_args(arg_obj, indent=0):
+def sprint_args(arg_obj,
+                indent=0):
     r"""
     sprint_var all of the arguments found in arg_obj and return the result as a string.
 
@@ -249,9 +218,8 @@
 
     buffer = ""
     for key in arg_obj.__dict__:
-        buffer += gp.sprint_varx(
-            key, getattr(arg_obj, key), 0, indent, col1_width
-        )
+        buffer += gp.sprint_varx(key, getattr(arg_obj, key), 0, indent,
+                                 col1_width)
     return buffer
 
 
@@ -270,7 +238,7 @@
         # Set a default value for dir_path argument.
         dir_path = gm.add_trailing_slash(gm.dft(dir_path, os.getcwd()))
     """
-    module = sys.modules["__main__"]
+    module = sys.modules['__main__']
     for key in arg_obj.__dict__:
         arg_obj.__dict__[key] = getattr(module, key)
 
@@ -298,17 +266,16 @@
     global term_options
     # Validation:
     arg_names = list(kwargs.keys())
-    gv.valid_list(arg_names, ["term_requests"])
-    if type(kwargs["term_requests"]) is dict:
-        keys = list(kwargs["term_requests"].keys())
-        gv.valid_list(keys, ["pgm_names"])
+    gv.valid_list(arg_names, ['term_requests'])
+    if type(kwargs['term_requests']) is dict:
+        keys = list(kwargs['term_requests'].keys())
+        gv.valid_list(keys, ['pgm_names'])
     else:
-        gv.valid_value(kwargs["term_requests"], ["children", "descendants"])
+        gv.valid_value(kwargs['term_requests'], ['children', 'descendants'])
     term_options = kwargs
 
 
 if psutil_imported:
-
     def match_process_by_pgm_name(process, pgm_name):
         r"""
         Return True or False to indicate whether the process matches the program name.
@@ -358,10 +325,9 @@
         # Because "sleep" is a compiled executable, it will appear in entry 0.
 
         optional_dir_path_regex = "(.*/)?"
-        cmdline = process.as_dict()["cmdline"]
-        return re.match(
-            optional_dir_path_regex + pgm_name + "( |$)", cmdline[0]
-        ) or re.match(optional_dir_path_regex + pgm_name + "( |$)", cmdline[1])
+        cmdline = process.as_dict()['cmdline']
+        return re.match(optional_dir_path_regex + pgm_name + '( |$)', cmdline[0]) \
+            or re.match(optional_dir_path_regex + pgm_name + '( |$)', cmdline[1])
 
     def select_processes_by_pgm_name(processes, pgm_name):
         r"""
@@ -374,11 +340,7 @@
                                     object.
         """
 
-        return [
-            process
-            for process in processes
-            if match_process_by_pgm_name(process, pgm_name)
-        ]
+        return [process for process in processes if match_process_by_pgm_name(process, pgm_name)]
 
     def sprint_process_report(pids):
         r"""
@@ -388,10 +350,7 @@
         pids                        A list of process IDs for processes to be included in the report.
         """
         report = "\n"
-        cmd_buf = (
-            "echo ; ps wwo user,pgrp,pid,ppid,lstart,cmd --forest "
-            + " ".join(pids)
-        )
+        cmd_buf = "echo ; ps wwo user,pgrp,pid,ppid,lstart,cmd --forest " + ' '.join(pids)
         report += gp.sprint_issuing(cmd_buf)
         rc, outbuf = gc.shell_cmd(cmd_buf, quiet=1)
         report += outbuf + "\n"
@@ -412,9 +371,7 @@
         descendants = process.children(recursive=True)
         descendant_pids = [str(process.pid) for process in descendants]
         if descendants:
-            process_report = sprint_process_report(
-                [str(process.pid)] + descendant_pids
-            )
+            process_report = sprint_process_report([str(process.pid)] + descendant_pids)
         else:
             process_report = ""
         return descendants, descendant_pids, process_report
@@ -440,15 +397,12 @@
         children it produces.
         """
 
-        message = (
-            "\n" + gp.sprint_dashes(width=120) + gp.sprint_executing() + "\n"
-        )
+        message = "\n" + gp.sprint_dashes(width=120) \
+            + gp.sprint_executing() + "\n"
 
         current_process = psutil.Process()
 
-        descendants, descendant_pids, process_report = get_descendant_info(
-            current_process
-        )
+        descendants, descendant_pids, process_report = get_descendant_info(current_process)
         if not descendants:
             # If there are no descendants, then we have nothing to do.
             return
@@ -456,50 +410,38 @@
         terminate_descendants_temp_file_path = gm.create_temp_file_path()
         gp.print_vars(terminate_descendants_temp_file_path)
 
-        message += (
-            gp.sprint_varx("pgm_name", gp.pgm_name)
-            + gp.sprint_vars(term_options)
+        message += gp.sprint_varx("pgm_name", gp.pgm_name) \
+            + gp.sprint_vars(term_options) \
             + process_report
-        )
 
         # Process the termination requests:
-        if term_options["term_requests"] == "children":
+        if term_options['term_requests'] == 'children':
             term_processes = current_process.children(recursive=False)
             term_pids = [str(process.pid) for process in term_processes]
-        elif term_options["term_requests"] == "descendants":
+        elif term_options['term_requests'] == 'descendants':
             term_processes = descendants
             term_pids = descendant_pids
         else:
             # Process term requests by pgm_names.
             term_processes = []
-            for pgm_name in term_options["term_requests"]["pgm_names"]:
-                term_processes.extend(
-                    select_processes_by_pgm_name(descendants, pgm_name)
-                )
+            for pgm_name in term_options['term_requests']['pgm_names']:
+                term_processes.extend(select_processes_by_pgm_name(descendants, pgm_name))
             term_pids = [str(process.pid) for process in term_processes]
 
-        message += gp.sprint_timen(
-            "Processes to be terminated:"
-        ) + gp.sprint_var(term_pids)
+        message += gp.sprint_timen("Processes to be terminated:") \
+            + gp.sprint_var(term_pids)
         for process in term_processes:
             process.terminate()
-        message += gp.sprint_timen(
-            "Waiting on the following pids: " + " ".join(descendant_pids)
-        )
+        message += gp.sprint_timen("Waiting on the following pids: " + ' '.join(descendant_pids))
         gm.append_file(terminate_descendants_temp_file_path, message)
         psutil.wait_procs(descendants)
 
         # Checking after the fact to see whether any descendant processes are still alive.  If so, a process
         # report showing this will be included in the output.
-        descendants, descendant_pids, process_report = get_descendant_info(
-            current_process
-        )
+        descendants, descendant_pids, process_report = get_descendant_info(current_process)
         if descendants:
-            message = (
-                "\n"
-                + gp.sprint_timen("Not all of the processes terminated:")
+            message = "\n" + gp.sprint_timen("Not all of the processes terminated:") \
                 + process_report
-            )
             gm.append_file(terminate_descendants_temp_file_path, message)
 
         message = gp.sprint_dashes(width=120)
@@ -528,7 +470,8 @@
     gp.qprint_pgm_footer()
 
 
-def gen_signal_handler(signal_number, frame):
+def gen_signal_handler(signal_number,
+                       frame):
     r"""
     Handle signals.  Without a function to catch a SIGTERM or SIGINT, the program would terminate immediately
     with return code 143 and without calling the exit_function.
@@ -543,7 +486,8 @@
     exit(0)
 
 
-def gen_post_validation(exit_function=None, signal_handler=None):
+def gen_post_validation(exit_function=None,
+                        signal_handler=None):
     r"""
     Do generic post-validation processing.  By "post", we mean that this is to be called from a validation
     function after the caller has done any validation desired.  If the calling program passes exit_function
diff --git a/lib/gen_call_robot.py b/lib/gen_call_robot.py
index e873c52..2e51626 100755
--- a/lib/gen_call_robot.py
+++ b/lib/gen_call_robot.py
@@ -5,22 +5,21 @@
 robot program calls.
 """
 
-import imp
-import os
-import re
-import subprocess
 import sys
+import os
+import subprocess
+import re
 import time
+import imp
 
-import gen_cmd as gc
-import gen_misc as gm
 import gen_print as gp
 import gen_valid as gv
+import gen_misc as gm
+import gen_cmd as gc
 
-base_path = (
-    os.path.dirname(os.path.dirname(imp.find_module("gen_robot_print")[1]))
-    + os.sep
-)
+base_path = \
+    os.path.dirname(os.path.dirname(imp.find_module("gen_robot_print")[1])) +\
+    os.sep
 
 
 def init_robot_out_parms(extra_prefix=""):
@@ -45,19 +44,17 @@
     # Environment variable TMP_ROBOT_DIR_PATH can be set by the user to indicate that robot-generated output
     # should initially be written to the specified temporary directory and then moved to the normal output
     # location after completion.
-    outputdir = os.environ.get(
-        "TMP_ROBOT_DIR_PATH",
-        os.environ.get(
-            "STATUS_DIR_PATH", os.environ.get("HOME", ".") + "/status"
-        ),
-    )
+    outputdir =\
+        os.environ.get("TMP_ROBOT_DIR_PATH",
+                       os.environ.get("STATUS_DIR_PATH",
+                                      os.environ.get("HOME", ".")
+                                      + "/status"))
     outputdir = gm.add_trailing_slash(outputdir)
     seconds = time.time()
     loc_time = time.localtime(seconds)
     time_string = time.strftime("%y%m%d.%H%M%S", loc_time)
-    file_prefix = (
-        AUTOBOOT_OPENBMC_NICKNAME + "." + extra_prefix + time_string + "."
-    )
+    file_prefix = AUTOBOOT_OPENBMC_NICKNAME + "." + extra_prefix +\
+        time_string + "."
     # Environment variable SAVE_STATUS_POLICY governs when robot-generated output files (e.g. the log.html)
     # will be moved from TMP_ROBOT_DIR_PATH to FFDC_DIR_PATH.  Valid values are "ALWAYS", "NEVER" and "FAIL".
     SAVE_STATUS_POLICY = os.environ.get("SAVE_STATUS_POLICY", "ALWAYS")
@@ -70,8 +67,8 @@
         log = file_prefix + "log.html"
         report = file_prefix + "report.html"
     loglevel = "TRACE"
-    consolecolors = "off"
-    consolemarkers = "off"
+    consolecolors = 'off'
+    consolemarkers = 'off'
 
     # Make create_robot_cmd_string values global.
     gm.set_mod_global(outputdir)
@@ -82,15 +79,7 @@
     gm.set_mod_global(consolecolors)
     gm.set_mod_global(consolemarkers)
 
-    return (
-        outputdir,
-        output,
-        log,
-        report,
-        loglevel,
-        consolecolors,
-        consolemarkers,
-    )
+    return outputdir, output, log, report, loglevel, consolecolors, consolemarkers
 
 
 def init_robot_test_base_dir_path():
@@ -111,13 +100,12 @@
     # - Not in user sandbox:
     #   ROBOT_TEST_BASE_DIR_PATH will be set to <program dir path>/git/openbmc-test-automation/
 
-    ROBOT_TEST_BASE_DIR_PATH = os.environ.get("ROBOT_TEST_BASE_DIR_PATH", "")
-    ROBOT_TEST_RUNNING_FROM_SB = int(
-        os.environ.get("ROBOT_TEST_RUNNING_FROM_SB", "0")
-    )
+    ROBOT_TEST_BASE_DIR_PATH = os.environ.get('ROBOT_TEST_BASE_DIR_PATH', "")
+    ROBOT_TEST_RUNNING_FROM_SB = \
+        int(os.environ.get('ROBOT_TEST_RUNNING_FROM_SB', "0"))
     if ROBOT_TEST_BASE_DIR_PATH == "":
         # ROBOT_TEST_BASE_DIR_PATH was not set by user/caller.
-        AUTOIPL_VERSION = os.environ.get("AUTOIPL_VERSION", "")
+        AUTOIPL_VERSION = os.environ.get('AUTOIPL_VERSION', '')
         if AUTOIPL_VERSION == "":
             ROBOT_TEST_BASE_DIR_PATH = base_path
         else:
@@ -125,26 +113,17 @@
 
             # Determine whether we're running out of a developer sandbox or simply out of an apolloxxx/bin
             # path.
-            shell_rc, out_buf = gc.shell_cmd(
-                "dirname $(which gen_print.py)",
-                quiet=(not debug),
-                print_output=0,
-            )
+            shell_rc, out_buf = gc.shell_cmd('dirname $(which gen_print.py)',
+                                             quiet=(not debug), print_output=0)
             executable_base_dir_path = os.path.realpath(out_buf.rstrip()) + "/"
-            apollo_dir_path = (
-                os.environ["AUTO_BASE_PATH"] + AUTOIPL_VERSION + "/bin/"
-            )
-            developer_home_dir_path = re.sub(
-                "/sandbox.*", "", executable_base_dir_path
-            )
-            developer_home_dir_path = gm.add_trailing_slash(
-                developer_home_dir_path
-            )
-            gp.dprint_vars(
-                executable_base_dir_path,
-                developer_home_dir_path,
-                apollo_dir_path,
-            )
+            apollo_dir_path = os.environ['AUTO_BASE_PATH'] + AUTOIPL_VERSION +\
+                "/bin/"
+            developer_home_dir_path = re.sub('/sandbox.*', '',
+                                             executable_base_dir_path)
+            developer_home_dir_path = \
+                gm.add_trailing_slash(developer_home_dir_path)
+            gp.dprint_vars(executable_base_dir_path, developer_home_dir_path,
+                           apollo_dir_path)
 
             ROBOT_TEST_RUNNING_FROM_SB = 0
             if executable_base_dir_path != apollo_dir_path:
@@ -152,61 +131,46 @@
                 gp.dprint_vars(ROBOT_TEST_RUNNING_FROM_SB)
                 ROBOT_TEST_BASE_DIR_PATH = developer_home_dir_path + suffix
                 if not os.path.isdir(ROBOT_TEST_BASE_DIR_PATH):
-                    gp.dprint_timen(
-                        "NOTE: Sandbox directory "
-                        + ROBOT_TEST_BASE_DIR_PATH
-                        + " does not"
-                        + " exist."
-                    )
+                    gp.dprint_timen("NOTE: Sandbox directory "
+                                    + ROBOT_TEST_BASE_DIR_PATH + " does not"
+                                    + " exist.")
                     # Fall back to the apollo dir path.
                     ROBOT_TEST_BASE_DIR_PATH = apollo_dir_path + suffix
             else:
                 # Use to the apollo dir path.
                 ROBOT_TEST_BASE_DIR_PATH = apollo_dir_path + suffix
 
-    OBMC_TOOLS_BASE_DIR_PATH = (
-        os.path.dirname(ROBOT_TEST_BASE_DIR_PATH.rstrip("/"))
+    OBMC_TOOLS_BASE_DIR_PATH = \
+        os.path.dirname(ROBOT_TEST_BASE_DIR_PATH.rstrip("/")) \
         + "/openbmc-tools/"
-    )
     OPENBMCTOOL_DIR_PATH = OBMC_TOOLS_BASE_DIR_PATH + "openbmctool/"
-    JSON_CHECKER_TOOLS_DIR_PATH = (
-        OBMC_TOOLS_BASE_DIR_PATH + "expectedJsonChecker/"
-    )
+    JSON_CHECKER_TOOLS_DIR_PATH = OBMC_TOOLS_BASE_DIR_PATH + "expectedJsonChecker/"
 
     gv.valid_value(ROBOT_TEST_BASE_DIR_PATH)
-    gp.dprint_vars(
-        ROBOT_TEST_RUNNING_FROM_SB,
-        ROBOT_TEST_BASE_DIR_PATH,
-        OBMC_TOOLS_BASE_DIR_PATH,
-        OPENBMCTOOL_DIR_PATH,
-        JSON_CHECKER_TOOLS_DIR_PATH,
-    )
+    gp.dprint_vars(ROBOT_TEST_RUNNING_FROM_SB, ROBOT_TEST_BASE_DIR_PATH, OBMC_TOOLS_BASE_DIR_PATH,
+                   OPENBMCTOOL_DIR_PATH, JSON_CHECKER_TOOLS_DIR_PATH)
     gv.valid_dir_path(ROBOT_TEST_BASE_DIR_PATH)
 
     ROBOT_TEST_BASE_DIR_PATH = gm.add_trailing_slash(ROBOT_TEST_BASE_DIR_PATH)
     gm.set_mod_global(ROBOT_TEST_BASE_DIR_PATH)
-    os.environ["ROBOT_TEST_BASE_DIR_PATH"] = ROBOT_TEST_BASE_DIR_PATH
+    os.environ['ROBOT_TEST_BASE_DIR_PATH'] = ROBOT_TEST_BASE_DIR_PATH
 
     gm.set_mod_global(ROBOT_TEST_RUNNING_FROM_SB)
-    os.environ["ROBOT_TEST_RUNNING_FROM_SB"] = str(ROBOT_TEST_RUNNING_FROM_SB)
+    os.environ['ROBOT_TEST_RUNNING_FROM_SB'] = str(ROBOT_TEST_RUNNING_FROM_SB)
 
     gm.set_mod_global(OBMC_TOOLS_BASE_DIR_PATH)
-    os.environ["OBMC_TOOLS_BASE_DIR_PATH"] = str(OBMC_TOOLS_BASE_DIR_PATH)
+    os.environ['OBMC_TOOLS_BASE_DIR_PATH'] = str(OBMC_TOOLS_BASE_DIR_PATH)
 
     gm.set_mod_global(OPENBMCTOOL_DIR_PATH)
-    os.environ["OPENBMCTOOL_DIR_PATH"] = str(OPENBMCTOOL_DIR_PATH)
+    os.environ['OPENBMCTOOL_DIR_PATH'] = str(OPENBMCTOOL_DIR_PATH)
 
     gm.set_mod_global(JSON_CHECKER_TOOLS_DIR_PATH)
-    os.environ["JSON_CHECKER_TOOLS_DIR_PATH"] = str(
-        JSON_CHECKER_TOOLS_DIR_PATH
-    )
+    os.environ['JSON_CHECKER_TOOLS_DIR_PATH'] = str(JSON_CHECKER_TOOLS_DIR_PATH)
 
 
-raw_robot_file_search_path = (
-    "${ROBOT_TEST_BASE_DIR_PATH}:"
-    + "${ROBOT_TEST_BASE_DIR_PATH}tests:${ROBOT_TEST_BASE_DIR_PATH}extended:"
-    + "${ROBOT_TEST_BASE_DIR_PATH}scratch:${PATH}"
-)
+raw_robot_file_search_path = "${ROBOT_TEST_BASE_DIR_PATH}:" +\
+    "${ROBOT_TEST_BASE_DIR_PATH}tests:${ROBOT_TEST_BASE_DIR_PATH}extended:" +\
+    "${ROBOT_TEST_BASE_DIR_PATH}scratch:${PATH}"
 
 
 def init_robot_file_path(robot_file_path):
@@ -242,13 +206,12 @@
     gp.dprint_vars(abs_path, robot_file_path)
 
     if not abs_path:
-        cmd_buf = 'echo -n "' + raw_robot_file_search_path + '"'
-        shell_rc, out_buf = gc.shell_cmd(
-            cmd_buf, quiet=(not debug), print_output=0
-        )
+        cmd_buf = "echo -n \"" + raw_robot_file_search_path + "\""
+        shell_rc, out_buf = gc.shell_cmd(cmd_buf, quiet=(not debug),
+                                         print_output=0)
         robot_file_search_paths = out_buf
         gp.dprint_var(robot_file_search_paths)
-        robot_file_search_paths_list = robot_file_search_paths.split(":")
+        robot_file_search_paths_list = robot_file_search_paths.split(':')
         for search_path in robot_file_search_paths_list:
             search_path = gm.add_trailing_slash(search_path)
             candidate_file_path = search_path + robot_file_path
@@ -270,11 +233,9 @@
     Double dashes are not included in the names returned.
     """
 
-    cmd_buf = (
-        "robot -h | egrep "
-        + "'^([ ]\\-[a-zA-Z0-9])?[ ]+--[a-zA-Z0-9]+[ ]+' | sed -re"
-        + " s'/.*\\-\\-//g' -e s'/ .*//g' | sort -u"
-    )
+    cmd_buf = "robot -h | egrep " +\
+        "'^([ ]\\-[a-zA-Z0-9])?[ ]+--[a-zA-Z0-9]+[ ]+' | sed -re" +\
+        " s'/.*\\-\\-//g' -e s'/ .*//g' | sort -u"
     shell_rc, out_buf = gc.shell_cmd(cmd_buf, quiet=1, print_output=0)
 
     return out_buf.split("\n")
@@ -324,9 +285,8 @@
             robot_parm_list.append(p_string)
         ix += 1
 
-    robot_cmd_buf = (
-        "robot " + " ".join(robot_parm_list) + " " + robot_file_path
-    )
+    robot_cmd_buf = "robot " + ' '.join(robot_parm_list) + " " +\
+        robot_file_path
 
     return robot_cmd_buf
 
@@ -336,7 +296,9 @@
 gcr_last_robot_rc = 0
 
 
-def process_robot_output_files(robot_cmd_buf=None, robot_rc=None, gzip=None):
+def process_robot_output_files(robot_cmd_buf=None,
+                               robot_rc=None,
+                               gzip=None):
     r"""
     Process robot output files which can involve several operations:
     - If the files are in a temporary location, using SAVE_STATUS_POLICY to decide whether to move them to a
@@ -367,36 +329,25 @@
 
     # Compose file_list based on robot command buffer passed in.
     robot_cmd_buf_dict = gc.parse_command_string(robot_cmd_buf)
-    outputdir = robot_cmd_buf_dict["outputdir"]
+    outputdir = robot_cmd_buf_dict['outputdir']
     outputdir = gm.add_trailing_slash(outputdir)
-    file_list = (
-        outputdir
-        + robot_cmd_buf_dict["output"]
-        + " "
-        + outputdir
-        + robot_cmd_buf_dict["log"]
-        + " "
-        + outputdir
-        + robot_cmd_buf_dict["report"]
-    )
+    file_list = outputdir + robot_cmd_buf_dict['output'] + " " + outputdir\
+        + robot_cmd_buf_dict['log'] + " " + outputdir\
+        + robot_cmd_buf_dict['report']
 
     # Double checking that files are present.
-    shell_rc, out_buf = gc.shell_cmd(
-        "ls -1 " + file_list + " 2>/dev/null", show_err=0
-    )
+    shell_rc, out_buf = gc.shell_cmd("ls -1 " + file_list + " 2>/dev/null",
+                                     show_err=0)
     file_list = re.sub("\n", " ", out_buf.rstrip("\n"))
 
     if file_list == "":
-        gp.qprint_timen(
-            "No robot output files were found in " + outputdir + "."
-        )
+        gp.qprint_timen("No robot output files were found in " + outputdir
+                        + ".")
         return
     gp.qprint_var(robot_rc, gp.hexa())
     if SAVE_STATUS_POLICY == "FAIL" and robot_rc == 0:
-        gp.qprint_timen(
-            "The call to robot produced no failures."
-            + "  Deleting robot output files."
-        )
+        gp.qprint_timen("The call to robot produced no failures."
+                        + "  Deleting robot output files.")
         gc.shell_cmd("rm -rf " + file_list)
         return
 
@@ -412,29 +363,23 @@
         return
 
     # We're directing these to the FFDC dir path so that they'll be subjected to FFDC cleanup.
-    target_dir_path = os.environ.get(
-        "FFDC_DIR_PATH", os.environ.get("HOME", ".") + "/ffdc"
-    )
+    target_dir_path = os.environ.get("FFDC_DIR_PATH",
+                                     os.environ.get("HOME", ".")
+                                     + "/ffdc")
     target_dir_path = gm.add_trailing_slash(target_dir_path)
 
-    targ_file_list = [
-        re.sub(".*/", target_dir_path, x) for x in file_list.split(" ")
-    ]
+    targ_file_list = [re.sub(".*/", target_dir_path, x)
+                      for x in file_list.split(" ")]
 
-    gc.shell_cmd(
-        "mv " + file_list + " " + target_dir_path + " >/dev/null", time_out=600
-    )
+    gc.shell_cmd("mv " + file_list + " " + target_dir_path + " >/dev/null",
+                 time_out=600)
 
     gp.qprint_timen("New robot log file locations:")
-    gp.qprintn("\n".join(targ_file_list))
+    gp.qprintn('\n'.join(targ_file_list))
 
 
-def robot_cmd_fnc(
-    robot_cmd_buf,
-    robot_jail=os.environ.get("ROBOT_JAIL", ""),
-    quiet=None,
-    test_mode=0,
-):
+def robot_cmd_fnc(robot_cmd_buf,
+                  robot_jail=os.environ.get('ROBOT_JAIL', ''), quiet=None, test_mode=0):
     r"""
     Run the robot command string.
 
@@ -448,7 +393,7 @@
     test_mode                       If test_mode is set, this function will not actually run the command.
     """
 
-    quiet = int(gm.dft(quiet, gp.get_stack_var("quiet", 0)))
+    quiet = int(gm.dft(quiet, gp.get_stack_var('quiet', 0)))
     gv.valid_value(robot_cmd_buf)
 
     # Set global variables to aid in cleanup with process_robot_output_files.
@@ -464,9 +409,7 @@
         init_robot_test_base_dir_path()
         ROBOT_TEST_BASE_DIR_PATH = getattr(module, "ROBOT_TEST_BASE_DIR_PATH")
 
-    ROBOT_TEST_RUNNING_FROM_SB = gm.get_mod_global(
-        "ROBOT_TEST_RUNNING_FROM_SB"
-    )
+    ROBOT_TEST_RUNNING_FROM_SB = gm.get_mod_global("ROBOT_TEST_RUNNING_FROM_SB")
     OPENBMCTOOL_DIR_PATH = gm.get_mod_global("OPENBMCTOOL_DIR_PATH")
 
     if robot_jail == "":
@@ -476,13 +419,9 @@
             robot_jail = 1
 
     robot_jail = int(robot_jail)
-    ROBOT_JAIL = os.environ.get("ROBOT_JAIL", "")
-    gp.dprint_vars(
-        ROBOT_TEST_BASE_DIR_PATH,
-        ROBOT_TEST_RUNNING_FROM_SB,
-        ROBOT_JAIL,
-        robot_jail,
-    )
+    ROBOT_JAIL = os.environ.get('ROBOT_JAIL', '')
+    gp.dprint_vars(ROBOT_TEST_BASE_DIR_PATH, ROBOT_TEST_RUNNING_FROM_SB,
+                   ROBOT_JAIL, robot_jail)
 
     # Save PATH and PYTHONPATH to be restored later.
     os.environ["SAVED_PYTHONPATH"] = os.environ.get("PYTHONPATH", "")
@@ -494,50 +433,28 @@
         # It is expected that there will be a "python" program in the tool base bin path which is really a
         # link to select_version.  Ditto for "robot".  Call each with the --print_only option to get the
         # paths to the "real" programs.
-        cmd_buf = (
-            "for program in "
-            + required_programs
+        cmd_buf = "for program in " + required_programs \
             + " ; do dirname $(${program} --print_only) ; done 2>/dev/null"
-        )
         rc, out_buf = gc.shell_cmd(cmd_buf, quiet=1, print_output=0)
         PYTHONPATH = ROBOT_TEST_BASE_DIR_PATH + "lib"
         NEW_PATH_LIST = [ROBOT_TEST_BASE_DIR_PATH + "bin"]
         NEW_PATH_LIST.extend(list(set(out_buf.rstrip("\n").split("\n"))))
-        NEW_PATH_LIST.extend(
-            [
-                "/usr/local/sbin",
-                "/usr/local/bin",
-                "/usr/sbin",
-                "/usr/bin",
-                "/sbin",
-                "/bin",
-                OPENBMCTOOL_DIR_PATH.rstrip("/"),
-            ]
-        )
+        NEW_PATH_LIST.extend(["/usr/local/sbin", "/usr/local/bin", "/usr/sbin",
+                              "/usr/bin", "/sbin", "/bin",
+                              OPENBMCTOOL_DIR_PATH.rstrip('/')])
         PATH = ":".join(NEW_PATH_LIST)
     else:
-        PYTHONPATH = (
-            os.environ.get("PYTHONPATH", "")
-            + ":"
-            + ROBOT_TEST_BASE_DIR_PATH
-            + "lib"
-        )
-        PATH = (
-            os.environ.get("PATH", "")
-            + ":"
-            + ROBOT_TEST_BASE_DIR_PATH
-            + "bin"
-            + ":"
-            + OPENBMCTOOL_DIR_PATH.rstrip("/")
-        )
+        PYTHONPATH = os.environ.get('PYTHONPATH', '') + ":" +\
+            ROBOT_TEST_BASE_DIR_PATH + "lib"
+        PATH = os.environ.get('PATH', '') + ":" + ROBOT_TEST_BASE_DIR_PATH +\
+            "bin" + ":" + OPENBMCTOOL_DIR_PATH.rstrip('/')
 
-    os.environ["PYTHONPATH"] = PYTHONPATH
-    os.environ["PATH"] = PATH
+    os.environ['PYTHONPATH'] = PYTHONPATH
+    os.environ['PATH'] = PATH
     gp.dprint_vars(PATH, PYTHONPATH)
 
-    os.environ["FFDC_DIR_PATH_STYLE"] = os.environ.get(
-        "FFDC_DIR_PATH_STYLE", "1"
-    )
+    os.environ['FFDC_DIR_PATH_STYLE'] = os.environ.get('FFDC_DIR_PATH_STYLE',
+                                                       '1')
     gp.qpissuing(robot_cmd_buf, test_mode)
     if test_mode:
         os.environ["PATH"] = os.environ.get("SAVED_PATH", "")
@@ -545,7 +462,7 @@
         return True
 
     if quiet:
-        DEVNULL = open(os.devnull, "wb")
+        DEVNULL = open(os.devnull, 'wb')
         stdout = DEVNULL
     else:
         stdout = None
diff --git a/lib/gen_cmd.py b/lib/gen_cmd.py
index 03e8a11..cac5ba4 100644
--- a/lib/gen_cmd.py
+++ b/lib/gen_cmd.py
@@ -4,19 +4,19 @@
 This module provides command execution functions such as cmd_fnc and cmd_fnc_u.
 """
 
-import collections
-import inspect
 import os
-import re
-import signal
-import subprocess
 import sys
+import subprocess
+import collections
+import signal
 import time
+import re
+import inspect
 
-import func_args as fa
-import gen_misc as gm
 import gen_print as gp
 import gen_valid as gv
+import gen_misc as gm
+import func_args as fa
 
 robot_env = gp.robot_env
 
@@ -26,16 +26,14 @@
 
 # cmd_fnc and cmd_fnc_u should now be considered deprecated.  shell_cmd and t_shell_cmd should be used
 # instead.
-def cmd_fnc(
-    cmd_buf,
-    quiet=None,
-    test_mode=None,
-    debug=0,
-    print_output=1,
-    show_err=1,
-    return_stderr=0,
-    ignore_err=1,
-):
+def cmd_fnc(cmd_buf,
+            quiet=None,
+            test_mode=None,
+            debug=0,
+            print_output=1,
+            show_err=1,
+            return_stderr=0,
+            ignore_err=1):
     r"""
     Run the given command in a shell and return the shell return code and the output.
 
@@ -82,15 +80,13 @@
     else:
         stderr = subprocess.STDOUT
 
-    sub_proc = subprocess.Popen(
-        cmd_buf,
-        bufsize=1,
-        shell=True,
-        universal_newlines=True,
-        executable="/bin/bash",
-        stdout=subprocess.PIPE,
-        stderr=stderr,
-    )
+    sub_proc = subprocess.Popen(cmd_buf,
+                                bufsize=1,
+                                shell=True,
+                                universal_newlines=True,
+                                executable='/bin/bash',
+                                stdout=subprocess.PIPE,
+                                stderr=stderr)
     out_buf = ""
     if return_stderr:
         for line in sub_proc.stderr:
@@ -135,31 +131,22 @@
         return shell_rc, out_buf
 
 
-def cmd_fnc_u(
-    cmd_buf,
-    quiet=None,
-    debug=None,
-    print_output=1,
-    show_err=1,
-    return_stderr=0,
-    ignore_err=1,
-):
+def cmd_fnc_u(cmd_buf,
+              quiet=None,
+              debug=None,
+              print_output=1,
+              show_err=1,
+              return_stderr=0,
+              ignore_err=1):
     r"""
     Call cmd_fnc with test_mode=0.  See cmd_fnc (above) for details.
 
     Note the "u" in "cmd_fnc_u" stands for "unconditional".
     """
 
-    return cmd_fnc(
-        cmd_buf,
-        test_mode=0,
-        quiet=quiet,
-        debug=debug,
-        print_output=print_output,
-        show_err=show_err,
-        return_stderr=return_stderr,
-        ignore_err=ignore_err,
-    )
+    return cmd_fnc(cmd_buf, test_mode=0, quiet=quiet, debug=debug,
+                   print_output=print_output, show_err=show_err,
+                   return_stderr=return_stderr, ignore_err=ignore_err)
 
 
 def parse_command_string(command_string):
@@ -213,18 +200,16 @@
 
     # We want the parms in the string broken down the way bash would do it, so we'll call upon bash to do
     # that by creating a simple inline bash function.
-    bash_func_def = (
-        'function parse { for parm in "${@}" ; do' + " echo $parm ; done ; }"
-    )
+    bash_func_def = "function parse { for parm in \"${@}\" ; do" +\
+        " echo $parm ; done ; }"
 
-    rc, outbuf = cmd_fnc_u(
-        bash_func_def + " ; parse " + command_string, quiet=1, print_output=0
-    )
+    rc, outbuf = cmd_fnc_u(bash_func_def + " ; parse " + command_string,
+                           quiet=1, print_output=0)
     command_string_list = outbuf.rstrip("\n").split("\n")
 
     command_string_dict = collections.OrderedDict()
     ix = 1
-    command_string_dict["command"] = command_string_list[0]
+    command_string_dict['command'] = command_string_list[0]
     while ix < len(command_string_list):
         if command_string_list[ix].startswith("--"):
             key, value = command_string_list[ix].split("=")
@@ -237,7 +222,7 @@
             except IndexError:
                 value = ""
         else:
-            key = "positional"
+            key = 'positional'
             value = command_string_list[ix]
         if key in command_string_dict:
             if isinstance(command_string_dict[key], str):
@@ -254,7 +239,8 @@
 original_sigalrm_handler = signal.getsignal(signal.SIGALRM)
 
 
-def shell_cmd_timed_out(signal_number, frame):
+def shell_cmd_timed_out(signal_number,
+                        frame):
     r"""
     Handle an alarm signal generated during the shell_cmd function.
     """
@@ -263,7 +249,7 @@
     global command_timed_out
     command_timed_out = True
     # Get subprocess pid from shell_cmd's call stack.
-    sub_proc = gp.get_stack_var("sub_proc", 0)
+    sub_proc = gp.get_stack_var('sub_proc', 0)
     pid = sub_proc.pid
     gp.dprint_var(pid)
     # Terminate the child process group.
@@ -274,21 +260,19 @@
     return
 
 
-def shell_cmd(
-    command_string,
-    quiet=None,
-    print_output=None,
-    show_err=1,
-    test_mode=0,
-    time_out=None,
-    max_attempts=1,
-    retry_sleep_time=5,
-    valid_rcs=[0],
-    ignore_err=None,
-    return_stderr=0,
-    fork=0,
-    error_regexes=None,
-):
+def shell_cmd(command_string,
+              quiet=None,
+              print_output=None,
+              show_err=1,
+              test_mode=0,
+              time_out=None,
+              max_attempts=1,
+              retry_sleep_time=5,
+              valid_rcs=[0],
+              ignore_err=None,
+              return_stderr=0,
+              fork=0,
+              error_regexes=None):
     r"""
     Run the given command string in a shell and return a tuple consisting of the shell return code and the
     output.
@@ -346,10 +330,10 @@
         raise ValueError(err_msg)
 
     # Assign default values to some of the arguments to this function.
-    quiet = int(gm.dft(quiet, gp.get_stack_var("quiet", 0)))
+    quiet = int(gm.dft(quiet, gp.get_stack_var('quiet', 0)))
     print_output = int(gm.dft(print_output, not quiet))
     show_err = int(show_err)
-    ignore_err = int(gm.dft(ignore_err, gp.get_stack_var("ignore_err", 1)))
+    ignore_err = int(gm.dft(ignore_err, gp.get_stack_var('ignore_err', 1)))
 
     gp.qprint_issuing(command_string, test_mode)
     if test_mode:
@@ -369,16 +353,14 @@
     command_timed_out = False
     func_out_history_buf = ""
     for attempt_num in range(1, max_attempts + 1):
-        sub_proc = subprocess.Popen(
-            command_string,
-            bufsize=1,
-            shell=True,
-            universal_newlines=True,
-            executable="/bin/bash",
-            stdin=subprocess.PIPE,
-            stdout=subprocess.PIPE,
-            stderr=stderr,
-        )
+        sub_proc = subprocess.Popen(command_string,
+                                    bufsize=1,
+                                    shell=True,
+                                    universal_newlines=True,
+                                    executable='/bin/bash',
+                                    stdin=subprocess.PIPE,
+                                    stdout=subprocess.PIPE,
+                                    stderr=stderr)
         if fork:
             return sub_proc
 
@@ -405,7 +387,7 @@
         shell_rc = sub_proc.returncode
         if shell_rc in valid_rcs:
             # Check output for text indicating there is an error.
-            if error_regexes and re.match("|".join(error_regexes), stdout_buf):
+            if error_regexes and re.match('|'.join(error_regexes), stdout_buf):
                 shell_rc = -1
             else:
                 break
@@ -433,25 +415,21 @@
         gp.gp_print(func_out_buf)
     else:
         if show_err:
-            gp.gp_print(func_out_history_buf, stream="stderr")
+            gp.gp_print(func_out_history_buf, stream='stderr')
         else:
             # There is no error information to show so just print output from last loop iteration.
             gp.gp_print(func_out_buf)
         if not ignore_err:
             # If the caller has already asked to show error info, avoid repeating that in the failure message.
-            err_msg = (
-                "The prior shell command failed.\n" if show_err else err_msg
-            )
+            err_msg = "The prior shell command failed.\n" if show_err \
+                else err_msg
             if robot_env:
                 BuiltIn().fail(err_msg)
             else:
                 raise ValueError(err_msg)
 
-    return (
-        (shell_rc, stdout_buf, stderr_buf)
-        if return_stderr
+    return (shell_rc, stdout_buf, stderr_buf) if return_stderr \
         else (shell_rc, stdout_buf)
-    )
 
 
 def t_shell_cmd(command_string, **kwargs):
@@ -462,16 +440,14 @@
     See shell_cmd prolog for details on all arguments.
     """
 
-    if "test_mode" in kwargs:
-        error_message = (
-            "Programmer error - test_mode is not a valid"
-            + " argument to this function."
-        )
+    if 'test_mode' in kwargs:
+        error_message = "Programmer error - test_mode is not a valid" +\
+            " argument to this function."
         gp.print_error_report(error_message)
         exit(1)
 
-    test_mode = int(gp.get_stack_var("test_mode", 0))
-    kwargs["test_mode"] = test_mode
+    test_mode = int(gp.get_stack_var('test_mode', 0))
+    kwargs['test_mode'] = test_mode
 
     return shell_cmd(command_string, **kwargs)
 
@@ -545,9 +521,8 @@
     new_kwargs = collections.OrderedDict()
 
     # Get position number of first keyword on the calling line of code.
-    (args, varargs, keywords, locals) = inspect.getargvalues(
-        inspect.stack()[stack_frame_ix][0]
-    )
+    (args, varargs, keywords, locals) =\
+        inspect.getargvalues(inspect.stack()[stack_frame_ix][0])
     first_kwarg_pos = 1 + len(args)
     if varargs is not None:
         first_kwarg_pos += len(locals[varargs])
@@ -556,7 +531,7 @@
         arg_name = gp.get_arg_name(None, arg_num, stack_frame_ix + 2)
         # Continuing with the prior example, the following line will result
         # in key being set to 'arg1'.
-        key = arg_name.split("=")[0]
+        key = arg_name.split('=')[0]
         new_kwargs[key] = kwargs[key]
 
     return new_kwargs
@@ -709,7 +684,7 @@
         del pos_parms[-1]
     else:
         # Either get stack_frame_ix from the caller via options or set it to the default value.
-        stack_frame_ix = options.pop("_stack_frame_ix_", 1)
+        stack_frame_ix = options.pop('_stack_frame_ix_', 1)
         if gm.python_version < gm.ordered_dict_version:
             # Re-establish the original options order as specified on the original line of code.  This
             # function depends on correct order.
@@ -734,6 +709,6 @@
                 command_string += gm.quote_bash_parm(str(value))
     # Finally, append the pos_parms to the end of the command_string.  Use filter to eliminate blank pos
     # parms.
-    command_string = " ".join([command_string] + list(filter(None, pos_parms)))
+    command_string = ' '.join([command_string] + list(filter(None, pos_parms)))
 
     return command_string
diff --git a/lib/gen_misc.py b/lib/gen_misc.py
index d7a6373..ad5beea 100755
--- a/lib/gen_misc.py
+++ b/lib/gen_misc.py
@@ -4,18 +4,16 @@
 This module provides many valuable functions such as my_parm_file.
 """
 
-import collections
-import errno
-import inspect
-import json
-import os
-import random
-import shutil
-
 # sys and os are needed to get the program dir path and program name.
 import sys
+import errno
+import os
+import shutil
+import collections
+import json
 import time
-
+import inspect
+import random
 try:
     import ConfigParser
 except ImportError:
@@ -24,20 +22,17 @@
     import StringIO
 except ImportError:
     import io
-
 import re
 import socket
 import tempfile
-
 try:
     import psutil
-
     psutil_imported = True
 except ImportError:
     psutil_imported = False
 
-import gen_cmd as gc
 import gen_print as gp
+import gen_cmd as gc
 
 robot_env = gp.robot_env
 if robot_env:
@@ -70,7 +65,7 @@
     mode                            The mode or permissions to be granted to the created directories.
     quiet                           Indicates whether this function should run the print_issuing() function.
     """
-    quiet = int(dft(quiet, gp.get_stack_var("quiet", 0)))
+    quiet = int(dft(quiet, gp.get_stack_var('quiet', 0)))
     gp.qprint_issuing("os.makedirs('" + path + "', mode=" + oct(mode) + ")")
     try:
         os.makedirs(path, mode)
@@ -89,11 +84,9 @@
     (All parms are passed directly to shutil.rmtree.  See its prolog for details)
     quiet                           Indicates whether this function should run the print_issuing() function.
     """
-    quiet = int(dft(quiet, gp.get_stack_var("quiet", 0)))
+    quiet = int(dft(quiet, gp.get_stack_var('quiet', 0)))
     print_string = gp.sprint_executing(max_width=2000)
-    print_string = re.sub(
-        r"Executing: ", "Issuing: shutil.", print_string.rstrip("\n")
-    )
+    print_string = re.sub(r"Executing: ", "Issuing: shutil.", print_string.rstrip("\n"))
     gp.qprintn(re.sub(r", quiet[ ]?=.*", ")", print_string))
     shutil.rmtree(path, ignore_errors, onerror)
 
@@ -109,7 +102,7 @@
     path                            The path of the directory to change to.
     quiet                           Indicates whether this function should run the print_issuing() function.
     """
-    quiet = int(dft(quiet, gp.get_stack_var("quiet", 0)))
+    quiet = int(dft(quiet, gp.get_stack_var('quiet', 0)))
     gp.qprint_issuing("os.chdir('" + path + "')")
     os.chdir(path)
 
@@ -124,13 +117,11 @@
     file_path                       The relative file path (e.g. "my_file" or "lib/my_file").
     """
 
-    shell_rc, out_buf = gc.cmd_fnc_u(
-        "which " + file_path, quiet=1, print_output=0, show_err=0
-    )
+    shell_rc, out_buf = gc.cmd_fnc_u("which " + file_path, quiet=1,
+                                     print_output=0, show_err=0)
     if shell_rc != 0:
-        error_message = (
-            'Failed to find complete path for file "' + file_path + '".\n'
-        )
+        error_message = "Failed to find complete path for file \"" +\
+                        file_path + "\".\n"
         error_message += gp.sprint_var(shell_rc, gp.hexa())
         error_message += out_buf
         if robot_env:
@@ -144,7 +135,9 @@
     return file_path
 
 
-def add_path(new_path, path, position=0):
+def add_path(new_path,
+             path,
+             position=0):
     r"""
     Add new_path to path, provided that path doesn't already contain new_path, and return the result.
 
@@ -190,7 +183,9 @@
     return default if value is None else value
 
 
-def get_mod_global(var_name, default=None, mod_name="__main__"):
+def get_mod_global(var_name,
+                   default=None,
+                   mod_name="__main__"):
     r"""
     Get module global variable value and return it.
 
@@ -209,12 +204,10 @@
     try:
         module = sys.modules[mod_name]
     except KeyError:
-        gp.print_error_report(
-            "Programmer error - The mod_name passed to"
-            + " this function is invalid:\n"
-            + gp.sprint_var(mod_name)
-        )
-        raise ValueError("Programmer error.")
+        gp.print_error_report("Programmer error - The mod_name passed to"
+                              + " this function is invalid:\n"
+                              + gp.sprint_var(mod_name))
+        raise ValueError('Programmer error.')
 
     if default is None:
         return getattr(module, var_name)
@@ -222,7 +215,8 @@
         return getattr(module, var_name, default)
 
 
-def global_default(var_value, default=0):
+def global_default(var_value,
+                   default=0):
     r"""
     If var_value is not None, return it.  Otherwise, return the global
     variable of the same name, if it exists.  If not, return default.
@@ -245,7 +239,9 @@
     return dft(var_value, get_mod_global(var_name, 0))
 
 
-def set_mod_global(var_value, mod_name="__main__", var_name=None):
+def set_mod_global(var_value,
+                   mod_name="__main__",
+                   var_name=None):
     r"""
     Set a global variable for a given module.
 
@@ -259,12 +255,10 @@
     try:
         module = sys.modules[mod_name]
     except KeyError:
-        gp.print_error_report(
-            "Programmer error - The mod_name passed to"
-            + " this function is invalid:\n"
-            + gp.sprint_var(mod_name)
-        )
-        raise ValueError("Programmer error.")
+        gp.print_error_report("Programmer error - The mod_name passed to"
+                              + " this function is invalid:\n"
+                              + gp.sprint_var(mod_name))
+        raise ValueError('Programmer error.')
 
     if var_name is None:
         var_name = gp.get_arg_name(None, 1, 2)
@@ -298,7 +292,7 @@
         string_file = io.StringIO()
 
     # Write the dummy section header to the string file.
-    string_file.write("[dummysection]\n")
+    string_file.write('[dummysection]\n')
     # Write the entire contents of the properties file to the string file.
     string_file.write(open(prop_file_path).read())
     # Rewind the string file.
@@ -315,12 +309,15 @@
     config_parser.readfp(string_file)
     # Return the properties as a dictionary.
     if robot_env:
-        return DotDict(config_parser.items("dummysection"))
+        return DotDict(config_parser.items('dummysection'))
     else:
-        return collections.OrderedDict(config_parser.items("dummysection"))
+        return collections.OrderedDict(config_parser.items('dummysection'))
 
 
-def file_to_list(file_path, newlines=0, comments=1, trim=0):
+def file_to_list(file_path,
+                 newlines=0,
+                 comments=1,
+                 trim=0):
     r"""
     Return the contents of a file as a list.  Each element of the resulting
     list is one line from the file.
@@ -357,7 +354,7 @@
     See file_to_list defined above for description of arguments.
     """
 
-    return "\n".join(file_to_list(*args, **kwargs))
+    return '\n'.join(file_to_list(*args, **kwargs))
 
 
 def append_file(file_path, buffer):
@@ -379,7 +376,7 @@
     the list will be normalized and have a trailing slash added.
     """
 
-    PATH_LIST = os.environ["PATH"].split(":")
+    PATH_LIST = os.environ['PATH'].split(":")
     PATH_LIST = [os.path.normpath(path) + os.sep for path in PATH_LIST]
 
     return PATH_LIST
@@ -405,7 +402,7 @@
     buffer                          The string whose quotes are to be escaped.
     """
 
-    return re.sub("'", "'\\''", buffer)
+    return re.sub("\'", "\'\\\'\'", buffer)
 
 
 def quote_bash_parm(parm):
@@ -427,18 +424,19 @@
     # Tilde expansion: ~
     # Piped commands: |
     # Bash re-direction: >, <
-    bash_special_chars = set(" '\"$*?[]+@!{}~|><")
+    bash_special_chars = set(' \'"$*?[]+@!{}~|><')
 
     if any((char in bash_special_chars) for char in parm):
         return "'" + escape_bash_quotes(parm) + "'"
 
-    if parm == "":
+    if parm == '':
         parm = "''"
 
     return parm
 
 
-def get_host_name_ip(host=None, short_name=0):
+def get_host_name_ip(host=None,
+                     short_name=0):
     r"""
     Get the host name and the IP address for the given host and return them as a tuple.
 
@@ -453,11 +451,8 @@
     try:
         host_ip = socket.gethostbyname(host)
     except socket.gaierror as my_gaierror:
-        message = (
-            "Unable to obtain the host name for the following host:"
-            + "\n"
-            + gp.sprint_var(host)
-        )
+        message = "Unable to obtain the host name for the following host:" +\
+                  "\n" + gp.sprint_var(host)
         gp.print_error_report(message)
         raise my_gaierror
 
@@ -493,7 +488,8 @@
     return True
 
 
-def to_signed(number, bit_width=None):
+def to_signed(number,
+              bit_width=None):
     r"""
     Convert number to a signed number and return the result.
 
@@ -534,7 +530,7 @@
 
     if number < 0:
         return number
-    neg_bit_mask = 2 ** (bit_width - 1)
+    neg_bit_mask = 2**(bit_width - 1)
     if number & neg_bit_mask:
         return ((2**bit_width) - number) * -1
     else:
@@ -542,6 +538,7 @@
 
 
 def get_child_pids(quiet=1):
+
     r"""
     Get and return a list of pids representing all first-generation processes that are the children of the
     current process.
@@ -567,30 +564,24 @@
         # Otherwise, find child pids using shell commands.
         print_output = not quiet
 
-        ps_cmd_buf = (
-            "ps --no-headers --ppid " + str(os.getpid()) + " -o pid,args"
-        )
+        ps_cmd_buf = "ps --no-headers --ppid " + str(os.getpid()) +\
+            " -o pid,args"
         # Route the output of ps to a temporary file for later grepping.  Avoid using " | grep" in the ps
         # command string because it creates yet another process which is of no interest to the caller.
         temp = tempfile.NamedTemporaryFile()
         temp_file_path = temp.name
-        gc.shell_cmd(
-            ps_cmd_buf + " > " + temp_file_path, print_output=print_output
-        )
+        gc.shell_cmd(ps_cmd_buf + " > " + temp_file_path,
+                     print_output=print_output)
         # Sample contents of the temporary file:
         # 30703 sleep 2
         # 30795 /bin/bash -c ps --no-headers --ppid 30672 -o pid,args > /tmp/tmpqqorWY
         # Use egrep to exclude the "ps" process itself from the results collected with the prior shell_cmd
         # invocation.  Only the other children are of interest to the caller.  Use cut on the grep results to
         # obtain only the pid column.
-        rc, output = gc.shell_cmd(
-            "egrep -v '"
-            + re.escape(ps_cmd_buf)
-            + "' "
-            + temp_file_path
-            + " | cut -c1-5",
-            print_output=print_output,
-        )
+        rc, output = \
+            gc.shell_cmd("egrep -v '" + re.escape(ps_cmd_buf) + "' "
+                         + temp_file_path + " | cut -c1-5",
+                         print_output=print_output)
         # Split the output buffer by line into a list.  Strip each element of extra spaces and convert each
         # element to an integer.
         return map(int, map(str.strip, filter(None, output.split("\n"))))
@@ -680,7 +671,8 @@
     return re.sub("[^0-9\\.]", "", sys_version)
 
 
-python_version = version_tuple(get_python_version())
+python_version = \
+    version_tuple(get_python_version())
 ordered_dict_version = version_tuple("3.6")
 
 
@@ -722,14 +714,11 @@
         pass
 
     callers_stack_frame = inspect.stack()[1]
-    file_name_elements = [
-        gp.pgm_name,
-        callers_stack_frame.function,
-        "line_" + str(callers_stack_frame.lineno),
-        "pid_" + str(os.getpid()),
-        str(random.randint(0, 1000000)),
-        suffix,
-    ]
+    file_name_elements = \
+        [
+            gp.pgm_name, callers_stack_frame.function, "line_" + str(callers_stack_frame.lineno),
+            "pid_" + str(os.getpid()), str(random.randint(0, 1000000)), suffix
+        ]
     temp_file_name = delim.join(file_name_elements)
 
     temp_file_path = temp_dir_path + temp_file_name
diff --git a/lib/gen_plug_in.py b/lib/gen_plug_in.py
index d6ab0a6..fc57cef 100755
--- a/lib/gen_plug_in.py
+++ b/lib/gen_plug_in.py
@@ -4,29 +4,27 @@
 This module provides functions which are useful for running plug-ins.
 """
 
-import glob
-import os
 import sys
+import os
+import glob
 
-import gen_misc as gm
 import gen_print as gp
+import gen_misc as gm
 
 # Some help text that is common to more than one program.
-plug_in_dir_paths_help_text = (
-    "This is a colon-separated list of plug-in directory paths.  If one"
-    + " of the entries in the list is a plain directory name (i.e. no"
-    + " path info), it will be taken to be a native plug-in.  In that case,"
-    + ' %(prog)s will search for the native plug-in in the "plug-ins"'
-    + " subdirectory of each path in the PATH environment variable until it"
-    + " is found.  Also, integrated plug-ins will automatically be appended"
-    + " to your plug_in_dir_paths list.  An integrated plug-in is any plug-in"
-    + ' found using the PATH variable that contains a file named "integrated".'
-)
+plug_in_dir_paths_help_text = \
+    'This is a colon-separated list of plug-in directory paths.  If one' +\
+    ' of the entries in the list is a plain directory name (i.e. no' +\
+    ' path info), it will be taken to be a native plug-in.  In that case,' +\
+    ' %(prog)s will search for the native plug-in in the "plug-ins"' +\
+    ' subdirectory of each path in the PATH environment variable until it' +\
+    ' is found.  Also, integrated plug-ins will automatically be appended' +\
+    ' to your plug_in_dir_paths list.  An integrated plug-in is any plug-in' +\
+    ' found using the PATH variable that contains a file named "integrated".'
 
-mch_class_help_text = (
-    'The class of machine that we are testing (e.g. "op" = "open power",'
-    + ' "obmc" = "open bmc", etc).'
-)
+mch_class_help_text = \
+    'The class of machine that we are testing (e.g. "op" = "open power",' +\
+    ' "obmc" = "open bmc", etc).'
 
 PATH_LIST = gm.return_path_list()
 
@@ -66,16 +64,17 @@
 
     global plug_in_base_path_list
     for plug_in_base_dir_path in plug_in_base_path_list:
-        candidate_plug_in_dir_path = (
-            os.path.normpath(plug_in_base_dir_path + plug_in_name) + os.sep
-        )
+        candidate_plug_in_dir_path = os.path.normpath(plug_in_base_dir_path
+                                                      + plug_in_name) + \
+            os.sep
         if os.path.isdir(candidate_plug_in_dir_path):
             return candidate_plug_in_dir_path
 
     return ""
 
 
-def validate_plug_in_package(plug_in_dir_path, mch_class="obmc"):
+def validate_plug_in_package(plug_in_dir_path,
+                             mch_class="obmc"):
     r"""
     Validate the plug in package and return the normalized plug-in directory path.
 
@@ -89,42 +88,33 @@
 
     if os.path.isabs(plug_in_dir_path):
         # plug_in_dir_path begins with a slash so it is an absolute path.
-        candidate_plug_in_dir_path = (
-            os.path.normpath(plug_in_dir_path) + os.sep
-        )
+        candidate_plug_in_dir_path = os.path.normpath(plug_in_dir_path) +\
+            os.sep
         if not os.path.isdir(candidate_plug_in_dir_path):
-            gp.print_error_report(
-                'Plug-in directory path "'
-                + plug_in_dir_path
-                + '" does not exist.\n'
-            )
+            gp.print_error_report("Plug-in directory path \""
+                                  + plug_in_dir_path + "\" does not exist.\n")
             exit(1)
     else:
         # The plug_in_dir_path is actually a simple name (e.g. "OBMC_Sample")...
         candidate_plug_in_dir_path = find_plug_in_package(plug_in_dir_path)
         if candidate_plug_in_dir_path == "":
             global PATH_LIST
-            gp.print_error_report(
-                'Plug-in directory path "'
-                + plug_in_dir_path
-                + '" could not be found'
-                + " in any of the following directories:\n"
-                + gp.sprint_var(PATH_LIST)
-            )
+            gp.print_error_report("Plug-in directory path \""
+                                  + plug_in_dir_path + "\" could not be found"
+                                  + " in any of the following directories:\n"
+                                  + gp.sprint_var(PATH_LIST))
             exit(1)
     # Make sure that this plug-in supports us...
     supports_file_path = candidate_plug_in_dir_path + "supports_" + mch_class
     if not os.path.exists(supports_file_path):
-        gp.print_error_report(
-            "The following file path could not be"
-            + " found:\n"
-            + gp.sprint_varx("supports_file_path", supports_file_path)
-            + "\nThis file is necessary to indicate that"
-            + " the given plug-in supports the class of"
-            + ' machine we are testing, namely "'
-            + mch_class
-            + '".\n'
-        )
+        gp.print_error_report("The following file path could not be"
+                              + " found:\n"
+                              + gp.sprint_varx("supports_file_path",
+                                               supports_file_path)
+                              + "\nThis file is necessary to indicate that"
+                              + " the given plug-in supports the class of"
+                              + " machine we are testing, namely \""
+                              + mch_class + "\".\n")
         exit(1)
 
     return candidate_plug_in_dir_path
@@ -146,25 +136,22 @@
 
     integrated_plug_ins_list = []
 
-    DEBUG_SKIP_INTEGRATED = int(os.getenv("DEBUG_SKIP_INTEGRATED", "0"))
+    DEBUG_SKIP_INTEGRATED = int(os.getenv('DEBUG_SKIP_INTEGRATED', '0'))
 
     if DEBUG_SKIP_INTEGRATED:
         return integrated_plug_ins_list
 
     for plug_in_base_path in plug_in_base_path_list:
         # Get a list of all plug-in paths that support our mch_class.
-        mch_class_candidate_list = glob.glob(
-            plug_in_base_path + "*/supports_" + mch_class
-        )
+        mch_class_candidate_list = glob.glob(plug_in_base_path
+                                             + "*/supports_" + mch_class)
         for candidate_path in mch_class_candidate_list:
-            integrated_plug_in_dir_path = (
-                os.path.dirname(candidate_path) + os.sep
-            )
+            integrated_plug_in_dir_path = os.path.dirname(candidate_path) +\
+                os.sep
             integrated_file_path = integrated_plug_in_dir_path + "integrated"
             if os.path.exists(integrated_file_path):
-                plug_in_name = os.path.basename(
-                    os.path.dirname(candidate_path)
-                )
+                plug_in_name = \
+                    os.path.basename(os.path.dirname(candidate_path))
                 if plug_in_name not in integrated_plug_ins_list:
                     # If this plug-in has not already been added to the list...
                     integrated_plug_ins_list.append(plug_in_name)
@@ -172,7 +159,8 @@
     return integrated_plug_ins_list
 
 
-def return_plug_in_packages_list(plug_in_dir_paths, mch_class="obmc"):
+def return_plug_in_packages_list(plug_in_dir_paths,
+                                 mch_class="obmc"):
     r"""
     Return a list of plug-in packages given the plug_in_dir_paths string.  This function calls
     validate_plug_in_package so it will fail if plug_in_dir_paths contains any invalid plug-ins.
@@ -196,13 +184,8 @@
 
     plug_in_packages_list = plug_in_packages_list + integrated_plug_ins_list
 
-    plug_in_packages_list = list(
-        set(
-            [
-                validate_plug_in_package(path, mch_class)
-                for path in plug_in_packages_list
-            ]
-        )
-    )
+    plug_in_packages_list = \
+        list(set([validate_plug_in_package(path, mch_class)
+                  for path in plug_in_packages_list]))
 
     return plug_in_packages_list
diff --git a/lib/gen_plug_in_utils.py b/lib/gen_plug_in_utils.py
index dbbafe5..0cf3262 100755
--- a/lib/gen_plug_in_utils.py
+++ b/lib/gen_plug_in_utils.py
@@ -4,16 +4,16 @@
 This module provides functions which are useful to plug-in call point programs.
 """
 
-import collections
+import sys
 import os
 import re
-import sys
+import collections
 
-import func_args as fa
-import gen_cmd as gc
-import gen_misc as gm
 import gen_print as gp
 import gen_valid as gv
+import gen_misc as gm
+import gen_cmd as gc
+import func_args as fa
 
 PLUG_VAR_PREFIX = os.environ.get("PLUG_VAR_PREFIX", "AUTOBOOT")
 
@@ -36,7 +36,9 @@
         return plug_in_package_name
 
 
-def return_plug_vars(general=True, custom=True, plug_in_package_name=None):
+def return_plug_vars(general=True,
+                     custom=True,
+                     plug_in_package_name=None):
     r"""
     Return an OrderedDict which is sorted by key and which contains all of the plug-in environment variables.
 
@@ -80,9 +82,7 @@
     regex_list = []
     if not (general or custom):
         return collections.OrderedDict()
-    plug_in_package_name = gm.dft(
-        plug_in_package_name, get_plug_in_package_name()
-    )
+    plug_in_package_name = gm.dft(plug_in_package_name, get_plug_in_package_name())
     if general:
         regex_list = [PLUG_VAR_PREFIX, "AUTOGUI"]
     if custom:
@@ -92,23 +92,17 @@
 
     # Set a default for nickname.
     if os.environ.get("AUTOBOOT_OPENBMC_NICKNAME", "") == "":
-        os.environ["AUTOBOOT_OPENBMC_NICKNAME"] = os.environ.get(
-            "AUTOBOOT_OPENBMC_HOST", ""
-        )
+        os.environ['AUTOBOOT_OPENBMC_NICKNAME'] = \
+            os.environ.get("AUTOBOOT_OPENBMC_HOST", "")
 
     if os.environ.get("AUTOIPL_FSP1_NICKNAME", "") == "":
-        os.environ["AUTOIPL_FSP1_NICKNAME"] = os.environ.get(
-            "AUTOIPL_FSP1_NAME", ""
-        ).split(".")[0]
+        os.environ['AUTOIPL_FSP1_NICKNAME'] = \
+            os.environ.get("AUTOIPL_FSP1_NAME", "").split(".")[0]
 
     # For all variables specified in the parm_def file, we want them to default to "" rather than being unset.
     # Process the parm_def file if it exists.
-    parm_def_file_path = (
-        os.path.dirname(gp.pgm_dir_path.rstrip("/"))
-        + "/"
-        + plug_in_package_name
+    parm_def_file_path = os.path.dirname(gp.pgm_dir_path.rstrip("/")) + "/" + plug_in_package_name \
         + "/parm_def"
-    )
     if os.path.exists(parm_def_file_path):
         parm_defs = gm.my_parm_file(parm_def_file_path)
     else:
@@ -121,10 +115,8 @@
 
     # Create a list of plug-in environment variables by pre-pending <all caps plug-in package name>_<all
     # caps var name>
-    plug_in_parm_names = [
-        plug_in_package_name.upper() + "_" + x
-        for x in map(str.upper, parm_defs.keys())
-    ]
+    plug_in_parm_names = [plug_in_package_name.upper() + "_" + x for x in
+                          map(str.upper, parm_defs.keys())]
     # Example plug_in_parm_names:
     # plug_in_parm_names:
     #  plug_in_parm_names[0]: STOP_REST_FAIL
@@ -145,20 +137,13 @@
         if os.environ[var_name] == "":
             os.environ[var_name] = str(default_value)
 
-    plug_var_dict = collections.OrderedDict(
-        sorted(
-            {
-                k: v for (k, v) in os.environ.items() if re.match(regex, k)
-            }.items()
-        )
-    )
+    plug_var_dict = \
+        collections.OrderedDict(sorted({k: v for (k, v) in
+                                        os.environ.items()
+                                        if re.match(regex, k)}.items()))
     # Restore the types of any variables where the caller had defined default values.
     for key, value in non_string_defaults.items():
-        cmd_buf = (
-            "plug_var_dict[key] = "
-            + str(value).split("'")[1]
-            + "(plug_var_dict[key]"
-        )
+        cmd_buf = "plug_var_dict[key] = " + str(value).split("'")[1] + "(plug_var_dict[key]"
         if value is int:
             # Use int base argument of 0 to allow it to interpret hex strings.
             cmd_buf += ", 0)"
@@ -167,11 +152,8 @@
         exec(cmd_buf) in globals(), locals()
     # Register password values to prevent printing them out.  Any plug var whose name ends in PASSWORD will
     # be registered.
-    password_vals = {
-        k: v
-        for (k, v) in plug_var_dict.items()
-        if re.match(r".*_PASSWORD$", k)
-    }.values()
+    password_vals = {k: v for (k, v) in plug_var_dict.items()
+                     if re.match(r".*_PASSWORD$", k)}.values()
     map(gp.register_passwords, password_vals)
 
     return plug_var_dict
@@ -249,7 +231,8 @@
         setattr(module, re.sub("^" + PLUG_VAR_PREFIX + "_", "", key), value)
 
 
-def get_plug_default(var_name, default=None):
+def get_plug_default(var_name,
+                     default=None):
     r"""
     Derive and return a default value for the given parm variable.
 
@@ -305,7 +288,7 @@
     default_value = os.environ.get(package_var_name, None)
     if default_value is not None:
         # A package-name version of the variable was found so return its value.
-        return default_value
+        return (default_value)
 
     plug_var_name = PLUG_VAR_PREFIX + "_OVERRIDE_" + var_name
     default_value = os.environ.get(plug_var_name, None)
@@ -322,7 +305,8 @@
     return default
 
 
-def required_plug_in(required_plug_in_names, plug_in_dir_paths=None):
+def required_plug_in(required_plug_in_names,
+                     plug_in_dir_paths=None):
     r"""
     Determine whether the required_plug_in_names are in plug_in_dir_paths, construct an error_message and
     call gv.process_error_message(error_message).
@@ -343,22 +327,15 @@
     """
 
     # Calculate default value for plug_in_dir_paths.
-    plug_in_dir_paths = gm.dft(
-        plug_in_dir_paths,
-        os.environ.get(
-            "AUTOGUI_PLUG_IN_DIR_PATHS",
-            os.environ.get(PLUG_VAR_PREFIX + "_PLUG_IN_DIR_PATHS", ""),
-        ),
-    )
+    plug_in_dir_paths = gm.dft(plug_in_dir_paths,
+                               os.environ.get('AUTOGUI_PLUG_IN_DIR_PATHS',
+                                              os.environ.get(PLUG_VAR_PREFIX + "_PLUG_IN_DIR_PATHS", "")))
 
     # Convert plug_in_dir_paths to a list of base names.
-    plug_in_dir_paths = list(
-        filter(None, map(os.path.basename, plug_in_dir_paths.split(":")))
-    )
+    plug_in_dir_paths = \
+        list(filter(None, map(os.path.basename, plug_in_dir_paths.split(":"))))
 
-    error_message = gv.valid_list(
-        plug_in_dir_paths, required_values=required_plug_in_names
-    )
+    error_message = gv.valid_list(plug_in_dir_paths, required_values=required_plug_in_names)
     if error_message:
         return gv.process_error_message(error_message)
 
@@ -379,31 +356,20 @@
                                     to retrieve data saved by another plug-in package.
     """
 
-    plug_in_package_name = gm.dft(
-        plug_in_package_name, get_plug_in_package_name()
-    )
+    plug_in_package_name = gm.dft(plug_in_package_name,
+                                  get_plug_in_package_name())
 
-    BASE_TOOL_DIR_PATH = gm.add_trailing_slash(
-        os.environ.get(PLUG_VAR_PREFIX + "_BASE_TOOL_DIR_PATH", "/tmp/")
-    )
+    BASE_TOOL_DIR_PATH = \
+        gm.add_trailing_slash(os.environ.get(PLUG_VAR_PREFIX
+                                             + "_BASE_TOOL_DIR_PATH",
+                                             "/tmp/"))
     NICKNAME = os.environ.get("AUTOBOOT_OPENBMC_NICKNAME", "")
     if NICKNAME == "":
         NICKNAME = os.environ["AUTOIPL_FSP1_NICKNAME"]
     MASTER_PID = os.environ[PLUG_VAR_PREFIX + "_MASTER_PID"]
-    gp.dprint_vars(
-        BASE_TOOL_DIR_PATH, NICKNAME, plug_in_package_name, MASTER_PID
-    )
-    return (
-        BASE_TOOL_DIR_PATH
-        + gm.username()
-        + "/"
-        + NICKNAME
-        + "/"
-        + plug_in_package_name
-        + "/"
-        + str(MASTER_PID)
-        + "/"
-    )
+    gp.dprint_vars(BASE_TOOL_DIR_PATH, NICKNAME, plug_in_package_name, MASTER_PID)
+    return BASE_TOOL_DIR_PATH + gm.username() + "/" + NICKNAME + "/" +\
+        plug_in_package_name + "/" + str(MASTER_PID) + "/"
 
 
 def create_plug_in_save_dir(plug_in_package_name=None):
@@ -431,9 +397,8 @@
     plug_in_package_name            See compose_plug_in_save_dir_path for details.
     """
 
-    gc.shell_cmd(
-        "rm -rf " + compose_plug_in_save_dir_path(plug_in_package_name)
-    )
+    gc.shell_cmd("rm -rf "
+                 + compose_plug_in_save_dir_path(plug_in_package_name))
 
 
 def save_plug_in_value(var_value=None, plug_in_package_name=None, **kwargs):
@@ -474,7 +439,7 @@
         var_name = gp.get_arg_name(0, 1, stack_frame_ix=2)
     plug_in_save_dir_path = create_plug_in_save_dir(plug_in_package_name)
     save_file_path = plug_in_save_dir_path + var_name
-    gp.qprint_timen('Saving "' + var_name + '" value.')
+    gp.qprint_timen("Saving \"" + var_name + "\" value.")
     gp.qprint_varx(var_name, var_value)
     gc.shell_cmd("echo '" + str(var_value) + "' > " + save_file_path)
 
@@ -520,32 +485,22 @@
     default, args, kwargs = fa.pop_arg("", *args, **kwargs)
     plug_in_package_name, args, kwargs = fa.pop_arg(None, *args, **kwargs)
     if args or kwargs:
-        error_message = (
-            "Programmer error - Too many arguments passed for this function."
-        )
+        error_message = "Programmer error - Too many arguments passed for this function."
         raise ValueError(error_message)
     plug_in_save_dir_path = create_plug_in_save_dir(plug_in_package_name)
     save_file_path = plug_in_save_dir_path + var_name
     if os.path.isfile(save_file_path):
-        gp.qprint_timen(
-            "Restoring " + var_name + " value from " + save_file_path + "."
-        )
-        var_value = gm.file_to_list(
-            save_file_path, newlines=0, comments=0, trim=1
-        )[0]
+        gp.qprint_timen("Restoring " + var_name + " value from " + save_file_path + ".")
+        var_value = gm.file_to_list(save_file_path, newlines=0, comments=0, trim=1)[0]
         if type(default) is bool:
             # Convert from string to bool.
-            var_value = var_value == "True"
+            var_value = (var_value == 'True')
         if type(default) is int:
             # Convert from string to int.
             var_value = int(var_value)
     else:
         var_value = default
-        gp.qprint_timen(
-            "Save file "
-            + save_file_path
-            + " does not exist so returning default value."
-        )
+        gp.qprint_timen("Save file " + save_file_path + " does not exist so returning default value.")
 
     gp.qprint_varx(var_name, var_value)
     return var_value
@@ -572,14 +527,9 @@
     AUTOBOOT_PROGRAM_PID = gm.get_mod_global("AUTOBOOT_PROGRAM_PID")
 
     if AUTOBOOT_MASTER_PID != AUTOBOOT_PROGRAM_PID:
-        message = (
-            get_plug_in_package_name()
-            + "/"
-            + gp.pgm_name
-            + " is not"
-            + " being called by the master program in the stack so no action"
+        message = get_plug_in_package_name() + "/" + gp.pgm_name + " is not" \
+            + " being called by the master program in the stack so no action" \
             + " will be taken."
-        )
         gp.qprint_timen(message)
         gp.qprint_vars(AUTOBOOT_MASTER_PID, AUTOBOOT_PROGRAM_PID)
         exit(0)
@@ -591,22 +541,13 @@
 
     The calling program is responsible for making sure that the tarball has been unpacked.
     """
-    AUTOBOOT_BASE_TOOL_DIR_PATH = gm.get_mod_global(
-        "AUTOBOOT_BASE_TOOL_DIR_PATH"
-    )
+    AUTOBOOT_BASE_TOOL_DIR_PATH = gm.get_mod_global("AUTOBOOT_BASE_TOOL_DIR_PATH")
     AUTOBOOT_OPENBMC_NICKNAME = gm.get_mod_global("AUTOBOOT_OPENBMC_NICKNAME")
 
-    tool_dir_path = (
-        AUTOBOOT_BASE_TOOL_DIR_PATH
-        + os.environ.get("USER")
-        + os.sep
-        + AUTOBOOT_OPENBMC_NICKNAME
-        + os.sep
-    )
-    tarball_tools_dir_path = tool_dir_path + "tarball/x86/bin"
-    os.environ["PATH"] = gm.add_path(
-        tarball_tools_dir_path, os.environ.get("PATH", "")
-    )
+    tool_dir_path = AUTOBOOT_BASE_TOOL_DIR_PATH + os.environ.get('USER') + os.sep \
+        + AUTOBOOT_OPENBMC_NICKNAME + os.sep
+    tarball_tools_dir_path = tool_dir_path + 'tarball/x86/bin'
+    os.environ['PATH'] = gm.add_path(tarball_tools_dir_path, os.environ.get('PATH', ''))
 
 
 def stop_test_rc():
@@ -632,15 +573,14 @@
 
 # Create print wrapper functions for all sprint functions defined above.
 # func_names contains a list of all print functions which should be created from their sprint counterparts.
-func_names = ["print_plug_vars"]
+func_names = ['print_plug_vars']
 
 # stderr_func_names is a list of functions whose output should go to stderr rather than stdout.
 stderr_func_names = []
 
 replace_dict = dict(gp.replace_dict)
-replace_dict["mod_qualifier"] = "gp."
-func_defs = gp.create_print_wrapper_funcs(
-    func_names, stderr_func_names, replace_dict
-)
+replace_dict['mod_qualifier'] = 'gp.'
+func_defs = gp.create_print_wrapper_funcs(func_names, stderr_func_names,
+                                          replace_dict)
 gp.gp_debug_print(func_defs)
 exec(func_defs)
diff --git a/lib/gen_print.py b/lib/gen_print.py
index 8372832..5f87343 100755
--- a/lib/gen_print.py
+++ b/lib/gen_print.py
@@ -4,31 +4,28 @@
 This module provides many print functions such as sprint_var, sprint_time, sprint_error, sprint_call_stack.
 """
 
+import sys
+import os
+import time
+import inspect
+import re
+import grp
+import socket
 import argparse
 import copy
-import grp
-import inspect
-import os
-import re
-import socket
-import sys
-import time
-
 try:
     import __builtin__
 except ImportError:
     import builtins as __builtin__
-
-import collections
 import logging
-
+import collections
 from wrap_utils import *
 
 try:
     robot_env = 1
+    from robot.utils import DotDict
+    from robot.utils import NormalizedDict
     from robot.libraries.BuiltIn import BuiltIn
-    from robot.utils import DotDict, NormalizedDict
-
     # Having access to the robot libraries alone does not indicate that we are in a robot environment.  The
     # following try block should confirm that.
     try:
@@ -43,9 +40,8 @@
 # Setting these variables for use both inside this module and by programs importing this module.
 pgm_file_path = sys.argv[0]
 pgm_name = os.path.basename(pgm_file_path)
-pgm_dir_path = (
-    os.path.normpath(re.sub("/" + pgm_name, "", pgm_file_path)) + os.path.sep
-)
+pgm_dir_path = os.path.normpath(re.sub("/" + pgm_name, "", pgm_file_path)) +\
+    os.path.sep
 
 
 # Some functions (e.g. sprint_pgm_header) have need of a program name value that looks more like a valid
@@ -59,12 +55,12 @@
 # objective is to make the variable values line up nicely with the time stamps.
 dft_col1_width = 29
 
-NANOSECONDS = os.environ.get("NANOSECONDS", "1")
+NANOSECONDS = os.environ.get('NANOSECONDS', '1')
 
 if NANOSECONDS == "1":
     dft_col1_width = dft_col1_width + 7
 
-SHOW_ELAPSED_TIME = os.environ.get("SHOW_ELAPSED_TIME", "1")
+SHOW_ELAPSED_TIME = os.environ.get('SHOW_ELAPSED_TIME', '1')
 
 if SHOW_ELAPSED_TIME == "1":
     if NANOSECONDS == "1":
@@ -111,7 +107,7 @@
 
 
 # The user can set environment variable "GEN_PRINT_DEBUG" to get debug output from this module.
-gen_print_debug = int(os.environ.get("GEN_PRINT_DEBUG", 0))
+gen_print_debug = int(os.environ.get('GEN_PRINT_DEBUG', 0))
 
 
 def sprint_func_name(stack_frame_ix=None):
@@ -160,12 +156,14 @@
     Return the number of spaces at the beginning of the line.
     """
 
-    return len(line) - len(line.lstrip(" "))
+    return len(line) - len(line.lstrip(' '))
 
 
 # get_arg_name is not a print function per se.  It has been included in this module because it is used by
 # sprint_var which is defined in this module.
-def get_arg_name(var, arg_num=1, stack_frame_ix=1):
+def get_arg_name(var,
+                 arg_num=1,
+                 stack_frame_ix=1):
     r"""
     Return the "name" of an argument passed to a function.  This could be a literal or a variable name.
 
@@ -223,22 +221,17 @@
     # sprint_var, valid_value, etc.).
 
     # The user can set environment variable "GET_ARG_NAME_DEBUG" to get debug output from this function.
-    local_debug = int(os.environ.get("GET_ARG_NAME_DEBUG", 0))
+    local_debug = int(os.environ.get('GET_ARG_NAME_DEBUG', 0))
     # In addition to GET_ARG_NAME_DEBUG, the user can set environment variable "GET_ARG_NAME_SHOW_SOURCE" to
     # have this function include source code in the debug output.
     local_debug_show_source = int(
-        os.environ.get("GET_ARG_NAME_SHOW_SOURCE", 0)
-    )
+        os.environ.get('GET_ARG_NAME_SHOW_SOURCE', 0))
 
     if stack_frame_ix < 1:
-        print_error(
-            'Programmer error - Variable "stack_frame_ix" has an'
-            + ' invalid value of "'
-            + str(stack_frame_ix)
-            + '".  The'
-            + " value must be an integer that is greater than or equal"
-            + " to 1.\n"
-        )
+        print_error("Programmer error - Variable \"stack_frame_ix\" has an"
+                    + " invalid value of \"" + str(stack_frame_ix) + "\".  The"
+                    + " value must be an integer that is greater than or equal"
+                    + " to 1.\n")
         return
 
     if local_debug:
@@ -255,25 +248,15 @@
     work_around_inspect_stack_cwd_failure()
     for count in range(0, 2):
         try:
-            (
-                frame,
-                filename,
-                cur_line_no,
-                function_name,
-                lines,
-                index,
-            ) = inspect.stack()[stack_frame_ix]
+            frame, filename, cur_line_no, function_name, lines, index = \
+                inspect.stack()[stack_frame_ix]
         except IndexError:
-            print_error(
-                "Programmer error - The caller has asked for"
-                + ' information about the stack frame at index "'
-                + str(stack_frame_ix)
-                + '".  However, the stack'
-                + " only contains "
-                + str(len(inspect.stack()))
-                + " entries.  Therefore the stack frame index is out"
-                + " of range.\n"
-            )
+            print_error("Programmer error - The caller has asked for"
+                        + " information about the stack frame at index \""
+                        + str(stack_frame_ix) + "\".  However, the stack"
+                        + " only contains " + str(len(inspect.stack()))
+                        + " entries.  Therefore the stack frame index is out"
+                        + " of range.\n")
             return
         if filename != "<string>":
             break
@@ -292,10 +275,12 @@
     # Though one would expect inspect.getsourcelines(frame) to get all module source lines if the frame is
     # "<module>", it doesn't do that.  Therefore, for this special case, do inspect.getsourcelines(module).
     if function_name == "<module>":
-        source_lines, source_line_num = inspect.getsourcelines(module)
+        source_lines, source_line_num =\
+            inspect.getsourcelines(module)
         line_ix = cur_line_no - source_line_num - 1
     else:
-        source_lines, source_line_num = inspect.getsourcelines(frame)
+        source_lines, source_line_num =\
+            inspect.getsourcelines(frame)
         line_ix = cur_line_no - source_line_num
 
     if local_debug:
@@ -310,9 +295,8 @@
         print_varx("line_ix", line_ix, indent=debug_indent)
         if local_debug_show_source:
             print_varx("source_lines", source_lines, indent=debug_indent)
-        print_varx(
-            "real_called_func_name", real_called_func_name, indent=debug_indent
-        )
+        print_varx("real_called_func_name", real_called_func_name,
+                   indent=debug_indent)
 
     # Get a list of all functions defined for the module.  Note that this doesn't work consistently when
     # _run_exitfuncs is at the top of the stack (i.e. if we're running an exit function).  I've coded a
@@ -346,9 +330,8 @@
     # The call to the function could be encased in a recast (e.g. int(func_name())).
     recast_regex = "([^ ]+\\([ ]*)?"
     import_name_regex = "([a-zA-Z0-9_]+\\.)?"
-    func_name_regex = (
-        recast_regex + import_name_regex + "(" + "|".join(aliases) + ")"
-    )
+    func_name_regex = recast_regex + import_name_regex + "(" +\
+        '|'.join(aliases) + ")"
     pre_args_regex = ".*" + func_name_regex + "[ ]*\\("
 
     # Search backward through source lines looking for the calling function name.
@@ -361,12 +344,9 @@
             found = True
             break
     if not found:
-        print_error(
-            "Programmer error - Could not find the source line with"
-            + ' a reference to function "'
-            + real_called_func_name
-            + '".\n'
-        )
+        print_error("Programmer error - Could not find the source line with"
+                    + " a reference to function \"" + real_called_func_name
+                    + "\".\n")
         return
 
     # Search forward through the source lines looking for a line whose indentation is the same or less than
@@ -385,18 +365,15 @@
         prior_line = source_lines[start_line_ix - 1]
         prior_line_stripped = re.sub(r"[ ]*\\([\r\n]$)", " \\1", prior_line)
         prior_line_indent = get_line_indent(prior_line)
-        if (
-            prior_line != prior_line_stripped
-            and prior_line_indent < start_indent
-        ):
+        if prior_line != prior_line_stripped and\
+           prior_line_indent < start_indent:
             start_line_ix -= 1
             # Remove the backslash (continuation char) from prior line.
             source_lines[start_line_ix] = prior_line_stripped
 
     # Join the start line through the end line into a composite line.
-    composite_line = "".join(
-        map(str.strip, source_lines[start_line_ix : end_line_ix + 1])
-    )
+    composite_line = ''.join(map(str.strip,
+                                 source_lines[start_line_ix:end_line_ix + 1]))
     # Insert one space after first "=" if there isn't one already.
     composite_line = re.sub("=[ ]*([^ ])", "= \\1", composite_line, 1)
 
@@ -416,9 +393,8 @@
         lvalues[ix] = lvalue
         ix += 1
     lvalue_prefix_regex = "(.*=[ ]+)?"
-    called_func_name_regex = (
-        lvalue_prefix_regex + func_name_regex + "[ ]*\\(.*"
-    )
+    called_func_name_regex = lvalue_prefix_regex + func_name_regex +\
+        "[ ]*\\(.*"
     called_func_name = re.sub(called_func_name_regex, "\\4", composite_line)
     arg_list_etc = "(" + re.sub(pre_args_regex, "", composite_line)
     if local_debug:
@@ -432,11 +408,8 @@
         print_varx("lvalue_regex", lvalue_regex, indent=debug_indent)
         print_varx("lvalue_string", lvalue_string, indent=debug_indent)
         print_varx("lvalues", lvalues, indent=debug_indent)
-        print_varx(
-            "called_func_name_regex",
-            called_func_name_regex,
-            indent=debug_indent,
-        )
+        print_varx("called_func_name_regex", called_func_name_regex,
+                   indent=debug_indent)
         print_varx("called_func_name", called_func_name, indent=debug_indent)
         print_varx("arg_list_etc", arg_list_etc, indent=debug_indent)
 
@@ -557,11 +530,8 @@
 
     if SHOW_ELAPSED_TIME == "1":
         cur_time_seconds = seconds
-        math_string = (
-            "%9.9f" % cur_time_seconds
-            + " - "
-            + "%9.9f" % sprint_time_last_seconds[last_seconds_ix]
-        )
+        math_string = "%9.9f" % cur_time_seconds + " - " + "%9.9f" % \
+            sprint_time_last_seconds[last_seconds_ix]
         elapsed_seconds = eval(math_string)
         if NANOSECONDS == "1":
             elapsed_seconds = "%11.6f" % elapsed_seconds
@@ -678,9 +648,8 @@
         return word_length_in_digits()
 
     num_length_in_bits = bit_length(working_number)
-    num_hex_digits, remainder = divmod(
-        num_length_in_bits, digit_length_in_bits()
-    )
+    num_hex_digits, remainder = divmod(num_length_in_bits,
+                                       digit_length_in_bits())
     if remainder > 0:
         # Example: the number 7 requires 3 bits.  The divmod above produces, 0 with remainder of 3.  So
         # because we have a remainder, we increment num_hex_digits from 0 to 1.
@@ -811,17 +780,16 @@
     """
 
     return [
-        "hexa",
-        "octal",
-        "binary",
-        "blank",
-        "verbose",
-        "quote_keys",
-        "show_type",
-        "strip_brackets",
-        "no_header",
-        "quote_values",
-    ]
+        'hexa',
+        'octal',
+        'binary',
+        'blank',
+        'verbose',
+        'quote_keys',
+        'show_type',
+        'strip_brackets',
+        'no_header',
+        'quote_values']
 
 
 def create_fmt_definition():
@@ -992,16 +960,14 @@
         return fmt, fmt
 
 
-def sprint_varx(
-    var_name,
-    var_value,
-    fmt=0,
-    indent=dft_indent,
-    col1_width=dft_col1_width,
-    trailing_char="\n",
-    key_list=None,
-    delim=":",
-):
+def sprint_varx(var_name,
+                var_value,
+                fmt=0,
+                indent=dft_indent,
+                col1_width=dft_col1_width,
+                trailing_char="\n",
+                key_list=None,
+                delim=":"):
     r"""
     Print the var name/value passed to it.  If the caller lets col1_width default, the printing lines up
     nicely with output generated by the print_time functions.
@@ -1105,9 +1071,8 @@
         if type(var_value) in int_types:
             # Process format values pertaining to int types.
             if fmt & hexa():
-                num_hex_digits = max(
-                    dft_num_hex_digits(), get_req_num_hex_digits(var_value)
-                )
+                num_hex_digits = max(dft_num_hex_digits(),
+                                     get_req_num_hex_digits(var_value))
                 # Convert a negative number to its positive twos complement for proper printing.  For
                 # example, instead of printing -1 as "0x-000000000000001" it will be printed as
                 # "0xffffffffffffffff".
@@ -1116,14 +1081,13 @@
             elif fmt & octal():
                 value_format = "0o%016o"
             elif fmt & binary():
-                num_digits, remainder = divmod(
-                    max(bit_length(var_value), 1), 8
-                )
+                num_digits, remainder = \
+                    divmod(max(bit_length(var_value), 1), 8)
                 num_digits *= 8
                 if remainder:
                     num_digits += 8
                 num_digits += 2
-                value_format = "#0" + str(num_digits) + "b"
+                value_format = '#0' + str(num_digits) + 'b'
                 var_value = format(var_value, value_format)
                 value_format = "%s"
         elif type(var_value) in string_types:
@@ -1133,9 +1097,8 @@
                 var_value = "<blank>"
         elif type(var_value) is type:
             var_value = str(var_value).split("'")[1]
-        format_string = (
-            "%" + str(indent) + "s%-" + str(col1_width) + "s" + value_format
-        )
+        format_string = "%" + str(indent) + "s%-" + str(col1_width) + "s" \
+            + value_format
         if fmt & show_type():
             if var_value != "":
                 format_string += " "
@@ -1146,19 +1109,16 @@
         if not (fmt & verbose()):
             # Strip everything leading up to the first left square brace.
             var_name = re.sub(r".*\[", "[", var_name)
-        if fmt & strip_brackets():
+        if (fmt & strip_brackets()):
             var_name = re.sub(r"[\[\]]", "", var_name)
         if value_format == "0x%08x":
-            return format_string % (
-                "",
-                str(var_name) + delim,
-                var_value & 0xFFFFFFFF,
-            )
+            return format_string % ("", str(var_name) + delim,
+                                    var_value & 0xffffffff)
         else:
             return format_string % ("", str(var_name) + delim, var_value)
     else:
         # The data type is complex in the sense that it has subordinate parts.
-        if fmt & no_header():
+        if (fmt & no_header()):
             buffer = ""
         else:
             # Create header line.
@@ -1167,7 +1127,7 @@
                 loc_var_name = re.sub(r".*\[", "[", var_name)
             else:
                 loc_var_name = var_name
-            if fmt & strip_brackets():
+            if (fmt & strip_brackets()):
                 loc_var_name = re.sub(r"[\[\]]", "", loc_var_name)
             format_string = "%" + str(indent) + "s%s\n"
             buffer = format_string % ("", loc_var_name + ":")
@@ -1182,9 +1142,9 @@
         loc_trailing_char = "\n"
         if is_dict(var_value):
             if type(child_fmt) is list:
-                child_quote_keys = child_fmt[0] & quote_keys()
+                child_quote_keys = (child_fmt[0] & quote_keys())
             else:
-                child_quote_keys = child_fmt & quote_keys()
+                child_quote_keys = (child_fmt & quote_keys())
             for key, value in var_value.items():
                 if key_list is not None:
                     key_list_regex = "^" + "|".join(key_list) + "$"
@@ -1196,65 +1156,39 @@
                 if child_quote_keys:
                     key = "'" + key + "'"
                 key = "[" + str(key) + "]"
-                buffer += sprint_varx(
-                    var_name + key,
-                    value,
-                    child_fmt,
-                    indent,
-                    col1_width,
-                    loc_trailing_char,
-                    key_list,
-                    delim,
-                )
+                buffer += sprint_varx(var_name + key, value, child_fmt, indent,
+                                      col1_width, loc_trailing_char, key_list,
+                                      delim)
         elif type(var_value) in (list, tuple, set):
             for key, value in enumerate(var_value):
                 ix += 1
                 if ix == length:
                     loc_trailing_char = trailing_char
                 key = "[" + str(key) + "]"
-                buffer += sprint_varx(
-                    var_name + key,
-                    value,
-                    child_fmt,
-                    indent,
-                    col1_width,
-                    loc_trailing_char,
-                    key_list,
-                    delim,
-                )
+                buffer += sprint_varx(var_name + key, value, child_fmt, indent,
+                                      col1_width, loc_trailing_char, key_list,
+                                      delim)
         elif isinstance(var_value, argparse.Namespace):
             for key in var_value.__dict__:
                 ix += 1
                 if ix == length:
                     loc_trailing_char = trailing_char
-                cmd_buf = (
-                    'buffer += sprint_varx(var_name + "." + str(key)'
-                    + ", var_value."
-                    + key
-                    + ", child_fmt, indent,"
-                    + " col1_width, loc_trailing_char, key_list,"
-                    + " delim)"
-                )
+                cmd_buf = "buffer += sprint_varx(var_name + \".\" + str(key)" \
+                          + ", var_value." + key + ", child_fmt, indent," \
+                          + " col1_width, loc_trailing_char, key_list," \
+                          + " delim)"
                 exec(cmd_buf)
         else:
             var_type = type(var_value).__name__
             func_name = sys._getframe().f_code.co_name
-            var_value = (
-                "<" + var_type + " type not supported by " + func_name + "()>"
-            )
+            var_value = "<" + var_type + " type not supported by " + \
+                        func_name + "()>"
             value_format = "%s"
             indent -= 2
             # Adjust col1_width.
             col1_width = col1_width - indent
-            format_string = (
-                "%"
-                + str(indent)
-                + "s%-"
-                + str(col1_width)
-                + "s"
-                + value_format
-                + trailing_char
-            )
+            format_string = "%" + str(indent) + "s%-" \
+                + str(col1_width) + "s" + value_format + trailing_char
             return format_string % ("", str(var_name) + ":", var_value)
 
         return buffer
@@ -1305,7 +1239,10 @@
     return buffer
 
 
-def sprint_dashes(indent=dft_indent, width=80, line_feed=1, char="-"):
+def sprint_dashes(indent=dft_indent,
+                  width=80,
+                  line_feed=1,
+                  char="-"):
     r"""
     Return a string of dashes to the caller.
 
@@ -1324,7 +1261,8 @@
     return buffer
 
 
-def sindent(text="", indent=0):
+def sindent(text="",
+            indent=0):
     r"""
     Pre-pend the specified number of characters to the text string (i.e. indent it) and return it.
 
@@ -1372,41 +1310,36 @@
 
     if func_name == "<module>":
         # If the func_name is the "main" program, we simply get the command line call string.
-        func_and_args = " ".join(sys.argv)
+        func_and_args = ' '.join(sys.argv)
     else:
         # Get the program arguments.
-        (args, varargs, keywords, locals) = inspect.getargvalues(
-            stack_frame[0]
-        )
+        (args, varargs, keywords, locals) =\
+            inspect.getargvalues(stack_frame[0])
 
         args_list = []
         for arg_name in filter(None, args + [varargs, keywords]):
             # Get the arg value from frame locals.
             arg_value = locals[arg_name]
-            if arg_name == "self":
+            if arg_name == 'self':
                 if style == func_line_style_short:
                     continue
                 # Manipulations to improve output for class methods.
                 func_name = arg_value.__class__.__name__ + "." + func_name
                 args_list.append(arg_name + " = <self>")
-            elif (
-                style == func_line_style_short
-                and arg_name == "args"
-                and type(arg_value) in (list, tuple)
-            ):
+            elif (style == func_line_style_short
+                  and arg_name == 'args'
+                  and type(arg_value) in (list, tuple)):
                 if len(arg_value) == 0:
                     continue
-                args_list.append(repr(", ".join(arg_value)))
-            elif (
-                style == func_line_style_short
-                and arg_name == "kwargs"
-                and type(arg_value) is dict
-            ):
+                args_list.append(repr(', '.join(arg_value)))
+            elif (style == func_line_style_short
+                  and arg_name == 'kwargs'
+                  and type(arg_value) is dict):
                 for key, value in arg_value.items():
                     args_list.append(key + "=" + repr(value))
             else:
                 args_list.append(arg_name + " = " + repr(arg_value))
-        args_str = "(" + ", ".join(map(str, args_list)) + ")"
+        args_str = "(" + ', '.join(map(str, args_list)) + ")"
 
         # Now we need to print this in a nicely-wrapped way.
         func_and_args = func_name + args_str
@@ -1416,7 +1349,9 @@
     return func_and_args
 
 
-def sprint_call_stack(indent=0, stack_frame_ix=0, style=None):
+def sprint_call_stack(indent=0,
+                      stack_frame_ix=0,
+                      style=None):
     r"""
     Return a call stack report for the given point in the program with line numbers, function names and
     function parameters and arguments.
@@ -1509,7 +1444,8 @@
     return sprint_time() + "Executing: " + func_and_args + "\n"
 
 
-def sprint_pgm_header(indent=0, linefeed=1):
+def sprint_pgm_header(indent=0,
+                      linefeed=1):
     r"""
     Return a standardized header that programs should print at the beginning of the run.  It includes useful
     information like command line, pid, userid, program parameters, etc.
@@ -1528,25 +1464,20 @@
 
     if robot_env:
         suite_name = BuiltIn().get_variable_value("${suite_name}")
-        buffer += sindent(
-            sprint_time('Running test suite "' + suite_name + '".\n'), indent
-        )
+        buffer += sindent(sprint_time("Running test suite \"" + suite_name
+                                      + "\".\n"), indent)
 
     buffer += sindent(sprint_time() + "Running " + pgm_name + ".\n", indent)
-    buffer += sindent(
-        sprint_time() + "Program parameter values, etc.:\n\n", indent
-    )
-    buffer += sprint_varx(
-        "command_line", " ".join(sys.argv), 0, indent, col1_width
-    )
+    buffer += sindent(sprint_time() + "Program parameter values, etc.:\n\n",
+                      indent)
+    buffer += sprint_varx("command_line", ' '.join(sys.argv), 0, indent,
+                          col1_width)
     # We want the output to show a customized name for the pid and pgid but we want it to look like a valid
     # variable name.  Therefore, we'll use pgm_name_var_name which was set when this module was imported.
-    buffer += sprint_varx(
-        pgm_name_var_name + "_pid", os.getpid(), 0, indent, col1_width
-    )
-    buffer += sprint_varx(
-        pgm_name_var_name + "_pgid", os.getpgrp(), 0, indent, col1_width
-    )
+    buffer += sprint_varx(pgm_name_var_name + "_pid", os.getpid(), 0, indent,
+                          col1_width)
+    buffer += sprint_varx(pgm_name_var_name + "_pgid", os.getpgrp(), 0, indent,
+                          col1_width)
     userid_num = str(os.geteuid())
     try:
         username = os.getlogin()
@@ -1555,36 +1486,30 @@
             username = "root"
         else:
             username = "?"
-    buffer += sprint_varx(
-        "uid", userid_num + " (" + username + ")", 0, indent, col1_width
-    )
-    buffer += sprint_varx(
-        "gid",
-        str(os.getgid()) + " (" + str(grp.getgrgid(os.getgid()).gr_name) + ")",
-        0,
-        indent,
-        col1_width,
-    )
-    buffer += sprint_varx(
-        "host_name", socket.gethostname(), 0, indent, col1_width
-    )
+    buffer += sprint_varx("uid", userid_num + " (" + username
+                          + ")", 0, indent, col1_width)
+    buffer += sprint_varx("gid", str(os.getgid()) + " ("
+                          + str(grp.getgrgid(os.getgid()).gr_name) + ")", 0,
+                          indent, col1_width)
+    buffer += sprint_varx("host_name", socket.gethostname(), 0, indent,
+                          col1_width)
     try:
-        DISPLAY = os.environ["DISPLAY"]
+        DISPLAY = os.environ['DISPLAY']
     except KeyError:
         DISPLAY = ""
     buffer += sprint_var(DISPLAY, 0, indent, col1_width)
-    PYTHON_VERSION = os.environ.get("PYTHON_VERSION", None)
+    PYTHON_VERSION = os.environ.get('PYTHON_VERSION', None)
     if PYTHON_VERSION is not None:
         buffer += sprint_var(PYTHON_VERSION, 0, indent, col1_width)
-    PYTHON_PGM_PATH = os.environ.get("PYTHON_PGM_PATH", None)
+    PYTHON_PGM_PATH = os.environ.get('PYTHON_PGM_PATH', None)
     if PYTHON_PGM_PATH is not None:
         buffer += sprint_var(PYTHON_PGM_PATH, 0, indent, col1_width)
     python_version = sys.version.replace("\n", "")
     buffer += sprint_var(python_version, 0, indent, col1_width)
-    ROBOT_VERSION = os.environ.get("ROBOT_VERSION", None)
+    ROBOT_VERSION = os.environ.get('ROBOT_VERSION', None)
     if ROBOT_VERSION is not None:
         buffer += sprint_var(ROBOT_VERSION, 0, indent, col1_width)
-    ROBOT_PGM_PATH = os.environ.get("ROBOT_PGM_PATH", None)
+    ROBOT_PGM_PATH = os.environ.get('ROBOT_PGM_PATH', None)
     if ROBOT_PGM_PATH is not None:
         buffer += sprint_var(ROBOT_PGM_PATH, 0, indent, col1_width)
 
@@ -1611,9 +1536,10 @@
     return buffer
 
 
-def sprint_error_report(
-    error_text="\n", indent=2, format=None, stack_frame_ix=None
-):
+def sprint_error_report(error_text="\n",
+                        indent=2,
+                        format=None,
+                        stack_frame_ix=None):
     r"""
     Return a string with a standardized report which includes the caller's error text, the call stack and the
     program header.
@@ -1632,12 +1558,12 @@
     indent = int(indent)
     if format is None:
         if robot_env:
-            format = "short"
+            format = 'short'
         else:
-            format = "long"
-    error_text = error_text.rstrip("\n") + "\n"
+            format = 'long'
+    error_text = error_text.rstrip('\n') + '\n'
 
-    if format == "short":
+    if format == 'short':
         return sprint_error(error_text)
 
     buffer = ""
@@ -1662,7 +1588,8 @@
     return buffer
 
 
-def sprint_issuing(cmd_buf, test_mode=0):
+def sprint_issuing(cmd_buf,
+                   test_mode=0):
     r"""
     Return a line indicating a command that the program is about to execute.
 
@@ -1683,7 +1610,7 @@
         buffer += "(test_mode) "
     if type(cmd_buf) is list:
         # Assume this is a robot command in the form of a list.
-        cmd_buf = "  ".join([str(element) for element in cmd_buf])
+        cmd_buf = '  '.join([str(element) for element in cmd_buf])
     buffer += "Issuing: " + cmd_buf + "\n"
 
     return buffer
@@ -1714,7 +1641,7 @@
     file_path                       The path to a file (e.g. "/tmp/file1").
     """
 
-    with open(file_path, "r") as file:
+    with open(file_path, 'r') as file:
         buffer = file.read()
     return buffer
 
@@ -1751,7 +1678,8 @@
     return buffer
 
 
-def gp_print(buffer, stream="stdout"):
+def gp_print(buffer,
+             stream='stdout'):
     r"""
     Print the buffer using either sys.stdout.write or BuiltIn().log_to_console depending on whether we are
     running in a robot environment.
@@ -1807,7 +1735,9 @@
     gp_print(buffer)
 
 
-def get_var_value(var_value=None, default=1, var_name=None):
+def get_var_value(var_value=None,
+                  default=1,
+                  var_name=None):
     r"""
     Return either var_value, the corresponding global value or default.
 
@@ -1857,16 +1787,17 @@
         var_name = get_arg_name(None, 1, 2)
 
     if robot_env:
-        var_value = BuiltIn().get_variable_value(
-            "${" + var_name + "}", default
-        )
+        var_value = BuiltIn().get_variable_value("${" + var_name + "}",
+                                                 default)
     else:
         var_value = getattr(__builtin__, var_name, default)
 
     return var_value
 
 
-def get_stack_var(var_name, default="", init_stack_ix=2):
+def get_stack_var(var_name,
+                  default="",
+                  init_stack_ix=2):
     r"""
     Starting with the caller's stack level, search upward in the call stack for a variable named var_name and
     return its value.  If the variable cannot be found in the stack, attempt to get the global value.  If the
@@ -1894,14 +1825,9 @@
 
     work_around_inspect_stack_cwd_failure()
     default = get_var_value(var_name=var_name, default=default)
-    return next(
-        (
-            frame[0].f_locals[var_name]
-            for frame in inspect.stack()[init_stack_ix:]
-            if var_name in frame[0].f_locals
-        ),
-        default,
-    )
+    return next((frame[0].f_locals[var_name]
+                 for frame in inspect.stack()[init_stack_ix:]
+                 if var_name in frame[0].f_locals), default)
 
 
 # hidden_text is a list of passwords which are to be replaced with asterisks by print functions defined in
@@ -1935,9 +1861,8 @@
         # Place the password into the hidden_text list.
         hidden_text.append(password)
         # Create a corresponding password regular expression.  Escape regex special characters too.
-        password_regex = (
-            "(" + "|".join([re.escape(x) for x in hidden_text]) + ")"
-        )
+        password_regex = '(' +\
+            '|'.join([re.escape(x) for x in hidden_text]) + ')'
 
 
 def replace_passwords(buffer):
@@ -1961,9 +1886,10 @@
     return re.sub(password_regex, "********", buffer)
 
 
-def create_print_wrapper_funcs(
-    func_names, stderr_func_names, replace_dict, func_prefix=""
-):
+def create_print_wrapper_funcs(func_names,
+                               stderr_func_names,
+                               replace_dict,
+                               func_prefix=""):
     r"""
     Generate code for print wrapper functions and return the generated code as a string.
 
@@ -1993,9 +1919,9 @@
 
     for func_name in func_names:
         if func_name in stderr_func_names:
-            replace_dict["output_stream"] = "stderr"
+            replace_dict['output_stream'] = "stderr"
         else:
-            replace_dict["output_stream"] = "stdout"
+            replace_dict['output_stream'] = "stdout"
 
         s_func_name = "s" + func_name
         q_func_name = "q" + func_name
@@ -2003,48 +1929,32 @@
 
         # We don't want to try to redefine the "print" function, thus the following if statement.
         if func_name != "print":
-            func_def = create_func_def_string(
-                s_func_name,
-                func_prefix + func_name,
-                print_func_template,
-                replace_dict,
-            )
+            func_def = create_func_def_string(s_func_name,
+                                              func_prefix + func_name,
+                                              print_func_template,
+                                              replace_dict)
             buffer += func_def
 
-        func_def = create_func_def_string(
-            s_func_name,
-            func_prefix + "q" + func_name,
-            qprint_func_template,
-            replace_dict,
-        )
+        func_def = create_func_def_string(s_func_name,
+                                          func_prefix + "q" + func_name,
+                                          qprint_func_template, replace_dict)
         buffer += func_def
 
-        func_def = create_func_def_string(
-            s_func_name,
-            func_prefix + "d" + func_name,
-            dprint_func_template,
-            replace_dict,
-        )
+        func_def = create_func_def_string(s_func_name,
+                                          func_prefix + "d" + func_name,
+                                          dprint_func_template, replace_dict)
         buffer += func_def
 
-        func_def = create_func_def_string(
-            s_func_name,
-            func_prefix + "l" + func_name,
-            lprint_func_template,
-            replace_dict,
-        )
+        func_def = create_func_def_string(s_func_name,
+                                          func_prefix + "l" + func_name,
+                                          lprint_func_template, replace_dict)
         buffer += func_def
 
         # Create abbreviated aliases (e.g. spvar is an alias for sprint_var).
         alias = re.sub("print_", "p", func_name)
         alias = re.sub("print", "p", alias)
-        prefixes = [
-            func_prefix + "",
-            "s",
-            func_prefix + "q",
-            func_prefix + "d",
-            func_prefix + "l",
-        ]
+        prefixes = [func_prefix + "", "s", func_prefix + "q",
+                    func_prefix + "d", func_prefix + "l"]
         for prefix in prefixes:
             if alias == "p":
                 continue
@@ -2074,61 +1984,49 @@
 # means use of the logging module.  For robot programs it means use of the BuiltIn().log() function.
 
 # Templates for the various print wrapper functions.
-print_func_template = [
-    "    <mod_qualifier>gp_print(<mod_qualifier>replace_passwords("
-    + "<call_line>), stream='<output_stream>')"
-]
+print_func_template = \
+    [
+        "    <mod_qualifier>gp_print(<mod_qualifier>replace_passwords("
+        + "<call_line>), stream='<output_stream>')"
+    ]
 
-qprint_func_template = [
-    '    quiet = <mod_qualifier>get_stack_var("quiet", 0)',
-    "    if int(quiet): return",
-] + print_func_template
+qprint_func_template = \
+    [
+        "    quiet = <mod_qualifier>get_stack_var(\"quiet\", 0)",
+        "    if int(quiet): return"
+    ] + print_func_template
 
-dprint_func_template = [
-    '    debug = <mod_qualifier>get_stack_var("debug", 0)',
-    "    if not int(debug): return",
-] + print_func_template
+dprint_func_template = \
+    [
+        "    debug = <mod_qualifier>get_stack_var(\"debug\", 0)",
+        "    if not int(debug): return"
+    ] + print_func_template
 
-lprint_func_template = [
-    "    <mod_qualifier>set_last_seconds_ix(<mod_qualifier>"
-    + "lprint_last_seconds_ix())",
-    "    <mod_qualifier>gp_log(<mod_qualifier>replace_passwords"
-    + "(<call_line>))",
-    "    <mod_qualifier>set_last_seconds_ix(<mod_qualifier>"
-    + "standard_print_last_seconds_ix())",
-]
+lprint_func_template = \
+    [
+        "    <mod_qualifier>set_last_seconds_ix(<mod_qualifier>"
+        + "lprint_last_seconds_ix())",
+        "    <mod_qualifier>gp_log(<mod_qualifier>replace_passwords"
+        + "(<call_line>))",
+        "    <mod_qualifier>set_last_seconds_ix(<mod_qualifier>"
+        + "standard_print_last_seconds_ix())"
+    ]
 
-replace_dict = {"output_stream": "stdout", "mod_qualifier": ""}
+replace_dict = {'output_stream': 'stdout', 'mod_qualifier': ''}
 
 gp_debug_print("robot_env: " + str(robot_env) + "\n")
 
 # func_names contains a list of all print functions which should be created from their sprint counterparts.
-func_names = [
-    "print_time",
-    "print_timen",
-    "print_error",
-    "print_varx",
-    "print_var",
-    "print_vars",
-    "print_dashes",
-    "indent",
-    "print_call_stack",
-    "print_func_name",
-    "print_executing",
-    "print_pgm_header",
-    "print_issuing",
-    "print_pgm_footer",
-    "print_file",
-    "print_error_report",
-    "print",
-    "printn",
-]
+func_names = ['print_time', 'print_timen', 'print_error', 'print_varx',
+              'print_var', 'print_vars', 'print_dashes', 'indent',
+              'print_call_stack', 'print_func_name', 'print_executing',
+              'print_pgm_header', 'print_issuing', 'print_pgm_footer',
+              'print_file', 'print_error_report', 'print', 'printn']
 
 # stderr_func_names is a list of functions whose output should go to stderr rather than stdout.
-stderr_func_names = ["print_error", "print_error_report"]
+stderr_func_names = ['print_error', 'print_error_report']
 
-func_defs = create_print_wrapper_funcs(
-    func_names, stderr_func_names, replace_dict
-)
+func_defs = create_print_wrapper_funcs(func_names, stderr_func_names,
+                                       replace_dict)
 gp_debug_print(func_defs)
 exec(func_defs)
diff --git a/lib/gen_robot_keyword.py b/lib/gen_robot_keyword.py
index 304a836..f4b2e73 100755
--- a/lib/gen_robot_keyword.py
+++ b/lib/gen_robot_keyword.py
@@ -8,7 +8,10 @@
 from robot.libraries.BuiltIn import BuiltIn
 
 
-def run_key(keyword_buf, quiet=None, test_mode=None, ignore=0):
+def run_key(keyword_buf,
+            quiet=None,
+            test_mode=None,
+            ignore=0):
     r"""
     Run the given keyword, return the status and the keyword return values.
 
@@ -44,29 +47,28 @@
     ignore = int(ignore)
 
     # Convert the keyword_buf into a list split wherever 2 or more spaces are found.
-    keyword_list = keyword_buf.split("  ")
+    keyword_list = keyword_buf.split('  ')
     # Strip spaces from each argument to make the output look clean and uniform.
-    keyword_list = [item.strip(" ") for item in keyword_list]
+    keyword_list = [item.strip(' ') for item in keyword_list]
 
     if not quiet:
         # Join the list back into keyword_buf for the sake of output.
-        keyword_buf = "  ".join(keyword_list)
+        keyword_buf = '  '.join(keyword_list)
         gp.pissuing(keyword_buf, test_mode)
 
     if test_mode:
-        return "PASS", ""
+        return 'PASS', ""
 
     try:
-        status, ret_values = BuiltIn().run_keyword_and_ignore_error(
-            *keyword_list
-        )
+        status, ret_values = \
+            BuiltIn().run_keyword_and_ignore_error(*keyword_list)
     except Exception as my_assertion_error:
         status = "FAIL"
         ret_values = my_assertion_error.args[0]
 
-    if status != "PASS":
+    if status != 'PASS':
         # Output the error message to stderr.
-        BuiltIn().log_to_console(ret_values, stream="STDERR")
+        BuiltIn().log_to_console(ret_values, stream='STDERR')
         if not ignore:
             # Fail with the given error message.
             BuiltIn().fail(ret_values)
@@ -74,7 +76,9 @@
     return status, ret_values
 
 
-def run_key_u(keyword_buf, quiet=None, ignore=0):
+def run_key_u(keyword_buf,
+              quiet=None,
+              ignore=0):
     r"""
     Run keyword unconditionally (i.e. without regard to global test_mode setting).
 
diff --git a/lib/gen_robot_plug_in.py b/lib/gen_robot_plug_in.py
index 77a1f35..0f6deda 100755
--- a/lib/gen_robot_plug_in.py
+++ b/lib/gen_robot_plug_in.py
@@ -4,18 +4,19 @@
 This module provides functions which are useful for running plug-ins from a robot program.
 """
 
-import os
-import subprocess
 import sys
+import subprocess
+from robot.libraries.BuiltIn import BuiltIn
+import os
 import tempfile
 
-import gen_cmd as gc
-import gen_misc as gm
 import gen_print as gp
-from robot.libraries.BuiltIn import BuiltIn
+import gen_misc as gm
+import gen_cmd as gc
 
 
-def rvalidate_plug_ins(plug_in_dir_paths, quiet=1):
+def rvalidate_plug_ins(plug_in_dir_paths,
+                       quiet=1):
     r"""
     Call the external validate_plug_ins.py program which validates the plug-in dir paths given to it.  Return
     a list containing a normalized path for each plug-in selected.
@@ -26,15 +27,11 @@
                                     stdout.
     """
 
-    cmd_buf = 'validate_plug_ins.py "' + plug_in_dir_paths + '"'
+    cmd_buf = "validate_plug_ins.py \"" + plug_in_dir_paths + "\""
     rc, out_buf = gc.shell_cmd(cmd_buf, print_output=0)
     if rc != 0:
-        BuiltIn().fail(
-            gp.sprint_error(
-                "Validate plug ins call failed.  See"
-                + " stderr text for details.\n"
-            )
-        )
+        BuiltIn().fail(gp.sprint_error("Validate plug ins call failed.  See"
+                                       + " stderr text for details.\n"))
 
     # plug_in_packages_list = out_buf.split("\n")
     plug_in_packages_list = list(filter(None, out_buf.split("\n")))
@@ -44,17 +41,15 @@
     return plug_in_packages_list
 
 
-def rprocess_plug_in_packages(
-    plug_in_packages_list=None,
-    call_point="setup",
-    shell_rc="0x00000000",
-    stop_on_plug_in_failure=1,
-    stop_on_non_zero_rc=0,
-    release_type="obmc",
-    quiet=None,
-    debug=None,
-    return_history=False,
-):
+def rprocess_plug_in_packages(plug_in_packages_list=None,
+                              call_point="setup",
+                              shell_rc="0x00000000",
+                              stop_on_plug_in_failure=1,
+                              stop_on_non_zero_rc=0,
+                              release_type="obmc",
+                              quiet=None,
+                              debug=None,
+                              return_history=False):
     r"""
     Call the external process_plug_in_packages.py to process the plug-in packages.  Return the following:
     rc                              The return code - 0 = PASS, 1 = FAIL.
@@ -115,7 +110,7 @@
     debug = int(gp.get_var_value(debug, 0))
 
     # Create string from list.
-    plug_in_dir_paths = ":".join(plug_in_packages_list)
+    plug_in_dir_paths = ':'.join(plug_in_packages_list)
 
     temp = tempfile.NamedTemporaryFile()
     temp_file_path = temp.name
@@ -130,37 +125,23 @@
 
     loc_shell_rc = 0
 
-    sub_cmd_buf = (
-        "process_plug_in_packages.py"
-        + debug_string
-        + " --call_point="
-        + call_point
-        + " --allow_shell_rc="
-        + str(shell_rc)
-        + " --stop_on_plug_in_failure="
-        + str(stop_on_plug_in_failure)
-        + " --stop_on_non_zero_rc="
-        + str(stop_on_non_zero_rc)
-        + " "
-        + plug_in_dir_paths
-    )
+    sub_cmd_buf = "process_plug_in_packages.py" + debug_string +\
+                  " --call_point=" + call_point + " --allow_shell_rc=" +\
+                  str(shell_rc) + " --stop_on_plug_in_failure=" +\
+                  str(stop_on_plug_in_failure) + " --stop_on_non_zero_rc=" +\
+                  str(stop_on_non_zero_rc) + " " + plug_in_dir_paths
     if quiet:
         cmd_buf = sub_cmd_buf + " > " + temp_file_path + " 2>&1"
     else:
-        cmd_buf = (
-            "set -o pipefail ; "
-            + sub_cmd_buf
-            + " 2>&1 | tee "
-            + temp_file_path
-        )
+        cmd_buf = "set -o pipefail ; " + sub_cmd_buf + " 2>&1 | tee " +\
+                  temp_file_path
         if debug:
             gp.print_issuing(cmd_buf)
         else:
-            gp.print_timen(
-                "Processing " + call_point + " call point programs."
-            )
+            gp.print_timen("Processing " + call_point
+                           + " call point programs.")
 
-    sub_proc = subprocess.Popen(cmd_buf, shell=True, executable="/bin/bash")
+    sub_proc = subprocess.Popen(cmd_buf, shell=True, executable='/bin/bash')
     sub_proc.communicate()
     proc_plug_pkg_rc = sub_proc.returncode
 
@@ -168,13 +149,8 @@
         # Get the "Running" statements from the output.
         regex = " Running [^/]+/cp_"
         cmd_buf = "egrep '" + regex + "' " + temp_file_path
-        _, history = gc.shell_cmd(
-            cmd_buf,
-            quiet=(not debug),
-            print_output=0,
-            show_err=0,
-            ignore_err=1,
-        )
+        _, history = gc.shell_cmd(cmd_buf, quiet=(not debug), print_output=0,
+                                  show_err=0, ignore_err=1)
         history = [x + "\n" for x in filter(None, history.split("\n"))]
     else:
         history = []
@@ -191,14 +167,8 @@
     # - Zero or more spaces
     bash_var_regex = "[_[:alpha:]][_[:alnum:]]*"
     regex = "^" + bash_var_regex + ":[ ]*"
-    cmd_buf = (
-        "egrep '"
-        + regex
-        + "' "
-        + temp_file_path
-        + " > "
-        + temp_properties_file_path
-    )
+    cmd_buf = "egrep '" + regex + "' " + temp_file_path + " > " +\
+              temp_properties_file_path
     gp.dprint_issuing(cmd_buf)
     grep_rc = os.system(cmd_buf)
 
@@ -206,8 +176,8 @@
     properties = gm.my_parm_file(temp_properties_file_path)
 
     # Finally, we access the 2 values that we need.
-    shell_rc = int(properties.get("shell_rc", "0x0000000000000000"), 16)
-    failed_plug_in_name = properties.get("failed_plug_in_name", "")
+    shell_rc = int(properties.get('shell_rc', '0x0000000000000000'), 16)
+    failed_plug_in_name = properties.get('failed_plug_in_name', '')
 
     if proc_plug_pkg_rc != 0:
         if quiet:
@@ -216,13 +186,9 @@
             gp.print_var(grep_rc, gp.hexa())
         gp.print_var(proc_plug_pkg_rc, gp.hexa())
         gp.print_timen("Re-cap of plug-in failures:")
-        gc.cmd_fnc_u(
-            "egrep -A 1 '^failed_plug_in_name:[ ]+' "
-            + temp_properties_file_path
-            + " | egrep -v '^\\--'",
-            quiet=1,
-            show_err=0,
-        )
+        gc.cmd_fnc_u("egrep -A 1 '^failed_plug_in_name:[ ]+' "
+                     + temp_properties_file_path + " | egrep -v '^\\--'",
+                     quiet=1, show_err=0)
         rc = 1
 
     if return_history:
diff --git a/lib/gen_robot_print.py b/lib/gen_robot_print.py
index b0e6a94..fb958e0 100755
--- a/lib/gen_robot_print.py
+++ b/lib/gen_robot_print.py
@@ -4,14 +4,15 @@
 This file contains functions useful for printing to stdout from robot programs.
 """
 
-import os
 import re
+import os
 
-import func_args as fa
 import gen_print as gp
+import func_args as fa
+
 from robot.libraries.BuiltIn import BuiltIn
 
-gen_robot_print_debug = int(os.environ.get("GEN_ROBOT_PRINT_DEBUG", "0"))
+gen_robot_print_debug = int(os.environ.get('GEN_ROBOT_PRINT_DEBUG', '0'))
 
 
 def sprint_vars(*args, **kwargs):
@@ -27,14 +28,15 @@
     kwargs                          See sprint_varx in gen_print.py for descriptions of all other arguments.
     """
 
-    if "fmt" in kwargs:
+    if 'fmt' in kwargs:
         # Find format option names in kwargs['fmt'] and wrap them with "gp." and "()" to make them into
         # function calls.  For example, verbose would be converted to "gp.verbose()".  This allows the user
         # to simply specify "fmt=verbose" (vs. fmt=gp.verbose()).
         # Note "terse" has been explicitly added for backward compatibility.  Once the repo has been purged
         # of its use, this code can return to its original form.
         regex = "(" + "|".join(gp.valid_fmts()) + "|terse)"
-        kwargs["fmt"] = re.sub(regex, "gp.\\1()", kwargs["fmt"])
+        kwargs['fmt'] = \
+            re.sub(regex, "gp.\\1()", kwargs['fmt'])
     kwargs = fa.args_to_objects(kwargs)
     buffer = ""
     for var_name in args:
@@ -59,32 +61,15 @@
         buffer += gp.sprint_dashes()
         buffer += "Automatic Variables:"
 
-    buffer += sprint_vars(
-        "TEST_NAME",
-        "TEST_TAGS",
-        "TEST_DOCUMENTATION",
-        "TEST_STATUS",
-        "TEST_DOCUMENTATION",
-        "TEST_STATUS",
-        "TEST_MESSAGE",
-        "PREV_TEST_NAME",
-        "PREV_TEST_STATUS",
-        "PREV_TEST_MESSAGE",
-        "SUITE_NAME",
-        "SUITE_SOURCE",
-        "SUITE_DOCUMENTATION",
-        "SUITE_METADATA",
-        "SUITE_STATUS",
-        "SUITE_MESSAGE",
-        "KEYWORD_STATUS",
-        "KEYWORD_MESSAGE",
-        "LOG_LEVEL",
-        "OUTPUT_FILE",
-        "LOG_FILE",
-        "REPORT_FILE",
-        "DEBUG_FILE",
-        "OUTPUT_DIR",
-    )
+    buffer += \
+        sprint_vars(
+            "TEST_NAME", "TEST_TAGS", "TEST_DOCUMENTATION", "TEST_STATUS",
+            "TEST_DOCUMENTATION", "TEST_STATUS", "TEST_MESSAGE",
+            "PREV_TEST_NAME", "PREV_TEST_STATUS", "PREV_TEST_MESSAGE",
+            "SUITE_NAME", "SUITE_SOURCE", "SUITE_DOCUMENTATION",
+            "SUITE_METADATA", "SUITE_STATUS", "SUITE_MESSAGE",
+            "KEYWORD_STATUS", "KEYWORD_MESSAGE", "LOG_LEVEL", "OUTPUT_FILE",
+            "LOG_FILE", "REPORT_FILE", "DEBUG_FILE", "OUTPUT_DIR")
 
     if int(headers) == 1:
         buffer += gp.sprint_dashes()
@@ -135,19 +120,20 @@
 # full names.
 # Rprint Vars (instead of Rpvars)
 
-replace_dict = {"output_stream": "stdout", "mod_qualifier": "gp."}
+replace_dict = {'output_stream': 'stdout', 'mod_qualifier': 'gp.'}
 
 gp_debug_print("gp.robot_env: " + str(gp.robot_env) + "\n")
 
 # func_names contains a list of all rprint functions which should be created from their sprint counterparts.
-func_names = ["print_vars", "print_auto_vars"]
+func_names = [
+    'print_vars', 'print_auto_vars'
+]
 
 # stderr_func_names is a list of functions whose output should go to stderr rather than stdout.
 stderr_func_names = []
 
-func_defs = gp.create_print_wrapper_funcs(
-    func_names, stderr_func_names, replace_dict, "r"
-)
+func_defs = gp.create_print_wrapper_funcs(func_names, stderr_func_names,
+                                          replace_dict, "r")
 gp_debug_print(func_defs)
 exec(func_defs)
 
diff --git a/lib/gen_robot_ssh.py b/lib/gen_robot_ssh.py
index e3b4436..b0cd049 100755
--- a/lib/gen_robot_ssh.py
+++ b/lib/gen_robot_ssh.py
@@ -4,20 +4,20 @@
 This module provides many valuable ssh functions such as sprint_connection, execute_ssh_command, etc.
 """
 
-import re
-import socket
 import sys
 import traceback
-
+import re
+import socket
 import paramiko
-
 try:
     import exceptions
 except ImportError:
     import builtins as exceptions
 
-import func_timer as ft
 import gen_print as gp
+import func_timer as ft
+
+
 from robot.libraries.BuiltIn import BuiltIn
 from SSHLibrary import SSHLibrary
 
@@ -26,7 +26,8 @@
 sshlib = SSHLibrary()
 
 
-def sprint_connection(connection, indent=0):
+def sprint_connection(connection,
+                      indent=0):
     r"""
     sprint data from the connection object to a string and return it.
 
@@ -48,15 +49,15 @@
     buffer += gp.sprint_varx("term_type", connection.term_type, 0, indent)
     buffer += gp.sprint_varx("width", connection.width, 0, indent)
     buffer += gp.sprint_varx("height", connection.height, 0, indent)
-    buffer += gp.sprint_varx(
-        "path_separator", connection.path_separator, 0, indent
-    )
+    buffer += gp.sprint_varx("path_separator", connection.path_separator, 0,
+                             indent)
     buffer += gp.sprint_varx("encoding", connection.encoding, 0, indent)
 
     return buffer
 
 
-def sprint_connections(connections=None, indent=0):
+def sprint_connections(connections=None,
+                       indent=0):
     r"""
     sprint data from the connections list to a string and return it.
 
@@ -92,16 +93,16 @@
 
     for connection in sshlib.get_connections():
         # Create connection_dict from connection object.
-        connection_dict = dict(
-            (key, str(value)) for key, value in connection._config.items()
-        )
+        connection_dict = dict((key, str(value)) for key, value in
+                               connection._config.items())
         if dict(connection_dict, **open_connection_args) == connection_dict:
             return connection
 
     return False
 
 
-def login_ssh(login_args={}, max_login_attempts=5):
+def login_ssh(login_args={},
+              max_login_attempts=5):
     r"""
     Login on the latest open SSH connection.  Retry on failure up to max_login_attempts.
 
@@ -133,10 +134,8 @@
             except_type, except_value, except_traceback = sys.exc_info()
             gp.lprint_var(except_type)
             gp.lprint_varx("except_value", str(except_value))
-            if (
-                except_type is paramiko.ssh_exception.SSHException
-                and re.match(r"No existing session", str(except_value))
-            ):
+            if except_type is paramiko.ssh_exception.SSHException and\
+                    re.match(r"No existing session", str(except_value)):
                 continue
             else:
                 # We don't tolerate any other error so break from loop and re-raise exception.
@@ -149,18 +148,16 @@
     raise (except_value)
 
 
-def execute_ssh_command(
-    cmd_buf,
-    open_connection_args={},
-    login_args={},
-    print_out=0,
-    print_err=0,
-    ignore_err=1,
-    fork=0,
-    quiet=None,
-    test_mode=None,
-    time_out=None,
-):
+def execute_ssh_command(cmd_buf,
+                        open_connection_args={},
+                        login_args={},
+                        print_out=0,
+                        print_err=0,
+                        ignore_err=1,
+                        fork=0,
+                        quiet=None,
+                        test_mode=None,
+                        time_out=None):
     r"""
     Run the given command in an SSH session and return the stdout, stderr and the return code.
 
@@ -226,12 +223,11 @@
             index_or_alias = connection.index
         else:
             index_or_alias = connection.alias
-        gp.lprint_timen(
-            'Switching to existing connection: "' + str(index_or_alias) + '".'
-        )
+        gp.lprint_timen("Switching to existing connection: \""
+                        + str(index_or_alias) + "\".")
         sshlib.switch_connection(index_or_alias)
     else:
-        gp.lprint_timen("Connecting to " + open_connection_args["host"] + ".")
+        gp.lprint_timen("Connecting to " + open_connection_args['host'] + ".")
         cix = sshlib.open_connection(**open_connection_args)
         try:
             login_ssh(login_args)
@@ -248,19 +244,18 @@
             if fork:
                 sshlib.start_command(cmd_buf)
             else:
-                if open_connection_args["alias"] == "device_connection":
+                if open_connection_args['alias'] == "device_connection":
                     stdout = sshlib.write(cmd_buf)
                     stderr = ""
                     rc = 0
                 else:
-                    stdout, stderr, rc = func_timer.run(
-                        sshlib.execute_command,
-                        cmd_buf,
-                        return_stdout=True,
-                        return_stderr=True,
-                        return_rc=True,
-                        time_out=time_out,
-                    )
+                    stdout, stderr, rc = \
+                        func_timer.run(sshlib.execute_command,
+                                       cmd_buf,
+                                       return_stdout=True,
+                                       return_stderr=True,
+                                       return_rc=True,
+                                       time_out=time_out)
                     BuiltIn().log_to_console(stdout)
         except Exception:
             except_type, except_value, except_traceback = sys.exc_info()
@@ -272,47 +267,30 @@
             stderr = str(except_value)
             stdout = ""
 
-            if except_type is exceptions.AssertionError and re.match(
-                r"Connection not open", str(except_value)
-            ):
+            if except_type is exceptions.AssertionError and\
+               re.match(r"Connection not open", str(except_value)):
                 try:
                     login_ssh(login_args)
                     # Now we must continue to next loop iteration to retry the
                     # execute_command.
                     continue
                 except Exception:
-                    (
-                        except_type,
-                        except_value,
-                        except_traceback,
-                    ) = sys.exc_info()
+                    except_type, except_value, except_traceback =\
+                        sys.exc_info()
                     rc = 1
                     stderr = str(except_value)
                     stdout = ""
                     break
 
-            if (
-                (
-                    except_type is paramiko.ssh_exception.SSHException
-                    and re.match(r"SSH session not active", str(except_value))
-                )
-                or (
-                    (
-                        except_type is socket.error
-                        or except_type is ConnectionResetError
-                    )
-                    and re.match(
-                        r"\[Errno 104\] Connection reset by peer",
-                        str(except_value),
-                    )
-                )
-                or (
-                    except_type is paramiko.ssh_exception.SSHException
-                    and re.match(
-                        r"Timeout opening channel\.", str(except_value)
-                    )
-                )
-            ):
+            if (except_type is paramiko.ssh_exception.SSHException
+                and re.match(r"SSH session not active", str(except_value))) or\
+               ((except_type is socket.error
+                 or except_type is ConnectionResetError)
+                and re.match(r"\[Errno 104\] Connection reset by peer",
+                             str(except_value))) or\
+               (except_type is paramiko.ssh_exception.SSHException
+                and re.match(r"Timeout opening channel\.",
+                             str(except_value))):
                 # Close and re-open a connection.
                 # Note: close_connection() doesn't appear to get rid of the
                 # connection.  It merely closes it.  Since there is a concern
@@ -321,9 +299,8 @@
                 # connections.
                 gp.lprint_timen("Closing all connections.")
                 sshlib.close_all_connections()
-                gp.lprint_timen(
-                    "Connecting to " + open_connection_args["host"] + "."
-                )
+                gp.lprint_timen("Connecting to "
+                                + open_connection_args['host'] + ".")
                 cix = sshlib.open_connection(**open_connection_args)
                 login_ssh(login_args)
                 continue
@@ -349,16 +326,13 @@
         gp.printn(stderr + stdout)
 
     if not ignore_err:
-        message = gp.sprint_error(
-            "The prior SSH"
-            + " command returned a non-zero return"
-            + " code:\n"
-            + gp.sprint_var(rc, gp.hexa())
-            + stderr
-            + "\n"
-        )
+        message = gp.sprint_error("The prior SSH"
+                                  + " command returned a non-zero return"
+                                  + " code:\n"
+                                  + gp.sprint_var(rc, gp.hexa()) + stderr
+                                  + "\n")
         BuiltIn().should_be_equal(rc, 0, message)
 
-    if open_connection_args["alias"] == "device_connection":
+    if open_connection_args['alias'] == "device_connection":
         return stdout
     return stdout, stderr, rc
diff --git a/lib/gen_robot_utils.py b/lib/gen_robot_utils.py
index 07ff1b4..bd61a87 100644
--- a/lib/gen_robot_utils.py
+++ b/lib/gen_robot_utils.py
@@ -6,7 +6,6 @@
 """
 
 import re
-
 from robot.libraries.BuiltIn import BuiltIn
 
 
@@ -71,6 +70,5 @@
         if key in pre_var_dict:
             if value != pre_var_dict[key]:
                 global_var_name = re.sub("[@&]", "$", key)
-                BuiltIn().set_global_variable(
-                    global_var_name, pre_var_dict[key]
-                )
+                BuiltIn().set_global_variable(global_var_name,
+                                              pre_var_dict[key])
diff --git a/lib/gen_robot_valid.py b/lib/gen_robot_valid.py
index d1e8d23..5580a5e 100755
--- a/lib/gen_robot_valid.py
+++ b/lib/gen_robot_valid.py
@@ -5,10 +5,10 @@
 """
 
 import re
-
-import func_args as fa
 import gen_print as gp
 import gen_valid as gv
+import func_args as fa
+
 from robot.libraries.BuiltIn import BuiltIn
 
 
@@ -27,9 +27,8 @@
     var_value = BuiltIn().get_variable_value("${" + var_name + "}")
     if var_value is None:
         var_value = "<undefined>"
-        error_message = gv.valid_value(
-            var_value, invalid_values=[var_value], var_name=var_name
-        )
+        error_message = gv.valid_value(var_value, invalid_values=[var_value],
+                                       var_name=var_name)
         BuiltIn().fail(error_message)
 
     return var_value
@@ -74,7 +73,8 @@
 
 
 # The docstring header will be pre-pended to each validation function's existing docstring.
-docstring_header = r"""
+docstring_header = \
+    r"""
     Fail if the variable named by var_name is invalid.
     """
 
@@ -105,19 +105,12 @@
 
     start_ix = 0
     # Find the "var_value" line.
-    start_ix = next(
-        (
-            index
-            for index, value in enumerate(doc_string[start_ix:], start_ix)
-            if re.match("[ ]+var_value  ", value)
-        ),
-        None,
-    )
+    start_ix = next((index for index, value in
+                     enumerate(doc_string[start_ix:], start_ix)
+                     if re.match("[ ]+var_value  ", value)), None)
     # Replace the "var_value" line with our "var_name" line.
-    doc_string[start_ix] = (
-        "    var_name                        "
+    doc_string[start_ix] = "    var_name                        " \
         + "The name of the variable to be validated."
-    )
 
     return "\n".join(doc_string)
 
@@ -127,134 +120,121 @@
 # the gv.<function name> which they call.  Also, note that the docstring for each is created by modifying the
 # docstring from the supporting gen_valid.py function.
 
-
 def valid_type(var_name, *args, **kwargs):
+
     var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
-    error_message = gv.valid_type(
-        var_value, *args, var_name=var_name, **kwargs
-    )
+    error_message = \
+        gv.valid_type(var_value, *args, var_name=var_name, **kwargs)
     process_error_message(error_message)
 
 
 def valid_value(var_name, *args, **kwargs):
+
     var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
-    error_message = gv.valid_value(
-        var_value, *args, var_name=var_name, **kwargs
-    )
+    error_message = \
+        gv.valid_value(var_value, *args, var_name=var_name, **kwargs)
     process_error_message(error_message)
 
 
 def valid_range(var_name, *args, **kwargs):
+
     var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
-    error_message = gv.valid_range(
-        var_value, *args, var_name=var_name, **kwargs
-    )
+    error_message = \
+        gv.valid_range(var_value, *args, var_name=var_name, **kwargs)
     process_error_message(error_message)
 
 
 def valid_integer(var_name, *args, **kwargs):
+
     var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
-    error_message = gv.valid_integer(
-        var_value, *args, var_name=var_name, **kwargs
-    )
+    error_message = \
+        gv.valid_integer(var_value, *args, var_name=var_name, **kwargs)
     process_error_message(error_message)
 
 
 def valid_float(var_name, *args, **kwargs):
+
     var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
-    error_message = gv.valid_float(
-        var_value, *args, var_name=var_name, **kwargs
-    )
+    error_message = \
+        gv.valid_float(var_value, *args, var_name=var_name, **kwargs)
     process_error_message(error_message)
 
 
 def valid_date_time(var_name, *args, **kwargs):
+
     var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
-    error_message = gv.valid_date_time(
-        var_value, *args, var_name=var_name, **kwargs
-    )
+    error_message = \
+        gv.valid_date_time(var_value, *args, var_name=var_name, **kwargs)
     process_error_message(error_message)
 
 
 def valid_dir_path(var_name, *args, **kwargs):
+
     var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
-    error_message = gv.valid_dir_path(
-        var_value, *args, var_name=var_name, **kwargs
-    )
+    error_message = \
+        gv.valid_dir_path(var_value, *args, var_name=var_name, **kwargs)
     process_error_message(error_message)
 
 
 def valid_file_path(var_name, *args, **kwargs):
+
     var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
-    error_message = gv.valid_file_path(
-        var_value, *args, var_name=var_name, **kwargs
-    )
+    error_message = \
+        gv.valid_file_path(var_value, *args, var_name=var_name, **kwargs)
     process_error_message(error_message)
 
 
 def valid_path(var_name, *args, **kwargs):
+
     var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
-    error_message = gv.valid_path(
-        var_value, *args, var_name=var_name, **kwargs
-    )
+    error_message = \
+        gv.valid_path(var_value, *args, var_name=var_name, **kwargs)
     process_error_message(error_message)
 
 
 def valid_list(var_name, *args, **kwargs):
+
     var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
-    error_message = gv.valid_list(
-        var_value, *args, var_name=var_name, **kwargs
-    )
+    error_message = \
+        gv.valid_list(var_value, *args, var_name=var_name, **kwargs)
     process_error_message(error_message)
 
 
 def valid_dict(var_name, *args, **kwargs):
+
     var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
-    error_message = gv.valid_dict(
-        var_value, *args, var_name=var_name, **kwargs
-    )
+    error_message = \
+        gv.valid_dict(var_value, *args, var_name=var_name, **kwargs)
     process_error_message(error_message)
 
 
 def valid_program(var_name, *args, **kwargs):
+
     var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
-    error_message = gv.valid_program(
-        var_value, *args, var_name=var_name, **kwargs
-    )
+    error_message = \
+        gv.valid_program(var_value, *args, var_name=var_name, **kwargs)
     process_error_message(error_message)
 
 
 def valid_length(var_name, *args, **kwargs):
+
     var_value, args, kwargs = valid_init(var_name, *args, **kwargs)
-    error_message = gv.valid_length(
-        var_value, *args, var_name=var_name, **kwargs
-    )
+    error_message = \
+        gv.valid_length(var_value, *args, var_name=var_name, **kwargs)
     process_error_message(error_message)
 
 
 # Modify the validation function docstrings by calling customize_doc_string for each function in the
 # func_names list.
 func_names = [
-    "valid_type",
-    "valid_value",
-    "valid_range",
-    "valid_integer",
-    "valid_dir_path",
-    "valid_file_path",
-    "valid_path",
-    "valid_list",
-    "valid_dict",
-    "valid_program",
-    "valid_length",
-    "valid_float",
-    "valid_date_time",
+    "valid_type", "valid_value", "valid_range", "valid_integer",
+    "valid_dir_path", "valid_file_path", "valid_path", "valid_list",
+    "valid_dict", "valid_program", "valid_length", "valid_float",
+    "valid_date_time"
 ]
 
 for func_name in func_names:
-    cmd_buf = (
-        func_name
-        + ".__doc__ = customize_doc_string(gv.raw_doc_strings['"
-        + func_name
-        + "'])"
-    )
+    cmd_buf = func_name \
+        + ".__doc__ = customize_doc_string(gv.raw_doc_strings['" \
+        + func_name + "'])"
     exec(cmd_buf)
diff --git a/lib/gen_valid.py b/lib/gen_valid.py
index a422e0f..57c1a1d 100755
--- a/lib/gen_valid.py
+++ b/lib/gen_valid.py
@@ -4,12 +4,11 @@
 This module provides validation functions like valid_value(), valid_integer(), etc.
 """
 
-import datetime
 import os
-
-import func_args as fa
-import gen_cmd as gc
 import gen_print as gp
+import gen_cmd as gc
+import func_args as fa
+import datetime
 
 exit_on_error = False
 
@@ -135,7 +134,8 @@
 
 
 # The docstring header and footer will be added to each validation function's existing docstring.
-docstring_header = r"""
+docstring_header = \
+    r"""
     Determine whether var_value is valid, construct an error_message and call
     process_error_message(error_message).
 
@@ -143,7 +143,8 @@
     are processed.
     """
 
-additional_args_docstring_footer = r"""
+additional_args_docstring_footer = \
+    r"""
     var_name                        The name of the variable whose value is passed in var_value.  For the
                                     general case, this argument is unnecessary as this function can figure
                                     out the var_name.  This is provided for Robot callers in which case, this
@@ -177,9 +178,8 @@
     # If we get to this point, the validation has failed.
     var_name = get_var_name(var_name)
     error_message += "Invalid variable type:\n"
-    error_message += gp.sprint_varx(
-        var_name, var_value, gp.blank() | gp.show_type()
-    )
+    error_message += gp.sprint_varx(var_name, var_value,
+                                    gp.blank() | gp.show_type())
     error_message += "\n"
     error_message += gp.sprint_var(required_type)
 
@@ -187,6 +187,7 @@
 
 
 def valid_value(var_value, valid_values=[], invalid_values=[], var_name=None):
+
     r"""
     The variable value is valid if it is either contained in the valid_values list or if it is NOT contained
     in the invalid_values list.  If the caller specifies nothing for either of these 2 arguments,
@@ -229,11 +230,11 @@
         error_message += gp.sprint_var(valid_values)
         return process_error_message(error_message)
 
-    error_message = valid_type(valid_values, list, var_name="valid_values")
+    error_message = valid_type(valid_values, list, var_name='valid_values')
     if error_message:
         return process_error_message(error_message)
 
-    error_message = valid_type(invalid_values, list, var_name="invalid_values")
+    error_message = valid_type(invalid_values, list, var_name='invalid_values')
     if error_message:
         return process_error_message(error_message)
 
@@ -243,15 +244,14 @@
             return process_error_message(error_message)
         var_name = get_var_name(var_name)
         error_message += "Invalid variable value:\n"
-        error_message += gp.sprint_varx(
-            var_name, var_value, gp.blank() | gp.verbose() | gp.show_type()
-        )
+        error_message += gp.sprint_varx(var_name, var_value,
+                                        gp.blank() | gp.verbose()
+                                        | gp.show_type())
         error_message += "\n"
         error_message += "It must be one of the following values:\n"
         error_message += "\n"
-        error_message += gp.sprint_var(
-            valid_values, gp.blank() | gp.show_type()
-        )
+        error_message += gp.sprint_var(valid_values,
+                                       gp.blank() | gp.show_type())
         return process_error_message(error_message)
 
     if len_invalid_values == 0:
@@ -264,13 +264,14 @@
 
     var_name = get_var_name(var_name)
     error_message += "Invalid variable value:\n"
-    error_message += gp.sprint_varx(
-        var_name, var_value, gp.blank() | gp.verbose() | gp.show_type()
-    )
+    error_message += gp.sprint_varx(var_name, var_value,
+                                    gp.blank() | gp.verbose()
+                                    | gp.show_type())
     error_message += "\n"
     error_message += "It must NOT be any of the following values:\n"
     error_message += "\n"
-    error_message += gp.sprint_var(invalid_values, gp.blank() | gp.show_type())
+    error_message += gp.sprint_var(invalid_values,
+                                   gp.blank() | gp.show_type())
     return process_error_message(error_message)
 
 
@@ -337,9 +338,8 @@
         var_value = int(str(var_value), 0)
     except ValueError:
         error_message += "Invalid integer value:\n"
-        error_message += gp.sprint_varx(
-            var_name, var_value, gp.blank() | gp.show_type()
-        )
+        error_message += gp.sprint_varx(var_name, var_value,
+                                        gp.blank() | gp.show_type())
         return process_error_message(error_message)
 
     # Check the range (if any).
@@ -373,9 +373,8 @@
         var_value = float(str(var_value))
     except ValueError:
         error_message += "Invalid float value:\n"
-        error_message += gp.sprint_varx(
-            var_name, var_value, gp.blank() | gp.show_type()
-        )
+        error_message += gp.sprint_varx(var_name, var_value,
+                                        gp.blank() | gp.show_type())
         return process_error_message(error_message)
 
     # Check the range (if any).
@@ -398,15 +397,12 @@
     """
 
     error_message = ""
-    rc, out_buf = gc.shell_cmd(
-        "date -d '" + str(var_value) + "'", quiet=1, show_err=0, ignore_err=1
-    )
+    rc, out_buf = gc.shell_cmd("date -d '" + str(var_value) + "'", quiet=1, show_err=0, ignore_err=1)
     if rc:
         var_name = get_var_name(var_name)
         error_message += "Invalid date/time value:\n"
-        error_message += gp.sprint_varx(
-            var_name, var_value, gp.blank() | gp.show_type()
-        )
+        error_message += gp.sprint_varx(var_name, var_value,
+                                        gp.blank() | gp.show_type())
         return process_error_message(error_message)
 
     return process_error_message(error_message)
@@ -463,14 +459,8 @@
     return process_error_message(error_message)
 
 
-def valid_list(
-    var_value,
-    valid_values=[],
-    invalid_values=[],
-    required_values=[],
-    fail_on_empty=False,
-    var_name=None,
-):
+def valid_list(var_value, valid_values=[], invalid_values=[],
+               required_values=[], fail_on_empty=False, var_name=None):
     r"""
     The variable value is valid if it is a list where each entry can be found in the valid_values list or if
     none of its values can be found in the invalid_values list or if all of the values in the required_values
@@ -493,11 +483,7 @@
     error_message = ""
 
     # Validate this function's arguments.
-    if not (
-        bool(len(valid_values))
-        ^ bool(len(invalid_values))
-        ^ bool(len(required_values))
-    ):
+    if not (bool(len(valid_values)) ^ bool(len(invalid_values)) ^ bool(len(required_values))):
         error_message += "Programmer error - You must provide only one of the"
         error_message += " following: valid_values, invalid_values,"
         error_message += " required_values.\n"
@@ -524,25 +510,21 @@
         for ix in range(0, len(required_values)):
             if required_values[ix] not in var_value:
                 found_error = 1
-                display_required_values[ix] = (
+                display_required_values[ix] = \
                     str(display_required_values[ix]) + "*"
-                )
         if found_error:
             var_name = get_var_name(var_name)
             error_message += "The following list is invalid:\n"
-            error_message += gp.sprint_varx(
-                var_name, var_value, gp.blank() | gp.show_type()
-            )
+            error_message += gp.sprint_varx(var_name, var_value,
+                                            gp.blank() | gp.show_type())
             error_message += "\n"
             error_message += "Because some of the values in the "
             error_message += "required_values list are not present (see"
-            error_message += ' entries marked with "*"):\n'
+            error_message += " entries marked with \"*\"):\n"
             error_message += "\n"
-            error_message += gp.sprint_varx(
-                "required_values",
-                display_required_values,
-                gp.blank() | gp.show_type(),
-            )
+            error_message += gp.sprint_varx('required_values',
+                                            display_required_values,
+                                            gp.blank() | gp.show_type())
             error_message += "\n"
 
         return process_error_message(error_message)
@@ -558,10 +540,9 @@
         if found_error:
             var_name = get_var_name(var_name)
             error_message += "The following list is invalid (see entries"
-            error_message += ' marked with "*"):\n'
-            error_message += gp.sprint_varx(
-                var_name, display_var_value, gp.blank() | gp.show_type()
-            )
+            error_message += " marked with \"*\"):\n"
+            error_message += gp.sprint_varx(var_name, display_var_value,
+                                            gp.blank() | gp.show_type())
             error_message += "\n"
             error_message += gp.sprint_var(invalid_values, gp.show_type())
         return process_error_message(error_message)
@@ -576,10 +557,9 @@
     if found_error:
         var_name = get_var_name(var_name)
         error_message += "The following list is invalid (see entries marked"
-        error_message += ' with "*"):\n'
-        error_message += gp.sprint_varx(
-            var_name, display_var_value, gp.blank() | gp.show_type()
-        )
+        error_message += " with \"*\"):\n"
+        error_message += gp.sprint_varx(var_name, display_var_value,
+                                        gp.blank() | gp.show_type())
         error_message += "\n"
         error_message += gp.sprint_var(valid_values, gp.show_type())
         return process_error_message(error_message)
@@ -587,13 +567,7 @@
     return process_error_message(error_message)
 
 
-def valid_dict(
-    var_value,
-    required_keys=[],
-    valid_values={},
-    invalid_values={},
-    var_name=None,
-):
+def valid_dict(var_value, required_keys=[], valid_values={}, invalid_values={}, var_name=None):
     r"""
     The dictionary variable value is valid if it contains all required keys and each entry passes the
     valid_value() call.
@@ -627,9 +601,7 @@
         var_name = get_var_name(var_name)
         error_message += "The following dictionary is invalid because it is"
         error_message += " missing required keys:\n"
-        error_message += gp.sprint_varx(
-            var_name, var_value, gp.blank() | gp.show_type()
-        )
+        error_message += gp.sprint_varx(var_name, var_value, gp.blank() | gp.show_type())
         error_message += "\n"
         error_message += gp.sprint_var(missing_keys, gp.show_type())
         return process_error_message(error_message)
@@ -637,24 +609,15 @@
     var_name = get_var_name(var_name)
     if len(valid_values):
         keys = valid_values.keys()
-        error_message = valid_dict(
-            var_value, required_keys=keys, var_name=var_name
-        )
+        error_message = valid_dict(var_value, required_keys=keys, var_name=var_name)
         if error_message:
             return process_error_message(error_message)
     for key, value in valid_values.items():
         key_name = "  [" + key + "]"
-        sub_error_message = valid_value(
-            var_value[key], valid_values=value, var_name=key_name
-        )
+        sub_error_message = valid_value(var_value[key], valid_values=value, var_name=key_name)
         if sub_error_message:
-            error_message += (
-                "The following dictionary is invalid because one of its"
-                " entries is invalid:\n"
-            )
-            error_message += gp.sprint_varx(
-                var_name, var_value, gp.blank() | gp.show_type()
-            )
+            error_message += "The following dictionary is invalid because one of its entries is invalid:\n"
+            error_message += gp.sprint_varx(var_name, var_value, gp.blank() | gp.show_type())
             error_message += "\n"
             error_message += sub_error_message
             return process_error_message(error_message)
@@ -663,17 +626,10 @@
         if key not in var_value:
             continue
         key_name = "  [" + key + "]"
-        sub_error_message = valid_value(
-            var_value[key], invalid_values=value, var_name=key_name
-        )
+        sub_error_message = valid_value(var_value[key], invalid_values=value, var_name=key_name)
         if sub_error_message:
-            error_message += (
-                "The following dictionary is invalid because one of its"
-                " entries is invalid:\n"
-            )
-            error_message += gp.sprint_varx(
-                var_name, var_value, gp.blank() | gp.show_type()
-            )
+            error_message += "The following dictionary is invalid because one of its entries is invalid:\n"
+            error_message += gp.sprint_varx(var_name, var_value, gp.blank() | gp.show_type())
             error_message += "\n"
             error_message += sub_error_message
             return process_error_message(error_message)
@@ -691,9 +647,8 @@
     """
 
     error_message = ""
-    rc, out_buf = gc.shell_cmd(
-        "which " + var_value, quiet=1, show_err=0, ignore_err=1
-    )
+    rc, out_buf = gc.shell_cmd("which " + var_value, quiet=1, show_err=0,
+                               ignore_err=1)
     if rc:
         var_name = get_var_name(var_name)
         error_message += "The following required program could not be found"
@@ -737,19 +692,10 @@
 # Modify selected function docstrings by adding headers/footers.
 
 func_names = [
-    "valid_type",
-    "valid_value",
-    "valid_range",
-    "valid_integer",
-    "valid_dir_path",
-    "valid_file_path",
-    "valid_path",
-    "valid_list",
-    "valid_dict",
-    "valid_program",
-    "valid_length",
-    "valid_float",
-    "valid_date_time",
+    "valid_type", "valid_value", "valid_range", "valid_integer",
+    "valid_dir_path", "valid_file_path", "valid_path", "valid_list",
+    "valid_dict", "valid_program", "valid_length", "valid_float",
+    "valid_date_time"
 ]
 
 raw_doc_strings = {}
@@ -759,5 +705,5 @@
     cmd_buf += ".__doc__"
     exec(cmd_buf)
     cmd_buf = func_name + ".__doc__ = docstring_header + " + func_name
-    cmd_buf += '.__doc__.rstrip(" \\n") + additional_args_docstring_footer'
+    cmd_buf += ".__doc__.rstrip(\" \\n\") + additional_args_docstring_footer"
     exec(cmd_buf)
diff --git a/lib/ipmi_client.py b/lib/ipmi_client.py
index 7d4e582..7eb8f08 100644
--- a/lib/ipmi_client.py
+++ b/lib/ipmi_client.py
@@ -5,32 +5,32 @@
 """
 
 import collections
-
-import gen_cmd as gc
 import gen_print as gp
+import gen_cmd as gc
 from robot.libraries.BuiltIn import BuiltIn
 
+
 # Set default values for required IPMI options.
-ipmi_interface = "lanplus"
-ipmi_cipher_suite = BuiltIn().get_variable_value("${IPMI_CIPHER_LEVEL}", "17")
-ipmi_timeout = BuiltIn().get_variable_value("${IPMI_TIMEOUT}", "3")
-ipmi_port = BuiltIn().get_variable_value("${IPMI_PORT}", "623")
+ipmi_interface = 'lanplus'
+ipmi_cipher_suite = BuiltIn().get_variable_value("${IPMI_CIPHER_LEVEL}", '17')
+ipmi_timeout = BuiltIn().get_variable_value("${IPMI_TIMEOUT}", '3')
+ipmi_port = BuiltIn().get_variable_value("${IPMI_PORT}", '623')
 ipmi_username = BuiltIn().get_variable_value("${IPMI_USERNAME}", "root")
 ipmi_password = BuiltIn().get_variable_value("${IPMI_PASSWORD}", "0penBmc")
 ipmi_host = BuiltIn().get_variable_value("${OPENBMC_HOST}")
 
 # Create a list of the required IPMI options.
-ipmi_required_options = ["I", "C", "N", "p", "U", "P", "H"]
+ipmi_required_options = ['I', 'C', 'N', 'p', 'U', 'P', 'H']
 # The following dictionary maps the ipmitool option names (e.g. "I") to our
 # more descriptive names (e.g. "interface") for the required options.
 ipmi_option_name_map = {
-    "I": "interface",
-    "C": "cipher_suite",
-    "N": "timeout",
-    "p": "port",
-    "U": "username",
-    "P": "password",
-    "H": "host",
+    'I': 'interface',
+    'C': 'cipher_suite',
+    'N': 'timeout',
+    'p': 'port',
+    'U': 'username',
+    'P': 'password',
+    'H': 'host',
 }
 
 
@@ -78,7 +78,7 @@
         else:
             # The caller hasn't specified this required option so specify it
             # for them using the global value.
-            var_name = "ipmi_" + ipmi_option_name_map[option]
+            var_name = 'ipmi_' + ipmi_option_name_map[option]
             value = eval(var_name)
             new_options[option] = value
     # Include the remainder of the caller's options in the new options
@@ -86,7 +86,7 @@
     for key, value in options.items():
         new_options[key] = value
 
-    return gc.create_command_string("ipmitool", command, new_options)
+    return gc.create_command_string('ipmitool', command, new_options)
 
 
 def verify_ipmi_user_parm_accepted():
@@ -99,10 +99,11 @@
     global ipmi_required_options
     print_output = 0
 
-    command_string = create_ipmi_ext_command_string("power status")
-    rc, stdout = gc.shell_cmd(
-        command_string, print_output=print_output, show_err=0, ignore_err=1
-    )
+    command_string = create_ipmi_ext_command_string('power status')
+    rc, stdout = gc.shell_cmd(command_string,
+                              print_output=print_output,
+                              show_err=0,
+                              ignore_err=1)
     gp.qprint_var(rc, 1)
     if rc == 0:
         # The OBMC accepts the ipmitool "-U" option so new further work needs
@@ -111,12 +112,13 @@
 
     # Remove the "U" option from ipmi_required_options to allow us to create a
     # command string without the "U" option.
-    if "U" in ipmi_required_options:
-        del ipmi_required_options[ipmi_required_options.index("U")]
-    command_string = create_ipmi_ext_command_string("power status")
-    rc, stdout = gc.shell_cmd(
-        command_string, print_output=print_output, show_err=0, ignore_err=1
-    )
+    if 'U' in ipmi_required_options:
+        del ipmi_required_options[ipmi_required_options.index('U')]
+    command_string = create_ipmi_ext_command_string('power status')
+    rc, stdout = gc.shell_cmd(command_string,
+                              print_output=print_output,
+                              show_err=0,
+                              ignore_err=1)
     gp.qprint_var(rc, 1)
     if rc == 0:
         # The "U" option has been removed from the ipmi_required_options
@@ -128,7 +130,7 @@
 
     # Revert to original ipmi_required_options by inserting 'U' right before
     # 'P'.
-    ipmi_required_options.insert(ipmi_required_options.index("P"), "U")
+    ipmi_required_options.insert(ipmi_required_options.index('P'), 'U')
 
 
 def ipmi_setup():
@@ -150,9 +152,7 @@
     command                         An IPMI command (e.g. "power status").
     """
 
-    ipmi_user_options = BuiltIn().get_variable_value(
-        "${IPMI_USER_OPTIONS}", ""
-    )
+    ipmi_user_options = BuiltIn().get_variable_value("${IPMI_USER_OPTIONS}", '')
     if ipmi_user_options == "":
         return command
     return ipmi_user_options + " " + command
diff --git a/lib/ipmi_utils.py b/lib/ipmi_utils.py
index 5d1598c..b2a1b2b 100644
--- a/lib/ipmi_utils.py
+++ b/lib/ipmi_utils.py
@@ -4,18 +4,17 @@
 Provide useful ipmi functions.
 """
 
-import json
 import re
-import tempfile
-
-import bmc_ssh_utils as bsu
-import gen_cmd as gc
-import gen_misc as gm
 import gen_print as gp
+import gen_misc as gm
+import gen_cmd as gc
 import gen_robot_keyword as grk
 import gen_robot_utils as gru
-import ipmi_client as ic
+import bmc_ssh_utils as bsu
 import var_funcs as vf
+import ipmi_client as ic
+import tempfile
+import json
 from robot.libraries.BuiltIn import BuiltIn
 
 gru.my_import_resource("ipmi_client.robot")
@@ -77,19 +76,17 @@
     # setting_value                 Value which needs to be set (e.g. "7").
     """
 
-    status, ret_values = grk.run_key_u(
-        "Run IPMI Standard Command  sol set "
-        + setting_name
-        + " "
-        + setting_value
-    )
+    status, ret_values = grk.run_key_u("Run IPMI Standard Command  sol set "
+                                       + setting_name + " " + setting_value)
 
     return status
 
 
-def execute_ipmi_cmd(
-    cmd_string, ipmi_cmd_type="inband", print_output=1, ignore_err=0, **options
-):
+def execute_ipmi_cmd(cmd_string,
+                     ipmi_cmd_type='inband',
+                     print_output=1,
+                     ignore_err=0,
+                     **options):
     r"""
     Run the given command string as an IPMI command and return the stdout,
     stderr and the return code.
@@ -110,25 +107,23 @@
                                     See that function's prolog for details.
     """
 
-    if ipmi_cmd_type == "inband":
+    if ipmi_cmd_type == 'inband':
         IPMI_INBAND_CMD = BuiltIn().get_variable_value("${IPMI_INBAND_CMD}")
         cmd_buf = IPMI_INBAND_CMD + " " + cmd_string
-        return bsu.os_execute_command(
-            cmd_buf, print_out=print_output, ignore_err=ignore_err
-        )
+        return bsu.os_execute_command(cmd_buf,
+                                      print_out=print_output,
+                                      ignore_err=ignore_err)
 
-    if ipmi_cmd_type == "external":
+    if ipmi_cmd_type == 'external':
         cmd_buf = ic.create_ipmi_ext_command_string(cmd_string, **options)
-        rc, stdout, stderr = gc.shell_cmd(
-            cmd_buf,
-            print_output=print_output,
-            ignore_err=ignore_err,
-            return_stderr=1,
-        )
+        rc, stdout, stderr = gc.shell_cmd(cmd_buf,
+                                          print_output=print_output,
+                                          ignore_err=ignore_err,
+                                          return_stderr=1)
         return stdout, stderr, rc
 
 
-def get_lan_print_dict(channel_number="", ipmi_cmd_type="external"):
+def get_lan_print_dict(channel_number='', ipmi_cmd_type='external'):
     r"""
     Get IPMI 'lan print' output and return it as a dictionary.
 
@@ -180,34 +175,26 @@
     # special processing.  We essentially want to isolate its data and remove
     # the 'Auth Type Enable' string so that key_value_outbuf_to_dict can
     # process it as a sub-dictionary.
-    cmd_buf = (
-        "lan print "
-        + channel_number
-        + " | grep -E '^(Auth Type Enable)"
-        + "?[ ]+: ' | sed -re 's/^(Auth Type Enable)?[ ]+: //g'"
-    )
-    stdout1, stderr, rc = execute_ipmi_cmd(
-        cmd_buf, ipmi_cmd_type, print_output=0
-    )
+    cmd_buf = "lan print " + channel_number + " | grep -E '^(Auth Type Enable)" +\
+        "?[ ]+: ' | sed -re 's/^(Auth Type Enable)?[ ]+: //g'"
+    stdout1, stderr, rc = execute_ipmi_cmd(cmd_buf, ipmi_cmd_type,
+                                           print_output=0)
 
     # Now get the remainder of the data and exclude the lines with no field
     # names (i.e. the 'Auth Type Enable' sub-fields).
     cmd_buf = "lan print " + channel_number + " | grep -E -v '^[ ]+: '"
-    stdout2, stderr, rc = execute_ipmi_cmd(
-        cmd_buf, ipmi_cmd_type, print_output=0
-    )
+    stdout2, stderr, rc = execute_ipmi_cmd(cmd_buf, ipmi_cmd_type,
+                                           print_output=0)
 
     # Make auth_type_enable_dict sub-dictionary...
-    auth_type_enable_dict = vf.key_value_outbuf_to_dict(
-        stdout1, to_lower=0, underscores=0
-    )
+    auth_type_enable_dict = vf.key_value_outbuf_to_dict(stdout1, to_lower=0,
+                                                        underscores=0)
 
     # Create the lan_print_dict...
-    lan_print_dict = vf.key_value_outbuf_to_dict(
-        stdout2, to_lower=0, underscores=0
-    )
+    lan_print_dict = vf.key_value_outbuf_to_dict(stdout2, to_lower=0,
+                                                 underscores=0)
     # Re-assign 'Auth Type Enable' to contain the auth_type_enable_dict.
-    lan_print_dict["Auth Type Enable"] = auth_type_enable_dict
+    lan_print_dict['Auth Type Enable'] = auth_type_enable_dict
 
     return lan_print_dict
 
@@ -243,13 +230,12 @@
                                     trailing " Watts" substring.
     """
 
-    status, ret_values = grk.run_key_u(
-        "Run IPMI Standard Command  dcmi power reading"
-    )
+    status, ret_values = \
+        grk.run_key_u("Run IPMI Standard Command  dcmi power reading")
     result = vf.key_value_outbuf_to_dict(ret_values)
 
     if strip_watts:
-        result.update((k, re.sub(" Watts$", "", v)) for k, v in result.items())
+        result.update((k, re.sub(' Watts$', '', v)) for k, v in result.items())
 
     return result
 
@@ -306,7 +292,8 @@
         [aux_firmware_rev_info][3]:      0x00
     """
 
-    status, ret_values = grk.run_key_u("Run IPMI Standard Command  mc info")
+    status, ret_values = \
+        grk.run_key_u("Run IPMI Standard Command  mc info")
     result = vf.key_value_outbuf_to_dict(ret_values, process_indent=1)
 
     return result
@@ -347,7 +334,8 @@
       [sdr_repository_alloc_info_supported]: no
     """
 
-    status, ret_values = grk.run_key_u("Run IPMI Standard Command  sdr info")
+    status, ret_values = \
+        grk.run_key_u("Run IPMI Standard Command  sdr info")
     result = vf.key_value_outbuf_to_dict(ret_values, process_indent=1)
 
     return result
@@ -428,21 +416,19 @@
         [board_part_number]:       02CY209
     """
 
-    status, ret_values = grk.run_key_u(
-        "Run IPMI Standard Command  fru print -N 50"
-    )
+    status, ret_values = \
+        grk.run_key_u("Run IPMI Standard Command  fru print -N 50")
 
     # Manipulate the "Device not present" line to create a "state" key.
-    ret_values = re.sub(
-        "Device not present", "state : Device not present", ret_values
-    )
+    ret_values = re.sub("Device not present", "state : Device not present",
+                        ret_values)
 
-    return [
-        vf.key_value_outbuf_to_dict(x) for x in re.split("\n\n", ret_values)
-    ]
+    return [vf.key_value_outbuf_to_dict(x) for x in re.split("\n\n",
+                                                             ret_values)]
 
 
-def get_component_fru_info(component="cpu", fru_objs=None):
+def get_component_fru_info(component='cpu',
+                           fru_objs=None):
     r"""
     Get fru info for the given component and return it as a list of
     dictionaries.
@@ -463,11 +449,9 @@
 
     if fru_objs is None:
         fru_objs = get_fru_info()
-    return [
-        x
-        for x in fru_objs
-        if re.match(component + "([0-9]+)? ", x["fru_device_description"])
-    ]
+    return\
+        [x for x in fru_objs
+         if re.match(component + '([0-9]+)? ', x['fru_device_description'])]
 
 
 def get_user_info(userid, channel_number=1):
@@ -510,12 +494,8 @@
       [enable_status]        enabled
     """
 
-    status, ret_values = grk.run_key_u(
-        "Run IPMI Standard Command  channel getaccess "
-        + str(channel_number)
-        + " "
-        + str(userid)
-    )
+    status, ret_values = grk.run_key_u("Run IPMI Standard Command  channel getaccess "
+                                       + str(channel_number) + " " + str(userid))
 
     if userid == "":
         return vf.key_value_outbuf_to_dicts(ret_values, process_indent=1)
@@ -524,6 +504,7 @@
 
 
 def channel_getciphers_ipmi():
+
     r"""
     Run 'channel getciphers ipmi' command and return the result as a list of dictionaries.
 
@@ -570,9 +551,7 @@
         [revision]:             129
         [device_revision]:        1
     """
-    stdout, stderr, rc = bsu.bmc_execute_command(
-        "cat /usr/share/ipmi-providers/dev_id.json"
-    )
+    stdout, stderr, rc = bsu.bmc_execute_command("cat /usr/share/ipmi-providers/dev_id.json")
 
     result = json.loads(stdout)
 
@@ -583,7 +562,7 @@
     # [6:4] reserved. Return as 0.
     # [3:0] Device Revision, binary encoded.
 
-    result["device_revision"] = result["revision"] & 0x0F
+    result['device_revision'] = result['revision'] & 0x0F
 
     return result
 
@@ -639,9 +618,8 @@
       [power_button_disabled]:               false
     """
 
-    status, ret_values = grk.run_key_u(
-        "Run IPMI Standard Command  chassis status"
-    )
+    status, ret_values = \
+        grk.run_key_u("Run IPMI Standard Command  chassis status")
     result = vf.key_value_outbuf_to_dict(ret_values, process_indent=1)
 
     return result
@@ -671,19 +649,19 @@
         [access_mode]:                                always available
     """
 
-    status, ret_values = grk.run_key_u(
-        "Run IPMI Standard Command  channel info " + str(channel_number)
-    )
+    status, ret_values = \
+        grk.run_key_u("Run IPMI Standard Command  channel info " + str(channel_number))
     key_var_list = list(filter(None, ret_values.split("\n")))
     # To match the dict format, add a colon after 'Volatile(active) Settings' and 'Non-Volatile Settings'
     # respectively.
-    key_var_list[6] = "Volatile(active) Settings:"
-    key_var_list[11] = "Non-Volatile Settings:"
+    key_var_list[6] = 'Volatile(active) Settings:'
+    key_var_list[11] = 'Non-Volatile Settings:'
     result = vf.key_value_list_to_dict(key_var_list, process_indent=1)
     return result
 
 
 def get_user_access_ipmi(channel_number=1):
+
     r"""
     Run 'user list [<channel number>]' command and return the result as a list of dictionaries.
 
@@ -733,12 +711,9 @@
         [channel_supports_ipmi_v2.0]:                   yes
     """
 
-    status, ret_values = grk.run_key_u(
-        "Run IPMI Standard Command  channel authcap "
-        + str(channel_number)
-        + " "
-        + str(privilege_level)
-    )
+    status, ret_values = \
+        grk.run_key_u("Run IPMI Standard Command  channel authcap " + str(channel_number) + " "
+                      + str(privilege_level))
     result = vf.key_value_outbuf_to_dict(ret_values, process_indent=1)
 
     return result
@@ -815,8 +790,8 @@
     newthreshold_list = []
     for th in old_threshold:
         th = th.strip()
-        if th == "na":
-            newthreshold_list.append("na")
+        if th == 'na':
+            newthreshold_list.append('na')
         else:
             x = int(float(th)) + n
             newthreshold_list.append(x)
diff --git a/lib/jobs_processing.py b/lib/jobs_processing.py
index 2f698c9..5555b62 100644
--- a/lib/jobs_processing.py
+++ b/lib/jobs_processing.py
@@ -6,12 +6,11 @@
 
 """
 
-import datetime
-import os
-from multiprocessing import Manager, Process
-
-import gen_print as gp
 from robot.libraries.BuiltIn import BuiltIn
+from multiprocessing import Process, Manager
+import os
+import datetime
+import gen_print as gp
 
 
 def execute_keyword(keyword_name, return_dict):
@@ -49,9 +48,8 @@
 
     # Append user-defined times process needed to execute.
     for ix in range(int(num_process)):
-        task = Process(
-            target=execute_keyword, args=(keyword_name, return_dict)
-        )
+        task = Process(target=execute_keyword,
+                       args=(keyword_name, return_dict))
         process_list.append(task)
         task.start()
 
@@ -100,10 +98,8 @@
     for keywords_data in keyword_names:
         keyword_args = tuple(keywords_data.split(" ")[-number_args:])
         keyword_name = " ".join(keywords_data.split(" ")[:-number_args])
-        task = Process(
-            target=execute_keyword_args,
-            args=(keyword_name, keyword_args, return_dict),
-        )
+        task = Process(target=execute_keyword_args,
+                       args=(keyword_name, keyword_args, return_dict))
         process_list.append(task)
         task.start()
 
diff --git a/lib/logging_utils.py b/lib/logging_utils.py
index 7936c25..0cdbea9 100644
--- a/lib/logging_utils.py
+++ b/lib/logging_utils.py
@@ -4,28 +4,23 @@
 Provide useful error log utility keywords.
 """
 
-import imp
-import os
-import sys
-
 import gen_print as gp
-import gen_robot_utils as gru
 import variables as var
+import gen_robot_utils as gru
+import sys
+import os
+import imp
+
 from robot.libraries.BuiltIn import BuiltIn
 
-base_path = (
-    os.path.dirname(os.path.dirname(imp.find_module("gen_robot_print")[1]))
-    + os.sep
-)
+base_path = os.path.dirname(os.path.dirname(
+                            imp.find_module("gen_robot_print")[1])) + os.sep
 sys.path.append(base_path + "data/")
 gru.my_import_resource("logging_utils.robot")
 
 
-redfish_support_trans_state = int(
-    os.environ.get("REDFISH_SUPPORT_TRANS_STATE", 0)
-) or int(
-    BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0)
-)
+redfish_support_trans_state = int(os.environ.get('REDFISH_SUPPORT_TRANS_STATE', 0)) or \
+    int(BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0))
 
 
 def print_error_logs(error_logs, key_list=None):
@@ -112,7 +107,7 @@
     """
 
     if error_logs is None:
-        error_logs = BuiltIn().run_keyword("Get Error Logs")
+        error_logs = BuiltIn().run_keyword('Get Error Logs')
 
     # Look for any error log entries containing the 'AdditionalData' field
     # which in turn has an entry starting with "ESEL=".  Here is an excerpt of
@@ -124,9 +119,9 @@
     #       [AdditionalData][1]:   ESEL=00 00 df 00 00 00 00 20 00 04...
     esels = []
     for error_log in error_logs.values():
-        if "AdditionalData" in error_log:
-            for additional_data in error_log["AdditionalData"]:
-                if additional_data.startswith("ESEL="):
+        if 'AdditionalData' in error_log:
+            for additional_data in error_log['AdditionalData']:
+                if additional_data.startswith('ESEL='):
                     esels.append(additional_data)
 
     return esels
diff --git a/lib/obmc_boot_test.py b/lib/obmc_boot_test.py
index 5435dcb..79358a6 100755
--- a/lib/obmc_boot_test.py
+++ b/lib/obmc_boot_test.py
@@ -4,49 +4,47 @@
 This module is the python counterpart to obmc_boot_test.
 """
 
-import glob
-import imp
 import os
+import imp
+import time
+import glob
 import random
 import re
 import signal
-import time
-
 try:
     import cPickle as pickle
 except ImportError:
     import pickle
-
 import socket
 
-import gen_arg as ga
-import gen_cmd as gc
-import gen_misc as gm
-import gen_plug_in_utils as gpu
+from robot.utils import DotDict
+from robot.libraries.BuiltIn import BuiltIn
+
+from boot_data import *
 import gen_print as gp
-import gen_robot_keyword as grk
 import gen_robot_plug_in as grpi
+import gen_arg as ga
 import gen_valid as gv
-import logging_utils as log
-import pel_utils as pel
-import run_keyword as rk
+import gen_misc as gm
+import gen_cmd as gc
+import gen_robot_keyword as grk
 import state as st
 import var_stack as vs
-from boot_data import *
-from robot.libraries.BuiltIn import BuiltIn
-from robot.utils import DotDict
+import gen_plug_in_utils as gpu
+import pel_utils as pel
+import logging_utils as log
+import run_keyword as rk
 
-base_path = (
-    os.path.dirname(os.path.dirname(imp.find_module("gen_robot_print")[1]))
-    + os.sep
-)
+base_path = os.path.dirname(os.path.dirname(
+                            imp.find_module("gen_robot_print")[1])) +\
+    os.sep
 sys.path.append(base_path + "extended/")
 
 # Setting master_pid correctly influences the behavior of plug-ins like
 # DB_Logging
 program_pid = os.getpid()
-master_pid = os.environ.get("AUTOBOOT_MASTER_PID", program_pid)
-pgm_name = re.sub("\\.py$", "", os.path.basename(__file__))
+master_pid = os.environ.get('AUTOBOOT_MASTER_PID', program_pid)
+pgm_name = re.sub('\\.py$', '', os.path.basename(__file__))
 
 # Set up boot data structures.
 os_host = BuiltIn().get_variable_value("${OS_HOST}", default="")
@@ -57,41 +55,29 @@
 max_boot_history = 10
 boot_history = []
 
-state = st.return_state_constant("default_state")
+state = st.return_state_constant('default_state')
 cp_setup_called = 0
 next_boot = ""
-base_tool_dir_path = (
-    os.path.normpath(os.environ.get("AUTOBOOT_BASE_TOOL_DIR_PATH", "/tmp"))
-    + os.sep
-)
+base_tool_dir_path = os.path.normpath(os.environ.get(
+    'AUTOBOOT_BASE_TOOL_DIR_PATH', "/tmp")) + os.sep
 
-ffdc_dir_path = os.path.normpath(os.environ.get("FFDC_DIR_PATH", "")) + os.sep
+ffdc_dir_path = os.path.normpath(os.environ.get('FFDC_DIR_PATH', '')) + os.sep
 boot_success = 0
 
-status_dir_path = os.environ.get(
-    "STATUS_DIR_PATH", ""
-) or BuiltIn().get_variable_value("${STATUS_DIR_PATH}", default="")
+status_dir_path = os.environ.get('STATUS_DIR_PATH', "") or \
+    BuiltIn().get_variable_value("${STATUS_DIR_PATH}", default="")
 if status_dir_path != "":
     status_dir_path = os.path.normpath(status_dir_path) + os.sep
     # For plugin expecting env gen_call_robot.py
-    os.environ["STATUS_DIR_PATH"] = status_dir_path
+    os.environ['STATUS_DIR_PATH'] = status_dir_path
 
-redfish_support_trans_state = int(
-    os.environ.get("REDFISH_SUPPORT_TRANS_STATE", 0)
-) or int(
-    BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0)
-)
-redfish_supported = BuiltIn().get_variable_value(
-    "${REDFISH_SUPPORTED}", default=False
-)
-redfish_rest_supported = BuiltIn().get_variable_value(
-    "${REDFISH_REST_SUPPORTED}", default=False
-)
-redfish_delete_sessions = int(
-    BuiltIn().get_variable_value("${REDFISH_DELETE_SESSIONS}", default=1)
-)
+redfish_support_trans_state = int(os.environ.get('REDFISH_SUPPORT_TRANS_STATE', 0)) or \
+    int(BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0))
+redfish_supported = BuiltIn().get_variable_value("${REDFISH_SUPPORTED}", default=False)
+redfish_rest_supported = BuiltIn().get_variable_value("${REDFISH_REST_SUPPORTED}", default=False)
+redfish_delete_sessions = int(BuiltIn().get_variable_value("${REDFISH_DELETE_SESSIONS}", default=1))
 if redfish_supported:
-    redfish = BuiltIn().get_library_instance("redfish")
+    redfish = BuiltIn().get_library_instance('redfish')
     default_power_on = "Redfish Power On"
     default_power_off = "Redfish Power Off"
     if not redfish_support_trans_state:
@@ -102,9 +88,7 @@
         delete_errlogs_cmd = "Redfish Purge Event Log"
         delete_bmcdump_cmd = "Redfish Delete All BMC Dumps"
         delete_sysdump_cmd = "Redfish Delete All System Dumps"
-        default_set_power_policy = (
-            "Redfish Set Power Restore Policy  AlwaysOff"
-        )
+        default_set_power_policy = "Redfish Set Power Restore Policy  AlwaysOff"
 else:
     default_power_on = "REST Power On"
     default_power_off = "REST Power Off"
@@ -114,12 +98,12 @@
 boot_count = 0
 
 LOG_LEVEL = BuiltIn().get_variable_value("${LOG_LEVEL}")
-AUTOBOOT_FFDC_PREFIX = os.environ.get("AUTOBOOT_FFDC_PREFIX", "")
+AUTOBOOT_FFDC_PREFIX = os.environ.get('AUTOBOOT_FFDC_PREFIX', '')
 ffdc_prefix = AUTOBOOT_FFDC_PREFIX
 boot_start_time = ""
 boot_end_time = ""
-save_stack = vs.var_stack("save_stack")
-main_func_parm_list = ["boot_stack", "stack_mode", "quiet"]
+save_stack = vs.var_stack('save_stack')
+main_func_parm_list = ['boot_stack', 'stack_mode', 'quiet']
 
 
 def dump_ffdc_rc():
@@ -144,7 +128,8 @@
     return 0x00000200
 
 
-def process_host(host, host_var_name=""):
+def process_host(host,
+                 host_var_name=""):
     r"""
     Process a host by getting the associated host name and IP address and
     setting them in global variables.
@@ -177,19 +162,9 @@
 
     host_name_var_name = re.sub("host", "host_name", host_var_name)
     ip_var_name = re.sub("host", "ip", host_var_name)
-    cmd_buf = (
-        "global "
-        + host_name_var_name
-        + ", "
-        + ip_var_name
-        + " ; "
-        + host_name_var_name
-        + ", "
-        + ip_var_name
-        + " = gm.get_host_name_ip('"
-        + host
-        + "')"
-    )
+    cmd_buf = "global " + host_name_var_name + ", " + ip_var_name + " ; " +\
+        host_name_var_name + ", " + ip_var_name + " = gm.get_host_name_ip('" +\
+        host + "')"
     exec(cmd_buf)
 
 
@@ -206,26 +181,16 @@
     global parm_list
     parm_list = BuiltIn().get_variable_value("${parm_list}")
     # The following subset of parms should be processed as integers.
-    int_list = [
-        "max_num_tests",
-        "boot_pass",
-        "boot_fail",
-        "ffdc_only",
-        "boot_fail_threshold",
-        "delete_errlogs",
-        "call_post_stack_plug",
-        "do_pre_boot_plug_in_setup",
-        "quiet",
-        "test_mode",
-        "debug",
-    ]
+    int_list = ['max_num_tests', 'boot_pass', 'boot_fail', 'ffdc_only',
+                'boot_fail_threshold', 'delete_errlogs',
+                'call_post_stack_plug', 'do_pre_boot_plug_in_setup', 'quiet',
+                'test_mode', 'debug']
     for parm in parm_list:
         if parm in int_list:
-            sub_cmd = (
-                'int(BuiltIn().get_variable_value("${' + parm + '}", "0"))'
-            )
+            sub_cmd = "int(BuiltIn().get_variable_value(\"${" + parm +\
+                      "}\", \"0\"))"
         else:
-            sub_cmd = 'BuiltIn().get_variable_value("${' + parm + '}")'
+            sub_cmd = "BuiltIn().get_variable_value(\"${" + parm + "}\")"
         cmd_buf = "global " + parm + " ; " + parm + " = " + sub_cmd
         gp.dpissuing(cmd_buf)
         exec(cmd_buf)
@@ -252,7 +217,7 @@
     global valid_boot_types
 
     if ffdc_dir_path_style == "":
-        ffdc_dir_path_style = int(os.environ.get("FFDC_DIR_PATH_STYLE", "0"))
+        ffdc_dir_path_style = int(os.environ.get('FFDC_DIR_PATH_STYLE', '0'))
 
     # Convert these program parms to lists for easier processing..
     boot_list = list(filter(None, boot_list.split(":")))
@@ -262,29 +227,25 @@
     valid_boot_types = create_valid_boot_list(boot_table)
 
     cleanup_boot_results_file()
-    boot_results_file_path = create_boot_results_file_path(
-        pgm_name, openbmc_nickname, master_pid
-    )
+    boot_results_file_path = create_boot_results_file_path(pgm_name,
+                                                           openbmc_nickname,
+                                                           master_pid)
 
     if os.path.isfile(boot_results_file_path):
         # We've been called before in this run so we'll load the saved
         # boot_results and boot_history objects.
-        boot_results, boot_history = pickle.load(
-            open(boot_results_file_path, "rb")
-        )
+        boot_results, boot_history =\
+            pickle.load(open(boot_results_file_path, 'rb'))
     else:
         boot_results = boot_results(boot_table, boot_pass, boot_fail)
 
-    ffdc_list_file_path = (
-        base_tool_dir_path + openbmc_nickname + "/FFDC_FILE_LIST"
-    )
-    ffdc_report_list_path = (
-        base_tool_dir_path + openbmc_nickname + "/FFDC_REPORT_FILE_LIST"
-    )
+    ffdc_list_file_path = base_tool_dir_path + openbmc_nickname +\
+        "/FFDC_FILE_LIST"
+    ffdc_report_list_path = base_tool_dir_path + openbmc_nickname +\
+        "/FFDC_REPORT_FILE_LIST"
 
-    ffdc_summary_list_path = (
-        base_tool_dir_path + openbmc_nickname + "/FFDC_SUMMARY_FILE_LIST"
-    )
+    ffdc_summary_list_path = base_tool_dir_path + openbmc_nickname +\
+        "/FFDC_SUMMARY_FILE_LIST"
 
 
 def initial_plug_in_setup():
@@ -301,38 +262,26 @@
     BuiltIn().set_global_variable("${FFDC_DIR_PATH}", ffdc_dir_path)
     BuiltIn().set_global_variable("${STATUS_DIR_PATH}", status_dir_path)
     BuiltIn().set_global_variable("${BASE_TOOL_DIR_PATH}", base_tool_dir_path)
-    BuiltIn().set_global_variable(
-        "${FFDC_LIST_FILE_PATH}", ffdc_list_file_path
-    )
-    BuiltIn().set_global_variable(
-        "${FFDC_REPORT_LIST_PATH}", ffdc_report_list_path
-    )
-    BuiltIn().set_global_variable(
-        "${FFDC_SUMMARY_LIST_PATH}", ffdc_summary_list_path
-    )
+    BuiltIn().set_global_variable("${FFDC_LIST_FILE_PATH}",
+                                  ffdc_list_file_path)
+    BuiltIn().set_global_variable("${FFDC_REPORT_LIST_PATH}",
+                                  ffdc_report_list_path)
+    BuiltIn().set_global_variable("${FFDC_SUMMARY_LIST_PATH}",
+                                  ffdc_summary_list_path)
 
-    BuiltIn().set_global_variable(
-        "${FFDC_DIR_PATH_STYLE}", ffdc_dir_path_style
-    )
-    BuiltIn().set_global_variable("${FFDC_CHECK}", ffdc_check)
+    BuiltIn().set_global_variable("${FFDC_DIR_PATH_STYLE}",
+                                  ffdc_dir_path_style)
+    BuiltIn().set_global_variable("${FFDC_CHECK}",
+                                  ffdc_check)
 
     # For each program parameter, set the corresponding AUTOBOOT_ environment
     # variable value.  Also, set an AUTOBOOT_ environment variable for every
     # element in additional_values.
-    additional_values = [
-        "program_pid",
-        "master_pid",
-        "ffdc_dir_path",
-        "status_dir_path",
-        "base_tool_dir_path",
-        "ffdc_list_file_path",
-        "ffdc_report_list_path",
-        "ffdc_summary_list_path",
-        "execdir",
-        "redfish_supported",
-        "redfish_rest_supported",
-        "redfish_support_trans_state",
-    ]
+    additional_values = ["program_pid", "master_pid", "ffdc_dir_path",
+                         "status_dir_path", "base_tool_dir_path",
+                         "ffdc_list_file_path", "ffdc_report_list_path",
+                         "ffdc_summary_list_path", "execdir", "redfish_supported",
+                         "redfish_rest_supported", "redfish_support_trans_state"]
 
     plug_in_vars = parm_list + additional_values
 
@@ -368,9 +317,8 @@
     else:
         test_really_running = 0
 
-    BuiltIn().set_global_variable(
-        "${test_really_running}", test_really_running
-    )
+    BuiltIn().set_global_variable("${test_really_running}",
+                                  test_really_running)
     BuiltIn().set_global_variable("${boot_type_desc}", next_boot)
     BuiltIn().set_global_variable("${boot_pass}", boot_pass)
     BuiltIn().set_global_variable("${boot_fail}", boot_fail)
@@ -382,16 +330,9 @@
     # For each program parameter, set the corresponding AUTOBOOT_ environment
     # variable value.  Also, set an AUTOBOOT_ environment variable for every
     # element in additional_values.
-    additional_values = [
-        "boot_type_desc",
-        "boot_success",
-        "boot_pass",
-        "boot_fail",
-        "test_really_running",
-        "ffdc_prefix",
-        "boot_start_time",
-        "boot_end_time",
-    ]
+    additional_values = ["boot_type_desc", "boot_success", "boot_pass",
+                         "boot_fail", "test_really_running", "ffdc_prefix",
+                         "boot_start_time", "boot_end_time"]
 
     plug_in_vars = additional_values
 
@@ -403,14 +344,14 @@
         os.environ["AUTOBOOT_" + var_name] = str(var_value)
 
     if debug:
-        shell_rc, out_buf = gc.cmd_fnc_u(
-            "printenv | egrep AUTOBOOT_ | sort -u"
-        )
+        shell_rc, out_buf = \
+            gc.cmd_fnc_u("printenv | egrep AUTOBOOT_ | sort -u")
 
     BuiltIn().set_log_level(LOG_LEVEL)
 
 
 def pre_boot_plug_in_setup():
+
     # Clear the ffdc_list_file_path file.  Plug-ins may now write to it.
     try:
         os.remove(ffdc_list_file_path)
@@ -438,7 +379,8 @@
     ffdc_prefix = openbmc_nickname + "." + time_string
 
 
-def default_sigusr1(signal_number=0, frame=None):
+def default_sigusr1(signal_number=0,
+                    frame=None):
     r"""
     Handle SIGUSR1 by doing nothing.
 
@@ -486,22 +428,19 @@
     repo_bin_path = robot_pgm_dir_path.replace("/lib/", "/bin/")
     # If we can't find process_plug_in_packages.py, ssh_pw or
     # validate_plug_ins.py, then we don't have our repo bin in PATH.
-    shell_rc, out_buf = gc.cmd_fnc_u(
-        "which process_plug_in_packages.py" + " ssh_pw validate_plug_ins.py",
-        quiet=1,
-        print_output=0,
-        show_err=0,
-    )
+    shell_rc, out_buf = gc.cmd_fnc_u("which process_plug_in_packages.py"
+                                     + " ssh_pw validate_plug_ins.py", quiet=1,
+                                     print_output=0, show_err=0)
     if shell_rc != 0:
-        os.environ["PATH"] = repo_bin_path + ":" + os.environ.get("PATH", "")
+        os.environ['PATH'] = repo_bin_path + ":" + os.environ.get('PATH', "")
     # Likewise, our repo lib subdir needs to be in sys.path and PYTHONPATH.
     if robot_pgm_dir_path not in sys.path:
         sys.path.append(robot_pgm_dir_path)
         PYTHONPATH = os.environ.get("PYTHONPATH", "")
         if PYTHONPATH == "":
-            os.environ["PYTHONPATH"] = robot_pgm_dir_path
+            os.environ['PYTHONPATH'] = robot_pgm_dir_path
         else:
-            os.environ["PYTHONPATH"] = robot_pgm_dir_path + ":" + PYTHONPATH
+            os.environ['PYTHONPATH'] = robot_pgm_dir_path + ":" + PYTHONPATH
 
     validate_parms()
 
@@ -513,8 +452,7 @@
 
     plug_in_setup()
     rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
-        call_point="setup"
-    )
+        call_point='setup')
     if rc != 0:
         error_message = "Plug-in setup failed.\n"
         gp.print_error_report(error_message)
@@ -549,10 +487,11 @@
 
     global openbmc_model
     if openbmc_model == "":
-        status, ret_values = grk.run_key_u("Get BMC System Model", ignore=1)
+        status, ret_values =\
+            grk.run_key_u("Get BMC System Model", ignore=1)
         # Set the model to default "OPENBMC" if getting it from BMC fails.
-        if status == "FAIL":
-            openbmc_model = "OPENBMC"
+        if status == 'FAIL':
+            openbmc_model = 'OPENBMC'
         else:
             openbmc_model = ret_values
         BuiltIn().set_global_variable("${openbmc_model}", openbmc_model)
@@ -578,27 +517,21 @@
     gv.valid_integer(boot_pass)
     gv.valid_integer(boot_fail)
     plug_in_packages_list = grpi.rvalidate_plug_ins(plug_in_dir_paths)
-    BuiltIn().set_global_variable(
-        "${plug_in_packages_list}", plug_in_packages_list
-    )
-    gv.valid_value(stack_mode, valid_values=["normal", "skip"])
+    BuiltIn().set_global_variable("${plug_in_packages_list}",
+                                  plug_in_packages_list)
+    gv.valid_value(stack_mode, valid_values=['normal', 'skip'])
     gv.set_exit_on_error(False)
     if len(boot_list) == 0 and len(boot_stack) == 0 and not ffdc_only:
-        error_message = (
-            "You must provide either a value for either the"
-            + " boot_list or the boot_stack parm.\n"
-        )
+        error_message = "You must provide either a value for either the" +\
+            " boot_list or the boot_stack parm.\n"
         BuiltIn().fail(gp.sprint_error(error_message))
     valid_boot_list(boot_list, valid_boot_types)
     valid_boot_list(boot_stack, valid_boot_types)
-    selected_PDU_boots = list(
-        set(boot_list + boot_stack) & set(boot_lists["PDU_reboot"])
-    )
+    selected_PDU_boots = list(set(boot_list + boot_stack)
+                              & set(boot_lists['PDU_reboot']))
     if len(selected_PDU_boots) > 0 and pdu_host == "":
-        error_message = (
-            "You have selected the following boots which"
-            + " require a PDU host but no value for pdu_host:\n"
-        )
+        error_message = "You have selected the following boots which" +\
+                        " require a PDU host but no value for pdu_host:\n"
         error_message += gp.sprint_var(selected_PDU_boots)
         error_message += gp.sprint_var(pdu_host, fmt=gp.blank())
         BuiltIn().fail(gp.sprint_error(error_message))
@@ -613,11 +546,11 @@
 
     global state
 
-    req_states = ["epoch_seconds"] + st.default_req_states
+    req_states = ['epoch_seconds'] + st.default_req_states
 
     gp.qprint_timen("Getting system state.")
     if test_mode:
-        state["epoch_seconds"] = int(time.time())
+        state['epoch_seconds'] = int(time.time())
     else:
         state = st.get_state(req_states=req_states, quiet=quiet)
     gp.qprint_var(state)
@@ -629,12 +562,9 @@
     valid state data, we cannot continue to work.
     """
 
-    if st.compare_states(state, st.invalid_state_match, "or"):
-        error_message = (
-            "The state dictionary contains blank fields which"
-            + " is illegal.\n"
-            + gp.sprint_var(state)
-        )
+    if st.compare_states(state, st.invalid_state_match, 'or'):
+        error_message = "The state dictionary contains blank fields which" +\
+            " is illegal.\n" + gp.sprint_var(state)
         BuiltIn().fail(gp.sprint_error(error_message))
 
 
@@ -655,20 +585,12 @@
     if transitional_boot_selected and not boot_success:
         prior_boot = next_boot
         boot_candidate = boot_stack.pop()
-        gp.qprint_timen(
-            "The prior '"
-            + next_boot
-            + "' was chosen to"
-            + " transition to a valid state for '"
-            + boot_candidate
-            + "' which was at the top of the boot_stack.  Since"
-            + " the '"
-            + next_boot
-            + "' failed, the '"
-            + boot_candidate
-            + "' has been removed from the stack"
-            + " to avoid and endless failure loop."
-        )
+        gp.qprint_timen("The prior '" + next_boot + "' was chosen to"
+                        + " transition to a valid state for '" + boot_candidate
+                        + "' which was at the top of the boot_stack.  Since"
+                        + " the '" + next_boot + "' failed, the '"
+                        + boot_candidate + "' has been removed from the stack"
+                        + " to avoid and endless failure loop.")
         if len(boot_stack) == 0:
             return ""
 
@@ -685,19 +607,17 @@
         skip_boot_printed = 0
         while len(boot_stack) > 0:
             boot_candidate = boot_stack.pop()
-            if stack_mode == "normal":
+            if stack_mode == 'normal':
                 break
             else:
-                if st.compare_states(state, boot_table[boot_candidate]["end"]):
+                if st.compare_states(state, boot_table[boot_candidate]['end']):
                     if not skip_boot_printed:
                         gp.qprint_var(stack_mode)
                         gp.qprintn()
-                        gp.qprint_timen(
-                            "Skipping the following boot tests"
-                            + " which are unnecessary since their"
-                            + " required end states match the"
-                            + " current machine state:"
-                        )
+                        gp.qprint_timen("Skipping the following boot tests"
+                                        + " which are unnecessary since their"
+                                        + " required end states match the"
+                                        + " current machine state:")
                         skip_boot_printed = 1
                     gp.qprint_var(boot_candidate)
                     boot_candidate = ""
@@ -706,26 +626,19 @@
             gp.qprint_var(boot_stack)
             gp.qprint_dashes()
             return boot_candidate
-        if st.compare_states(state, boot_table[boot_candidate]["start"]):
-            gp.qprint_timen(
-                "The machine state is valid for a '"
-                + boot_candidate
-                + "' boot test."
-            )
+        if st.compare_states(state, boot_table[boot_candidate]['start']):
+            gp.qprint_timen("The machine state is valid for a '"
+                            + boot_candidate + "' boot test.")
             gp.qprint_dashes()
             gp.qprint_var(boot_stack)
             gp.qprint_dashes()
             return boot_candidate
         else:
-            gp.qprint_timen(
-                "The machine state does not match the required"
-                + " starting state for a '"
-                + boot_candidate
-                + "' boot test:"
-            )
-            gp.qprint_varx(
-                "boot_table_start_entry", boot_table[boot_candidate]["start"]
-            )
+            gp.qprint_timen("The machine state does not match the required"
+                            + " starting state for a '" + boot_candidate
+                            + "' boot test:")
+            gp.qprint_varx("boot_table_start_entry",
+                           boot_table[boot_candidate]['start'])
             boot_stack.append(boot_candidate)
             transitional_boot_selected = True
             popped_boot = boot_candidate
@@ -733,30 +646,23 @@
     # Loop through your list selecting a boot_candidates
     boot_candidates = []
     for boot_candidate in boot_list:
-        if st.compare_states(state, boot_table[boot_candidate]["start"]):
+        if st.compare_states(state, boot_table[boot_candidate]['start']):
             if stack_popped:
-                if st.compare_states(
-                    boot_table[boot_candidate]["end"],
-                    boot_table[popped_boot]["start"],
-                ):
+                if st.compare_states(boot_table[boot_candidate]['end'],
+                                     boot_table[popped_boot]['start']):
                     boot_candidates.append(boot_candidate)
             else:
                 boot_candidates.append(boot_candidate)
 
     if len(boot_candidates) == 0:
-        gp.qprint_timen(
-            "The user's boot list contained no boot tests"
-            + " which are valid for the current machine state."
-        )
+        gp.qprint_timen("The user's boot list contained no boot tests"
+                        + " which are valid for the current machine state.")
         boot_candidate = default_power_on
-        if not st.compare_states(state, boot_table[default_power_on]["start"]):
+        if not st.compare_states(state, boot_table[default_power_on]['start']):
             boot_candidate = default_power_off
         boot_candidates.append(boot_candidate)
-        gp.qprint_timen(
-            "Using default '"
-            + boot_candidate
-            + "' boot type to transition to valid state."
-        )
+        gp.qprint_timen("Using default '" + boot_candidate
+                        + "' boot type to transition to valid state.")
 
     gp.dprint_var(boot_candidates)
 
@@ -777,41 +683,29 @@
     # Making deliberate choice to NOT run plug_in_setup().  We don't want
     # ffdc_prefix updated.
     rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
-        call_point="ffdc_report", stop_on_plug_in_failure=0
-    )
+        call_point='ffdc_report', stop_on_plug_in_failure=0)
 
     # Get additional header data which may have been created by ffdc plug-ins.
     # Also, delete the individual header files to cleanup.
-    cmd_buf = (
-        "file_list=$(cat "
-        + ffdc_report_list_path
-        + " 2>/dev/null)"
-        + ' ; [ ! -z "${file_list}" ] && cat ${file_list}'
-        + " 2>/dev/null ; rm -rf ${file_list} 2>/dev/null || :"
-    )
-    shell_rc, more_header_info = gc.cmd_fnc_u(
-        cmd_buf, print_output=0, show_err=0
-    )
+    cmd_buf = "file_list=$(cat " + ffdc_report_list_path + " 2>/dev/null)" +\
+              " ; [ ! -z \"${file_list}\" ] && cat ${file_list}" +\
+              " 2>/dev/null ; rm -rf ${file_list} 2>/dev/null || :"
+    shell_rc, more_header_info = gc.cmd_fnc_u(cmd_buf, print_output=0,
+                                              show_err=0)
 
     # Get additional summary data which may have been created by ffdc plug-ins.
     # Also, delete the individual header files to cleanup.
-    cmd_buf = (
-        "file_list=$(cat "
-        + ffdc_summary_list_path
-        + " 2>/dev/null)"
-        + ' ; [ ! -z "${file_list}" ] && cat ${file_list}'
-        + " 2>/dev/null ; rm -rf ${file_list} 2>/dev/null || :"
-    )
-    shell_rc, ffdc_summary_info = gc.cmd_fnc_u(
-        cmd_buf, print_output=0, show_err=0
-    )
+    cmd_buf = "file_list=$(cat " + ffdc_summary_list_path + " 2>/dev/null)" +\
+              " ; [ ! -z \"${file_list}\" ] && cat ${file_list}" +\
+              " 2>/dev/null ; rm -rf ${file_list} 2>/dev/null || :"
+    shell_rc, ffdc_summary_info = gc.cmd_fnc_u(cmd_buf, print_output=0,
+                                               show_err=0)
 
     # ffdc_list_file_path contains a list of any ffdc files created by plug-
     # ins, etc.  Read that data into a list.
     try:
-        plug_in_ffdc_list = (
-            open(ffdc_list_file_path, "r").read().rstrip("\n").split("\n")
-        )
+        plug_in_ffdc_list = \
+            open(ffdc_list_file_path, 'r').read().rstrip("\n").split("\n")
         plug_in_ffdc_list = list(filter(None, plug_in_ffdc_list))
     except IOError:
         plug_in_ffdc_list = []
@@ -828,7 +722,7 @@
 
     # Open ffdc_file_list for writing.  We will write a complete list of
     # FFDC files to it for possible use by plug-ins like cp_stop_check.
-    ffdc_list_file = open(ffdc_list_file_path, "w")
+    ffdc_list_file = open(ffdc_list_file_path, 'w')
     ffdc_list_file.write(printable_ffdc_file_list + "\n")
     ffdc_list_file.close()
 
@@ -843,35 +737,13 @@
 
     if len(more_header_info) > 0:
         gp.qprintn(more_header_info)
-    gp.qpvars(
-        host_name,
-        host_ip,
-        openbmc_nickname,
-        openbmc_host,
-        openbmc_host_name,
-        openbmc_ip,
-        openbmc_username,
-        openbmc_password,
-        rest_username,
-        rest_password,
-        ipmi_username,
-        ipmi_password,
-        os_host,
-        os_host_name,
-        os_ip,
-        os_username,
-        os_password,
-        pdu_host,
-        pdu_host_name,
-        pdu_ip,
-        pdu_username,
-        pdu_password,
-        pdu_slot_no,
-        openbmc_serial_host,
-        openbmc_serial_host_name,
-        openbmc_serial_ip,
-        openbmc_serial_port,
-    )
+    gp.qpvars(host_name, host_ip, openbmc_nickname, openbmc_host,
+              openbmc_host_name, openbmc_ip, openbmc_username,
+              openbmc_password, rest_username, rest_password, ipmi_username,
+              ipmi_password, os_host, os_host_name, os_ip, os_username,
+              os_password, pdu_host, pdu_host_name, pdu_ip, pdu_username,
+              pdu_password, pdu_slot_no, openbmc_serial_host,
+              openbmc_serial_host_name, openbmc_serial_ip, openbmc_serial_port)
 
     gp.qprintn()
     print_boot_history(boot_history)
@@ -897,18 +769,14 @@
 
     plug_in_setup()
     rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
-        call_point="ffdc", stop_on_plug_in_failure=0
-    )
+        call_point='ffdc', stop_on_plug_in_failure=0)
 
-    AUTOBOOT_FFDC_PREFIX = os.environ["AUTOBOOT_FFDC_PREFIX"]
-    status, ffdc_file_list = grk.run_key_u(
-        "FFDC  ffdc_prefix="
-        + AUTOBOOT_FFDC_PREFIX
-        + "  ffdc_function_list="
-        + ffdc_function_list,
-        ignore=1,
-    )
-    if status != "PASS":
+    AUTOBOOT_FFDC_PREFIX = os.environ['AUTOBOOT_FFDC_PREFIX']
+    status, ffdc_file_list = grk.run_key_u("FFDC  ffdc_prefix="
+                                           + AUTOBOOT_FFDC_PREFIX
+                                           + "  ffdc_function_list="
+                                           + ffdc_function_list, ignore=1)
+    if status != 'PASS':
         gp.qprint_error("Call to ffdc failed.\n")
         if type(ffdc_file_list) is not list:
             ffdc_file_list = []
@@ -933,7 +801,7 @@
     global boot_history
     global boot_start_time
 
-    doing_msg = gp.sprint_timen('Doing "' + boot_keyword + '".')
+    doing_msg = gp.sprint_timen("Doing \"" + boot_keyword + "\".")
 
     # Set boot_start_time for use by plug-ins.
     boot_start_time = doing_msg[1:33]
@@ -944,7 +812,8 @@
     update_boot_history(boot_history, doing_msg, max_boot_history)
 
 
-def stop_boot_test(signal_number=0, frame=None):
+def stop_boot_test(signal_number=0,
+                   frame=None):
     r"""
     Handle SIGUSR1 by aborting the boot test that is running.
 
@@ -986,40 +855,34 @@
     print_test_start_message(boot)
 
     plug_in_setup()
-    rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
-        call_point="pre_boot"
-    )
+    rc, shell_rc, failed_plug_in_name = \
+        grpi.rprocess_plug_in_packages(call_point="pre_boot")
     if rc != 0:
-        error_message = (
-            "Plug-in failed with non-zero return code.\n"
-            + gp.sprint_var(rc, fmt=gp.hexa())
-        )
+        error_message = "Plug-in failed with non-zero return code.\n" +\
+            gp.sprint_var(rc, fmt=gp.hexa())
         set_default_siguser1()
         BuiltIn().fail(gp.sprint_error(error_message))
 
     if test_mode:
         # In test mode, we'll pretend the boot worked by assigning its
         # required end state to the default state value.
-        state = st.strip_anchor_state(boot_table[boot]["end"])
+        state = st.strip_anchor_state(boot_table[boot]['end'])
     else:
         # Assertion:  We trust that the state data was made fresh by the
         # caller.
 
         gp.qprintn()
 
-        if boot_table[boot]["method_type"] == "keyword":
-            rk.my_run_keywords(
-                boot_table[boot].get("lib_file_path", ""),
-                boot_table[boot]["method"],
-                quiet=quiet,
-            )
+        if boot_table[boot]['method_type'] == "keyword":
+            rk.my_run_keywords(boot_table[boot].get('lib_file_path', ''),
+                               boot_table[boot]['method'],
+                               quiet=quiet)
 
-        if boot_table[boot]["bmc_reboot"]:
-            st.wait_for_comm_cycle(int(state["epoch_seconds"]))
+        if boot_table[boot]['bmc_reboot']:
+            st.wait_for_comm_cycle(int(state['epoch_seconds']))
             plug_in_setup()
-            rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
-                call_point="post_reboot"
-            )
+            rc, shell_rc, failed_plug_in_name = \
+                grpi.rprocess_plug_in_packages(call_point="post_reboot")
             if rc != 0:
                 error_message = "Plug-in failed with non-zero return code.\n"
                 error_message += gp.sprint_var(rc, fmt=gp.hexa())
@@ -1027,35 +890,25 @@
                 BuiltIn().fail(gp.sprint_error(error_message))
         else:
             match_state = st.anchor_state(state)
-            del match_state["epoch_seconds"]
+            del match_state['epoch_seconds']
             # Wait for the state to change in any way.
-            st.wait_state(
-                match_state,
-                wait_time=state_change_timeout,
-                interval="10 seconds",
-                invert=1,
-            )
+            st.wait_state(match_state, wait_time=state_change_timeout,
+                          interval="10 seconds", invert=1)
 
         gp.qprintn()
-        if boot_table[boot]["end"]["chassis"] == "Off":
+        if boot_table[boot]['end']['chassis'] == "Off":
             boot_timeout = power_off_timeout
         else:
             boot_timeout = power_on_timeout
-        st.wait_state(
-            boot_table[boot]["end"],
-            wait_time=boot_timeout,
-            interval="10 seconds",
-        )
+        st.wait_state(boot_table[boot]['end'], wait_time=boot_timeout,
+                      interval="10 seconds")
 
     plug_in_setup()
-    rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
-        call_point="post_boot"
-    )
+    rc, shell_rc, failed_plug_in_name = \
+        grpi.rprocess_plug_in_packages(call_point="post_boot")
     if rc != 0:
-        error_message = (
-            "Plug-in failed with non-zero return code.\n"
-            + gp.sprint_var(rc, fmt=gp.hexa())
-        )
+        error_message = "Plug-in failed with non-zero return code.\n" +\
+            gp.sprint_var(rc, fmt=gp.hexa())
         set_default_siguser1()
         BuiltIn().fail(gp.sprint_error(error_message))
 
@@ -1096,14 +949,12 @@
     gp.qprintn()
     if boot_status == "PASS":
         boot_success = 1
-        completion_msg = gp.sprint_timen(
-            'BOOT_SUCCESS: "' + next_boot + '" succeeded.'
-        )
+        completion_msg = gp.sprint_timen("BOOT_SUCCESS: \"" + next_boot
+                                         + "\" succeeded.")
     else:
         boot_success = 0
-        completion_msg = gp.sprint_timen(
-            'BOOT_FAILED: "' + next_boot + '" failed.'
-        )
+        completion_msg = gp.sprint_timen("BOOT_FAILED: \"" + next_boot
+                                         + "\" failed.")
 
     # Set boot_end_time for use by plug-ins.
     boot_end_time = completion_msg[1:33]
@@ -1117,19 +968,16 @@
     # NOTE: A post_test_case call point failure is NOT counted as a boot
     # failure.
     rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
-        call_point="post_test_case", stop_on_plug_in_failure=0
-    )
+        call_point='post_test_case', stop_on_plug_in_failure=0)
 
     plug_in_setup()
     rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
-        call_point="ffdc_check",
-        shell_rc=dump_ffdc_rc(),
-        stop_on_plug_in_failure=1,
-        stop_on_non_zero_rc=1,
-    )
-    if ffdc_check == "All" or shell_rc == dump_ffdc_rc():
+        call_point='ffdc_check', shell_rc=dump_ffdc_rc(),
+        stop_on_plug_in_failure=1, stop_on_non_zero_rc=1)
+    if ffdc_check == "All" or\
+       shell_rc == dump_ffdc_rc():
         status, ret_values = grk.run_key_u("my_ffdc", ignore=1)
-        if status != "PASS":
+        if status != 'PASS':
             gp.qprint_error("Call to my_ffdc failed.\n")
             # Leave a record for caller that "soft" errors occurred.
             soft_errors = 1
@@ -1139,9 +987,7 @@
         # print error logs before delete
         if redfish_support_trans_state:
             status, error_logs = grk.run_key_u("Get Redfish Event Logs")
-            log.print_error_logs(
-                error_logs, "AdditionalDataURI Message Severity"
-            )
+            log.print_error_logs(error_logs, "AdditionalDataURI Message Severity")
         else:
             status, error_logs = grk.run_key_u("Get Error Logs")
             log.print_error_logs(error_logs, "AdditionalData Message Severity")
@@ -1159,8 +1005,8 @@
 
     plug_in_setup()
     rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
-        call_point="stop_check", shell_rc=stop_test_rc(), stop_on_non_zero_rc=1
-    )
+        call_point='stop_check', shell_rc=stop_test_rc(),
+        stop_on_non_zero_rc=1)
     if shell_rc == stop_test_rc():
         message = "Stopping as requested by user.\n"
         gp.qprint_time(message)
@@ -1187,19 +1033,16 @@
     if cp_setup_called:
         plug_in_setup()
         rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
-            call_point="cleanup", stop_on_plug_in_failure=0
-        )
+            call_point='cleanup', stop_on_plug_in_failure=0)
 
-    if "boot_results_file_path" in globals():
+    if 'boot_results_file_path' in globals():
         # Save boot_results and boot_history objects to a file in case they are
         # needed again.
         gp.qprint_timen("Saving boot_results to the following path.")
         gp.qprint_var(boot_results_file_path)
-        pickle.dump(
-            (boot_results, boot_history),
-            open(boot_results_file_path, "wb"),
-            pickle.HIGHEST_PROTOCOL,
-        )
+        pickle.dump((boot_results, boot_history),
+                    open(boot_results_file_path, 'wb'),
+                    pickle.HIGHEST_PROTOCOL)
 
     global save_stack
     # Restore any global values saved on the save_stack.
@@ -1212,9 +1055,8 @@
             continue
 
         # Restore the saved value.
-        cmd_buf = (
-            'BuiltIn().set_global_variable("${' + parm_name + '}", parm_value)'
-        )
+        cmd_buf = "BuiltIn().set_global_variable(\"${" + parm_name +\
+            "}\", parm_value)"
         gp.dpissuing(cmd_buf)
         exec(cmd_buf)
 
@@ -1232,10 +1074,8 @@
     if ga.psutil_imported:
         ga.terminate_descendants()
 
-    cmd_buf = [
-        "Print Error",
-        "A keyword timeout occurred ending this program.\n",
-    ]
+    cmd_buf = ["Print Error",
+               "A keyword timeout occurred ending this program.\n"]
     BuiltIn().run_keyword_if_timeout_occurred(*cmd_buf)
 
     if redfish_supported:
@@ -1260,29 +1100,24 @@
     # For the purposes of the following plug-ins, mark the "boot" as a success.
     boot_success = 1
     plug_in_setup()
-    (
-        rc,
-        shell_rc,
-        failed_plug_in_name,
-        history,
-    ) = grpi.rprocess_plug_in_packages(
-        call_point="post_stack", stop_on_plug_in_failure=0, return_history=True
-    )
+    rc, shell_rc, failed_plug_in_name, history =\
+        grpi.rprocess_plug_in_packages(call_point='post_stack',
+                                       stop_on_plug_in_failure=0,
+                                       return_history=True)
     for doing_msg in history:
         update_boot_history(boot_history, doing_msg, max_boot_history)
     if rc != 0:
         boot_success = 0
 
     plug_in_setup()
-    rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
-        call_point="ffdc_check",
-        shell_rc=dump_ffdc_rc(),
-        stop_on_plug_in_failure=1,
-        stop_on_non_zero_rc=1,
-    )
+    rc, shell_rc, failed_plug_in_name =\
+        grpi.rprocess_plug_in_packages(call_point='ffdc_check',
+                                       shell_rc=dump_ffdc_rc(),
+                                       stop_on_plug_in_failure=1,
+                                       stop_on_non_zero_rc=1)
     if shell_rc == dump_ffdc_rc():
         status, ret_values = grk.run_key_u("my_ffdc", ignore=1)
-        if status != "PASS":
+        if status != 'PASS':
             gp.qprint_error("Call to my_ffdc failed.\n")
             # Leave a record for caller that "soft" errors occurred.
             soft_errors = 1
@@ -1290,26 +1125,24 @@
 
     plug_in_setup()
     rc, shell_rc, failed_plug_in_name = grpi.rprocess_plug_in_packages(
-        call_point="stop_check", shell_rc=stop_test_rc(), stop_on_non_zero_rc=1
-    )
+        call_point='stop_check', shell_rc=stop_test_rc(),
+        stop_on_non_zero_rc=1)
     if shell_rc == stop_test_rc():
         message = "Stopping as requested by user.\n"
         gp.qprint_time(message)
         BuiltIn().fail(message)
 
 
-def obmc_boot_test_py(
-    loc_boot_stack=None, loc_stack_mode=None, loc_quiet=None
-):
+def obmc_boot_test_py(loc_boot_stack=None,
+                      loc_stack_mode=None,
+                      loc_quiet=None):
     r"""
     Do main program processing.
     """
 
     global save_stack
 
-    ga.set_term_options(
-        term_requests={"pgm_names": ["process_plug_in_packages.py"]}
-    )
+    ga.set_term_options(term_requests={'pgm_names': ['process_plug_in_packages.py']})
 
     gp.dprintn()
     # Process function parms.
@@ -1320,24 +1153,14 @@
 
         if parm_value is not None:
             # Save the global value on a stack.
-            cmd_buf = (
-                'save_stack.push(BuiltIn().get_variable_value("${'
-                + parm_name
-                + '}"), "'
-                + parm_name
-                + '")'
-            )
+            cmd_buf = "save_stack.push(BuiltIn().get_variable_value(\"${" +\
+                parm_name + "}\"), \"" + parm_name + "\")"
             gp.dpissuing(cmd_buf)
             exec(cmd_buf)
 
             # Set the global value to the passed value.
-            cmd_buf = (
-                'BuiltIn().set_global_variable("${'
-                + parm_name
-                + '}", loc_'
-                + parm_name
-                + ")"
-            )
+            cmd_buf = "BuiltIn().set_global_variable(\"${" + parm_name +\
+                "}\", loc_" + parm_name + ")"
             gp.dpissuing(cmd_buf)
             exec(cmd_buf)
 
@@ -1358,9 +1181,7 @@
         # print error logs before delete
         if redfish_support_trans_state:
             status, error_logs = grk.run_key_u("Get Redfish Event Logs")
-            log.print_error_logs(
-                error_logs, "AdditionalDataURI Message Severity"
-            )
+            log.print_error_logs(error_logs, "AdditionalDataURI Message Severity")
         else:
             status, error_logs = grk.run_key_u("Get Error Logs")
             log.print_error_logs(error_logs, "AdditionalData Message Severity")
@@ -1374,7 +1195,7 @@
             grk.run_key(delete_sysdump_cmd, ignore=1)
 
     # Process caller's boot_stack.
-    while len(boot_stack) > 0:
+    while (len(boot_stack) > 0):
         test_loop_body()
 
     gp.qprint_timen("Finished processing stack.")
@@ -1391,10 +1212,8 @@
     boot_pass, boot_fail = boot_results.return_total_pass_fail()
     new_fail = boot_fail - init_boot_fail
     if new_fail > boot_fail_threshold:
-        error_message = (
-            "Boot failures exceed the boot failure"
-            + " threshold:\n"
-            + gp.sprint_var(new_fail)
-            + gp.sprint_var(boot_fail_threshold)
-        )
+        error_message = "Boot failures exceed the boot failure" +\
+                        " threshold:\n" +\
+                        gp.sprint_var(new_fail) +\
+                        gp.sprint_var(boot_fail_threshold)
         BuiltIn().fail(gp.sprint_error(error_message))
diff --git a/lib/openbmc_ffdc.py b/lib/openbmc_ffdc.py
index b5efa7a..78f596c 100644
--- a/lib/openbmc_ffdc.py
+++ b/lib/openbmc_ffdc.py
@@ -7,24 +7,20 @@
 import os
 
 import gen_print as gp
-import gen_robot_keyword as grk
 import gen_valid as gv
+import gen_robot_keyword as grk
 import state as st
+
 from robot.libraries.BuiltIn import BuiltIn
 
-redfish_support_trans_state = int(
-    os.environ.get("REDFISH_SUPPORT_TRANS_STATE", 0)
-) or int(
-    BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0)
-)
+redfish_support_trans_state = int(os.environ.get('REDFISH_SUPPORT_TRANS_STATE', 0)) or \
+    int(BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0))
 
 
-def ffdc(
-    ffdc_dir_path=None,
-    ffdc_prefix=None,
-    ffdc_function_list="",
-    comm_check=True,
-):
+def ffdc(ffdc_dir_path=None,
+         ffdc_prefix=None,
+         ffdc_function_list="",
+         comm_check=True):
     r"""
     Gather First Failure Data Capture (FFDC).
 
@@ -52,31 +48,26 @@
 
     if comm_check:
         if not redfish_support_trans_state:
-            interface = "rest"
+            interface = 'rest'
         else:
-            interface = "redfish"
+            interface = 'redfish'
 
-        state = st.get_state(req_states=["ping", "uptime", interface])
+        state = st.get_state(req_states=['ping', 'uptime', interface])
         gp.qprint_var(state)
-        if not int(state["ping"]):
-            gp.print_error(
-                "BMC is not ping-able.  Terminating FFDC collection.\n"
-            )
+        if not int(state['ping']):
+            gp.print_error("BMC is not ping-able.  Terminating FFDC collection.\n")
             return ffdc_file_list
 
         if not int(state[interface]):
             gp.print_error("%s commands to the BMC are failing." % interface)
 
-        if state["uptime"] == "":
+        if state['uptime'] == "":
             gp.print_error("BMC is not communicating via ssh.\n")
 
         # If SSH and Redfish connection doesn't works, abort.
-        if not int(state[interface]) and state["uptime"] == "":
-            gp.print_error(
-                "BMC is not communicating via ssh or Redfish.  Terminating"
-                " FFDC"
-                + " collection.\n"
-            )
+        if not int(state[interface]) and state['uptime'] == "":
+            gp.print_error("BMC is not communicating via ssh or Redfish.  Terminating FFDC"
+                           + " collection.\n")
             return ffdc_file_list
 
     gp.qprint_timen("Collecting FFDC.")
@@ -94,12 +85,9 @@
     gp.qprint_issuing(cmd_buf)
     status, output = BuiltIn().run_keyword_and_ignore_error(*cmd_buf)
     if status != "PASS":
-        error_message = gp.sprint_error_report(
-            "Create Directory failed"
-            + " with the following"
-            + " error:\n"
-            + output
-        )
+        error_message = gp.sprint_error_report("Create Directory failed"
+                                               + " with the following"
+                                               + " error:\n" + output)
         BuiltIn().fail(error_message)
 
     # FFDC_FILE_PATH is used by Header Message.
@@ -107,9 +95,9 @@
     BuiltIn().set_global_variable("${FFDC_FILE_PATH}", FFDC_FILE_PATH)
 
     status, ffdc_file_list = grk.run_key_u("Header Message")
-    status, ffdc_file_sub_list = grk.run_key_u(
-        "Call FFDC Methods  ffdc_function_list=" + ffdc_function_list
-    )
+    status, ffdc_file_sub_list = \
+        grk.run_key_u("Call FFDC Methods  ffdc_function_list="
+                      + ffdc_function_list)
 
     # Combine lists, remove duplicates and sort.
     ffdc_file_list = sorted(set(ffdc_file_list + ffdc_file_sub_list))
@@ -119,7 +107,8 @@
     return ffdc_file_list
 
 
-def set_ffdc_defaults(ffdc_dir_path=None, ffdc_prefix=None):
+def set_ffdc_defaults(ffdc_dir_path=None,
+                      ffdc_prefix=None):
     r"""
     Set a default value for ffdc_dir_path and ffdc_prefix if they don't
     already have values.  Return both values.
@@ -141,34 +130,24 @@
     BuiltIn().set_global_variable("${FFDC_TIME}", FFDC_TIME)
 
     ffdc_dir_path_style = BuiltIn().get_variable_value(
-        "${ffdc_dir_path_style}"
-    )
+        "${ffdc_dir_path_style}")
 
     if ffdc_dir_path is None:
         if ffdc_dir_path_style:
             try:
-                ffdc_dir_path = os.environ["FFDC_DIR_PATH"]
+                ffdc_dir_path = os.environ['FFDC_DIR_PATH']
             except KeyError:
-                ffdc_dir_path = (
-                    os.path.dirname(
-                        BuiltIn().get_variable_value("${LOG_FILE}")
-                    )
-                    + "/"
-                )
+                ffdc_dir_path = os.path.dirname(
+                    BuiltIn().get_variable_value("${LOG_FILE}")) + "/"
         else:
             FFDC_LOG_PATH = os.getcwd() + "/logs/"
             if FFDC_LOG_PATH is None:
                 FFDC_LOG_PATH = ""
             if FFDC_LOG_PATH == "":
-                FFDC_LOG_PATH = (
-                    os.path.dirname(
-                        BuiltIn().get_variable_value("${LOG_FILE}")
-                    )
-                    + "/"
-                )
-            error_message = gv.valid_value(
-                FFDC_LOG_PATH, var_name="FFDC_LOG_PATH"
-            )
+                FFDC_LOG_PATH = os.path.dirname(
+                    BuiltIn().get_variable_value("${LOG_FILE}")) + "/"
+            error_message = gv.valid_value(FFDC_LOG_PATH,
+                                           var_name="FFDC_LOG_PATH")
             if error_message != "":
                 error_message = gp.sprint_error_report(error_message)
                 BuiltIn().fail(error_message)
@@ -189,16 +168,9 @@
             if ffdc_dir_path_style:
                 OPENBMC_HOST = BuiltIn().get_variable_value("${OPENBMC_HOST}")
                 OPENBMC_NICKNAME = BuiltIn().get_variable_value(
-                    "${OPENBMC_NICKNAME}", default=OPENBMC_HOST
-                )
-                ffdc_prefix = (
-                    OPENBMC_NICKNAME
-                    + "."
-                    + FFDC_TIME[2:8]
-                    + "."
-                    + FFDC_TIME[8:14]
-                    + "."
-                )
+                    "${OPENBMC_NICKNAME}", default=OPENBMC_HOST)
+                ffdc_prefix = OPENBMC_NICKNAME + "." + FFDC_TIME[2:8] + "." +\
+                    FFDC_TIME[8:14] + "."
             else:
                 ffdc_prefix = FFDC_TIME + "_"
 
diff --git a/lib/openbmc_ffdc_list.py b/lib/openbmc_ffdc_list.py
index ea79a81..9fb882a 100755
--- a/lib/openbmc_ffdc_list.py
+++ b/lib/openbmc_ffdc_list.py
@@ -17,203 +17,159 @@
 # -----------------------------------------------------------------
 # Add cmd's needed to be part of the ffdc report manifest file
 FFDC_BMC_CMD = {
-    "DRIVER INFO": {
+    'DRIVER INFO':
+    {
         # String Name         Command
-        "FW Level": "cat /etc/os-release",
-        "FW Timestamp": "cat /etc/timestamp",
+        'FW Level': 'cat /etc/os-release',
+        'FW Timestamp': 'cat /etc/timestamp',
     },
-    "BMC DATA": {
-        "BMC OS": "uname -a",
-        "BMC Uptime": "uptime;cat /proc/uptime",
-        "BMC File System Disk Space Usage": "df -hT",
-        "BMC Date Time": "date;/sbin/hwclock --show;/usr/bin/timedatectl",
+    'BMC DATA':
+    {
+        'BMC OS': 'uname -a',
+        'BMC Uptime': 'uptime;cat /proc/uptime',
+        'BMC File System Disk Space Usage': 'df -hT',
+        'BMC Date Time': 'date;/sbin/hwclock --show;/usr/bin/timedatectl'
     },
-    "APPLICATION DATA": {
-        "BMC state": "/usr/bin/obmcutil state",
+    'APPLICATION DATA':
+    {
+        'BMC state': '/usr/bin/obmcutil state',
     },
 }
 # Add file name and corresponding command needed for BMC
 FFDC_BMC_FILE = {
-    "BMC FILES": {
+    'BMC FILES':
+    {
         # File Name         Command
-        "BMC_flash_side.txt": (
-            "cat /sys/class/watchdog/watchdog1/bootstatus"
-            " >/tmp/BMC_flash_side.txt 2>&1"
-        ),
-        "BMC_hwmon.txt": (
-            "grep -r . /sys/class/hwmon/* >/tmp/BMC_hwmon.txt 2>&1"
-        ),
-        "BMC_proc_list.txt": "top -n 1 -b >/tmp/BMC_proc_list.txt 2>&1",
-        "BMC_proc_fd_active_list.txt": (
-            "ls -Al /proc/*/fd/ >/tmp/BMC_proc_fd_active_list.txt 2>&1"
-        ),
-        "BMC_journalctl_nopager.txt": (
-            "journalctl --no-pager >/tmp/BMC_journalctl_nopager.txt 2>&1"
-        ),
-        "BMC_journalctl_pretty.json": (
-            "journalctl -o json-pretty >/tmp/BMC_journalctl_pretty.json 2>&1"
-        ),
-        "BMC_dmesg.txt": "dmesg >/tmp/BMC_dmesg.txt 2>&1",
-        "BMC_procinfo.txt": "cat /proc/cpuinfo >/tmp/BMC_procinfo.txt 2>&1",
-        "BMC_meminfo.txt": "cat /proc/meminfo >/tmp/BMC_meminfo.txt 2>&1",
-        "BMC_systemd.txt": "systemctl status --all >/tmp/BMC_systemd.txt 2>&1",
-        "BMC_failed_service.txt": (
-            "systemctl list-units --failed >/tmp/BMC_failed_service.txt 2>&1"
-        ),
-        "BMC_list_service.txt": (
-            "systemctl list-jobs >/tmp/BMC_list_service.txt 2>&1"
-        ),
-        "BMC_obmc_console.txt": (
-            "cat /var/log/obmc-console.log >/tmp/BMC_obmc_console.txt 2>&1"
-        ),
-        "BMC_obmc_console1.txt": (
-            "cat /var/log/obmc-console1.log >/tmp/BMC_obmc_console1.txt 2>&1"
-        ),
-        "PEL_logs_list.json": "peltool -l >/tmp/PEL_logs_list.json 2>&1",
-        "PEL_logs_complete_list.json": (
-            "peltool -l -a -f >/tmp/PEL_logs_complete_list.json 2>&1"
-        ),
-        "PEL_logs_display.json": "peltool -a >/tmp/PEL_logs_display.json 2>&1",
-        "PEL_logs_complete_display.json": (
-            "peltool -a -f -h>/tmp/PEL_logs_complete_display.json 2>&1"
-        ),
-        "PEL_logs_badPEL.txt": "hexdump -C"
-        + " /var/lib/phosphor-logging/extensions/pels/badPEL>/tmp/PEL_logs_badPEL.txt"
-        " 2>&1",
-        "PLDM_fru_record.txt": (
-            "pldmtool fru getfrurecordtable>/tmp/PLDM_fru_record.txt 2>&1"
-        ),
-        "BMC_pldm_flight_recorder.txt": (
-            "rm -rf /tmp/pldm_flight_recorder; killall -s SIGUSR1 pldmd;"
-        )
-        + " sleep 5; cat /tmp/pldm_flight_recorder >"
-        " /tmp/BMC_pldm_flight_recorder.txt 2>&1;",
-        "OCC_state.txt": 'echo "OCC state check";for i in {0..3};'
-        + " do (echo /org/open_power/control/occ$i;"
-        + " busctl get-property org.open_power.OCC.Control"
-        " /org/open_power/control/occ$i"
-        + " org.open_power.OCC.Status OccActive) done > /tmp/OCC_state.txt"
-        " 2>&1",
-        "bmcweb_persistent_data.json": (
-            "cat /home/root/bmcweb_persistent_data.json"
-        )
-        + " > /tmp/bmcweb_persistent_data.json",
-        "GUARD_list.txt": "guard -l > /tmp/GUARD_list.txt 2>&1",
-        "fan_control_dump.json": "fanctl dump; sleep 5",
-        "DEVTREE": (
-            "cat /var/lib/phosphor-software-manager/pnor/rw/DEVTREE >"
-            " /tmp/DEVTREE 2>&1"
-        ),
+        'BMC_flash_side.txt': 'cat /sys/class/watchdog/watchdog1/bootstatus >/tmp/BMC_flash_side.txt 2>&1',
+        'BMC_hwmon.txt': 'grep -r . /sys/class/hwmon/* >/tmp/BMC_hwmon.txt 2>&1',
+        'BMC_proc_list.txt': 'top -n 1 -b >/tmp/BMC_proc_list.txt 2>&1',
+        'BMC_proc_fd_active_list.txt': 'ls -Al /proc/*/fd/ >/tmp/BMC_proc_fd_active_list.txt 2>&1',
+        'BMC_journalctl_nopager.txt': 'journalctl --no-pager >/tmp/BMC_journalctl_nopager.txt 2>&1',
+        'BMC_journalctl_pretty.json': 'journalctl -o json-pretty >/tmp/BMC_journalctl_pretty.json 2>&1',
+        'BMC_dmesg.txt': 'dmesg >/tmp/BMC_dmesg.txt 2>&1',
+        'BMC_procinfo.txt': 'cat /proc/cpuinfo >/tmp/BMC_procinfo.txt 2>&1',
+        'BMC_meminfo.txt': 'cat /proc/meminfo >/tmp/BMC_meminfo.txt 2>&1',
+        'BMC_systemd.txt': 'systemctl status --all >/tmp/BMC_systemd.txt 2>&1',
+        'BMC_failed_service.txt': 'systemctl list-units --failed >/tmp/BMC_failed_service.txt 2>&1',
+        'BMC_list_service.txt': 'systemctl list-jobs >/tmp/BMC_list_service.txt 2>&1',
+        'BMC_obmc_console.txt': 'cat /var/log/obmc-console.log >/tmp/BMC_obmc_console.txt 2>&1',
+        'BMC_obmc_console1.txt': 'cat /var/log/obmc-console1.log >/tmp/BMC_obmc_console1.txt 2>&1',
+        'PEL_logs_list.json': 'peltool -l >/tmp/PEL_logs_list.json 2>&1',
+        'PEL_logs_complete_list.json': 'peltool -l -a -f >/tmp/PEL_logs_complete_list.json 2>&1',
+        'PEL_logs_display.json': 'peltool -a >/tmp/PEL_logs_display.json 2>&1',
+        'PEL_logs_complete_display.json': 'peltool -a -f -h>/tmp/PEL_logs_complete_display.json 2>&1',
+        'PEL_logs_badPEL.txt': 'hexdump -C'
+        + ' /var/lib/phosphor-logging/extensions/pels/badPEL>/tmp/PEL_logs_badPEL.txt 2>&1',
+        'PLDM_fru_record.txt': 'pldmtool fru getfrurecordtable>/tmp/PLDM_fru_record.txt 2>&1',
+        'BMC_pldm_flight_recorder.txt': 'rm -rf /tmp/pldm_flight_recorder; killall -s SIGUSR1 pldmd;'
+        + ' sleep 5; cat /tmp/pldm_flight_recorder > /tmp/BMC_pldm_flight_recorder.txt 2>&1;',
+        'OCC_state.txt': 'echo "OCC state check";for i in {0..3};'
+        + ' do (echo /org/open_power/control/occ$i;'
+        + ' busctl get-property org.open_power.OCC.Control /org/open_power/control/occ$i'
+        + ' org.open_power.OCC.Status OccActive) done > /tmp/OCC_state.txt 2>&1',
+        'bmcweb_persistent_data.json': 'cat /home/root/bmcweb_persistent_data.json'
+        + ' > /tmp/bmcweb_persistent_data.json',
+        'GUARD_list.txt': 'guard -l > /tmp/GUARD_list.txt 2>&1',
+        'fan_control_dump.json': 'fanctl dump; sleep 5',
+        'DEVTREE': 'cat /var/lib/phosphor-software-manager/pnor/rw/DEVTREE > /tmp/DEVTREE 2>&1',
     },
 }
 # Add file name and corresponding command needed for all Linux distributions
 FFDC_OS_ALL_DISTROS_FILE = {
-    "OS FILES": {
+    'OS FILES':
+    {
         # File Name         Command
-        "OS_msglog.txt": (
-            "cat /sys/firmware/opal/msglog >/tmp/OS_msglog.txt 2>&1"
-        ),
-        "OS_cpufrequency.txt": "ppc64_cpu --frequency "
-        + ">/tmp/OS_cpufrequency.txt 2>&1",
-        "OS_dmesg.txt": "dmesg >/tmp/OS_dmesg.txt 2>&1",
-        "OS_opal_prd.txt": "cat /var/log/opal-prd* >/tmp/OS_opal_prd.txt 2>&1",
-        "OS_boot.txt": "cat /var/log/boot.log >/tmp/OS_boot.txt 2>&1",
-        "OS_procinfo.txt": "cat /proc/cpuinfo >/tmp/OS_procinfo.txt 2>&1",
-        "OS_meminfo.txt": "cat /proc/meminfo >/tmp/OS_meminfo.txt 2>&1",
-        "OS_netstat.txt": "netstat -a >/tmp/OS_netstat.txt 2>&1",
-        "OS_lspci.txt": "lspci >/tmp/OS_lspci.txt 2>&1",
-        "OS_lscpu.txt": "lscpu >/tmp/OS_lscpu.txt 2>&1",
-        "OS_lscfg.txt": "lscfg >/tmp/OS_lscfg.txt 2>&1",
-        "OS_journalctl_nopager.txt": "journalctl --no-pager -b "
-        + "> /tmp/OS_journalctl_nopager.txt  2>&1",
+        'OS_msglog.txt': 'cat /sys/firmware/opal/msglog >/tmp/OS_msglog.txt 2>&1',
+        'OS_cpufrequency.txt': 'ppc64_cpu --frequency '
+        + '>/tmp/OS_cpufrequency.txt 2>&1',
+        'OS_dmesg.txt': 'dmesg >/tmp/OS_dmesg.txt 2>&1',
+        'OS_opal_prd.txt': 'cat /var/log/opal-prd* >/tmp/OS_opal_prd.txt 2>&1',
+        'OS_boot.txt': 'cat /var/log/boot.log >/tmp/OS_boot.txt 2>&1',
+        'OS_procinfo.txt': 'cat /proc/cpuinfo >/tmp/OS_procinfo.txt 2>&1',
+        'OS_meminfo.txt': 'cat /proc/meminfo >/tmp/OS_meminfo.txt 2>&1',
+        'OS_netstat.txt': 'netstat -a >/tmp/OS_netstat.txt 2>&1',
+        'OS_lspci.txt': 'lspci >/tmp/OS_lspci.txt 2>&1',
+        'OS_lscpu.txt': 'lscpu >/tmp/OS_lscpu.txt 2>&1',
+        'OS_lscfg.txt': 'lscfg >/tmp/OS_lscfg.txt 2>&1',
+        'OS_journalctl_nopager.txt': 'journalctl --no-pager -b '
+        + '> /tmp/OS_journalctl_nopager.txt  2>&1',
     },
 }
 # Add file name and corresponding command needed for Ubuntu Linux
 FFDC_OS_UBUNTU_FILE = {
-    "OS FILES": {
+    'OS FILES':
+    {
         # File Name         Command
-        "OS_isusb.txt": "{ lsusb -t ; lsusb -v ; } >/tmp/OS_isusb.txt 2>&1",
-        "OS_kern.txt": (
-            "tail -n 50000 /var/log/kern.log >/tmp/OS_kern.txt 2>&1"
-        ),
-        "OS_authlog.txt": (
-            "{ cat /var/log/auth.log; cat /var/log/auth.log.1 ; } "
-        )
-        + ">/tmp/OS_authlog.txt 2>&1",
-        "OS_syslog.txt": (
-            "tail -n 200000 /var/log/syslog >/tmp/OS_syslog.txt 2>&1"
-        ),
-        "OS_info.txt": "{ uname -a; dpkg -s opal-prd; dpkg -s ipmitool ; } "
-        + ">/tmp/OS_info.txt 2>&1",
-        "OS_sosreport.txt": (
-            "{ rm -rf /tmp/sosreport*FFDC* ; sosreport --batch --tmp-dir "
-        )
-        + "/tmp --ticket-number FFDC ; } >/tmp/OS_sosreport.txt 2>&1",
+        'OS_isusb.txt': '{ lsusb -t ; lsusb -v ; } >/tmp/OS_isusb.txt 2>&1',
+        'OS_kern.txt': 'tail -n 50000 /var/log/kern.log >/tmp/OS_kern.txt 2>&1',
+        'OS_authlog.txt': '{ cat /var/log/auth.log; cat /var/log/auth.log.1 ; } '
+        + '>/tmp/OS_authlog.txt 2>&1',
+        'OS_syslog.txt': 'tail -n 200000 /var/log/syslog >/tmp/OS_syslog.txt 2>&1',
+        'OS_info.txt': '{ uname -a; dpkg -s opal-prd; dpkg -s ipmitool ; } '
+        + '>/tmp/OS_info.txt 2>&1',
+        'OS_sosreport.txt': '{ rm -rf /tmp/sosreport*FFDC* ; sosreport --batch --tmp-dir '
+        + '/tmp --ticket-number FFDC ; } >/tmp/OS_sosreport.txt 2>&1',
     },
 }
 # Add file name and corresponding command needed for RHEL Linux
 FFDC_OS_RHEL_FILE = {
-    "OS FILES": {
+    'OS FILES':
+    {
         # File Name         Command
-        "OS_rsct.txt": "/usr/bin/ctversion -bv >/tmp/OS_rsct.txt 2>&1",
-        "OS_secure.txt": "cat /var/log/secure >/tmp/OS_secure.txt 2>&1",
-        "OS_syslog.txt": "tail -n 200000 /var/log/messages "
-        + ">/tmp/OS_syslog.txt 2>&1",
-        "OS_info.txt": "{ lsb_release -a; cat /etc/redhat-release; "
-        + "uname -a; rpm -qa ; } >/tmp/OS_info.txt 2>&1",
-        "OS_sosreport.txt": (
-            "{ rm -rf /tmp/sosreport*FFDC* ; sosreport --batch --tmp-dir "
-        )
-        + "/tmp --label FFDC ; } >/tmp/OS_sosreport.txt 2>&1",
+        'OS_rsct.txt': '/usr/bin/ctversion -bv >/tmp/OS_rsct.txt 2>&1',
+        'OS_secure.txt': 'cat /var/log/secure >/tmp/OS_secure.txt 2>&1',
+        'OS_syslog.txt': 'tail -n 200000 /var/log/messages '
+        + '>/tmp/OS_syslog.txt 2>&1',
+        'OS_info.txt': '{ lsb_release -a; cat /etc/redhat-release; '
+        + 'uname -a; rpm -qa ; } >/tmp/OS_info.txt 2>&1',
+        'OS_sosreport.txt': '{ rm -rf /tmp/sosreport*FFDC* ; sosreport --batch --tmp-dir '
+        + '/tmp --label FFDC ; } >/tmp/OS_sosreport.txt 2>&1',
     },
 }
 # Add file name and corresponding command needed for AIX.
 FFDC_OS_AIX_FILE = {
-    "OS FILES": {
+    'OS FILES':
+    {
         # File Name         Command
-        "OS_errpt.txt": "errpt >/tmp/OS_errpt.txt 2>&1 ; errclear 0;",
-        "OS_processors.txt": "bindprocessor -q >/tmp/OS_processors.txt 2>&1",
+        'OS_errpt.txt': 'errpt >/tmp/OS_errpt.txt 2>&1 ; errclear 0;',
+        'OS_processors.txt': 'bindprocessor -q >/tmp/OS_processors.txt 2>&1',
     },
 }
 
 try:
-    redfish_support_trans_state = os.environ.get(
-        "REDFISH_SUPPORT_TRANS_STATE", 0
-    ) or int(
-        BuiltIn().get_variable_value(
-            "${REDFISH_SUPPORT_TRANS_STATE}", default=0
-        )
-    )
+    redfish_support_trans_state = os.environ.get('REDFISH_SUPPORT_TRANS_STATE', 0) or \
+        int(BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0))
 except RobotNotRunningError:
     pass
 
-OPENBMC_BASE = "/xyz/openbmc_project/"
-OPENPOWER_BASE = "/org/open_power/"
-ENUMERATE_SENSORS = OPENBMC_BASE + "sensors/enumerate"
-ENUMERATE_INVENTORY = OPENBMC_BASE + "inventory/enumerate"
-ENUMERATE_ELOG = OPENBMC_BASE + "logging/entry/enumerate"
-ENUMERATE_LED = OPENBMC_BASE + "led/enumerate"
-ENUMERATE_SW = OPENBMC_BASE + "software/enumerate"
-ENUMERATE_CONTROL = OPENBMC_BASE + "control/enumerate"
-ENUMERATE_STATE = OPENBMC_BASE + "state/enumerate"
-ENUMERATE_OCC = OPENPOWER_BASE + "/enumerate"
-ENUMERATE_DUMPS = OPENBMC_BASE + "dumps/enumerate"
-ENUMERATE_USER = OPENBMC_BASE + "user/enumerate"
+OPENBMC_BASE = '/xyz/openbmc_project/'
+OPENPOWER_BASE = '/org/open_power/'
+ENUMERATE_SENSORS = OPENBMC_BASE + 'sensors/enumerate'
+ENUMERATE_INVENTORY = OPENBMC_BASE + 'inventory/enumerate'
+ENUMERATE_ELOG = OPENBMC_BASE + 'logging/entry/enumerate'
+ENUMERATE_LED = OPENBMC_BASE + 'led/enumerate'
+ENUMERATE_SW = OPENBMC_BASE + 'software/enumerate'
+ENUMERATE_CONTROL = OPENBMC_BASE + 'control/enumerate'
+ENUMERATE_STATE = OPENBMC_BASE + 'state/enumerate'
+ENUMERATE_OCC = OPENPOWER_BASE + '/enumerate'
+ENUMERATE_DUMPS = OPENBMC_BASE + 'dumps/enumerate'
+ENUMERATE_USER = OPENBMC_BASE + 'user/enumerate'
 
 # Add file name and corresponding Get Request
 FFDC_GET_REQUEST = {
-    "GET REQUESTS": {
+    'GET REQUESTS':
+    {
         # File Name         Command
-        "FIRMWARE_list.txt": ENUMERATE_SW,
-        "BMC_sensor_list.txt": ENUMERATE_SENSORS,
-        "BMC_control_list.txt": ENUMERATE_CONTROL,
-        "BMC_inventory.txt": ENUMERATE_INVENTORY,
-        "BMC_elog.txt": ENUMERATE_ELOG,
-        "BMC_led.txt": ENUMERATE_LED,
-        "BMC_state.txt": ENUMERATE_STATE,
-        "OCC_state.txt": ENUMERATE_OCC,
-        "BMC_dumps.txt": ENUMERATE_DUMPS,
-        "BMC_USER.txt": ENUMERATE_USER,
+        'FIRMWARE_list.txt': ENUMERATE_SW,
+        'BMC_sensor_list.txt': ENUMERATE_SENSORS,
+        'BMC_control_list.txt': ENUMERATE_CONTROL,
+        'BMC_inventory.txt': ENUMERATE_INVENTORY,
+        'BMC_elog.txt': ENUMERATE_ELOG,
+        'BMC_led.txt': ENUMERATE_LED,
+        'BMC_state.txt': ENUMERATE_STATE,
+        'OCC_state.txt': ENUMERATE_OCC,
+        'BMC_dumps.txt': ENUMERATE_DUMPS,
+        'BMC_USER.txt': ENUMERATE_USER,
     },
 }
 
@@ -222,60 +178,61 @@
     for key in list(FFDC_GET_REQUEST):
         del FFDC_GET_REQUEST[key]
 
-REDFISH_BASE = "/redfish/v1/"
-REDFISH_ELOG = REDFISH_BASE + "Systems/system/LogServices/EventLog/Entries"
-REDFISH_FIRMWARE = REDFISH_BASE + "UpdateService/FirmwareInventory"
+REDFISH_BASE = '/redfish/v1/'
+REDFISH_ELOG = REDFISH_BASE + 'Systems/system/LogServices/EventLog/Entries'
+REDFISH_FIRMWARE = REDFISH_BASE + 'UpdateService/FirmwareInventory'
 
 # Add file name and corresponding Get Request
 FFDC_GET_REDFISH_REQUEST = {
-    "GET REQUESTS": {
+    'GET REQUESTS':
+    {
         # File Name         Command
-        "BMC_redfish_elog.txt": REDFISH_ELOG,
+        'BMC_redfish_elog.txt': REDFISH_ELOG,
     },
 }
 
 # Define your keywords in method/utils and call here
 FFDC_METHOD_CALL = {
-    "BMC LOGS": {
+    'BMC LOGS':
+    {
         # Description               Keyword name
-        "Start ffdc cleanup": "BMC FFDC Cleanup",
-        "FFDC Generic Report": "BMC FFDC Manifest",
-        "BMC Specific Files": "BMC FFDC Files",
-        "Get Request FFDC": "BMC FFDC Get Requests",
-        "Get Redfish Request FFDC": "BMC FFDC Get Redfish Requests",
-        "OS FFDC": "OS FFDC Files",
-        "Core Files": "SCP Coredump Files",
-        "SEL Log": "Collect eSEL Log",
-        "Sys Inventory Files": "System Inventory Files",
-        "Dump Files": "SCP Dump Files",
-        "PEL Files": "Collect PEL Log",
-        "Redfish Log": "Enumerate Redfish Resources",
-        "Firmware Log": "Enumerate Redfish Resources  "
-        + " enum_uri=/redfish/v1/UpdateService/FirmwareInventory  "
-        + " file_enum_name=redfish_FIRMWARE_list.txt",
-        "Redfish OEM Log": "Enumerate Redfish OEM Resources",
-        "End ffdc cleanup": "BMC FFDC Cleanup",
+        'Start ffdc cleanup': 'BMC FFDC Cleanup',
+        'FFDC Generic Report': 'BMC FFDC Manifest',
+        'BMC Specific Files': 'BMC FFDC Files',
+        'Get Request FFDC': 'BMC FFDC Get Requests',
+        'Get Redfish Request FFDC': 'BMC FFDC Get Redfish Requests',
+        'OS FFDC': 'OS FFDC Files',
+        'Core Files': 'SCP Coredump Files',
+        'SEL Log': 'Collect eSEL Log',
+        'Sys Inventory Files': 'System Inventory Files',
+        'Dump Files': 'SCP Dump Files',
+        'PEL Files': 'Collect PEL Log',
+        'Redfish Log': 'Enumerate Redfish Resources',
+        'Firmware Log': 'Enumerate Redfish Resources  '
+        + ' enum_uri=/redfish/v1/UpdateService/FirmwareInventory  '
+        + ' file_enum_name=redfish_FIRMWARE_list.txt',
+        'Redfish OEM Log': 'Enumerate Redfish OEM Resources',
+        'End ffdc cleanup': 'BMC FFDC Cleanup',
     },
 }
 
 try:
-    platform_arch_type = os.environ.get(
-        "PLATFORM_ARCH_TYPE", ""
-    ) or BuiltIn().get_variable_value("${PLATFORM_ARCH_TYPE}", default="power")
+    platform_arch_type = os.environ.get('PLATFORM_ARCH_TYPE', '') or \
+        BuiltIn().get_variable_value("${PLATFORM_ARCH_TYPE}", default="power")
 except RobotNotRunningError:
     pass
 
 # Filter the logs based on platform type.
 if platform_arch_type == "x86":
-    del FFDC_BMC_FILE["BMC FILES"]["PEL_logs_list.json"]
-    del FFDC_BMC_FILE["BMC FILES"]["PEL_logs_display.json"]
-    del FFDC_METHOD_CALL["BMC LOGS"]["PEL Files"]
+    del FFDC_BMC_FILE['BMC FILES']['PEL_logs_list.json']
+    del FFDC_BMC_FILE['BMC FILES']['PEL_logs_display.json']
+    del FFDC_METHOD_CALL['BMC LOGS']['PEL Files']
 
 # -----------------------------------------------------------------
 # base class for FFDC default list
 
 
-class openbmc_ffdc_list:
+class openbmc_ffdc_list():
     def get_ffdc_bmc_cmd(self, i_type):
         r"""
         #######################################################################
@@ -361,7 +318,8 @@
         """
         return FFDC_METHOD_CALL.keys()
 
-    def get_ffdc_method_desc(self, index):
+    def get_ffdc_method_desc(self,
+                             index):
         r"""
         #######################################################################
         #   @brief   This method returns the just the keys from the dictionary.
@@ -425,7 +383,7 @@
         #   @return   Remove all special chars and return the string
         #######################################################################
         """
-        return "".join(e for e in i_str if e.isalnum())
+        return ''.join(e for e in i_str if e.isalnum())
 
     def get_esel_index(self, esel_list):
         r"""
@@ -435,7 +393,7 @@
         #   @return   Index of "ESEL=" in the list.
         #######################################################################
         """
-        index = [i for i, str in enumerate(esel_list) if "ESEL=" in str]
+        index = [i for i, str in enumerate(esel_list) if 'ESEL=' in str]
         return index[0]
 
     def get_dump_index(self, dump_list):
@@ -446,5 +404,5 @@
         #   @return   Index of "ESEL=" in the list.
         #######################################################################
         """
-        index = [i for i, str in enumerate(dump_list) if "DUMP=" in str]
+        index = [i for i, str in enumerate(dump_list) if 'DUMP=' in str]
         return index[0]
diff --git a/lib/openbmctool_utils.py b/lib/openbmctool_utils.py
index dfe84e7..a6c94e9 100755
--- a/lib/openbmctool_utils.py
+++ b/lib/openbmctool_utils.py
@@ -5,21 +5,22 @@
 openbmctool_execute_command.
 """
 
-import collections
-import json
+import gen_print as gp
+import gen_cmd as gc
+import gen_valid as gv
+import gen_misc as gm
+import var_funcs as vf
+import utils as utils
+from robot.libraries.BuiltIn import BuiltIn
 import re
 import tempfile
-
-import gen_cmd as gc
-import gen_misc as gm
-import gen_print as gp
-import gen_valid as gv
-import utils as utils
-import var_funcs as vf
-from robot.libraries.BuiltIn import BuiltIn
+import collections
+import json
 
 
-def openbmctool_execute_command(command_string, *args, **kwargs):
+def openbmctool_execute_command(command_string,
+                                *args,
+                                **kwargs):
     r"""
     Run the command string as an argument to the openbmctool.py program and
     return the stdout and the return code.
@@ -63,12 +64,10 @@
     # Get global BMC variable values.
     openbmc_host = BuiltIn().get_variable_value("${OPENBMC_HOST}", default="")
     https_port = BuiltIn().get_variable_value("${HTTPS_PORT}", default="443")
-    openbmc_username = BuiltIn().get_variable_value(
-        "${OPENBMC_USERNAME}", default=""
-    )
-    openbmc_password = BuiltIn().get_variable_value(
-        "${OPENBMC_PASSWORD}", default=""
-    )
+    openbmc_username = BuiltIn().get_variable_value("${OPENBMC_USERNAME}",
+                                                    default="")
+    openbmc_password = BuiltIn().get_variable_value("${OPENBMC_PASSWORD}",
+                                                    default="")
     if not gv.valid_value(openbmc_host):
         return "", "", 1
     if not gv.valid_value(openbmc_username):
@@ -82,35 +81,24 @@
     # example, the user may have specified "fru status | head -n 2" which
     # would be broken into 2 list elements.  We will also break on ">"
     # (re-direct).
-    pipeline = list(
-        map(str.strip, re.split(r" ([\|>]) ", str(command_string)))
-    )
+    pipeline = list(map(str.strip, re.split(r' ([\|>]) ',
+                        str(command_string))))
     # The "tail" command below prevents a "egrep: write error: Broken pipe"
     # error if the user is piping the output to a sub-process.
     # Use "egrep -v" to get rid of editorial output from openbmctool.py.
-    pipeline.insert(
-        1,
-        "| tail -n +1 | egrep -v 'Attempting login|User [^ ]+"
-        " has been logged out'",
-    )
+    pipeline.insert(1, "| tail -n +1 | egrep -v 'Attempting login|User [^ ]+"
+                    " has been logged out'")
 
-    command_string = (
-        "set -o pipefail ; python3 $(which openbmctool.py) -H "
-        + openbmc_host
-        + ":"
-        + https_port
-        + " -U "
-        + openbmc_username
-        + " -P "
-        + openbmc_password
-        + " "
-        + " ".join(pipeline)
-    )
+    command_string = "set -o pipefail ; python3 $(which openbmctool.py) -H "\
+        + openbmc_host + ":" + https_port + " -U " + openbmc_username + " -P " + openbmc_password\
+        + " " + " ".join(pipeline)
 
     return gc.shell_cmd(command_string, *args, **kwargs)
 
 
-def openbmctool_execute_command_json(command_string, *args, **kwargs):
+def openbmctool_execute_command_json(command_string,
+                                     *args,
+                                     **kwargs):
     r"""
     Run the command string as an argument to the openbmctool.py program, parse
     the JSON output into a dictionary and return the dictionary.
@@ -124,19 +112,21 @@
     See openbmctool_execute_command (above) for all field descriptions.
     """
 
-    rc, output = openbmctool_execute_command(command_string, *args, **kwargs)
+    rc, output = openbmctool_execute_command(command_string,
+                                             *args,
+                                             **kwargs)
     try:
         json_object = utils.to_json_ordered(output)
     except json.JSONDecodeError:
         BuiltIn().fail(gp.sprint_error(output))
 
-    if json_object["status"] != "ok":
+    if json_object['status'] != "ok":
         err_msg = "Error found in JSON data returned by the openbmctool.py "
         err_msg += "command. Expected a 'status' field value of \"ok\":\n"
         err_msg += gp.sprint_var(json_object, 1)
         BuiltIn().fail(gp.sprint_error(err_msg))
 
-    return json_object["data"]
+    return json_object['data']
 
 
 def get_fru_status():
@@ -165,9 +155,8 @@
         [functional]:            No
     ...
     """
-    rc, output = openbmctool_execute_command(
-        "fru status", print_output=False, ignore_err=False
-    )
+    rc, output = openbmctool_execute_command("fru status", print_output=False,
+                                             ignore_err=False)
     # Example value for output (partial):
     # Component     | Is a FRU  | Present  | Functional  | Has Logs
     # cpu0          | Yes       | Yes      | Yes         | No
@@ -245,9 +234,8 @@
                                     parsed into a list of dictionaries.
     """
 
-    rc, output = openbmctool_execute_command(
-        "fru print", print_output=False, ignore_err=False
-    )
+    rc, output = openbmctool_execute_command("fru print", print_output=False,
+                                             ignore_err=False)
     if parse_json:
         return gm.json_loads_multiple(output)
     else:
@@ -317,9 +305,8 @@
                                     parsed into a list of dictionaries.
     """
 
-    rc, output = openbmctool_execute_command(
-        "fru list", print_output=False, ignore_err=False
-    )
+    rc, output = openbmctool_execute_command("fru list", print_output=False,
+                                             ignore_err=False)
     if parse_json:
         return gm.json_loads_multiple(output)
     else:
@@ -327,6 +314,7 @@
 
 
 def get_sensors_print():
+
     r"""
     Get the output of the sensors print command and return as a list of
     dictionaries.
@@ -353,9 +341,9 @@
         [target]:                Active
     ...
     """
-    rc, output = openbmctool_execute_command(
-        "sensors print", print_output=False, ignore_err=False
-    )
+    rc, output = openbmctool_execute_command("sensors print",
+                                             print_output=False,
+                                             ignore_err=False)
     # Example value for output (partial):
     # sensor                 | type         | units     | value    | target
     # OCC0                   | Discrete     | N/A       | Active   | Active
@@ -365,6 +353,7 @@
 
 
 def get_sensors_list():
+
     r"""
     Get the output of the sensors list command and return as a list of
     dictionaries.
@@ -391,9 +380,9 @@
         [target]:                Active
     ...
     """
-    rc, output = openbmctool_execute_command(
-        "sensors list", print_output=False, ignore_err=False
-    )
+    rc, output = openbmctool_execute_command("sensors list",
+                                             print_output=False,
+                                             ignore_err=False)
     # Example value for output (partial):
     # sensor                 | type         | units     | value    | target
     # OCC0                   | Discrete     | N/A       | Active   | Active
@@ -413,9 +402,9 @@
     Example result (excerpt):
     openbmctool_version:         1.06
     """
-    rc, output = openbmctool_execute_command(
-        "-V | cut -f 2 -d ' '", print_output=False, ignore_err=False
-    )
+    rc, output = openbmctool_execute_command("-V | cut -f 2 -d ' '",
+                                             print_output=False,
+                                             ignore_err=False)
     return output
 
 
@@ -425,14 +414,15 @@
     the collect_service_data command.
     """
 
-    return [
-        "inventory.txt",
-        "sensorReadings.txt",
-        "ledStatus.txt",
-        "SELshortlist.txt",
-        "parsedSELs.txt",
-        "bmcFullRaw.txt",
-    ]
+    return\
+        [
+            "inventory.txt",
+            "sensorReadings.txt",
+            "ledStatus.txt",
+            "SELshortlist.txt",
+            "parsedSELs.txt",
+            "bmcFullRaw.txt"
+        ]
 
 
 def collect_service_data(verify=False):
@@ -449,41 +439,32 @@
     # Route the output of collect_service_data to a file for easier parsing.
     temp = tempfile.NamedTemporaryFile()
     temp_file_path = temp.name
-    openbmctool_execute_command(
-        "collect_service_data > " + temp_file_path, ignore_err=False
-    )
+    openbmctool_execute_command("collect_service_data > " + temp_file_path,
+                                ignore_err=False)
     # Isolate the file paths in the collect_service_data output.  We're
     # looking for output lines like this from which to extract the file paths:
     # Inventory collected and stored in /tmp/dummy--2018-09-26_17.59.18/inventory.txt
-    rc, file_paths = gc.shell_cmd(
-        "egrep 'collected and' " + temp_file_path
-        # + " | sed -re 's#.*/tmp#/tmp#g'",
-        + " | sed -re 's#[^/]*/#/#'",
-        quiet=1,
-        print_output=0,
-    )
+    rc, file_paths = gc.shell_cmd("egrep 'collected and' " + temp_file_path
+                                  # + " | sed -re 's#.*/tmp#/tmp#g'",
+                                  + " | sed -re 's#[^/]*/#/#'",
+                                  quiet=1, print_output=0)
     # Example file_paths value:
     # /tmp/dummy--2018-09-26_17.59.18/inventory.txt
     # /tmp/dummy--2018-09-26_17.59.18/sensorReadings.txt
     # etc.
     # Convert from output to list.
-    collect_service_data_file_paths = list(
-        filter(None, file_paths.split("\n"))
-    )
+    collect_service_data_file_paths =\
+        list(filter(None, file_paths.split("\n")))
     if int(verify):
         # Create a list of files by stripping the dir names from the elements
         # of collect_service_data_file_paths.
-        files_obtained = [
-            re.sub(r".*/", "", file_path)
-            for file_path in collect_service_data_file_paths
-        ]
+        files_obtained = [re.sub(r".*/", "", file_path)
+                          for file_path in collect_service_data_file_paths]
         files_expected = service_data_files()
         files_missing = list(set(files_expected) - set(files_obtained))
         if len(files_missing) > 0:
-            gp.printn(
-                "collect_service_data output:\n"
-                + gm.file_to_str(temp_file_path)
-            )
+            gp.printn("collect_service_data output:\n"
+                      + gm.file_to_str(temp_file_path))
             err_msg = "The following files are missing from the list of files"
             err_msg += " returned by collect_service_data:\n"
             err_msg += gp.sprint_var(files_missing)
@@ -498,7 +479,11 @@
     Return a complete list of field names returned by the health_check command.
     """
 
-    return ["hardware_status", "performance"]
+    return\
+        [
+            "hardware_status",
+            "performance"
+        ]
 
 
 def get_health_check(verify=False):
@@ -522,9 +507,9 @@
                                     health_check command.
     """
 
-    rc, output = openbmctool_execute_command(
-        "health_check", print_output=False, ignore_err=False
-    )
+    rc, output = openbmctool_execute_command("health_check",
+                                             print_output=False,
+                                             ignore_err=False)
     health_check = vf.key_value_outbuf_to_dict(output, delim=":")
     if int(verify):
         err_msg = gv.valid_dict(health_check, health_check_fields())
@@ -540,7 +525,11 @@
     remote_logging view command.
     """
 
-    return ["Address", "Port"]
+    return\
+        [
+            "Address",
+            "Port"
+        ]
 
 
 def get_remote_logging_view(verify=False):
@@ -565,14 +554,14 @@
                                     remote_logging view' command.
     """
 
-    remote_logging_view = openbmctool_execute_command_json(
-        "logging remote_logging view", print_output=False, ignore_err=False
-    )
+    remote_logging_view =\
+        openbmctool_execute_command_json("logging remote_logging view",
+                                         print_output=False,
+                                         ignore_err=False)
 
     if int(verify):
-        err_msg = gv.valid_dict(
-            remote_logging_view, remote_logging_view_fields()
-        )
+        err_msg = gv.valid_dict(remote_logging_view,
+                                remote_logging_view_fields())
         if err_msg != "":
             BuiltIn().fail(gp.sprint_error(err_msg))
 
@@ -615,9 +604,8 @@
     else:
         new_options = options
 
-    command_string = gc.create_command_string(
-        "network " + sub_command, new_options
-    )
-    return openbmctool_execute_command_json(
-        command_string, print_output=False, ignore_err=False
-    )
+    command_string = gc.create_command_string('network ' + sub_command,
+                                              new_options)
+    return openbmctool_execute_command_json(command_string,
+                                            print_output=False,
+                                            ignore_err=False)
diff --git a/lib/pel_utils.py b/lib/pel_utils.py
index 50eefd3..b06124c 100644
--- a/lib/pel_utils.py
+++ b/lib/pel_utils.py
@@ -4,13 +4,13 @@
 PEL functions.
 """
 
+import func_args as fa
+import bmc_ssh_utils as bsu
+import pel_variables
+
 import json
 import os
 import sys
-
-import bmc_ssh_utils as bsu
-import func_args as fa
-import pel_variables
 from robot.libraries.BuiltIn import BuiltIn
 
 base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -56,9 +56,7 @@
     """
 
     bsu_options = fa.args_to_objects(bsu_options)
-    out_buf, stderr, rc = bsu.bmc_execute_command(
-        "peltool " + option_string, **bsu_options
-    )
+    out_buf, stderr, rc = bsu.bmc_execute_command('peltool ' + option_string, **bsu_options)
     if parse_json:
         try:
             return json.loads(out_buf)
@@ -67,9 +65,8 @@
     return out_buf
 
 
-def get_pel_data_from_bmc(
-    include_hidden_pels=False, include_informational_pels=False
-):
+def get_pel_data_from_bmc(include_hidden_pels=False,
+                          include_informational_pels=False):
     r"""
     Returns PEL data from BMC else throws exception.
 
@@ -112,19 +109,13 @@
         pel_id_list = pel_data.keys()
         for pel_id in pel_id_list:
             # Check if required SRC ID with severity is present
-            if (pel_data[pel_id]["SRC"] == src_id) and (
-                pel_data[pel_id]["Sev"] == severity
-            ):
+            if ((pel_data[pel_id]["SRC"] == src_id) and (pel_data[pel_id]["Sev"] == severity)):
                 src_pel_ids.append(pel_id)
 
         if not src_pel_ids:
-            raise peltool_exception(
-                src_id + " with severity " + severity + " not present"
-            )
+            raise peltool_exception(src_id + " with severity " + severity + " not present")
     except Exception as e:
-        raise peltool_exception(
-            "Failed to fetch PEL ID for required SRC : " + str(e)
-        )
+        raise peltool_exception("Failed to fetch PEL ID for required SRC : " + str(e))
     return src_pel_ids
 
 
@@ -148,9 +139,7 @@
     return src_id
 
 
-def check_for_unexpected_src(
-    unexpected_src_list=[], include_hidden_pels=False
-):
+def check_for_unexpected_src(unexpected_src_list=[], include_hidden_pels=False):
     r"""
     From the given unexpected SRC list, check if any unexpected SRC created
     on the BMC. Returns 0 if no SRC found else throws exception.
@@ -171,13 +160,11 @@
             if src in src_data:
                 print("Found an unexpected SRC : " + src)
                 unexpected_src_count = unexpected_src_count + 1
-        if unexpected_src_count >= 1:
+        if (unexpected_src_count >= 1):
             raise peltool_exception("Unexpected SRC found.")
 
     except Exception as e:
-        raise peltool_exception(
-            "Failed to verify unexpected SRC list : " + str(e)
-        )
+        raise peltool_exception("Failed to verify unexpected SRC list : " + str(e))
     return unexpected_src_count
 
 
diff --git a/lib/pldm_utils.py b/lib/pldm_utils.py
index 81e53fe..35fe929 100755
--- a/lib/pldm_utils.py
+++ b/lib/pldm_utils.py
@@ -4,18 +4,18 @@
 PLDM functions.
 """
 
+import re
+import var_funcs as vf
+import func_args as fa
+import bmc_ssh_utils as bsu
 import json
 import random
-import re
 import string
-
-import bmc_ssh_utils as bsu
-import func_args as fa
-import var_funcs as vf
 from robot.api import logger
 
 
 def pldmtool(option_string, **bsu_options):
+
     r"""
     Run pldmtool on the BMC with the caller's option string and return the result.
 
@@ -41,9 +41,7 @@
     # This allows callers to specify arguments in python style (e.g. print_out=1 vs. print_out=${1}).
     bsu_options = fa.args_to_objects(bsu_options)
 
-    stdout, stderr, rc = bsu.bmc_execute_command(
-        "pldmtool " + option_string, **bsu_options
-    )
+    stdout, stderr, rc = bsu.bmc_execute_command('pldmtool ' + option_string, **bsu_options)
     if stderr:
         return stderr
     try:
@@ -53,6 +51,7 @@
 
 
 def GetBIOSEnumAttributeOptionalValues(attr_val_table_data):
+
     """
     From pldmtool GetBIOSTable of type AttributeValueTable get the dict of
     attribute handle and its optional values for BIOS Enumeration type.
@@ -78,30 +77,27 @@
     attr_val_data_dict = {}
     for item in attr_val_table_data:
         for attr in item:
-            if attr == "NumberOfPossibleValues":
+            if (attr == "NumberOfPossibleValues"):
                 value_list = []
                 for i in range(0, int(item[attr])):
-                    attr_values = item[
-                        "PossibleValueStringHandle[" + str(i) + "]"
-                    ]
-                    value = re.search(r"\((.*?)\)", attr_values).group(1)
+                    attr_values = item["PossibleValueStringHandle[" + str(i) + "]"]
+                    value = re.search(r'\((.*?)\)', attr_values).group(1)
                     if value:
                         # Example:
                         # value = '"Power Off"'
-                        if " " in value:
+                        if ' ' in value:
                             value = '"' + value + '"'
                         value_list.append(value)
                     else:
-                        value_list.append("")
+                        value_list.append('')
 
-                attr_handle = re.findall(
-                    r"\(.*?\)", item["AttributeNameHandle"]
-                )
+                attr_handle = re.findall(r'\(.*?\)', item["AttributeNameHandle"])
                 attr_val_data_dict[attr_handle[0][1:-1]] = value_list
     return attr_val_data_dict
 
 
 def GetBIOSStrAndIntAttributeHandles(attr_type, attr_val_table_data):
+
     """
     From pldmtool GetBIOSTable of type AttributeValueTable get the dict of
     attribute handle and its values based on the attribute type.
@@ -117,27 +113,28 @@
     attr_val_str_dict = {}
     for item in attr_val_table_data:
         value_dict = {}
-        attr_handle = re.findall(r"\(.*?\)", item["AttributeNameHandle"])
+        attr_handle = re.findall(r'\(.*?\)', item["AttributeNameHandle"])
         # Example:
         # {'vmi_if0_ipv4_prefix_length': {'UpperBound': 32, 'LowerBound': 0}
-        if item["AttributeType"] == "BIOSInteger":
+        if (item["AttributeType"] == "BIOSInteger"):
             value_dict["LowerBound"] = item["LowerBound"]
             value_dict["UpperBound"] = item["UpperBound"]
             attr_val_int_dict[attr_handle[0][1:-1]] = value_dict
         # Example:
         # {'vmi_if1_ipv4_ipaddr': {'MaximumStringLength': 15, 'MinimumStringLength': 7}}
-        elif item["AttributeType"] == "BIOSString":
+        elif (item["AttributeType"] == "BIOSString"):
             value_dict["MinimumStringLength"] = item["MinimumStringLength"]
             value_dict["MaximumStringLength"] = item["MaximumStringLength"]
             attr_val_str_dict[attr_handle[0][1:-1]] = value_dict
 
-    if attr_type == "BIOSInteger":
+    if (attr_type == "BIOSInteger"):
         return attr_val_int_dict
-    elif attr_type == "BIOSString":
+    elif (attr_type == "BIOSString"):
         return attr_val_str_dict
 
 
 def GetRandomBIOSIntAndStrValues(attr_name, count):
+
     """
     Get random integer or string values for BIOS attribute values based on the count.
 
@@ -149,35 +146,27 @@
                             or string.
 
     """
-    attr_random_value = ""
+    attr_random_value = ''
 
     # Example
     # 12.13.14.15
-    if "gateway" in attr_name:
-        attr_random_value = ".".join(
-            map(str, (random.randint(0, 255) for _ in range(4)))
-        )
+    if 'gateway' in attr_name:
+        attr_random_value = ".".join(map(str, (random.randint(0, 255) for _ in range(4))))
     # Example
     # 11.11.11.11
-    elif "ipaddr" in attr_name:
-        attr_random_value = ".".join(
-            map(str, (random.randint(0, 255) for _ in range(4)))
-        )
+    elif 'ipaddr' in attr_name:
+        attr_random_value = ".".join(map(str, (random.randint(0, 255) for _ in range(4))))
     # Example
     # E5YWEDWJJ
-    elif "name" in attr_name:
+    elif 'name' in attr_name:
         data = string.ascii_uppercase + string.digits
-        attr_random_value = "".join(
-            random.choice(data) for _ in range(int(count))
-        )
+        attr_random_value = ''.join(random.choice(data) for _ in range(int(count)))
 
-    elif "mfg_flags" in attr_name:
+    elif 'mfg_flags' in attr_name:
         data = string.ascii_uppercase + string.digits
-        attr_random_value = "".join(
-            random.choice(data) for _ in range(int(count))
-        )
+        attr_random_value = ''.join(random.choice(data) for _ in range(int(count)))
 
-    elif "hb_lid_ids" in attr_name:
+    elif 'hb_lid_ids' in attr_name:
         attr_random_value = str(random.randint(0, int(count)))
 
     else:
@@ -186,6 +175,7 @@
 
 
 def GetBIOSAttrOriginalValues(attr_val_table_data):
+
     """
     From pldmtool GetBIOSTable of type AttributeValueTable get the dict of
     attribute handle and its values.
@@ -198,22 +188,23 @@
     """
     attr_val_data_dict = {}
     for item in attr_val_table_data:
-        attr_handle = re.findall(r"\(.*?\)", item["AttributeNameHandle"])
+        attr_handle = re.findall(r'\(.*?\)', item["AttributeNameHandle"])
         attr_name = attr_handle[0][1:-1]
 
         command = "bios GetBIOSAttributeCurrentValueByHandle -a " + attr_name
         value = pldmtool(command)
         attr_val_data_dict[attr_name] = value["CurrentValue"]
         if not value["CurrentValue"]:
-            if "name" in attr_name:
+            if 'name' in attr_name:
                 attr_val_data_dict[attr_name] = '""'
-            elif "hb_lid_ids" in attr_name:
+            elif 'hb_lid_ids' in attr_name:
                 attr_val_data_dict[attr_name] = '""'
 
     return attr_val_data_dict
 
 
 def GetBIOSAttrDefaultValues(attr_val_table_data):
+
     """
     From pldmtool GetBIOSTable of type AttributeValueTable get the dict of
     attribute handle and its default attribute values.
@@ -226,26 +217,27 @@
     """
     attr_val_data_dict = {}
     for item in attr_val_table_data:
-        attr_handle = re.findall(r"\(.*?\)", item["AttributeNameHandle"])
+        attr_handle = re.findall(r'\(.*?\)', item["AttributeNameHandle"])
         attr_name = attr_handle[0][1:-1]
 
         if "DefaultString" in item:
             attr_val_data_dict[attr_name] = item["DefaultString"]
             if not item["DefaultString"]:
-                if "name" in attr_name:
+                if 'name' in attr_name:
                     attr_val_data_dict[attr_name] = '""'
-                elif "hb_lid_ids" in attr_name:
+                elif 'hb_lid_ids' in attr_name:
                     attr_val_data_dict[attr_name] = '""'
         elif "DefaultValue" in item:
             attr_val_data_dict[attr_name] = item["DefaultValue"]
         elif "StringHandle" in item:
-            attr_default_value = re.findall(r"\(.*?\)", item["StringHandle"])
+            attr_default_value = re.findall(r'\(.*?\)', item["StringHandle"])
             attr_val_data_dict[attr_name] = attr_default_value[0][1:-1]
 
     return attr_val_data_dict
 
 
 def GetNewValuesForAllBIOSAttrs(attr_table_data):
+
     """
     Get a new set of values for all attributes in Attribute Table.
 
@@ -257,13 +249,9 @@
     """
     existing_data = GetBIOSAttrOriginalValues(attr_table_data)
     logger.info(existing_data)
-    string_attr_data = GetBIOSStrAndIntAttributeHandles(
-        "BIOSString", attr_table_data
-    )
+    string_attr_data = GetBIOSStrAndIntAttributeHandles("BIOSString", attr_table_data)
     logger.info(string_attr_data)
-    int_attr_data = GetBIOSStrAndIntAttributeHandles(
-        "BIOSInteger", attr_table_data
-    )
+    int_attr_data = GetBIOSStrAndIntAttributeHandles("BIOSInteger", attr_table_data)
     logger.info(int_attr_data)
     enum_attr_data = GetBIOSEnumAttributeOptionalValues(attr_table_data)
     logger.info(enum_attr_data)
@@ -280,12 +268,8 @@
                 data = '"' + str(existing_data[attr]) + '"'
                 temp_list[attr].remove(data)
             except ValueError:
-                logger.info(
-                    "Unable to remove the existing value "
-                    + str(data)
-                    + " from list "
-                    + str(temp_list[attr])
-                )
+                logger.info("Unable to remove the existing value "
+                            + str(data) + " from list " + str(temp_list[attr]))
         valid_values = temp_list[attr][:]
         value = random.choice(valid_values)
         attr_random_data[attr] = value.strip('"')
@@ -295,9 +279,7 @@
         # Iterating to make sure we have a different value
         # other than the existing value.
         for iter in range(5):
-            random_val = GetRandomBIOSIntAndStrValues(
-                attr, string_attr_data[attr]["MaximumStringLength"]
-            )
+            random_val = GetRandomBIOSIntAndStrValues(attr, string_attr_data[attr]["MaximumStringLength"])
             if random_val != existing_data[attr]:
                 break
         attr_random_data[attr] = random_val.strip('"')
@@ -305,9 +287,7 @@
 
     for attr in int_attr_data:
         for iter in range(5):
-            random_val = GetRandomBIOSIntAndStrValues(
-                attr, int_attr_data[attr]["UpperBound"]
-            )
+            random_val = GetRandomBIOSIntAndStrValues(attr, int_attr_data[attr]["UpperBound"])
             if random_val != existing_data[attr]:
                 break
         attr_random_data[attr] = random_val
diff --git a/lib/pythonutil.py b/lib/pythonutil.py
index bf5e1a4..3fd6ffb 100644
--- a/lib/pythonutil.py
+++ b/lib/pythonutil.py
@@ -5,7 +5,7 @@
 def calcDottedNetmask(mask):
     bits = 0
     for i in xrange(32 - mask, 32):
-        bits |= 1 << i
-    packed_value = pack("!I", bits)
+        bits |= (1 << i)
+    packed_value = pack('!I', bits)
     addr = inet_ntoa(packed_value)
     return addr
diff --git a/lib/ras/variables.py b/lib/ras/variables.py
index e0369e7..4525ef3 100644
--- a/lib/ras/variables.py
+++ b/lib/ras/variables.py
@@ -1,3 +1,4 @@
+
 r"""
 Signature description in error log corresponding to error injection.
 """
@@ -47,39 +48,41 @@
 #     - field2: chip address.
 #     - field3: Error log signature description.
 
-ERROR_INJECT_DICT = {
-    "MCACALIFIR_RECV1": ["07010900", "8000000000000000", DES_MCA_RECV1],
-    "MCACALIFIR_RECV32": ["07010900", "2000000000000000", DES_MCA_RECV32],
-    "MCACALIFIR_UE": ["07010900", "0020000000000000", DES_MCA_UE],
-    "MCI_RECV1": ["05010800", "8000000000000000", DES_MCI_RECV1],
-    "MCI_UE": ["05010800", "4000000000000000", DES_MCI_UE],
-    "NX_RECV1": ["02011100", "0004000000000000", DES_NX_RECV1],
-    "NX_UE": ["02011100", "0400000000000000", DES_NX_UE],
-    "NX_RECV32": ["02011100", "0800000000000000", DES_NX_RECV32],
-    "CXA_RECV5": ["02010800", "0000000020000000", DES_CXA_RECV5],
-    "CXA_RECV32": ["02010800", "2000000000000000", DES_CXA_RECV32],
-    "CXA_UE": ["02010800", "4000000000000000", DES_CXA_UE],
-    "OBUS_RECV32": ["0904000a", "8000000000000000", DES_OBUS_RECV32],
-    "NPU0_RECV32": ["05013C00", "0004000000000000", DES_NPU0_RECV32],
-    "L2FIR_RECV1": ["10010800", "0080000000000000", DES_L2_RECV1],
-    "L2FIR_RECV32": ["10010800", "0200000000000000", DES_L2_RECV32],
-    "L2FIR_UE": ["10010800", "0040000000000000", DES_L2_UE],
-    "L3FIR_RECV1": ["10011800", "0000400000000000", DES_L3_RECV1],
-    "L3FIR_RECV32": ["10011800", "0100000000000000", DES_L3_RECV32],
-    "L3FIR_UE": ["10011800", "0000800000000000", DES_L3_UE],
-    "OCCFIR_RECV1": ["01010800", "0000000000040000", DES_OCC_RECV1],
-    "CMEFIR_RECV1": ["10012000", "0100000000000000", DES_CME_RECV1],
-    "EQFIR_RECV32": ["1004000A", "8000000000000000", DES_EQ_RECV32],
-    "NCUFIR_RECV1": ["10011400", "0080000000000000", DES_NCU_RECV1],
-    "NCUFIR_UE": ["10011400", "8000000000000000", DES_NCU_UE],
-    "COREFIR_RECV5": ["20010A40", "8000000000000000", DES_CORE_RECV5],
-    "COREFIR_RECV1": ["20010A40", "0000000200000000", DES_CORE_RECV1],
-    "COREFIR_UE": ["20010A40", "4000000000000000", DES_CORE_UE],
-}
+ERROR_INJECT_DICT = {'MCACALIFIR_RECV1': ['07010900', '8000000000000000',
+                                          DES_MCA_RECV1],
+                     'MCACALIFIR_RECV32': ['07010900', '2000000000000000',
+                                           DES_MCA_RECV32],
+                     'MCACALIFIR_UE': ['07010900', '0020000000000000', DES_MCA_UE],
+                     'MCI_RECV1': ['05010800', '8000000000000000', DES_MCI_RECV1],
+                     'MCI_UE': ['05010800', '4000000000000000', DES_MCI_UE],
+                     'NX_RECV1': ['02011100', '0004000000000000', DES_NX_RECV1],
+                     'NX_UE': ['02011100', '0400000000000000', DES_NX_UE],
+                     'NX_RECV32': ['02011100', '0800000000000000', DES_NX_RECV32],
+                     'CXA_RECV5': ['02010800', '0000000020000000', DES_CXA_RECV5],
+                     'CXA_RECV32': ['02010800', '2000000000000000', DES_CXA_RECV32],
+                     'CXA_UE': ['02010800', '4000000000000000', DES_CXA_UE],
+                     'OBUS_RECV32': ['0904000a', '8000000000000000', DES_OBUS_RECV32],
+                     'NPU0_RECV32': ['05013C00', '0004000000000000', DES_NPU0_RECV32],
+                     'L2FIR_RECV1': ['10010800', '0080000000000000', DES_L2_RECV1],
+                     'L2FIR_RECV32': ['10010800', '0200000000000000', DES_L2_RECV32],
+                     'L2FIR_UE': ['10010800', '0040000000000000', DES_L2_UE],
+                     'L3FIR_RECV1': ['10011800', '0000400000000000', DES_L3_RECV1],
+                     'L3FIR_RECV32': ['10011800', '0100000000000000', DES_L3_RECV32],
+                     'L3FIR_UE': ['10011800', '0000800000000000', DES_L3_UE],
+                     'OCCFIR_RECV1': ['01010800', '0000000000040000', DES_OCC_RECV1],
+                     'CMEFIR_RECV1': ['10012000', '0100000000000000', DES_CME_RECV1],
+                     'EQFIR_RECV32': ['1004000A', '8000000000000000', DES_EQ_RECV32],
+                     'NCUFIR_RECV1': ['10011400', '0080000000000000', DES_NCU_RECV1],
+                     'NCUFIR_UE': ['10011400', '8000000000000000', DES_NCU_UE],
+                     'COREFIR_RECV5': ['20010A40', '8000000000000000', DES_CORE_RECV5],
+                     'COREFIR_RECV1': ['20010A40', '0000000200000000', DES_CORE_RECV1],
+                     'COREFIR_UE': ['20010A40', '4000000000000000', DES_CORE_UE],
+
+                     }
 
 # Address translation files
-probe_cpu_file_path = "/root/probe_cpus.sh"
-addr_translation_file_path = "/root/scom_addr_p9.sh"
+probe_cpu_file_path = '/root/probe_cpus.sh'
+addr_translation_file_path = '/root/scom_addr_p9.sh'
 
 cfam_address = "2801"
 mem_address = "8208000"
diff --git a/lib/redfish_plus.py b/lib/redfish_plus.py
index 2cc7531..e2125ca 100755
--- a/lib/redfish_plus.py
+++ b/lib/redfish_plus.py
@@ -4,14 +4,14 @@
 See redfish_plus class prolog below for details.
 """
 
-import json
-
-import func_args as fa
-import gen_print as gp
-import requests
 from redfish.rest.v1 import HttpClient
+import gen_print as gp
+import func_args as fa
+import requests
+import json
 from robot.libraries.BuiltIn import BuiltIn
 
+
 host = BuiltIn().get_variable_value("${OPENBMC_HOST}")
 MTLS_ENABLED = BuiltIn().get_variable_value("${MTLS_ENABLED}")
 CERT_DIR_PATH = BuiltIn().get_variable_value("${CERT_DIR_PATH}")
@@ -55,7 +55,7 @@
         - Easily used from robot programs.
     """
 
-    ROBOT_LIBRARY_SCOPE = "TEST SUITE"
+    ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
 
     def rest_request(self, func, *args, **kwargs):
         r"""
@@ -115,75 +115,74 @@
         # Convert python string object definitions to objects (mostly useful for robot callers).
         args = fa.args_to_objects(args)
         kwargs = fa.args_to_objects(kwargs)
-        timeout = kwargs.pop("timeout", 30)
+        timeout = kwargs.pop('timeout', 30)
         self._timeout = timeout
-        max_retry = kwargs.pop("max_retry", 10)
+        max_retry = kwargs.pop('max_retry', 10)
         self._max_retry = max_retry
-        valid_status_codes = kwargs.pop("valid_status_codes", [200])
+        valid_status_codes = kwargs.pop('valid_status_codes', [200])
         response = func(*args, **kwargs)
         valid_http_status_code(response.status, valid_status_codes)
         return response
 
     # Define rest function wrappers.
     def get(self, *args, **kwargs):
-        if MTLS_ENABLED == "True":
+
+        if MTLS_ENABLED == 'True':
             return self.rest_request(self.get_with_mtls, *args, **kwargs)
         else:
-            return self.rest_request(
-                super(redfish_plus, self).get, *args, **kwargs
-            )
+            return self.rest_request(super(redfish_plus, self).get, *args,
+                                     **kwargs)
 
     def head(self, *args, **kwargs):
-        if MTLS_ENABLED == "True":
+
+        if MTLS_ENABLED == 'True':
             return self.rest_request(self.head_with_mtls, *args, **kwargs)
         else:
-            return self.rest_request(
-                super(redfish_plus, self).head, *args, **kwargs
-            )
+            return self.rest_request(super(redfish_plus, self).head, *args,
+                                     **kwargs)
 
     def post(self, *args, **kwargs):
-        if MTLS_ENABLED == "True":
+
+        if MTLS_ENABLED == 'True':
             return self.rest_request(self.post_with_mtls, *args, **kwargs)
         else:
-            return self.rest_request(
-                super(redfish_plus, self).post, *args, **kwargs
-            )
+            return self.rest_request(super(redfish_plus, self).post, *args,
+                                     **kwargs)
 
     def put(self, *args, **kwargs):
-        if MTLS_ENABLED == "True":
+
+        if MTLS_ENABLED == 'True':
             return self.rest_request(self.put_with_mtls, *args, **kwargs)
         else:
-            return self.rest_request(
-                super(redfish_plus, self).put, *args, **kwargs
-            )
+            return self.rest_request(super(redfish_plus, self).put, *args,
+                                     **kwargs)
 
     def patch(self, *args, **kwargs):
-        if MTLS_ENABLED == "True":
+
+        if MTLS_ENABLED == 'True':
             return self.rest_request(self.patch_with_mtls, *args, **kwargs)
         else:
-            return self.rest_request(
-                super(redfish_plus, self).patch, *args, **kwargs
-            )
+            return self.rest_request(super(redfish_plus, self).patch, *args,
+                                     **kwargs)
 
     def delete(self, *args, **kwargs):
-        if MTLS_ENABLED == "True":
+
+        if MTLS_ENABLED == 'True':
             return self.rest_request(self.delete_with_mtls, *args, **kwargs)
         else:
-            return self.rest_request(
-                super(redfish_plus, self).delete, *args, **kwargs
-            )
+            return self.rest_request(super(redfish_plus, self).delete, *args,
+                                     **kwargs)
 
     def __del__(self):
         del self
 
     def get_with_mtls(self, *args, **kwargs):
-        cert_dict = kwargs.pop("certificate", {"certificate_name": VALID_CERT})
-        response = requests.get(
-            url="https://" + host + args[0],
-            cert=CERT_DIR_PATH + "/" + cert_dict["certificate_name"],
-            verify=False,
-            headers={"Cache-Control": "no-cache"},
-        )
+
+        cert_dict = kwargs.pop('certificate', {"certificate_name": VALID_CERT})
+        response = requests.get(url='https://' + host + args[0],
+                                cert=CERT_DIR_PATH + '/' + cert_dict['certificate_name'],
+                                verify=False,
+                                headers={"Cache-Control": "no-cache"})
 
         response.status = response.status_code
         if response.status == 200:
@@ -192,73 +191,68 @@
         return response
 
     def post_with_mtls(self, *args, **kwargs):
-        cert_dict = kwargs.pop("certificate", {"certificate_name": VALID_CERT})
-        body = kwargs.pop("body", {})
-        response = requests.post(
-            url="https://" + host + args[0],
-            json=body,
-            cert=CERT_DIR_PATH + "/" + cert_dict["certificate_name"],
-            verify=False,
-            headers={"Content-Type": "application/json"},
-        )
+
+        cert_dict = kwargs.pop('certificate', {"certificate_name": VALID_CERT})
+        body = kwargs.pop('body', {})
+        response = requests.post(url='https://' + host + args[0],
+                                 json=body,
+                                 cert=CERT_DIR_PATH + '/' + cert_dict['certificate_name'],
+                                 verify=False,
+                                 headers={"Content-Type": "application/json"})
 
         response.status = response.status_code
 
         return response
 
     def patch_with_mtls(self, *args, **kwargs):
-        cert_dict = kwargs.pop("certificate", {"certificate_name": VALID_CERT})
-        body = kwargs.pop("body", {})
-        response = requests.patch(
-            url="https://" + host + args[0],
-            json=body,
-            cert=CERT_DIR_PATH + "/" + cert_dict["certificate_name"],
-            verify=False,
-            headers={"Content-Type": "application/json"},
-        )
+
+        cert_dict = kwargs.pop('certificate', {"certificate_name": VALID_CERT})
+        body = kwargs.pop('body', {})
+        response = requests.patch(url='https://' + host + args[0],
+                                  json=body,
+                                  cert=CERT_DIR_PATH + '/' + cert_dict['certificate_name'],
+                                  verify=False,
+                                  headers={"Content-Type": "application/json"})
 
         response.status = response.status_code
 
         return response
 
     def delete_with_mtls(self, *args, **kwargs):
-        cert_dict = kwargs.pop("certificate", {"certificate_name": VALID_CERT})
-        response = requests.delete(
-            url="https://" + host + args[0],
-            cert=CERT_DIR_PATH + "/" + cert_dict["certificate_name"],
-            verify=False,
-            headers={"Content-Type": "application/json"},
-        )
+
+        cert_dict = kwargs.pop('certificate', {"certificate_name": VALID_CERT})
+        response = requests.delete(url='https://' + host + args[0],
+                                   cert=CERT_DIR_PATH + '/' + cert_dict['certificate_name'],
+                                   verify=False,
+                                   headers={"Content-Type": "application/json"})
 
         response.status = response.status_code
 
         return response
 
     def put_with_mtls(self, *args, **kwargs):
-        cert_dict = kwargs.pop("certificate", {"certificate_name": VALID_CERT})
-        body = kwargs.pop("body", {})
-        response = requests.put(
-            url="https://" + host + args[0],
-            json=body,
-            cert=CERT_DIR_PATH + "/" + cert_dict["certificate_name"],
-            verify=False,
-            headers={"Content-Type": "application/json"},
-        )
+
+        cert_dict = kwargs.pop('certificate', {"certificate_name": VALID_CERT})
+        body = kwargs.pop('body', {})
+        response = requests.put(url='https://' + host + args[0],
+                                json=body,
+                                cert=CERT_DIR_PATH + '/' + cert_dict['certificate_name'],
+                                verify=False,
+                                headers={"Content-Type": "application/json"})
 
         response.status = response.status_code
 
         return response
 
     def head_with_mtls(self, *args, **kwargs):
-        cert_dict = kwargs.pop("certificate", {"certificate_name": VALID_CERT})
-        body = kwargs.pop("body", {})
-        response = requests.head(
-            url="https://" + host + args[0],
-            json=body,
-            cert=CERT_DIR_PATH + "/" + cert_dict["certificate_name"],
-            verify=False,
-            headers={"Content-Type": "application/json"},
-        )
+
+        cert_dict = kwargs.pop('certificate', {"certificate_name": VALID_CERT})
+        body = kwargs.pop('body', {})
+        response = requests.head(url='https://' + host + args[0],
+                                 json=body,
+                                 cert=CERT_DIR_PATH + '/' + cert_dict['certificate_name'],
+                                 verify=False,
+                                 headers={"Content-Type": "application/json"})
 
         response.status = response.status_code
 
diff --git a/lib/redfish_request.py b/lib/redfish_request.py
index da455a3..6add29f 100644
--- a/lib/redfish_request.py
+++ b/lib/redfish_request.py
@@ -1,18 +1,19 @@
 #!/usr/bin/env python3
 
+import requests
+import urllib.request
+from urllib3.exceptions import InsecureRequestWarning
 import json
 import secrets
 import string
-import urllib.request
 
-import requests
 from robot.api import logger
-from robot.api.deco import keyword
 from robot.libraries.BuiltIn import BuiltIn
-from urllib3.exceptions import InsecureRequestWarning
+from robot.api.deco import keyword
 
 
 class redfish_request(object):
+
     @staticmethod
     def generate_clientid():
         r"""
@@ -22,11 +23,9 @@
 
         """
 
-        clientid = "".join(
-            secrets.choice(string.ascii_letters + string.digits)
-            for i in range(10)
-        )
-        clientid = "".join(str(i) for i in clientid)
+        clientid = ''.join(secrets.choice(
+            string.ascii_letters + string.digits) for i in range(10))
+        clientid = ''.join(str(i) for i in clientid)
 
         return clientid
 
@@ -39,13 +38,11 @@
         url        Url passed by user e.g. /redfish/v1/Systems/system.
         """
 
-        openbmc_host = BuiltIn().get_variable_value(
-            "${OPENBMC_HOST}", default=""
-        )
+        openbmc_host = \
+            BuiltIn().get_variable_value("${OPENBMC_HOST}", default="")
         https_port = BuiltIn().get_variable_value("${HTTPS_PORT}", default="")
-        form_url = (
+        form_url = \
             "https://" + str(openbmc_host) + ":" + str(https_port) + str(url)
-        )
 
         return form_url
 
@@ -58,11 +55,10 @@
         response        Response from requests.
         """
 
-        logger.console(msg="", newline=True)
-        logger.info(
-            "Response : [%s]" % response.status_code, also_console=True
-        )
-        logger.console(msg="", newline=True)
+        logger.console(msg='', newline=True)
+        logger.info("Response : [%s]" % response.status_code,
+                    also_console=True)
+        logger.console(msg='', newline=True)
 
     def request_login(self, headers, url, credential, timeout=10):
         r"""
@@ -85,21 +81,19 @@
 
         if headers == "None":
             headers = dict()
-            headers["Content-Type"] = "application/json"
+            headers['Content-Type'] = 'application/json'
 
-        client_id = credential["Oem"]["OpenBMC"].get("ClientID", "None")
+        client_id = credential['Oem']['OpenBMC'].get('ClientID', "None")
 
         if "None" == client_id:
             self.clientid = redfish_request.generate_clientid()
-            credential["Oem"]["OpenBMC"]["ClientID"] = self.clientid
+            credential['Oem']['OpenBMC']['ClientID'] = self.clientid
 
-        logger.console(msg="", newline=True)
-        requests.packages.urllib3.disable_warnings(
-            category=InsecureRequestWarning
-        )
-        response = redfish_request.request_post(
-            self, headers=headers, url=url, data=credential
-        )
+        logger.console(msg='', newline=True)
+        requests.packages.urllib3.\
+            disable_warnings(category=InsecureRequestWarning)
+        response = redfish_request.request_post(self, headers=headers,
+                                                url=url, data=credential)
 
         return response
 
@@ -122,27 +116,19 @@
                        is not considered.
         """
 
-        if headers.get("Content-Type", None) is None:
-            headers["Content-Type"] = "application/json"
+        if headers.get('Content-Type', None) is None:
+            headers['Content-Type'] = 'application/json'
 
         url = redfish_request.form_url(url)
 
-        logger.console(msg="", newline=True)
-        msg = (
-            "Request Method : GET  ,headers = "
-            + json.dumps(headers)
-            + " ,uri = "
-            + str(url)
-            + " ,timeout = "
-            + str(timeout)
-            + " ,verify = "
-            + str(verify)
-        )
+        logger.console(msg='', newline=True)
+        msg = "Request Method : GET  ,headers = " + \
+              json.dumps(headers) + " ,uri = " + str(url) + " ,timeout = " + \
+              str(timeout) + " ,verify = " + str(verify)
         logger.info(msg, also_console=True)
 
-        response = requests.get(
-            url, headers=headers, timeout=timeout, verify=verify
-        )
+        response = requests.get(url, headers=headers,
+                                timeout=timeout, verify=verify)
         redfish_request.log_console(response)
 
         return response
@@ -169,29 +155,20 @@
                        is not considered.
         """
 
-        if headers.get("Content-Type", None) is None:
-            headers["Content-Type"] = "application/json"
+        if headers.get('Content-Type', None) is None:
+            headers['Content-Type'] = 'application/json'
 
         url = redfish_request.form_url(url)
 
-        logger.console(msg="", newline=True)
-        msg = (
-            "Request Method : PATCH  ,headers = "
-            + json.dumps(headers)
-            + " ,uri = "
-            + str(url)
-            + " ,data = "
-            + json.dumps(data)
-            + " ,timeout = "
-            + str(timeout)
-            + " ,verify = "
-            + str(verify)
-        )
+        logger.console(msg='', newline=True)
+        msg = "Request Method : PATCH  ,headers = " + \
+              json.dumps(headers) + " ,uri = " + str(url) + " ,data = " + \
+              json.dumps(data) + " ,timeout = " + str(timeout) + \
+              " ,verify = " + str(verify)
         logger.info(msg, also_console=True)
 
-        response = requests.patch(
-            url, headers=headers, data=data, timeout=timeout, verify=verify
-        )
+        response = requests.patch(url, headers=headers, data=data,
+                                  timeout=timeout, verify=verify)
         redfish_request.log_console(response)
 
         return response
@@ -218,40 +195,26 @@
                        is not considered.
         """
 
-        if headers.get("Content-Type", None) is None:
-            headers["Content-Type"] = "application/json"
+        if headers.get('Content-Type', None) is None:
+            headers['Content-Type'] = 'application/json'
 
         url = redfish_request.form_url(url)
 
-        logger.console(msg="", newline=True)
-        msg = (
-            "Request Method : POST  ,headers = "
-            + json.dumps(headers)
-            + " ,uri = "
-            + str(url)
-            + " ,data = "
-            + json.dumps(data)
-            + " ,timeout = "
-            + str(timeout)
-            + " ,verify = "
-            + str(verify)
-        )
+        logger.console(msg='', newline=True)
+        msg = "Request Method : POST  ,headers = " + \
+              json.dumps(headers) + " ,uri = " + str(url) + " ,data = " + \
+              json.dumps(data) + " ,timeout = " + str(timeout) + \
+              " ,verify = " + str(verify)
         logger.info(msg, also_console=True)
 
-        response = requests.post(
-            url,
-            headers=headers,
-            data=json.dumps(data),
-            timeout=timeout,
-            verify=verify,
-        )
+        response = requests.post(url, headers=headers, data=json.dumps(data),
+                                 timeout=timeout, verify=verify)
         redfish_request.log_console(response)
 
         return response
 
-    def request_put(
-        self, headers, url, files=None, data=None, timeout=10, verify=False
-    ):
+    def request_put(self, headers, url, files=None, data=None,
+                    timeout=10, verify=False):
         r"""
         Redfish put request.
 
@@ -276,41 +239,25 @@
                        is not considered.
         """
 
-        if headers.get("Content-Type", None) is None:
-            headers["Content-Type"] = "application/json"
+        if headers.get('Content-Type', None) is None:
+            headers['Content-Type'] = 'application/json'
 
         url = redfish_request.form_url(url)
 
-        logger.console(msg="", newline=True)
-        msg = (
-            "Request Method : PUT  ,headers = "
-            + json.dumps(headers)
-            + " ,uri = "
-            + str(url)
-            + " ,data = "
-            + json.dumps(data)
-            + " ,timeout = "
-            + str(timeout)
-            + " ,verify = "
-            + str(verify)
-        )
+        logger.console(msg='', newline=True)
+        msg = "Request Method : PUT  ,headers = " + \
+              json.dumps(headers) + " ,uri = " + str(url) + " ,data = " + \
+              json.dumps(data) + " ,timeout = " + str(timeout) + \
+              " ,verify = " + str(verify)
         logger.info(msg, also_console=True)
 
-        response = requests.put(
-            url,
-            headers=headers,
-            files=files,
-            data=data,
-            timeout=timeout,
-            verify=verify,
-        )
+        response = requests.put(url, headers=headers, files=files, data=data,
+                                timeout=timeout, verify=verify)
         redfish_request.log_console(response)
 
         return response
 
-    def request_delete(
-        self, headers, url, data=None, timeout=10, verify=False
-    ):
+    def request_delete(self, headers, url, data=None, timeout=10, verify=False):
         r"""
         Redfish delete request.
 
@@ -332,29 +279,20 @@
                        is not considered.
         """
 
-        if headers.get("Content-Type", None) is None:
-            headers["Content-Type"] = "application/json"
+        if headers.get('Content-Type', None) is None:
+            headers['Content-Type'] = 'application/json'
 
         url = redfish_request.form_url(url)
 
-        logger.console(msg="", newline=True)
-        msg = (
-            "Request Method : DELETE  ,headers = "
-            + json.dumps(headers)
-            + " ,uri = "
-            + str(url)
-            + " ,data = "
-            + json.dumps(data)
-            + " ,timeout = "
-            + str(timeout)
-            + " ,verify = "
-            + str(verify)
-        )
-        logger.console(msg="", newline=True)
+        logger.console(msg='', newline=True)
+        msg = "Request Method : DELETE  ,headers = " + \
+              json.dumps(headers) + " ,uri = " + str(url) + " ,data = " + \
+              json.dumps(data) + " ,timeout = " + str(timeout) + \
+              " ,verify = " + str(verify)
+        logger.console(msg='', newline=True)
 
-        response = requests.delete(
-            url, headers=headers, data=data, timeout=timeout, verify=verify
-        )
+        response = requests.delete(url, headers=headers, data=data,
+                                   timeout=timeout, verify=verify)
         redfish_request.log_console(response)
 
         return response
diff --git a/lib/secureboot/secureboot.py b/lib/secureboot/secureboot.py
index f38458e..1aa5f06 100644
--- a/lib/secureboot/secureboot.py
+++ b/lib/secureboot/secureboot.py
@@ -11,14 +11,17 @@
 
 # Define 'constant' functions.
 def secure_boot_mask():
+
     return 0x08000000
 
 
 def jumper_mask():
+
     return 0x04000000
 
 
 class secureboot(object):
+
     def get_secure_boot_info(self, quiet=None):
         r"""
         Get secure-boot information and return it as a tuple consisting of
diff --git a/lib/state.py b/lib/state.py
index 00fa124..26c3f79 100755
--- a/lib/state.py
+++ b/lib/state.py
@@ -27,27 +27,27 @@
 compared with the expected state.
 """
 
-import imp
-import os
-import re
-import sys
-
-import bmc_ssh_utils as bsu
-import gen_cmd as gc
 import gen_print as gp
-import gen_robot_utils as gru
 import gen_valid as gv
+import gen_robot_utils as gru
+import gen_cmd as gc
+import bmc_ssh_utils as bsu
+
 from robot.libraries.BuiltIn import BuiltIn
 from robot.utils import DotDict
 
+import re
+import os
+import sys
+import imp
+
+
 # NOTE: Avoid importing utils.robot because utils.robot imports state.py
 # (indirectly) which will cause failures.
 gru.my_import_resource("rest_client.robot")
 
-base_path = (
-    os.path.dirname(os.path.dirname(imp.find_module("gen_robot_print")[1]))
-    + os.sep
-)
+base_path = os.path.dirname(os.path.dirname(
+                            imp.find_module("gen_robot_print")[1])) + os.sep
 sys.path.append(base_path + "data/")
 
 # Previously, I had this coded:
@@ -76,243 +76,192 @@
 # is being removed but the OBMC_STATES_VERSION value will stay for now in the
 # event that it is needed in the future.
 
-OBMC_STATES_VERSION = int(os.environ.get("OBMC_STATES_VERSION", 1))
+OBMC_STATES_VERSION = int(os.environ.get('OBMC_STATES_VERSION', 1))
 
-redfish_support_trans_state = int(
-    os.environ.get("REDFISH_SUPPORT_TRANS_STATE", 0)
-) or int(
-    BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0)
-)
+redfish_support_trans_state = int(os.environ.get('REDFISH_SUPPORT_TRANS_STATE', 0)) or \
+    int(BuiltIn().get_variable_value("${REDFISH_SUPPORT_TRANS_STATE}", default=0))
 
-platform_arch_type = os.environ.get(
-    "PLATFORM_ARCH_TYPE", ""
-) or BuiltIn().get_variable_value("${PLATFORM_ARCH_TYPE}", default="power")
+platform_arch_type = os.environ.get('PLATFORM_ARCH_TYPE', '') or \
+    BuiltIn().get_variable_value("${PLATFORM_ARCH_TYPE}", default="power")
 
 # valid_os_req_states and default_os_req_states are used by the os_get_state
 # function.
 # valid_os_req_states is a list of state information supported by the
 # get_os_state function.
-valid_os_req_states = ["os_ping", "os_login", "os_run_cmd"]
+valid_os_req_states = ['os_ping',
+                       'os_login',
+                       'os_run_cmd']
 
 # When a user calls get_os_state w/o specifying req_states,
 # default_os_req_states is used as its value.
-default_os_req_states = ["os_ping", "os_login", "os_run_cmd"]
+default_os_req_states = ['os_ping',
+                         'os_login',
+                         'os_run_cmd']
 
 # Presently, some BMCs appear to not keep time very well.  This environment
 # variable directs the get_state function to use either the BMC's epoch time
 # or the local epoch time.
-USE_BMC_EPOCH_TIME = int(os.environ.get("USE_BMC_EPOCH_TIME", 0))
+USE_BMC_EPOCH_TIME = int(os.environ.get('USE_BMC_EPOCH_TIME', 0))
 
 # Useful state constant definition(s).
 if not redfish_support_trans_state:
     # When a user calls get_state w/o specifying req_states, default_req_states
     # is used as its value.
-    default_req_states = [
-        "rest",
-        "chassis",
-        "bmc",
-        "boot_progress",
-        "operating_system",
-        "host",
-        "os_ping",
-        "os_login",
-        "os_run_cmd",
-    ]
+    default_req_states = ['rest',
+                          'chassis',
+                          'bmc',
+                          'boot_progress',
+                          'operating_system',
+                          'host',
+                          'os_ping',
+                          'os_login',
+                          'os_run_cmd']
 
     # valid_req_states is a list of sub states supported by the get_state function.
     # valid_req_states, default_req_states and master_os_up_match are used by the
     # get_state function.
 
-    valid_req_states = [
-        "ping",
-        "packet_loss",
-        "uptime",
-        "epoch_seconds",
-        "elapsed_boot_time",
-        "rest",
-        "chassis",
-        "requested_chassis",
-        "bmc",
-        "requested_bmc",
-        "boot_progress",
-        "operating_system",
-        "host",
-        "requested_host",
-        "attempts_left",
-        "os_ping",
-        "os_login",
-        "os_run_cmd",
-    ]
+    valid_req_states = ['ping',
+                        'packet_loss',
+                        'uptime',
+                        'epoch_seconds',
+                        'elapsed_boot_time',
+                        'rest',
+                        'chassis',
+                        'requested_chassis',
+                        'bmc',
+                        'requested_bmc',
+                        'boot_progress',
+                        'operating_system',
+                        'host',
+                        'requested_host',
+                        'attempts_left',
+                        'os_ping',
+                        'os_login',
+                        'os_run_cmd']
 
     # default_state is an initial value which may be of use to callers.
-    default_state = DotDict(
-        [
-            ("rest", "1"),
-            ("chassis", "On"),
-            ("bmc", "Ready"),
-            ("boot_progress", "OSStart"),
-            ("operating_system", "BootComplete"),
-            ("host", "Running"),
-            ("os_ping", "1"),
-            ("os_login", "1"),
-            ("os_run_cmd", "1"),
-        ]
-    )
+    default_state = DotDict([('rest', '1'),
+                             ('chassis', 'On'),
+                             ('bmc', 'Ready'),
+                             ('boot_progress', 'OSStart'),
+                             ('operating_system', 'BootComplete'),
+                             ('host', 'Running'),
+                             ('os_ping', '1'),
+                             ('os_login', '1'),
+                             ('os_run_cmd', '1')])
 
     # A match state for checking that the system is at "standby".
-    standby_match_state = DotDict(
-        [
-            ("rest", "^1$"),
-            ("chassis", "^Off$"),
-            ("bmc", "^Ready$"),
-            ("boot_progress", "^Off|Unspecified$"),
-            ("operating_system", "^Inactive$"),
-            ("host", "^Off$"),
-        ]
-    )
+    standby_match_state = DotDict([('rest', '^1$'),
+                                   ('chassis', '^Off$'),
+                                   ('bmc', '^Ready$'),
+                                   ('boot_progress', '^Off|Unspecified$'),
+                                   ('operating_system', '^Inactive$'),
+                                   ('host', '^Off$')])
 
     # A match state for checking that the system is at "os running".
-    os_running_match_state = DotDict(
-        [
-            ("chassis", "^On$"),
-            ("bmc", "^Ready$"),
-            ("boot_progress", "FW Progress, Starting OS|OSStart"),
-            ("operating_system", "BootComplete"),
-            ("host", "^Running$"),
-            ("os_ping", "^1$"),
-            ("os_login", "^1$"),
-            ("os_run_cmd", "^1$"),
-        ]
-    )
+    os_running_match_state = DotDict([('chassis', '^On$'),
+                                      ('bmc', '^Ready$'),
+                                      ('boot_progress',
+                                       'FW Progress, Starting OS|OSStart'),
+                                      ('operating_system', 'BootComplete'),
+                                      ('host', '^Running$'),
+                                      ('os_ping', '^1$'),
+                                      ('os_login', '^1$'),
+                                      ('os_run_cmd', '^1$')])
 
     # A master dictionary to determine whether the os may be up.
-    master_os_up_match = DotDict(
-        [
-            ("chassis", "^On$"),
-            ("bmc", "^Ready$"),
-            ("boot_progress", "FW Progress, Starting OS|OSStart"),
-            ("operating_system", "BootComplete"),
-            ("host", "^Running|Quiesced$"),
-        ]
-    )
+    master_os_up_match = DotDict([('chassis', '^On$'),
+                                  ('bmc', '^Ready$'),
+                                  ('boot_progress',
+                                   'FW Progress, Starting OS|OSStart'),
+                                  ('operating_system', 'BootComplete'),
+                                  ('host', '^Running|Quiesced$')])
 
-    invalid_state_match = DotDict(
-        [
-            ("rest", "^$"),
-            ("chassis", "^$"),
-            ("bmc", "^$"),
-            ("boot_progress", "^$"),
-            ("operating_system", "^$"),
-            ("host", "^$"),
-        ]
-    )
+    invalid_state_match = DotDict([('rest', '^$'),
+                                   ('chassis', '^$'),
+                                   ('bmc', '^$'),
+                                   ('boot_progress', '^$'),
+                                   ('operating_system', '^$'),
+                                   ('host', '^$')])
 else:
     # When a user calls get_state w/o specifying req_states, default_req_states
     # is used as its value.
-    default_req_states = [
-        "redfish",
-        "chassis",
-        "bmc",
-        "boot_progress",
-        "host",
-        "os_ping",
-        "os_login",
-        "os_run_cmd",
-    ]
+    default_req_states = ['redfish',
+                          'chassis',
+                          'bmc',
+                          'boot_progress',
+                          'host',
+                          'os_ping',
+                          'os_login',
+                          'os_run_cmd']
 
     # valid_req_states is a list of sub states supported by the get_state function.
     # valid_req_states, default_req_states and master_os_up_match are used by the
     # get_state function.
 
-    valid_req_states = [
-        "ping",
-        "packet_loss",
-        "uptime",
-        "epoch_seconds",
-        "elapsed_boot_time",
-        "redfish",
-        "chassis",
-        "requested_chassis",
-        "bmc",
-        "requested_bmc",
-        "boot_progress",
-        "host",
-        "requested_host",
-        "attempts_left",
-        "os_ping",
-        "os_login",
-        "os_run_cmd",
-    ]
+    valid_req_states = ['ping',
+                        'packet_loss',
+                        'uptime',
+                        'epoch_seconds',
+                        'elapsed_boot_time',
+                        'redfish',
+                        'chassis',
+                        'requested_chassis',
+                        'bmc',
+                        'requested_bmc',
+                        'boot_progress',
+                        'host',
+                        'requested_host',
+                        'attempts_left',
+                        'os_ping',
+                        'os_login',
+                        'os_run_cmd']
 
     # default_state is an initial value which may be of use to callers.
-    default_state = DotDict(
-        [
-            ("redfish", "1"),
-            ("chassis", "On"),
-            ("bmc", "Enabled"),
-            (
-                "boot_progress",
-                "SystemHardwareInitializationComplete|OSBootStarted|OSRunning",
-            ),
-            ("host", "Enabled"),
-            ("os_ping", "1"),
-            ("os_login", "1"),
-            ("os_run_cmd", "1"),
-        ]
-    )
+    default_state = DotDict([('redfish', '1'),
+                             ('chassis', 'On'),
+                             ('bmc', 'Enabled'),
+                             ('boot_progress',
+                              'SystemHardwareInitializationComplete|OSBootStarted|OSRunning'),
+                             ('host', 'Enabled'),
+                             ('os_ping', '1'),
+                             ('os_login', '1'),
+                             ('os_run_cmd', '1')])
 
     # A match state for checking that the system is at "standby".
-    standby_match_state = DotDict(
-        [
-            ("redfish", "^1$"),
-            ("chassis", "^Off$"),
-            ("bmc", "^Enabled$"),
-            ("boot_progress", "^None$"),
-            ("host", "^Disabled$"),
-        ]
-    )
+    standby_match_state = DotDict([('redfish', '^1$'),
+                                   ('chassis', '^Off$'),
+                                   ('bmc', '^Enabled$'),
+                                   ('boot_progress', '^None$'),
+                                   ('host', '^Disabled$')])
 
     # A match state for checking that the system is at "os running".
-    os_running_match_state = DotDict(
-        [
-            ("chassis", "^On$"),
-            ("bmc", "^Enabled$"),
-            (
-                "boot_progress",
-                "SystemHardwareInitializationComplete|OSBootStarted|OSRunning",
-            ),
-            ("host", "^Enabled$"),
-            ("os_ping", "^1$"),
-            ("os_login", "^1$"),
-            ("os_run_cmd", "^1$"),
-        ]
-    )
+    os_running_match_state = DotDict([('chassis', '^On$'),
+                                      ('bmc', '^Enabled$'),
+                                      ('boot_progress',
+                                       'SystemHardwareInitializationComplete|OSBootStarted|OSRunning'),
+                                      ('host', '^Enabled$'),
+                                      ('os_ping', '^1$'),
+                                      ('os_login', '^1$'),
+                                      ('os_run_cmd', '^1$')])
 
     # A master dictionary to determine whether the os may be up.
-    master_os_up_match = DotDict(
-        [
-            ("chassis", "^On$"),
-            ("bmc", "^Enabled$"),
-            (
-                "boot_progress",
-                "SystemHardwareInitializationComplete|OSBootStarted|OSRunning",
-            ),
-            ("host", "^Enabled$"),
-        ]
-    )
+    master_os_up_match = DotDict([('chassis', '^On$'),
+                                  ('bmc', '^Enabled$'),
+                                  ('boot_progress',
+                                   'SystemHardwareInitializationComplete|OSBootStarted|OSRunning'),
+                                  ('host', '^Enabled$')])
 
-    invalid_state_match = DotDict(
-        [
-            ("redfish", "^$"),
-            ("chassis", "^$"),
-            ("bmc", "^$"),
-            ("boot_progress", "^$"),
-            ("host", "^$"),
-        ]
-    )
+    invalid_state_match = DotDict([('redfish', '^$'),
+                                   ('chassis', '^$'),
+                                   ('bmc', '^$'),
+                                   ('boot_progress', '^$'),
+                                   ('host', '^$')])
 
 # Filter the states based on platform type.
 if platform_arch_type == "x86":
+
     if not redfish_support_trans_state:
         default_req_states.remove("operating_system")
         valid_req_states.remove("operating_system")
@@ -331,7 +280,7 @@
     del invalid_state_match["boot_progress"]
 
 
-def return_state_constant(state_name="default_state"):
+def return_state_constant(state_name='default_state'):
     r"""
     Return the named state dictionary constant.
     """
@@ -379,10 +328,12 @@
     r"""
     Return expressions key constant.
     """
-    return "<expressions>"
+    return '<expressions>'
 
 
-def compare_states(state, match_state, match_type="and"):
+def compare_states(state,
+                   match_state,
+                   match_type='and'):
     r"""
     Compare 2 state dictionaries.  Return True if they match and False if they
     don't.  Note that the match_state dictionary does not need to have an entry
@@ -422,7 +373,7 @@
     match_type      This may be 'and' or 'or'.
     """
 
-    error_message = gv.valid_value(match_type, valid_values=["and", "or"])
+    error_message = gv.valid_value(match_type, valid_values=['and', 'or'])
     if error_message != "":
         BuiltIn().fail(gp.sprint_error(error_message))
 
@@ -431,7 +382,7 @@
     except TypeError:
         pass
 
-    default_match = match_type == "and"
+    default_match = (match_type == 'and')
     for key, match_state_value in match_state.items():
         # Blank match_state_value means "don't care".
         if match_state_value == "":
@@ -444,9 +395,7 @@
                     return match
         else:
             try:
-                match = (
-                    re.match(match_state_value, str(state[key])) is not None
-                )
+                match = (re.match(match_state_value, str(state[key])) is not None)
             except KeyError:
                 match = False
             if match != default_match:
@@ -455,14 +404,12 @@
     return default_match
 
 
-def get_os_state(
-    os_host="",
-    os_username="",
-    os_password="",
-    req_states=default_os_req_states,
-    os_up=True,
-    quiet=None,
-):
+def get_os_state(os_host="",
+                 os_username="",
+                 os_password="",
+                 req_states=default_os_req_states,
+                 os_up=True,
+                 quiet=None):
     r"""
     Get component states for the operating system such as ping, login,
     etc, put them into a dictionary and return them to the caller.
@@ -508,16 +455,11 @@
     if error_message != "":
         BuiltIn().fail(gp.sprint_error(error_message))
 
-    invalid_req_states = [
-        sub_state
-        for sub_state in req_states
-        if sub_state not in valid_os_req_states
-    ]
+    invalid_req_states = [sub_state for sub_state in req_states
+                          if sub_state not in valid_os_req_states]
     if len(invalid_req_states) > 0:
-        error_message = (
-            "The following req_states are not supported:\n"
-            + gp.sprint_var(invalid_req_states)
-        )
+        error_message = "The following req_states are not supported:\n" +\
+            gp.sprint_var(invalid_req_states)
         BuiltIn().fail(gp.sprint_error(error_message))
 
     # Initialize all substate values supported by this function.
@@ -526,37 +468,28 @@
     os_run_cmd = 0
 
     if os_up:
-        if "os_ping" in req_states:
+        if 'os_ping' in req_states:
             # See if the OS pings.
-            rc, out_buf = gc.shell_cmd(
-                "ping -c 1 -w 2 " + os_host,
-                print_output=0,
-                show_err=0,
-                ignore_err=1,
-            )
+            rc, out_buf = gc.shell_cmd("ping -c 1 -w 2 " + os_host,
+                                       print_output=0, show_err=0,
+                                       ignore_err=1)
             if rc == 0:
                 os_ping = 1
 
         # Programming note: All attributes which do not require an ssh login
         # should have been processed by this point.
-        master_req_login = ["os_login", "os_run_cmd"]
-        req_login = [
-            sub_state
-            for sub_state in req_states
-            if sub_state in master_req_login
-        ]
-        must_login = len(req_login) > 0
+        master_req_login = ['os_login', 'os_run_cmd']
+        req_login = [sub_state for sub_state in req_states if sub_state in
+                     master_req_login]
+        must_login = (len(req_login) > 0)
 
         if must_login:
-            output, stderr, rc = bsu.os_execute_command(
-                "uptime",
-                quiet=quiet,
-                ignore_err=1,
-                time_out=20,
-                os_host=os_host,
-                os_username=os_username,
-                os_password=os_password,
-            )
+            output, stderr, rc = bsu.os_execute_command("uptime", quiet=quiet,
+                                                        ignore_err=1,
+                                                        time_out=20,
+                                                        os_host=os_host,
+                                                        os_username=os_username,
+                                                        os_password=os_password)
             if rc == 0:
                 os_login = 1
                 os_run_cmd = 1
@@ -572,16 +505,14 @@
     return os_state
 
 
-def get_state(
-    openbmc_host="",
-    openbmc_username="",
-    openbmc_password="",
-    os_host="",
-    os_username="",
-    os_password="",
-    req_states=default_req_states,
-    quiet=None,
-):
+def get_state(openbmc_host="",
+              openbmc_username="",
+              openbmc_password="",
+              os_host="",
+              os_username="",
+              os_password="",
+              req_states=default_req_states,
+              quiet=None):
     r"""
     Get component states such as chassis state, bmc state, etc, put them into a
     dictionary and return them to the caller.
@@ -650,149 +581,116 @@
         if os_password is None:
             os_password = ""
 
-    invalid_req_states = [
-        sub_state
-        for sub_state in req_states
-        if sub_state not in valid_req_states
-    ]
+    invalid_req_states = [sub_state for sub_state in req_states
+                          if sub_state not in valid_req_states]
     if len(invalid_req_states) > 0:
-        error_message = (
-            "The following req_states are not supported:\n"
-            + gp.sprint_var(invalid_req_states)
-        )
+        error_message = "The following req_states are not supported:\n" +\
+            gp.sprint_var(invalid_req_states)
         BuiltIn().fail(gp.sprint_error(error_message))
 
     # Initialize all substate values supported by this function.
     ping = 0
-    packet_loss = ""
-    uptime = ""
-    epoch_seconds = ""
-    elapsed_boot_time = ""
-    rest = ""
-    redfish = ""
-    chassis = ""
-    requested_chassis = ""
-    bmc = ""
-    requested_bmc = ""
+    packet_loss = ''
+    uptime = ''
+    epoch_seconds = ''
+    elapsed_boot_time = ''
+    rest = ''
+    redfish = ''
+    chassis = ''
+    requested_chassis = ''
+    bmc = ''
+    requested_bmc = ''
     # BootProgress state will get populated when state logic enumerates the
     # state URI. This is to prevent state dictionary  boot_progress value
     # getting empty when the BootProgress is NOT found, making it optional.
-    boot_progress = "NA"
-    operating_system = ""
-    host = ""
-    requested_host = ""
-    attempts_left = ""
+    boot_progress = 'NA'
+    operating_system = ''
+    host = ''
+    requested_host = ''
+    attempts_left = ''
 
     # Get the component states.
-    if "ping" in req_states:
+    if 'ping' in req_states:
         # See if the OS pings.
-        rc, out_buf = gc.shell_cmd(
-            "ping -c 1 -w 2 " + openbmc_host,
-            print_output=0,
-            show_err=0,
-            ignore_err=1,
-        )
+        rc, out_buf = gc.shell_cmd("ping -c 1 -w 2 " + openbmc_host,
+                                   print_output=0, show_err=0,
+                                   ignore_err=1)
         if rc == 0:
             ping = 1
 
-    if "packet_loss" in req_states:
+    if 'packet_loss' in req_states:
         # See if the OS pings.
-        cmd_buf = (
-            "ping -c 5 -w 5 "
-            + openbmc_host
-            + " | egrep 'packet loss' | sed -re 's/.* ([0-9]+)%.*/\\1/g'"
-        )
-        rc, out_buf = gc.shell_cmd(
-            cmd_buf, print_output=0, show_err=0, ignore_err=1
-        )
+        cmd_buf = "ping -c 5 -w 5 " + openbmc_host +\
+            " | egrep 'packet loss' | sed -re 's/.* ([0-9]+)%.*/\\1/g'"
+        rc, out_buf = gc.shell_cmd(cmd_buf,
+                                   print_output=0, show_err=0,
+                                   ignore_err=1)
         if rc == 0:
             packet_loss = out_buf.rstrip("\n")
 
-    if "uptime" in req_states:
+    if 'uptime' in req_states:
         # Sometimes reading uptime results in a blank value. Call with
         # wait_until_keyword_succeeds to ensure a non-blank value is obtained.
-        remote_cmd_buf = (
-            "bash -c 'read uptime filler 2>/dev/null < /proc/uptime"
-            + ' && [ ! -z "${uptime}" ] && echo ${uptime}\''
-        )
-        cmd_buf = [
-            "BMC Execute Command",
-            re.sub("\\$", "\\$", remote_cmd_buf),
-            "quiet=1",
-            "test_mode=0",
-            "time_out=5",
-        ]
+        remote_cmd_buf = "bash -c 'read uptime filler 2>/dev/null < /proc/uptime" +\
+            " && [ ! -z \"${uptime}\" ] && echo ${uptime}'"
+        cmd_buf = ["BMC Execute Command",
+                   re.sub('\\$', '\\$', remote_cmd_buf), 'quiet=1',
+                   'test_mode=0', 'time_out=5']
         gp.qprint_issuing(cmd_buf, 0)
         gp.qprint_issuing(remote_cmd_buf, 0)
         try:
-            stdout, stderr, rc = BuiltIn().wait_until_keyword_succeeds(
-                "10 sec", "5 sec", *cmd_buf
-            )
+            stdout, stderr, rc =\
+                BuiltIn().wait_until_keyword_succeeds("10 sec", "5 sec",
+                                                      *cmd_buf)
             if rc == 0 and stderr == "":
                 uptime = stdout
         except AssertionError as my_assertion_error:
             pass
 
-    if "epoch_seconds" in req_states or "elapsed_boot_time" in req_states:
+    if 'epoch_seconds' in req_states or 'elapsed_boot_time' in req_states:
         date_cmd_buf = "date -u +%s"
         if USE_BMC_EPOCH_TIME:
-            cmd_buf = ["BMC Execute Command", date_cmd_buf, "quiet=${1}"]
+            cmd_buf = ["BMC Execute Command", date_cmd_buf, 'quiet=${1}']
             if not quiet:
                 gp.print_issuing(cmd_buf)
-            status, ret_values = BuiltIn().run_keyword_and_ignore_error(
-                *cmd_buf
-            )
+            status, ret_values = \
+                BuiltIn().run_keyword_and_ignore_error(*cmd_buf)
             if status == "PASS":
                 stdout, stderr, rc = ret_values
                 if rc == 0 and stderr == "":
                     epoch_seconds = stdout.rstrip("\n")
         else:
-            shell_rc, out_buf = gc.cmd_fnc_u(
-                date_cmd_buf, quiet=quiet, print_output=0
-            )
+            shell_rc, out_buf = gc.cmd_fnc_u(date_cmd_buf,
+                                             quiet=quiet,
+                                             print_output=0)
             if shell_rc == 0:
                 epoch_seconds = out_buf.rstrip("\n")
 
-    if "elapsed_boot_time" in req_states:
+    if 'elapsed_boot_time' in req_states:
         global start_boot_seconds
         elapsed_boot_time = int(epoch_seconds) - start_boot_seconds
 
     if not redfish_support_trans_state:
-        master_req_rest = [
-            "rest",
-            "host",
-            "requested_host",
-            "operating_system",
-            "attempts_left",
-            "boot_progress",
-            "chassis",
-            "requested_chassisbmcrequested_bmc",
-        ]
+        master_req_rest = ['rest', 'host', 'requested_host', 'operating_system',
+                           'attempts_left', 'boot_progress', 'chassis',
+                           'requested_chassis' 'bmc' 'requested_bmc']
 
-        req_rest = [
-            sub_state
-            for sub_state in req_states
-            if sub_state in master_req_rest
-        ]
-        need_rest = len(req_rest) > 0
+        req_rest = [sub_state for sub_state in req_states if sub_state in
+                    master_req_rest]
+        need_rest = (len(req_rest) > 0)
         state = DotDict()
         if need_rest:
-            cmd_buf = [
-                "Read Properties",
-                SYSTEM_STATE_URI + "enumerate",
-                "quiet=${" + str(quiet) + "}",
-                "timeout=30",
-            ]
+            cmd_buf = ["Read Properties", SYSTEM_STATE_URI + "enumerate",
+                       "quiet=${" + str(quiet) + "}", "timeout=30"]
             gp.dprint_issuing(cmd_buf)
-            status, ret_values = BuiltIn().run_keyword_and_ignore_error(
-                *cmd_buf
-            )
+            status, ret_values = \
+                BuiltIn().run_keyword_and_ignore_error(*cmd_buf)
             if status == "PASS":
-                state["rest"] = "1"
+                state['rest'] = '1'
             else:
-                state["rest"] = "0"
+                state['rest'] = '0'
 
-            if int(state["rest"]):
+            if int(state['rest']):
                 for url_path in ret_values:
                     # Skip conflicting "CurrentHostState" URL from the enum
                     # /xyz/openbmc_project/state/hypervisor0
@@ -808,69 +706,54 @@
                     for attr_name in ret_values[url_path]:
                         # Create a state key value based on the attr_name.
                         try:
-                            ret_values[url_path][attr_name] = re.sub(
-                                r".*\.", "", ret_values[url_path][attr_name]
-                            )
+                            ret_values[url_path][attr_name] = \
+                                re.sub(r'.*\.', "",
+                                       ret_values[url_path][attr_name])
                         except TypeError:
                             pass
                         # Do some key name manipulations.
-                        new_attr_name = re.sub(
-                            r"^Current|(State|Transition)$", "", attr_name
-                        )
-                        new_attr_name = re.sub(r"BMC", r"Bmc", new_attr_name)
-                        new_attr_name = re.sub(
-                            r"([A-Z][a-z])", r"_\1", new_attr_name
-                        )
+                        new_attr_name = re.sub(r'^Current|(State|Transition)$',
+                                               "", attr_name)
+                        new_attr_name = re.sub(r'BMC', r'Bmc', new_attr_name)
+                        new_attr_name = re.sub(r'([A-Z][a-z])', r'_\1',
+                                               new_attr_name)
                         new_attr_name = new_attr_name.lower().lstrip("_")
-                        new_attr_name = re.sub(
-                            r"power", r"chassis", new_attr_name
-                        )
+                        new_attr_name = re.sub(r'power', r'chassis', new_attr_name)
                         if new_attr_name in req_states:
-                            state[new_attr_name] = ret_values[url_path][
-                                attr_name
-                            ]
+                            state[new_attr_name] = ret_values[url_path][attr_name]
     else:
-        master_req_rf = [
-            "redfish",
-            "host",
-            "requested_host",
-            "attempts_left",
-            "boot_progress",
-            "chassis",
-            "requested_chassisbmcrequested_bmc",
-        ]
+        master_req_rf = ['redfish', 'host', 'requested_host',
+                         'attempts_left', 'boot_progress', 'chassis',
+                         'requested_chassis' 'bmc' 'requested_bmc']
 
-        req_rf = [
-            sub_state for sub_state in req_states if sub_state in master_req_rf
-        ]
-        need_rf = len(req_rf) > 0
+        req_rf = [sub_state for sub_state in req_states if sub_state in
+                  master_req_rf]
+        need_rf = (len(req_rf) > 0)
         state = DotDict()
         if need_rf:
             cmd_buf = ["Redfish Get States"]
             gp.dprint_issuing(cmd_buf)
             try:
-                status, ret_values = BuiltIn().run_keyword_and_ignore_error(
-                    *cmd_buf
-                )
+                status, ret_values = \
+                    BuiltIn().run_keyword_and_ignore_error(*cmd_buf)
             except Exception as ex:
                 # Robot raised UserKeywordExecutionFailed error exception.
                 gp.dprint_issuing("Retrying Redfish Get States")
-                status, ret_values = BuiltIn().run_keyword_and_ignore_error(
-                    *cmd_buf
-                )
+                status, ret_values = \
+                    BuiltIn().run_keyword_and_ignore_error(*cmd_buf)
 
             gp.dprint_vars(status, ret_values)
             if status == "PASS":
-                state["redfish"] = "1"
+                state['redfish'] = '1'
             else:
-                state["redfish"] = "0"
+                state['redfish'] = '0'
 
-            if int(state["redfish"]):
-                state["chassis"] = ret_values["chassis"]
-                state["host"] = ret_values["host"]
-                state["bmc"] = ret_values["bmc"]
+            if int(state['redfish']):
+                state['chassis'] = ret_values['chassis']
+                state['host'] = ret_values['host']
+                state['bmc'] = ret_values['bmc']
                 if platform_arch_type != "x86":
-                    state["boot_progress"] = ret_values["boot_progress"]
+                    state['boot_progress'] = ret_values['boot_progress']
 
     for sub_state in req_states:
         if sub_state in state:
@@ -886,9 +769,8 @@
         # it doesn't exist.
         return state
 
-    os_req_states = [
-        sub_state for sub_state in req_states if sub_state.startswith("os_")
-    ]
+    os_req_states = [sub_state for sub_state in req_states
+                     if sub_state.startswith('os_')]
 
     if len(os_req_states) > 0:
         # The caller has specified an os_host and they have requested
@@ -902,14 +784,12 @@
             if sub_state in req_states:
                 os_up_match[sub_state] = master_os_up_match[sub_state]
         os_up = compare_states(state, os_up_match)
-        os_state = get_os_state(
-            os_host=os_host,
-            os_username=os_username,
-            os_password=os_password,
-            req_states=os_req_states,
-            os_up=os_up,
-            quiet=quiet,
-        )
+        os_state = get_os_state(os_host=os_host,
+                                os_username=os_username,
+                                os_password=os_password,
+                                req_states=os_req_states,
+                                os_up=os_up,
+                                quiet=quiet)
         # Append os_state dictionary to ours.
         state.update(os_state)
 
@@ -935,18 +815,16 @@
     exit_wait_early_message = value
 
 
-def check_state(
-    match_state,
-    invert=0,
-    print_string="",
-    openbmc_host="",
-    openbmc_username="",
-    openbmc_password="",
-    os_host="",
-    os_username="",
-    os_password="",
-    quiet=None,
-):
+def check_state(match_state,
+                invert=0,
+                print_string="",
+                openbmc_host="",
+                openbmc_username="",
+                openbmc_password="",
+                os_host="",
+                os_username="",
+                os_password="",
+                quiet=None):
     r"""
     Check that the Open BMC machine's composite state matches the specified
     state.  On success, this keyword returns the machine's composite state as a
@@ -995,16 +873,14 @@
     if expressions_key() in req_states:
         req_states.remove(expressions_key())
     # Initialize state.
-    state = get_state(
-        openbmc_host=openbmc_host,
-        openbmc_username=openbmc_username,
-        openbmc_password=openbmc_password,
-        os_host=os_host,
-        os_username=os_username,
-        os_password=os_password,
-        req_states=req_states,
-        quiet=quiet,
-    )
+    state = get_state(openbmc_host=openbmc_host,
+                      openbmc_username=openbmc_username,
+                      openbmc_password=openbmc_password,
+                      os_host=os_host,
+                      os_username=os_username,
+                      os_password=os_password,
+                      req_states=req_states,
+                      quiet=quiet)
     if not quiet:
         gp.print_var(state)
 
@@ -1018,36 +894,29 @@
     match = compare_states(state, match_state)
 
     if invert and match:
-        fail_msg = (
-            "The current state of the machine matches the match"
-            + " state:\n"
-            + gp.sprint_varx("state", state)
-        )
+        fail_msg = "The current state of the machine matches the match" +\
+                   " state:\n" + gp.sprint_varx("state", state)
         BuiltIn().fail("\n" + gp.sprint_error(fail_msg))
     elif not invert and not match:
-        fail_msg = (
-            "The current state of the machine does NOT match the"
-            + " match state:\n"
-            + gp.sprint_varx("state", state)
-        )
+        fail_msg = "The current state of the machine does NOT match the" +\
+                   " match state:\n" +\
+                   gp.sprint_varx("state", state)
         BuiltIn().fail("\n" + gp.sprint_error(fail_msg))
 
     return state
 
 
-def wait_state(
-    match_state=(),
-    wait_time="1 min",
-    interval="1 second",
-    invert=0,
-    openbmc_host="",
-    openbmc_username="",
-    openbmc_password="",
-    os_host="",
-    os_username="",
-    os_password="",
-    quiet=None,
-):
+def wait_state(match_state=(),
+               wait_time="1 min",
+               interval="1 second",
+               invert=0,
+               openbmc_host="",
+               openbmc_username="",
+               openbmc_password="",
+               os_host="",
+               os_username="",
+               os_password="",
+               quiet=None):
     r"""
     Wait for the Open BMC machine's composite state to match the specified
     state.  On success, this keyword returns the machine's composite state as
@@ -1098,15 +967,9 @@
             alt_text = "cease to "
         else:
             alt_text = ""
-        gp.print_timen(
-            "Checking every "
-            + str(interval)
-            + " for up to "
-            + str(wait_time)
-            + " for the state of the machine to "
-            + alt_text
-            + "match the state shown below."
-        )
+        gp.print_timen("Checking every " + str(interval) + " for up to "
+                       + str(wait_time) + " for the state of the machine to "
+                       + alt_text + "match the state shown below.")
         gp.print_var(match_state)
 
     if quiet:
@@ -1119,24 +982,16 @@
         # In debug we print state so no need to print the "#".
         print_string = ""
     check_state_quiet = 1 - debug
-    cmd_buf = [
-        "Check State",
-        match_state,
-        "invert=${" + str(invert) + "}",
-        "print_string=" + print_string,
-        "openbmc_host=" + openbmc_host,
-        "openbmc_username=" + openbmc_username,
-        "openbmc_password=" + openbmc_password,
-        "os_host=" + os_host,
-        "os_username=" + os_username,
-        "os_password=" + os_password,
-        "quiet=${" + str(check_state_quiet) + "}",
-    ]
+    cmd_buf = ["Check State", match_state, "invert=${" + str(invert) + "}",
+               "print_string=" + print_string, "openbmc_host=" + openbmc_host,
+               "openbmc_username=" + openbmc_username,
+               "openbmc_password=" + openbmc_password, "os_host=" + os_host,
+               "os_username=" + os_username, "os_password=" + os_password,
+               "quiet=${" + str(check_state_quiet) + "}"]
     gp.dprint_issuing(cmd_buf)
     try:
-        state = BuiltIn().wait_until_keyword_succeeds(
-            wait_time, interval, *cmd_buf
-        )
+        state = BuiltIn().wait_until_keyword_succeeds(wait_time, interval,
+                                                      *cmd_buf)
     except AssertionError as my_assertion_error:
         gp.printn()
         message = my_assertion_error.args[0]
@@ -1169,7 +1024,8 @@
 set_start_boot_seconds(0)
 
 
-def wait_for_comm_cycle(start_boot_seconds, quiet=None):
+def wait_for_comm_cycle(start_boot_seconds,
+                        quiet=None):
     r"""
     Wait for the BMC uptime to be less than elapsed_boot_time.
 
@@ -1199,19 +1055,15 @@
 
     # Wait for uptime to be less than elapsed_boot_time.
     set_start_boot_seconds(start_boot_seconds)
-    expr = "int(float(state['uptime'])) < int(state['elapsed_boot_time'])"
-    match_state = DotDict(
-        [
-            ("uptime", "^[0-9\\.]+$"),
-            ("elapsed_boot_time", "^[0-9]+$"),
-            (expressions_key(), [expr]),
-        ]
-    )
+    expr = 'int(float(state[\'uptime\'])) < int(state[\'elapsed_boot_time\'])'
+    match_state = DotDict([('uptime', '^[0-9\\.]+$'),
+                           ('elapsed_boot_time', '^[0-9]+$'),
+                           (expressions_key(), [expr])])
     wait_state(match_state, wait_time="12 mins", interval="5 seconds")
 
     gp.qprint_timen("Verifying that REST/Redfish API interface is working.")
     if not redfish_support_trans_state:
-        match_state = DotDict([("rest", "^1$")])
+        match_state = DotDict([('rest', '^1$')])
     else:
-        match_state = DotDict([("redfish", "^1$")])
+        match_state = DotDict([('redfish', '^1$')])
     state = wait_state(match_state, wait_time="5 mins", interval="2 seconds")
diff --git a/lib/state_map.py b/lib/state_map.py
index cdd1014..f8353de 100644
--- a/lib/state_map.py
+++ b/lib/state_map.py
@@ -7,94 +7,101 @@
 
 """
 
+import gen_robot_keyword as keyword
+import variables as var
+
 import os
 import re
 import sys
 
-import gen_robot_keyword as keyword
-import variables as var
 from robot.libraries.BuiltIn import BuiltIn
 
 robot_pgm_dir_path = os.path.dirname(__file__) + os.sep
-repo_data_dir_path = re.sub("/lib", "/data", robot_pgm_dir_path)
+repo_data_dir_path = re.sub('/lib', '/data', robot_pgm_dir_path)
 sys.path.append(repo_data_dir_path)
 
 
 BuiltIn().import_resource("state_manager.robot")
 BuiltIn().import_resource("rest_client.robot")
 
-platform_arch_type = os.environ.get(
-    "PLATFORM_ARCH_TYPE", ""
-) or BuiltIn().get_variable_value("${PLATFORM_ARCH_TYPE}", default="power")
+platform_arch_type = os.environ.get('PLATFORM_ARCH_TYPE', '') or \
+    BuiltIn().get_variable_value("${PLATFORM_ARCH_TYPE}", default="power")
 
 # We will build eventually the mapping for warm, cold reset as well.
 VALID_STATES = {
-    "reboot": {
+    'reboot':
+    {
         # (Power Policy, BMC state, Chassis State, Host State)
-        ("LEAVE_OFF", "Ready", "Off", "Off"),
-        ("ALWAYS_POWER_ON", "Ready", "On", "Running"),
-        ("ALWAYS_POWER_ON", "Ready", "On", "Off"),
-        ("RESTORE_LAST_STATE", "Ready", "On", "Running"),
-        ("RESTORE_LAST_STATE", "Ready", "On", "Off"),
-        ("ALWAYS_POWER_OFF", "Ready", "On", "Running"),
-        ("ALWAYS_POWER_OFF", "Ready", "Off", "Off"),
+        ('LEAVE_OFF', 'Ready', 'Off', 'Off'),
+        ('ALWAYS_POWER_ON', 'Ready', 'On', 'Running'),
+        ('ALWAYS_POWER_ON', 'Ready', 'On', 'Off'),
+        ('RESTORE_LAST_STATE', 'Ready', 'On', 'Running'),
+        ('RESTORE_LAST_STATE', 'Ready', 'On', 'Off'),
+        ('ALWAYS_POWER_OFF', 'Ready', 'On', 'Running'),
+        ('ALWAYS_POWER_OFF', 'Ready', 'Off', 'Off'),
     },
 }
 
 VALID_BOOT_STATES = {
-    "Off": {  # Valid states when Host is Off.
+    'Off':  # Valid states when Host is Off.
+    {
         # (BMC , Chassis , Host , BootProgress, OperatingSystemState)
         (
             "xyz.openbmc_project.State.BMC.BMCState.Ready",
             "xyz.openbmc_project.State.Chassis.PowerState.Off",
             "xyz.openbmc_project.State.Host.HostState.Off",
             "xyz.openbmc_project.State.Boot.Progress.ProgressStages.Unspecified",
-            "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.Inactive",
+            "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.Inactive"
         ),
     },
-    "Reboot": {  # Valid states when BMC reset to standby.
+    'Reboot':  # Valid states when BMC reset to standby.
+    {
         # (BMC , Chassis , Host , BootProgress, OperatingSystemState)
         (
             "xyz.openbmc_project.State.BMC.BMCState.Ready",
             "xyz.openbmc_project.State.Chassis.PowerState.Off",
             "xyz.openbmc_project.State.Host.HostState.Off",
             "xyz.openbmc_project.State.Boot.Progress.ProgressStages.Unspecified",
-            "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.Inactive",
+            "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.Inactive"
         ),
     },
-    "Running": {  # Valid states when Host is powering on.
+    'Running':  # Valid states when Host is powering on.
+    {
         # (BMC , Chassis , Host , BootProgress, OperatingSystemState)
         (
             "xyz.openbmc_project.State.BMC.BMCState.Ready",
             "xyz.openbmc_project.State.Chassis.PowerState.On",
             "xyz.openbmc_project.State.Host.HostState.Running",
             "xyz.openbmc_project.State.Boot.Progress.ProgressStages.MotherboardInit",
-            "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.Inactive",
+            "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.Inactive"
         ),
     },
-    "Booted": {  # Valid state when Host is booted.
+    'Booted':  # Valid state when Host is booted.
+    {
         # (BMC , Chassis , Host , BootProgress, OperatingSystemState)
         (
             "xyz.openbmc_project.State.BMC.BMCState.Ready",
             "xyz.openbmc_project.State.Chassis.PowerState.On",
             "xyz.openbmc_project.State.Host.HostState.Running",
             "xyz.openbmc_project.State.Boot.Progress.ProgressStages.OSStart",
-            "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.BootComplete",
+            "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.BootComplete"
         ),
     },
-    "ResetReload": {  # Valid state BMC reset reload when host is booted.
+    'ResetReload':  # Valid state BMC reset reload when host is booted.
+    {
         # (BMC , Chassis , Host , BootProgress, OperatingSystemState)
         (
             "xyz.openbmc_project.State.BMC.BMCState.Ready",
             "xyz.openbmc_project.State.Chassis.PowerState.On",
             "xyz.openbmc_project.State.Host.HostState.Running",
             "xyz.openbmc_project.State.Boot.Progress.ProgressStages.OSStart",
-            "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.BootComplete",
+            "xyz.openbmc_project.State.OperatingSystem.Status.OSStatus.BootComplete"
         ),
     },
 }
 REDFISH_VALID_BOOT_STATES = {
-    "Off": {  # Valid states when Host is Off.
+    'Off':  # Valid states when Host is Off.
+    {
         # (BMC , Chassis , Host , BootProgress)
         (
             "Enabled",
@@ -103,7 +110,8 @@
             "None",
         ),
     },
-    "Reboot": {  # Valid states when BMC reset to standby.
+    'Reboot':  # Valid states when BMC reset to standby.
+    {
         # (BMC , Chassis , Host , BootProgress)
         (
             "Enabled",
@@ -112,7 +120,8 @@
             "None",
         ),
     },
-    "Running": {  # Valid states when Host is powering on.
+    'Running':  # Valid states when Host is powering on.
+    {
         # (BMC , Chassis , Host , BootProgress)
         (
             "Enabled",
@@ -121,7 +130,8 @@
             "OSRunning",
         ),
     },
-    "Booted": {  # Valid state when Host is booted.
+    'Booted':  # Valid state when Host is booted.
+    {
         # (BMC , Chassis , Host , BootProgress)
         (
             "Enabled",
@@ -130,7 +140,8 @@
             "OSRunning",
         ),
     },
-    "ResetReload": {  # Valid state BMC reset reload when host is booted.
+    'ResetReload':  # Valid state BMC reset reload when host is booted.
+    {
         # (BMC , Chassis , Host , BootProgress)
         (
             "Enabled",
@@ -151,45 +162,41 @@
                 for x in state_tuple
                 if not (
                     x.startswith("xyz.openbmc_project.State.Boot.Progress")
-                    or x.startswith(
-                        "xyz.openbmc_project.State.OperatingSystem"
-                    )
+                    or x.startswith("xyz.openbmc_project.State.OperatingSystem")
                 )
             )
             VALID_BOOT_STATES_X86[state_name].add(state_tuple_new)
     VALID_BOOT_STATES = VALID_BOOT_STATES_X86
 
 
-class state_map:
+class state_map():
+
     def get_boot_state(self):
         r"""
         Return the system state as a tuple of bmc, chassis, host state,
         BootProgress and OperatingSystemState.
         """
 
-        status, state = keyword.run_key(
-            "Read Properties  " + var.SYSTEM_STATE_URI + "enumerate"
-        )
-        bmc_state = state[var.SYSTEM_STATE_URI + "bmc0"]["CurrentBMCState"]
-        chassis_state = state[var.SYSTEM_STATE_URI + "chassis0"][
-            "CurrentPowerState"
-        ]
-        host_state = state[var.SYSTEM_STATE_URI + "host0"]["CurrentHostState"]
+        status, state = keyword.run_key("Read Properties  "
+                                        + var.SYSTEM_STATE_URI + "enumerate")
+        bmc_state = state[var.SYSTEM_STATE_URI + 'bmc0']['CurrentBMCState']
+        chassis_state = \
+            state[var.SYSTEM_STATE_URI + 'chassis0']['CurrentPowerState']
+        host_state = state[var.SYSTEM_STATE_URI + 'host0']['CurrentHostState']
         if platform_arch_type == "x86":
-            return (str(bmc_state), str(chassis_state), str(host_state))
+            return (str(bmc_state),
+                    str(chassis_state),
+                    str(host_state))
         else:
-            boot_state = state[var.SYSTEM_STATE_URI + "host0"]["BootProgress"]
-            os_state = state[var.SYSTEM_STATE_URI + "host0"][
-                "OperatingSystemState"
-            ]
+            boot_state = state[var.SYSTEM_STATE_URI + 'host0']['BootProgress']
+            os_state = \
+                state[var.SYSTEM_STATE_URI + 'host0']['OperatingSystemState']
 
-            return (
-                str(bmc_state),
-                str(chassis_state),
-                str(host_state),
-                str(boot_state),
-                str(os_state),
-            )
+            return (str(bmc_state),
+                    str(chassis_state),
+                    str(host_state),
+                    str(boot_state),
+                    str(os_state))
 
     def valid_boot_state(self, boot_type, state_set):
         r"""
@@ -217,9 +224,7 @@
         state_dict                  State dictionary.
         """
 
-        if set(state_dict.values()) in set(
-            REDFISH_VALID_BOOT_STATES[boot_type]
-        ):
+        if set(state_dict.values()) in set(REDFISH_VALID_BOOT_STATES[boot_type]):
             return True
         else:
             return False
diff --git a/lib/tally_sheet.py b/lib/tally_sheet.py
index 52ed279..03162af 100755
--- a/lib/tally_sheet.py
+++ b/lib/tally_sheet.py
@@ -4,10 +4,10 @@
 Define the tally_sheet class.
 """
 
+import sys
 import collections
 import copy
 import re
-import sys
 
 try:
     from robot.utils import DotDict
@@ -18,6 +18,7 @@
 
 
 class tally_sheet:
+
     r"""
     This class is the implementation of a tally sheet.  The sheet can be viewed as rows and columns.  Each
     row has a unique key field.
@@ -62,12 +63,10 @@
 
     """
 
-    def __init__(
-        self,
-        row_key_field_name="Description",
-        init_fields_dict=dict(),
-        obj_name="tally_sheet",
-    ):
+    def __init__(self,
+                 row_key_field_name='Description',
+                 init_fields_dict=dict(),
+                 obj_name='tally_sheet'):
         r"""
         Create a tally sheet object.
 
@@ -93,12 +92,13 @@
         self.__sum_fields = []
         self.__calc_fields = []
 
-    def init(
-        self, row_key_field_name, init_fields_dict, obj_name="tally_sheet"
-    ):
-        self.__init__(
-            row_key_field_name, init_fields_dict, obj_name="tally_sheet"
-        )
+    def init(self,
+             row_key_field_name,
+             init_fields_dict,
+             obj_name='tally_sheet'):
+        self.__init__(row_key_field_name,
+                      init_fields_dict,
+                      obj_name='tally_sheet')
 
     def set_sum_fields(self, sum_fields):
         r"""
@@ -137,7 +137,7 @@
 
         if row_key in self.__table:
             # If we allow this, the row values get re-initialized.
-            message = 'An entry for "' + row_key + '" already exists in'
+            message = "An entry for \"" + row_key + "\" already exists in"
             message += " tally sheet."
             raise ValueError(message)
         if init_fields_dict is None:
@@ -193,7 +193,7 @@
         for row_key, value in self.__table.items():
             # Walk through the calc fields and process them.
             for calc_field in self.__calc_fields:
-                tokens = [i for i in re.split(r"(\d+|\W+)", calc_field) if i]
+                tokens = [i for i in re.split(r'(\d+|\W+)', calc_field) if i]
                 cmd_buf = ""
                 for token in tokens:
                     if token in ("=", "+", "-", "*", "/"):
@@ -201,15 +201,9 @@
                     else:
                         # Note: Using "mangled" name for the sake of the exec
                         # statement (below).
-                        cmd_buf += (
-                            "self._"
-                            + self.__class__.__name__
-                            + "__table['"
-                            + row_key
-                            + "']['"
-                            + token
-                            + "'] "
-                        )
+                        cmd_buf += "self._" + self.__class__.__name__ +\
+                                   "__table['" + row_key + "']['" +\
+                                   token + "'] "
                 exec(cmd_buf)
 
             for field_key, sub_value in value.items():
@@ -254,8 +248,8 @@
         col_names = [self.__row_key_field_name.title()]
         report_width = 40
         key_width = 40
-        format_string = "{0:<" + str(key_width) + "}"
-        dash_format_string = "{0:-<" + str(key_width) + "}"
+        format_string = '{0:<' + str(key_width) + '}'
+        dash_format_string = '{0:-<' + str(key_width) + '}'
         field_num = 0
 
         try:
@@ -263,31 +257,28 @@
             for row_key, value in first_rec[1].items():
                 field_num += 1
                 if isinstance(value, int):
-                    align = ":>"
+                    align = ':>'
                 else:
-                    align = ":<"
-                format_string += (
-                    " {" + str(field_num) + align + str(len(row_key)) + "}"
-                )
-                dash_format_string += (
-                    " {" + str(field_num) + ":->" + str(len(row_key)) + "}"
-                )
+                    align = ':<'
+                format_string += ' {' + str(field_num) + align +\
+                                 str(len(row_key)) + '}'
+                dash_format_string += ' {' + str(field_num) + ':->' +\
+                                      str(len(row_key)) + '}'
                 report_width += 1 + len(row_key)
                 col_names.append(row_key.title())
         except StopIteration:
             pass
         num_fields = field_num + 1
-        totals_line_fmt = "{0:=<" + str(report_width) + "}"
+        totals_line_fmt = '{0:=<' + str(report_width) + '}'
 
         buffer += format_string.format(*col_names) + "\n"
-        buffer += dash_format_string.format(*([""] * num_fields)) + "\n"
+        buffer += dash_format_string.format(*([''] * num_fields)) + "\n"
         for row_key, value in self.__table.items():
             buffer += format_string.format(row_key, *value.values()) + "\n"
 
-        buffer += totals_line_fmt.format("") + "\n"
-        buffer += (
-            format_string.format("Totals", *self.__totals_line.values()) + "\n"
-        )
+        buffer += totals_line_fmt.format('') + "\n"
+        buffer += format_string.format('Totals',
+                                       *self.__totals_line.values()) + "\n"
 
         return buffer
 
diff --git a/lib/tftp_update_utils.py b/lib/tftp_update_utils.py
index 8e389f8..266e774 100644
--- a/lib/tftp_update_utils.py
+++ b/lib/tftp_update_utils.py
@@ -4,10 +4,11 @@
 This module contains functions for tftp update.
 """
 
-import gen_print as gp
-import state as st
 from robot.libraries.BuiltIn import BuiltIn
 
+import state as st
+import gen_print as gp
+
 
 def get_pre_reboot_state():
     r"""
@@ -17,7 +18,7 @@
 
     global state
 
-    req_states = ["epoch_seconds"] + st.default_req_states
+    req_states = ['epoch_seconds'] + st.default_req_states
 
     gp.qprint_timen("Get system state.")
     state = st.get_state(req_states=req_states, quiet=0)
@@ -48,6 +49,4 @@
 
     gp.qprintn()
     if wait_state_check:
-        st.wait_state(
-            st.standby_match_state, wait_time="10 mins", interval="10 seconds"
-        )
+        st.wait_state(st.standby_match_state, wait_time="10 mins", interval="10 seconds")
diff --git a/lib/utilities.py b/lib/utilities.py
index 96421e8..1599ffb 100755
--- a/lib/utilities.py
+++ b/lib/utilities.py
@@ -4,10 +4,9 @@
 Generic utility functions.
 """
 import imp
-import random
 import string
+import random
 import subprocess
-
 from robot.libraries.BuiltIn import BuiltIn
 from robot.utils import DotDict
 
@@ -17,12 +16,8 @@
     Return random mac address in the following format.
     Example: 00:01:6C:80:02:78
     """
-    return ":".join(
-        map(
-            lambda x: "%02x" % x,
-            (random.randint(0x00, 0xFF) for _ in range(6)),
-        )
-    )
+    return ":".join(map(lambda x: "%02x" % x, (random.randint(0x00, 0xff)
+                                               for _ in range(6))))
 
 
 def random_ip():
@@ -30,17 +25,19 @@
     Return random ip address in the following format.
     Example: 9.3.128.100
     """
-    return ".".join(map(str, (random.randint(0, 255) for _ in range(4))))
+    return ".".join(map(str, (random.randint(0, 255)
+                              for _ in range(4))))
 
 
 def get_sensor(module_name, value):
     r"""
     Return sensor matched ID name.
     """
-    m = imp.load_source("module.name", module_name)
+    m = imp.load_source('module.name', module_name)
 
-    for i in m.ID_LOOKUP["SENSOR"]:
-        if m.ID_LOOKUP["SENSOR"][i] == value:
+    for i in m.ID_LOOKUP['SENSOR']:
+
+        if m.ID_LOOKUP['SENSOR'][i] == value:
             return i
 
     return 0xFF
@@ -50,12 +47,13 @@
     r"""
     Return sensor matched ID name from inventory.
     """
-    m = imp.load_source("module.name", module_name)
+    m = imp.load_source('module.name', module_name)
 
-    value = string.replace(value, m.INVENTORY_ROOT, "<inventory_root>")
+    value = string.replace(value, m.INVENTORY_ROOT, '<inventory_root>')
 
-    for i in m.ID_LOOKUP["SENSOR"]:
-        if m.ID_LOOKUP["SENSOR"][i] == value:
+    for i in m.ID_LOOKUP['SENSOR']:
+
+        if m.ID_LOOKUP['SENSOR'][i] == value:
             return i
 
     return 0xFF
@@ -75,11 +73,11 @@
     """
 
     inventory_list = []
-    m = imp.load_source("module.name", module_name)
+    m = imp.load_source('module.name', module_name)
 
-    for i in m.ID_LOOKUP["FRU"]:
-        s = m.ID_LOOKUP["FRU"][i]
-        s = s.replace("<inventory_root>", m.INVENTORY_ROOT)
+    for i in m.ID_LOOKUP['FRU']:
+        s = m.ID_LOOKUP['FRU'][i]
+        s = s.replace('<inventory_root>', m.INVENTORY_ROOT)
         inventory_list.append(s)
 
     return inventory_list
@@ -98,11 +96,11 @@
     Return FRU URI(s) list of a given type from inventory.
     """
     inventory_list = []
-    m = imp.load_source("module.name", module_name)
+    m = imp.load_source('module.name', module_name)
 
     for i in m.FRU_INSTANCES.keys():
-        if m.FRU_INSTANCES[i]["fru_type"] == fru:
-            s = i.replace("<inventory_root>", m.INVENTORY_ROOT)
+        if m.FRU_INSTANCES[i]['fru_type'] == fru:
+            s = i.replace('<inventory_root>', m.INVENTORY_ROOT)
             inventory_list.append(s)
 
     return inventory_list
@@ -121,13 +119,13 @@
     Return VPD URI(s) list of a FRU type from inventory.
     """
     inventory_list = []
-    m = imp.load_source("module.name", module_name)
+    m = imp.load_source('module.name', module_name)
 
-    for i in m.ID_LOOKUP["FRU_STR"]:
-        x = m.ID_LOOKUP["FRU_STR"][i]
+    for i in m.ID_LOOKUP['FRU_STR']:
+        x = m.ID_LOOKUP['FRU_STR'][i]
 
-        if m.FRU_INSTANCES[x]["fru_type"] == fru:
-            s = x.replace("<inventory_root>", m.INVENTORY_ROOT)
+        if m.FRU_INSTANCES[x]['fru_type'] == fru:
+            s = x.replace('<inventory_root>', m.INVENTORY_ROOT)
             inventory_list.append(s)
 
     return inventory_list
@@ -144,7 +142,7 @@
     r"""
     Python main func call.
     """
-    print(get_vpd_inventory_list("../data/Palmetto.py", "DIMM"))
+    print(get_vpd_inventory_list('../data/Palmetto.py', 'DIMM'))
 
 
 if __name__ == "__main__":
@@ -189,19 +187,15 @@
 
     # Run the mtr command.  Exclude the header line.  Trim leading space from
     # each line.  Change all multiple spaces delims to single space delims.
-    cmd_buf = (
-        "mtr --report "
-        + host
-        + " | tail -n +2 | sed -r -e 's/^[ ]+//g' -e 's/[ ]+/ /g'"
-    )
-    sub_proc = subprocess.Popen(
-        cmd_buf, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
-    )
+    cmd_buf = "mtr --report " + host +\
+        " | tail -n +2 | sed -r -e 's/^[ ]+//g' -e 's/[ ]+/ /g'"
+    sub_proc = subprocess.Popen(cmd_buf, shell=True, stdout=subprocess.PIPE,
+                                stderr=subprocess.STDOUT)
     out_buf, err_buf = sub_proc.communicate()
     shell_rc = sub_proc.returncode
 
     # Split the output by line.
-    rows = out_buf.rstrip("\n").split("\n")
+    rows = out_buf.rstrip('\n').split("\n")
 
     # Initialize report dictionary.
     report = DotDict()
@@ -211,16 +205,16 @@
         row_list = row.split(" ")
         # Create dictionary for the row.
         row = DotDict()
-        row["row_num"] = row_list[0].rstrip(".")
-        row["host"] = row_list[1]
-        row["loss"] = row_list[2].rstrip("%")
-        row["snt"] = row_list[3]
-        row["last"] = row_list[4]
-        row["avg"] = row_list[5]
-        row["best"] = row_list[6]
-        row["wrst"] = row_list[7]
-        row["stdev"] = row_list[8]
-        report[row["host"]] = row
+        row['row_num'] = row_list[0].rstrip('.')
+        row['host'] = row_list[1]
+        row['loss'] = row_list[2].rstrip('%')
+        row['snt'] = row_list[3]
+        row['last'] = row_list[4]
+        row['avg'] = row_list[5]
+        row['best'] = row_list[6]
+        row['wrst'] = row_list[7]
+        row['stdev'] = row_list[8]
+        report[row['host']] = row
 
     # Return the full report as dictionary of dictionaries.
     return report
@@ -302,8 +296,8 @@
     Input string      0a 01
     Return string     0x0a 0x01
     """
-    prefix_string = ""
+    prefix_string = ''
     data_list = string.strip().split(" ")
     for item in data_list:
-        prefix_string += prefix + item + " "
+        prefix_string += prefix + item + ' '
     return prefix_string.strip()
diff --git a/lib/utils.py b/lib/utils.py
index c8971b1..59d63e3 100644
--- a/lib/utils.py
+++ b/lib/utils.py
@@ -4,17 +4,15 @@
 Companion file to utils.robot.
 """
 
-import collections
-import json
 import os
-
-import bmc_ssh_utils as bsu
+import json
+import collections
 import gen_print as gp
 import gen_robot_keyword as grk
+import bmc_ssh_utils as bsu
 import var_funcs as vf
-from robot.libraries import DateTime
 from robot.libraries.BuiltIn import BuiltIn
-
+from robot.libraries import DateTime
 try:
     from robot.utils import DotDict
 except ImportError:
@@ -49,12 +47,12 @@
     """
 
     # Retrieve global variables.
-    power_policy_setup = int(
-        BuiltIn().get_variable_value("${power_policy_setup}", default=0)
-    )
-    bmc_power_policy_method = BuiltIn().get_variable_value(
-        "${bmc_power_policy_method}", default=0
-    )
+    power_policy_setup = \
+        int(BuiltIn().get_variable_value("${power_policy_setup}",
+                                         default=0))
+    bmc_power_policy_method = \
+        BuiltIn().get_variable_value("${bmc_power_policy_method}",
+                                     default=0)
     gp.dpvar(power_policy_setup)
 
     # If this function has already been run once, we need not continue.
@@ -67,27 +65,25 @@
     # determine what it should be.
     if bmc_power_policy_method == "":
         status, ret_values = grk.run_key_u("New Get Power Policy", ignore=1)
-        if status == "PASS":
-            bmc_power_policy_method = "New"
+        if status == 'PASS':
+            bmc_power_policy_method = 'New'
         else:
-            bmc_power_policy_method = "Old"
+            bmc_power_policy_method = 'Old'
 
     gp.qpvar(bmc_power_policy_method)
     # For old style, we will rewrite these global variable settings to old
     # values.
     if bmc_power_policy_method == "Old":
-        BuiltIn().set_global_variable(
-            "${RESTORE_LAST_STATE}", "RESTORE_LAST_STATE"
-        )
-        BuiltIn().set_global_variable("${ALWAYS_POWER_ON}", "ALWAYS_POWER_ON")
-        BuiltIn().set_global_variable(
-            "${ALWAYS_POWER_OFF}", "ALWAYS_POWER_OFF"
-        )
+        BuiltIn().set_global_variable("${RESTORE_LAST_STATE}",
+                                      "RESTORE_LAST_STATE")
+        BuiltIn().set_global_variable("${ALWAYS_POWER_ON}",
+                                      "ALWAYS_POWER_ON")
+        BuiltIn().set_global_variable("${ALWAYS_POWER_OFF}",
+                                      "ALWAYS_POWER_OFF")
 
     # Set global variables to control subsequent calls to this function.
-    BuiltIn().set_global_variable(
-        "${bmc_power_policy_method}", bmc_power_policy_method
-    )
+    BuiltIn().set_global_variable("${bmc_power_policy_method}",
+                                  bmc_power_policy_method)
     BuiltIn().set_global_variable("${power_policy_setup}", 1)
 
 
@@ -107,16 +103,14 @@
     method of storing the policy value.
     """
 
-    valid_power_policy_vars = BuiltIn().get_variable_value(
-        "${valid_power_policy_vars}"
-    )
+    valid_power_policy_vars = \
+        BuiltIn().get_variable_value("${valid_power_policy_vars}")
 
     if policy not in valid_power_policy_vars:
         return policy
 
-    status, ret_values = grk.run_key_u(
-        "Get Variable Value  ${" + policy + "}", quiet=1
-    )
+    status, ret_values = grk.run_key_u("Get Variable Value  ${" + policy + "}",
+                                       quiet=1)
     return ret_values
 
 
@@ -138,7 +132,7 @@
       [rtc_in_local_tz]:          no
     """
 
-    out_buf, stderr, rc = bsu.bmc_execute_command("timedatectl")
+    out_buf, stderr, rc = bsu.bmc_execute_command('timedatectl')
     # Example of output returned by call to timedatectl:
     #       Local time: Fri 2017-11-03 15:27:56 UTC
     #   Universal time: Fri 2017-11-03 15:27:56 UTC
@@ -163,41 +157,40 @@
         result_time_dict[key] = value
         if not key.endswith("_time"):
             continue
-        result_time_dict[key + "_seconds"] = int(
-            DateTime.convert_date(value, result_format="epoch")
-        )
+        result_time_dict[key + '_seconds'] = \
+            int(DateTime.convert_date(value, result_format='epoch'))
 
     return result_time_dict
 
 
 def get_bmc_df(df_parm_string=""):
     r"""
-        Get df report from BMC and return as a report "object".
+    Get df report from BMC and return as a report "object".
 
-        A df report object is a list where each entry is a dictionary whose keys
-        are the field names from the first entry in report_list.
+    A df report object is a list where each entry is a dictionary whose keys
+    are the field names from the first entry in report_list.
 
-        Example df report object:
+    Example df report object:
 
-        df_report:
-          df_report[0]:
-            [filesystem]:    dev
-            [1k-blocks]:     247120
-            [used]:          0
-            [available]:     247120
-            [use%]:          0%
-            [mounted]:       /dev
-          df_report[1]:
-            [filesystem]:    dev
-            [1k-blocks]:     247120
-            [used]:          0
-            [available]:     247120
-            [use%]:          0%
-            [mounted]:       /dev
+    df_report:
+      df_report[0]:
+        [filesystem]:    dev
+        [1k-blocks]:     247120
+        [used]:          0
+        [available]:     247120
+        [use%]:          0%
+        [mounted]:       /dev
+      df_report[1]:
+        [filesystem]:    dev
+        [1k-blocks]:     247120
+        [used]:          0
+        [available]:     247120
+        [use%]:          0%
+        [mounted]:       /dev
 
-    .   Description of argument(s):
-        df_parm_string  A string containing valid df command parms (e.g.
-                        "-h /var").
+.   Description of argument(s):
+    df_parm_string  A string containing valid df command parms (e.g.
+                    "-h /var").
     """
 
     out_buf, stderr, rc = bsu.bmc_execute_command("df " + df_parm_string)
@@ -217,11 +210,11 @@
 
 def compare_mac_address(sys_mac_addr, user_mac_addr):
     r"""
-        Return 1 if the MAC value matched, otherwise 0.
+    Return 1 if the MAC value matched, otherwise 0.
 
-    .   Description of argument(s):
-        sys_mac_addr   A valid system MAC string (e.g. "70:e2:84:14:2a:08")
-        user_mac_addr  A user provided MAC string (e.g. "70:e2:84:14:2a:08")
+.   Description of argument(s):
+    sys_mac_addr   A valid system MAC string (e.g. "70:e2:84:14:2a:08")
+    user_mac_addr  A user provided MAC string (e.g. "70:e2:84:14:2a:08")
     """
 
     index = 0
@@ -302,11 +295,8 @@
 
     # Using sed and tail to massage the data a bit before running
     # key_value_outbuf_to_dict.
-    cmd_buf = (
-        "ethtool "
-        + interface_name
-        + " | sed -re 's/(.* link modes:)(.*)/\\1\\n\\2/g' | tail -n +2"
-    )
+    cmd_buf = "ethtool " + interface_name +\
+        " | sed -re 's/(.* link modes:)(.*)/\\1\\n\\2/g' | tail -n +2"
     stdout, stderr, rc = bsu.os_execute_command(cmd_buf)
     result = vf.key_value_outbuf_to_dict(stdout, process_indent=1, strip=" \t")
 
@@ -348,7 +338,7 @@
       [openbmc_target_machine]:       witherspoon
     """
 
-    out_buf, stderr, rc = bsu.bmc_execute_command("cat /etc/os-release")
+    out_buf, stderr, rc = bsu.bmc_execute_command('cat /etc/os-release')
     return vf.key_value_outbuf_to_dict(out_buf, delim="=", strip='"')
 
 
@@ -381,7 +371,7 @@
       [redhat_support_product_version]:               7.6
     """
 
-    out_buf, stderr, rc = bsu.os_execute_command("cat /etc/os-release")
+    out_buf, stderr, rc = bsu.os_execute_command('cat /etc/os-release')
     return vf.key_value_outbuf_to_dict(out_buf, delim="=", strip='"')
 
 
@@ -396,12 +386,10 @@
     """
 
     # Default print_out to 1.
-    if "print_out" not in bsu_options:
-        bsu_options["print_out"] = 1
+    if 'print_out' not in bsu_options:
+        bsu_options['print_out'] = 1
 
-    stdout, stderr, rc = bsu.bmc_execute_command(
-        "pdbg " + option_string, **bsu_options
-    )
+    stdout, stderr, rc = bsu.bmc_execute_command('pdbg ' + option_string, **bsu_options)
     return stdout
 
 
@@ -418,8 +406,8 @@
     """
 
     # Default print_out to 1.
-    if "print_out" not in bsu_options:
-        bsu_options["print_out"] = 1
+    if 'print_out' not in bsu_options:
+        bsu_options['print_out'] = 1
 
     stdout, stderr, rc = bsu.bmc_execute_command(option_string, **bsu_options)
     return stdout
@@ -435,7 +423,7 @@
     """
 
     n = int(n)
-    data = [stri[index : index + n] for index in range(0, len(stri), n)]
+    data = [stri[index: index + n] for index in range(0, len(stri), n)]
     return data
 
 
@@ -481,6 +469,6 @@
     returns decoded string of encoded byte.
     """
 
-    encoded_string = input.encode("ascii", "ignore")
+    encoded_string = input.encode('ascii', 'ignore')
     decoded_string = encoded_string.decode()
     return decoded_string
diff --git a/lib/utils_files.py b/lib/utils_files.py
index 70e0887..0b19432 100755
--- a/lib/utils_files.py
+++ b/lib/utils_files.py
@@ -4,21 +4,22 @@
 This module contains file functions such as file_diff.
 """
 
+import time
 import os
 import re
-import time
-
 from gen_cmd import cmd_fnc_u
-
 robot_env = 1
 try:
-    from robot.libraries import DateTime
     from robot.libraries.BuiltIn import BuiltIn
+    from robot.libraries import DateTime
 except ImportError:
     robot_env = 0
 
 
-def file_diff(file1_path, file2_path, diff_file_path, skip_string):
+def file_diff(file1_path,
+              file2_path,
+              diff_file_path,
+              skip_string):
     r"""
     Compare the contents of two text files.  The comparison uses the Unix
     'diff' command.  Differences can be selectively ignored by use of
@@ -58,12 +59,12 @@
 
     now = time.strftime("%Y-%m-%d %H:%M:%S")
 
-    if not os.path.exists(file1_path) or (not os.path.exists(file2_path)):
+    if (not os.path.exists(file1_path) or (not os.path.exists(file2_path))):
         return INPUT_FILE_DOES_NOT_EXIST
     try:
-        with open(file1_path, "r") as file:
+        with open(file1_path, 'r') as file:
             initial = file.readlines()
-        with open(file2_path, "r") as file:
+        with open(file2_path, 'r') as file:
             final = file.readlines()
     except IOError:
         file.close()
@@ -78,30 +79,24 @@
     if len(initial) < min_file_byte_size:
         return INPUT_FILE_MALFORMED
 
-    if initial == final:
+    if (initial == final):
         try:
-            file = open(diff_file_path, "w")
+            file = open(diff_file_path, 'w')
         except IOError:
             file.close()
-        line_to_print = (
-            "Specified skip (ignore) string = " + skip_string + "\n\n"
-        )
+        line_to_print = "Specified skip (ignore) string = " + \
+            skip_string + "\n\n"
         file.write(line_to_print)
-        line_to_print = (
-            now
-            + " found no difference between file "
-            + file1_path
-            + " and "
-            + file2_path
-            + "\n"
-        )
+        line_to_print = now + " found no difference between file " + \
+            file1_path + " and " + \
+            file2_path + "\n"
         file.write(line_to_print)
         file.close()
         return FILES_MATCH
 
     # Find the differences and write difference report to diff_file_path file
     try:
-        file = open(diff_file_path, "w")
+        file = open(diff_file_path, 'w')
     except IOError:
         file.close()
         return IO_EXCEPTION_WRITING_FILE
@@ -110,10 +105,9 @@
     # if skip_string="size,capacity",  command = 'diff  -I "size"
     # -I "capacity"  file1_path file2_path'.
     skip_list = filter(None, re.split(r"[ ]*,[ ]*", skip_string))
-    ignore_string = " ".join([("-I " + '"' + x + '"') for x in skip_list])
-    command = " ".join(
-        filter(None, ["diff", ignore_string, file1_path, file2_path])
-    )
+    ignore_string = ' '.join([("-I " + '"' + x + '"') for x in skip_list])
+    command = ' '.join(filter(None, ["diff", ignore_string, file1_path,
+                                     file2_path]))
 
     line_to_print = now + "   " + command + "\n"
     file.write(line_to_print)
diff --git a/lib/var_funcs.py b/lib/var_funcs.py
index 0009b54..fdde68e 100644
--- a/lib/var_funcs.py
+++ b/lib/var_funcs.py
@@ -14,9 +14,9 @@
 
 import collections
 
-import func_args as fa
-import gen_misc as gm
 import gen_print as gp
+import gen_misc as gm
+import func_args as fa
 
 
 def create_var_dict(*args):
@@ -57,15 +57,13 @@
     return result_dict
 
 
-default_record_delim = ":"
-default_key_val_delim = "."
+default_record_delim = ':'
+default_key_val_delim = '.'
 
 
-def join_dict(
-    dict,
-    record_delim=default_record_delim,
-    key_val_delim=default_key_val_delim,
-):
+def join_dict(dict,
+              record_delim=default_record_delim,
+              key_val_delim=default_key_val_delim):
     r"""
     Join a dictionary's keys and values into a string and return the string.
 
@@ -89,17 +87,14 @@
     str1:                                             first_name.Steve:last_name.Smith
     """
 
-    format_str = "%s" + key_val_delim + "%s"
-    return record_delim.join(
-        [format_str % (key, value) for (key, value) in dict.items()]
-    )
+    format_str = '%s' + key_val_delim + '%s'
+    return record_delim.join([format_str % (key, value) for (key, value) in
+                              dict.items()])
 
 
-def split_to_dict(
-    string,
-    record_delim=default_record_delim,
-    key_val_delim=default_key_val_delim,
-):
+def split_to_dict(string,
+                  record_delim=default_record_delim,
+                  key_val_delim=default_key_val_delim):
     r"""
     Split a string into a dictionary and return it.
 
@@ -141,7 +136,9 @@
     return result_dict
 
 
-def create_file_path(file_name_dict, dir_path="/tmp/", file_suffix=""):
+def create_file_path(file_name_dict,
+                     dir_path="/tmp/",
+                     file_suffix=""):
     r"""
     Create a file path using the given parameters and return it.
 
@@ -190,14 +187,18 @@
     dir_path = os.path.dirname(file_path) + os.sep
     file_path = os.path.basename(file_path)
 
-    result_dict["dir_path"] = dir_path
+    result_dict['dir_path'] = dir_path
 
     result_dict.update(split_to_dict(file_path))
 
     return result_dict
 
 
-def parse_key_value(string, delim=":", strip=" ", to_lower=1, underscores=1):
+def parse_key_value(string,
+                    delim=":",
+                    strip=" ",
+                    to_lower=1,
+                    underscores=1):
     r"""
     Parse a key/value string and return as a key/value tuple.
 
@@ -251,7 +252,9 @@
     return key, value
 
 
-def key_value_list_to_dict(key_value_list, process_indent=0, **args):
+def key_value_list_to_dict(key_value_list,
+                           process_indent=0,
+                           **args):
     r"""
     Convert a list containing key/value strings or tuples to a dictionary and return it.
 
@@ -368,9 +371,8 @@
         if len(sub_list) > 0:
             if any(delim in word for word in sub_list):
                 # If delim is found anywhere in the sub_list, we'll process as a sub-dictionary.
-                result_dict[parent_key] = key_value_list_to_dict(
-                    sub_list, **args
-                )
+                result_dict[parent_key] = key_value_list_to_dict(sub_list,
+                                                                 **args)
             else:
                 result_dict[parent_key] = list(map(str.strip, sub_list))
             del sub_list[:]
@@ -392,7 +394,8 @@
     return result_dict
 
 
-def key_value_outbuf_to_dict(out_buf, **args):
+def key_value_outbuf_to_dict(out_buf,
+                             **args):
     r"""
     Convert a buffer with a key/value string on each line to a dictionary and return it.
 
@@ -435,7 +438,8 @@
     return key_value_list_to_dict(key_var_list, **args)
 
 
-def key_value_outbuf_to_dicts(out_buf, **args):
+def key_value_outbuf_to_dicts(out_buf,
+                              **args):
     r"""
     Convert a buffer containing multiple sections with key/value strings on each line to a list of
     dictionaries and return it.
@@ -503,13 +507,11 @@
     **args                          Arguments to be interpreted by parse_key_value.  (See docstring of
                                     parse_key_value function for details).
     """
-    return [
-        key_value_outbuf_to_dict(x, **args)
-        for x in re.split("\n[\n]+", out_buf)
-    ]
+    return [key_value_outbuf_to_dict(x, **args) for x in re.split('\n[\n]+', out_buf)]
 
 
 def create_field_desc_regex(line):
+
     r"""
     Create a field descriptor regular expression based on the input line and return it.
 
@@ -565,12 +567,14 @@
             regexes.append("(.{" + str(len(descriptor)) + "})")
 
     # Join the regexes list into a regex string.
-    field_desc_regex = " ".join(regexes)
+    field_desc_regex = ' '.join(regexes)
 
     return field_desc_regex
 
 
-def list_to_report(report_list, to_lower=1, field_delim=None):
+def list_to_report(report_list,
+                   to_lower=1,
+                   field_delim=None):
     r"""
     Convert a list containing report text lines to a report "object" and return it.
 
@@ -656,9 +660,8 @@
     else:
         # Pad the line with spaces on the right to facilitate processing with field_desc_regex.
         header_line = pad_format_string % header_line
-        columns = list(
-            map(str.strip, re.findall(field_desc_regex, header_line)[0])
-        )
+        columns = list(map(str.strip,
+                           re.findall(field_desc_regex, header_line)[0]))
 
     report_obj = []
     for report_line in report_list[1:]:
@@ -667,9 +670,8 @@
         else:
             # Pad the line with spaces on the right to facilitate processing with field_desc_regex.
             report_line = pad_format_string % report_line
-            line = list(
-                map(str.strip, re.findall(field_desc_regex, report_line)[0])
-            )
+            line = list(map(str.strip,
+                            re.findall(field_desc_regex, report_line)[0]))
         try:
             line_dict = collections.OrderedDict(zip(columns, line))
         except AttributeError:
@@ -679,7 +681,8 @@
     return report_obj
 
 
-def outbuf_to_report(out_buf, **args):
+def outbuf_to_report(out_buf,
+                     **args):
     r"""
     Convert a text buffer containing report lines to a report "object" and return it.
 
@@ -822,11 +825,8 @@
             if len(struct_key_values) == 0:
                 return False
             if regex:
-                matches = [
-                    x
-                    for x in struct_key_values
-                    if re.search(match_value, str(x))
-                ]
+                matches = [x for x in struct_key_values
+                           if re.search(match_value, str(x))]
                 if not matches:
                     return False
             elif match_value not in struct_key_values:
diff --git a/lib/var_stack.py b/lib/var_stack.py
index 77cf4a0..3ea3813 100644
--- a/lib/var_stack.py
+++ b/lib/var_stack.py
@@ -4,9 +4,9 @@
 Define the var_stack class.
 """
 
+import sys
 import collections
 import copy
-import sys
 
 try:
     from robot.utils import DotDict
@@ -17,6 +17,7 @@
 
 
 class var_stack:
+
     r"""
     Define the variable stack class.
 
@@ -64,7 +65,8 @@
           [var1][0]:  mike
     """
 
-    def __init__(self, obj_name="var_stack"):
+    def __init__(self,
+                 obj_name='var_stack'):
         r"""
         Initialize a new object of this class type.
 
@@ -88,7 +90,7 @@
 
         buffer += self.__obj_name + ":\n"
         indent = 2
-        buffer += gp.sprint_varx("stack_dict", self.__stack_dict, indent)
+        buffer += gp.sprint_varx('stack_dict', self.__stack_dict, indent)
 
         return buffer
 
@@ -99,7 +101,9 @@
 
         sys.stdout.write(self.sprint_obj())
 
-    def push(self, var_value, var_name=""):
+    def push(self,
+             var_value,
+             var_name=""):
         r"""
         push the var_name/var_value pair onto the stack.
 
@@ -120,7 +124,8 @@
         else:
             self.__stack_dict[var_name] = copy.deepcopy([var_value])
 
-    def pop(self, var_name=""):
+    def pop(self,
+            var_name=""):
         r"""
         Pop the value for the given var_name from the stack and return it.
 
diff --git a/lib/vpd_utils.py b/lib/vpd_utils.py
index 754d255..03581bb 100644
--- a/lib/vpd_utils.py
+++ b/lib/vpd_utils.py
@@ -5,9 +5,8 @@
 """
 
 import json
-
-import bmc_ssh_utils as bsu
 import func_args as fa
+import bmc_ssh_utils as bsu
 
 
 def vpdtool(option_string, **bsu_options):
@@ -45,14 +44,12 @@
     """
 
     bsu_options = fa.args_to_objects(bsu_options)
-    out_buf, stderr, rc = bsu.bmc_execute_command(
-        "vpd-tool " + option_string, **bsu_options
-    )
+    out_buf, stderr, rc = bsu.bmc_execute_command('vpd-tool ' + option_string, **bsu_options)
 
     # Only return output if its not a VPD write command.
-    if "-w" not in option_string:
+    if '-w' not in option_string:
         out_buf = json.loads(out_buf)
-        if "-r" in option_string:
+        if '-r' in option_string:
             return out_buf
         else:
             return out_buf[0]
diff --git a/lib/wrap_utils.py b/lib/wrap_utils.py
index ebf4f61..231dff8 100755
--- a/lib/wrap_utils.py
+++ b/lib/wrap_utils.py
@@ -7,9 +7,10 @@
 """
 
 
-def create_func_def_string(
-    base_func_name, wrap_func_name, func_body_template, replace_dict
-):
+def create_func_def_string(base_func_name,
+                           wrap_func_name,
+                           func_body_template,
+                           replace_dict):
     r"""
     Create and return a complete function definition as a string.  The caller may run "exec" on the resulting
     string to create the desired function.
@@ -40,10 +41,10 @@
     func_def.insert(0, func_def_line)
     # Make sure the replace_dict has a 'call_line'/call_line pair so that any '<call_line>' text gets
     # replaced as intended.
-    replace_dict["call_line"] = call_line
+    replace_dict['call_line'] = call_line
 
     # Do the replacements.
     for key, value in replace_dict.items():
         func_def = [w.replace("<" + key + ">", value) for w in func_def]
 
-    return "\n".join(func_def) + "\n"
+    return '\n'.join(func_def) + "\n"