black: re-format

black and isort are enabled in openbmc-build-scripts for Python files to
ensure consistent formatting.  Re-run the formatters across the whole
repository.
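
The same result can be reproduced locally by running the formatters from
the repository root, for example (a sketch; the exact options used by the
build scripts may differ):

    black .
    isort .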

Signed-off-by: Patrick Williams <patrick@stwcx.xyz>
Change-Id: I944f1915ece753f72a3fa654902d445a9749d0f9
diff --git a/ffdc/collect_ffdc.py b/ffdc/collect_ffdc.py
index d709a96..ef96c6b 100644
--- a/ffdc/collect_ffdc.py
+++ b/ffdc/collect_ffdc.py
@@ -6,12 +6,13 @@
 
 import os
 import sys
+
 import click
 
 # ---------Set sys.path for cli command execution---------------------------------------
 # Absolute path to openbmc-test-automation/ffdc
 abs_path = os.path.abspath(os.path.dirname(sys.argv[0]))
-full_path = abs_path.split('ffdc')[0]
+full_path = abs_path.split("ffdc")[0]
 sys.path.append(full_path)
 # Walk path and append to sys.path
 for root, dirs, files in os.walk(full_path):
@@ -21,63 +22,105 @@
 from ffdc_collector import ffdc_collector  # NOQA
 
 
-@click.command(context_settings=dict(help_option_names=['-h', '--help']))
-@click.option('-r', '--remote',
-              help="Hostname/IP of the remote host")
-@click.option('-u', '--username',
-              help="Username of the remote host.")
-@click.option('-p', '--password',
-              help="Password of the remote host.")
-@click.option('-c', '--config', default=abs_path + "/ffdc_config.yaml",
-              show_default=True, help="YAML Configuration file for log collection.")
-@click.option('-l', '--location', default="/tmp",
-              show_default=True, help="Location to save logs")
-@click.option('-t', '--type',
-              help="OS type of the remote (targeting) host. OPENBMC, RHEL, UBUNTU, SLES, AIX")
-@click.option('-rp', '--protocol', default="ALL",
-              show_default=True,
-              help="Select protocol to communicate with remote host.")
-@click.option('-e', '--env_vars', show_default=True,
-              help="Environment variables e.g: {'var':value}")
-@click.option('-ec', '--econfig', show_default=True,
-              help="Predefine environment variables, refer en_vars_template.yaml ")
-@click.option('--log_level', default="INFO",
-              show_default=True,
-              help="Log level (CRITICAL, ERROR, WARNING, INFO, DEBUG)")
-def cli_ffdc(remote,
-             username,
-             password,
-             config,
-             location,
-             type,
-             protocol,
-             env_vars,
-             econfig,
-             log_level):
+@click.command(context_settings=dict(help_option_names=["-h", "--help"]))
+@click.option("-r", "--remote", help="Hostname/IP of the remote host")
+@click.option("-u", "--username", help="Username of the remote host.")
+@click.option("-p", "--password", help="Password of the remote host.")
+@click.option(
+    "-c",
+    "--config",
+    default=abs_path + "/ffdc_config.yaml",
+    show_default=True,
+    help="YAML Configuration file for log collection.",
+)
+@click.option(
+    "-l",
+    "--location",
+    default="/tmp",
+    show_default=True,
+    help="Location to save logs",
+)
+@click.option(
+    "-t",
+    "--type",
+    help=(
+        "OS type of the remote (targeting) host. OPENBMC, RHEL, UBUNTU,"
+        " SLES, AIX"
+    ),
+)
+@click.option(
+    "-rp",
+    "--protocol",
+    default="ALL",
+    show_default=True,
+    help="Select protocol to communicate with remote host.",
+)
+@click.option(
+    "-e",
+    "--env_vars",
+    show_default=True,
+    help="Environment variables e.g: {'var':value}",
+)
+@click.option(
+    "-ec",
+    "--econfig",
+    show_default=True,
+    help="Predefine environment variables, refer en_vars_template.yaml ",
+)
+@click.option(
+    "--log_level",
+    default="INFO",
+    show_default=True,
+    help="Log level (CRITICAL, ERROR, WARNING, INFO, DEBUG)",
+)
+def cli_ffdc(
+    remote,
+    username,
+    password,
+    config,
+    location,
+    type,
+    protocol,
+    env_vars,
+    econfig,
+    log_level,
+):
     r"""
     Stand alone CLI to generate and collect FFDC from the selected target.
     """
 
-    click.echo("\n********** FFDC (First Failure Data Collection) Starts **********")
+    click.echo(
+        "\n********** FFDC (First Failure Data Collection) Starts **********"
+    )
 
     if input_options_ok(remote, username, password, config, type):
-        this_ffdc = ffdc_collector(remote,
-                                   username,
-                                   password,
-                                   config,
-                                   location,
-                                   type,
-                                   protocol,
-                                   env_vars,
-                                   econfig,
-                                   log_level)
+        this_ffdc = ffdc_collector(
+            remote,
+            username,
+            password,
+            config,
+            location,
+            type,
+            protocol,
+            env_vars,
+            econfig,
+            log_level,
+        )
         this_ffdc.collect_ffdc()
 
         if len(os.listdir(this_ffdc.ffdc_dir_path)) == 0:
-            click.echo("\n\tFFDC Collection from " + remote + " has failed.\n\n")
+            click.echo(
+                "\n\tFFDC Collection from " + remote + " has failed.\n\n"
+            )
         else:
-            click.echo(str("\n\t" + str(len(os.listdir(this_ffdc.ffdc_dir_path)))
-                           + " files were retrieved from " + remote))
+            click.echo(
+                str(
+                    "\n\t"
+                    + str(len(os.listdir(this_ffdc.ffdc_dir_path)))
+                    + " files were retrieved from "
+                    + remote
+                )
+            )
             click.echo("\tFiles are stored in " + this_ffdc.ffdc_dir_path)
 
         click.echo("\tTotal elapsed time " + this_ffdc.elapsed_time + "\n\n")
@@ -93,27 +136,37 @@
 
     if not remote:
         all_options_ok = False
-        print("\
-        \n\tERROR: Name/IP of the remote host is not specified in CLI options.")
+        print(
+            "        \n\tERROR: Name/IP of the remote host is not specified in"
+            " CLI options."
+        )
     if not username:
         all_options_ok = False
-        print("\
-        \n\tERROR: User of the remote host is not specified in CLI options.")
+        print(
+            "        \n\tERROR: User of the remote host is not specified in"
+            " CLI options."
+        )
     if not password:
         all_options_ok = False
-        print("\
-        \n\tERROR: Password of the user remote host is not specified in CLI options.")
+        print(
+            "        \n\tERROR: Password of the user remote host is not"
+            " specified in CLI options."
+        )
     if not type:
         all_options_ok = False
-        print("\
-        \n\tERROR: Remote host os type is not specified in CLI options.")
+        print(
+            "        \n\tERROR: Remote host os type is not specified in CLI"
+            " options."
+        )
     if not os.path.isfile(config):
         all_options_ok = False
-        print("\
-        \n\tERROR: Config file %s is not found.  Please verify path and filename." % config)
+        print(
+            "        \n\tERROR: Config file %s is not found.  Please verify"
+            " path and filename." % config
+        )
 
     return all_options_ok
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     cli_ffdc()
diff --git a/ffdc/ffdc_collector.py b/ffdc/ffdc_collector.py
index 91fe261..b21044c 100644
--- a/ffdc/ffdc_collector.py
+++ b/ffdc/ffdc_collector.py
@@ -4,18 +4,17 @@
 See class prolog below for details.
 """
 
+import json
+import logging
+import os
+import platform
+import re
+import subprocess
+import sys
+import time
 from errno import EACCES, EPERM
 
-import os
-import re
-import sys
 import yaml
-import json
-import time
-import logging
-import platform
-from errno import EACCES, EPERM
-import subprocess
 
 script_dir = os.path.dirname(os.path.abspath(__file__))
 sys.path.append(script_dir)
@@ -24,8 +23,8 @@
     for dir in dirs:
         sys.path.append(os.path.join(root, dir))
 
-from ssh_utility import SSHRemoteclient         # NOQA
-from telnet_utility import TelnetRemoteclient   # NOQA
+from ssh_utility import SSHRemoteclient  # NOQA
+from telnet_utility import TelnetRemoteclient  # NOQA
 
 r"""
 User define plugins python functions.
@@ -43,11 +42,11 @@
        - arg1
        - arg2
 """
-plugin_dir = __file__.split(__file__.split("/")[-1])[0] + '/plugins'
+plugin_dir = __file__.split(__file__.split("/")[-1])[0] + "/plugins"
 sys.path.append(plugin_dir)
 try:
     for module in os.listdir(plugin_dir):
-        if module == '__init__.py' or module[-3:] != '.py':
+        if module == "__init__.py" or module[-3:] != ".py":
             continue
         plugin_module = "plugins." + module[:-3]
         # To access the module plugin.<module name>.<function>
@@ -99,34 +98,35 @@
 global_plugin_type_list = []
 
 # Path where logs are to be stored or written.
-global_log_store_path = ''
+global_log_store_path = ""
 
 # Plugin error state defaults.
 plugin_error_dict = {
-    'exit_on_error': False,
-    'continue_on_error': False,
+    "exit_on_error": False,
+    "continue_on_error": False,
 }
 
 
 class ffdc_collector:
-
     r"""
     Execute commands from configuration file to collect log files.
     Fetch and store generated files at the specified location.
 
     """
 
-    def __init__(self,
-                 hostname,
-                 username,
-                 password,
-                 ffdc_config,
-                 location,
-                 remote_type,
-                 remote_protocol,
-                 env_vars,
-                 econfig,
-                 log_level):
+    def __init__(
+        self,
+        hostname,
+        username,
+        password,
+        ffdc_config,
+        location,
+        remote_type,
+        remote_protocol,
+        env_vars,
+        econfig,
+        log_level,
+    ):
         r"""
         Description of argument(s):
 
@@ -156,7 +156,7 @@
         self.env_vars = env_vars
         self.econfig = econfig
         self.start_time = 0
-        self.elapsed_time = ''
+        self.elapsed_time = ""
         self.logger = None
 
         # Set prefix values for scp files and directory.
@@ -174,7 +174,7 @@
 
         if self.verify_script_env():
             # Load default or user define YAML configuration file.
-            with open(self.ffdc_config, 'r') as file:
+            with open(self.ffdc_config, "r") as file:
                 try:
                     self.ffdc_actions = yaml.load(file, Loader=yaml.SafeLoader)
                 except yaml.YAMLError as e:
@@ -183,7 +183,9 @@
 
             if self.target_type not in self.ffdc_actions.keys():
                 self.logger.error(
-                    "\n\tERROR: %s is not listed in %s.\n\n" % (self.target_type, self.ffdc_config))
+                    "\n\tERROR: %s is not listed in %s.\n\n"
+                    % (self.target_type, self.ffdc_config)
+                )
                 sys.exit(-1)
         else:
             sys.exit(-1)
@@ -194,43 +196,62 @@
         self.load_env()
 
     def verify_script_env(self):
-
         # Import to log version
         import click
         import paramiko
 
         run_env_ok = True
 
-        redfishtool_version = self.run_tool_cmd('redfishtool -V').split(' ')[2].strip('\n')
-        ipmitool_version = self.run_tool_cmd('ipmitool -V').split(' ')[2]
+        redfishtool_version = (
+            self.run_tool_cmd("redfishtool -V").split(" ")[2].strip("\n")
+        )
+        ipmitool_version = self.run_tool_cmd("ipmitool -V").split(" ")[2]
 
         self.logger.info("\n\t---- Script host environment ----")
-        self.logger.info("\t{:<10}  {:<10}".format('Script hostname', os.uname()[1]))
-        self.logger.info("\t{:<10}  {:<10}".format('Script host os', platform.platform()))
-        self.logger.info("\t{:<10}  {:>10}".format('Python', platform.python_version()))
-        self.logger.info("\t{:<10}  {:>10}".format('PyYAML', yaml.__version__))
-        self.logger.info("\t{:<10}  {:>10}".format('click', click.__version__))
-        self.logger.info("\t{:<10}  {:>10}".format('paramiko', paramiko.__version__))
-        self.logger.info("\t{:<10}  {:>9}".format('redfishtool', redfishtool_version))
-        self.logger.info("\t{:<10}  {:>12}".format('ipmitool', ipmitool_version))
+        self.logger.info(
+            "\t{:<10}  {:<10}".format("Script hostname", os.uname()[1])
+        )
+        self.logger.info(
+            "\t{:<10}  {:<10}".format("Script host os", platform.platform())
+        )
+        self.logger.info(
+            "\t{:<10}  {:>10}".format("Python", platform.python_version())
+        )
+        self.logger.info("\t{:<10}  {:>10}".format("PyYAML", yaml.__version__))
+        self.logger.info("\t{:<10}  {:>10}".format("click", click.__version__))
+        self.logger.info(
+            "\t{:<10}  {:>10}".format("paramiko", paramiko.__version__)
+        )
+        self.logger.info(
+            "\t{:<10}  {:>9}".format("redfishtool", redfishtool_version)
+        )
+        self.logger.info(
+            "\t{:<10}  {:>12}".format("ipmitool", ipmitool_version)
+        )
 
-        if eval(yaml.__version__.replace('.', ',')) < (5, 3, 0):
-            self.logger.error("\n\tERROR: Python or python packages do not meet minimum version requirement.")
-            self.logger.error("\tERROR: PyYAML version 5.3.0 or higher is needed.\n")
+        if eval(yaml.__version__.replace(".", ",")) < (5, 3, 0):
+            self.logger.error(
+                "\n\tERROR: Python or python packages do not meet minimum"
+                " version requirement."
+            )
+            self.logger.error(
+                "\tERROR: PyYAML version 5.3.0 or higher is needed.\n"
+            )
             run_env_ok = False
 
         self.logger.info("\t---- End script host environment ----")
         return run_env_ok
 
-    def script_logging(self,
-                       log_level_attr):
+    def script_logging(self, log_level_attr):
         r"""
         Create logger
 
         """
         self.logger = logging.getLogger()
         self.logger.setLevel(log_level_attr)
-        log_file_handler = logging.FileHandler(self.ffdc_dir_path + "collector.log")
+        log_file_handler = logging.FileHandler(
+            self.ffdc_dir_path + "collector.log"
+        )
 
         stdout_handler = logging.StreamHandler(sys.stdout)
         self.logger.addHandler(log_file_handler)
@@ -246,11 +267,15 @@
         """
         response = os.system("ping -c 1 %s  2>&1 >/dev/null" % self.hostname)
         if response == 0:
-            self.logger.info("\n\t[Check] %s is ping-able.\t\t [OK]" % self.hostname)
+            self.logger.info(
+                "\n\t[Check] %s is ping-able.\t\t [OK]" % self.hostname
+            )
             return True
         else:
             self.logger.error(
-                "\n\tERROR: %s is not ping-able. FFDC collection aborted.\n" % self.hostname)
+                "\n\tERROR: %s is not ping-able. FFDC collection aborted.\n"
+                % self.hostname
+            )
             sys.exit(-1)
 
     def collect_ffdc(self):
@@ -259,7 +284,9 @@
 
         """
 
-        self.logger.info("\n\t---- Start communicating with %s ----" % self.hostname)
+        self.logger.info(
+            "\n\t---- Start communicating with %s ----" % self.hostname
+        )
         self.start_time = time.time()
 
         # Find the list of target and protocol supported.
@@ -271,24 +298,40 @@
                 continue
 
             for k, v in config_dict[target_type].items():
-                if config_dict[target_type][k]['PROTOCOL'][0] not in check_protocol_list:
-                    check_protocol_list.append(config_dict[target_type][k]['PROTOCOL'][0])
+                if (
+                    config_dict[target_type][k]["PROTOCOL"][0]
+                    not in check_protocol_list
+                ):
+                    check_protocol_list.append(
+                        config_dict[target_type][k]["PROTOCOL"][0]
+                    )
 
-        self.logger.info("\n\t %s protocol type: %s" % (self.target_type, check_protocol_list))
+        self.logger.info(
+            "\n\t %s protocol type: %s"
+            % (self.target_type, check_protocol_list)
+        )
 
         verified_working_protocol = self.verify_protocol(check_protocol_list)
 
         if verified_working_protocol:
-            self.logger.info("\n\t---- Completed protocol pre-requisite check ----\n")
+            self.logger.info(
+                "\n\t---- Completed protocol pre-requisite check ----\n"
+            )
 
         # Verify top level directory exists for storage
         self.validate_local_store(self.location)
 
-        if ((self.remote_protocol not in verified_working_protocol) and (self.remote_protocol != 'ALL')):
-            self.logger.info("\n\tWorking protocol list: %s" % verified_working_protocol)
+        if (self.remote_protocol not in verified_working_protocol) and (
+            self.remote_protocol != "ALL"
+        ):
+            self.logger.info(
+                "\n\tWorking protocol list: %s" % verified_working_protocol
+            )
             self.logger.error(
-                '\tERROR: Requested protocol %s is not in working protocol list.\n'
-                % self.remote_protocol)
+                "\tERROR: Requested protocol %s is not in working protocol"
+                " list.\n"
+                % self.remote_protocol
+            )
             sys.exit(-1)
         else:
             self.generate_ffdc(verified_working_protocol)
@@ -299,12 +342,15 @@
 
         """
 
-        self.ssh_remoteclient = SSHRemoteclient(self.hostname,
-                                                self.username,
-                                                self.password)
+        self.ssh_remoteclient = SSHRemoteclient(
+            self.hostname, self.username, self.password
+        )
 
         if self.ssh_remoteclient.ssh_remoteclient_login():
-            self.logger.info("\n\t[Check] %s SSH connection established.\t [OK]" % self.hostname)
+            self.logger.info(
+                "\n\t[Check] %s SSH connection established.\t [OK]"
+                % self.hostname
+            )
 
             # Check scp connection.
             # If scp connection fails,
@@ -312,21 +358,30 @@
             self.ssh_remoteclient.scp_connection()
             return True
         else:
-            self.logger.info("\n\t[Check] %s SSH connection.\t [NOT AVAILABLE]" % self.hostname)
+            self.logger.info(
+                "\n\t[Check] %s SSH connection.\t [NOT AVAILABLE]"
+                % self.hostname
+            )
             return False
 
     def telnet_to_target_system(self):
         r"""
         Open a telnet connection to targeted system.
         """
-        self.telnet_remoteclient = TelnetRemoteclient(self.hostname,
-                                                      self.username,
-                                                      self.password)
+        self.telnet_remoteclient = TelnetRemoteclient(
+            self.hostname, self.username, self.password
+        )
         if self.telnet_remoteclient.tn_remoteclient_login():
-            self.logger.info("\n\t[Check] %s Telnet connection established.\t [OK]" % self.hostname)
+            self.logger.info(
+                "\n\t[Check] %s Telnet connection established.\t [OK]"
+                % self.hostname
+            )
             return True
         else:
-            self.logger.info("\n\t[Check] %s Telnet connection.\t [NOT AVAILABLE]" % self.hostname)
+            self.logger.info(
+                "\n\t[Check] %s Telnet connection.\t [NOT AVAILABLE]"
+                % self.hostname
+            )
             return False
 
     def generate_ffdc(self, working_protocol_list):
@@ -337,8 +392,12 @@
         working_protocol_list    list of confirmed working protocols to connect to remote host.
         """
 
-        self.logger.info("\n\t---- Executing commands on " + self.hostname + " ----")
-        self.logger.info("\n\tWorking protocol list: %s" % working_protocol_list)
+        self.logger.info(
+            "\n\t---- Executing commands on " + self.hostname + " ----"
+        )
+        self.logger.info(
+            "\n\tWorking protocol list: %s" % working_protocol_list
+        )
 
         config_dict = self.ffdc_actions
         for target_type in config_dict.keys():
@@ -346,40 +405,47 @@
                 continue
 
             self.logger.info("\n\tFFDC Path: %s " % self.ffdc_dir_path)
-            global_plugin_dict['global_log_store_path'] = self.ffdc_dir_path
+            global_plugin_dict["global_log_store_path"] = self.ffdc_dir_path
             self.logger.info("\tSystem Type: %s" % target_type)
             for k, v in config_dict[target_type].items():
-
-                if self.remote_protocol not in working_protocol_list \
-                        and self.remote_protocol != 'ALL':
+                if (
+                    self.remote_protocol not in working_protocol_list
+                    and self.remote_protocol != "ALL"
+                ):
                     continue
 
-                protocol = config_dict[target_type][k]['PROTOCOL'][0]
+                protocol = config_dict[target_type][k]["PROTOCOL"][0]
 
                 if protocol not in working_protocol_list:
                     continue
 
                 if protocol in working_protocol_list:
-                    if protocol == 'SSH' or protocol == 'SCP':
+                    if protocol == "SSH" or protocol == "SCP":
                         self.protocol_ssh(protocol, target_type, k)
-                    elif protocol == 'TELNET':
+                    elif protocol == "TELNET":
                         self.protocol_telnet(target_type, k)
-                    elif protocol == 'REDFISH' or protocol == 'IPMI' or protocol == 'SHELL':
+                    elif (
+                        protocol == "REDFISH"
+                        or protocol == "IPMI"
+                        or protocol == "SHELL"
+                    ):
                         self.protocol_execute(protocol, target_type, k)
                 else:
-                    self.logger.error("\n\tERROR: %s is not available for %s." % (protocol, self.hostname))
+                    self.logger.error(
+                        "\n\tERROR: %s is not available for %s."
+                        % (protocol, self.hostname)
+                    )
 
         # Close network connection after collecting all files
-        self.elapsed_time = time.strftime("%H:%M:%S", time.gmtime(time.time() - self.start_time))
+        self.elapsed_time = time.strftime(
+            "%H:%M:%S", time.gmtime(time.time() - self.start_time)
+        )
         if self.ssh_remoteclient:
             self.ssh_remoteclient.ssh_remoteclient_disconnect()
         if self.telnet_remoteclient:
             self.telnet_remoteclient.tn_remoteclient_disconnect()
 
-    def protocol_ssh(self,
-                     protocol,
-                     target_type,
-                     sub_type):
+    def protocol_ssh(self, protocol, target_type, sub_type):
         r"""
         Perform actions using SSH and SCP protocols.
 
@@ -389,39 +455,50 @@
         sub_type            Group type of commands.
         """
 
-        if protocol == 'SCP':
+        if protocol == "SCP":
             self.group_copy(self.ffdc_actions[target_type][sub_type])
         else:
-            self.collect_and_copy_ffdc(self.ffdc_actions[target_type][sub_type])
+            self.collect_and_copy_ffdc(
+                self.ffdc_actions[target_type][sub_type]
+            )
 
-    def protocol_telnet(self,
-                        target_type,
-                        sub_type):
+    def protocol_telnet(self, target_type, sub_type):
         r"""
         Perform actions using telnet protocol.
         Description of argument(s):
         target_type          OS Type of remote host.
         """
-        self.logger.info("\n\t[Run] Executing commands on %s using %s" % (self.hostname, 'TELNET'))
+        self.logger.info(
+            "\n\t[Run] Executing commands on %s using %s"
+            % (self.hostname, "TELNET")
+        )
         telnet_files_saved = []
         progress_counter = 0
-        list_of_commands = self.ffdc_actions[target_type][sub_type]['COMMANDS']
+        list_of_commands = self.ffdc_actions[target_type][sub_type]["COMMANDS"]
         for index, each_cmd in enumerate(list_of_commands, start=0):
             command_txt, command_timeout = self.unpack_command(each_cmd)
-            result = self.telnet_remoteclient.execute_command(command_txt, command_timeout)
+            result = self.telnet_remoteclient.execute_command(
+                command_txt, command_timeout
+            )
             if result:
                 try:
-                    targ_file = self.ffdc_actions[target_type][sub_type]['FILES'][index]
+                    targ_file = self.ffdc_actions[target_type][sub_type][
+                        "FILES"
+                    ][index]
                 except IndexError:
                     targ_file = command_txt
                     self.logger.warning(
-                        "\n\t[WARN] Missing filename to store data from telnet %s." % each_cmd)
-                    self.logger.warning("\t[WARN] Data will be stored in %s." % targ_file)
-                targ_file_with_path = (self.ffdc_dir_path
-                                       + self.ffdc_prefix
-                                       + targ_file)
+                        "\n\t[WARN] Missing filename to store data from"
+                        " telnet %s." % each_cmd
+                    )
+                    self.logger.warning(
+                        "\t[WARN] Data will be stored in %s." % targ_file
+                    )
+                targ_file_with_path = (
+                    self.ffdc_dir_path + self.ffdc_prefix + targ_file
+                )
                 # Creates a new file
-                with open(targ_file_with_path, 'w') as fp:
+                with open(targ_file_with_path, "w") as fp:
                     fp.write(result)
                     fp.close
                     telnet_files_saved.append(targ_file)
@@ -431,10 +508,7 @@
         for file in telnet_files_saved:
             self.logger.info("\n\t\tSuccessfully save file " + file + ".")
 
-    def protocol_execute(self,
-                         protocol,
-                         target_type,
-                         sub_type):
+    def protocol_execute(self, protocol, target_type, sub_type):
         r"""
         Perform actions for a given protocol.
 
@@ -444,27 +518,36 @@
         sub_type            Group type of commands.
         """
 
-        self.logger.info("\n\t[Run] Executing commands to %s using %s" % (self.hostname, protocol))
+        self.logger.info(
+            "\n\t[Run] Executing commands to %s using %s"
+            % (self.hostname, protocol)
+        )
         executed_files_saved = []
         progress_counter = 0
-        list_of_cmd = self.get_command_list(self.ffdc_actions[target_type][sub_type])
+        list_of_cmd = self.get_command_list(
+            self.ffdc_actions[target_type][sub_type]
+        )
         for index, each_cmd in enumerate(list_of_cmd, start=0):
             plugin_call = False
             if isinstance(each_cmd, dict):
-                if 'plugin' in each_cmd:
+                if "plugin" in each_cmd:
                     # If the error is set and plugin explicitly
                     # requested to skip execution on error..
-                    if plugin_error_dict['exit_on_error'] and \
-                            self.plugin_error_check(each_cmd['plugin']):
-                        self.logger.info("\n\t[PLUGIN-ERROR] exit_on_error: %s" %
-                                         plugin_error_dict['exit_on_error'])
-                        self.logger.info("\t[PLUGIN-SKIP] %s" %
-                                         each_cmd['plugin'][0])
+                    if plugin_error_dict[
+                        "exit_on_error"
+                    ] and self.plugin_error_check(each_cmd["plugin"]):
+                        self.logger.info(
+                            "\n\t[PLUGIN-ERROR] exit_on_error: %s"
+                            % plugin_error_dict["exit_on_error"]
+                        )
+                        self.logger.info(
+                            "\t[PLUGIN-SKIP] %s" % each_cmd["plugin"][0]
+                        )
                         continue
                     plugin_call = True
                     # call the plugin
                     self.logger.info("\n\t[PLUGIN-START]")
-                    result = self.execute_plugin_block(each_cmd['plugin'])
+                    result = self.execute_plugin_block(each_cmd["plugin"])
                     self.logger.info("\t[PLUGIN-END]\n")
             else:
                 each_cmd = self.yaml_env_and_plugin_vars_populate(each_cmd)
@@ -473,23 +556,31 @@
                 result = self.run_tool_cmd(each_cmd)
             if result:
                 try:
-                    file_name = self.get_file_list(self.ffdc_actions[target_type][sub_type])[index]
+                    file_name = self.get_file_list(
+                        self.ffdc_actions[target_type][sub_type]
+                    )[index]
                     # If file is specified as None.
                     if file_name == "None":
                         continue
-                    targ_file = self.yaml_env_and_plugin_vars_populate(file_name)
+                    targ_file = self.yaml_env_and_plugin_vars_populate(
+                        file_name
+                    )
                 except IndexError:
-                    targ_file = each_cmd.split('/')[-1]
+                    targ_file = each_cmd.split("/")[-1]
                     self.logger.warning(
-                        "\n\t[WARN] Missing filename to store data from %s." % each_cmd)
-                    self.logger.warning("\t[WARN] Data will be stored in %s." % targ_file)
+                        "\n\t[WARN] Missing filename to store data from %s."
+                        % each_cmd
+                    )
+                    self.logger.warning(
+                        "\t[WARN] Data will be stored in %s." % targ_file
+                    )
 
-                targ_file_with_path = (self.ffdc_dir_path
-                                       + self.ffdc_prefix
-                                       + targ_file)
+                targ_file_with_path = (
+                    self.ffdc_dir_path + self.ffdc_prefix + targ_file
+                )
 
                 # Creates a new file
-                with open(targ_file_with_path, 'w') as fp:
+                with open(targ_file_with_path, "w") as fp:
                     if isinstance(result, dict):
                         fp.write(json.dumps(result))
                     else:
@@ -505,9 +596,9 @@
         for file in executed_files_saved:
             self.logger.info("\n\t\tSuccessfully save file " + file + ".")
 
-    def collect_and_copy_ffdc(self,
-                              ffdc_actions_for_target_type,
-                              form_filename=False):
+    def collect_and_copy_ffdc(
+        self, ffdc_actions_for_target_type, form_filename=False
+    ):
         r"""
         Send commands in ffdc_config file to targeted system.
 
@@ -517,21 +608,32 @@
         """
 
         # Executing commands, if any
-        self.ssh_execute_ffdc_commands(ffdc_actions_for_target_type,
-                                       form_filename)
+        self.ssh_execute_ffdc_commands(
+            ffdc_actions_for_target_type, form_filename
+        )
 
         # Copying files
         if self.ssh_remoteclient.scpclient:
-            self.logger.info("\n\n\tCopying FFDC files from remote system %s.\n" % self.hostname)
+            self.logger.info(
+                "\n\n\tCopying FFDC files from remote system %s.\n"
+                % self.hostname
+            )
 
             # Retrieving files from target system
             list_of_files = self.get_file_list(ffdc_actions_for_target_type)
-            self.scp_ffdc(self.ffdc_dir_path, self.ffdc_prefix, form_filename, list_of_files)
+            self.scp_ffdc(
+                self.ffdc_dir_path,
+                self.ffdc_prefix,
+                form_filename,
+                list_of_files,
+            )
         else:
-            self.logger.info("\n\n\tSkip copying FFDC files from remote system %s.\n" % self.hostname)
+            self.logger.info(
+                "\n\n\tSkip copying FFDC files from remote system %s.\n"
+                % self.hostname
+            )
 
-    def get_command_list(self,
-                         ffdc_actions_for_target_type):
+    def get_command_list(self, ffdc_actions_for_target_type):
         r"""
         Fetch list of commands from configuration file
 
@@ -539,13 +641,12 @@
         ffdc_actions_for_target_type    commands and files for the selected remote host type.
         """
         try:
-            list_of_commands = ffdc_actions_for_target_type['COMMANDS']
+            list_of_commands = ffdc_actions_for_target_type["COMMANDS"]
         except KeyError:
             list_of_commands = []
         return list_of_commands
 
-    def get_file_list(self,
-                      ffdc_actions_for_target_type):
+    def get_file_list(self, ffdc_actions_for_target_type):
         r"""
         Fetch list of commands from configuration file
 
@@ -553,13 +654,12 @@
         ffdc_actions_for_target_type    commands and files for the selected remote host type.
         """
         try:
-            list_of_files = ffdc_actions_for_target_type['FILES']
+            list_of_files = ffdc_actions_for_target_type["FILES"]
         except KeyError:
             list_of_files = []
         return list_of_files
 
-    def unpack_command(self,
-                       command):
+    def unpack_command(self, command):
         r"""
         Unpack command from config file
 
@@ -576,9 +676,9 @@
 
         return command_txt, command_timeout
 
-    def ssh_execute_ffdc_commands(self,
-                                  ffdc_actions_for_target_type,
-                                  form_filename=False):
+    def ssh_execute_ffdc_commands(
+        self, ffdc_actions_for_target_type, form_filename=False
+    ):
         r"""
         Send commands in ffdc_config file to targeted system.
 
@@ -586,8 +686,10 @@
         ffdc_actions_for_target_type    commands and files for the selected remote host type.
         form_filename                    if true, pre-pend self.target_type to filename
         """
-        self.logger.info("\n\t[Run] Executing commands on %s using %s"
-                         % (self.hostname, ffdc_actions_for_target_type['PROTOCOL'][0]))
+        self.logger.info(
+            "\n\t[Run] Executing commands on %s using %s"
+            % (self.hostname, ffdc_actions_for_target_type["PROTOCOL"][0])
+        )
 
         list_of_commands = self.get_command_list(ffdc_actions_for_target_type)
         # If command list is empty, returns
@@ -601,12 +703,19 @@
             if form_filename:
                 command_txt = str(command_txt % self.target_type)
 
-            cmd_exit_code, err, response = \
-                self.ssh_remoteclient.execute_command(command_txt, command_timeout)
+            (
+                cmd_exit_code,
+                err,
+                response,
+            ) = self.ssh_remoteclient.execute_command(
+                command_txt, command_timeout
+            )
 
             if cmd_exit_code:
                 self.logger.warning(
-                    "\n\t\t[WARN] %s exits with code %s." % (command_txt, str(cmd_exit_code)))
+                    "\n\t\t[WARN] %s exits with code %s."
+                    % (command_txt, str(cmd_exit_code))
+                )
                 self.logger.warning("\t\t[WARN] %s " % err)
 
             progress_counter += 1
@@ -614,8 +723,7 @@
 
         self.logger.info("\n\t[Run] Commands execution completed.\t\t [OK]")
 
-    def group_copy(self,
-                   ffdc_actions_for_target_type):
+    def group_copy(self, ffdc_actions_for_target_type):
         r"""
         scp group of files (wild card) from remote host.
 
@@ -624,9 +732,14 @@
         """
 
         if self.ssh_remoteclient.scpclient:
-            self.logger.info("\n\tCopying files from remote system %s via SCP.\n" % self.hostname)
+            self.logger.info(
+                "\n\tCopying files from remote system %s via SCP.\n"
+                % self.hostname
+            )
 
-            list_of_commands = self.get_command_list(ffdc_actions_for_target_type)
+            list_of_commands = self.get_command_list(
+                ffdc_actions_for_target_type
+            )
             # If command list is empty, returns
             if not list_of_commands:
                 return
@@ -638,29 +751,42 @@
                     self.logger.error("\t\tInvalid command %s" % command)
                     continue
 
-                cmd_exit_code, err, response = \
-                    self.ssh_remoteclient.execute_command(command)
+                (
+                    cmd_exit_code,
+                    err,
+                    response,
+                ) = self.ssh_remoteclient.execute_command(command)
 
                 # If file does not exist, code take no action.
                 # cmd_exit_code is ignored for this scenario.
                 if response:
-                    scp_result = \
-                        self.ssh_remoteclient.scp_file_from_remote(response.split('\n'),
-                                                                   self.ffdc_dir_path)
+                    scp_result = self.ssh_remoteclient.scp_file_from_remote(
+                        response.split("\n"), self.ffdc_dir_path
+                    )
                     if scp_result:
-                        self.logger.info("\t\tSuccessfully copied from " + self.hostname + ':' + command)
+                        self.logger.info(
+                            "\t\tSuccessfully copied from "
+                            + self.hostname
+                            + ":"
+                            + command
+                        )
                 else:
                     self.logger.info("\t\t%s has no result" % command)
 
         else:
-            self.logger.info("\n\n\tSkip copying files from remote system %s.\n" % self.hostname)
+            self.logger.info(
+                "\n\n\tSkip copying files from remote system %s.\n"
+                % self.hostname
+            )
 
-    def scp_ffdc(self,
-                 targ_dir_path,
-                 targ_file_prefix,
-                 form_filename,
-                 file_list=None,
-                 quiet=None):
+    def scp_ffdc(
+        self,
+        targ_dir_path,
+        targ_file_prefix,
+        form_filename,
+        file_list=None,
+        quiet=None,
+    ):
         r"""
         SCP all files in file_dict to the indicated directory on the local system.
 
@@ -677,21 +803,37 @@
             if form_filename:
                 filename = str(filename % self.target_type)
             source_file_path = filename
-            targ_file_path = targ_dir_path + targ_file_prefix + filename.split('/')[-1]
+            targ_file_path = (
+                targ_dir_path + targ_file_prefix + filename.split("/")[-1]
+            )
 
             # If source file name contains wild card, copy filename as is.
-            if '*' in source_file_path:
-                scp_result = self.ssh_remoteclient.scp_file_from_remote(source_file_path, self.ffdc_dir_path)
+            if "*" in source_file_path:
+                scp_result = self.ssh_remoteclient.scp_file_from_remote(
+                    source_file_path, self.ffdc_dir_path
+                )
             else:
-                scp_result = self.ssh_remoteclient.scp_file_from_remote(source_file_path, targ_file_path)
+                scp_result = self.ssh_remoteclient.scp_file_from_remote(
+                    source_file_path, targ_file_path
+                )
 
             if not quiet:
                 if scp_result:
                     self.logger.info(
-                        "\t\tSuccessfully copied from " + self.hostname + ':' + source_file_path + ".\n")
+                        "\t\tSuccessfully copied from "
+                        + self.hostname
+                        + ":"
+                        + source_file_path
+                        + ".\n"
+                    )
                 else:
                     self.logger.info(
-                        "\t\tFail to copy from " + self.hostname + ':' + source_file_path + ".\n")
+                        "\t\tFail to copy from "
+                        + self.hostname
+                        + ":"
+                        + source_file_path
+                        + ".\n"
+                    )
             else:
                 progress_counter += 1
                 self.print_progress(progress_counter)
@@ -710,7 +852,9 @@
         """
 
         timestr = time.strftime("%Y%m%d-%H%M%S")
-        self.ffdc_dir_path = self.location + "/" + self.hostname + "_" + timestr + "/"
+        self.ffdc_dir_path = (
+            self.location + "/" + self.hostname + "_" + timestr + "/"
+        )
         self.ffdc_prefix = timestr + "_"
         self.validate_local_store(self.ffdc_dir_path)
 
@@ -734,10 +878,14 @@
                 # PermissionError
                 if e.errno == EPERM or e.errno == EACCES:
                     self.logger.error(
-                        '\tERROR: os.makedirs %s failed with PermissionError.\n' % dir_path)
+                        "\tERROR: os.makedirs %s failed with"
+                        " PermissionError.\n" % dir_path
+                    )
                 else:
                     self.logger.error(
-                        '\tERROR: os.makedirs %s failed with %s.\n' % (dir_path, e.strerror))
+                        "\tERROR: os.makedirs %s failed with %s.\n"
+                        % (dir_path, e.strerror)
+                    )
                 sys.exit(-1)
 
     def print_progress(self, progress):
@@ -751,34 +899,47 @@
 
         sys.stdout.write("\r\t" + "+" * progress)
         sys.stdout.flush()
-        time.sleep(.1)
+        time.sleep(0.1)
 
     def verify_redfish(self):
         r"""
         Verify remote host has redfish service active
 
         """
-        redfish_parm = 'redfishtool -r ' \
-                       + self.hostname + ' -S Always raw GET /redfish/v1/'
-        return (self.run_tool_cmd(redfish_parm, True))
+        redfish_parm = (
+            "redfishtool -r "
+            + self.hostname
+            + " -S Always raw GET /redfish/v1/"
+        )
+        return self.run_tool_cmd(redfish_parm, True)
 
     def verify_ipmi(self):
         r"""
         Verify remote host has IPMI LAN service active
 
         """
-        if self.target_type == 'OPENBMC':
-            ipmi_parm = 'ipmitool -I lanplus -C 17  -U ' + self.username + ' -P ' \
-                + self.password + ' -H ' + self.hostname + ' power status'
+        if self.target_type == "OPENBMC":
+            ipmi_parm = (
+                "ipmitool -I lanplus -C 17  -U "
+                + self.username
+                + " -P "
+                + self.password
+                + " -H "
+                + self.hostname
+                + " power status"
+            )
         else:
-            ipmi_parm = 'ipmitool -I lanplus  -P ' \
-                + self.password + ' -H ' + self.hostname + ' power status'
+            ipmi_parm = (
+                "ipmitool -I lanplus  -P "
+                + self.password
+                + " -H "
+                + self.hostname
+                + " power status"
+            )
 
-        return (self.run_tool_cmd(ipmi_parm, True))
+        return self.run_tool_cmd(ipmi_parm, True)
 
-    def run_tool_cmd(self,
-                     parms_string,
-                     quiet=False):
+    def run_tool_cmd(self, parms_string, quiet=False):
         r"""
         Run CLI standard tool or scripts.
 
@@ -787,15 +948,17 @@
         quiet                do not print tool error message if True
         """
 
-        result = subprocess.run([parms_string],
-                                stdout=subprocess.PIPE,
-                                stderr=subprocess.PIPE,
-                                shell=True,
-                                universal_newlines=True)
+        result = subprocess.run(
+            [parms_string],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            shell=True,
+            universal_newlines=True,
+        )
 
         if result.stderr and not quiet:
-            self.logger.error('\n\t\tERROR with %s ' % parms_string)
-            self.logger.error('\t\t' + result.stderr)
+            self.logger.error("\n\t\tERROR with %s " % parms_string)
+            self.logger.error("\t\t" + result.stderr)
 
         return result.stdout
 
@@ -812,37 +975,53 @@
             tmp_list.append("SHELL")
 
         for protocol in protocol_list:
-            if self.remote_protocol != 'ALL':
+            if self.remote_protocol != "ALL":
                 if self.remote_protocol != protocol:
                     continue
 
             # Only check SSH/SCP once for both protocols
-            if protocol == 'SSH' or protocol == 'SCP' and protocol not in tmp_list:
+            if (
+                protocol == "SSH"
+                or protocol == "SCP"
+                and protocol not in tmp_list
+            ):
                 if self.ssh_to_target_system():
                     # Add only what user asked.
-                    if self.remote_protocol != 'ALL':
+                    if self.remote_protocol != "ALL":
                         tmp_list.append(self.remote_protocol)
                     else:
-                        tmp_list.append('SSH')
-                        tmp_list.append('SCP')
+                        tmp_list.append("SSH")
+                        tmp_list.append("SCP")
 
-            if protocol == 'TELNET':
+            if protocol == "TELNET":
                 if self.telnet_to_target_system():
                     tmp_list.append(protocol)
 
-            if protocol == 'REDFISH':
+            if protocol == "REDFISH":
                 if self.verify_redfish():
                     tmp_list.append(protocol)
-                    self.logger.info("\n\t[Check] %s Redfish Service.\t\t [OK]" % self.hostname)
+                    self.logger.info(
+                        "\n\t[Check] %s Redfish Service.\t\t [OK]"
+                        % self.hostname
+                    )
                 else:
-                    self.logger.info("\n\t[Check] %s Redfish Service.\t\t [NOT AVAILABLE]" % self.hostname)
+                    self.logger.info(
+                        "\n\t[Check] %s Redfish Service.\t\t [NOT AVAILABLE]"
+                        % self.hostname
+                    )
 
-            if protocol == 'IPMI':
+            if protocol == "IPMI":
                 if self.verify_ipmi():
                     tmp_list.append(protocol)
-                    self.logger.info("\n\t[Check] %s IPMI LAN Service.\t\t [OK]" % self.hostname)
+                    self.logger.info(
+                        "\n\t[Check] %s IPMI LAN Service.\t\t [OK]"
+                        % self.hostname
+                    )
                 else:
-                    self.logger.info("\n\t[Check] %s IPMI LAN Service.\t\t [NOT AVAILABLE]" % self.hostname)
+                    self.logger.info(
+                        "\n\t[Check] %s IPMI LAN Service.\t\t [NOT AVAILABLE]"
+                        % self.hostname
+                    )
 
         return tmp_list
 
@@ -855,14 +1034,14 @@
         # Example YAML:
         # -COMMANDS:
         #    - my_command ${hostname}  ${username}   ${password}
-        os.environ['hostname'] = self.hostname
-        os.environ['username'] = self.username
-        os.environ['password'] = self.password
+        os.environ["hostname"] = self.hostname
+        os.environ["username"] = self.username
+        os.environ["password"] = self.password
 
         # Append default Env.
-        self.env_dict['hostname'] = self.hostname
-        self.env_dict['username'] = self.username
-        self.env_dict['password'] = self.password
+        self.env_dict["hostname"] = self.hostname
+        self.env_dict["username"] = self.username
+        self.env_dict["password"] = self.password
 
         try:
             tmp_env_dict = {}
@@ -874,14 +1053,14 @@
                     self.env_dict[key] = str(value)
 
             if self.econfig:
-                with open(self.econfig, 'r') as file:
+                with open(self.econfig, "r") as file:
                     try:
                         tmp_env_dict = yaml.load(file, Loader=yaml.SafeLoader)
                     except yaml.YAMLError as e:
                         self.logger.error(e)
                         sys.exit(-1)
                 # Export ENV vars.
-                for key, value in tmp_env_dict['env_params'].items():
+                for key, value in tmp_env_dict["env_params"].items():
                     os.environ[key] = str(value)
                     self.env_dict[key] = str(value)
         except json.decoder.JSONDecodeError as e:
@@ -894,8 +1073,9 @@
             if k.lower().find("password") != -1:
                 hidden_text = []
                 hidden_text.append(v)
-                password_regex = '(' +\
-                    '|'.join([re.escape(x) for x in hidden_text]) + ')'
+                password_regex = (
+                    "(" + "|".join([re.escape(x) for x in hidden_text]) + ")"
+                )
                 mask_dict[k] = re.sub(password_regex, "********", v)
 
         self.logger.info(json.dumps(mask_dict, indent=8, sort_keys=False))
@@ -915,16 +1095,18 @@
             self.logger.debug("\tCall func: %s" % eval_string)
             result = eval(eval_string)
             self.logger.info("\treturn: %s" % str(result))
-        except (ValueError,
-                SyntaxError,
-                NameError,
-                AttributeError,
-                TypeError) as e:
+        except (
+            ValueError,
+            SyntaxError,
+            NameError,
+            AttributeError,
+            TypeError,
+        ) as e:
             self.logger.error("\tERROR: execute_python_eval: %s" % e)
             # Set the plugin error state.
-            plugin_error_dict['exit_on_error'] = True
+            plugin_error_dict["exit_on_error"] = True
             self.logger.info("\treturn: PLUGIN_EVAL_ERROR")
-            return 'PLUGIN_EVAL_ERROR'
+            return "PLUGIN_EVAL_ERROR"
 
         return result
 
@@ -957,18 +1139,18 @@
                 - arg2
         """
         try:
-            idx = self.key_index_list_dict('plugin_name', plugin_cmd_list)
-            plugin_name = plugin_cmd_list[idx]['plugin_name']
+            idx = self.key_index_list_dict("plugin_name", plugin_cmd_list)
+            plugin_name = plugin_cmd_list[idx]["plugin_name"]
             # Equal separator means plugin function returns result.
-            if ' = ' in plugin_name:
+            if " = " in plugin_name:
                 # Ex. ['result', 'plugin.foo_func.my_func']
-                plugin_name_args = plugin_name.split(' = ')
+                plugin_name_args = plugin_name.split(" = ")
                 # plugin func return data.
                 for arg in plugin_name_args:
                     if arg == plugin_name_args[-1]:
                         plugin_name = arg
                     else:
-                        plugin_resp = arg.split(',')
+                        plugin_resp = arg.split(",")
                         # ['result1','result2']
                         for x in plugin_resp:
                             global_plugin_list.append(x)
@@ -976,9 +1158,9 @@
 
             # Walk the plugin args ['arg1,'arg2']
             # If the YAML plugin statement 'plugin_args' is not declared.
-            if any('plugin_args' in d for d in plugin_cmd_list):
-                idx = self.key_index_list_dict('plugin_args', plugin_cmd_list)
-                plugin_args = plugin_cmd_list[idx]['plugin_args']
+            if any("plugin_args" in d for d in plugin_cmd_list):
+                idx = self.key_index_list_dict("plugin_args", plugin_cmd_list)
+                plugin_args = plugin_cmd_list[idx]["plugin_args"]
                 if plugin_args:
                     plugin_args = self.yaml_args_populate(plugin_args)
                 else:
@@ -990,43 +1172,52 @@
             # "arg1","arg2","argn"  string as params for function.
             parm_args_str = self.yaml_args_string(plugin_args)
             if parm_args_str:
-                plugin_func = plugin_name + '(' + parm_args_str + ')'
+                plugin_func = plugin_name + "(" + parm_args_str + ")"
             else:
-                plugin_func = plugin_name + '()'
+                plugin_func = plugin_name + "()"
 
             # Execute plugin function.
             if global_plugin_dict:
                 resp = self.execute_python_eval(plugin_func)
                 # Update plugin vars dict if there is any.
-                if resp != 'PLUGIN_EVAL_ERROR':
+                if resp != "PLUGIN_EVAL_ERROR":
                     self.response_args_data(resp)
             else:
                 resp = self.execute_python_eval(plugin_func)
         except Exception as e:
             # Set the plugin error state.
-            plugin_error_dict['exit_on_error'] = True
+            plugin_error_dict["exit_on_error"] = True
             self.logger.error("\tERROR: execute_plugin_block: %s" % e)
             pass
 
         # There is a real error executing the plugin function.
-        if resp == 'PLUGIN_EVAL_ERROR':
+        if resp == "PLUGIN_EVAL_ERROR":
             return resp
 
         # Check if plugin_expects_return (int, string, list,dict etc)
-        if any('plugin_expects_return' in d for d in plugin_cmd_list):
-            idx = self.key_index_list_dict('plugin_expects_return', plugin_cmd_list)
-            plugin_expects = plugin_cmd_list[idx]['plugin_expects_return']
+        if any("plugin_expects_return" in d for d in plugin_cmd_list):
+            idx = self.key_index_list_dict(
+                "plugin_expects_return", plugin_cmd_list
+            )
+            plugin_expects = plugin_cmd_list[idx]["plugin_expects_return"]
             if plugin_expects:
                 if resp:
-                    if self.plugin_expect_type(plugin_expects, resp) == 'INVALID':
+                    if (
+                        self.plugin_expect_type(plugin_expects, resp)
+                        == "INVALID"
+                    ):
                         self.logger.error("\tWARN: Plugin error check skipped")
                     elif not self.plugin_expect_type(plugin_expects, resp):
-                        self.logger.error("\tERROR: Plugin expects return data: %s"
-                                          % plugin_expects)
-                        plugin_error_dict['exit_on_error'] = True
+                        self.logger.error(
+                            "\tERROR: Plugin expects return data: %s"
+                            % plugin_expects
+                        )
+                        plugin_error_dict["exit_on_error"] = True
                 elif not resp:
-                    self.logger.error("\tERROR: Plugin func failed to return data")
-                    plugin_error_dict['exit_on_error'] = True
+                    self.logger.error(
+                        "\tERROR: Plugin func failed to return data"
+                    )
+                    plugin_error_dict["exit_on_error"] = True
 
         return resp
 
@@ -1040,26 +1231,26 @@
         resp_data = ""
 
         # There is nothing to update the plugin response.
-        if len(global_plugin_list) == 0 or plugin_resp == 'None':
+        if len(global_plugin_list) == 0 or plugin_resp == "None":
             return
 
         if isinstance(plugin_resp, str):
-            resp_data = plugin_resp.strip('\r\n\t')
+            resp_data = plugin_resp.strip("\r\n\t")
             resp_list.append(resp_data)
         elif isinstance(plugin_resp, bytes):
-            resp_data = str(plugin_resp, 'UTF-8').strip('\r\n\t')
+            resp_data = str(plugin_resp, "UTF-8").strip("\r\n\t")
             resp_list.append(resp_data)
         elif isinstance(plugin_resp, tuple):
             if len(global_plugin_list) == 1:
                 resp_list.append(plugin_resp)
             else:
                 resp_list = list(plugin_resp)
-                resp_list = [x.strip('\r\n\t') for x in resp_list]
+                resp_list = [x.strip("\r\n\t") for x in resp_list]
         elif isinstance(plugin_resp, list):
             if len(global_plugin_list) == 1:
-                resp_list.append([x.strip('\r\n\t') for x in plugin_resp])
+                resp_list.append([x.strip("\r\n\t") for x in plugin_resp])
             else:
-                resp_list = [x.strip('\r\n\t') for x in plugin_resp]
+                resp_list = [x.strip("\r\n\t") for x in plugin_resp]
         elif isinstance(plugin_resp, int) or isinstance(plugin_resp, float):
             resp_list.append(plugin_resp)
 
@@ -1087,7 +1278,7 @@
 
         plugin_args            arg list ['arg1','arg2','argn']
         """
-        args_str = ''
+        args_str = ""
         for args in plugin_args:
             if args:
                 if isinstance(args, (int, float)):
@@ -1095,7 +1286,7 @@
                 elif args in global_plugin_type_list:
                     args_str += str(global_plugin_dict[args])
                 else:
-                    args_str += '"' + str(args.strip('\r\n\t')) + '"'
+                    args_str += '"' + str(args.strip("\r\n\t")) + '"'
             # Skip last list element.
             if args != plugin_args[-1]:
                 args_str += ","
@@ -1148,11 +1339,11 @@
         try:
             # Example, list of matching env vars ['username', 'password', 'hostname']
             # Extra escape \ for special symbols. '\$\{([^\}]+)\}' works well.
-            var_name_regex = '\\$\\{([^\\}]+)\\}'
+            var_name_regex = "\\$\\{([^\\}]+)\\}"
             env_var_names_list = re.findall(var_name_regex, yaml_arg_str)
             for var in env_var_names_list:
                 env_var = os.environ[var]
-                env_replace = '${' + var + '}'
+                env_replace = "${" + var + "}"
                 yaml_arg_str = yaml_arg_str.replace(env_replace, env_var)
         except Exception as e:
             self.logger.error("\tERROR:yaml_env_vars_populate: %s" % e)
@@ -1175,10 +1366,14 @@
                         # in eval function call.
                         global_plugin_type_list.append(var)
                     else:
-                        yaml_arg_str = yaml_arg_str.replace(str(var), str(global_plugin_dict[var]))
+                        yaml_arg_str = yaml_arg_str.replace(
+                            str(var), str(global_plugin_dict[var])
+                        )
                 # Just a string like filename or command.
                 else:
-                    yaml_arg_str = yaml_arg_str.replace(str(var), str(global_plugin_dict[var]))
+                    yaml_arg_str = yaml_arg_str.replace(
+                        str(var), str(global_plugin_dict[var])
+                    )
         except (IndexError, ValueError) as e:
             self.logger.error("\tERROR: yaml_plugin_vars_populate: %s" % e)
             pass
@@ -1192,10 +1387,10 @@
         Description of argument(s):
         plugin_dict        Dictionary of plugin error.
         """
-        if any('plugin_error' in d for d in plugin_dict):
+        if any("plugin_error" in d for d in plugin_dict):
             for d in plugin_dict:
-                if 'plugin_error' in d:
-                    value = d['plugin_error']
+                if "plugin_error" in d:
+                    value = d["plugin_error"]
                     # Reference if the error is set or not by plugin.
                     return plugin_error_dict[value]
 
@@ -1215,18 +1410,18 @@
         r"""
         Plugin expect directive type check.
         """
-        if type == 'int':
+        if type == "int":
             return isinstance(data, int)
-        elif type == 'float':
+        elif type == "float":
             return isinstance(data, float)
-        elif type == 'str':
+        elif type == "str":
             return isinstance(data, str)
-        elif type == 'list':
+        elif type == "list":
             return isinstance(data, list)
-        elif type == 'dict':
+        elif type == "dict":
             return isinstance(data, dict)
-        elif type == 'tuple':
+        elif type == "tuple":
             return isinstance(data, tuple)
         else:
             self.logger.info("\tInvalid data type requested: %s" % type)
-            return 'INVALID'
+            return "INVALID"
diff --git a/ffdc/lib/ssh_utility.py b/ffdc/lib/ssh_utility.py
index 01b39dd..fb44121 100644
--- a/ffdc/lib/ssh_utility.py
+++ b/ffdc/lib/ssh_utility.py
@@ -1,17 +1,20 @@
 #!/usr/bin/env python3
 
-import paramiko
-from paramiko.ssh_exception import AuthenticationException
-from paramiko.ssh_exception import NoValidConnectionsError
-from paramiko.ssh_exception import SSHException
-from paramiko.ssh_exception import BadHostKeyException
-from paramiko.buffered_pipe import PipeTimeout as PipeTimeout
-from scp import SCPClient, SCPException
-import time
-import socket
 import logging
+import socket
+import time
 from socket import timeout as SocketTimeout
 
+import paramiko
+from paramiko.buffered_pipe import PipeTimeout as PipeTimeout
+from paramiko.ssh_exception import (
+    AuthenticationException,
+    BadHostKeyException,
+    NoValidConnectionsError,
+    SSHException,
+)
+from scp import SCPClient, SCPException
+
 
 class SSHRemoteclient:
     r"""
@@ -20,7 +23,6 @@
     """
 
     def __init__(self, hostname, username, password):
-
         r"""
         Description of argument(s):
 
@@ -38,7 +40,6 @@
         self.password = password
 
     def ssh_remoteclient_login(self):
-
         r"""
         Method to create a ssh connection to remote host.
         """
@@ -48,23 +49,31 @@
             # SSHClient to make connections to the remote server
             self.sshclient = paramiko.SSHClient()
             # setting set_missing_host_key_policy() to allow any host
-            self.sshclient.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+            self.sshclient.set_missing_host_key_policy(
+                paramiko.AutoAddPolicy()
+            )
             # Connect to the server
-            self.sshclient.connect(hostname=self.hostname,
-                                   username=self.username,
-                                   password=self.password,
-                                   banner_timeout=120,
-                                   timeout=60,
-                                   look_for_keys=False)
+            self.sshclient.connect(
+                hostname=self.hostname,
+                username=self.username,
+                password=self.password,
+                banner_timeout=120,
+                timeout=60,
+                look_for_keys=False,
+            )
 
-        except (BadHostKeyException, AuthenticationException,
-                SSHException, NoValidConnectionsError, socket.error) as e:
+        except (
+            BadHostKeyException,
+            AuthenticationException,
+            SSHException,
+            NoValidConnectionsError,
+            socket.error,
+        ) as e:
             is_ssh_login = False
 
         return is_ssh_login
 
     def ssh_remoteclient_disconnect(self):
-
         r"""
         Clean up.
         """
@@ -75,8 +84,7 @@
         if self.scpclient:
             self.scpclient.close()
 
-    def execute_command(self, command,
-                        default_timeout=60):
+    def execute_command(self, command, default_timeout=60):
         """
         Execute command on the remote host.
 
@@ -85,25 +93,26 @@
 
         """
 
-        empty = ''
+        empty = ""
         cmd_start = time.time()
         try:
-            stdin, stdout, stderr = \
-                self.sshclient.exec_command(command, timeout=default_timeout)
+            stdin, stdout, stderr = self.sshclient.exec_command(
+                command, timeout=default_timeout
+            )
             start = time.time()
             while time.time() < start + default_timeout:
                 # Need to do read/write operation to trigger
                 # paramiko exec_command timeout mechanism.
                 xresults = stderr.readlines()
-                results = ''.join(xresults)
+                results = "".join(xresults)
                 time.sleep(1)
                 if stdout.channel.exit_status_ready():
                     break
             cmd_exit_code = stdout.channel.recv_exit_status()
 
             # Convert list of string to one string
-            err = ''
-            out = ''
+            err = ""
+            out = ""
             for item in results:
                 err += item
             for item in stdout.readlines():
@@ -111,30 +120,53 @@
 
             return cmd_exit_code, err, out
 
-        except (paramiko.AuthenticationException, paramiko.SSHException,
-                paramiko.ChannelException, SocketTimeout) as e:
+        except (
+            paramiko.AuthenticationException,
+            paramiko.SSHException,
+            paramiko.ChannelException,
+            SocketTimeout,
+        ) as e:
             # Log command with error. Return to caller for next command, if any.
-            logging.error("\n\tERROR: Fail remote command %s %s" % (e.__class__, e))
-            logging.error("\tCommand '%s' Elapsed Time %s" %
-                          (command, time.strftime("%H:%M:%S", time.gmtime(time.time() - cmd_start))))
+            logging.error(
+                "\n\tERROR: Fail remote command %s %s" % (e.__class__, e)
+            )
+            logging.error(
+                "\tCommand '%s' Elapsed Time %s"
+                % (
+                    command,
+                    time.strftime(
+                        "%H:%M:%S", time.gmtime(time.time() - cmd_start)
+                    ),
+                )
+            )
             return 0, empty, empty
 
     def scp_connection(self):
-
         r"""
         Create a scp connection for file transfer.
         """
         try:
-            self.scpclient = SCPClient(self.sshclient.get_transport(), sanitize=lambda x: x)
-            logging.info("\n\t[Check] %s SCP transport established.\t [OK]" % self.hostname)
+            self.scpclient = SCPClient(
+                self.sshclient.get_transport(), sanitize=lambda x: x
+            )
+            logging.info(
+                "\n\t[Check] %s SCP transport established.\t [OK]"
+                % self.hostname
+            )
         except (SCPException, SocketTimeout, PipeTimeout) as e:
             self.scpclient = None
-            logging.error("\n\tERROR: SCP get_transport has failed. %s %s" % (e.__class__, e))
-            logging.info("\tScript continues generating FFDC on %s." % self.hostname)
-            logging.info("\tCollected data will need to be manually offloaded.")
+            logging.error(
+                "\n\tERROR: SCP get_transport has failed. %s %s"
+                % (e.__class__, e)
+            )
+            logging.info(
+                "\tScript continues generating FFDC on %s." % self.hostname
+            )
+            logging.info(
+                "\tCollected data will need to be manually offloaded."
+            )
 
     def scp_file_from_remote(self, remote_file, local_file):
-
         r"""
         scp a file from the remote system to the local host with a date-prefixed filename.
 
@@ -151,7 +183,9 @@
         except (SCPException, SocketTimeout, PipeTimeout, SSHException) as e:
             # Log command with error. Return to caller for next file, if any.
             logging.error(
-                "\n\tERROR: Fail scp %s from remotehost %s %s\n\n" % (remote_file, e.__class__, e))
+                "\n\tERROR: Fail scp %s from remotehost %s %s\n\n"
+                % (remote_file, e.__class__, e)
+            )
             # Pause for 2 seconds allowing Paramiko to finish error processing before next fetch.
             # Without the delay after SCPException,
             #    next fetch will get 'paramiko.ssh_exception.SSHException'> Channel closed Error.
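
The ssh_utility.py changes above are pure re-wrapping; for orientation, the login/exec pattern the class implements looks roughly like the following when collapsed into one function. Hostname, credentials, and the command are placeholders, and error handling is trimmed to the exceptions the class actually catches.

    # Compressed sketch of the paramiko pattern used by SSHRemoteclient above;
    # hostname, credentials and the command are placeholders.
    import socket

    import paramiko
    from paramiko.ssh_exception import (
        AuthenticationException,
        BadHostKeyException,
        NoValidConnectionsError,
        SSHException,
    )


    def run_remote(hostname, username, password, command, timeout=60):
        client = paramiko.SSHClient()
        # Accept unknown host keys, as the utility above does.
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        try:
            client.connect(
                hostname=hostname,
                username=username,
                password=password,
                banner_timeout=120,
                timeout=timeout,
                look_for_keys=False,
            )
            stdin, stdout, stderr = client.exec_command(command, timeout=timeout)
            exit_code = stdout.channel.recv_exit_status()
            return exit_code, stderr.read().decode(), stdout.read().decode()
        except (
            BadHostKeyException,
            AuthenticationException,
            SSHException,
            NoValidConnectionsError,
            socket.error,
        ) as e:
            return 1, str(e), ""
        finally:
            client.close()
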
diff --git a/ffdc/lib/telnet_utility.py b/ffdc/lib/telnet_utility.py
index 08e4071..03f7983 100644
--- a/ffdc/lib/telnet_utility.py
+++ b/ffdc/lib/telnet_utility.py
@@ -1,21 +1,21 @@
 #!/usr/bin/env python3
 
 
-import time
-import socket
 import logging
+import socket
 import telnetlib
+import time
 from collections import deque
 
 
 class TelnetRemoteclient:
-
     r"""
     Class to create telnet connection to remote host for command execution.
     """
 
-    def __init__(self, hostname, username, password, port=23, read_timeout=None):
-
+    def __init__(
+        self, hostname, username, password, port=23, read_timeout=None
+    ):
         r"""
         Description of argument(s):
 
@@ -33,23 +33,35 @@
         self.read_timeout = read_timeout
 
     def tn_remoteclient_login(self):
-
         is_telnet = True
         try:
-            self.tnclient = telnetlib.Telnet(self.hostname, self.port, timeout=15)
-            if b'login:' in self.tnclient.read_until(b'login:', timeout=self.read_timeout):
-                self.tnclient.write(self.username.encode('utf-8') + b"\n")
+            self.tnclient = telnetlib.Telnet(
+                self.hostname, self.port, timeout=15
+            )
+            if b"login:" in self.tnclient.read_until(
+                b"login:", timeout=self.read_timeout
+            ):
+                self.tnclient.write(self.username.encode("utf-8") + b"\n")
 
-                if b'Password:' in self.tnclient.read_until(b'Password:', timeout=self.read_timeout):
-                    self.tnclient.write(self.password.encode('utf-8') + b"\n")
+                if b"Password:" in self.tnclient.read_until(
+                    b"Password:", timeout=self.read_timeout
+                ):
+                    self.tnclient.write(self.password.encode("utf-8") + b"\n")
 
-                    n, match, pre_match = \
-                        self.tnclient.expect(
-                            [b'Login incorrect', b'invalid login name or password.', br'\#', br'\$'],
-                            timeout=self.read_timeout)
+                    n, match, pre_match = self.tnclient.expect(
+                        [
+                            b"Login incorrect",
+                            b"invalid login name or password.",
+                            rb"\#",
+                            rb"\$",
+                        ],
+                        timeout=self.read_timeout,
+                    )
                     if n == 0 or n == 1:
                         logging.error(
-                            "\n\tERROR: Telnet Authentication Failed.  Check userid and password.\n\n")
+                            "\n\tERROR: Telnet Authentication Failed.  Check"
+                            " userid and password.\n\n"
+                        )
                         is_telnet = False
                     else:
                         # login successful
@@ -76,17 +88,15 @@
             # the telnet object might not exist yet, so ignore this one
             pass
 
-    def execute_command(self, cmd,
-                        i_timeout=120):
+    def execute_command(self, cmd, i_timeout=120):
+        r"""
+        Executes commands on the remote host
 
-        r'''
-            Executes commands on the remote host
-
-            Description of argument(s):
-            cmd             Command to run on remote host
-            i_timeout       Timeout for command output
-                            default is 120 seconds
-        '''
+        Description of argument(s):
+        cmd             Command to run on remote host
+        i_timeout       Timeout for command output
+                        default is 120 seconds
+        """
 
         # Wait time for command execution before reading the output.
         # Use user input wait time for command execution if one exists.
@@ -97,23 +107,22 @@
             execution_time = 120
 
         # Execute the command and read the command output.
-        return_buffer = b''
+        return_buffer = b""
         try:
-
             # Do at least one non-blocking read.
             #  to flush whatever data is in the read buffer.
             while self.tnclient.read_very_eager():
                 continue
 
             # Execute the command
-            self.tnclient.write(cmd.encode('utf-8') + b'\n')
+            self.tnclient.write(cmd.encode("utf-8") + b"\n")
             time.sleep(execution_time)
 
-            local_buffer = b''
+            local_buffer = b""
             # Read the command output one block at a time.
             return_buffer = self.tnclient.read_very_eager()
             while return_buffer:
-                local_buffer = b''.join([local_buffer, return_buffer])
+                local_buffer = b"".join([local_buffer, return_buffer])
                 time.sleep(3)  # let the buffer fill up a bit
                 return_buffer = self.tnclient.read_very_eager()
         except (socket.error, EOFError) as e:
@@ -129,4 +138,4 @@
             logging.error("\t\t ERROR %s " % msg)
 
         # Return ASCII string data with ending PROMPT stripped
-        return local_buffer.decode('ascii', 'ignore').replace('$ ', '\n')
+        return local_buffer.decode("ascii", "ignore").replace("$ ", "\n")
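
The telnet login sequence re-wrapped above follows the usual read_until/write/expect pattern. Here is a compressed sketch with placeholder prompts and credentials; note that telnetlib is deprecated since Python 3.11 and removed in 3.13, so it applies only to the interpreters the repository currently supports.

    # Compressed sketch of the telnetlib login flow above; prompts and
    # credentials are placeholders.
    import telnetlib


    def telnet_login(hostname, username, password, port=23, read_timeout=10):
        tn = telnetlib.Telnet(hostname, port, timeout=15)
        tn.read_until(b"login:", timeout=read_timeout)
        tn.write(username.encode("utf-8") + b"\n")
        tn.read_until(b"Password:", timeout=read_timeout)
        tn.write(password.encode("utf-8") + b"\n")
        idx, _match, _before = tn.expect(
            [b"Login incorrect", rb"\#", rb"\$"], timeout=read_timeout
        )
        # Index 0 means the banner reported an authentication failure;
        # a timeout (index -1) is also treated as failure.
        return tn if idx > 0 else None
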
diff --git a/ffdc/plugins/date_time_utils.py b/ffdc/plugins/date_time_utils.py
index 65bcb88..f3787bd 100644
--- a/ffdc/plugins/date_time_utils.py
+++ b/ffdc/plugins/date_time_utils.py
@@ -22,7 +22,11 @@
     if isinstance(date_str, list):
         tmp_date = []
         for date in date_str:
-            tmp_date.append(datetime.strptime(date, date_format).strftime(desired_format))
+            tmp_date.append(
+                datetime.strptime(date, date_format).strftime(desired_format)
+            )
         return tmp_date
     else:
-        return datetime.strptime(date_str, date_format).strftime(desired_format)
+        return datetime.strptime(date_str, date_format).strftime(
+            desired_format
+        )
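
The conversion helper above accepts either a single date string or a list of them. An equivalent standalone sketch follows; the function name and the default format strings here are illustrative, not values taken from the FFDC YAML configs.

    # Equivalent sketch of the date conversion helper above.
    from datetime import datetime


    def convert_date(date_str, date_format="%Y-%m-%d", desired_format="%d %b %Y"):
        if isinstance(date_str, list):
            return [
                datetime.strptime(d, date_format).strftime(desired_format)
                for d in date_str
            ]
        return datetime.strptime(date_str, date_format).strftime(desired_format)


    print(convert_date("2022-05-12"))                  # 12 May 2022
    print(convert_date(["2022-05-12", "2022-06-01"]))  # ['12 May 2022', '01 Jun 2022']
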
diff --git a/ffdc/plugins/redfish.py b/ffdc/plugins/redfish.py
index 0ea7a00..74f1370 100644
--- a/ffdc/plugins/redfish.py
+++ b/ffdc/plugins/redfish.py
@@ -4,13 +4,13 @@
 This module contains functions having to do with redfish path walking.
 """
 
+import json
 import os
 import subprocess
-import json
 
 ERROR_RESPONSE = {
-    "404": 'Response Error: status_code: 404 -- Not Found',
-    "500": 'Response Error: status_code: 500 -- Internal Server Error',
+    "404": "Response Error: status_code: 404 -- Not Found",
+    "500": "Response Error: status_code: 500 -- Internal Server Error",
 }
 
 # Variable to hold enumerated data.
@@ -29,15 +29,17 @@
     parms_string         Command to execute from the current SHELL.
     quiet                do not print tool error message if True
     """
-    resp = subprocess.run([parms],
-                          stdout=subprocess.PIPE,
-                          stderr=subprocess.PIPE,
-                          shell=True,
-                          universal_newlines=True)
+    resp = subprocess.run(
+        [parms],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        shell=True,
+        universal_newlines=True,
+    )
 
     if resp.stderr:
-        print('\n\t\tERROR with %s ' % parms)
-        print('\t\t' + resp.stderr)
+        print("\n\t\tERROR with %s " % parms)
+        print("\t\t" + resp.stderr)
         return resp.stderr
     elif json_type == "json":
         json_data = json.loads(resp.stdout)
@@ -57,8 +59,15 @@
                       returned as a json string or as a
                       dictionary.
     """
-    parms = 'redfishtool -u ' + username + ' -p ' + password + ' -r ' + \
-        hostname + ' -S Always raw GET '
+    parms = (
+        "redfishtool -u "
+        + username
+        + " -p "
+        + password
+        + " -r "
+        + hostname
+        + " -S Always raw GET "
+    )
 
     pending_enumeration.add(url)
 
@@ -74,31 +83,37 @@
             # Example: '/redfish/v1/JsonSchemas/' and sub resources.
             #          '/redfish/v1/SessionService'
             #          '/redfish/v1/Managers/bmc#/Oem'
-            if ('JsonSchemas' in resource) or ('SessionService' in resource)\
-                    or ('PostCodes' in resource) or ('Registries' in resource)\
-                    or ('#' in resource):
+            if (
+                ("JsonSchemas" in resource)
+                or ("SessionService" in resource)
+                or ("PostCodes" in resource)
+                or ("Registries" in resource)
+                or ("#" in resource)
+            ):
                 continue
 
             response = execute_redfish_cmd(parms + resource)
             # Enumeration is done for available resources ignoring the
             # ones for which response is not obtained.
-            if 'Error getting response' in response:
+            if "Error getting response" in response:
                 continue
 
             walk_nested_dict(response, url=resource)
 
         enumerated_resources.update(set(resources_to_be_enumerated))
-        resources_to_be_enumerated = \
-            tuple(pending_enumeration - enumerated_resources)
+        resources_to_be_enumerated = tuple(
+            pending_enumeration - enumerated_resources
+        )
 
     if return_json == "json":
-        return json.dumps(result, sort_keys=True,
-                          indent=4, separators=(',', ': '))
+        return json.dumps(
+            result, sort_keys=True, indent=4, separators=(",", ": ")
+        )
     else:
         return result
 
 
-def walk_nested_dict(data, url=''):
+def walk_nested_dict(data, url=""):
     r"""
     Parse through the nested dictionary and get the resource id paths.
 
@@ -106,25 +121,24 @@
     data    Nested dictionary data from response message.
     url     Resource for which the response is obtained in data.
     """
-    url = url.rstrip('/')
+    url = url.rstrip("/")
 
     for key, value in data.items():
-
         # Recursion if nested dictionary found.
         if isinstance(value, dict):
             walk_nested_dict(value)
         else:
             # Value contains a list of dictionaries having member data.
-            if 'Members' == key:
+            if "Members" == key:
                 if isinstance(value, list):
                     for memberDict in value:
                         if isinstance(memberDict, str):
                             pending_enumeration.add(memberDict)
                         else:
-                            pending_enumeration.add(memberDict['@odata.id'])
+                            pending_enumeration.add(memberDict["@odata.id"])
 
-            if '@odata.id' == key:
-                value = value.rstrip('/')
+            if "@odata.id" == key:
+                value = value.rstrip("/")
                 # Data for the given url.
                 if value == url:
                     result[url] = data
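
The enumeration loop above is a worklist algorithm: fetch every pending resource, record its members as newly pending resources, and stop once nothing new appears. Below is a self-contained sketch in which fetch() stands in for execute_redfish_cmd() and walks an in-memory dict instead of calling redfishtool.

    # Sketch of the worklist pattern used by the Redfish enumeration above.
    def enumerate_resources(root, fetch):
        enumerated = set()
        pending = {root}
        result = {}
        while pending - enumerated:
            for resource in tuple(pending - enumerated):
                response = fetch(resource)
                enumerated.add(resource)
                if response is None:
                    # Ignore resources for which no response is obtained.
                    continue
                result[resource] = response
                for member in response.get("Members", []):
                    pending.add(member["@odata.id"])
        return result


    tree = {
        "/redfish/v1": {"Members": [{"@odata.id": "/redfish/v1/Systems"}]},
        "/redfish/v1/Systems": {"Members": []},
    }
    print(sorted(enumerate_resources("/redfish/v1", tree.get)))
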
diff --git a/ffdc/plugins/scp_execution.py b/ffdc/plugins/scp_execution.py
index 3b4767a..f44735a 100644
--- a/ffdc/plugins/scp_execution.py
+++ b/ffdc/plugins/scp_execution.py
@@ -8,7 +8,7 @@
 # Absolute path to this plugin
 abs_path = os.path.abspath(os.path.dirname(sys.argv[0]))
 # full_path to plugins parent directory
-full_path = abs_path.split('plugins')[0]
+full_path = abs_path.split("plugins")[0]
 sys.path.append(full_path)
 # Walk path and append to sys.path
 for root, dirs, files in os.walk(full_path):
@@ -19,33 +19,28 @@
 from ssh_utility import SSHRemoteclient  # NOQA
 
 
-def scp_remote_file(hostname,
-                    username,
-                    password,
-                    filename,
-                    local_dir_path):
+def scp_remote_file(hostname, username, password, filename, local_dir_path):
     r"""
-        Description of argument(s):
+    Description of argument(s):
 
-        hostname        Name/IP of the remote (targeting) host
-        username        User on the remote host with access to files
-        password        Password for user on remote host
-        filename        Filename with full path on remote host
-                        Filename can contain wild cards for multiple files
-        local_dir_path  Location to store file on local host
+    hostname        Name/IP of the remote (targeting) host
+    username        User on the remote host with access to files
+    password        Password for user on remote host
+    filename        Filename with full path on remote host
+                    Filename can contain wild cards for multiple files
+    local_dir_path  Location to store file on local host
     """
-    ssh_remoteclient = SSHRemoteclient(hostname,
-                                       username,
-                                       password)
+    ssh_remoteclient = SSHRemoteclient(hostname, username, password)
 
     if ssh_remoteclient.ssh_remoteclient_login():
-
         # Obtain scp connection.
         ssh_remoteclient.scp_connection()
         if ssh_remoteclient.scpclient:
             if isinstance(filename, list):
                 for each_file in filename:
-                    ssh_remoteclient.scp_file_from_remote(each_file, local_dir_path)
+                    ssh_remoteclient.scp_file_from_remote(
+                        each_file, local_dir_path
+                    )
             else:
                 ssh_remoteclient.scp_file_from_remote(filename, local_dir_path)
 
diff --git a/ffdc/plugins/shell_execution.py b/ffdc/plugins/shell_execution.py
index 817dc1e..91a42b2 100644
--- a/ffdc/plugins/shell_execution.py
+++ b/ffdc/plugins/shell_execution.py
@@ -10,14 +10,16 @@
     quiet                do not print tool error message if True
     """
 
-    result = subprocess.run([parms_string],
-                            stdout=subprocess.PIPE,
-                            stderr=subprocess.PIPE,
-                            shell=True,
-                            universal_newlines=True)
+    result = subprocess.run(
+        [parms_string],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        shell=True,
+        universal_newlines=True,
+    )
 
     if result.stderr and not quiet:
-        print('\n\t\tERROR with %s ' % parms_string)
-        print('\t\t' + result.stderr)
+        print("\n\t\tERROR with %s " % parms_string)
+        print("\t\t" + result.stderr)
 
     return result.stdout
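
The subprocess call above keeps the explicit PIPE/universal_newlines spelling. On Python 3.7 and later the same behaviour can be written with capture_output and text, as in this sketch; "echo hello" is a harmless placeholder command.

    # The same call using the Python 3.7+ shorthand.
    import subprocess


    def execute_cmd(parms_string, quiet=False):
        result = subprocess.run(
            parms_string,
            shell=True,
            capture_output=True,  # equivalent to stdout=PIPE, stderr=PIPE
            text=True,            # equivalent to universal_newlines=True
        )
        if result.stderr and not quiet:
            print("\n\t\tERROR with %s " % parms_string)
            print("\t\t" + result.stderr)
        return result.stdout


    print(execute_cmd("echo hello"))
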
diff --git a/ffdc/plugins/ssh_execution.py b/ffdc/plugins/ssh_execution.py
index 8623918..fd76583 100644
--- a/ffdc/plugins/ssh_execution.py
+++ b/ffdc/plugins/ssh_execution.py
@@ -8,7 +8,7 @@
 # Absolute path to this plugin
 abs_path = os.path.abspath(os.path.dirname(sys.argv[0]))
 # full_path to plugins parent directory
-full_path = abs_path.split('plugins')[0]
+full_path = abs_path.split("plugins")[0]
 sys.path.append(full_path)
 # Walk path and append to sys.path
 for root, dirs, files in os.walk(full_path):
@@ -19,44 +19,39 @@
 from ssh_utility import SSHRemoteclient  # NOQA
 
 
-def ssh_execute_cmd(hostname,
-                    username,
-                    password,
-                    command,
-                    timeout=60,
-                    type=None):
+def ssh_execute_cmd(
+    hostname, username, password, command, timeout=60, type=None
+):
     r"""
-        Description of argument(s):
+    Description of argument(s):
 
-        hostname        Name/IP of the remote (targeting) host
-        username        User on the remote host with access to FFCD files
-        password        Password for user on remote host
-        command         Command to run on remote host
-        timeout         Time, in second, to wait for command completion
-        type            Data type return as list or others.
+    hostname        Name/IP of the remote (targeting) host
+    username        User on the remote host with access to FFDC files
+    password        Password for user on remote host
+    command         Command to run on remote host
+    timeout         Time, in seconds, to wait for command completion
+    type            Data type to return: list or other.
     """
-    ssh_remoteclient = SSHRemoteclient(hostname,
-                                       username,
-                                       password)
+    ssh_remoteclient = SSHRemoteclient(hostname, username, password)
 
     cmd_exit_code = 0
-    err = ''
-    response = ''
+    err = ""
+    response = ""
     if ssh_remoteclient.ssh_remoteclient_login():
-
         """
         cmd_exit_code: command exit status from remote host
         err: stderr from remote host
         response: stdout from remote host
         """
-        cmd_exit_code, err, response = \
-            ssh_remoteclient.execute_command(command, int(timeout))
+        cmd_exit_code, err, response = ssh_remoteclient.execute_command(
+            command, int(timeout)
+        )
 
     # Close ssh session
     if ssh_remoteclient:
         ssh_remoteclient.ssh_remoteclient_disconnect()
 
     if type == "list":
-        return response.split('\n')
+        return response.split("\n")
     else:
         return response
diff --git a/ffdc/plugins/telnet_execution.py b/ffdc/plugins/telnet_execution.py
index d55d18a..08aaf02 100644
--- a/ffdc/plugins/telnet_execution.py
+++ b/ffdc/plugins/telnet_execution.py
@@ -8,7 +8,7 @@
 # Absolute path to this plugin
 abs_path = os.path.abspath(os.path.dirname(sys.argv[0]))
 # full_path to plugins parent directory
-full_path = abs_path.split('plugins')[0]
+full_path = abs_path.split("plugins")[0]
 sys.path.append(full_path)
 # Walk path and append to sys.path
 for root, dirs, files in os.walk(full_path):
@@ -18,28 +18,21 @@
 from telnet_utility import TelnetRemoteclient  # NOQA
 
 
-def telnet_execute_cmd(hostname,
-                       username,
-                       password,
-                       command,
-                       timeout=60):
+def telnet_execute_cmd(hostname, username, password, command, timeout=60):
     r"""
-        Description of argument(s):
+    Description of argument(s):
 
-        hostname        Name/IP of the remote (targeting) host
-        username        User on the remote host with access to FFCD files
-        password        Password for user on remote host
-        command         Command to run on remote host
-        timeout         Time, in second, to wait for command completion
+    hostname        Name/IP of the remote (targeting) host
+    username        User on the remote host with access to FFDC files
+    password        Password for user on remote host
+    command         Command to run on remote host
+    timeout         Time, in seconds, to wait for command completion
     """
-    telnet_remoteclient = TelnetRemoteclient(hostname,
-                                             username,
-                                             password)
-    result = ''
+    telnet_remoteclient = TelnetRemoteclient(hostname, username, password)
+    result = ""
     if telnet_remoteclient.tn_remoteclient_login():
         # result: stdout from remote host
-        result = \
-            telnet_remoteclient.execute_command(command, timeout)
+        result = telnet_remoteclient.execute_command(command, timeout)
 
     # Close telnet session
     if telnet_remoteclient:
diff --git a/ffdc/setup.py b/ffdc/setup.py
index f8c6b18..ef360af 100644
--- a/ffdc/setup.py
+++ b/ffdc/setup.py
@@ -1,16 +1,12 @@
 from setuptools import setup
+
 setup(
-    name='ffdc',
-    version='0.1',
-    description=("A standalone script to collect logs from a given system."),
-    py_modules=['install'],
-    install_requires=[
-        'click',
-        'PyYAML',
-        'paramiko',
-        'redfishtool'
-    ],
+    name="ffdc",
+    version="0.1",
+    description="A standalone script to collect logs from a given system.",
+    py_modules=["install"],
+    install_requires=["click", "PyYAML", "paramiko", "redfishtool"],
     entry_points={
-        'console_scripts': ['collectFFDC=commands.install_cmd:main']
-    }
+        "console_scripts": ["collectFFDC=commands.install_cmd:main"]
+    },
 )
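
For reference, the console_scripts entry above means that after installation (e.g. via pip install .) the generated collectFFDC wrapper reduces to roughly the following; commands.install_cmd is assumed to be provided elsewhere in the package, and the exact shim pip writes differs slightly.

    # Approximate body of the generated "collectFFDC" wrapper script.
    import sys

    from commands.install_cmd import main

    if __name__ == "__main__":
        sys.exit(main())
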