black: re-format

black and isort are enabled in the openbmc-build-scripts repository for
Python files to enforce consistent formatting.  Re-run the formatters on
the whole repository.
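
For reference, the re-format is roughly equivalent to running both tools
from the repository root; the exact options are supplied by
openbmc-build-scripts, so treat this as an approximation rather than the
precise invocation:

    isort .
    black .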

Signed-off-by: Patrick Williams <patrick@stwcx.xyz>
Change-Id: I944f1915ece753f72a3fa654902d445a9749d0f9
diff --git a/tools/ct_metrics/gen_csv_results.py b/tools/ct_metrics/gen_csv_results.py
index 5f53e86..bbc66d9 100755
--- a/tools/ct_metrics/gen_csv_results.py
+++ b/tools/ct_metrics/gen_csv_results.py
@@ -6,25 +6,25 @@
 http://robot-framework.readthedocs.io/en/3.0/autodoc/robot.result.html
 """
 
-from robot.api import ExecutionResult
-from robot.result.visitor import ResultVisitor
-from xml.etree import ElementTree
-
-import sys
-import os
-import getopt
 import csv
-import robot.errors
+import datetime
+import getopt
+import os
 import re
 import stat
-import datetime
+import sys
+from xml.etree import ElementTree
+
+import robot.errors
+from robot.api import ExecutionResult
+from robot.result.visitor import ResultVisitor
 
 # Remove the python library path to restore with local project path later.
 save_path_0 = sys.path[0]
 del sys.path[0]
 sys.path.append(os.path.join(os.path.dirname(__file__), "../../lib"))
 
-from gen_arg import *    # NOQA
+from gen_arg import *  # NOQA
 from gen_print import *  # NOQA
 from gen_valid import *  # NOQA
 
@@ -33,7 +33,7 @@
 
 
 this_program = sys.argv[0]
-info = " For more information:  " + this_program + '  -h'
+info = " For more information:  " + this_program + "  -h"
 if len(sys.argv) == 1:
     print(info)
     sys.exit(1)
@@ -41,64 +41,88 @@
 
 parser = argparse.ArgumentParser(
     usage=info,
-    description="%(prog)s uses a robot framework API to extract test result\
-    data from output.xml generated by robot tests. For more information on the\
-    Robot Framework API, see\
-    http://robot-framework.readthedocs.io/en/3.0/autodoc/robot.result.html",
+    description=(
+        "%(prog)s uses a robot framework API to extract test result    data"
+        " from output.xml generated by robot tests. For more information on"
+        " the    Robot Framework API, see   "
+        " http://robot-framework.readthedocs.io/en/3.0/autodoc/robot.result.html"
+    ),
     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
-    prefix_chars='-+')
+    prefix_chars="-+",
+)
 
 parser.add_argument(
-    '--source',
-    '-s',
-    help='The output.xml robot test result file path.  This parameter is \
-          required.')
+    "--source",
+    "-s",
+    help=(
+        "The output.xml robot test result file path.  This parameter is       "
+        "    required."
+    ),
+)
 
 parser.add_argument(
-    '--dest',
-    '-d',
-    help='The directory path where the generated .csv files will go.  This \
-          parameter is required.')
+    "--dest",
+    "-d",
+    help=(
+        "The directory path where the generated .csv files will go.  This     "
+        "      parameter is required."
+    ),
+)
 
 parser.add_argument(
-    '--version_id',
-    help='Driver version of openbmc firmware which was used during test,\
-          e.g. "v2.1-215-g6e7eacb".  This parameter is required.')
+    "--version_id",
+    help=(
+        "Driver version of openbmc firmware which was used during test,       "
+        '   e.g. "v2.1-215-g6e7eacb".  This parameter is required.'
+    ),
+)
 
 parser.add_argument(
-    '--platform',
-    help='OpenBMC platform which was used during test,\
-          e.g. "Witherspoon".  This parameter is required.')
+    "--platform",
+    help=(
+        "OpenBMC platform which was used during test,          e.g."
+        ' "Witherspoon".  This parameter is required.'
+    ),
+)
 
 parser.add_argument(
-    '--level',
-    help='OpenBMC release level which was used during test,\
-          e.g. "Master", "OBMC920".  This parameter is required.')
+    "--level",
+    help=(
+        "OpenBMC release level which was used during test,          e.g."
+        ' "Master", "OBMC920".  This parameter is required.'
+    ),
+)
 
 parser.add_argument(
-    '--test_phase',
-    help='Name of testing phase, e.g. "DVT", "SVT", etc.\
-          This parameter is optional.',
-    default="FVT")
+    "--test_phase",
+    help=(
+        'Name of testing phase, e.g. "DVT", "SVT", etc.          This'
+        " parameter is optional."
+    ),
+    default="FVT",
+)
 
 parser.add_argument(
-    '--subsystem',
-    help='Name of the subsystem, e.g. "OPENBMC" etc.\
-          This parameter is optional.',
-    default="OPENBMC")
+    "--subsystem",
+    help=(
+        'Name of the subsystem, e.g. "OPENBMC" etc.          This parameter is'
+        " optional."
+    ),
+    default="OPENBMC",
+)
 
 parser.add_argument(
-    '--processor',
+    "--processor",
     help='Name of processor, e.g. "P9". This parameter is optional.',
-    default="OPENPOWER")
+    default="OPENPOWER",
+)
 
 
 # Populate stock_list with options we want.
 stock_list = [("test_mode", 0), ("quiet", 0), ("debug", 0)]
 
 
-def exit_function(signal_number=0,
-                  frame=None):
+def exit_function(signal_number=0, frame=None):
     r"""
     Execute whenever the program ends normally or with the signals that we
     catch (i.e. TERM, INT).
@@ -111,8 +135,7 @@
     qprint_pgm_footer()
 
 
-def signal_handler(signal_number,
-                   frame):
+def signal_handler(signal_number, frame):
     r"""
     Handle signals.  Without a function to catch a SIGTERM or SIGINT, the
     program would terminate immediately with return code 143 and without
@@ -146,8 +169,15 @@
     return True
 
 
-def parse_output_xml(xml_file_path, csv_dir_path, version_id, platform, level,
-                     test_phase, processor):
+def parse_output_xml(
+    xml_file_path,
+    csv_dir_path,
+    version_id,
+    platform,
+    level,
+    test_phase,
+    processor,
+):
     r"""
     Parse the robot-generated output.xml file and extract various test
     output data. Put the extracted information into a csv file in the "dest"
@@ -175,13 +205,19 @@
     total_non_critical_failed = 0
 
     result = ExecutionResult(xml_file_path)
-    result.configure(stat_config={'suite_stat_level': 2,
-                                  'tag_stat_combine': 'tagANDanother'})
+    result.configure(
+        stat_config={
+            "suite_stat_level": 2,
+            "tag_stat_combine": "tagANDanother",
+        }
+    )
 
     stats = result.statistics
     print("--------------------------------------")
     try:
-        total_critical_tc = stats.total.critical.passed + stats.total.critical.failed
+        total_critical_tc = (
+            stats.total.critical.passed + stats.total.critical.failed
+        )
         total_critical_passed = stats.total.critical.passed
         total_critical_failed = stats.total.critical.failed
     except AttributeError:
@@ -194,7 +230,9 @@
     except AttributeError:
         pass
 
-    print("Total Test Count:\t %d" % (total_non_critical_tc + total_critical_tc))
+    print(
+        "Total Test Count:\t %d" % (total_non_critical_tc + total_critical_tc)
+    )
 
     print("Total Critical Test Failed:\t %d" % total_critical_failed)
     print("Total Critical Test Passed:\t %d" % total_critical_passed)
@@ -221,11 +259,11 @@
     # Default Test data
     l_test_type = test_phase
 
-    l_pse_rel = 'Master'
+    l_pse_rel = "Master"
     if level:
         l_pse_rel = level
 
-    l_env = 'HW'
+    l_env = "HW"
     l_proc = processor
     l_platform_type = ""
     l_func_area = ""
@@ -252,14 +290,27 @@
     if l_driver and l_platform_type:
         print("Driver and system info set.")
     else:
-        print("Both driver and system info need to be set.\
-                CSV file is not generated.")
+        print(
+            "Both driver and system info need to be set.                CSV"
+            " file is not generated."
+        )
         sys.exit()
 
     # Default header
-    l_header = ['test_start', 'test_end', 'subsys', 'test_type',
-                'test_result', 'test_name', 'pse_rel', 'driver',
-                'env', 'proc', 'platform_type', 'test_func_area']
+    l_header = [
+        "test_start",
+        "test_end",
+        "subsys",
+        "test_type",
+        "test_result",
+        "test_name",
+        "pse_rel",
+        "driver",
+        "env",
+        "proc",
+        "platform_type",
+        "test_func_area",
+    ]
 
     l_csvlist.append(l_header)
 
@@ -274,11 +325,11 @@
     for testcase in collectDataObj.testData:
         # Functional Area: Suite Name
         # Test Name: Test Case Name
-        l_func_area = str(testcase.parent).split(' ', 1)[1]
+        l_func_area = str(testcase.parent).split(" ", 1)[1]
         l_test_name = str(testcase)
 
         # Test Result pass=0 fail=1
-        if testcase.status == 'PASS':
+        if testcase.status == "PASS":
             l_test_result = 0
         else:
             l_test_result = 1
@@ -289,18 +340,36 @@
         # Data Sequence: test_start,test_end,subsys,test_type,
         #                test_result,test_name,pse_rel,driver,
         #                env,proc,platform_type,test_func_area,
-        l_data = [l_stime, l_etime, subsystem, l_test_type, l_test_result,
-                  l_test_name, l_pse_rel, l_driver, l_env, l_proc,
-                  l_platform_type, l_func_area]
+        l_data = [
+            l_stime,
+            l_etime,
+            subsystem,
+            l_test_type,
+            l_test_result,
+            l_test_name,
+            l_pse_rel,
+            l_driver,
+            l_env,
+            l_proc,
+            l_platform_type,
+            l_func_area,
+        ]
         l_csvlist.append(l_data)
 
     # Open the file and write to the CSV file
     l_file = open(l_csvfile, "w")
-    l_writer = csv.writer(l_file, lineterminator='\n')
+    l_writer = csv.writer(l_file, lineterminator="\n")
     l_writer.writerows(l_csvlist)
     l_file.close()
     # Set file permissions 666.
-    perm = stat.S_IRUSR + stat.S_IWUSR + stat.S_IRGRP + stat.S_IWGRP + stat.S_IROTH + stat.S_IWOTH
+    perm = (
+        stat.S_IRUSR
+        + stat.S_IWUSR
+        + stat.S_IRGRP
+        + stat.S_IWGRP
+        + stat.S_IROTH
+        + stat.S_IWOTH
+    )
     os.chmod(l_csvfile, perm)
 
 
@@ -336,19 +405,19 @@
 
     bmc_version_id = ""
     bmc_platform = ""
-    with open(xml_file_path, 'rt') as output:
+    with open(xml_file_path, "rt") as output:
         tree = ElementTree.parse(output)
 
-    for node in tree.iter('msg'):
+    for node in tree.iter("msg"):
         # /etc/os-release output is logged in the XML as msg
         # Example: ${output} = VERSION_ID="v1.99.2-71-gbc49f79"
-        if '${output} = VERSION_ID=' in node.text:
+        if "${output} = VERSION_ID=" in node.text:
             # Get BMC version (e.g. v1.99.1-96-g2a46570)
             bmc_version_id = str(node.text.split("VERSION_ID=")[1])[1:-1]
 
         # Platform is logged in the XML as msg.
         # Example: ${bmc_model} = Witherspoon BMC
-        if '${bmc_model} = ' in node.text:
+        if "${bmc_model} = " in node.text:
             bmc_platform = node.text.split(" = ")[1]
 
     print_vars(bmc_version_id, bmc_platform)
@@ -356,7 +425,6 @@
 
 
 def main():
-
     if not gen_get_options(parser, stock_list):
         return False
 
@@ -365,8 +433,9 @@
 
     qprint_pgm_header()
 
-    parse_output_xml(source, dest, version_id, platform, level,
-                     test_phase, processor)
+    parse_output_xml(
+        source, dest, version_id, platform, level, test_phase, processor
+    )
 
     return True
 
diff --git a/tools/github_issues_to_csv b/tools/github_issues_to_csv
index c287f7c..ce6e1e2 100644
--- a/tools/github_issues_to_csv
+++ b/tools/github_issues_to_csv
@@ -8,10 +8,11 @@
 import argparse
 import csv
 import getpass
+
 import requests
 
 auth = None
-states = 'all'
+states = "all"
 
 
 def write_issues(response, csv_out):
@@ -22,36 +23,45 @@
     if response.status_code != 200:
         raise Exception(response.status_code)
     for issue in response.json():
-        if 'pull_request' not in issue:
-            labels = ', '.join([lable['name'] for lable in issue['labels']])
+        if "pull_request" not in issue:
+            labels = ", ".join([lable["name"] for lable in issue["labels"]])
 
             # Below lines to overcome "TypeError: 'NoneType' object has
             # no attribute '__getitem__'"
 
-            close_date = issue.get('closed_at')
+            close_date = issue.get("closed_at")
             if close_date:
-                close_date = issue.get('closed_at').split('T')[0]
+                close_date = issue.get("closed_at").split("T")[0]
 
-            assignee_resp = issue.get('assignees', 'Not Assigned')
+            assignee_resp = issue.get("assignees", "Not Assigned")
             if assignee_resp:
-                owners = ','.join([assignee_login['login'] for
-                                   assignee_login in assignee_resp])
+                owners = ",".join(
+                    [
+                        assignee_login["login"]
+                        for assignee_login in assignee_resp
+                    ]
+                )
             else:
                 owners = "Not Assigned"
 
-            milestone_resp = issue.get('milestone', 'Not Assigned')
+            milestone_resp = issue.get("milestone", "Not Assigned")
             if milestone_resp:
-                milestone_resp = milestone_resp['title'].encode('utf-8')
+                milestone_resp = milestone_resp["title"].encode("utf-8")
 
             # Change the following line to write out additional fields
-            csv_out.writerow([labels.encode('utf-8'),
-                              issue.get('title').encode('utf-8'),
-                              issue.get('state').encode('utf-8'),
-                              issue.get('created_at').split('T')[0],
-                              close_date,
-                              issue.get('html_url').encode('utf-8'),
-                              issue.get('user').get('login').encode('utf-8'),
-                              owners, milestone_resp])
+            csv_out.writerow(
+                [
+                    labels.encode("utf-8"),
+                    issue.get("title").encode("utf-8"),
+                    issue.get("state").encode("utf-8"),
+                    issue.get("created_at").split("T")[0],
+                    close_date,
+                    issue.get("html_url").encode("utf-8"),
+                    issue.get("user").get("login").encode("utf-8"),
+                    owners,
+                    milestone_resp,
+                ]
+            )
 
 
 def get_issues_from_github_to_csv(name, response):
@@ -65,36 +75,49 @@
     print(states)
 
     # Multiple requests are required if response is paged
-    if 'link' in response.headers:
-        pages = {rel[6:-1]: url[url.index('<') + 1:-1] for url, rel in
-                 (link.split(';') for link in
-                  response.headers['link'].split(','))}
-        while 'last' in pages and 'next' in pages:
-            pages = {rel[6:-1]: url[url.index('<') + 1:-1] for url, rel in
-                     (link.split(';') for link in
-                      response.headers['link'].split(','))}
-            response = requests.get(pages['next'], auth=auth)
+    if "link" in response.headers:
+        pages = {
+            rel[6:-1]: url[url.index("<") + 1 : -1]
+            for url, rel in (
+                link.split(";") for link in response.headers["link"].split(",")
+            )
+        }
+        while "last" in pages and "next" in pages:
+            pages = {
+                rel[6:-1]: url[url.index("<") + 1 : -1]
+                for url, rel in (
+                    link.split(";")
+                    for link in response.headers["link"].split(",")
+                )
+            }
+            response = requests.get(pages["next"], auth=auth)
             write_issues(response, csv_out)
-            if pages['next'] == pages['last']:
+            if pages["next"] == pages["last"]:
                 break
 
 
-parser = argparse.ArgumentParser(description="Write GitHub repository issues "
-                                             "to CSV file.")
+parser = argparse.ArgumentParser(
+    description="Write GitHub repository issues to CSV file."
+)
 
-parser.add_argument('username', nargs='?', help="GitHub user name, "
-                    "formatted as 'username'")
+parser.add_argument(
+    "username", nargs="?", help="GitHub user name, formatted as 'username'"
+)
 
-parser.add_argument('repositories', nargs='+', help="Repository names, "
-                    "formatted as 'basereponame/repo'")
+parser.add_argument(
+    "repositories",
+    nargs="+",
+    help="Repository names, formatted as 'basereponame/repo'",
+)
 
-parser.add_argument('--all', action='store_true', help="Returns both open "
-                    "and closed issues.")
+parser.add_argument(
+    "--all", action="store_true", help="Returns both open and closed issues."
+)
 
 args = parser.parse_args()
 
 if args.all:
-    state = 'all'
+    state = "all"
 
 username = args.username
 
@@ -105,16 +128,26 @@
 # To set the csv filename
 csvfilename = ""
 for repository in args.repositories:
-    csvfilename_temp = '{}'.format(repository.replace('/', '-'))
+    csvfilename_temp = "{}".format(repository.replace("/", "-"))
     csvfilename = csvfilename + csvfilename_temp
-csvfilename = csvfilename + '-issues.csv'
-with open(csvfilename, 'w') as csvfileout:
+csvfilename = csvfilename + "-issues.csv"
+with open(csvfilename, "w") as csvfileout:
     csv_out = csv.writer(csvfileout)
-    csv_out.writerow(['Labels', 'Title', 'State', 'Open Date',
-                      'Close Date', 'URL', 'Author', 'Assignees',
-                      'Milestone'])
+    csv_out.writerow(
+        [
+            "Labels",
+            "Title",
+            "State",
+            "Open Date",
+            "Close Date",
+            "URL",
+            "Author",
+            "Assignees",
+            "Milestone",
+        ]
+    )
     for repository in args.repositories:
-        l_url = 'https://api.github.com/repos/{}/issues?state={}'
+        l_url = "https://api.github.com/repos/{}/issues?state={}"
         l_url = l_url.format(repository, states)
         response = requests.get(l_url, auth=auth)
         write_issues(response, csv_out)