Fix PEP 8 style using autopep8

Used autopep8 to correct styling.
Ran autopep8 on openbmc-test-automation; autopep8 is not
able to fix all styling issues. There are still around
1,000 styling violations in openbmc-test-automation.
More information on autopep8 can be found here:
https://pypi.python.org/pypi/autopep8

Change-Id: Iddc131da1d74d978eb3dd0fdd6ce5d0a0e49b0f8
Signed-off-by: Gunnar Mills <gmills@us.ibm.com>
diff --git a/tools/github_issues_to_csv b/tools/github_issues_to_csv
index 3053e10..224bad2 100644
--- a/tools/github_issues_to_csv
+++ b/tools/github_issues_to_csv
@@ -45,13 +45,13 @@
 
             # Change the following line to write out additional fields
             csv_out.writerow([labels.encode('utf-8'),
-                             issue.get('title').encode('utf-8'),
-                             issue.get('state').encode('utf-8'),
-                             issue.get('created_at').split('T')[0],
-                             close_date,
-                             issue.get('html_url').encode('utf-8'),
-                             issue.get('user').get('login').encode('utf-8'),
-                             owners, milestone_resp])
+                              issue.get('title').encode('utf-8'),
+                              issue.get('state').encode('utf-8'),
+                              issue.get('created_at').split('T')[0],
+                              close_date,
+                              issue.get('html_url').encode('utf-8'),
+                              issue.get('user').get('login').encode('utf-8'),
+                              owners, milestone_resp])
 
 
 def get_issues_from_github_to_csv(name, response):
@@ -68,11 +68,11 @@
     if 'link' in response.headers:
         pages = {rel[6:-1]: url[url.index('<')+1:-1] for url, rel in
                  (link.split(';') for link in
-                 response.headers['link'].split(','))}
+                  response.headers['link'].split(','))}
         while 'last' in pages and 'next' in pages:
             pages = {rel[6:-1]: url[url.index('<')+1:-1] for url, rel in
                      (link.split(';') for link in
-                     response.headers['link'].split(','))}
+                      response.headers['link'].split(','))}
             response = requests.get(pages['next'], auth=auth)
             write_issues(response, csv_out)
             if pages['next'] == pages['last']: