#!/usr/bin/env vpython
#
# [VPYTHON:BEGIN]
# wheel: <
# name: "infra/python/wheels/google-auth-py2_py3"
# version: "version:1.2.1"
# >
#
# wheel: <
# name: "infra/python/wheels/pyasn1-py2_py3"
# version: "version:0.4.5"
# >
#
# wheel: <
# name: "infra/python/wheels/pyasn1_modules-py2_py3"
# version: "version:0.2.4"
# >
#
# wheel: <
# name: "infra/python/wheels/six"
# version: "version:1.10.0"
# >
#
# wheel: <
# name: "infra/python/wheels/cachetools-py2_py3"
# version: "version:2.0.1"
# >
#
# wheel: <
# name: "infra/python/wheels/rsa-py2_py3"
# version: "version:4.0"
# >
#
# wheel: <
# name: "infra/python/wheels/requests"
# version: "version:2.13.0"
# >
#
# wheel: <
# name: "infra/python/wheels/google-api-python-client-py2_py3"
# version: "version:1.6.2"
# >
#
# wheel: <
# name: "infra/python/wheels/httplib2-py2_py3"
# version: "version:0.12.1"
# >
#
# wheel: <
# name: "infra/python/wheels/oauth2client-py2_py3"
# version: "version:3.0.0"
# >
#
# wheel: <
# name: "infra/python/wheels/uritemplate-py2_py3"
# version: "version:3.0.0"
# >
#
# wheel: <
# name: "infra/python/wheels/google-auth-oauthlib-py2_py3"
# version: "version:0.3.0"
# >
#
# wheel: <
# name: "infra/python/wheels/requests-oauthlib-py2_py3"
# version: "version:1.2.0"
# >
#
# wheel: <
# name: "infra/python/wheels/oauthlib-py2_py3"
# version: "version:3.0.1"
# >
#
# wheel: <
# name: "infra/python/wheels/google-auth-httplib2-py2_py3"
# version: "version:0.0.3"
# >
# [VPYTHON:END]
#
# Copyright 2019 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# generate_stats.py:
#   Checks output of dEQP testers and generates stats using the GDocs API.
#
# prerequisites:
#   https://developers.google.com/sheets/api/quickstart/python
#   Follow the quickstart guide.
#
# usage: generate_stats.py [-h] [--auth_path [AUTH_PATH]] [--spreadsheet [SPREADSHEET]]
#                          [--verbosity [VERBOSITY]]
#
# optional arguments:
#   -h, --help            show this help message and exit
#   --auth_path [AUTH_PATH]
#                         path to directory containing authorization data (credentials.json and
#                         token.pickle). [default=<home>/.auth]
#   --spreadsheet [SPREADSHEET]
#                         ID of the spreadsheet to write stats to. [default
#                         ='1uttk1z8lJ4ZsUY7wMdFauMzUxb048nh5l52zdrAznek']
#   --verbosity [VERBOSITY]
#                         Verbosity of output. Valid options are [DEBUG, INFO, WARNING, ERROR].
#                         [default=INFO]
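#
# Eg. invocation (values shown are illustrative; the spreadsheet ID is the script's default):
#   ./generate_stats.py --auth_path ~/.auth --verbosity DEBUG \
#       --spreadsheet '1uttk1z8lJ4ZsUY7wMdFauMzUxb048nh5l52zdrAznek'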
import argparse
import datetime
import logging
import os
import pickle
import re
import subprocess
import sys
import urllib

from google.auth.transport.requests import Request
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow

####################
# Global Constants #
####################

HOME_DIR = os.path.expanduser('~')
SCRIPT_DIR = sys.path[0]
ROOT_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, '..'))

LOGGER = logging.getLogger('generate_stats')

SCOPES = ['https://www.googleapis.com/auth/spreadsheets']

BOT_NAMES = [
    'mac-angle-amd',
    'mac-angle-intel',
    'win10-angle-x64-nvidia',
    'win10-angle-x64-intel',
    'win7-angle-x64-nvidia',
    'win7-angle-x86-amd',
    'Linux FYI dEQP Release (Intel HD 630)',
    'Linux FYI dEQP Release (NVIDIA)',
    'Android FYI dEQP Release (Nexus 5X)',
    'Android FYI 32 dEQP Vk Release (Pixel 2)',
    'Android FYI 64 dEQP Vk Release (Pixel 2)',
]
BOT_NAME_PREFIX = 'chromium/ci/'
BUILD_LINK_PREFIX = 'https://ci.chromium.org/p/chromium/builders/ci/'

REQUIRED_COLUMNS = ['build_link', 'time', 'date', 'revision', 'angle_revision', 'duplicate']
MAIN_RESULT_COLUMNS = ['Passed', 'Failed', 'Skipped', 'Not Supported', 'Exception', 'Crashed']

INFO_TAG = '*RESULT'

WORKAROUND_FORMATTING_ERROR_STRING = "Still waiting for the following processes to finish:"

######################
# Build Info Parsing #
######################


# Returns a struct with info about the latest successful build given a bot name. Info contains the
# build_name, time, date, angle_revision, and chrome revision.
# Uses: bb ls '<botname>' -n 1 -status success -p
def get_latest_success_build_info(bot_name):
    bb = subprocess.Popen(['bb', 'ls', bot_name, '-n', '1', '-status', 'success', '-p'],
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
    LOGGER.debug("Ran [bb ls '" + bot_name + "' -n 1 -status success -p]")
    out, err = bb.communicate()
    if err:
        raise ValueError("Unexpected error from bb ls: '" + err + "'")
    if not out:
        raise ValueError("Unexpected empty result from bb ls of bot '" + bot_name + "'")
    # Example output (line 1):
    # ci.chromium.org/b/8915280275579996928 SUCCESS 'chromium/ci/Win10 FYI dEQP Release (NVIDIA)/26877'
    # ...
    if 'SUCCESS' not in out:
        raise ValueError("Unexpected result from bb ls: '" + out + "'")
    info = {}
    for line in out.splitlines():
        # The first line holds the build name
        if 'build_name' not in info:
            info['build_name'] = line.strip().split("'")[1]
            # Remove the bot name and prepend the build link
            info['build_link'] = BUILD_LINK_PREFIX + urllib.quote(
                info['build_name'].split(BOT_NAME_PREFIX)[1])
        if 'Created' in line:
            # Example output of line with 'Created':
            # ...
            # Created today at 12:26:39, waited 2.056319s, started at 12:26:41, ran for 1h16m48.14963s, ended at 13:43:30
            # ...
            info['time'] = re.findall(r'[0-9]{1,2}:[0-9]{2}:[0-9]{2}', line.split(',', 1)[0])[0]
            # Format today's date in US format so Sheets can read it properly
            info['date'] = datetime.datetime.now().strftime('%m/%d/%y')
        if 'got_angle_revision' in line:
            # Example output of line with angle revision:
            # ...
            # "parent_got_angle_revision": "8cbd321cafa92ffbf0495e6d0aeb9e1a97940fee",
            # ...
            info['angle_revision'] = filter(str.isalnum, line.split(':')[1])
        if '"revision"' in line:
            # Example output of line with chromium revision:
            # ...
            # "revision": "3b68405a27f1f9590f83ae07757589dba862f141",
            # ...
            info['revision'] = filter(str.isalnum, line.split(':')[1])
    if 'build_name' not in info:
        raise ValueError("Could not find build_name from bot '" + bot_name + "'")
    return info
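# Eg. a successful call might return a struct like the following (values are hypothetical,
# derived from the example bb output above):
# {
#     'build_name': "chromium/ci/Win10 FYI dEQP Release (NVIDIA)/26877",
#     'build_link': 'https://ci.chromium.org/p/chromium/builders/ci/Win10%20FYI%20dEQP%20Release%20%28NVIDIA%29/26877',
#     'time': '12:26:39',
#     'date': '04/23/19',
#     'angle_revision': '8cbd321cafa92ffbf0495e6d0aeb9e1a97940fee',
#     'revision': '3b68405a27f1f9590f83ae07757589dba862f141',
# }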
# Returns a list of step names that we're interested in given a build name. We are interested in
# step names starting with 'angle_'. May raise an exception.
# Uses: bb get '<build_name>' -steps
def get_step_names(build_name):
    bb = subprocess.Popen(['bb', 'get', build_name, '-steps'],
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
    LOGGER.debug("Ran [bb get '" + build_name + "' -steps]")
    out, err = bb.communicate()
    if err:
        raise ValueError("Unexpected error from bb get: '" + err + "'")
    step_names = []
    # Example output (relevant lines to a single step):
    # ...
    # Step "angle_deqp_egl_vulkan_tests on (nvidia-quadro-p400-win10-stable) GPU on Windows on Windows-10" SUCCESS 4m12s Logs: "stdout", "chromium_swarming.summary", "Merge script log", "Flaky failure: dEQP.EGL/info_version (status CRASH,SUCCESS)", "step_metadata"
    # Run on OS: 'Windows-10'<br>Max shard duration: 0:04:07.309848 (shard \#1)<br>Min shard duration: 0:02:26.402128 (shard \#0)<br/>flaky failures [ignored]:<br/>dEQP.EGL/info\_version<br/>
    # * [shard #0 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=9a5999a59d332e55f54f495948d0c9f959e60ed2)
    # * [shard #0 (128.3 sec)](https://chromium-swarm.appspot.com/user/task/446903ae365b8110)
    # * [shard #1 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=d71e1bdd91dee61b536b4057a9222e642bd3809f)
    # * [shard #1 (229.3 sec)](https://chromium-swarm.appspot.com/user/task/446903b7b0d90210)
    # * [shard #2 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=ac9ba85b1cca77774061b87335c077980e1eef85)
    # * [shard #2 (144.5 sec)](https://chromium-swarm.appspot.com/user/task/446903c18e15a010)
    # * [shard #3 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=976d586386864abecf53915fbac3e085f672e30f)
    # * [shard #3 (138.4 sec)](https://chromium-swarm.appspot.com/user/task/446903cc8da0ad10)
    # ...
    for line in out.splitlines():
        if 'Step "angle_' not in line:
            continue
        step_names.append(line.split('"')[1])
    return step_names
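# Eg. for the example bb output above, this returns:
# ['angle_deqp_egl_vulkan_tests on (nvidia-quadro-p400-win10-stable) GPU on Windows on Windows-10']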
# Performs some heuristic validation of the step_info struct returned from a single step log.
# Returns True if valid, False if invalid. May write to stderr.
def validate_step_info(step_info, build_name, step_name):
    print_name = "'" + build_name + "': '" + step_name + "'"
    if not step_info:
        LOGGER.warning('Step info empty for ' + print_name + '\n')
        return False

    if 'Total' in step_info:
        partial_sum_keys = MAIN_RESULT_COLUMNS
        partial_sum_values = [int(step_info[key]) for key in partial_sum_keys if key in step_info]
        computed_total = sum(partial_sum_values)
        if step_info['Total'] != computed_total:
            LOGGER.warning('Step info does not sum to total for ' + print_name + ' | Total: ' +
                           str(step_info['Total']) + ' - Computed total: ' + str(computed_total) +
                           '\n')
    return True


# Returns a struct containing parsed info from a given step log. The info is parsed by looking for
# lines with the following format in stdout:
# '*RESULT: <key>: <value>'
# May write to stderr.
# Uses: bb log '<build_name>' '<step_name>'
def get_step_info(build_name, step_name):
    bb = subprocess.Popen(['bb', 'log', build_name, step_name],
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
    LOGGER.debug("Ran [bb log '" + build_name + "' '" + step_name + "']")
    out, err = bb.communicate()
    if err:
        LOGGER.warning("Unexpected error from bb log '" + build_name + "' '" + step_name +
                       "': '" + err + "'")
        return None
    step_info = {}
    # Example output (relevant lines of stdout):
    # ...
    # *RESULT: Total: 155
    # *RESULT: Passed: 11
    # *RESULT: Failed: 0
    # *RESULT: Skipped: 12
    # *RESULT: Not Supported: 132
    # *RESULT: Exception: 0
    # *RESULT: Crashed: 0
    # *RESULT: Unexpected Passed: 12
    # ...
    append_errors = []
    # Hacky workaround to fix an issue where messages are dropped into the middle of lines by
    # another process:
    # eg.
    # *RESULT: <start_of_result>Still waiting for the following processes to finish:
    # "c:\b\s\w\ir\out\Release\angle_deqp_gles3_tests.exe" --deqp-egl-display-type=angle-vulkan --gtest_flagfile="c:\b\s\w\itlcgdrz\scoped_dir7104_364984996\8ad93729-f679-406d-973b-06b9d1bf32de.tmp" --single-process-tests --test-launcher-batch-limit=400 --test-launcher-output="c:\b\s\w\itlcgdrz\7104_437216092\test_results.xml" --test-launcher-summary-output="c:\b\s\w\iosuk8ai\output.json"
    # <end_of_result>
    #
    # Removes the message, skips the line following it, and then appends the <start_of_result>
    # and <end_of_result> back together.
    workaround_prev_line = ""
    workaround_prev_line_count = 0
    for line in out.splitlines():
        # Skip lines if the workaround still has lines to skip
        if workaround_prev_line_count > 0:
            workaround_prev_line_count -= 1
            continue
        # If there are no more lines to skip and there is a previous <start_of_result> to append,
        # append it and finish the workaround
        elif workaround_prev_line != "":
            line = workaround_prev_line + line
            workaround_prev_line = ""
            workaround_prev_line_count = 0
            LOGGER.debug("Formatting error workaround rebuilt line as: '" + line + "'\n")

        if INFO_TAG not in line:
            continue

        # When the workaround string is detected, start the workaround with 1 line to skip and
        # save the <start_of_result>, but continue the loop until the workaround is finished
        if WORKAROUND_FORMATTING_ERROR_STRING in line:
            workaround_prev_line = line.split(WORKAROUND_FORMATTING_ERROR_STRING)[0]
            workaround_prev_line_count = 1
            continue

        line_columns = line.split(INFO_TAG, 1)[1].split(':')
        if len(line_columns) != 3:
            LOGGER.warning("Line improperly formatted: '" + line + "'\n")
            continue
        key = line_columns[1].strip()
        # If the value is clearly an int, sum it. Otherwise, concatenate it as a string.
        is_int = False
        int_val = 0
        try:
            int_val = int(line_columns[2])
            is_int = True
        except ValueError:
            is_int = False
        if is_int:
            if key not in step_info:
                step_info[key] = 0
            step_info[key] += int_val
        else:
            if key not in step_info:
                step_info[key] = line_columns[2].strip()
            else:
                append_string = '\n' + line_columns[2].strip()
                # Sheets has a limit of 50000 characters per cell, so make sure to stop appending
                # below this limit
                if len(step_info[key]) + len(append_string) < 50000:
                    step_info[key] += append_string
                else:
                    if key not in append_errors:
                        append_errors.append(key)
                        LOGGER.warning("Too many characters in column '" + key +
                                       "'. Output capped.")
    return step_info
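# Eg. for the example stdout above, the returned struct would be:
# {'Total': 155, 'Passed': 11, 'Failed': 0, 'Skipped': 12, 'Not Supported': 132, 'Exception': 0,
#  'Crashed': 0, 'Unexpected Passed': 12}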
# Returns the info for each step run on a given bot_name.
def get_bot_info(bot_name):
    info = get_latest_success_build_info(bot_name)
    info['step_names'] = get_step_names(info['build_name'])

    broken_step_names = []
    for step_name in info['step_names']:
        LOGGER.info("Parsing step '" + step_name + "'...")
        step_info = get_step_info(info['build_name'], step_name)
        if validate_step_info(step_info, info['build_name'], step_name):
            info[step_name] = step_info
        else:
            broken_step_names.append(step_name)
    for step_name in broken_step_names:
        info['step_names'].remove(step_name)
    return info


#####################
# Sheets Formatting #
#####################


# Get an individual spreadsheet based on the spreadsheet id. Returns the result of
# spreadsheets.get(), or throws an exception if the sheet could not open.
def get_spreadsheet(service, spreadsheet_id):
    LOGGER.debug("Called [spreadsheets.get(spreadsheetId='" + spreadsheet_id + "')]")
    request = service.get(spreadsheetId=spreadsheet_id)
    spreadsheet = request.execute()
    if not spreadsheet:
        raise Exception("Did not open spreadsheet '" + spreadsheet_id + "'")
    return spreadsheet


# Returns a nicely formatted string based on the bot_name and step_name
def format_sheet_name(bot_name, step_name):
    # Some tokens should be ignored for readability in the name
    unnecessary_tokens = ['FYI', 'Release', 'Vk', 'dEQP', '(', ')']
    for token in unnecessary_tokens:
        bot_name = bot_name.replace(token, '')
    bot_name = ' '.join(bot_name.strip().split())  # Remove extra spaces
    step_name = re.findall(r'angle\w*', step_name)[0]  # Separate test name
    # Test names are formatted as 'angle_deqp_<frontend>_<backend>_tests'
    new_step_name = ''
    # Put the frontend first
    if '_egl_' in step_name:
        step_name = step_name.replace('_egl_', '_')
        new_step_name += ' EGL'
    if '_gles2_' in step_name:
        step_name = step_name.replace('_gles2_', '_')
        new_step_name += ' GLES 2.0 '
    if '_gles3_' in step_name:
        step_name = step_name.replace('_gles3_', '_')
        new_step_name += ' GLES 3.0 '
    if '_gles31_' in step_name:
        step_name = step_name.replace('_gles31_', '_')
        new_step_name += ' GLES 3.1 '
    # Put the backend second
    if '_d3d9_' in step_name:
        step_name = step_name.replace('_d3d9_', '_')
        new_step_name += ' D3D9 '
    if '_d3d11_' in step_name:
        step_name = step_name.replace('_d3d11_', '_')
        new_step_name += ' D3D11 '
    if '_gl_' in step_name:
        step_name = step_name.replace('_gl_', '_')
        new_step_name += ' Desktop OpenGL '
    if '_gles_' in step_name:
        step_name = step_name.replace('_gles_', '_')
        new_step_name += ' OpenGLES '
    if '_vulkan_' in step_name:
        step_name = step_name.replace('_vulkan_', '_')
        new_step_name += ' Vulkan '
    # Add any remaining keywords from the step name into the formatted name (formatted nicely)
    step_name = step_name.replace('angle_', '_')
    step_name = step_name.replace('_deqp_', '_')
    step_name = step_name.replace('_tests', '_')
    step_name = step_name.replace('_', ' ').strip()
    new_step_name += ' ' + step_name
    new_step_name = ' '.join(new_step_name.strip().split())  # Remove extra spaces
    return new_step_name + ' ' + bot_name
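# Eg. with bot_name 'Win10 FYI dEQP Release (NVIDIA)' and the step
# 'angle_deqp_egl_vulkan_tests on (nvidia-quadro-p400-win10-stable) GPU on Windows on Windows-10',
# this returns 'EGL Vulkan Win10 NVIDIA'.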
# Returns the full list of sheet names that should be populated based on the info struct
def get_sheet_names(info):
    sheet_names = []
    for bot_name in info:
        for step_name in info[bot_name]['step_names']:
            sheet_name = format_sheet_name(bot_name, step_name)
            sheet_names.append(sheet_name)
    return sheet_names


# Returns True if the sheet is found in the spreadsheets object
def sheet_exists(spreadsheet, step_name):
    for sheet in spreadsheet['sheets']:
        if sheet['properties']['title'] == step_name:
            return True
    return False


# Validates the spreadsheets object against the list of sheet names which should appear. Returns a
# list of sheets that need creation.
def validate_sheets(spreadsheet, sheet_names):
    create_sheets = []
    for sheet_name in sheet_names:
        if not sheet_exists(spreadsheet, sheet_name):
            create_sheets.append(sheet_name)
    return create_sheets


# Performs a batch update with a given service, spreadsheet id, and list <object(Request)> of
# updates to do.
def batch_update(service, spreadsheet_id, updates):
    batch_update_request_body = {
        'requests': updates,
    }
    LOGGER.debug("Called [spreadsheets.batchUpdate(spreadsheetId='" + spreadsheet_id +
                 "', body=" + str(batch_update_request_body) + ')]')
    request = service.batchUpdate(spreadsheetId=spreadsheet_id, body=batch_update_request_body)
    request.execute()


# Creates sheets given a service and spreadsheet id based on a list of sheet names input
def create_sheets(service, spreadsheet_id, sheet_names):
    updates = [{'addSheet': {'properties': {'title': sheet_name,}}} for sheet_name in sheet_names]
    batch_update(service, spreadsheet_id, updates)


# Calls a values().batchGet() on the service to find the list of column names from each sheet in
# sheet_names. Returns a dictionary with one list per sheet_name.
def get_headers(service, spreadsheet_id, sheet_names):
    header_ranges = [sheet_name + '!A1:Z' for sheet_name in sheet_names]
    LOGGER.debug("Called [spreadsheets.values().batchGet(spreadsheetId='" + spreadsheet_id +
                 "', ranges=" + str(header_ranges) + ')]')
    request = service.values().batchGet(spreadsheetId=spreadsheet_id, ranges=header_ranges)
    response = request.execute()
    headers = {}
    for k, sheet_name in enumerate(sheet_names):
        if 'values' in response['valueRanges'][k]:
            # Headers are in the first row of values
            headers[sheet_name] = response['valueRanges'][k]['values'][0]
        else:
            headers[sheet_name] = []
    return headers
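# Eg. the returned dictionary might look like the following (sheet names and columns are
# hypothetical; a freshly created sheet maps to an empty list):
# {'EGL Vulkan Win10 NVIDIA': ['build_link', 'time', 'date', 'revision', 'angle_revision',
#                              'duplicate', 'Total', 'Passed'],
#  'GLES 2.0 D3D11 Win10 NVIDIA': []}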
# Calls values().batchUpdate() with supplied list of data <object(ValueRange)> to update on the
# service.
def batch_update_values(service, spreadsheet_id, data):
    batch_update_values_request_body = {
        'valueInputOption': 'USER_ENTERED',  # Helps with formatting of dates
        'data': data,
    }
    LOGGER.debug("Called [spreadsheets.values().batchUpdate(spreadsheetId='" + spreadsheet_id +
                 "', body=" + str(batch_update_values_request_body) + ')]')
    request = service.values().batchUpdate(
        spreadsheetId=spreadsheet_id, body=batch_update_values_request_body)
    request.execute()


# Get the sheetId of a sheet based on its name
def get_sheet_id(spreadsheet, sheet_name):
    for sheet in spreadsheet['sheets']:
        if sheet['properties']['title'] == sheet_name:
            return sheet['properties']['sheetId']
    return -1


# Update the filters on sheets with a 'duplicate' column. Filters out any duplicate rows.
def update_filters(service, spreadsheet_id, headers, info, spreadsheet):
    updates = []
    for bot_name in info:
        for step_name in info[bot_name]['step_names']:
            sheet_name = format_sheet_name(bot_name, step_name)
            duplicate_found = 'duplicate' in headers[sheet_name]
            if duplicate_found:
                sheet_id = get_sheet_id(spreadsheet, sheet_name)
                if sheet_id > -1:
                    updates.append({
                        "setBasicFilter": {
                            "filter": {
                                "range": {
                                    "sheetId": sheet_id,
                                    "startColumnIndex": 0,
                                    "endColumnIndex": len(headers[sheet_name])
                                },
                                "sortSpecs": [{
                                    "dimensionIndex": headers[sheet_name].index('date'),
                                    "sortOrder": "ASCENDING"
                                }],
                                "criteria": {
                                    str(headers[sheet_name].index('duplicate')): {
                                        "hiddenValues":
                                            ["1"]  # Hide rows when duplicate is 1 (true)
                                    }
                                }
                            }
                        }
                    })
    if updates:
        LOGGER.info('Updating sheet filters...')
        batch_update(service, spreadsheet_id, updates)


# Populates the headers with any missing/desired columns based on the info struct, and calls
# batch update to update the corresponding sheets if necessary.
def update_headers(service, spreadsheet_id, headers, info):
    data = []
    sheet_names = []
    for bot_name in info:
        for step_name in info[bot_name]['step_names']:
            if step_name not in info[bot_name]:
                LOGGER.error("Missing info for step name: '" + step_name + "'")
            sheet_name = format_sheet_name(bot_name, step_name)
            headers_stale = False
            # Headers should always contain the following columns
            for req in REQUIRED_COLUMNS:
                if req not in headers[sheet_name]:
                    headers_stale = True
                    headers[sheet_name].append(req)
            # Headers also must contain all the keys seen in this step
            for key in info[bot_name][step_name].keys():
                if key not in headers[sheet_name]:
                    headers_stale = True
                    headers[sheet_name].append(key)
            # Update the Gdoc headers if necessary
            if headers_stale:
                sheet_names.append(sheet_name)
                header_range = sheet_name + '!A1:Z'
                data.append({
                    'range': header_range,
                    'majorDimension': 'ROWS',
                    'values': [headers[sheet_name]]
                })
    if data:
        LOGGER.info('Updating sheet headers...')
        batch_update_values(service, spreadsheet_id, data)


# Calls values().append() to append a list of values to a given sheet.
def append_values(service, spreadsheet_id, sheet_name, values):
    header_range = sheet_name + '!A1:Z'
    insert_data_option = 'INSERT_ROWS'
    value_input_option = 'USER_ENTERED'  # Helps with formatting of dates
    append_values_request_body = {
        'range': header_range,
        'majorDimension': 'ROWS',
        'values': [values],
    }
    LOGGER.debug("Called [spreadsheets.values().append(spreadsheetId='" + spreadsheet_id +
                 "', body=" + str(append_values_request_body) + ", range='" + header_range +
                 "', insertDataOption='" + insert_data_option + "', valueInputOption='" +
                 value_input_option + "')]")
    request = service.values().append(
        spreadsheetId=spreadsheet_id,
        body=append_values_request_body,
        range=header_range,
        insertDataOption=insert_data_option,
        valueInputOption=value_input_option)
    request.execute()


# Formula to determine whether a row is a duplicate of the previous row based on checking the
# columns listed in filter_columns.
# Eg.
# date  | pass | fail
# Jan 1 | 100  | 50
# Jan 2 | 100  | 50
# Jan 3 | 99   | 51
#
# If we want to filter based on only the "pass" and "fail" columns, we generate the following
# formula in the 'duplicate' column: 'IF(B2=B1, IF(C2=C1, 1, 0), 0)'
# This formula is recursively generated for each column in filter_columns, using the column
# position as determined by headers. The formula uses a more generalized form with
# 'INDIRECT(ADDRESS(<row>, <col>))' instead of 'B2', where <row> is ROW() and ROW() - 1, and
# <col> is determined by the column's position in headers.
def generate_duplicate_formula(headers, filter_columns):
    # No more columns, put a 1 in the IF statement true branch
    if len(filter_columns) == 0:
        return '1'
    # Next column is found, generate the formula for duplicate checking, and remove from the list
    # for recursion
    for i in range(len(headers)):
        if headers[i] == filter_columns[0]:
            col = str(i + 1)
            formula = "IF(INDIRECT(ADDRESS(ROW(), " + col + "))=INDIRECT(ADDRESS(ROW() - 1, " + \
                col + "))," + generate_duplicate_formula(headers, filter_columns[1:]) + ",0)"
            return formula
    # Next column not found, remove from recursion but just return whatever the next one is
    return generate_duplicate_formula(headers, filter_columns[1:])


# Helper function to start the recursive call to generate_duplicate_formula
def generate_duplicate_formula_helper(headers):
    filter_columns = MAIN_RESULT_COLUMNS
    formula = generate_duplicate_formula(headers, filter_columns)
    if formula == '1':
        return ''
    else:
        # Final result needs to be prepended with =
        return '=' + formula
# Uses the list of headers and the info struct to come up with a list of values for each step
# from the latest builds.
def update_values(service, spreadsheet_id, headers, info):
    for bot_name in info:
        for step_name in info[bot_name]['step_names']:
            sheet_name = format_sheet_name(bot_name, step_name)
            values = []
            # For each key in the list of headers, either add the corresponding value or add a
            # blank value. It's necessary for the values to match the order of the headers.
            for key in headers[sheet_name]:
                if key in info[bot_name] and key in REQUIRED_COLUMNS:
                    values.append(info[bot_name][key])
                elif key in info[bot_name][step_name]:
                    values.append(info[bot_name][step_name][key])
                elif key == 'duplicate' and key in REQUIRED_COLUMNS:
                    values.append(generate_duplicate_formula_helper(headers[sheet_name]))
                else:
                    values.append('')
            LOGGER.info("Appending new rows to sheet '" + sheet_name + "'...")
            try:
                append_values(service, spreadsheet_id, sheet_name, values)
            except Exception as error:
                LOGGER.warning('%s\n' % str(error))


# Updates the given spreadsheet_id with the info struct passed in.
def update_spreadsheet(service, spreadsheet_id, info):
    LOGGER.info('Opening spreadsheet...')
    spreadsheet = get_spreadsheet(service, spreadsheet_id)
    LOGGER.info('Parsing sheet names...')
    sheet_names = get_sheet_names(info)
    new_sheets = validate_sheets(spreadsheet, sheet_names)
    if new_sheets:
        LOGGER.info('Creating new sheets...')
        create_sheets(service, spreadsheet_id, new_sheets)
    LOGGER.info('Parsing sheet headers...')
    headers = get_headers(service, spreadsheet_id, sheet_names)
    update_headers(service, spreadsheet_id, headers, info)
    update_filters(service, spreadsheet_id, headers, info, spreadsheet)
    update_values(service, spreadsheet_id, headers, info)


#####################
#   Main/helpers    #
#####################


# Loads or creates credentials and connects to the Sheets API. Returns a Spreadsheets object with
# an open connection.
def get_sheets_service(auth_path):
    credentials_path = auth_path + '/credentials.json'
    token_path = auth_path + '/token.pickle'
    creds = None
    if not os.path.exists(auth_path):
        LOGGER.info("Creating auth dir '" + auth_path + "'")
        os.makedirs(auth_path)
    if not os.path.exists(credentials_path):
        raise Exception('Missing credentials.json.\n'
                        'Go to: https://developers.google.com/sheets/api/quickstart/python\n'
                        "Under Step 1, click 'ENABLE THE GOOGLE SHEETS API'\n"
                        "Click 'DOWNLOAD CLIENT CONFIGURATION'\n"
                        'Save to your auth_path (' + auth_path + ') as credentials.json')
    if os.path.exists(token_path):
        with open(token_path, 'rb') as token:
            creds = pickle.load(token)
            LOGGER.info('Loaded credentials from ' + token_path)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            LOGGER.info('Refreshing credentials...')
            creds.refresh(Request())
        else:
            LOGGER.info('Could not find credentials. Requesting new credentials.')
            flow = InstalledAppFlow.from_client_secrets_file(credentials_path, SCOPES)
            creds = flow.run_local_server()
        with open(token_path, 'wb') as token:
            pickle.dump(creds, token)
    service = build('sheets', 'v4', credentials=creds)
    sheets = service.spreadsheets()
    return sheets


# Parse the input to the script
def parse_args():
    parser = argparse.ArgumentParser(os.path.basename(sys.argv[0]))
    parser.add_argument(
        '--auth_path',
        default=HOME_DIR + '/.auth',
        nargs='?',
        help='path to directory containing authorization data '
        '(credentials.json and token.pickle). '
        '[default=<home>/.auth]')
    parser.add_argument(
        '--spreadsheet',
        default='1uttk1z8lJ4ZsUY7wMdFauMzUxb048nh5l52zdrAznek',
        nargs='?',
        help='ID of the spreadsheet to write stats to. '
        "[default='1uttk1z8lJ4ZsUY7wMdFauMzUxb048nh5l52zdrAznek']")
    parser.add_argument(
        '--verbosity',
        default='INFO',
        nargs='?',
        help='Verbosity of output. Valid options are '
        '[DEBUG, INFO, WARNING, ERROR]. '
        '[default=INFO]')
    return parser.parse_args()


# Set up the logging with the right verbosity and output.
def initialize_logging(verbosity):
    handler = logging.StreamHandler()
    formatter = logging.Formatter(fmt='%(levelname)s: %(message)s')
    handler.setFormatter(formatter)
    LOGGER.addHandler(handler)
    if 'DEBUG' in verbosity:
        LOGGER.setLevel(level=logging.DEBUG)
    elif 'INFO' in verbosity:
        LOGGER.setLevel(level=logging.INFO)
    elif 'WARNING' in verbosity:
        LOGGER.setLevel(level=logging.WARNING)
    elif 'ERROR' in verbosity:
        LOGGER.setLevel(level=logging.ERROR)
    else:
        LOGGER.setLevel(level=logging.INFO)


def main():
    os.chdir(ROOT_DIR)
    args = parse_args()
    verbosity = args.verbosity.strip().upper()
    initialize_logging(verbosity)
    auth_path = args.auth_path.replace('\\', '/')
    try:
        service = get_sheets_service(auth_path)
    except Exception as error:
        LOGGER.error('%s\n' % str(error))
        sys.exit(1)

    info = {}
    LOGGER.info('Building info struct...')
    for bot_name in BOT_NAMES:
        LOGGER.info("Parsing bot '" + bot_name + "'...")
        try:
            info[bot_name] = get_bot_info(BOT_NAME_PREFIX + bot_name)
        except Exception as error:
            LOGGER.error('%s\n' % str(error))

    LOGGER.info('Updating sheets...')
    try:
        update_spreadsheet(service, args.spreadsheet, info)
    except Exception as error:
        LOGGER.error('%s\n' % str(error))
        sys.exit(1)

    LOGGER.info('Info was successfully parsed to sheet: https://docs.google.com/spreadsheets/d/' +
                args.spreadsheet)


if __name__ == '__main__':
    sys.exit(main())