cisagov / cyhy-feeds · build 5428666627 (pending completion)

Triggered by a push via GitHub (committer: web-flow)
Merge pull request #45 from cisagov/improvement/avoid-missing-data
Capture past 26 hours of data instead of 24 in `cyhy-data-extract.py`

Coverage summary:
  0 of 2 new or added lines in 2 files covered (0.0%)
  40 existing lines in 1 file now uncovered
  0 of 315 relevant lines covered (0.0%)
  0.0 hits per line
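The change covered by this build widens the nightly extraction window from 24 to 26 hours. The minimal sketch below isolates that lookback calculation as it appears in the source listing further down; the standalone extraction_window helper is hypothetical and only illustrative, since the actual script computes the window inline in main().

# Hypothetical helper illustrating the 26-hour lookback used in cyhy-data-extract.py.
from datetime import datetime

from dateutil.relativedelta import relativedelta
import dateutil.tz as tz


def extraction_window(end_of_data_collection=None):
    """Return (start, end) datetimes covering the past 26 hours, in UTC."""
    if end_of_data_collection is None:
        # Flatten "now" to midnight UTC, as the script's flatten_datetime() does.
        end_of_data_collection = datetime.now(tz.tzutc()).replace(
            hour=0, minute=0, second=0, microsecond=0
        )
    # Two hours beyond 24 catch scans that finish shortly before the daily run
    # starts but are only written to the database after it has begun.
    start_of_data_collection = end_of_data_collection + relativedelta(hours=-26)
    return start_of_data_collection, end_of_data_collection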

Source File: /aws_jobs/cyhy-data-extract.py
#!/usr/bin/env python3
"""Create compressed, encrypted, signed extract file with Federal CyHy data for integration with the Weathermap project.

Usage:
  COMMAND_NAME --config CONFIG_FILE [--cyhy-config CYHY_CONFIG] [--scan-config SCAN_CONFIG] [--assessment-config ASSESSMENT_CONFIG] [-v | --verbose] [-a | --aws ] [--cleanup-aws] [--date DATE] [--debug]
  COMMAND_NAME (-h | --help)
  COMMAND_NAME --version

Options:
  -h --help                                                         Show this screen
  --version                                                         Show version
  -x CYHY_CONFIG --cyhy-config=CYHY_CONFIG                          CyHy MongoDB configuration to use
  -y SCAN_CONFIG --scan-config=SCAN_CONFIG                          Scan MongoDB configuration to use
  -z ASSESSMENT_CONFIG --assessment-config=ASSESSMENT_CONFIG        Assessment MongoDB configuration to use
  -v --verbose                                                      Show verbose output
  -a --aws                                                          Output results to S3 bucket
  --cleanup-aws                                                     Delete old files from the S3 bucket
  -c CONFIG_FILE --config=CONFIG_FILE                               Configuration file for this script
  -d DATE --date=DATE                                               Specific date to export data from in form: %Y-%m-%d (eg. 2018-12-31) NOTE that this date is in UTC
  --debug                                                           Enable debug logging

"""

# Standard Python Libraries
from configparser import ConfigParser
from datetime import datetime
import json
import logging
from logging.handlers import RotatingFileHandler
import os
import sys
import tarfile
import time

# Third-Party Libraries
import boto3
import bson
from dateutil.relativedelta import relativedelta
import dateutil.tz as tz
from docopt import docopt
import gnupg  # pip install python-gnupg
import netaddr
from pytz import timezone

# cisagov Libraries
from dmarc import get_dmarc_data
from mongo_db_from_config import db_from_config

# Logging core variables
logger = logging.getLogger("cyhy-feeds")
LOG_FILE_NAME = "/var/log/cyhy/feeds.log"
LOG_FILE_MAX_SIZE = pow(1024, 2) * 128
LOG_FILE_BACKUP_COUNT = 9
DEFAULT_LOGGER_LEVEL = logging.INFO

BUCKET_NAME = "ncats-moe-data"
DOMAIN = "ncats-moe-data"
HEADER = ""
DEFAULT_ES_RETRIEVE_SIZE = 10000
DAYS_OF_DMARC_REPORTS = 1
PAGE_SIZE = 100000  # Number of documents per query
SAVEFILE_PREFIX = "cyhy_extract_"


def custom_json_handler(obj):
    """Format a provided JSON object."""
    if hasattr(obj, "isoformat"):
        return obj.isoformat()
    elif type(obj) == bson.objectid.ObjectId:
        return repr(obj)
    elif type(obj) == netaddr.IPAddress:
        return str(obj)
    elif type(obj) == netaddr.IPNetwork:
        return str(obj)
    elif type(obj) == netaddr.IPSet:
        return obj.iter_cidrs()
    else:
        raise TypeError(
            "Object of type {} with value of {} is not JSON serializable".format(
                type(obj), repr(obj)
            )
        )


def to_json(obj):
    """Return a string representation of a formatted JSON."""
    return json.dumps(obj, sort_keys=True, indent=4, default=custom_json_handler)


def flatten_datetime(in_datetime):
    """Flatten datetime to day, month, and year only."""
    return in_datetime.replace(hour=0, minute=0, second=0, microsecond=0)


# All logging code is pulled from cyhy-core and tweaked down to this single use-case.
# Since we are still running Python 2 we cannot leverage some of the improvements
# made in the logging library in later versions.
def setup_logging(debug_logging):
    """Set up logging for the script."""
    LOGGER_FORMAT = "%(asctime)-15s %(levelname)s %(name)s - %(message)s"
    formatter = logging.Formatter(LOGGER_FORMAT)
    formatter.converter = time.gmtime  # log times in UTC
    root = logging.getLogger()
    if debug_logging:
        root.setLevel(logging.DEBUG)
    else:
        root.setLevel(DEFAULT_LOGGER_LEVEL)
    file_handler = RotatingFileHandler(
        LOG_FILE_NAME, maxBytes=LOG_FILE_MAX_SIZE, backupCount=LOG_FILE_BACKUP_COUNT
    )
    file_handler.setFormatter(formatter)
    root.addHandler(file_handler)
    logger.debug("Debug mode enabled.")
    return root


def update_bucket(bucket_name, local_file, remote_file_name):
    """Update the s3 bucket with the new contents."""
    s3 = boto3.client("s3")
    s3.upload_file(local_file, bucket_name, remote_file_name)


def create_dummy_files(output_dir):
    """Create dummy files to test cleanup_old_files."""
    for n in range(1, 21):
        dummy_filename = "dummy_file_{!s}.gpg".format(n)
        full_path_dummy_filename = os.path.join(output_dir, dummy_filename)
        # Use open to create files.
        with open(full_path_dummy_filename, "w"):
            pass
        st = os.stat(full_path_dummy_filename)
        # Set file modification time to n days earlier than it was.
        # Note that there are 86400 seconds per day.
        os.utime(full_path_dummy_filename, (st.st_atime, st.st_mtime - (86400 * n)))


def cleanup_old_files(output_dir, file_retention_num_days):
    """Delete any *.gpg files older than file_retention_num_days in the specified output_dir."""
    now_unix = time.time()
    for filename in os.listdir(output_dir):
        # We only care about filenames that end with .gpg
        if filename.endswith(".gpg"):
            full_path_filename = os.path.join(output_dir, filename)
            # Delete the file if its modification time is older than
            # file_retention_num_days.  Note that there are 86400
            # seconds per day.
            file_retention_in_secs = file_retention_num_days * 86400
            if os.stat(full_path_filename).st_mtime < now_unix - file_retention_in_secs:
                # Delete file locally
                os.remove(full_path_filename)


def cleanup_bucket_files(object_retention_days):
    """Delete oldest files if they are older than the provided retention time."""
    retention_time = flatten_datetime(
        datetime.now(tz.tzlocal()) - relativedelta(days=object_retention_days)
    )
    s3 = boto3.client("s3")
    response = None

    while True:
        if response is None:
            response = s3.list_objects_v2(Bucket=BUCKET_NAME, Prefix=SAVEFILE_PREFIX)
        elif response["IsTruncated"] is True:
            response = s3.list_objects_v2(
                Bucket=BUCKET_NAME,
                Prefix=SAVEFILE_PREFIX,
                ContinuationToken=response["NextContinuationToken"],
            )
        else:
            break

        del_list = [
            {"Key": o["Key"]}
            for o in response.get("Contents", [])
            if flatten_datetime(o["LastModified"]) < retention_time
        ]
        # AWS requires a list of objects and an empty list is seen as malformed.
        if len(del_list) > 0:
            del_resp = s3.delete_objects(
                Bucket=BUCKET_NAME, Delete={"Objects": del_list}
            )
            for err in del_resp.get("Errors", []):
                logger.error(
                    "Failed to delete '{}' :: {} - {}\n".format(
                        err["Key"], err["Code"], err["Message"]
                    )
                )


def generate_cursor(collection, parameters):
    """Query collection and return a cursor to be used for data retrieval."""
    # We set no_cursor_timeout so that long retrievals do not cause generated
    # cursors to expire on the MongoDB server. This allows us to generate all cursors
    # up front and then pull results without worrying about a generated cursor
    # timing out on the server.
    return collection.find(
        parameters["query"], parameters["projection"], no_cursor_timeout=True
    )


def query_data(collection, cursor, tbz_file, tbz_filename, end_of_data_collection):
    """Query collection for data matching query and add it to tbz_file."""
    logger.info("Fetching from {} collection...".format(collection))

    json_filename = "{}_{!s}.json".format(
        collection,
        end_of_data_collection.isoformat().replace(":", "").split(".")[0],
    )

    # The previous method converted all documents retrieved into a JSON string at
    # once. This had a very large memory overhead and certain queries would
    # consume enough memory in this process to crash the AWS instance being used
    # before pagination was implemented. We are now retrieving and processing
    # a single document at a time and the memory overhead is drastically lower.
    with open(json_filename, "w") as collection_file:
        collection_file.write("[")

        file_position = collection_file.tell()
        for doc in cursor:
            collection_file.write(to_json([doc])[1:-2])
            file_position = collection_file.tell()
            collection_file.write(",")

        if cursor.retrieved != 0:
            # If we output documents then we have a trailing comma, so we need to
            # roll back the file location to before the comma to overwrite as we finish
            collection_file.seek(file_position)

        collection_file.write("\n]")

    logger.info("Finished writing {} to file.".format(collection))
    tbz_file.add(json_filename)
    logger.info("Added {} to {}".format(json_filename, tbz_filename))
    # Delete file once added to tar
    if os.path.exists(json_filename):
        os.remove(json_filename)
        logger.info("Deleted {} as part of cleanup.".format(json_filename))


def main():
    """Retrieve data, aggregate into a compressed archive, and encrypt it to store or upload to S3."""
    global __doc__
    __doc__ = __doc__.replace("COMMAND_NAME", __file__)
    args = docopt(__doc__, version="0.0.5-rc.1")

    setup_logging(args["--debug"])

    logger.info("Beginning data extraction process.")

    if not (
        args["--cyhy-config"] or args["--scan-config"] or args["--assessment-config"]
    ):
        logger.error("At least one database configuration must be supplied.")
        sys.exit(1)

    if args["--cyhy-config"]:
        logger.debug("Creating connection to cyhy database.")
        cyhy_db = db_from_config(args["--cyhy-config"])
    if args["--scan-config"]:
        logger.debug("Creating connection to scan database.")
        scan_db = db_from_config(args["--scan-config"])
    if args["--assessment-config"]:
        logger.debug("Creating connection to assessment database.")
        assessment_db = db_from_config(args["--assessment-config"])
    now = datetime.now(tz.tzutc())

    # Read parameters in from config file
    config = ConfigParser()
    config.read([args["--config"]])
    ORGS_EXCLUDED = set(config.get("DEFAULT", "FED_ORGS_EXCLUDED").split(","))
    if ORGS_EXCLUDED == {""}:
        ORGS_EXCLUDED = set()
    GNUPG_HOME = config.get("DEFAULT", "GNUPG_HOME")
    RECIPIENTS = config.get("DEFAULT", "RECIPIENTS").split(",")
    SIGNER = config.get("DEFAULT", "SIGNER")
    SIGNER_PASSPHRASE = config.get("DEFAULT", "SIGNER_PASSPHRASE")
    OUTPUT_DIR = config.get("DEFAULT", "OUTPUT_DIR")
    # Files older than this are deleted by cleanup_old_files()
    FILE_RETENTION_NUM_DAYS = int(config.get("DEFAULT", "FILE_RETENTION_NUM_DAYS"))
    ES_REGION = config.get("DMARC", "ES_REGION")
    ES_URL = config.get("DMARC", "ES_URL")
    ES_RETRIEVE_SIZE = int(config.get("DMARC", "ES_RETRIEVE_SIZE"))
    ES_AWS_CONFIG_SECTION_NAME = config.get("DMARC", "ES_AWS_CONFIG_SECTION_NAME")

    # Check if OUTPUT_DIR exists; if not, bail out
    if not os.path.exists(OUTPUT_DIR):
        logger.error("Output directory '{}' does not exist.".format(OUTPUT_DIR))
        sys.exit(1)

    # Set up GPG (used for encrypting and signing)
    gpg = gnupg.GPG(
        gpgbinary="gpg2",
        gnupghome=GNUPG_HOME,
        verbose=args["--verbose"],
        options=["--pinentry-mode", "loopback", "-u", SIGNER],
    )
    gpg.encoding = "utf-8"

    if args["--date"]:
        # Note this date is in UTC timezone
        date_of_data = datetime.strptime(args["--date"], "%Y-%m-%d")
        end_of_data_collection = flatten_datetime(
            timezone("UTC").localize(date_of_data)
        )
    else:
        end_of_data_collection = flatten_datetime(now)

    # Capture the past 26 hours of data in order to include up to 2 hours of
    # data that is saved to the database after the start of this script (which
    # is run daily). We have seen cases where data was scanned 1 hour prior to
    # the start of the script, yet it was not saved to the database until after
    # the script started, so it was excluded from the daily extract files.  We
    # chose 2 extra hours just to be safe. Although this means consecutive daily
    # extracts can have some duplicated data, that is preferable to missing
    # data.
    start_of_data_collection = end_of_data_collection + relativedelta(hours=-26)

    logger.debug(
        "Extracting data from {} to {}.".format(
            start_of_data_collection, end_of_data_collection
        )
    )

    # Create tar/bzip2 file for writing
    tbz_filename = "{}{!s}.tbz".format(
        SAVEFILE_PREFIX,
        end_of_data_collection.isoformat().replace(":", "").split(".")[0],
    )
    tbz_file = tarfile.open(tbz_filename, mode="w:bz2")

    if args["--cyhy-config"]:
        # Get a list of all non-retired orgs
        all_orgs = (
            cyhy_db["requests"]
            .find({"retired": {"$ne": True}}, {"_id": 1})
            .distinct("_id")
        )
        orgs = list(set(all_orgs) - ORGS_EXCLUDED)
    else:
        orgs = []

    default_projection = {"key": False}

    cyhy_collection = {
        "host_scans": {
            "query": {
                "owner": {"$in": orgs},
                "time": {
                    "$gte": start_of_data_collection,
                    "$lt": end_of_data_collection,
                },
            },
            "projection": default_projection,
        },
        "hosts": {
            "query": {
                "owner": {"$in": orgs},
                "last_change": {
                    "$gte": start_of_data_collection,
                    "$lt": end_of_data_collection,
                },
            },
            "projection": default_projection,
        },
        # The kevs collection does not have a field to indicate either
        # initial creation time or time of last modification. As a result we can
        # only pull the entire collection every time an extract is run.
        "kevs": {
            "query": {},
            "projection": default_projection,
        },
        "port_scans": {
            "query": {
                "owner": {"$in": orgs},
                "time": {
                    "$gte": start_of_data_collection,
                    "$lt": end_of_data_collection,
                },
            },
            "projection": default_projection,
        },
        # The requests collection does not have a field to indicate either
        # initial creation time or time of last modification. As a result we can
        # only pull the entire collection every time an extract is run.
        "requests": {
            "query": {},
            "projection": {
                "agency.acronym": True,
                "agency.location": True,
                "agency.name": True,
                "agency.type": True,
                "children": True,
                "networks": True,
                "period_start": True,
                "report_types": True,
                "retired": True,
                "scan_types": True,
                "stakeholder": True,
            },
        },
        "tickets": {
            "query": {
                "owner": {"$in": orgs},
                "last_change": {
                    "$gte": start_of_data_collection,
                    "$lt": end_of_data_collection,
                },
            },
            "projection": default_projection,
        },
        "vuln_scans": {
            "query": {
                "owner": {"$in": orgs},
                "time": {
                    "$gte": start_of_data_collection,
                    "$lt": end_of_data_collection,
                },
            },
            "projection": default_projection,
        },
    }

    scan_collection = {
        "certs": {
            "query": {
                "sct_or_not_before": {
                    "$gte": start_of_data_collection,
                    "$lt": end_of_data_collection,
                }
            },
            "projection": default_projection,
        },
        "https_scan": {
            "query": {
                "scan_date": {
                    "$gte": start_of_data_collection,
                    "$lt": end_of_data_collection,
                }
            },
            "projection": default_projection,
        },
        "sslyze_scan": {
            "query": {
                "scan_date": {
                    "$gte": start_of_data_collection,
                    "$lt": end_of_data_collection,
                }
            },
            "projection": default_projection,
        },
        "trustymail": {
            "query": {
                "scan_date": {
                    "$gte": start_of_data_collection,
                    "$lt": end_of_data_collection,
                }
            },
            "projection": default_projection,
        },
    }

    # Neither collection in the assessment database has fields that indicate an
    # initial creation time or time of last modification. As a result we can only
    # pull the entire collection every time an extract is run.
    assessment_collection = {
        "assessments": {"query": {}, "projection": default_projection},
        "findings": {"query": {}, "projection": default_projection},
    }

    # Get cursors for the results of our queries. Create a tuple of the collection
    # name and the generated cursor to later iterate over for data retrieval. We
    # create cursors all at once to "lock in" the query results to reduce timing
    # issues for data retrieval.
    logger.info("Creating cursors for query results.")
    cursor_list = []
    if args["--cyhy-config"]:
        for collection in cyhy_collection:
            logger.debug("Generating cursor for {}.{}".format(cyhy_db.name, collection))
            cursor_list.append(
                (
                    cyhy_db[collection].name,
                    generate_cursor(cyhy_db[collection], cyhy_collection[collection]),
                )
            )
    if args["--scan-config"]:
        for collection in scan_collection:
            logger.debug("Generating cursor for {}.{}".format(scan_db.name, collection))
            cursor_list.append(
                (
                    scan_db[collection].name,
                    generate_cursor(scan_db[collection], scan_collection[collection]),
                )
            )
    if args["--assessment-config"]:
        for collection in assessment_collection:
            logger.debug(
                "Generating cursor for {}.{}".format(assessment_db.name, collection)
            )
            cursor_list.append(
                (
                    assessment_db[collection].name,
                    generate_cursor(
                        assessment_db[collection], assessment_collection[collection]
                    ),
                )
            )

    # Use our generated cursors to pull data now.
    logger.info("Extracting data from database(s).")
    for collection, cursor in cursor_list:
        query_data(
            collection,
            cursor,
            tbz_file,
            tbz_filename,
            end_of_data_collection,
        )
        # Just to be safe we manually close the cursor.
        cursor.close()

    # Note that we use the elasticsearch AWS profile here
    json_data = to_json(
        get_dmarc_data(
            ES_REGION,
            ES_URL,
            DAYS_OF_DMARC_REPORTS,
            ES_RETRIEVE_SIZE,
            ES_AWS_CONFIG_SECTION_NAME,
        )
    )
    json_filename = "DMARC_{!s}.json".format(
        end_of_data_collection.isoformat().replace(":", "").split(".")[0]
    )
    dmarc_file = open(json_filename, "w")
    dmarc_file.write(json_data)
    dmarc_file.close()
    tbz_file.add(json_filename)
    tbz_file.close()
    if os.path.exists(json_filename):
        os.remove(json_filename)
        logger.info("Deleted {} as part of cleanup.".format(json_filename))

    gpg_file_name = tbz_filename + ".gpg"
    gpg_full_path_filename = os.path.join(OUTPUT_DIR, gpg_file_name)
    # Encrypt (with public keys for all RECIPIENTS) and sign (with
    # SIGNER's private key)
    with open(tbz_filename, "rb") as f:
        status = gpg.encrypt_file(
            f,
            RECIPIENTS,
            armor=False,
            sign=SIGNER,
            passphrase=SIGNER_PASSPHRASE,
            output=gpg_full_path_filename,
        )

    if not status.ok:
        logger.error("GPG Error {} :: {}".format(status.status, status.stderr))
        sys.exit(1)

    logger.info(
        "Encrypted, signed, and compressed JSON data written to file: {}".format(
            gpg_full_path_filename
        )
    )

    if args["--aws"]:
        # send the contents to the s3 bucket
        update_bucket(BUCKET_NAME, gpg_full_path_filename, gpg_file_name)
        logger.info("Upload to AWS bucket complete")

    if os.path.exists(tbz_filename):
        os.remove(tbz_filename)
        logger.info("Deleted {} as part of cleanup.".format(tbz_filename))

    cleanup_old_files(OUTPUT_DIR, FILE_RETENTION_NUM_DAYS)

    if args["--cleanup-aws"]:
        cleanup_bucket_files(FILE_RETENTION_NUM_DAYS)

    logger.info("Finished data extraction process.")


if __name__ == "__main__":
    main()