• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

cisagov / pe-reports / 5892227966

17 Aug 2023 02:27PM UTC coverage: 33.736% (+7.0%) from 26.737%
5892227966

Pull #565

github

web-flow
Merge 9adf19bbe into 998fa208f
Pull Request #565: Update report generator to use reportlab

93 of 477 branches covered (19.5%)

Branch coverage included in aggregate %.

443 of 1022 new or added lines in 8 files covered. (43.35%)

18 existing lines in 5 files now uncovered.

801 of 2173 relevant lines covered (36.86%)

1.83 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

8.86
/src/pe_reports/pages.py
1
"""Collect and distribute graphical data to readable charts in the presentation."""
2

3
# Standard Python Libraries
4
import datetime
5✔
5
import json
5✔
6
import logging
5✔
7
import os
5✔
8

9
# Third-Party Libraries
10
import pandas as pd
5✔
11

12
from .charts import Charts
5✔
13
from .data.db_query import sanitize_uid
5✔
14

15
# Import Classes
16
from .metrics import (
5✔
17
    Core_Cyber_Six,
18
    Credentials,
19
    Cyber_Six,
20
    Domains_Masqs,
21
    Malware_Vulns,
22
)
23

24
# Setup logging to central
25
LOGGER = logging.getLogger(__name__)
5✔
26

27

28
def credential(
    report_dict,
    trending_start_date,
    start_date,
    end_date,
    org_uid,
    org_code,
    output_directory,
):
    """Build exposed credential page.

    Args:
        report_dict: Accumulator dict of template values; updated in place.
        trending_start_date: Start of the 4-week trending window.
        start_date: First day of the reporting period.
        end_date: Last day of the reporting period.
        org_uid: Organization UID; sanitized before use in queries.
        org_code: Short organization code used in output paths.
        output_directory: Root directory for generated artifacts.

    Returns:
        Tuple of (report_dict, credentials JSON path, credentials XLSX path).
    """
    Credential = Credentials(
        trending_start_date, start_date, end_date, sanitize_uid(org_uid)
    )
    # Build exposed credential stacked bar chart
    width = 16.51
    height = 10
    name = "inc_date_df"
    title = "Trending Exposures by Week"
    x_label = "Week Reported"
    y_label = "Creds Exposed"
    cred_date_chart = Charts(
        Credential.by_week(),
        width,
        height,
        name,
        title,
        x_label,
        y_label,
    )
    cred_date_chart.line_chart()
    # Filter breach details table down to top 15 rows.
    breach_table_row_count = 15
    creds_dict = {
        "breach": Credential.breaches(),
        "creds": Credential.total(),
        "pw_creds": Credential.password(),
        "breach_table": Credential.breach_details()[:breach_table_row_count],
        "breach_appendix": Credential.breach_appendix(),
    }
    report_dict.update(creds_dict)

    # Create Credential Exposure JSON file
    cred_json = f"{output_directory}/{org_code}/compromised_credentials.json"
    cred_dict = Credential.creds_view.to_dict(orient="records")
    final_dict = {"credentials": cred_dict}
    # default=str so dates/timestamps serialize without a custom encoder.
    with open(cred_json, "w") as outfile:
        json.dump(final_dict, outfile, default=str)

    # Create Credential Exposure Excel file.
    # Use the writer as a context manager: ExcelWriter.save() was deprecated
    # in pandas 1.5 and removed in 2.0; the context manager closes/saves on exit.
    cred_xlsx = f"{output_directory}/{org_code}/compromised_credentials.xlsx"
    with pd.ExcelWriter(cred_xlsx, engine="xlsxwriter") as cred_writer:
        Credential.creds_view.to_excel(
            cred_writer, sheet_name="Credentials", index=False
        )

    return report_dict, cred_json, cred_xlsx
83

84

85
def masquerading(
    report_dict, start_date, end_date, org_uid, org_code, output_directory
):
    """Build masquerading page.

    Args:
        report_dict: Accumulator dict of template values; updated in place.
        start_date: First day of the reporting period.
        end_date: Last day of the reporting period.
        org_uid: Organization UID; sanitized before use in queries.
        org_code: Short organization code used in output paths.
        output_directory: Root directory for generated artifacts.

    Returns:
        Tuple of (report_dict, domain-alerts JSON path, domain-alerts XLSX path).
    """
    Domain_Masq = Domains_Masqs(start_date, end_date, sanitize_uid(org_uid))
    # Tables are capped at 10 rows to fit the report layout.
    report_dict.update(
        {
            "domain_table": Domain_Masq.summary()[:10],
            "domain_alerts_table": Domain_Masq.alerts()[:10],
            "suspectedDomains": Domain_Masq.count(),
            "domain_alerts": Domain_Masq.alert_count(),
        }
    )
    # Create Domain Masquerading JSON file
    da_json = f"{output_directory}/{org_code}/domain_alerts.json"
    susp_domains_dict = Domain_Masq.df_mal.to_dict(orient="records")
    dom_alerts_dict = Domain_Masq.alerts_sum().to_dict(orient="records")
    final_dict = {
        "suspected_domains": susp_domains_dict,
        "domain_alerts": dom_alerts_dict,
    }
    # default=str so dates/timestamps serialize without a custom encoder.
    with open(da_json, "w") as outfile:
        json.dump(final_dict, outfile, default=str)

    # Create Domain Masquerading Excel file.
    # Context manager replaces ExcelWriter.save(), which was removed in pandas 2.0.
    da_xlsx = f"{output_directory}/{org_code}/domain_alerts.xlsx"
    with pd.ExcelWriter(da_xlsx, engine="xlsxwriter") as dom_writer:
        Domain_Masq.df_mal.to_excel(
            dom_writer, sheet_name="Suspected Domains", index=False
        )
        Domain_Masq.alerts_sum().to_excel(
            dom_writer, sheet_name="Domain Alerts", index=False
        )
    return report_dict, da_json, da_xlsx
118

119

120
def mal_vuln(report_dict, start_date, end_date, org_uid, org_code, output_directory):
    """Build Malwares and Vulnerabilities page.

    Args:
        report_dict: Accumulator dict of template values; updated in place.
        start_date: First day of the reporting period.
        end_date: Last day of the reporting period.
        org_uid: Organization UID; sanitized before use in queries.
        org_code: Short organization code used in output paths.
        output_directory: Root directory for generated artifacts.

    Returns:
        Tuple of (report_dict, vuln JSON path, all-CVEs dataframe, vuln XLSX path).
    """
    # Sanitize the UID before querying, matching the other page builders.
    Malware_Vuln = Malware_Vulns(start_date, end_date, sanitize_uid(org_uid))
    # Build insecure protocol horizontal bar chart
    width = 16.51
    height = 5.3
    name = "pro_count"
    title = ""
    x_label = "Insecure Protocols"
    y_label = ""
    protocol_chart = Charts(
        Malware_Vuln.protocol_count(),
        width,
        height,
        name,
        title,
        x_label,
        y_label,
    )
    protocol_chart.h_bar()
    # Build unverified vulnerability horizontal bar chart
    width = 16.51
    height = 9
    name = "unverif_vuln_count"
    title = ""
    x_label = "Unverified CVEs"
    y_label = ""
    unverif_vuln_chart = Charts(
        Malware_Vuln.unverified_cv_count(),
        width,
        height,
        name,
        title,
        x_label,
        y_label,
    )
    # Render once; the original called h_bar() twice in a row, regenerating
    # the same chart with no additional effect.
    unverif_vuln_chart.h_bar()
    # Build tables, capped at 10 rows to fit the report layout.
    risky_assets = Malware_Vuln.insecure_protocols()[:10]
    risky_assets.columns = ["Protocol", "IP", "Port"]
    verif_vulns = Malware_Vuln.verif_vulns()[:10]
    verif_vulns.columns = ["CVE", "IP", "Port"]
    risky_ports = Malware_Vuln.risky_ports_count()
    verif_vulns_count = Malware_Vuln.total_verif_vulns()
    unverif_vulns = Malware_Vuln.unverified_vuln_count()
    # Update chevron dictionary
    report_dict.update(
        {
            "verif_vulns": verif_vulns,
            "risky_assets": risky_assets,
            "riskyPorts": risky_ports,
            "verifVulns": verif_vulns_count,
            "unverifVulns": unverif_vulns,
            "verif_vulns_summary": Malware_Vuln.verif_vulns_summary(),
        }
    )
    all_cves_df = Malware_Vuln.all_cves()

    # Create Suspected vulnerability JSON file
    vuln_json = f"{output_directory}/{org_code}/vuln_alerts.json"
    final_dict = {
        "assets": Malware_Vuln.assets_df.to_dict(orient="records"),
        "insecure": Malware_Vuln.insecure_df.to_dict(orient="records"),
        "verified_vulns": Malware_Vuln.vulns_df.to_dict(orient="records"),
    }
    # default=str so dates/timestamps serialize without a custom encoder.
    with open(vuln_json, "w") as outfile:
        json.dump(final_dict, outfile, default=str)

    # Create Suspected vulnerability Excel file.
    # Context manager replaces ExcelWriter.save(), which was removed in pandas 2.0.
    vuln_xlsx = f"{output_directory}/{org_code}/vuln_alerts.xlsx"
    with pd.ExcelWriter(vuln_xlsx, engine="xlsxwriter") as vuln_writer:
        Malware_Vuln.assets_df.to_excel(
            vuln_writer, sheet_name="Assets", index=False
        )
        Malware_Vuln.insecure_df.to_excel(
            vuln_writer, sheet_name="Insecure", index=False
        )
        Malware_Vuln.vulns_df.to_excel(
            vuln_writer, sheet_name="Verified Vulns", index=False
        )
    return (report_dict, vuln_json, all_cves_df, vuln_xlsx)
200

201

202
def dark_web(
    report_dict,
    trending_start_date,
    start_date,
    end_date,
    org_uid,
    all_cves_df,
    soc_med_included,
    org_code,
    output_directory,
):
    """Build Dark Web Mentions page.

    Args:
        report_dict: Accumulator dict of template values; updated in place.
        trending_start_date: Start of the 4-week trending window.
        start_date: First day of the reporting period.
        end_date: Last day of the reporting period.
        org_uid: Organization UID; sanitized before use in queries.
        all_cves_df: Dataframe of all CVEs produced by mal_vuln().
        soc_med_included: Whether social-media data appears in the report.
        org_code: Short organization code used in output paths.
        output_directory: Root directory for generated artifacts.

    Returns:
        Tuple of (report_dict, mentions JSON path, mentions XLSX path).
    """
    # Sanitize the UID before querying, matching the other page builders.
    Cyber6 = Cyber_Six(
        trending_start_date,
        start_date,
        end_date,
        sanitize_uid(org_uid),
        all_cves_df,
        soc_med_included,
    )
    # Build dark web mentions over time line chart
    width = 16.51
    height = 10
    name = "web_only_df_2"
    title = ""
    x_label = "Dark Web Mentions"
    y_label = "Mentions count"
    dark_mentions_chart = Charts(
        Cyber6.dark_web_date(),
        width,
        height,
        name,
        title,
        x_label,
        y_label,
    )
    dark_mentions_chart.line_chart()

    # Limit the number of rows for large dataframes
    dark_web_actors = Cyber6.dark_web_bad_actors()

    social_media = Cyber6.social_media_most_act()
    if not soc_med_included:
        # Keep the columns but drop all rows when social media is excluded.
        social_media = social_media[0:0]
    dark_web_dict = {
        "darkWeb": Cyber6.dark_web_count(),
        "mentions_count": Cyber6.dark_web_mentions_count(),
        "dark_web_sites": Cyber6.dark_web_sites(),
        "alerts_threats": Cyber6.alerts_threats(),
        "dark_web_actors": dark_web_actors,
        "alerts_exec": Cyber6.alerts_exec()[:10],
        "asset_alerts": Cyber6.asset_alerts()[:10],
        "dark_web_act": Cyber6.dark_web_most_act(),
        "social_med_act": social_media,
        "markets_table": Cyber6.invite_only_markets(),
        "top_cves": Cyber6.top_cve_table(),
    }

    report_dict.update(dark_web_dict)

    # Create dark web JSON file
    mentions_df = Cyber6.dark_web_mentions
    # Cap mention content at 2000 chars for export. NOTE(review): this mutates
    # Cyber6.dark_web_mentions in place — confirm nothing downstream needs the
    # full text.
    mentions_df["content"] = mentions_df["content"].str[:2000]
    mi_json = f"{output_directory}/{org_code}/mention_incidents.json"
    final_dict = {
        "dark_web_mentions": mentions_df.to_dict(orient="records"),
        "dark_web_alerts": Cyber6.alerts.to_dict(orient="records"),
        "top_cves": Cyber6.top_cves.to_dict(orient="records"),
    }
    # default=str so dates/timestamps serialize without a custom encoder.
    with open(mi_json, "w") as outfile:
        json.dump(final_dict, outfile, default=str)

    # Create dark web Excel file.
    # Context manager replaces ExcelWriter.save(), which was removed in pandas 2.0.
    mi_xlsx = f"{output_directory}/{org_code}/mention_incidents.xlsx"
    with pd.ExcelWriter(mi_xlsx, engine="xlsxwriter") as mi_writer:
        mentions_df.to_excel(mi_writer, sheet_name="Dark Web Mentions", index=False)
        Cyber6.alerts.to_excel(mi_writer, sheet_name="Dark Web Alerts", index=False)
        Cyber6.top_cves.to_excel(mi_writer, sheet_name="Top CVEs", index=False)

    return (report_dict, mi_json, mi_xlsx)
286

287

288
def init(
    datestring,
    org_name,
    org_code,
    org_uid,
    premium,
    output_directory,
    soc_med_included=False,
):
    """Call each page of the report.

    Args:
        datestring: Report end date in "YYYY-MM-DD" form.
        org_name: Full organization name shown on the report.
        org_code: Short organization code used in output paths.
        org_uid: Organization UID passed to each page builder.
        premium: When truthy, include the full dark-web page.
        output_directory: Root directory for generated artifacts.
        soc_med_included: Whether social-media data appears in the report.

    Returns:
        Tuple of (report_dict, cred_json, da_json, vuln_json, mi_json,
        cred_xlsx, da_xlsx, vuln_xlsx, mi_xlsx); mi_* are None for
        non-premium reports.
    """
    # Reporting periods are bi-monthly: an end date on the 15th means the
    # period began on the 1st; any other end date means it began on the 16th.
    end_date = datetime.datetime.strptime(datestring, "%Y-%m-%d").date()
    period_first_day = 1 if end_date.day == 15 else 16
    start_date = datetime.datetime(end_date.year, end_date.month, period_first_day)
    # Trending window is 4 weeks: 27 days back plus the last day itself.
    trending_start_date = end_date - datetime.timedelta(27)

    # Base directory the templates use to locate generated images.
    base_dir = os.path.abspath(os.path.dirname(__file__))
    start = start_date.strftime("%m/%d/%Y")
    end = end_date.strftime("%m/%d/%Y")
    report_dict = {
        "department": org_name,
        "dateRange": f"{start} - {end}",
        "endDate": end,
        "base_dir": base_dir,
    }

    # Fill credentials data
    report_dict, cred_json, cred_xlsx = credential(
        report_dict,
        trending_start_date,
        start_date,
        end_date,
        org_uid,
        org_code,
        output_directory,
    )

    # Domain Masquerading
    report_dict, da_json, da_xlsx = masquerading(
        report_dict,
        start_date,
        end_date,
        org_uid,
        org_code,
        output_directory,
    )

    # Inferred/Verified Vulnerabilities
    report_dict, vuln_json, all_cves_df, vuln_xlsx = mal_vuln(
        report_dict,
        start_date,
        end_date,
        org_uid,
        org_code,
        output_directory,
    )

    # Dark web mentions and alerts (premium only); core reports still get
    # the top-CVE table.
    if premium:
        report_dict, mi_json, mi_xlsx = dark_web(
            report_dict,
            trending_start_date,
            start_date,
            end_date,
            org_uid,
            all_cves_df,
            soc_med_included,
            org_code,
            output_directory,
        )
    else:
        report_dict["top_cves"] = Core_Cyber_Six(all_cves_df).top_cve_table()
        mi_json = None
        mi_xlsx = None

    return (
        report_dict,
        cred_json,
        da_json,
        vuln_json,
        mi_json,
        cred_xlsx,
        da_xlsx,
        vuln_xlsx,
        mi_xlsx,
    )
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc