• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

cisagov / pe-reports / 5268386652

pending completion
5268386652

Pull #565

github

web-flow
Merge 40eba2026 into 14755187f
Pull Request #565: Update report generator to use reportlab

79 of 415 branches covered (19.04%)

Branch coverage included in aggregate %.

404 of 676 new or added lines in 7 files covered. (59.76%)

16 existing lines in 5 files now uncovered.

748 of 1804 relevant lines covered (41.46%)

2.07 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

8.72
/src/pe_reports/pages.py
1
"""Collect and distribute graphical data to readable charts in the presentation."""
2

3
# Standard Python Libraries
4
import datetime
5✔
5
import json
5✔
6
import logging
5✔
7
import os
5✔
8

9
# Third-Party Libraries
10
import pandas as pd
5✔
11

12
from .charts import Charts
5✔
13

14
# Import Classes
15
from .metrics import Credentials, Cyber_Six, Domains_Masqs, Malware_Vulns
5✔
16

17
# Setup logging to central
18
LOGGER = logging.getLogger(__name__)
5✔
19

20

21
def credential(
    report_dict,
    trending_start_date,
    start_date,
    end_date,
    org_uid,
    org_code,
    output_directory,
):
    """Build the exposed-credential page and export its backing data.

    Args:
        report_dict: Accumulator dict of values rendered into the report.
        trending_start_date: Start of the 4-week trending window.
        start_date: Start of the reporting period.
        end_date: End of the reporting period.
        org_uid: Organization unique identifier used for data lookups.
        org_code: Organization short code used to build output paths.
        output_directory: Root directory for generated artifact files.

    Returns:
        Tuple of (updated report_dict, path to the credentials JSON file,
        path to the credentials Excel file).
    """
    Credential = Credentials(trending_start_date, start_date, end_date, org_uid)
    # Build exposed credential stacked bar chart
    width = 16.51
    height = 10
    name = "inc_date_df"
    title = "Trending Exposures by Week"
    x_label = "Week Reported"
    y_label = "Creds Exposed"
    cred_date_chart = Charts(
        Credential.by_week(),
        width,
        height,
        name,
        title,
        x_label,
        y_label,
    )
    cred_date_chart.line_chart()

    creds_dict = {
        "breach": Credential.breaches(),
        "creds": Credential.total(),
        "pw_creds": Credential.password(),
        "breach_table": Credential.breach_details()[:15],
        "breach_appendix": Credential.breach_appendix(),
    }
    report_dict.update(creds_dict)

    # Create Credential Exposure JSON file
    cred_json = f"{output_directory}/{org_code}/compromised_credentials.json"
    cred_dict = Credential.creds_view.to_dict(orient="records")
    final_dict = {"credentials": cred_dict}
    # default=str stringifies non-JSON-native values (e.g. dates) on export
    with open(cred_json, "w") as outfile:
        json.dump(final_dict, outfile, default=str)

    # Create Credential Exposure Excel file.  The context manager saves and
    # closes the workbook on exit; ExcelWriter.save() is deprecated and was
    # removed in pandas 2.0.
    cred_xlsx = f"{output_directory}/{org_code}/compromised_credentials.xlsx"
    with pd.ExcelWriter(cred_xlsx, engine="xlsxwriter") as cred_writer:
        Credential.creds_view.to_excel(
            cred_writer, sheet_name="Credentials", index=False
        )

    return report_dict, cred_json, cred_xlsx
73

74

75
def masquerading(
    report_dict, start_date, end_date, org_uid, org_code, output_directory
):
    """Build the domain-masquerading page and export its backing data.

    Args:
        report_dict: Accumulator dict of values rendered into the report.
        start_date: Start of the reporting period.
        end_date: End of the reporting period.
        org_uid: Organization unique identifier used for data lookups.
        org_code: Organization short code used to build output paths.
        output_directory: Root directory for generated artifact files.

    Returns:
        Tuple of (updated report_dict, path to the domain-alerts JSON file,
        path to the domain-alerts Excel file).
    """
    Domain_Masq = Domains_Masqs(start_date, end_date, org_uid)
    report_dict.update(
        {
            "domain_table": Domain_Masq.summary()[:10],
            "domain_alerts_table": Domain_Masq.alerts()[:10],
            "suspectedDomains": Domain_Masq.count(),
            "domain_alerts": Domain_Masq.alert_count(),
        }
    )
    # Create Domain Masquerading JSON file
    da_json = f"{output_directory}/{org_code}/domain_alerts.json"
    susp_domains_dict = Domain_Masq.df_mal.to_dict(orient="records")
    dom_alerts_dict = Domain_Masq.alerts_sum().to_dict(orient="records")
    final_dict = {
        "suspected_domains": susp_domains_dict,
        "domain_alerts": dom_alerts_dict,
    }
    # default=str stringifies non-JSON-native values (e.g. dates) on export
    with open(da_json, "w") as outfile:
        json.dump(final_dict, outfile, default=str)

    # Create Domain Masquerading Excel file.  The context manager saves and
    # closes the workbook on exit; ExcelWriter.save() is deprecated and was
    # removed in pandas 2.0.
    da_xlsx = f"{output_directory}/{org_code}/domain_alerts.xlsx"
    with pd.ExcelWriter(da_xlsx, engine="xlsxwriter") as dom_writer:
        Domain_Masq.df_mal.to_excel(
            dom_writer, sheet_name="Suspected Domains", index=False
        )
        Domain_Masq.alerts_sum().to_excel(
            dom_writer, sheet_name="Domain Alerts", index=False
        )
    return report_dict, da_json, da_xlsx
108

109

110
def mal_vuln(report_dict, start_date, end_date, org_uid, org_code, output_directory):
    """Build the Malwares and Vulnerabilities page and export its backing data.

    Args:
        report_dict: Accumulator dict of values rendered into the report.
        start_date: Start of the reporting period.
        end_date: End of the reporting period.
        org_uid: Organization unique identifier used for data lookups.
        org_code: Organization short code used to build output paths.
        output_directory: Root directory for generated artifact files.

    Returns:
        Tuple of (updated report_dict, path to the vulnerability JSON file,
        DataFrame of all CVEs for downstream pages, path to the
        vulnerability Excel file).
    """
    Malware_Vuln = Malware_Vulns(start_date, end_date, org_uid)
    # Build insecure protocol horizontal bar chart
    width = 16.51
    height = 5.3
    name = "pro_count"
    title = ""
    x_label = "Insecure Protocols"
    y_label = ""
    protocol_chart = Charts(
        Malware_Vuln.protocol_count(),
        width,
        height,
        name,
        title,
        x_label,
        y_label,
    )
    protocol_chart.h_bar()
    # Build unverified vulnerability horizontal bar chart
    width = 16.51
    height = 9
    name = "unverif_vuln_count"
    title = ""
    x_label = "Unverified CVEs"
    y_label = ""
    unverif_vuln_chart = Charts(
        Malware_Vuln.unverified_cv_count(),
        width,
        height,
        name,
        title,
        x_label,
        y_label,
    )
    # The original code invoked h_bar() twice in a row here, rendering the
    # same chart twice; a single call produces the identical output file.
    unverif_vuln_chart.h_bar()
    # Build tables
    risky_assets = Malware_Vuln.insecure_protocols()
    risky_assets = risky_assets[:10]
    risky_assets.columns = ["Protocol", "IP", "Port"]
    verif_vulns = Malware_Vuln.verif_vulns()[:10]
    verif_vulns.columns = ["CVE", "IP", "Port"]
    risky_ports = Malware_Vuln.risky_ports_count()
    verif_vulns_count = Malware_Vuln.total_verif_vulns()
    unverif_vulns = Malware_Vuln.unverified_vuln_count()
    # Update chevron dictionary
    vulns_dict = {
        "verif_vulns": verif_vulns,
        "risky_assets": risky_assets,
        "riskyPorts": risky_ports,
        "verifVulns": verif_vulns_count,
        "unverifVulns": unverif_vulns,
        "verif_vulns_summary": Malware_Vuln.verif_vulns_summary(),
    }
    all_cves_df = Malware_Vuln.all_cves()
    report_dict.update(vulns_dict)

    # Create Suspected vulnerability JSON file
    vuln_json = f"{output_directory}/{org_code}/vuln_alerts.json"
    assets_dict = Malware_Vuln.assets_df.to_dict(orient="records")
    insecure_dict = Malware_Vuln.insecure_df.to_dict(orient="records")
    # Renamed from the original's second "vulns_dict", which shadowed the
    # chevron dictionary above.
    verified_vulns_dict = Malware_Vuln.vulns_df.to_dict(orient="records")
    final_dict = {
        "assets": assets_dict,
        "insecure": insecure_dict,
        "verified_vulns": verified_vulns_dict,
    }
    # default=str stringifies non-JSON-native values (e.g. dates) on export
    with open(vuln_json, "w") as outfile:
        json.dump(final_dict, outfile, default=str)

    # Create Suspected vulnerability Excel file.  The context manager saves
    # and closes the workbook on exit; ExcelWriter.save() is deprecated and
    # was removed in pandas 2.0.
    vuln_xlsx = f"{output_directory}/{org_code}/vuln_alerts.xlsx"
    with pd.ExcelWriter(vuln_xlsx, engine="xlsxwriter") as vuln_writer:
        Malware_Vuln.assets_df.to_excel(vuln_writer, sheet_name="Assets", index=False)
        Malware_Vuln.insecure_df.to_excel(
            vuln_writer, sheet_name="Insecure", index=False
        )
        Malware_Vuln.vulns_df.to_excel(
            vuln_writer, sheet_name="Verified Vulns", index=False
        )
    return (report_dict, vuln_json, all_cves_df, vuln_xlsx)
190

191

192
def dark_web(
    report_dict,
    trending_start_date,
    start_date,
    end_date,
    org_uid,
    all_cves_df,
    soc_med_included,
    org_code,
    output_directory,
):
    """Build the dark web mentions page and export its backing data.

    Args:
        report_dict: Accumulator dict of values rendered into the report.
        trending_start_date: Start of the 4-week trending window.
        start_date: Start of the reporting period.
        end_date: End of the reporting period.
        org_uid: Organization unique identifier used for data lookups.
        all_cves_df: DataFrame of all CVEs produced by mal_vuln().
        soc_med_included: Whether social-media results appear in the report.
        org_code: Organization short code used to build output paths.
        output_directory: Root directory for generated artifact files.

    Returns:
        Tuple of (updated report_dict, path to the mentions/incidents JSON
        file, path to the mentions/incidents Excel file).
    """
    Cyber6 = Cyber_Six(
        trending_start_date,
        start_date,
        end_date,
        org_uid,
        all_cves_df,
        soc_med_included,
    )
    # Build dark web mentions over time line chart
    width = 16.51
    height = 10
    name = "web_only_df_2"
    title = ""
    x_label = "Dark Web Mentions"
    y_label = "Mentions count"
    dark_mentions_chart = Charts(
        Cyber6.dark_web_date(),
        width,
        height,
        name,
        title,
        x_label,
        y_label,
    )
    dark_mentions_chart.line_chart()

    # Limit the number of rows for large dataframes
    dark_web_actors = Cyber6.dark_web_bad_actors()

    social_media = Cyber6.social_media_most_act()
    if not soc_med_included:
        # Keep the (empty) frame so downstream templating still works
        social_media = social_media[0:0]
    dark_web_dict = {
        "darkWeb": Cyber6.dark_web_count(),
        "mentions_count": Cyber6.dark_web_mentions_count(),
        "dark_web_sites": Cyber6.dark_web_sites(),
        "alerts_threats": Cyber6.alerts_threats(),
        "dark_web_actors": dark_web_actors,
        "alerts_exec": Cyber6.alerts_exec()[:10],
        "asset_alerts": Cyber6.asset_alerts()[:10],
        "dark_web_act": Cyber6.dark_web_most_act(),
        "social_med_act": social_media,
        "markets_table": Cyber6.invite_only_markets(),
        "top_cves": Cyber6.top_cve_table(),
    }

    report_dict.update(dark_web_dict)

    # Create dark web JSON file
    mentions_df = Cyber6.dark_web_mentions
    # Truncate long mention bodies so the exports stay a manageable size.
    # NOTE(review): this writes through to Cyber6.dark_web_mentions (no copy
    # is taken) — presumably intentional since nothing reads it afterwards.
    mentions_df["content"] = mentions_df["content"].str[:2000]
    mi_json = f"{output_directory}/{org_code}/mention_incidents.json"
    mentions_dict = mentions_df.to_dict(orient="records")
    alerts_dict = Cyber6.alerts.to_dict(orient="records")
    cve_dict = Cyber6.top_cves.to_dict(orient="records")
    final_dict = {
        "dark_web_mentions": mentions_dict,
        "dark_web_alerts": alerts_dict,
        "top_cves": cve_dict,
    }
    # default=str stringifies non-JSON-native values (e.g. dates) on export
    with open(mi_json, "w") as outfile:
        json.dump(final_dict, outfile, default=str)

    # Create dark web Excel file.  The context manager saves and closes the
    # workbook on exit; ExcelWriter.save() is deprecated and was removed in
    # pandas 2.0.
    mi_xlsx = f"{output_directory}/{org_code}/mention_incidents.xlsx"
    with pd.ExcelWriter(mi_xlsx, engine="xlsxwriter") as mi_writer:
        mentions_df.to_excel(mi_writer, sheet_name="Dark Web Mentions", index=False)
        Cyber6.alerts.to_excel(mi_writer, sheet_name="Dark Web Alerts", index=False)
        Cyber6.top_cves.to_excel(mi_writer, sheet_name="Top CVEs", index=False)

    return (report_dict, mi_json, mi_xlsx)
276

277

278
def init(
    datestring, org_name, org_code, org_uid, output_directory, soc_med_included=False
):
    """Build every page of the report and collect the generated artifacts.

    Args:
        datestring: Report end date in "YYYY-MM-DD" form; must be either the
            15th or the last day of a month (bi-monthly reporting periods).
        org_name: Human-readable organization name shown in the report.
        org_code: Organization short code used to build output paths.
        org_uid: Organization unique identifier used for data lookups.
        output_directory: Root directory for generated artifact files.
        soc_med_included: Whether social-media results appear in the report.

    Returns:
        Tuple of the filled report dict followed by the JSON paths
        (credentials, domain alerts, vulnerabilities, mentions) and the
        Excel paths in the same order.
    """
    # The bi-monthly period ending on the 15th starts on the 1st; a period
    # ending on the last day of the month starts on the 16th.
    end_date = datetime.datetime.strptime(datestring, "%Y-%m-%d").date()
    first_day = 1 if end_date.day == 15 else 16
    start_date = datetime.datetime(end_date.year, end_date.month, first_day)

    # Trending window is the 4 weeks ending on the last day of the period:
    # 27 days back plus the end day itself.
    trending_start_date = end_date - datetime.timedelta(27)

    # Base directory used by the templates to locate generated images.
    base_dir = os.path.abspath(os.path.dirname(__file__))
    start = start_date.strftime("%m/%d/%Y")
    end = end_date.strftime("%m/%d/%Y")
    report_dict = {
        "department": org_name,
        "dateRange": f"{start} - {end}",
        "endDate": end,
        "base_dir": base_dir,
    }

    # Fill credentials data
    report_dict, cred_json, cred_xlsx = credential(
        report_dict,
        trending_start_date,
        start_date,
        end_date,
        org_uid,
        org_code,
        output_directory,
    )

    # Domain Masquerading
    report_dict, da_json, da_xlsx = masquerading(
        report_dict,
        start_date,
        end_date,
        org_uid,
        org_code,
        output_directory,
    )

    # Inferred/Verified Vulnerabilities
    report_dict, vuln_json, all_cves_df, vuln_xlsx = mal_vuln(
        report_dict,
        start_date,
        end_date,
        org_uid,
        org_code,
        output_directory,
    )

    # Dark web mentions and alerts
    report_dict, mi_json, mi_xlsx = dark_web(
        report_dict,
        trending_start_date,
        start_date,
        end_date,
        org_uid,
        all_cves_df,
        soc_med_included,
        org_code,
        output_directory,
    )

    return (
        report_dict,
        cred_json,
        da_json,
        vuln_json,
        mi_json,
        cred_xlsx,
        da_xlsx,
        vuln_xlsx,
        mi_xlsx,
    )
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc