1 Commit

SHA1        Message                       Date
6c2da10eaa  Secure Source of Randomness   2024-06-15 10:28:21 +00:00
3 changed files with 311 additions and 311 deletions
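The commit swaps the random module for the secrets module wherever tokens,
SSIDs, and passwords are generated. A minimal sketch of the difference,
using the same alphabet as the code below:

    import random
    import secrets

    ALPHABET = "ABCDEF1234567890"

    # random.choice draws from a Mersenne Twister PRNG; its internal state
    # can be recovered from observed outputs, making such tokens predictable.
    weak_token = "".join(random.choice(ALPHABET) for _ in range(8))

    # secrets.choice draws from the OS CSPRNG (os.urandom), which is
    # suitable for security-sensitive tokens.
    strong_token = "".join(secrets.choice(ALPHABET) for _ in range(8))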


@@ -1,178 +1,178 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from json import loads, dumps
from collections import OrderedDict
from datetime import datetime
from traceback import print_exc
# Taken from https://github.com/dgunter/ParseZeekLogs <3
class ParseZeekLogs(object):
"""
    Class that parses Zeek logs and allows log data to be output in CSV or JSON format.
    Attributes: filepath: Path of the Zeek log file to read
"""
    def __init__(self, filepath, batchsize=500, fields=None, output_format=None, ignore_keys=None, meta=None, safe_headers=False):
self.fd = open(filepath, "r")
self.options = OrderedDict()
self.firstRun = True
self.filtered_fields = fields
self.batchsize = batchsize
self.output_format = output_format
        # Use fresh objects to avoid shared mutable default arguments
        self.ignore_keys = ignore_keys if ignore_keys is not None else []
        self.meta = meta if meta is not None else {}
        self.safe_headers = safe_headers
        # Round-trip meta through JSON so all keys and values are JSON-safe
        self.meta = loads(dumps(self.meta))
# Read the header option lines
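        # A Zeek TSV log begins with header lines such as:
        #   #separator \x09
        #   #set_separator ,
        #   #fields ts uid id.orig_h id.orig_p ...
        #   #types time string addr port ...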
-        l = self.fd.readline(5_000_000).strip()
+        l = self.fd.readline().strip()
while l.strip().startswith("#"):
# Parse the options out
if l.startswith("#separator"):
key = str(l[1:].split(" ")[0])
value = str.encode(l[1:].split(
" ")[1].strip()).decode('unicode_escape')
self.options[key] = value
elif l.startswith("#"):
key = str(l[1:].split(self.options.get('separator'))[0])
value = l[1:].split(self.options.get('separator'))[1:]
self.options[key] = value
# Read the next line
-            l = self.fd.readline(5_000_000).strip()
+            l = self.fd.readline().strip()
self.firstLine = l
# Save mapping of fields to values:
self.fields = self.options.get('fields')
self.types = self.options.get('types')
self.data_types = {}
for i, val in enumerate(self.fields):
            # Convert field names if safe_headers is enabled (e.g. "id.orig_h" -> "id_orig_h")
if self.safe_headers is True:
self.fields[i] = self.fields[i].replace(".", "_")
# Match types with each other
self.data_types[self.fields[i]] = self.types[i]
def __del__(self):
self.fd.close()
def __iter__(self):
return self
def __next__(self):
retVal = ""
if self.firstRun is True:
retVal = self.firstLine
self.firstRun = False
else:
retVal = self.fd.readline().strip()
# If an empty string is returned, readline is done reading
if retVal == "" or retVal is None:
raise StopIteration
# Split out the data we are going to return
retVal = retVal.split(self.options.get('separator'))
record = None
# Make sure we aren't dealing with a comment line
if len(retVal) > 0 and not str(retVal[0]).strip().startswith("#") \
                and len(retVal) == len(self.options.get("fields")):
record = OrderedDict()
# Prepare fields for conversion
for x in range(0, len(retVal)):
if self.safe_headers is True:
converted_field_name = self.options.get(
"fields")[x].replace(".", "_")
else:
converted_field_name = self.options.get("fields")[x]
if self.filtered_fields is None or converted_field_name in self.filtered_fields:
                    # Translate "-" (Zeek's unset marker) to "" to avoid a conversion error
if retVal[x] == "-":
retVal[x] = ""
# Save the record field if the field isn't filtered out
record[converted_field_name] = retVal[x]
# Convert values to the appropriate record type
record = self.convert_values(
record, self.ignore_keys, self.data_types)
if record is not None and self.output_format == "json":
# Output will be json
# Add metadata to json
for k, v in self.meta.items():
record[k] = v
retVal = record
elif record is not None and self.output_format == "csv":
retVal = ""
# Add escaping to csv format
for k, v in record.items():
# Add escaping to string values
if isinstance(v, str):
retVal += str("\"" + str(v).strip() + "\"" + ",")
else:
retVal += str(str(v).strip() + ",")
# Remove the trailing comma
retVal = retVal[:-1]
else:
retVal = None
return retVal
    def convert_values(self, data, ignore_keys=None, data_types=None):
        # Use fresh objects to avoid shared mutable default arguments
        ignore_keys = ignore_keys if ignore_keys is not None else []
        data_types = data_types if data_types is not None else {}
        keys_to_delete = []
for k, v in data.items():
# print("evaluating k: " + str(k) + " v: " + str(v))
if isinstance(v, dict):
data[k] = self.convert_values(v)
else:
if data_types.get(k) is not None:
if (data_types.get(k) == "port" or data_types.get(k) == "count"):
if v != "":
data[k] = int(v)
else:
keys_to_delete.append(k)
elif (data_types.get(k) == "double" or data_types.get(k) == "interval"):
if v != "":
data[k] = float(v)
else:
keys_to_delete.append(k)
                    elif data_types.get(k) == "bool":
                        # Zeek writes bools as "T"/"F"; bool(v) would be True
                        # for any non-empty string, including "F"
                        data[k] = (v == "T")
else:
data[k] = v
for k in keys_to_delete:
del data[k]
return data
def get_fields(self):
"""Returns all fields present in the log file
Returns:
A python list containing all field names in the log file
"""
field_names = ""
if self.output_format == "csv":
for i, v in enumerate(self.fields):
if self.filtered_fields is None or v in self.filtered_fields:
field_names += str(v) + ","
# Remove the trailing comma
field_names = field_names[:-1].strip()
else:
field_names = []
for i, v in enumerate(self.fields):
if self.filtered_fields is None or v in self.filtered_fields:
field_names.append(v)
return field_names

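For context, ParseZeekLogs is an iterator; a possible usage sketch (the
conn.log path and meta value are hypothetical):

    # Iterate a Zeek conn.log, getting each record as a dict with the
    # metadata merged in; comment or malformed lines yield None.
    for record in ParseZeekLogs("conn.log", output_format="json",
                                safe_headers=True, meta={"probe": "lab"}):
        if record is not None:
            print(record)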

@@ -1,128 +1,128 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import subprocess as sp
from app.utils import terminate_process, read_config
from os import mkdir, path
from flask import send_file, jsonify
import datetime
import shutil
import json
-import random
import sys
import re
+import secrets
class Capture(object):
def __init__(self):
self.random_choice_alphabet = "ABCDEF1234567890"
def start_capture(self):
"""
Start a tshark capture on the created AP interface and save
it in a temporary directory under /tmp/.
:return: dict containing capture token and status.
"""
        # Kill any leftover tshark instances.
terminate_process("tshark")
        # A few context variable assignments
        self.capture_token = "".join(
-            [random.choice(self.random_choice_alphabet) for i in range(8)])
+            [secrets.choice(self.random_choice_alphabet) for i in range(8)])
self.capture_dir = "/tmp/{}/".format(self.capture_token)
self.assets_dir = "/tmp/{}/assets/".format(self.capture_token)
self.pcap = self.capture_dir + "capture.pcap"
self.iface = read_config(("network", "in"))
# For packets monitoring
self.list_pkts = []
self.last_pkts = 0
# Make the capture and the assets directory
mkdir(self.capture_dir)
mkdir(self.assets_dir)
try:
sp.Popen(["tshark", "-i", self.iface, "-w",
self.pcap, "-f", "tcp or udp"])
return {"status": True,
"message": "Capture started",
"capture_token": self.capture_token}
        except Exception:
return {"status": False,
"message": "Unexpected error: %s" % sys.exc_info()[0]}
def get_capture_stats(self):
"""
        Get rough capture statistics used to draw a sparkline in the
        background of the capture view.
        :return: dict of stats associated with the capture
"""
with open("/sys/class/net/{}/statistics/tx_packets".format(self.iface)) as f:
tx_pkts = int(f.read())
with open("/sys/class/net/{}/statistics/rx_packets".format(self.iface)) as f:
rx_pkts = int(f.read())
if self.last_pkts == 0:
self.last_pkts = tx_pkts + rx_pkts
return {"status": True,
"packets": [0*400]}
else:
curr_pkts = (tx_pkts + rx_pkts) - self.last_pkts
self.last_pkts = tx_pkts + rx_pkts
self.list_pkts.append(curr_pkts)
return {"status": True,
"packets": self.beautify_stats(self.list_pkts)}
@staticmethod
def beautify_stats(data):
"""
        Pad the list to max_len entries if it is shorter than max_len;
        otherwise keep only the last max_len stats. This shows a kind of
        "progressive chart" in the background for the first packets.
:return: a list of integers.
"""
max_len = 400
if len(data) >= max_len:
return data[-max_len:]
else:
return data + [1] * (max_len - len(data))
def stop_capture(self):
"""
        Stop tshark if an instance is present and call create_capinfos.
:return: dict as a small confirmation.
"""
if terminate_process("tshark"):
self.create_capinfos()
return {"status": True,
"message": "Capture stopped"}
else:
return {"status": False,
"message": "No active capture"}
def create_capinfos(self):
"""
        Creates a capinfos JSON file in the assets directory.
        :return: True once the file is written.
"""
infos = sp.Popen(["capinfos", self.pcap],
stdout=sp.PIPE, stderr=sp.PIPE)
infos = infos.communicate()[0]
data = {}
for l in infos.decode().splitlines():
try:
l = l.split(": ") if ": " in l else l.split("= ")
if len(l[0]) and len(l[1]):
data[l[0].strip()] = l[1].strip()
            except Exception:
continue
with open("{}capinfos.json".format(self.assets_dir), 'w') as f:
json.dump(data, f)
return True

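A possible driver for the Capture class above, purely illustrative and
outside the Flask routes that normally call it:

    capture = Capture()
    started = capture.start_capture()  # spawns tshark, returns a token
    if started["status"]:
        print("capture token:", started["capture_token"])
        # ... poll get_capture_stats() while traffic flows ...
        print(capture.stop_capture()["message"])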

@@ -9,13 +9,13 @@ import sys
import time
import qrcode
import base64
-import random
import requests
from wifi import Cell
from os import path, remove
from io import BytesIO
from app.utils import terminate_process, read_config
+import secrets
class Network(object):
@@ -170,14 +170,14 @@ class Network(object):
# Generate the hostapd configuration
if read_config(("network", "tokenized_ssids")):
token = "".join([random.choice(self.random_choice_alphabet)
token = "".join([secrets.choice(self.random_choice_alphabet)
for i in range(4)])
-            self.AP_SSID = random.choice(read_config(
+            self.AP_SSID = secrets.choice(read_config(
("network", "ssids"))) + "-" + token
else:
-            self.AP_SSID = random.choice(read_config(("network", "ssids")))
+            self.AP_SSID = secrets.choice(read_config(("network", "ssids")))
self.AP_PASS = "".join(
-            [random.choice(self.random_choice_alphabet) for i in range(8)])
+            [secrets.choice(self.random_choice_alphabet) for i in range(8)])
# Launch hostapd
if self.write_hostapd_config():
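Isolated from the class, the new SSID and passphrase generation looks like
this; the SSID list is a hypothetical stand-in for
read_config(("network", "ssids")):

    import secrets

    ALPHABET = "ABCDEF1234567890"
    ssids = ["wifi-free", "openwifi"]  # hypothetical stand-in values

    token = "".join(secrets.choice(ALPHABET) for _ in range(4))
    ap_ssid = secrets.choice(ssids) + "-" + token  # tokenized SSID
    ap_pass = "".join(secrets.choice(ALPHABET) for _ in range(8))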