CHANGE import of multiple log files
parent 0e3323b7ab
commit a3cff924ba
1 changed file with 163 additions and 75 deletions
main.py
@@ -136,12 +136,15 @@ async def combined_view(
     # Parse all log files and collect all rows
     for filename in log_files:
         log_path = os.path.join(logs_dir, filename)
+        try:
             columns, rows = parse_log_file(log_path)

             if columns:
                 common_columns.update(columns)

             all_rows.extend(rows)
+        except Exception as e:
+            print(f"Error processing file {filename} in combined view: {e}")

     # Apply gateway filter if specified
     if gateway:
@@ -290,11 +293,14 @@ async def api_all_entries(
     reference_columns = []
     for filename in log_files:
         log_path = os.path.join(logs_dir, filename)
+        try:
             columns, rows = parse_log_file(log_path)
             if columns and not reference_columns:
                 # Save column order from first file with columns
                 reference_columns = columns
             all_rows.extend(rows)
+        except Exception as e:
+            print(f"Error processing file {filename} in api_all_entries: {e}")

     # Apply gateway filter if specified
     if gateway:
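Note: both hunks above apply the same fix — each file's parse is wrapped in try/except, so one unreadable or malformed log no longer aborts the whole aggregation; the failure is printed and the file is skipped. A minimal standalone sketch of that shared pattern (the helper name collect_rows is illustrative, not part of this commit):

import os
from typing import Callable, Dict, List, Tuple

def collect_rows(logs_dir: str, log_files: List[str],
                 parse_log_file: Callable) -> Tuple[List[str], List[Dict]]:
    """Aggregate rows from many log files, skipping unreadable ones.

    Column order is taken from the first file that reports columns,
    mirroring the reference_columns logic in api_all_entries.
    """
    reference_columns: List[str] = []
    all_rows: List[Dict] = []
    for filename in log_files:
        log_path = os.path.join(logs_dir, filename)
        try:
            columns, rows = parse_log_file(log_path)
            if columns and not reference_columns:
                reference_columns = columns  # first file with columns wins
            all_rows.extend(rows)
        except Exception as e:
            # One bad file is reported and skipped, not fatal
            print(f"Error processing file {filename}: {e}")
    return reference_columns, all_rows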
@@ -355,13 +361,51 @@ class LogRow(BaseModel):
 @app.get("/view/{filename}", response_class=HTMLResponse)
 async def view_log(request: Request, filename: str):
     log_path = os.path.join(os.getcwd(), "logs", filename)
-    raw_content = ""
+    raw_content = None
     parsed_rows = []
     header_columns = []

     try:
-        with open(log_path, "r") as file:
-            raw_content = file.read()
+        # Read the file in binary mode first to check for encodings
+        with open(log_path, "rb") as file:
+            binary_content = file.read()
+
+        # Check for BOM (Byte Order Mark) at the beginning of the file
+        raw_content = None
+
+        # Check for UTF-16 LE BOM
+        if binary_content.startswith(b'\xff\xfe'):
+            try:
+                raw_content = binary_content.decode('utf-16-le')
+            except UnicodeDecodeError:
+                pass
+
+        # Check for UTF-16 BE BOM
+        if raw_content is None and binary_content.startswith(b'\xfe\xff'):
+            try:
+                raw_content = binary_content.decode('utf-16-be')
+            except UnicodeDecodeError:
+                pass
+
+        # Try UTF-8
+        if raw_content is None:
+            try:
+                raw_content = binary_content.decode('utf-8')
+            except UnicodeDecodeError:
+                pass
+
+        # Try common encodings if we still don't have content
+        if raw_content is None:
+            for encoding in ['utf-16', 'latin1', 'cp1252', 'iso-8859-1']:
+                try:
+                    raw_content = binary_content.decode(encoding)
+                    break
+                except UnicodeDecodeError:
+                    continue
+
+        # If all decodings fail, use latin1 as a fallback with replacement
+        if raw_content is None:
+            raw_content = binary_content.decode('latin1', errors='replace')
+
         header_columns, parsed_dict_rows = parse_log_file(log_path)
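Note: two observations on this hunk. The second raw_content = None is redundant with the initializer that replaced raw_content = "" at the top of the function, though harmless. And since 'latin1' can decode any byte sequence, the 'cp1252' and 'iso-8859-1' entries and the final errors='replace' fallback are effectively unreachable once the loop reaches 'latin1'. The same cascade is repeated almost verbatim in parse_log_file below, so a follow-up could factor it into one helper; a minimal sketch, with the name decode_log_bytes assumed rather than taken from the commit, and latin1 moved to the final fallback:

def decode_log_bytes(binary_content: bytes) -> str:
    """Best-effort decode mirroring the committed cascade.

    BOM-indicated UTF-16 first, then UTF-8 and other common encodings,
    finally latin1 with replacement (which cannot fail).
    """
    if binary_content.startswith(b'\xff\xfe'):    # UTF-16 LE BOM
        try:
            return binary_content.decode('utf-16-le')
        except UnicodeDecodeError:
            pass
    elif binary_content.startswith(b'\xfe\xff'):  # UTF-16 BE BOM
        try:
            return binary_content.decode('utf-16-be')
        except UnicodeDecodeError:
            pass
    for encoding in ('utf-8', 'utf-16', 'cp1252', 'iso-8859-1'):
        try:
            return binary_content.decode(encoding)
        except UnicodeDecodeError:
            continue
    # latin1 maps every byte value, so this always produces a string
    return binary_content.decode('latin1', errors='replace')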
@@ -427,6 +471,7 @@ def get_all_logs() -> List[LogEntry]:
     result = []

     for filename in log_files:
+        try:
             gateway, timestamp = parse_filename(filename)
             if gateway and timestamp:
                 result.append(LogEntry(
@@ -434,6 +479,10 @@ def get_all_logs() -> List[LogEntry]:
                     timestamp=timestamp,
                     filename=filename
                 ))
+            else:
+                print(f"Could not parse filename: {filename}")
+        except Exception as e:
+            print(f"Error processing log file {filename}: {e}")

     # Sort by timestamp descending (newest first)
     result.sort(key=lambda x: x.timestamp, reverse=True)
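Note: the new else branch surfaces filenames that parse_filename rejects instead of dropping them silently. The diff does not show parse_filename itself; purely to illustrate the (gateway, timestamp) contract the hunk relies on, here is a hypothetical version for names like sslvpn_<gateway>_<YYYYMMDD-HHMMSS>.log — this filename format is an assumption, not from the commit:

import re
from datetime import datetime
from typing import Optional, Tuple

# Hypothetical filename pattern -- the real format is not shown in this diff
FILENAME_RE = re.compile(r'^sslvpn_(?P<gateway>.+)_(?P<ts>\d{8}-\d{6})\.log$')

def parse_filename(filename: str) -> Tuple[Optional[str], Optional[datetime]]:
    """Return (gateway, timestamp), or (None, None) when the name doesn't match."""
    m = FILENAME_RE.match(filename)
    if not m:
        return None, None
    try:
        ts = datetime.strptime(m.group('ts'), '%Y%m%d-%H%M%S')
    except ValueError:
        return None, None
    return m.group('gateway'), ts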
@@ -459,8 +508,47 @@ def parse_log_file(log_path):
     header_columns = []

     try:
-        with open(log_path, "r") as file:
-            content = file.read()
+        # Read the file in binary mode first to check for encodings
+        with open(log_path, "rb") as file:
+            binary_content = file.read()
+
+        # Check for BOM (Byte Order Mark) at the beginning of the file
+        content = None
+
+        # Check for UTF-16 LE BOM
+        if binary_content.startswith(b'\xff\xfe'):
+            try:
+                content = binary_content.decode('utf-16-le')
+            except UnicodeDecodeError:
+                pass
+
+        # Check for UTF-16 BE BOM
+        if content is None and binary_content.startswith(b'\xfe\xff'):
+            try:
+                content = binary_content.decode('utf-16-be')
+            except UnicodeDecodeError:
+                pass
+
+        # Try UTF-8
+        if content is None:
+            try:
+                content = binary_content.decode('utf-8')
+            except UnicodeDecodeError:
+                pass
+
+        # Try common encodings if we still don't have content
+        if content is None:
+            for encoding in ['utf-16', 'latin1', 'cp1252', 'iso-8859-1']:
+                try:
+                    content = binary_content.decode(encoding)
+                    break
+                except UnicodeDecodeError:
+                    continue
+
+        # If all decodings fail, use latin1 as a fallback with replacement
+        if content is None:
+            content = binary_content.decode('latin1', errors='replace')
+
         lines = content.splitlines()

         # Find the "SSL-VPN sessions:" section
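Note: one quirk worth knowing when testing this cascade — decoding with 'utf-16-le' or 'utf-16-be' keeps the BOM as a leading U+FEFF character in the result (decoding with plain 'utf-16' would strip it), so the decoded content starts with an invisible character. A quick sanity check, assuming the decode_log_bytes sketch above:

# UTF-16 LE bytes with BOM take the LE branch; the BOM survives as U+FEFF
data = b'\xff\xfe' + 'SSL-VPN sessions:'.encode('utf-16-le')
assert decode_log_bytes(data) == '\ufeffSSL-VPN sessions:'

# Plain ASCII/UTF-8 bytes fall through to the UTF-8 attempt
assert decode_log_bytes(b'SSL-VPN sessions:') == 'SSL-VPN sessions:'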