First commit of pf2json, a pfSense filterlog-to-JSONL converter
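The converter reads pfSense filterlog entries from stdin, one per line, in the form <timestamp> TAB <log-level> TAB filterlog TAB <csv-payload>, and emits one JSON object per line on stdout. A hypothetical IPv4/TCP input line (all values illustrative, tab separators shown as \t):

Feb 10 12:00:00\tInformational\tfilterlog\t5,,,1000000103,igb0,match,block,in,4,0x0,,64,12345,0,DF,6,tcp,60,10.0.0.5,192.0.2.10,51000,443,0,S,123456789,,65535,,mss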
pf2json.go (new file, 234 lines)
@@ -0,0 +1,234 @@
package main

import (
    "bufio"
    "encoding/csv"
    "encoding/json"
    "fmt"
    "os"
    "strings"
)

func main() {
    // Create a scanner to read lines from standard input.
    scanner := bufio.NewScanner(os.Stdin)
    for scanner.Scan() {
        line := scanner.Text()
        // Expect at least 4 tab-separated columns:
        // 1. Timestamp
        // 2. Log level (e.g. "Informational")
        // 3. The literal "filterlog" (optionally with a colon)
        // 4. The CSV log data (which may itself contain tabs).
        parts := strings.Split(line, "\t")
        if len(parts) < 4 {
            fmt.Fprintf(os.Stderr, "Skipping malformed line: %s\n", line)
            continue
        }

        timestamp := strings.TrimSpace(parts[0])
        logLevel := strings.TrimSpace(parts[1])
        filterlogField := strings.TrimSpace(parts[2])
        // In case the CSV data was split over multiple tab columns, rejoin it.
        csvData := strings.TrimSpace(strings.Join(parts[3:], "\t"))

        // Validate that the third column is "filterlog" (with optional colon).
        if filterlogField != "filterlog" && filterlogField != "filterlog:" {
            fmt.Fprintf(os.Stderr, "Skipping line due to invalid filterlog field: %s\n", line)
            continue
        }

        // Parse the CSV log data.
        r := csv.NewReader(strings.NewReader(csvData))
        fields, err := r.Read()
        if err != nil {
            fmt.Fprintf(os.Stderr, "Error parsing CSV: %v\n", err)
            continue
        }

        // Trim whitespace from each CSV field.
        for i, f := range fields {
            fields[i] = strings.TrimSpace(f)
        }

        // Verify at least 9 fields (the common fields).
        if len(fields) < 9 {
            fmt.Fprintf(os.Stderr, "Not enough CSV fields in log data: %s\n", line)
            continue
        }

        // Map the common fields as defined in the BNF:
        // <rule-number>, <sub-rule-number>, <anchor>, <tracker>, <real-interface>,
        // <reason>, <action>, <direction>, <ip-version>
        base := map[string]interface{}{
            "rule_number":     fields[0],
            "sub_rule_number": fields[1],
            "anchor":          fields[2],
            "tracker":         fields[3],
            "real_interface":  fields[4],
            "reason":          fields[5],
            "action":          fields[6],
            "direction":       fields[7],
            "ip_version":      fields[8],
        }

        ipVersion := fields[8]
        idx := 9

        // We'll build two objects: one for the IP-specific header and one for the IP-data block.
        ipSpecificData := map[string]interface{}{}
        ipData := map[string]interface{}{}
        protocolSpecificData := map[string]interface{}{}
        var protoSpecific []string

        if ipVersion == "4" {
            // For IPv4, expect 8 fields of IPv4-specific data.
            if len(fields) < idx+8+3 {
                fmt.Fprintf(os.Stderr, "Not enough fields for IPv4 in line: %s\n", line)
                continue
            }
            ipv4Header := map[string]interface{}{
                "tos":           fields[idx],
                "ecn":           fields[idx+1],
                "ttl":           fields[idx+2],
                "id":            fields[idx+3],
                "offset":        fields[idx+4],
                "flags":         fields[idx+5],
                "protocol_id":   fields[idx+6],
                "protocol_text": fields[idx+7],
            }
            ipSpecificData["ipv4_header"] = ipv4Header
            idx += 8

            // The ip-data block: <length>, <source-address>, <destination-address>.
            ipData = map[string]interface{}{
                "length":              fields[idx],
                "source_address":      fields[idx+1],
                "destination_address": fields[idx+2],
            }
            idx += 3
        } else if ipVersion == "6" {
            // For IPv6, expect 5 fields of IPv6-specific data.
            if len(fields) < idx+5+3 {
                fmt.Fprintf(os.Stderr, "Not enough fields for IPv6 in line: %s\n", line)
                continue
            }
            ipv6Header := map[string]interface{}{
                "class":         fields[idx],
                "flow_label":    fields[idx+1],
                "hop_limit":     fields[idx+2],
                "protocol_text": fields[idx+3],
                "protocol_id":   fields[idx+4],
            }
            ipSpecificData["ipv6_header"] = ipv6Header
            idx += 5

            ipData = map[string]interface{}{
                "length":              fields[idx],
                "source_address":      fields[idx+1],
                "destination_address": fields[idx+2],
            }
            idx += 3
        } else {
            // Unknown IP version: capture the remainder as raw.
            ipSpecificData["raw"] = fields[idx:]
            idx = len(fields)
        }

        // Any fields remaining are protocol-specific.
        if len(fields) > idx {
            protoSpecific = fields[idx:]
        }

        // Determine the protocol from the header's protocol_text.
        var protocol string
        if ipVersion == "4" {
            if hdr, ok := ipSpecificData["ipv4_header"].(map[string]interface{}); ok {
                protocol = hdr["protocol_text"].(string)
            }
        } else if ipVersion == "6" {
            if hdr, ok := ipSpecificData["ipv6_header"].(map[string]interface{}); ok {
                protocol = hdr["protocol_text"].(string)
            }
        }

        // Structure protocol-specific data if possible.
        switch strings.ToLower(protocol) {
        case "tcp":
            // Expect 9 fields for TCP:
            // <source-port>, <destination-port>, <data-length>, <tcp-flags>,
            // <sequence-number>, <ack-number>, <tcp-window>, <urg>, <tcp-options>
            if len(protoSpecific) >= 9 {
                protocolSpecificData = map[string]interface{}{
                    "source_port":      protoSpecific[0],
                    "destination_port": protoSpecific[1],
                    "data_length":      protoSpecific[2],
                    "tcp_flags":        protoSpecific[3],
                    "sequence_number":  protoSpecific[4],
                    "ack_number":       protoSpecific[5],
                    "tcp_window":       protoSpecific[6],
                    "urg":              protoSpecific[7],
                    "tcp_options":      protoSpecific[8],
                }
            } else {
                protocolSpecificData["raw"] = protoSpecific
            }
        case "udp":
            // Expect 3 fields for UDP: <source-port>, <destination-port>, <data-length>
            if len(protoSpecific) >= 3 {
                protocolSpecificData = map[string]interface{}{
                    "source_port":      protoSpecific[0],
                    "destination_port": protoSpecific[1],
                    "data_length":      protoSpecific[2],
                }
            } else {
                protocolSpecificData["raw"] = protoSpecific
            }
        case "icmp":
            // ICMP data may vary; output the raw fields.
            protocolSpecificData["raw"] = protoSpecific
        case "carp":
            // Expect 6 fields for CARP: <carp-type>, <carp-ttl>, <vhid>, <version>, <advbase>, <advskew>
            if len(protoSpecific) >= 6 {
                protocolSpecificData = map[string]interface{}{
                    "carp_type": protoSpecific[0],
                    "carp_ttl":  protoSpecific[1],
                    "vhid":      protoSpecific[2],
                    "version":   protoSpecific[3],
                    "advbase":   protoSpecific[4],
                    "advskew":   protoSpecific[5],
                }
            } else {
                protocolSpecificData["raw"] = protoSpecific
            }
        default:
            // For unknown protocols, keep the raw protocol-specific fields.
            if len(protoSpecific) > 0 {
                protocolSpecificData["raw"] = protoSpecific
            }
        }

        // Assemble the final JSON object.
        result := map[string]interface{}{
            "timestamp": timestamp,
            "log_level": logLevel,
            "log": map[string]interface{}{
                "base":                   base,
                "ip_specific_data":       ipSpecificData,
                "ip_data":                ipData,
                "protocol_specific_data": protocolSpecificData,
            },
        }

        jsonData, err := json.Marshal(result)
        if err != nil {
            fmt.Fprintf(os.Stderr, "Error marshaling JSON: %v\n", err)
            continue
        }
        fmt.Println(string(jsonData))
    }

    if err := scanner.Err(); err != nil {
        fmt.Fprintf(os.Stderr, "Error reading input: %v\n", err)
        os.Exit(1)
    }
}
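One caveat for future revisions: bufio.Scanner stops at its default 64 KiB line limit, so an unusually long entry would end the read loop with scanner.Err() returning bufio.ErrTooLong. A minimal sketch of a possible adjustment (not part of this commit; the 1 MiB cap is an arbitrary, illustrative choice):

    // Hypothetical hardening: raise the scanner's maximum token size
    // before entering the read loop. The 1 MiB cap is illustrative only.
    scanner := bufio.NewScanner(os.Stdin)
    scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024)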