Restructure repository: organize tools by purpose, create 'what' search tool
- Move single-file tools to tools/ organized by category (security, forensics, data, etc.) - Move multi-file projects to projects/ (go-tools, puzzlebox, timesketch, rust-tools) - Move system scripts to scripts/ (proxy, display, setup, windows) - Organize config files in config/ (shell, visidata, applications) - Move experimental tools to archive/experimental - Create 'what' fuzzy search tool with progressive enhancement (ollama->fzf->grep) - Add initial metadata database for intelligent tool discovery - Preserve git history using 'git mv' commands
This commit is contained in:
21
tools/data/between
Executable file
21
tools/data/between
Executable file
@@ -0,0 +1,21 @@
|
||||
#!/bin/bash
# between - print a line range from files or stdin.
#
# Usage:
#   between START-END [FILE...]      print lines START through END
#   between START [COUNT] [FILE...]  print COUNT lines starting at START
#                                    (COUNT defaults to 1)
range=${1}
shift
count=1
if [[ "${range}" == *-* ]] ; then
    # Explicit range: split "START-END" on the dash.
    start=${range%-*}
    end=${range#*-}
else
    start=${range}
    # No end given: an optional numeric next argument is the line count.
    # (Fix: the old check only matched a trailing dash, so a bare start
    # number never reached this branch; and COUNT was never shifted off,
    # so it was later treated as a filename by cat.)
    if [[ $# -ge 1 && "${1}" =~ ^[0-9]+$ ]] ; then
        count=${1}
        shift
    fi
    end=$(( start - 1 + count ))
fi
# head keeps everything up to END, tail drops everything before START.
# Quote "$@" so filenames with spaces survive.
cat "$@" | head -n "${end}" | tail -n +"${start}"
|
||||
16
tools/data/concat.py
Normal file
16
tools/data/concat.py
Normal file
@@ -0,0 +1,16 @@
|
||||
#!/usr/bin/python3
"""Concatenate all files given as arguments (plus stdin when piped),
prefixing every output line with the name of the file it came from."""
import fileinput
import sys
import os

# Keep only arguments that are regular files.  argv[0] (the script path)
# must be left untouched: fileinput reads argv[1:], and the old code
# filtered argv[0] too — if it ever failed os.path.isfile, a real input
# file shifted into position 0 and was silently skipped.
sys.argv[1:] = [f for f in sys.argv[1:] if os.path.isfile(f)]

# If stdin has piped data, append "-" so fileinput reads it last.
if not sys.stdin.isatty():
    sys.argv.append("-")

# Emit every line as "<filename>:<line>".
for line in fileinput.input():
    print(f'{fileinput.filename()}:{line.strip()}')
|
||||
26
tools/data/csv_get
Executable file
26
tools/data/csv_get
Executable file
@@ -0,0 +1,26 @@
|
||||
#!/usr/bin/env python3
"""Print selected columns of one or more CSV files, tab-separated.

Columns are chosen with repeated -c options (0-based indices); with no
-c given, only column 0 is printed.
"""
import csv
import argparse
import sys


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', type=int, dest='columns', action="append",
                        help='0-based column index (repeatable)')
    parser.add_argument('infile', nargs='+', type=argparse.FileType('rt'))
    args = parser.parse_args()
    if not args.columns:
        args.columns = [0]
    for file in args.infile:
        csv_infile = csv.reader(file)
        for row in csv_infile:
            try:
                outrow = [row[index] for index in args.columns]
                print('\t'.join(outrow))
            except IndexError:
                # Diagnostics go to stderr so they do not pollute the
                # extracted data on stdout (the old code mixed them in).
                print("No such column.", file=sys.stderr)
                print("Input has columns: {}".format(
                    ",".join([str(x) for x in range(len(row))])),
                    file=sys.stderr)
                print("You selected {}".format(args.columns),
                      file=sys.stderr)
                sys.exit(1)


if __name__ == '__main__':
    main()
|
||||
20
tools/data/domgrep.py
Executable file
20
tools/data/domgrep.py
Executable file
@@ -0,0 +1,20 @@
|
||||
#!/usr/bin/env python3
"""Read URLs/hostnames on stdin and print only the hostname part,
skipping bare IPv4 addresses and stripping any :port suffix."""
import re
import sys
from urllib.parse import urlparse

# Matches a dotted-quad IPv4 address anywhere in the netloc.
pattern = re.compile(r'\d+\.\d+\.\d+\.\d+')

for line in sys.stdin:
    line = line.strip()
    # urlparse only fills netloc when a scheme is present; default to http.
    if not line.lower().startswith('http'):
        line = "http://" + line
    try:
        p = urlparse(line)
        if not pattern.search(p.netloc):
            # Strip a trailing :port if present.
            if ":" in p.netloc:
                print(p.netloc.split(":")[0])
            else:
                print(p.netloc)
    except Exception as e:
        # Report parse failures on stderr so stdout stays clean hostnames
        # (the old code printed the exception to stdout).
        print(e, file=sys.stderr)
|
||||
31
tools/data/geturls.py
Executable file
31
tools/data/geturls.py
Executable file
@@ -0,0 +1,31 @@
|
||||
#!/usr/bin/env python3
# Extract href URLs from an HTML document given as a URL or a local file.
import sys
from bs4 import BeautifulSoup

# argv[1] is either an http(s) URL (fetched with requests) or a file path.
if sys.argv[1].startswith("http://") or sys.argv[1].startswith("https://"):
    import requests
    response = requests.get(sys.argv[1])
    data = response.content
else:
    # NOTE(review): ISO-8859-1 decodes any byte sequence without error —
    # presumably chosen to tolerate unknown encodings; confirm.
    with open(sys.argv[1],'rt',encoding='ISO-8859-1') as f:
        data=f.read()

# Round-trip through BeautifulSoup to normalize the markup before the
# string scan below.
page=str(BeautifulSoup(data,features="lxml"))
|
||||
|
||||
def getURL(page):
    """Locate the first 'a href' link in *page*.

    Returns (url, index-of-closing-quote), or (None, 0) when the text
    contains no 'a href' at all.
    """
    anchor = page.find("a href")
    if anchor == -1:
        return None, 0
    # The URL sits between the first pair of double quotes after the anchor.
    opening = page.find('"', anchor)
    closing = page.find('"', opening + 1)
    return page[opening + 1:closing], closing
|
||||
|
||||
|
||||
# Repeatedly scan for links, advancing past each match.  Stops at the
# first iteration that yields no URL (note: an empty href is falsy too,
# so it also terminates the loop).
while True:
    url, n = getURL(page)
    page = page[n:]
    if url:
        print(url)
    else:
        break
|
||||
50
tools/data/json_save.py
Executable file
50
tools/data/json_save.py
Executable file
@@ -0,0 +1,50 @@
|
||||
import simplejson
|
||||
import json
|
||||
|
||||
def put(data, filename):
    """Serialize *data* as pretty-printed, key-sorted JSON into *filename*.

    Best-effort: failures are reported but never raised, so a failed
    save does not abort the caller (same contract as before).
    """
    try:
        # skipkeys drops non-string dict keys instead of raising.
        jsondata = simplejson.dumps(data, indent=4, skipkeys=True, sort_keys=True)
        # Context manager guarantees the handle is closed even when the
        # write fails (the old open/write/close leaked it on error).
        with open(filename, 'w') as fd:
            fd.write(jsondata)
    except Exception as e:
        print('ERROR writing', filename)
        print( e)
|
||||
|
||||
def get(filename):
    """Load JSON from *filename*; return {} when it cannot be read or parsed.

    Bug fix: the original called the nonexistent ``json.read`` and its
    bare ``except`` swallowed the resulting NameError, so every load
    silently "failed" and returned {}.
    """
    returndata = {}
    try:
        with open(filename, 'r') as fd:
            text = fd.read()
        returndata = json.loads(text)
    except (OSError, ValueError):
        # OSError: unreadable/missing file; ValueError covers
        # json.JSONDecodeError for malformed content.
        print('COULD NOT LOAD:', filename)
    return returndata
|
||||
|
||||
|
||||
# print(mail.filename)
|
||||
# print(mail.status)
|
||||
|
||||
# import gzip
|
||||
# import json
|
||||
#
|
||||
# # writing
|
||||
# with gzip.GzipFile(jsonfilename, 'w') as outfile:
|
||||
# for obj in objects:
|
||||
# outfile.write(json.dumps(obj) + '\n')
|
||||
#
|
||||
# # reading
|
||||
# with gzip.GzipFile(jsonfilename, 'r') as infile:
|
||||
# for line in infile:
|
||||
# obj = json.loads(line)
|
||||
# # process obj
|
||||
# picklefile=open("mails.dump",'wb')
|
||||
# for mail in list_of_mail:
|
||||
# pickle.dump(mail, picklefile )
|
||||
#
|
||||
# picklefile.close()
|
||||
|
||||
28
tools/data/kv_parse.py
Executable file
28
tools/data/kv_parse.py
Executable file
@@ -0,0 +1,28 @@
|
||||
#!/usr/bin/env python3
"""Parse key=value pairs (values optionally double-quoted) from each
input line and write the result as a JSON list of dicts."""
import re
import json
import argparse
import sys

parser = argparse.ArgumentParser()
parser.add_argument("-p", "--preserve", action='store_true',
                    help="preserve original logline in dict")
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'),
                    default=sys.stdin)
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'),
                    default=sys.stdout)
args = parser.parse_args()

data = args.infile.readlines()

# Raw string so \S is a regex escape, not an (invalid) string escape —
# the old non-raw literal is a DeprecationWarning today and a
# SyntaxError in future Python.  A value is either a quoted run without
# inner quotes, or a bare token.
kv_pat = re.compile(r'(?P<key>[^= ]+)=(?P<value>"[^"]+"|\S+)')

log = []
for line in data:
    line_dict = {}
    line = line.strip()
    for key, value in kv_pat.findall(line):
        line_dict[key] = value.strip('"')
    if args.preserve:
        line_dict['original_logline'] = line
    log.append(line_dict)

json.dump(log, args.outfile)
|
||||
39
tools/data/quickchardet.py
Executable file
39
tools/data/quickchardet.py
Executable file
@@ -0,0 +1,39 @@
|
||||
#!/usr/bin/python3
"""Detect the character encoding of a file with chardet.

Default: feed up to 1000 lines into a UniversalDetector and print one
overall guess.  With -l, detect per line and report every point where
the detected encoding changes; -d additionally decodes those lines.
"""
import chardet
from chardet import UniversalDetector
import sys
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("-l", help="list all encoding changes in file", action='store_true')
parser.add_argument("-d", help="try to decode all Lines", action='store_true')
parser.add_argument('filename')
args = parser.parse_args()


with open(args.filename, 'rb') as infile:
    det = UniversalDetector()
    if args.l:
        print("listing encodings of file \"{}\"".format(args.filename))
        encoding = None
        for nl, line in enumerate(infile.readlines()):
            # Detect each line independently of the previous ones.
            det.reset()
            det.feed(line)
            det.close()
            res = det.result
            # Only report lines where the detected encoding changes.
            if encoding != res["encoding"]:
                encoding = res["encoding"]
                if args.d:
                    print("{}#{}#{}({})".format(nl, line.decode(res["encoding"]), res["encoding"], res["confidence"]))
                else:
                    print("{}#{}#{}({})".format(nl, line, res["encoding"], res["confidence"]))
    else:
        # Feed at most 1000 lines, stopping early once chardet is confident.
        i = 1000
        for line in infile.readlines():
            i -= 1
            det.feed(line)
            if det.done or i == 0:
                break
        det.close()
        res = det.result
        # Bug fix: report args.filename, not sys.argv[1] — argv[1] is a
        # flag (e.g. "-l") whenever options are given.
        print("{}:{}({})".format(args.filename, res["encoding"], res["confidence"]))
|
||||
61
tools/data/split_linewise.py
Executable file
61
tools/data/split_linewise.py
Executable file
@@ -0,0 +1,61 @@
|
||||
#!/usr/bin/python3
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
|
||||
|
||||
def split_lines(input_file, chunk_size_mb=500):
    """Split *input_file* into chunks of at most *chunk_size_mb* MB
    without ever cutting a line in half.

    Chunks are written next to the input as
    ``<stem>_chunk001<ext>``, ``<stem>_chunk002<ext>``, ...

    Args:
        input_file (str): Path to the file that will be split into chunks.
        chunk_size_mb (int, optional): Size of each chunk in MB. Defaults to 500.
    """
    chunk_size_bytes = chunk_size_mb * 1024 * 1024  # MB -> bytes
    # splitext always returns a (root, ext) pair, ext being '' when there
    # is none — the old len()==2 check was always true and is dropped.
    stem, file_extension = os.path.splitext(input_file)
    current_chunk = 1
    current_size = 0
    output_file = None

    try:
        with open(input_file, 'r') as infile:
            for line in infile:
                # Encode once per line; the old code encoded twice.
                line_bytes = len(line.encode('utf-8'))
                # Start a new chunk when none is open yet, or when this
                # line would push the current one over the limit.
                if output_file is None or current_size + line_bytes > chunk_size_bytes:
                    if output_file:
                        output_file.close()
                    output_filename = f"{stem}_chunk{current_chunk:03d}{file_extension}"
                    print(f"Created {output_filename}")
                    output_file = open(output_filename, 'w')
                    current_chunk += 1
                    current_size = 0
                output_file.write(line)
                current_size += line_bytes
    finally:
        # Guarantee the last chunk is closed even if reading/writing fails
        # part-way (the old code leaked it on any exception).
        if output_file:
            output_file.close()
|
||||
|
||||
def parse_arguments():
    """Parse command line arguments.

    Returns:
        argparse.Namespace: The arguments passed to the command line.
    """
    parser = argparse.ArgumentParser(
        # Typo fix in user-facing help: "intakt" -> "intact".
        description='Split a text file into smaller chunks but keep lines intact.')
    parser.add_argument('input_file', type=str,
                        help='The path to the input file to be split.')
    parser.add_argument('--chunk_size_mb', type=int, default=500,
                        help='Maximum chunk size in MB (default: 500).')
    return parser.parse_args()
|
||||
|
||||
def main():
    # Entry point: wire the CLI arguments into split_lines.
    args = parse_arguments()
    split_lines(args.input_file, args.chunk_size_mb)


if __name__ == '__main__':
    main()
|
||||
9
tools/data/uniq.py
Executable file
9
tools/data/uniq.py
Executable file
@@ -0,0 +1,9 @@
|
||||
#!/usr/bin/env python3
"""Like uniq(1) but global and order-preserving: print each distinct
stdin line only the first time it appears."""
import sys

# Store the lines themselves rather than hash(line): the built-in hash
# can collide, and a collision silently dropped a distinct line.  This
# trades some memory for exact deduplication.
seen = set()
for line in sys.stdin:
    if line not in seen:
        seen.add(line)
        print(line, end="")
|
||||
64
tools/data/unum.py
Executable file
64
tools/data/unum.py
Executable file
@@ -0,0 +1,64 @@
|
||||
#!/usr/bin/env python3
|
||||
import unicodedata
|
||||
import sys
|
||||
|
||||
|
||||
def long_cat(category):
    """Expand a two-letter Unicode general-category code (e.g. "Ll") to
    its descriptive name; unknown codes are returned unchanged."""
    names = {
        "Cc": "Other, Control",
        "Cf": "Other, Format",
        "Cn": "Other, Not Assigned",
        "Co": "Other, Private Use",
        "Cs": "Other, Surrogate",
        "LC": "Letter, Cased",
        "Ll": "Letter, Lowercase",
        "Lm": "Letter, Modifier",
        "Lo": "Letter, Other",
        "Lt": "Letter, Titlecase",
        "Lu": "Letter, Uppercase",
        "Mc": "Mark, Spacing Combining",
        "Me": "Mark, Enclosing",
        "Mn": "Mark, Nonspacing",
        "Nd": "Number, Decimal Digit",
        "Nl": "Number, Letter",
        "No": "Number, Other",
        "Pc": "Punctuation, Connector",
        "Pd": "Punctuation, Dash",
        "Pe": "Punctuation, Close",
        "Pf": "Punctuation, Final quote",
        "Pi": "Punctuation, Initial quote",
        "Po": "Punctuation, Other",
        "Ps": "Punctuation, Open",
        "Sc": "Symbol, Currency",
        "Sk": "Symbol, Modifier",
        "Sm": "Symbol, Math",
        "So": "Symbol, Other",
        "Zl": "Separator, Line",
        "Zp": "Separator, Paragraph",
        "Zs": "Separator, Space",
    }
    # dict.get returns the code itself when it is not in the table.
    return names.get(category, category)
|
||||
|
||||
def print_info(char):
    """Print one table row (decimal, hex, glyph, category, name) for *char*.

    The glyph column is blanked for newline (codepoint 10) so the table
    layout survives; nonspacing marks get a leading space so they do not
    combine with the preceding column.
    """
    category = unicodedata.category(char)
    spacing = " " if category in ['Mn'] else ''
    try:
        unicodename = unicodedata.name(char)
    except ValueError:
        # Characters without an assigned name (e.g. most controls).
        unicodename = "UNKNOWN"
    codepoint = ord(char)
    if codepoint == 10:
        # Never emit a literal newline in the glyph column.
        unicodename = "UNKNOWN"
        print(f"{codepoint:>8} 0x{codepoint:>06x} {spacing}{' ':^5} {long_cat(category):<26} {unicodename:<30}")
    else:
        print(f"{codepoint:>8} 0x{codepoint:>06x} {spacing}{char:^5} {long_cat(category):<26} {unicodename:<30}")
|
||||
|
||||
|
||||
# Table header, then one row per character: characters come from stdin
# when no arguments are given, otherwise from every character of every
# command line argument.
print(f" Decimal Hex Char {'Category':^26} Name")
if len(sys.argv) == 1:
    for char in sys.stdin.read():
        print_info(char)
else:
    for argument in sys.argv[1:]:
        for char in argument:
            print_info(char)
|
||||
|
||||
6
tools/data/urldecode.py
Executable file
6
tools/data/urldecode.py
Executable file
@@ -0,0 +1,6 @@
|
||||
#!/usr/bin/python3
"""Percent-decode and HTML-unescape the command line arguments."""
from urllib.parse import unquote
import html
import sys

# All arguments are treated as one space-separated string.
joined = ' '.join(sys.argv[1:])
decoded = unquote(joined)
print(html.unescape(decoded))
|
||||
28
tools/data/vba_chr_decode.py
Normal file
28
tools/data/vba_chr_decode.py
Normal file
@@ -0,0 +1,28 @@
|
||||
#!/usr/bin/python3
#
# Decode VBA Macro based on chr() obfuscation
# Xavier Mertens <xavier@rootshell.be>
#
# Ported to Python 3 (the original used a py2 print statement) and the
# broken trailer removed: a bare `exit` no-op, a call to an undefined
# main(), and a dangling `mname` stub that could never run.

import re
import sys

# Matches Chr(...) / ChrW(...) / Chr$(...) in any case, capturing the
# (possibly arithmetic) numeric argument.
CHR_CALL = r'[cC][hH][rR][wW\$]*\(([\d+\+\-\s\.]*)\)'


def do_chr(m):
    """Replacement callback: evaluate one Chr(...) match to its character.

    SECURITY: eval() executes the captured text as Python.  The character
    class restricts it to digits, +, -, whitespace and dots — do not
    loosen that pattern when feeding untrusted macro source through this.
    """
    if m.group(0):
        return eval(re.sub(r'[cC][hH][rR][wW\$]*\(([\d\+\-\s.]*)\)',
                           r'chr(int(\1))', m.group(0)))
    return ""


def main():
    # Decode stdin line by line: replace each Chr(...) call with its
    # character, then drop VBA's " & " string-concatenation operators.
    for line in sys.stdin.readlines():
        line = re.sub(CHR_CALL, do_chr, line)
        line = re.sub(" & ", "", line)
        print(line.rstrip())


if __name__ == '__main__':
    main()
|
||||
Reference in New Issue
Block a user