Reorganise gists folder

This commit is contained in:
TKE
2022-05-13 12:49:21 +02:00
parent ecd3c7fe2f
commit acd8d616dc
98 changed files with 63 additions and 20 deletions

21
tools/between Executable file
View File

@@ -0,0 +1,21 @@
#!/bin/bash
# Print a line range from the given files (or stdin).
#
# Usage: between START-END [file...]     print lines START..END
#        between START [COUNT] [file...] print COUNT lines starting at START
#                                        (COUNT defaults to 1)
range=${1}
shift
count=1
# Detect the range form by the presence of a dash. (The old `end=${1#*-}`
# test was never empty for dash-less arguments, so the count branch was
# unreachable for normal input.)
if [[ "${range}" == *-* ]] ; then
    start=${range%-*}
    end=${range#*-}
else
    start=${range}
    # a single numeric argument is a line count, not a filename;
    # consume it so it is not passed on to cat below
    if [[ $# -eq 1 && "${1}" =~ ^[0-9]+$ ]] ; then
        count=${1}
        shift
    fi
    end=$(( start - 1 + count ))
fi
cat "$@" | head -n+${end} | tail -n+${start}

16
tools/concat.py Normal file
View File

@@ -0,0 +1,16 @@
#!/usr/bin/python3
"""Concatenate all regular files given as arguments (plus stdin, if piped)
and prefix every output line with its source filename."""
import fileinput
import sys
import os

# Build an explicit file list instead of mutating sys.argv: filtering argv
# could drop argv[0] and silently shift a real file into its place.
files = [f for f in sys.argv[1:] if os.path.isfile(f)]
# if stdin has data, read it as well ("-" is fileinput's stdin marker)
if not sys.stdin.isatty():
    files.append("-")
# concat all lines from all files, tagged with their origin
for line in fileinput.input(files=files):
    print(f'{fileinput.filename()}:{line.strip()}')

17
tools/csv2dot Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Convert a CSV file into an undirected Graphviz graph.
# Usage: csv2dot [file]    (defaults to test.csv for backward compatibility)
# Output is written to <file>.dot; columns 6 and 8 become connected nodes.
header=1
file="${1:-test.csv}"
output="${file}.dot"
delim=,
# column numbers: source id, destination id, edge label (label currently unused)
s_id=6
d_id=8
e_label=11
cat > "${output}" <<EOF
graph a{
node [shape=record]
EOF
#awk -F"${delim}" '{print "\""$6 "\" -> \"" $8 "\"[label=\"" $11"\"]"}' "${file}" >> "${output}"
awk -F"${delim}" '{print "\""$6 "\" -- \"" $8 "\""}' "${file}" >> "${output}"
echo "}" >> "${output}"

26
tools/csv_get Executable file
View File

@@ -0,0 +1,26 @@
#!/usr/bin/env python3
"""Print selected columns (0-based, tab-joined) of one or more CSV files."""
import csv
import argparse
import sys


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', type=int, dest='columns', action="append",
                        help='0-based column index (repeatable, default: 0)')
    parser.add_argument('infile', nargs='+', type=argparse.FileType('rt'))
    args = parser.parse_args()
    if not args.columns:
        # no -c given: default to the first column
        args.columns = [0]
    for file in args.infile:
        csv_infile = csv.reader(file)
        for row in csv_infile:
            try:
                outrow = [row[index] for index in args.columns]
                print('\t'.join(outrow))
            except IndexError:
                # diagnostics go to stderr so they never pollute the data
                print("No such column.", file=sys.stderr)
                print("Input has columns: {}".format(
                    ",".join([str(x) for x in range(len(row))])), file=sys.stderr)
                print("You selected {}".format(args.columns), file=sys.stderr)
                sys.exit(1)


if __name__ == '__main__':
    main()

20
tools/domgrep.py Executable file
View File

@@ -0,0 +1,20 @@
#!/usr/bin/env python3
"""Read URLs/hostnames from stdin and print the host part (without port),
skipping entries whose host is a bare dotted-quad IPv4 address."""
import re
import sys
from urllib.parse import urlparse

# matches dotted-quad IPv4 hosts, which we want to skip
pattern = re.compile(r'\d+\.\d+\.\d+\.\d+')
for line in sys.stdin:
    line = line.strip()
    # urlparse only fills netloc when a scheme is present
    if not line.lower().startswith('http'):
        line = "http://" + line
    try:
        p = urlparse(line)
        if not pattern.search(p.netloc):
            if ":" in p.netloc:
                # strip an explicit port
                print(p.netloc.split(":")[0])
            else:
                print(p.netloc)
    except ValueError as e:
        # malformed input line: report on stderr, keep processing
        print(e, file=sys.stderr)

10
tools/get_ntp.py Executable file
View File

@@ -0,0 +1,10 @@
#!/usr/bin/env python3
"""Query the NTP server named on the command line and print its time."""
import ntplib
import sys
from time import ctime

if len(sys.argv) < 2:
    print("usage: get_ntp.py <server>", file=sys.stderr)
    sys.exit(1)
c = ntplib.NTPClient()
try:
    response = c.request(sys.argv[1])
    print(ctime(response.tx_time))
except Exception:
    # narrowed from a bare except so Ctrl-C / SystemExit still work
    print("ERROR")

15
tools/get_stp.sh Executable file
View File

@@ -0,0 +1,15 @@
#!/bin/bash
# Sniff a single STP frame on the given interface and report which bridge
# and port we are connected to, and who the root bridge is.
# Usage: get_stp.sh <interface>
interface=${1}
# capture one STP packet; give up after 10 seconds (quote the interface
# so names with unusual characters survive word splitting)
one_stp=$(timeout -k 10 10 tcpdump -vvv -c1 stp -i "${interface}" 2>/dev/null)
root_id=$(echo "$one_stp" | grep -Po "(?<=root-id )[^,]*")
# bridge-id looks like "c0a3.d0:c7:89:94:b4:00.8009": field 1 before the
# first dot, the port id after the last dot
bridge_id=$(echo "$one_stp" | grep -Po "(?<=bridge-id )[^,]*" | cut -f1 -d. )
port_id=$(echo "$one_stp" | grep -Po "(?<=bridge-id )[^,]*" | cut -f2 -d. )
echo "connected over $bridge_id at $port_id to $root_id"
echo $one_stp
# presumably a known site-specific root bridge — TODO confirm
if [[ $root_id == "80a3.00:1d:71:b9:f0:80" ]]; then
    echo "iassc detected"
fi
#bridge-id c0a3.d0:c7:89:94:b4:00.8009

31
tools/geturls.py Executable file
View File

@@ -0,0 +1,31 @@
#!/usr/bin/env python3
"""Print every <a href> URL found in a local HTML file or a fetched web page."""
import sys
from bs4 import BeautifulSoup

if sys.argv[1].startswith("http://") or sys.argv[1].startswith("https://"):
    # only needed for the URL case, so imported lazily like the original
    import requests
    response = requests.get(sys.argv[1])
    data = response.content
else:
    with open(sys.argv[1], 'rt', encoding='ISO-8859-1') as f:
        data = f.read()
# Let BeautifulSoup find the anchors instead of scanning str(soup) by hand:
# the old scanner assumed href was the first attribute, double-quoted, and
# stopped at the first empty href.
soup = BeautifulSoup(data, features="lxml")
for anchor in soup.find_all('a', href=True):
    print(anchor['href'])

147
tools/ipgrep Executable file
View File

@@ -0,0 +1,147 @@
#!/bin/bash
# Greps IPs and MACs
# author : Tobias Kessels
# date : 26.01.2015
#grep Perl Regexpattern for MAC and IP
# IP: four dot-separated octets, each constrained to 0-255
IP_PATTERN="(((25[0-5])|(2[0-4][0-9])|(1?\d?\d))\.){3}((25[0-5])|(2[0-4][0-9])|(1?\d?\d))"
# MAC: six colon/dash-separated hex pairs, or the dotted 4-4-4 hex form
MACID_PATTERN="(([a-fA-F0-9]{2}[:-]){5}[a-fA-F0-9]{2})|([a-f0-9]{4}\.[a-f0-9]{4}\.[a-f0-9]{4})"
#sort parameters to sort IPs correctly
# numeric sort on each of the four dot-separated fields
IP_SORT=" -t. -k1,1n -k2,2n -k3,3n -k4,4n"
#SWITCHES & DEFAULTS
SORT=0       # -s: sort output
UNIQ=0       # -u: deduplicate output (implies sorting)
MAC=0        # -m: grep MACs instead of IPs
PINGABLE=0   # -p: keep only entries that answer ping/arping
RESOLVE=0    # -r: add reverse-DNS / arping information
FILE=""      # -f: read from this file instead of the argument files / stdin
PATTERN=${IP_PATTERN}
SORT_PATTERN=${IP_SORT}
#prints usage help and exits
usage () {
echo "ipgrep [-u] [-s] [-m]"
echo ""
echo " ipgrep greps IPs or MACs from any output or file "
echo " -s sort Output"
echo " -u only show uniq IPs/MACs (implies -s)"
echo " -p only show 'pingable' entries (MACs still beta)"
echo " -r show additional information"
echo " -m grep MAC-IDs instead of IPs"
exit 1;
}
#process commandline switches
while getopts :husmrpf: FLAG; do
case $FLAG in
u) UNIQ=1 ;;
s) SORT=1 ;;
m) MAC=1 ;;
p) PINGABLE=1 ;;
r) RESOLVE=1 ;;
f) FILE=$OPTARG ;;
h) usage ;;
\?) echo "whats that: $OPTARG"
usage ;;
esac
done
#clear all Arguments so that 'cat $@' wont get any switches
shift $#
# NOTE: inside [[ ]], -eq evaluates its operands arithmetically, so the bare
# word MAC expands to the variable's value here (equivalent to $MAC).
if [[ MAC -eq 1 ]]; then
PATTERN=${MACID_PATTERN}
SORT_PATTERN=""
fi
# -p needs a stable, duplicate-free list to probe, so force -s and -u
if [[ PINGABLE -eq 1 ]]; then
SORT=1
UNIQ=1
fi
filtery() {
    # Extract every match of the active pattern (MACs with -m, IPs otherwise),
    # one match per line, from the given files or stdin.
    local pattern="${IP_PATTERN}"
    if [[ $MAC -eq 1 ]]; then
        pattern="${MACID_PATTERN}"
    fi
    cat "$@" | grep -Po "${pattern}"
}
sorty() {
    # Sort the input when -s or -u was given; -u additionally deduplicates.
    # IPs need field-wise numeric sorting; MACs sort fine lexically.
    if [[ $SORT -eq 1 ]] || [[ $UNIQ -eq 1 ]]
    then
        if [[ $MAC -eq 1 ]]; then
            SORT_PATTERN=""
        else
            SORT_PATTERN="${IP_SORT}"
        fi
        local uniq_flag=""
        if [[ $UNIQ -eq 1 ]]; then
            uniq_flag="-u"
        fi
        cat "$@" | sort $SORT_PATTERN $uniq_flag
    else
        # pass through untouched
        cat "$@"
    fi
}
pingy() {
# With -p, keep only entries that answer a probe. Each probe runs in its own
# background subshell so the slow network round-trips happen concurrently;
# the pipeline through sorty restores a deterministic order afterwards.
# Without -p this is a transparent pass-through.
if [[ PINGABLE -eq 1 ]]; then
if [[ MAC -eq 1 ]]; then
(for i in $(cat "$@")
do
# layer-2 probe; -w 5000000 is the timeout (units depend on the installed
# arping variant — TODO confirm)
(if (arping -c 1 -w 5000000 $i 2>/dev/null 1>/dev/null)
then
echo $i
fi)&
done) | sorty
else
(for i in $(cat "$@")
do
# one ICMP echo with a 1 second deadline
(if (ping -c1 -w1 $i >/dev/null)
then
echo $i
fi)&
done) | sorty
fi
else
cat "$@"
fi
}
resolve(){
# With -r, annotate entries with extra information:
#   MAC mode : re-run arping against reachable MACs and print its output
#   IP mode  : reverse-DNS lookup via host(1), printed as "IP name"
# Lookups run concurrently in background subshells.
# Without -r this is a transparent pass-through.
if [[ RESOLVE -eq 1 ]]; then
if [[ MAC -eq 1 ]]; then
(for i in $(cat "$@")
do
(if (arping -q -c 1 -w 5000000 $i 2>/dev/null 1>/dev/null)
then
arping -c1 $i
fi)&
done) | sorty
else
(for i in $(cat "$@")
do
(
# extract the PTR record name from host's output
name=$(host $i | grep -Po "(?<=pointer ).*")
echo "$i $name"
)&
done) | cat
fi
else
cat "$@"
fi
}
set -e
#execute command: run the pipeline on stdin/argument files, or on the -f file
if [ "$FILE" == "" ]; then
    cat "$@" | filtery | sorty | pingy | resolve
else
    # quote the filename so paths containing spaces survive
    cat "$FILE" | filtery | sorty | pingy | resolve
fi

6
tools/mailunpack Executable file
View File

@@ -0,0 +1,6 @@
#!/bin/bash
# Unpack MIME attachments from a mail file inside a sandboxed docker container
# (no network, non-root, mail directory mounted read-only, output to cwd).
current_dir=$(pwd)
mail_file=$(readlink -f "${1}")
filename=$(basename "${mail_file}")
file_dir=$(dirname "${mail_file}")
# FIX: the original passed "/data/$(unknown)", which tried to execute a
# nonexistent command instead of using the computed basename
docker run -it --rm --user=$(id -u) --net=none -v "${current_dir}:/home/nonroot/" -v "${file_dir}:/data:ro" tabledevil/file-analysis munpack -t "/data/${filename}"

3
tools/noerr Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/bash
# Run the given command line with stderr discarded.
command_line="$*"
bash -c "${command_line}" 2>/dev/null

5
tools/openflattenpdf.sh Executable file
View File

@@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Round-trip a PDF through PostScript (flattening it) and open it in evince.
tmpfile=$(mktemp)
# clean up the temporary file when the viewer exits (the original leaked it)
trap 'rm -f "${tmpfile}"' EXIT
pdf2ps "${1}" - | ps2pdf - "${tmpfile}"
evince "${tmpfile}"

39
tools/quickchardet.py Executable file
View File

@@ -0,0 +1,39 @@
#!/usr/bin/python3
"""Detect the character encoding of a file with chardet.

Default: feed up to 1000 lines to the detector and print one overall guess.
-l: report every line at which the detected encoding changes.
-d: (with -l) additionally decode each reported line.
"""
import chardet
from chardet import UniversalDetector
import sys
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("-l", help="list all encoding changes in file", action='store_true')
parser.add_argument("-d", help="try to decode all Lines", action='store_true')
parser.add_argument('filename')
args = parser.parse_args()
with open(args.filename, 'rb') as infile:
    det = UniversalDetector()
    if args.l:
        print("listing encodings of file \"{}\"".format(args.filename))
        encoding = None
        for nl, line in enumerate(infile.readlines()):
            # detect each line independently
            det.reset()
            det.feed(line)
            det.close()
            res = det.result
            if encoding != res["encoding"]:
                encoding = res["encoding"]
                if args.d:
                    print("{}#{}#{}({})".format(nl, line.decode(res["encoding"]), res["encoding"], res["confidence"]))
                else:
                    print("{}#{}#{}({})".format(nl, line, res["encoding"], res["confidence"]))
    else:
        # feed at most 1000 lines, stopping early once the detector is sure
        i = 1000
        for line in infile.readlines():
            i -= 1
            det.feed(line)
            if det.done or i == 0:
                break
        det.close()
        res = det.result
        # FIX: report the parsed filename — sys.argv[1] may be an option flag
        print("{}:{}({})".format(args.filename, res["encoding"], res["confidence"]))

13
tools/rename.mime.py Executable file
View File

@@ -0,0 +1,13 @@
"""Sort the files in a directory (argv[1]) into subdirectories named after
their MIME type as reported by file(1)."""
import os
import sys
import subprocess
import re

# captures the mime type from `file -Ni` output, e.g. ": text/plain; charset=..."
pattern = re.compile("(: )([^;]+)")
directory = sys.argv[1]
for name in os.listdir(directory):
    # listdir yields bare names; build real paths so this works from any cwd
    path = os.path.join(directory, name)
    if not os.path.isfile(path):
        # skip subdirectories (including mime dirs created by a previous run)
        continue
    # FIX: check_output returns bytes, but the pattern is a str regex
    output = subprocess.check_output(["file", "-Ni", path]).decode()
    match = pattern.search(output)
    mimetype = re.sub(r"\W", "_", match.group(2))
    target_dir = os.path.join(directory, mimetype)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    os.rename(path, os.path.join(target_dir, name))

85
tools/scatterhash.py Executable file
View File

@@ -0,0 +1,85 @@
#!/usr/bin/python3
import sys
import hashlib
import os
import numpy as np
import math
import argparse
def even_select(N, M):
    """Return an int array of length N marking which of N blocks to process.

    Entries equal to 0 are selected; entries equal to 1 are skipped. The M
    selected blocks are spread as evenly as possible across the N positions.
    """
    # guard the degenerate cases: the divmod calls below divide by N-M or M,
    # so M == N (spread=100) or M == 0 crashed with ZeroDivisionError
    if M >= N:
        return np.zeros(N, dtype=int)
    if M <= 0:
        return np.ones(N, dtype=int)
    if M > N / 2:
        # few blocks skipped: start from "all selected", mark the skips
        cut = np.zeros(N, dtype=int)
        q, r = divmod(N, N - M)
        indices = [q * i + min(i, r) for i in range(N - M)]
        cut[indices] = True
    else:
        # few blocks selected: start from "all skipped", clear the picks
        cut = np.ones(N, dtype=int)
        q, r = divmod(N, M)
        indices = [q * i + min(i, r) for i in range(M)]
        cut[indices] = False
    return cut
def get_offsets(blocksize, blockcount, blocks_to_hash):
    """Yield the byte offset of every block chosen for hashing."""
    selection = even_select(blockcount, blocks_to_hash)
    for index, skipped in enumerate(selection):
        if skipped == 0:
            yield int(blocksize * index)
def get_hash(file, hashalgo, spread=-1, maxsize=-1, blocks_to_hash=-1):
    """Hash a sparse, evenly spread sample of `file` and return a record.

    file           -- open binary file object
    hashalgo       -- hashlib algorithm name
    spread         -- percentage of blocks to hash (used when blocks_to_hash
                      is not given)
    maxsize        -- cap on sampled bytes; -1 means no cap
    blocks_to_hash -- explicit block count (validation mode); overrides spread
    Returns "digest;blocks;filesize;algo;name".
    """
    h = hashlib.new(hashalgo)
    filesize = os.path.getsize(file.name)
    blocksize = h.block_size * 65535
    blockcount = math.ceil(filesize / blocksize)
    if blocks_to_hash == -1:
        blocks_to_hash = math.ceil(blockcount * spread / 100)
        # FIX: only cap when a real limit was given. The old unconditional
        # check compared against the default maxsize of -1 and zeroed out an
        # explicitly passed blocks_to_hash, breaking validation of big files.
        if maxsize != -1 and (blocks_to_hash * blocksize) > maxsize:
            blocks_to_hash = math.ceil(maxsize / blocksize)
    if filesize > blocksize:
        for of in get_offsets(blocksize, blockcount, blocks_to_hash):
            file.seek(of)
            h.update(file.read(blocksize))
    else:
        # small file: a single read covers everything
        h.update(file.read(blocksize))
    result = "{};{};{};{};{}".format(h.hexdigest(), blocks_to_hash, filesize, hashalgo, file.name)
    return result
parser = argparse.ArgumentParser(description='Sparsly hash large files. Only a given percentage of the file is actualy hashed.')
parser.add_argument('-p', metavar='N', action="store", dest="spread", type=int, nargs='?', default=10, help='percentage of file to hash. 0 < N < 100 (default=10)')
parser.add_argument('-s', metavar='N', action="store", dest="size", type=int, nargs='?', default=10, help='maximum amount of data per file in MB')
parser.add_argument('-c', action="store", dest="hashalgo", nargs='?', default="md5", help='select an hashalgorithm (default=md5)')
parser.add_argument('file', type=argparse.FileType('rb'), nargs='+')
parser.add_argument('-v', default=False, dest="validate", action='store_true', help='read output-file of previous run and validate hashes')
parser.add_argument('-1', default=True, dest="mismatches", action='store_false', help='suppress mismatches')
parser.add_argument('-0', default=True, dest="matches", action='store_false', help='suppress matches')
args = parser.parse_args()

if not args.validate:
    # hashing mode: emit one "digest;blocks;size;algo;name" record per file
    hashalgo = args.hashalgo
    spread = args.spread
    maxsize = args.size * 1024 * 1024
    for infile in args.file:
        print(get_hash(infile, hashalgo, spread, maxsize))
else:
    # validation mode: the first positional file is a previous run's output
    print("validating")
    for line in args.file[0]:
        line = line.decode().strip()
        # renamed from `hash`/`file` to avoid shadowing the builtins
        digest, blocks_hashed, filesize, hashalgo, filename = line.split(';')
        blocks_hashed = int(blocks_hashed)
        filesize = int(filesize)
        if os.path.isfile(filename):
            if os.path.getsize(filename) != filesize:
                result = "BAD_SIZE"
            else:
                # close the reopened file promptly instead of leaking it
                with open(filename, 'rb') as reopened:
                    rehash = get_hash(reopened, hashalgo, blocks_to_hash=blocks_hashed)
                if digest == rehash.split(";")[0]:
                    result = "OK"
                else:
                    result = "BAD_HASH"
        else:
            result = "FILE_NOT_FOUND"
        if args.mismatches and not result == "OK":
            print("{};{}".format(result, line))
        elif args.matches and result == "OK":
            print("{};{}".format(result, line))

4
tools/showgm.sh Executable file
View File

@@ -0,0 +1,4 @@
#!/bin/bash
# Open Google Maps at the GPS position embedded in a picture's EXIF data.
picture="${1}"
# exiftool prints "D?M'S.SS" coordinates; sed strips spaces and URL-encodes
# the degree sign placeholder. Quote the picture path (the original broke
# on filenames containing spaces).
url="https://www.google.com/maps/place/$(exiftool -ee -p '$gpslatitude, $gpslongitude' -c "%d?%d'%.2f"\" "${picture}" 2> /dev/null | sed -e "s/ //g" -e "s/?/%C2%B0/g")"
firefox -p work "$url"

4
tools/showosm.sh Executable file
View File

@@ -0,0 +1,4 @@
#!/bin/bash
# Open OpenStreetMap at the GPS position embedded in a picture's EXIF data.
picture="${1}"
# exiftool prints "D?M'S.SS" coordinates; sed strips spaces and URL-encodes
# the degree sign placeholder. Quote the picture path (the original broke
# on filenames containing spaces).
url="https://www.openstreetmap.org/search?query=$(exiftool -ee -p '$gpslatitude, $gpslongitude' -c "%d?%d'%.2f"\" "${picture}" 2> /dev/null | sed -e "s/ //g" -e "s/?/%C2%B0/g")"
firefox -p work "$url"

19
tools/tarsum.py Executable file
View File

@@ -0,0 +1,19 @@
#!/usr/bin/python3 -u
"""Print a checksum for every regular file inside a tar archive,
one "digest name" pair per line (md5 by default)."""
import tarfile
import hashlib
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('infile', type=argparse.FileType('rb'))
parser.add_argument('-c', '--hashtype', default="md5", choices=hashlib.algorithms_available)
args = parser.parse_args()

# close the archive deterministically when done
with tarfile.open(fileobj=args.infile) as tf:
    for member in tf:
        if member.isfile():
            h = hashlib.new(args.hashtype)
            extracted_file = tf.extractfile(member)
            # read in sizeable chunks: h.block_size is only 64 bytes for md5,
            # which made the original read loop needlessly slow
            for chunk in iter(lambda: extracted_file.read(65536), b''):
                h.update(chunk)
            print("{1} {0}".format(member.name, h.hexdigest()))

64
tools/unum.py Executable file
View File

@@ -0,0 +1,64 @@
#!/usr/bin/env python3
import unicodedata
import sys
def long_cat(category):
    """Translate a two-letter Unicode category code into its long name.

    Unknown codes are returned unchanged.
    """
    cats = {"Cc": "Other, Control",
            "Cf": "Other, Format",
            "Cn": "Other, Not Assigned",
            "Co": "Other, Private Use",
            "Cs": "Other, Surrogate",
            "LC": "Letter, Cased",
            "Ll": "Letter, Lowercase",
            "Lm": "Letter, Modifier",
            "Lo": "Letter, Other",
            "Lt": "Letter, Titlecase",
            "Lu": "Letter, Uppercase",
            "Mc": "Mark, Spacing Combining",
            "Me": "Mark, Enclosing",
            "Mn": "Mark, Nonspacing",
            "Nd": "Number, Decimal Digit",
            "Nl": "Number, Letter",
            "No": "Number, Other",
            "Pc": "Punctuation, Connector",
            "Pd": "Punctuation, Dash",
            "Pe": "Punctuation, Close",
            "Pf": "Punctuation, Final quote",
            "Pi": "Punctuation, Initial quote",
            "Po": "Punctuation, Other",
            "Ps": "Punctuation, Open",
            "Sc": "Symbol, Currency",
            "Sk": "Symbol, Modifier",
            "Sm": "Symbol, Math",
            "So": "Symbol, Other",
            "Zl": "Separator, Line",
            "Zp": "Separator, Paragraph",
            "Zs": "Separator, Space"}
    # dict.get replaces the membership test + if/else of the original
    return cats.get(category, category)
def print_info(char):
    """Print one table row (decimal, hex, glyph, category, name) for a character."""
    codepoint = ord(char)
    # nonspacing marks get a leading space so they don't combine with the column
    spacing = " " if unicodedata.category(char) in ['Mn'] else ''
    try:
        unicodename = unicodedata.name(char)
    except ValueError:
        unicodename = "UNKNOWN"
    # a literal newline would break the table layout: show a blank glyph cell
    if codepoint == 10:
        unicodename = "UNKNOWN"
        shown = ' '
    else:
        shown = char
    print(f"{codepoint:>8} 0x{codepoint:>06x} {spacing}{shown:^5} {long_cat(unicodedata.category(char)):<26} {unicodename:<30}")
# table header, aligned with print_info's row format
print(f" Decimal Hex Char {'Category':^26} Name")
if len(sys.argv) == 1:
    # no arguments: describe every character read from stdin
    for ch in sys.stdin.read():
        print_info(ch)
else:
    # otherwise describe every character of every argument
    for argument in sys.argv[1:]:
        for ch in argument:
            print_info(ch)

6
tools/urldecode.py Executable file
View File

@@ -0,0 +1,6 @@
#!/usr/bin/python3
"""URL-decode and HTML-unescape the command line arguments."""
from urllib.parse import unquote
import html
import sys

url = ' '.join(sys.argv[1:])
decoded = unquote(url)
print(html.unescape(decoded))

22
tools/watchgrowth.sh Executable file
View File

@@ -0,0 +1,22 @@
#!/bin/bash
# Watch a file/directory grow, printing MB/s once per second.
# Usage: watchgrowth.sh <path> [total_size_bytes]
#   with a total size, a completion percentage is shown as well
old_size=$(du -b "${1}" | cut -f1)
while true; do
    sleep 1
    new_size=$(du -b "${1}" | cut -f1)
    size_diff=$(( new_size - old_size ))
    old_size=${new_size}
    progress=""
    # FIX: the total is ${2}, so two arguments suffice — the original
    # tested $# -eq 3 and never showed progress without a phantom third arg
    if [[ $# -ge 2 ]] ; then
        total=${2}
        # dc, 2 decimal places: new_size / (total / 100) = percent done
        progress_p=$(echo "2 k ${new_size} ${total} 100 / / p" | dc)
        progress="${progress_p} %"
    fi
    # bytes per second -> MB per second, 2 decimal places
    speed=$(echo "2 k ${size_diff} 1024 1024 * / p" | dc)
    echo "${progress} - ${speed} MB/s"
done