1
0
Fork 0
master
dece 3 years ago
commit d82a19c2fb

@ -0,0 +1,4 @@
#!/bin/bash
# Disable and reenable all swap devices to clear their contents.
# Abort immediately if swapoff fails so the swapon call does not mask the
# error (e.g. not enough free RAM to absorb the swapped-out pages).
set -euo pipefail
sudo swapoff -a
sudo swapon -a

@ -0,0 +1,20 @@
#!/usr/bin/env python3
# Dumb script to print statistics of file extensions in the current directory.
import os
import os.path
from collections import defaultdict
def count_extensions(folder):
    """Recursively walk `folder` and tally files per extension.

    Returns a defaultdict mapping extension (leading dot stripped, "" for
    files without one) to the number of files carrying it.
    """
    counts = defaultdict(int)
    for _root, _dirs, filenames in os.walk(folder):
        for filename in filenames:
            extension = os.path.splitext(filename)[1].lstrip(".")
            counts[extension] += 1
    return counts
if __name__ == "__main__":
    # Sort by count descending; extensions are unique keys so there are no
    # ties and the order matches reversed(sorted(...)) exactly.
    extension_counts = count_extensions(".")
    ordered = sorted(((count, ext) for ext, count in extension_counts.items()),
                     reverse=True)
    for count, ext in ordered:
        print("{}\t{}".format(count, ext))

@ -0,0 +1,39 @@
#!/usr/bin/env python3
"""Extract LSB plans for either RGB or the greyscale value. Requires Pillow."""
import sys
import PIL.Image
def main():
    # Open the image given as the first CLI argument and dump its LSB
    # plane(s): mode "L" (8-bit greyscale) has a single channel; any other
    # mode is assumed to expose at least R, G and B channels (see dump_rgb).
    img = PIL.Image.open(sys.argv[1])
    if img.mode == "L":
        print("Dumping grayscale LSB.")
        dump_monochannel(img)
    else:
        print("Dumping RGB LSB.")
        dump_rgb(img)
def dump_monochannel(img):
    """Save the least-significant-bit plane of a greyscale image as lsb.png."""
    width, height = img.size
    plane = PIL.Image.new('1', (width, height))
    for col in range(width):
        for row in range(height):
            source_pixel = img.getpixel((col, row))
            plane.putpixel((col, row), source_pixel & 1)
    plane.save("lsb.png")
def dump_rgb(img):
    """Save one 1-bit LSB plane per RGB channel as lsb0/lsb1/lsb2.png."""
    width, height = img.size
    for channel in range(3):
        plane = PIL.Image.new('1', (width, height))
        for col in range(width):
            for row in range(height):
                source_pixel = img.getpixel((col, row))
                plane.putpixel((col, row), source_pixel[channel] & 1)
        plane.save(f"lsb{channel}.png")
if __name__ == "__main__":
    main()

@ -0,0 +1,74 @@
#!/bin/bash
# Quick grep for emojis from the terminal.
# You first have to download the UCD archive. It's only a few MB compressed.
# I use ripgrep for its speed but you can replace GREP by what you see fit.
# Bonus! Use this bash function to copy the first result in your X clipboard:
# mj() { emoji -u "$1" | xclip ; echo "$(xclip -o)" }
# Made with 💖 by dece. s/o to mon loulou, the bash samurai. License: WTFPLv2.
# Unicode Character Database, "flat" XML export: one <char> element per
# character once decompressed. Used as the emoji search corpus.
UCD_URL="https://www.unicode.org/Public/UCD/latest/ucdxml/ucd.all.flat.zip"
UCD="$HOME/.local/share/emoji/ucd.all.flat.zip"
# Grep implementation: ripgrep for speed, but any grep-compatible tool works.
GREP="rg"
usage() {
    # Heredoc instead of one echo per line; unquoted delimiter so $0 expands.
    cat << EOF
usage: $0 [-n] [-l LIMIT] FILTER
Display emojis based on the name filter provided.
 -h show usage
 -n hide emoji name
 -l LIMIT limit number of output lines
 -u unique result (equals -n and -l 1), no new line
 -c show code point
 -d download UCD zip (requires curl)
EOF
}
# No arguments at all: show the help and quit successfully.
[ $# -eq 0 ] && usage && exit
download_ucdxml() {
    # Make sure the destination directory exists, then fetch the archive.
    # mkdir -p is a no-op when the directory is already there.
    directory="$(dirname "$UCD")"
    mkdir -p "$directory"
    curl -L -o "$UCD" "$UCD_URL"
}
# Output options, toggled by the flags below (empty means "off").
HIDE_NAME=
LIMIT=
NO_NEW_LINE=
SHOW_CP=
while getopts "hdnl:uc" OPTION; do
    case $OPTION in
        h) usage; exit 0 ;;
        d) download_ucdxml; exit $? ;;
        n) HIDE_NAME=true ;;
        l) LIMIT=$OPTARG ;;
        u) HIDE_NAME=true; LIMIT=1; NO_NEW_LINE=true ;;
        c) SHOW_CP=true ;;
        *) usage; exit 1 ;;
    esac
done
shift $(( OPTIND - 1 ))
# Everything left after the options forms the name filter.
FILTER="$*"
if [ ! -f "$UCD" ]; then
    echo "Can't find UCD archive at $UCD. Use -d to download it!"
    exit 1
fi
search_chars() {
    # NOTE(review): this relies on gzip's zcat accepting a single-member zip
    # file, which works for this archive — confirm on non-GNU systems, or
    # switch to `unzip -p`.
    # First grep keeps emoji characters only; the second matches $1 against
    # the name attributes ("na.?" covers both na= and na1=).
    zcat "$UCD" | "$GREP" 'Emoji="Y"' | "$GREP" -i "na.?=\"[^\"]*$1[^\"]*\""
}
line_id=0
# Note: the while body runs in a pipeline subshell, so line_id and break
# only need to be consistent inside the loop itself.
search_chars "$FILTER" | while read -r line; do
    # Stop once the requested number of results has been printed.
    [ -n "$LIMIT" ] && (( line_id >= LIMIT )) && break
    codepoint="$(echo "$line" | sed -E 's/.* cp="([0-9A-F]+)".*/\1/g')"
    result="$(echo -e "\\U$codepoint")"
    if [ "$HIDE_NAME" != true ]; then
        name="$(echo "$line" | sed -E 's/.* na="([^"]+)".*/\1/g')"
        result="$result $(echo "$name" | tr '[:upper:]' '[:lower:]')"
    fi
    if [ "$SHOW_CP" = true ]; then
        result="$result (U+$codepoint)"
    fi
    # BUG FIX: `echo "$echo_opt" "$result"` printed a spurious leading space
    # when $echo_opt was empty (echo "" "$result"). printf needs no option
    # juggling and is safe for results starting with '-'.
    if [ "$NO_NEW_LINE" = true ]; then
        printf '%s' "$result"
    else
        printf '%s\n' "$result"
    fi
    line_id=$(( line_id + 1 ))
done

@ -0,0 +1,5 @@
#!/bin/bash
# Delete a branch both locally and on origin.
# usage: $0 BRANCH_NAME
BRANCH_NAME="$1"
# Refuse to run without a branch name: the unquoted expansion used to make
# git receive no argument at all and fail with a confusing error.
if [ -z "$BRANCH_NAME" ]; then
    echo "usage: $0 BRANCH_NAME" >&2
    exit 1
fi
git branch -D "$BRANCH_NAME"
git push --delete origin "$BRANCH_NAME"

@ -0,0 +1,6 @@
#!/bin/bash
# Quickly setup user name and email for the current repository.
# -r keeps backslashes literal in the typed values (read mangles them otherwise).
read -r -p "User name: " name
git config user.name "$name"
read -r -p "User email: " email
git config user.email "$email"

@ -0,0 +1,14 @@
#!/bin/bash
# Ignore user-specific dev files without cluttering the gitignore files.
# Other team members may not care that you use Vim, VS Code, an IDEA product or
# whatever, so it is a bit weird to commit your own specific tooling ignore
# patterns into the project. Use your local exclude file instead.
exclude() {
    # Append the pattern unless an identical line is already present.
    # BUG FIX: the plain `grep "$1"` treated the pattern as a regex and
    # matched substrings, so '.*.swp' could match nearly any existing line
    # and silently skip the append. -F: literal, -x: whole line, -q: quiet,
    # --: protect patterns starting with '-'.
    if ! grep -qxF -- "$1" .git/info/exclude ; then
        echo "$1" >> .git/info/exclude
    fi
}
# Vim swap files (any basename) and VS Code workspace settings.
exclude '.*.swp'
exclude '.vscode/'

@ -0,0 +1,84 @@
"""IDAPython plugin to demangle strings.
It demangles the string under the cursor and set the result as comment. This
relies on the DbgHelp DLL on your system, so it should work for most recent VC
versions. This can be run as a standalone tool as well (Python 2).
"""
import argparse
import ctypes
import ctypes.util
import platform
try:
    # Only importable when running inside IDA's embedded interpreter;
    # used to switch between plugin mode and standalone CLI mode.
    import idaapi
    import idc
    IS_IDAPYTHON = True
except ImportError:
    IS_IDAPYTHON = False
# Size of the output buffer handed to UnDecorateSymbolName (4 KiB).
MAX_DEMANGLED_LEN = 2**12
def main():
    # Inside IDA: register hotkeys; standalone: demangle the CLI argument.
    if IS_IDAPYTHON:
        setup_ida()
    else:
        run_standalone()
def setup_ida():
    """Register the plugin hotkeys in IDA.

    Currently only Shift-G: demangle the string under the cursor.
    """
    bindings = {
        "Shift-G": demangle_str_at_screen_ea
    }
    for binding in bindings:
        idaapi.add_hotkey(binding, bindings[binding])
    print "Bound " + ", ".join(bindings.keys())
def run_standalone():
    """Parse a mangled name from the command line and print it demangled.

    Prints nothing when demangling fails (demangle_vc returns "").
    """
    argparser = argparse.ArgumentParser()
    argparser.add_argument("name", type = str, help = "mangled name")
    args = argparser.parse_args()
    demangled = demangle_vc(args.name)
    if demangled:
        print demangled.decode("utf8", errors = "replace")
def demangle_str_at_screen_ea():
    """Demangle the string at the IDA cursor and set it as a comment there.

    Bails out with a message when there is no string at the cursor address
    or when DbgHelp fails to demangle it.
    """
    ea = idc.ScreenEA()
    string = idc.GetString(ea)
    if not string:
        print "Couldn't get any string at {}.".format(hex(ea))
        return
    demangled = demangle_vc(string)
    if not demangled:
        print "Demangling failed."
        return
    idc.MakeComm(ea, demangled)
def demangle_vc(name, flags = 0x2800):
    """ Call DbgHelp.UnDecorateSymbolName and return the demangled name bytes.
    Default flags are UNDNAME_32_BIT_DECODE | UNDNAME_NO_ARGUMENTS because it
    seems to work only this way?! Returns "" on failure (non-Windows host,
    or UnDecorateSymbolName reporting an error). """
    if platform.system() != "Windows":
        print "DbgHelp is only available on Windows!"
        return ""
    dbghelp_path = ctypes.util.find_library("dbghelp")
    dbghelp = ctypes.windll.LoadLibrary(dbghelp_path)
    # NOTE(review): presumably strips leading dots that IDA keeps on some
    # string literals and that DbgHelp rejects — confirm on real samples.
    name = name.lstrip(".")
    mangled = ctypes.c_char_p(name.encode("utf8"))
    # Fixed-size output buffer; longer names would be truncated by DbgHelp.
    demangled = ctypes.create_string_buffer("\x00" * MAX_DEMANGLED_LEN)
    demangled_len = ctypes.c_int(MAX_DEMANGLED_LEN)
    flags = ctypes.c_int(flags)
    ret = dbghelp.UnDecorateSymbolName(mangled, demangled, demangled_len, flags)
    if ret == 0:
        # UnDecorateSymbolName returns 0 on failure; fetch the Windows error
        # code for a slightly more useful message.
        error_code = ctypes.windll.kernel32.GetLastError()
        print "UnDecorateSymbolName failed ({})".format(error_code)
        return ""
    else:
        return demangled.value
if __name__ == "__main__":
    main()

@ -0,0 +1,22 @@
#!/bin/bash
# A script to ensure one and only SSH agent is running, especially in Tmux or
# remote sessions. You can add it to your bashrc/zshrc without issues.
# Inspired from: http://rabexc.org/posts/pitfalls-of-ssh-agents
AGENT_CONFIG="$HOME/.ssh-agent"
# First try to list identities.
ssh-add -l &> /dev/null
# If it returned 2, try to load a running agent config.
if [ "$?" = 2 ]; then
test -r $AGENT_CONFIG && eval "$(<$AGENT_CONFIG)" > /dev/null
# Retry.
ssh-add -l &> /dev/null
# If it still does not work, start a new agent.
if [ "$?" = 2 ]; then
(umask 066; ssh-agent > $AGENT_CONFIG)
eval "$(<$AGENT_CONFIG)" > /dev/null
fi
fi

@ -0,0 +1,76 @@
#!/usr/bin/env python3
# type: ignore
"""Cheat sheet for Toki Pona, using the nimi ale pona table.
You need a CSV export of the nimi ale pona (inli). It can be downloaded from:
https://docs.google.com/spreadsheets/d/1t-pjAgZDyKPXcCRnEdATFQOxGbQFMjZm-8EvXiQd2Po/edit#gid=0
Place it in the path stored in the CSV variable below, or change that variable
to suit your needs :)
If colorama is installed on your system (it often is for some reason), the
output will be colored; else it will still properly display text.
2021 - License: WTFPLv2 - pona tawa sina!
"""
import argparse
import csv
from pathlib import Path
# Default location of the nimi ale pona CSV export (see module docstring).
CSV = Path.home() / ".local/share/toki/nimi-ale-pona.csv"
try:
    from colorama import Fore, init, Style
except ImportError:
    # Without colorama, stand-ins make every attribute lookup (Fore.GREEN,
    # Style.BRIGHT, ...) yield an empty string, so the f-strings below
    # degrade gracefully to plain, uncolored text.
    class Dummy:
        def __getattr__(self, _):
            return ""
    Fore = Dummy()
    Style = Dummy()
else:
    init()
def main():
    """Print the whole table, or only the first row matching the given word."""
    argparser = argparse.ArgumentParser(description="nimi ale pona!")
    argparser.add_argument("word", nargs="?", help="word to search")
    args = argparser.parse_args()
    with open(CSV) as nap_file:
        reader = csv.DictReader(nap_file)
        if not args.word:
            # No filter: dump every entry.
            for entry in reader:
                print_row(entry)
            return
        # The "word" column may hold several comma-separated spellings.
        for entry in reader:
            if args.word in entry["word"].split(", "):
                print_row(entry)
                break
        else:
            print("nimi ala!")
# Category label -> colored label. NOTE(review): presumably "pu" marks the
# official book vocabulary, pre-pu/post-pu older and newer words — confirm
# against the spreadsheet's legend.
COLORED_CATS = { # soweli suwi kule!
    "pu": f"{Fore.GREEN}pu{Fore.RESET}",
    "pre-pu": f"{Fore.CYAN}pre-pu{Fore.RESET}",
    "post-pu": f"{Fore.MAGENTA}post-pu{Fore.RESET}",
}
def print_row(row):
    """Pretty-print one CSV row: word line, then dimmed details and tags."""
    category = COLORED_CATS.get(row["category"], "?")
    definition = row['definition'].strip().replace("\n", "")
    print(f"{Style.BRIGHT}{row['word']}{Style.NORMAL} ({category}): {definition}")
    etymology = row['etymology'].strip().replace("\n", "")
    details = f"from {row['source language']}"
    if etymology:
        details += ": " + etymology
    print(f" {Style.DIM}{details}{Style.RESET_ALL}")
    if row['tags']:
        print(f" {Style.DIM}{row['tags']}{Style.RESET_ALL}")
    if row['']:  # the export carries a bogus, unnamed second tags column
        print(f" {Style.DIM}{row['']}{Style.RESET_ALL}")
if __name__ == "__main__":
    main()

@ -0,0 +1,13 @@
#!/usr/bin/env python3
# Brutal URL encoding of whatever string is passed as argument.
import binascii
import sys

def percent_encode(text):
    """Return `text` with EVERY byte of its UTF-8 encoding percent-encoded.

    "Brutal" on purpose: no reserved/unreserved distinction, each byte
    becomes "%xx" (lowercase hex).
    """
    raw = text.encode()
    hex_digits = binascii.hexlify(raw).decode()
    # Each byte is two hex digits; prefix every pair with '%'.
    return "".join("%" + hex_digits[i:i + 2]
                   for i in range(0, len(hex_digits), 2))

if __name__ == "__main__":
    # Guard added so importing the module (or running without an argument
    # from another tool) no longer crashes on sys.argv[1].
    print(percent_encode(sys.argv[1]))

@ -0,0 +1,157 @@
#!/usr/bin/env python3
# encoding: utf-8
"""
File: britamerican.py
Author: Paul Lajoie-Mazenc
Description: Checks for british and american spellings in a file. This is just
a basic thing, it may have lots of false positives/negatives.
Inspired from Nicholas J. Higham's “Handbook of Writing for the Mathematical
Sciences
"""
import re
import argparse
# Words that have a different spelling
# (british, american) spelling
WORDS = [('behaviour', 'behavior'), ('colour', 'color'),
    ('catalogue', 'catalog'), ('centre', 'center'), ('defence', 'defense'),
    ('grey', 'gray'), ('manoeuvre', 'maneuver'),
    ('marvellous', 'marvelous'), ('modelled', 'modeled'),
    ('modelling', 'modeling'), ('skilful', 'skillful'),
    ('speciality', 'specialty'), ('acknowledgement', 'acknowledgment'),
    # BUG FIX: the next two pairs were swapped relative to the declared
    # (british, american) order — the double-consonant spellings
    # ("benefitted", "focussed") are the British ones.
    ('benefitted', 'benefited'), ('encyclopaedia', 'encyclopedia'),
    ('focussed', 'focused'), ('judgement', 'judgment'),
    ('appendices', 'appendixes'), ('formulae', 'formulas'),
    ('indices', 'indexes'), ('lemmata', 'lemmas'),
    ('vertices', 'vertexes'), ('optimisation', 'optimization')]
BRITISH = [word[0] for word in WORDS]
AMERICAN = [word[1] for word in WORDS]
# Exceptions for the *ise words, mostly verbs
# All the other *ise verbs should be *ise in british and *ize in american
# (also includes LaTeX size macros such as scriptsize/footnotesize so they
# are not reported in TeX sources)
EXCEPTIONS = ['advise', 'arise', 'circumcise', 'comprise', 'compromise',
    'concise', 'demise', 'despise', 'devise', 'disguise', 'excise', 'exercise',
    'expertise', 'franchise', 'guise', 'improvise', 'incise', 'likewise',
    'otherwise', 'precise', 'premise', 'promise', 'reprise', 'revise', 'rise',
    'size', 'scriptsize', 'footnotesize', 'supervise', 'surmise', 'surprise',
    'televise', 'treatise', 'wise']
# Detects words
re_words = re.compile('\\w+')
# Gets the *ise[ds] and *ize[ds]
re_ise = re.compile('\\b\\w+ise[ds]?\\b')
re_ize = re.compile('\\b\\w+ize[ds]?\\b')
# Gets the *yse[ds] and *yze[ds]
re_yse = re.compile('\\b\\w+yse[ds]?\\b')
re_yze = re.compile('\\b\\w+yze[ds]?\\b')
# The word ends with a d or an s
re_suffix = re.compile('^\\w+[ds]$')
def parse_args():
    """ Parses the command line: one or more file paths to check. """
    argparser = argparse.ArgumentParser(
        description="Checks a file for british and american spellings")
    argparser.add_argument('files', metavar="files", type=str, nargs='+',
                           help='file where to check the spellings')
    return argparser.parse_args()
def check_british(text):
    """ Returns the entries of `text` (a word list) spelled the british way. """
    return [candidate for candidate in text if candidate in BRITISH]
def check_american(text):
    """ Returns the entries of `text` (a word list) spelled the american way. """
    return [candidate for candidate in text if candidate in AMERICAN]
def check_ise(text):
    """ Checks for words ending in ise[ds]?
    Unlike check_british, `text` is a whole string, not a word list. """
    return re_ise.findall(text)
def check_ize(text):
    """ Checks for words ending in ize[ds]? """
    return re_ize.findall(text)
def check_yse(text):
    """ Checks for words ending in yse[ds]? """
    return re_yse.findall(text)
def check_yze(text):
    """ Checks for words ending in yze[ds]? """
    return re_yze.findall(text)
def root(word):
    """ Strips a single trailing 'd' or 's' (past participle, plural or
    conjugation marker) when present. """
    return word[:-1] if re_suffix.match(word) else word
def remove_exceptions(words):
    """ Drops the words whose root is a known *ise/*yse exception. """
    return [candidate for candidate in words if root(candidate) not in EXCEPTIONS]
def get_words(line):
    """ Returns (british, american): the suspicious spellings found in `line`.

    Matching is case-insensitive; known exceptions are filtered out. """
    lowered = line.lower()
    tokens = re_words.findall(lowered)
    # Explicit word-pair hits, then -ise/-ize hits, then -yse/-yze hits —
    # same accumulation order as the separate extend() calls used before.
    british = check_british(tokens) + check_ise(lowered) + check_yse(lowered)
    american = check_american(tokens) + check_ize(lowered) + check_yze(lowered)
    return remove_exceptions(british), remove_exceptions(american)
def check_line(line, index):
    """ Prints the american and british spellings found in `line`.
    The line-number column is correctly aligned for files < 10,000 lines. """
    british, american = get_words(line)
    if not british and not american:
        return
    uk_label = '\033[91m' + "UK" + '\033[0m'
    us_label = '\033[92m' + "US" + '\033[0m'
    print("{:<4d}: ".format(index + 1), end='')
    pad = ''
    if british:
        print("{}: {}".format(uk_label, british))
        # Align a following US line under the UK one (4-digit number + ": ").
        pad = ' '*6
    if american:
        print("{}{}: {}".format(pad, us_label, american))
def main():
    """ Checks each file given on the command line, line by line. """
    for file_ in parse_args().files:
        try:
            # 'with' guarantees the descriptor is closed on every path.
            with open(file_) as fd:
                lines = fd.readlines()
        except IOError:
            # BUG FIX: this used 'break', which aborted ALL remaining files
            # even though the message promises to only skip this one.
            print("Couldn't read file {}, skipping it".format(file_))
            continue
        print(file_)
        for index, line in enumerate(lines):
            check_line(line, index)
if __name__ == '__main__':
    main()

@ -0,0 +1,53 @@
#!/usr/bin/env perl
# Finds duplicate adjacent words.
# Prints "file:line word" for each duplicate; the exit status is the total
# number of duplicates found (0 means the files are clean).
use strict ;
my $DupCount = 0 ;
if (!@ARGV) {
    print "usage: dups <file> ...\n" ;
    exit ;
}
while (1) {
    my $FileName = shift @ARGV ;
    # Exit code = number of duplicates found.
    exit $DupCount if (!$FileName) ;
    open FILE, $FileName or die $!;
    my $LastWord = "" ;
    my $LineNum = 0 ;
    while (<FILE>) {
        chomp ;
        $LineNum ++ ;
        # The capturing group keeps the separators as tokens, handled below.
        my @words = split (/(\W+)/) ;
        foreach my $word (@words) {
            # Skip spaces:
            next if $word =~ /^\s*$/ ;
            # Skip punctuation:
            # (also resets the comparison, so "stop. Stop" is not a dup)
            if ($word =~ /^\W+$/) {
                $LastWord = "" ;
                next ;
            }
            # Found a dup?
            # ($LastWord carries across lines, so a word repeated on the
            # next line is caught too; comparison is case-insensitive)
            if (lc($word) eq lc($LastWord)) {
                print "$FileName:$LineNum $word\n" ;
                $DupCount ++ ;
            } # Thanks to Sean Cronin for tip on case.
            # Mark this as the last word:
            $LastWord = $word ;
        }
    }
    close FILE ;
}

@ -0,0 +1,66 @@
#!/bin/bash
# Find occurences of passive form: maybe active form is more adapted?
# Alternation of irregular past participles, used to catch passive voice
# ("was taken", "is written", ...) that the \w+ed pattern below would miss.
irregulars="awoken|\
been|born|beat|\
become|begun|bent|\
beset|bet|bid|\
bidden|bound|bitten|\
bled|blown|broken|\
bred|brought|broadcast|\
built|burnt|burst|\
bought|cast|caught|\
chosen|clung|come|\
cost|crept|cut|\
dealt|dug|dived|\
done|drawn|dreamt|\
driven|drunk|eaten|fallen|\
fed|felt|fought|found|\
fit|fled|flung|flown|\
forbidden|forgotten|\
foregone|forgiven|\
forsaken|frozen|\
gotten|given|gone|\
ground|grown|hung|\
heard|hidden|hit|\
held|hurt|kept|knelt|\
knit|known|laid|led|\
leapt|learnt|left|\
lent|let|lain|lighted|\
lost|made|meant|met|\
misspelt|mistaken|mown|\
overcome|overdone|overtaken|\
overthrown|paid|pled|proven|\
put|quit|read|rid|ridden|\
rung|risen|run|sawn|said|\
seen|sought|sold|sent|\
set|sewn|shaken|shaven|\
shorn|shed|shone|shod|\
shot|shown|shrunk|shut|\
sung|sunk|sat|slept|\
slain|slid|slung|slit|\
smitten|sown|spoken|sped|\
spent|spilt|spun|spit|\
split|spread|sprung|stood|\
stolen|stuck|stung|stunk|\
stridden|struck|strung|\
striven|sworn|swept|\
swollen|swum|swung|taken|\
taught|torn|told|thought|\
thrived|thrown|thrust|\
trodden|understood|upheld|\
upset|woken|worn|woven|\
wed|wept|wound|won|\
withheld|withstood|wrung|\
written"
# Print usage when no file arguments were given.
if [ "$1" = "" ]; then
    echo "usage: $(basename "$0") <file> ..."
    exit
fi
# Passive voice: a form of "to be" followed by a past participle, either a
# regular *ed form or one of the irregulars above.
# grep -E replaces the deprecated egrep, and "$@" (instead of unquoted $*)
# keeps filenames containing spaces intact.
grep -E -n -i --color \
    "\\b(am|are|were|being|is|been|was|be)\\b[ ]*(\w+ed|($irregulars))\\b" "$@"
exit $?

@ -0,0 +1,40 @@
#!/bin/bash
# Highlight weasel words: are they really useful?
# Built-in default list; can be overridden by a word file (see below).
weasels="many|various|very|fairly|several|extremely\
|exceedingly|quite|remarkably|few|surprisingly\
|mostly|largely|huge|tiny|((are|is) a number)\
|excellent|interestingly|significantly\
|substantially|clearly|vast|relatively|completely"
wordfile=""
# Check for an alternate weasel file; the last existing location wins.
# All path tests are quoted so $HOME/$WORDSDIR with spaces cannot break them,
# and an unset WORDSDIR no longer silently tests "/weasels".
if [ -f "$HOME/etc/words/weasels" ]; then
    wordfile="$HOME/etc/words/weasels"
fi
if [ -n "${WORDSDIR:-}" ] && [ -f "$WORDSDIR/weasels" ]; then
    wordfile="$WORDSDIR/weasels"
fi
if [ -f "words/weasels" ]; then
    wordfile="words/weasels"
fi
if [ -n "$wordfile" ]; then
    # Dummy first alternative so every real word can be appended with '|'.
    weasels="xyzabc123"
    # BUG FIX: `for w in $(cat $wordfile)` split on any whitespace, breaking
    # multi-word entries and globbing patterns; read one entry per line.
    while IFS= read -r w; do
        [ -n "$w" ] && weasels="$weasels|$w"
    done < "$wordfile"
fi
# Print usage when no file arguments were given.
if [ "$1" = "" ]; then
    echo "usage: $(basename "$0") <file> ..."
    exit
fi
# grep -E replaces the deprecated egrep, and "$@" (instead of unquoted $*)
# keeps filenames containing spaces intact.
grep -E -i -n --color "\\b($weasels)\\b" "$@"
exit $?
Loading…
Cancel
Save