| 1 | # |
| 2 | |
# Fuzzy-find a word from the system dictionary and look up its definition.
# Requires: fzf, dict, /usr/share/dict/words.
d() {
    local -r word=$(fzf < /usr/share/dict/words)
    # BUG FIX: if the fzf selection is cancelled, word is empty -- bail out
    # instead of running `dict ""`.
    [ -n "$word" ] || return 1
    dict "$word"
}
| 7 | |
# Render a per-month, per-hour histogram of shell activity, based on the
# timestamps in `history` output.
shell_activity_report() {
    # TODO: optional concrete number output
    # TODO: manual weekday calc (since forking date is so expensive)
    # TODO: optional combinations of granularities: hour, weekday, month, year
    history \
    | awk '
        {
            # NOTE: $2 & $3 are specific to oh-my-zsh history output
            date = $2
            time = $3
            d_fields = split(date, d, "-")
            t_fields = split(time, t, ":")
            if (t_fields && d_fields) {
                # +0 to coerce number from string
                month = d[2] + 0
                hour = t[1] + 0
                # BUG FIX: pre-increment so c holds the updated count
                # (post-increment lagged max by one), and track max inside
                # this branch so an unparsable line cannot reuse a stale c.
                c = ++count[month, hour]
                if (c > max)
                    max = c
            }
        }

        END {
            m[ 1] = "January"
            m[ 2] = "February"
            m[ 3] = "March"
            m[ 4] = "April"
            m[ 5] = "May"
            m[ 6] = "June"
            m[ 7] = "July"
            m[ 8] = "August"
            m[ 9] = "September"
            m[10] = "October"
            m[11] = "November"
            m[12] = "December"
            for (month = 1; month <= 12; month++) {
                printf "%s\n", m[month];
                for (hour=0; hour<24; hour++) {
                    c = count[month, hour]
                    printf " %2d ", hour
                    # BUG FIX: guard the bar-scaling division; max is 0 when
                    # no history line had a parsable timestamp.
                    if (max > 0)
                        for (i = 1; i <= (c * 100) / max; i++)
                            printf "|"
                    printf "\n"
                }
            }
        }'
}
| 55 | |
# Show the 50 most frequently used commands from history as a bar chart.
top_commands() {
    history \
    | awk '
        {
            # NOTE: $4 is the command word in oh-my-zsh history output
            count[$4]++
        }

        END {
            for (cmd in count)
                print count[cmd], cmd
        }' \
    | sort -n -r -k 1 \
    | head -50 \
    | awk '
        {
            cmd[NR] = $2
            c = count[NR] = $1 + 0 # + 0 to coerce number from string
            if (c > max)
                max = c
        }

        END {
            for (i = 1; i <= NR; i++) {
                c = count[i]
                printf "%s %d ", cmd[i], c
                # BUG FIX: avoid division by zero when history is empty
                # (max stays 0 and no bar should be drawn).
                scaled = (max > 0) ? (c * 100) / max : 0
                for (j = 1; j <= scaled; j++)
                    printf "|"
                printf "\n"
            }
        }' \
    | column -t
}
| 89 | |
# Top Disk-Using directories
# TODO: Consider using numfmt instead of awk
tdu() {
    du "$1" \
    | sort -k 1 -n -r \
    | head -n 50 \
    | awk '
        {
            # du prints "<kbytes>\t<path>"; strip the size column to keep
            # paths that contain whitespace intact.
            kbytes = $1
            entry = $0
            sub("^" $1 "\t+", "", entry)
            printf("%f\t%s\n", kbytes / 1024 / 1024, entry)
        }' \
    | cut -c 1-115
}
| 106 | |
# Top Disk-Using Files
tduf() {
    # NUL-separated pipeline so paths with newlines survive; gawk can split
    # records on \0 via RS, plain awk cannot.
    find "$1" -type f -printf '%s\t%p\0' \
    | sort -z -n -k 1 -r \
    | head -z -n 50 \
    | gawk -v RS='\0' '
        {
            # Record is "<bytes>\t<path>"; drop the size column, keeping
            # any whitespace inside the path itself.
            bytes = $1
            entry = $0
            sub("^" $1 "\t+", "", entry)
            printf("%f\t%s\n", bytes / 1024 / 1024 / 1024, entry)
        }'
}
| 121 | |
# Most-recently modified file system objects
recent() {
    # NOTES:
    # - intentionally not quoting the parameters, so that some can be ignored
    #   if not passed, rather than be passed to find as an empty string;
    # - %T+ is a GNU extension;
    # - gawk is able to split records on \0, while awk cannot;
    # - output is truncated to fit the terminal height (minus a margin).
    find $@ -printf '%T@ %T+ %p\0' \
    | tee >(gawk -v RS='\0' 'END { printf("[INFO] Total found: %d\n", NR); }') \
    | sort -z -k 1 -n -r \
    | head -n "$(stty size | awk 'NR == 1 {print $1 - 5}')" -z \
    | gawk -v RS='\0' '
        {
            sub("^" $1 " +", "")  # Remove epoch time
            # BUG FIX: "+" alone is not a valid ERE -- escape it to replace
            # the literal %T+ date/time separator.
            sub(/\+/, " ")        # Blank-out the default separator
            sub("\\.[0-9]+", "")  # Remove fractional seconds
            print
        }'
}
| 141 | |
# Most-recently modified directories under $1 (see recent()).
recent_dirs() {
    recent "$1" -type d
}
| 145 | |
# Most-recently modified regular files under $1 (see recent()).
recent_files() {
    recent "$1" -type f
}
| 149 | |
# Print the name of the default PulseAudio sink.
pa_def_sink() {
    # "Default Sink: <name>" -- match the two label fields, emit the third.
    pactl info | awk '$1 == "Default" && $2 == "Sink:" {print $3}'
}
| 153 | |
# Search Void Linux packages matching $1 via the xq-api service.
void_pkgs() {
    # BUG FIX: URL-encode the query; interpolating $1 raw broke searches
    # containing spaces or other URL-special characters.
    curl --get --data-urlencode "q=$1" 'https://xq-api.voidlinux.org/v1/query/x86_64' | jq '.data'
}
| 157 | |
# Colorful man
man() {
    # Per-invocation LESS_TERMCAP_* overrides colorize less's rendering of
    # man pages: md=bold (red), us=underline (green), so=standout (yellow
    # on blue); me/ue/se reset each mode.
    LESS_TERMCAP_md=$'\e[01;31m' \
    LESS_TERMCAP_us=$'\e[01;32m' \
    LESS_TERMCAP_so=$'\e[01;44;33m' \
    LESS_TERMCAP_me=$'\e[0m' \
    LESS_TERMCAP_ue=$'\e[0m' \
    LESS_TERMCAP_se=$'\e[0m' \
    command man "$@"
}
| 168 | |
# cd into the directory printed by ~/bin/experiment.
experiment() {
    # BUG FIX: quote "$@" (avoid word splitting of arguments) and use
    # `return` -- `exit` would terminate the interactive shell on failure.
    cd "$(~/bin/experiment "$@")" || return 1
}
| 172 | |
# Run the OCaml toplevel under ledit, with line length set to the terminal width.
hump() {
    # BUG FIX: quote "$@" so arguments are not word-split or glob-expanded.
    ledit -l "$(stty size | awk '{print $2}')" ocaml "$@"
}
| 176 | |
# Fuzzy-pick a HOWTO document and print it.
howto() {
    local chosen
    chosen=$(find ~/Archives/Documents/HOWTOs -mindepth 1 -maxdepth 1 | sort | fzf)
    cat "$chosen"
}
| 180 | |
# Download a YouTube video ($1: URI) into its own directory under
# ${DIR_YOUTUBE}/individual-videos/, saving the URI, description and
# info JSON alongside the video.
yt() {
    local _yt_uri
    local _yt_id
    local _yt_title
    local _yt_dir

    _yt_uri="$1"
    # BUG FIX: bail out if metadata lookup fails (bad URI, network error),
    # rather than building a bogus directory name.
    _yt_id=$(youtube-dl --get-id "$_yt_uri") || return 1
    _yt_title=$(youtube-dl --get-title "$_yt_uri") || return 1
    # NOTE(review): a title containing '/' would split the path -- consider
    # sanitizing the title before using it as a directory component.
    _yt_dir="${DIR_YOUTUBE}/individual-videos/${_yt_title}--${_yt_id}"

    mkdir -p "$_yt_dir"
    # BUG FIX: `return` (not `exit`) so a cd failure does not kill the shell.
    cd "$_yt_dir" || return 1
    echo "$_yt_uri" > 'uri'
    youtube-dl -c --write-description --write-info-json "$_yt_uri"
}
| 197 | |
# Fetch the repo list for a GitHub account.
# $1: account type ('users' or 'orgs'); $2: account name.
gh_fetch_repos() {
    local -r account_type="$1" account_name="$2"
    curl "https://api.github.com/${account_type}/${account_name}/repos?page=1&per_page=10000"
}
| 201 | |
# Clone all non-fork repos of a GitHub account into ${DIR_GITHUB}/<name>/.
# $1: account type ('users' or 'orgs'); $2: account name.
gh_clone() {
    # BUG FIX: declare locals so these names do not leak into the shell,
    # and `return` (not `exit`) so a cd failure does not kill the shell.
    local gh_user_type="$1"
    local gh_user_name="$2"
    local gh_dir="${DIR_GITHUB}/${gh_user_name}"
    mkdir -p "$gh_dir"
    cd "$gh_dir" || return 1
    gh_fetch_repos "$gh_user_type" "$gh_user_name" \
    | jq --raw-output '.[] | select(.fork | not) | .git_url' \
    | parallel -j 25 \
        git clone {}
}
| 213 | |
# Clone all non-fork repos of GitHub user $1 (see gh_clone()).
gh_clone_user() {
    gh_clone 'users' "$1"
}
| 217 | |
# Clone all non-fork repos of GitHub organization $1 (see gh_clone()).
gh_clone_org() {
    gh_clone 'orgs' "$1"
}
| 221 | |
# Clone a single GitHub repo URL ($1) into ${DIR_GITHUB}/<username>/.
gh_clone_repo() {
    local gh_username gh_dir
    # BUG FIX: the original awk program's shell quoting was broken -- the
    # pattern degenerated into an always-true string -- and the first
    # '/'-separated field of an https URL is "https:" (with the colon).
    # For https://github.com/<user>/<repo>, field 4 is the username.
    gh_username=$(echo "$1" | awk -F / '$1 == "https:" && $3 == "github.com" {print $4}')
    gh_dir="${DIR_GITHUB}/${gh_username}"
    mkdir -p "$gh_dir"
    # BUG FIX: `return` (not `exit`) so a cd failure does not kill the shell.
    cd "$gh_dir" || return 1
    git clone "$1"
}
| 229 | |
# Emit the markdown skeleton for a daily work-log entry, headed by
# today's date (the heredoc body is the literal template -- do not indent).
work_log_template() {
    cat << EOF
$(date '+%F %A')
==========

Morning report
--------------

### Previous

### Current

### Blockers

Day's notes
-----------
EOF
}
| 248 | |
# Open today's work-log file in vim (spell-checking on), creating it from
# the template on first use. Files live in $DIR_WORK_LOG as <YYYY-MM-DD>.md.
work_log() {
    # BUG FIX: declare local so the name does not leak into the shell.
    local file_work_log_today
    mkdir -p "$DIR_WORK_LOG"
    file_work_log_today="${DIR_WORK_LOG}/$(date +%F).md"
    if [ ! -f "$file_work_log_today" ]
    then
        work_log_template > "$file_work_log_today"
    fi
    vim -c 'set spell' "$file_work_log_today"
}
| 259 | |
# Open a new timestamped note titled $1 in vim, with spell-checking on.
# Notes live in $DIR_NOTES as <timestamp>--<title>.md.
note() {
    local -r stamp=$(date +'%Y_%m_%d--%H_%M_%S%z')
    mkdir -p "$DIR_NOTES"
    vim -c 'set spell' "${DIR_NOTES}/${stamp}--$1.md"
}
| 264 | |
# Show the wttr.in weather report for $WEATHER_LOCATION.
weather() {
    curl "http://wttr.in/${WEATHER_LOCATION}"
}
| 268 | |
# Show detailed info for every paired Bluetooth device.
bt_devs_paired() {
    # BUG FIX: -n 1 runs one `bluetoothctl -- info` per MAC address;
    # without it all addresses were passed to a single invocation.
    bluetoothctl -- paired-devices \
    | awk '{print $2}' \
    | xargs -n 1 bluetoothctl -- info
}
| 274 | |
# Show detailed info for every known Bluetooth device.
bt_devs() {
    # BUG FIX: -n 1 runs one `bluetoothctl -- info` per MAC address;
    # without it all addresses were passed to a single invocation.
    bluetoothctl -- devices \
    | awk '{print $2}' \
    | xargs -n 1 bluetoothctl -- info
}
| 280 | |
# Run a command, mirroring its stderr to the terminal, then raise a desktop
# notification with the exit status and captured stderr.
run() {
    # BUG FIX: declare locals (names no longer leak into the shell), quote
    # "$@" so the command's arguments are not word-split or glob-expanded,
    # and quote "$stderr" in the cat command substitution.
    local stderr code urgency
    stderr="$(mktemp)"
    "$@" 2> >(tee "$stderr")
    code="$?"
    case "$code" in
        0) urgency='normal';;
        *) urgency='critical';;
    esac
    notify-send -u "$urgency" "Job done: $code" "$(cat "$stderr")"
    rm "$stderr"
}