# vim:sw=2:sts=2:
TODO
====

Legend:
- [ ] not started
- [-] in-progress
- [x] done
- [~] cancelled

In-progress
-----------
- [-] timeline limits
  - [x] by time range
  - [ ] by msg count
    - [ ] per peer
    - [ ] total
    Not necessary for the short format, because we have Unix head/tail,
    but may be convenient for the long format (because a msg spans multiple lines).
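
    A minimal sketch of the msg-count limits (assumed names: msgs is a
    newest-first list, msg-peer is whatever accessor the message type
    ends up providing):

      #lang racket
      (define (limit-total msgs n)
        (if (> (length msgs) n) (take msgs n) msgs))

      ;; Keep at most n messages from each peer, preserving overall order.
      (define (limit-per-peer msgs n msg-peer)
        (define counts (make-hash))
        (reverse
         (for/fold ([kept '()]) ([m (in-list msgs)])
           (define k (msg-peer m))
           (hash-update! counts k add1 0)
           (if (<= (hash-ref counts k) n) (cons m kept) kept))))
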
- [-] Convert to Typed Racket
  - [x] build executable (otherwise too slow)
  - [-] add signatures
    - [x] top-level
    - [ ] inner
    - [ ] imports
- [-] commands:
  - [x] c | crawl
    Discover new peers mentioned by known peers.
  - [x] r | read
    - see timeline ops above
  - [ ] w | write
    - arg or stdin
    - nick expand to URI
    - Watch FIFO for lines, then read, timestamp and append [+ upload].
      Can be part of a "live" mode, along with background polling and
      incremental printing. Sort of an ii-like IRC experience.
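
    A minimal sketch of the FIFO part (paths are assumed; the timestamp
    helper approximates RFC 3339 by formatting UTC time and appending "Z"):

      #lang racket
      (require racket/date)

      (define (rfc3339-now)
        (parameterize ([date-display-format 'iso-8601])
          (string-append (date->string (seconds->date (current-seconds) #f) #t) "Z")))

      ;; Block on the FIFO, timestamp each line, append it to our own timeline.
      (define (watch-fifo fifo-path timeline-path)
        (with-input-from-file fifo-path
          (λ ()
            (let loop ()
              (define line (read-line))
              (unless (eof-object? line)
                (display-to-file (format "~a\t~a\n" (rfc3339-now) line)
                                 timeline-path
                                 #:exists 'append)
                (loop))))))
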
  - [ ] q | query
    - see timeline ops above
    - see hashtag and channels above
  - [x] d | download
    - [ ] options:
      - [ ] all - use all known peers
      - [ ] fast - all except peers known to be slow or unavailable
        REQUIRES: stats
  - [x] u | upload
    - calls a user-configured command to upload the user's own timeline file to their server
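
    A minimal sketch, assuming the command arrives as one shell string from
    the user's config (e.g. "scp twtxt.txt example.org:public_html/"):

      #lang racket
      (require racket/system)

      ;; system returns #t when the command exits 0, #f otherwise.
      (define (upload upload-command)
        (unless (system upload-command)
          (eprintf "upload command failed: ~a\n" upload-command)))
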
  Looks like a better CLI parser than "racket/cmdline": https://docs.racket-lang.org/natural-cli/
  But it is no longer necessary now that I've figured out how to chain (command-line ..) calls.
- [-] Output formats:
  - [x] text long
  - [x] text short
  - [ ] HTML
  - [ ] JSON
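
  A minimal sketch of the JSON format, assuming a message can be reduced to
  nick, uri, timestamp and text strings:

    #lang racket
    (require json)

    (define (msg->jsexpr nick uri timestamp text)
      (hasheq 'nick nick 'uri uri 'timestamp timestamp 'text text))

    ;; msgs is a list of (list nick uri timestamp text)
    (define (write-timeline-json msgs [out (current-output-port)])
      (write-json (for/list ([m (in-list msgs)]) (apply msg->jsexpr m)) out))
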
- [-] Peer discovery
  - [-] parse peer refs from peer timelines
    - [x] mentions from timeline messages
      - [x] @<source.nick source.url>
      - [x] @<source.url>
    - [ ] "following" from timeline comments: # following = <nick> <uri>
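
    A minimal sketch covering both mention forms plus the "following"
    comment (the exact comment syntax is assumed to be the one noted above):

      #lang racket
      (define mention-px #px"@<(?:([^ >]+) +)?([a-z]+://[^>]+)>")
      (define follow-px  #px"^#\\s*following\\s*=\\s*(\\S+)\\s+(\\S+)")

      ;; Returns a list of (nick-or-#f . uri) pairs found in the given lines.
      (define (peer-refs lines)
        (append*
         (for/list ([line (in-list lines)])
           (append
            (for/list ([m (in-list (regexp-match* mention-px line #:match-select cdr))])
              (cons (first m) (second m)))
            (match (regexp-match follow-px line)
              [(list _ nick uri) (list (cons nick uri))]
              [_ '()])))))
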
  - [ ] Parse User-Agent web access logs.
  - [-] Update peer ref file(s)
    - [x] peers-all
    - [x] peers-mentioned
    - [ ] peers-followed (by others, parsed from comments)
    - [ ] peers-down (net errors)
    - [ ] redirects?
    Rough sketch from late 2019:

      let read file =
        ...
      let write file peers =
        ...
      let fetch peer =
        (* Fetch could mean either or both of:
         * - fetch peer's we-are-twtxt.txt
         * - fetch peer's twtxt.txt and extract mentioned peer URIs
         *)
        ...
      let test peers =
        ...
      let rec discover peers_old =
        let peers_all =
          Set.fold peers_old ~init:peers_old ~f:(fun peers p ->
            match fetch p with
            | Error _ ->
                (* TODO: Should p be moved to the down set here? *)
                log_warning ...;
                peers
            | Ok peers_fetched ->
                Set.union peers peers_fetched
          )
        in
        (* Fixed point: stop when a pass finds no previously-unknown peers. *)
        if Set.is_empty (Set.diff peers_all peers_old) then
          peers_all
        else
          discover peers_all
      let rec loop interval peers_old =
        let peers_all = discover peers_old in
        let (peers_up, peers_down) = test peers_all in
        write "peers-all.txt" peers_all;
        write "peers-up.txt" peers_up;
        write "peers-down.txt" peers_down;
        sleep interval;
        loop interval peers_all
      let () =
        loop (int_of_string Sys.argv.(1)) (read "peers-all.txt")

Backlog
-------
- [ ] Support date without time in timestamps
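
  A minimal sketch: treat a bare date as midnight UTC and reuse the existing
  rfc3339 parsing path:

    #lang racket
    (define (normalize-timestamp ts)
      (if (regexp-match? #px"^[0-9]{4}-[0-9]{2}-[0-9]{2}$" ts)
          (string-append ts "T00:00:00Z")
          ts))
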
- [ ] Crawl all cache/objects/*, not given peers.
  BUT, in order to build an A-mentioned-B graph, we need to know the nick
  associated with the URI whose object we're examining. How to do that?
- [ ] Crawl downloaded web access logs
  - [ ] download-command hook to grab the access logs

  ;; Extract a (nick . uri) pair from a twtxt client User-Agent log entry,
  ;; e.g. "tt/0.1 (+https://example.org/twtxt.txt; @nick)":
  (define (parse log-line)
    (match (regexp-match #px"([^/]+)/([^ ]+) +\\(\\+([a-z]+://[^;]+); *@([^\\)]+)\\)" log-line)
      [(list _ client version uri nick) (cons nick uri)]
      [_ #f]))

  ;; Unique (nick . uri) pairs seen in a combined access log:
  (list->set (filter-map parse (file->lines "logs/combined-access.log")))

  ;; Regular files in the logs directory (candidates for parsing):
  (filter (λ (p) (equal? 'file (file-or-directory-type p))) (directory-list logs-dir))

- [ ] user-agent file as CLI option - need to run at least the crawler as another user
- [ ] Support fetching rsync URIs
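
  A minimal sketch: shell out to rsync and drop the result wherever the HTTP
  download path would have cached it (cache-path is an assumed argument):

    #lang racket
    (require racket/system)

    (define (fetch-rsync uri cache-path)
      (define rsync (find-executable-path "rsync"))
      ;; #t on success, #f if rsync is missing or exits non-zero.
      (and rsync
           (system* rsync "--quiet" "--times" uri (format "~a" cache-path))))
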
- [ ] Check for peer duplicates:
  - [ ] same nick for N>1 URIs
  - [ ] same URI for N>1 nicks
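
  A minimal sketch, assuming peers is a list of (nick . uri) pairs:

    #lang racket
    ;; Group by key and report keys that end up with more than one distinct value.
    (define (duplicates pairs key val)
      (define groups (make-hash))
      (for ([p (in-list pairs)])
        (hash-update! groups (key p) (λ (vs) (cons (val p) vs)) '()))
      (for/list ([(k vs) (in-hash groups)]
                 #:when (> (length (remove-duplicates vs)) 1))
        (cons k (remove-duplicates vs))))

    ;; (duplicates peers car cdr)  ; nicks claimed for more than one URI
    ;; (duplicates peers cdr car)  ; URIs claimed by more than one nick
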
- [ ] Background polling and incremental timeline updates.
  We can mark which messages have already been printed and print new ones as
  they come in.
  REQUIRES: polling
- [ ] Polling mode/command, where tt periodically polls peer timelines
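
  A minimal sketch of the polling loop (download-all stands in for whatever
  the d command exposes; on-update gets each round's results):

    #lang racket
    (define (start-polling peers interval-seconds download-all on-update)
      (thread
       (λ ()
         (let loop ()
           (on-update (download-all peers))
           (sleep interval-seconds)
           (loop)))))

    ;; (define poller (start-polling peers 300 download-all print-new-messages))
    ;; (kill-thread poller)  ; stop polling
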
- [ ] nick tiebreaker(s)
  - [ ] some sort of a hash of the URI?
  - [ ] an angry-purple-tiger kind of thing?
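
  A minimal sketch of the hash-of-URI idea: append a short digest of the URI
  to the colliding nick:

    #lang racket
    (require file/sha1)

    (define (nick+tiebreaker nick uri)
      (define tag (substring (sha1 (open-input-string uri)) 0 7))
      (format "~a~~~a" nick tag))  ; => e.g. "alice~" followed by 7 hex digits
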
- [ ] P2P nick registration?
  - [ ] Peers vote by claiming to have seen a nick->uri mapping?
    The inherent race condition would be a feature, since all user name
    registrations are races.
    REQUIRES: blockchain
- [ ] stats
  - [ ] download times per peer
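
  A minimal sketch: wrap whatever does the download and record the elapsed
  wall-clock time per peer:

    #lang racket
    (define (timed-download peer download)
      (define start (current-inexact-milliseconds))
      (define result
        (with-handlers ([exn:fail? (λ (e) 'download-failed)])
          (download peer)))
      (values result (- (current-inexact-milliseconds) start)))

    ;; (define-values (result ms) (timed-download peer download))
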
- [ ] Support redirects
  - should permanent redirects update the peer ref somehow?
- [ ] optional text wrap
- [ ] write
- [ ] peer refs set operations (perhaps better done externally?)
- [ ] timeline as a result of a query (peer ref set op + filter expressions)
- [ ] config files
- [ ] highlight mentions
- [ ] filter on mentions
- [ ] highlight hashtags
- [ ] filter on hashtags
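
  A minimal sketch for the two highlight items above, using ANSI SGR codes
  (the color numbers are arbitrary choices):

    #lang racket
    (define (ansi code s) (format "\e[~am~a\e[0m" code s))

    (define (highlight text)
      (let* ([text (regexp-replace* #px"@<[^>]+>" text (λ (m) (ansi 36 m)))]
             [text (regexp-replace* #px"#[A-Za-z0-9_]+" text (λ (m) (ansi 33 m)))])
        text))
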
- [ ] hashtags as channels? initial hashtag special?
- [ ] query language
- [ ] console logger colors by level ('error)
- [ ] file logger ('debug)
- [ ] Support immutable timelines
  - store individual messages
    - where?
      - something like DBM or SQLite - faster
      - filesystem - transparent, easily published - probably best
- [ ] block(chain/tree) of twtxts
  - distributed twtxt.db
  - each twtxt.txt is a ledger
  - peers can verify states of ledgers
  - peers can publish known nick->url mappings
  - peers can vote on nick->url mappings
  - we could break time periods into blocks
  - how to handle the fact that many (most?) twtxts are unseen by peers
    - longest X wins?

Done
----
- [x] Support time ranges (i.e. reading the timeline between given time points)
- [x] Dedup read-in peers before using them.
- [x] Prevent redundant downloads
  - [x] Check ETag
  - [x] Check Last-Modified if no ETag was provided
- [x] Parse rfc2822 timestamps
- [x] caching (use cache by default, unless explicitly asked for update)
  - [x] value --> cache
  - [x] value <-- cache
  REQUIRES: d command
- [x] Logger sync before exit.
- [x] Implement rfc3339->epoch
- [x] Remove dependency on rfc3339-old
- [x] Remove dependency on http-client
- [x] Build executable
  Implies fix of "collection not found" when executing the built executable
  outside the source directory:

    collection-path: collection not found
      collection: "tt"
      in collection directories:
    context...:
      /usr/share/racket/collects/racket/private/collect.rkt:11:53: fail
      /usr/share/racket/collects/setup/getinfo.rkt:17:0: get-info
      /usr/share/racket/collects/racket/contract/private/arrow-val-first.rkt:555:3
      /usr/share/racket/collects/racket/cmdline.rkt:191:51
      '|#%mzc:p

Cancelled
---------
- [~] named timelines/peer-sets
  REASON: That is basically files of peers, which we already support.