Compare commits

...

576 Commits

Author SHA1 Message Date
2b04374e23 add option to disable browsing of files, make the dialog bigger 2019-10-19 00:53:13 +01:00
383addbc37 implement view comment from browse window 2019-10-19 00:30:03 +01:00
cc39cd7f8e implement downloading from browse window 2019-10-19 00:23:43 +01:00
83665d7524 wip on browse host 2019-10-18 23:55:07 +01:00
94340480b4 wip on browse host 2019-10-18 23:25:26 +01:00
8850d49c63 wip on browse host 2019-10-18 23:16:37 +01:00
f0f9d840f0 wip on browse host 2019-10-18 22:35:17 +01:00
7f4cd4f331 wip on browse host 2019-10-18 21:17:34 +01:00
e6162503f6 wip on browse host 2019-10-18 20:29:39 +01:00
7a5d71dc36 add copy name to clipboard option 2019-10-17 19:01:53 +01:00
6fa39a5e35 turn off logging if there is no config file 2019-10-17 18:39:28 +01:00
c5ae804f61 Implement automatic font sizing; set all font properties on change of font 2019-10-17 18:15:04 +01:00
d7695b448d remove my DS_Store 2019-10-17 05:50:29 +01:00
946d9c8f32 disable sharing of hidden files by default, add option to enable 2019-10-17 05:46:27 +01:00
02441ca1e3 add option to disable searching in comments 2019-10-16 19:57:18 +01:00
5fa21b2360 keep tree expanded on modifications 2019-10-16 14:42:40 +01:00
d4c08f4fe6 only remove from index if no more files have the same comment pt.2 2019-10-16 14:23:12 +01:00
942de287c6 only remove from index if no more files have the same comment 2019-10-16 14:21:50 +01:00
d0299f80c6 search through comments 2019-10-16 14:06:11 +01:00
1227cf9263 Release 0.5.0 2019-10-15 12:38:25 +01:00
a05575485f move things around 2019-10-15 10:40:50 +01:00
f5bccd8126 All shared directories are watched directories. Fix manipulation of tree structure 2019-10-15 08:38:23 +01:00
70fb789abf remove the watched directories table 2019-10-15 04:51:21 +01:00
feb712c253 Move persisting of files on dedicated thread. Introduce an event to forcefully persist files. Do that immediately after unsharing anything 2019-10-15 04:21:40 +01:00
d22b403e2a stop watching multiple directories at once 2019-10-14 23:16:05 +01:00
a24982e0df fix comments for local results 2019-10-14 22:47:52 +01:00
6c26019164 allow switching without restart 2019-10-14 21:40:03 +01:00
965fa79bbf fix count of shared files in tree view mode 2019-10-14 20:57:50 +01:00
60ddb85461 Tree view of the shared files. The count is wrong for some reason 2019-10-14 20:13:25 +01:00
c7284623bc Release 0.4.16 2019-10-13 22:14:33 +01:00
3e7f2aa70a Add a note about DND, automatically watch shared directories 2019-10-13 20:21:28 +01:00
4f436a636c implement drop on MW -> share files/directories 2019-10-13 20:00:08 +01:00
b49dbc30c3 comment already decoded by the time it gets to the gui 2019-10-11 19:01:40 +01:00
c25d314e1c typo 2019-10-11 18:56:46 +01:00
b28587a275 wip on file comments 2019-10-11 18:42:02 +01:00
8b8e5d59be Silence an IllegalArgumentException while sorting downloads table 2019-10-11 11:21:56 +01:00
70bbe1f636 update version 2019-10-10 17:33:07 +01:00
337605dc0f Release 0.4.15 2019-10-10 16:48:10 +01:00
14bdfa6b2e throttle even further - 500/s 2019-10-09 17:34:54 +01:00
ed3f9da773 throttle loading even further, to 1000/sec 2019-10-09 16:46:17 +01:00
251080d08f throttle loading of files to 500/s 2019-10-09 16:34:09 +01:00
f530ab999d operations on multiple selection in shared files table 2019-10-09 03:38:08 +01:00
4133384e48 ability to share multiple files and directories 2019-10-08 21:30:34 +01:00
600fc98868 update TODO 2019-10-07 12:38:26 +01:00
129eeb3b88 JDK needed, not JRE 2019-10-07 12:38:09 +01:00
20b51b78a0 reduce priority of file persister thread 2019-10-07 11:59:51 +01:00
33fe755b60 implement multiple-selection on downloads table 2019-10-07 04:26:35 +01:00
8b0668a134 Rewrite utils into Java, cache the persistable data of shared files to reduce object churn 2019-10-05 22:50:32 +01:00
730d2202fd bundles for linux available now 2019-10-05 18:53:43 +01:00
69906a986d set i2p.dir.base to prevent router creating files in PWD 2019-10-05 15:03:59 +01:00
5bc8fa8633 Preserve selection on refresh #18 2019-10-05 05:13:49 +01:00
7de7c9d8f3 Add 'Clear Hits' button to content control panel #18 2019-10-05 05:03:25 +01:00
e943f6019d disable all GUI unit tests, enable host-cache unit tests. The 'build' target now succeeds 2019-10-05 04:31:11 +01:00
2eec7bec5b fix most core tests 2019-10-05 04:20:14 +01:00
c36110cf76 update readme 2019-10-04 16:41:07 +01:00
abe28517bc Release 0.4.14 2019-10-04 13:00:57 +01:00
15bc4c064d center the button 2019-10-03 21:32:32 +01:00
91d771944b add option for sequential download 2019-10-03 20:45:22 +01:00
e09c456a13 make the download retry interval in seconds, default still 1 minute 2019-10-03 19:31:15 +01:00
d9c1067226 Add Neutral button to search tab, issue #17 2019-10-02 06:02:06 +01:00
eda3e7ad3a Add option to not search extra hop, only considered if connecting only to trusted peers, issue #6 2019-10-02 05:45:46 +01:00
e9798c7eaa remember last rejection and back off from hosts that reject us. Fix return value of retry and hopelessness predicates 2019-10-01 08:34:43 +01:00
66bb4eef5b close outbound establishments on a separate thread 2019-10-01 07:50:29 +01:00
55f260b3f4 update version 2019-09-29 19:21:06 +01:00
32d4c3965e Release 0.4.13 2019-09-29 19:00:20 +01:00
de1534d837 reduce the default host retry interval 2019-09-29 18:45:09 +01:00
7b58e8a88a separate setting for the interval after which a host is considered hopeless 2019-09-29 18:43:39 +01:00
8a03b89985 clean up the filtering logic; allow serialization of hosts that can be retried 2019-09-29 16:49:02 +01:00
1d97374857 track last successful attempt. Only re-attempt hosts if they have ever been successful. Do not serialize hosts considered hopeless 2019-09-29 16:19:19 +01:00
549e8c2d98 Release 0.4.12 2019-09-22 16:55:04 +01:00
b54d24db0d new update server destination 2019-09-22 16:47:35 +01:00
fa12e84345 stronger sig type 2019-09-22 16:23:01 +01:00
6430ff2691 bump i2p libs version 2019-09-22 16:13:12 +01:00
591313c81c point to the pkg project 2019-09-20 21:09:53 +01:00
ce7b6a0c65 change to gasp AA font table, try metal lnf if the others fail 2019-09-16 15:06:45 +01:00
5c4d4c4580 embedded router will not work without reseed certificates, so remove it 2019-09-16 15:04:34 +01:00
4cb864ff9f update version 2019-09-16 15:03:20 +01:00
417675ad07 update dark_trion's hostcache address 2019-07-22 21:48:29 +01:00
9513e5ba3c update todo 2019-07-20 13:15:44 +01:00
85610cf169 add new host-cache 2019-07-15 22:05:09 +01:00
e8322384b8 Release 0.4.11 2019-07-15 14:28:21 +01:00
179279ed30 Merge branch 'master' of https://github.com/zlatinb/muwire 2019-07-14 06:19:18 +01:00
ae79f0fded Clear Done button, thanks to Aegon 2019-07-14 06:19:05 +01:00
ed878b3762 Merge pull request #11 from zetok/readme
Add info about the default I2CP port to README.md
2019-07-12 09:17:24 +01:00
623cca0ef2 Add info about the default I2CP port to README.md
Also:
 - improved formatting a bit
 - removed trailing whitespaces
2019-07-12 07:28:12 +01:00
eaa883c3ba count duplicate files towards total in Uploads panel 2019-07-11 23:28:12 +01:00
7ae8076865 disable webui for now 2019-07-11 22:29:47 +01:00
b1aa92661c do not pack200 some jars because of duplicate entries 2019-07-11 20:42:24 +01:00
9ed94c8376 do not include tomcat runtime 2019-07-11 20:41:57 +01:00
fa6aea1abe attempt to produce an I2P plugin 2019-07-11 19:49:04 +01:00
0de84e704b hello webui 2019-07-11 18:34:27 +01:00
a767dda044 add empty grails project for a web ui 2019-07-11 17:56:42 +01:00
56e9235d7b avoid FS call to get file length 2019-07-11 15:28:25 +01:00
2fba9a74ce persist files.json every minute 2019-07-11 14:32:57 +01:00
2bb6826906 canonicalize all files before they enter FileManager and do not look for absolute path on persistence 2019-07-11 14:32:12 +01:00
9f339629a9 remove unnecessary canonicalization 2019-07-11 11:58:20 +01:00
58d4207f94 Release 0.4.10 2019-07-11 05:09:05 +01:00
32577a28dc some download stats 2019-07-11 05:00:25 +01:00
f7b43304d4 use split pane in downloads tab as well 2019-07-11 03:57:49 +01:00
dcbe09886d split pane instead of gridlayout 2019-07-11 03:48:05 +01:00
5a54b2dcda shift focus to search pane on search 2019-07-10 22:33:21 +01:00
581293b24f column sizes 2019-07-10 22:27:07 +01:00
cd072b9f76 enable/disable download button correctly 2019-07-10 22:23:20 +01:00
6b74fc5956 fix trust/distrust buttons 2019-07-10 22:17:32 +01:00
3de2f872bb show results per sender 2019-07-10 22:08:18 +01:00
fcde917d08 fix context menu and double-click 2019-07-10 21:26:13 +01:00
4ded065010 move buttons onto search result tab 2019-07-10 21:23:00 +01:00
18a1c7091a move downloads to their own pane 2019-07-10 20:54:45 +01:00
46aee19f80 disable the button of the currently open pane 2019-07-10 20:37:09 +01:00
92dd7064c6 Release 0.4.9 2019-07-10 12:02:36 +01:00
b2e4dda677 rearrange tables 2019-07-10 11:55:06 +01:00
e77a2c8961 clear hits table on refresh 2019-07-09 21:42:52 +01:00
ee2fd2ef68 single hit per search uuid 2019-07-09 21:22:31 +01:00
3f95d2bf1d trust and distrust buttons 2019-07-09 21:15:08 +01:00
1390983732 populate hits table 2019-07-09 21:05:49 +01:00
ce660cefe9 deleting of rules 2019-07-09 20:50:07 +01:00
72b81eb886 fix matching 2019-07-09 20:27:28 +01:00
57d593a68a persist watched keywords and regexes 2019-07-09 20:11:29 +01:00
39a81a3376 hook up rule creation 2019-07-09 19:53:40 +01:00
fd0bf17c24 add ability to unregister event listeners 2019-07-09 19:53:08 +01:00
ac12bff69b wip on content control panel ui 2019-07-09 19:20:06 +01:00
feef773bac hook up content control panel to rest of UI 2019-07-09 17:55:36 +01:00
239d8f12a7 wip on core side of content management 2019-07-09 17:13:09 +01:00
8bbc61a7cb add settings for watched keywords and regexes 2019-07-09 16:50:51 +01:00
7f31c4477f matchers for keywords 2019-07-09 11:47:55 +01:00
6bad67c1bf Release 0.4.8 2019-07-08 18:30:19 +01:00
c76e6dc99f Merge pull request #9 from zetok/backticks
Replace deprecated backticks with $() for command substitution
2019-07-08 08:24:37 +01:00
acf9db0db3 Replace deprecated backticks with $() for command substitution
Although it's a Bash FAQ, the point also applies to POSIX-compatible
shells: https://mywiki.wooledge.org/BashFAQ/082
2019-07-08 06:29:33 +01:00
69b4f0b547 Add trust/distrust action from monitor window. Thanks Aegon 2019-07-07 15:31:21 +01:00
80e165b505 fix download size in renderer, thanks Aegon 2019-07-07 11:17:56 +01:00
bcce55b873 fix integer overflow 2019-07-07 10:58:39 +01:00
d5c92560db fix integer overflow 2019-07-07 10:56:14 +01:00
f827c1c9bf Home directories for different OSes 2019-07-07 09:14:13 +01:00
88c5f1a02d Add GPG key link 2019-07-07 09:04:52 +01:00
d8e44f5f39 kill other workers if download is finished 2019-07-06 22:21:13 +01:00
72ff47ffe5 use custom renderer and comparator for download progress 2019-07-06 12:53:49 +01:00
066ee2c96d wrong list 2019-07-06 11:28:04 +01:00
0a8016dea7 enable stealing of pieces from other download workers 2019-07-06 11:26:18 +01:00
db36367b11 avoid AIOOBE 2019-07-06 11:00:31 +01:00
b6c9ccb7f6 return up to 9 X-Alts 2019-07-06 09:03:27 +01:00
a9dc636bce write pieces every time a downloader finishes 2019-07-06 00:52:49 +01:00
3cc0574d11 working partial pieces 2019-07-06 00:47:45 +01:00
20fab9b16d work on partial piece persistence 2019-07-06 00:17:46 +01:00
4015818323 center buttons 2019-07-05 17:15:50 +01:00
f569d45c8c reallign tables 2019-07-05 17:07:14 +01:00
3773647869 remove diff rejects 2019-07-05 16:24:57 +01:00
29cdbf018c remove trailing spaces 2019-07-05 16:24:19 +01:00
94bb7022eb tabs -> spaces 2019-07-05 16:22:34 +01:00
39808302df Show which file is hashing, thanks to Aegon 2019-07-05 16:20:03 +01:00
2d22f9c39e override router log manager 2019-07-05 12:32:23 +01:00
ee8f80bab6 up i2p to 0.9.41 2019-07-05 12:26:48 +01:00
3e6242e583 break when matching search is found 2019-07-04 18:12:22 +01:00
41181616ee compact display of incoming searches, thanks Aegon 2019-07-04 17:59:53 +01:00
eb2530ca32 fix sorting of download/upload tables thanks Aegon 2019-07-04 17:58:06 +01:00
b5233780ef Release 0.4.7 2019-07-03 20:36:54 +01:00
78753d7538 shut down cache client on shutdown 2019-07-03 19:50:00 +01:00
4740e8b4f5 log hostcache stats 2019-07-03 19:46:24 +01:00
ad5b00fc90 prettier progress status thanks to Aegon 2019-07-03 12:50:24 +01:00
d6c6880848 update readme 2019-07-03 07:27:48 +01:00
4f948c1b9e Release 0.4.6 2019-07-03 07:11:59 +01:00
2b68c24f9c use switch 2019-07-03 07:01:27 +01:00
bcdf0422db update for embedded router 2019-07-03 07:00:04 +01:00
f6434b478d remove FAQ 2019-07-03 06:56:20 +01:00
e979fdd26f update list view tables 2019-07-03 06:51:21 +01:00
e6bfcaaab9 size columns, center integers 2019-07-03 06:11:02 +01:00
9780108e8a disable trust buttons on action 2019-07-03 06:00:09 +01:00
697c7d2d6d enable/disable trust panel buttons 2019-07-03 05:41:17 +01:00
887d10c8bf move buttons around 2019-07-03 05:30:39 +01:00
ef6b8fe458 add a state for failed updates 2019-07-03 05:12:00 +01:00
20ab55d763 update todo 2019-07-03 00:23:21 +01:00
eda58c9e0d Merge branch 'trust-lists' 2019-07-03 00:04:50 +01:00
fb42fc0e35 add trust panel in options 2019-07-03 00:04:08 +01:00
35cabc47ad hook up trust and distrust buttons 2019-07-02 23:44:43 +01:00
5be97d0404 show something when review button is pressed 2019-07-02 22:51:04 +01:00
82b0fa253c enable update and unsubscribe buttons 2019-07-02 22:26:29 +01:00
011a4d5766 prevent duplicate updates and zero timestamps 2019-07-02 22:02:15 +01:00
5cd1ca88c1 do actual updating on in a threadpool 2019-07-02 21:34:29 +01:00
44c880d911 store subscriber list upon subscription 2019-07-02 20:53:29 +01:00
14857cb5ad swallow headers in trust list response 2019-07-02 20:35:50 +01:00
7daf981f1a fix NPE 2019-07-02 20:24:51 +01:00
b99bc0ea32 fix 2019-07-02 20:12:22 +01:00
1ccf6fbdfa participating bandwidth grid cell 2019-07-02 15:35:42 +01:00
5711979272 Release 0.4.5 2019-07-02 15:01:51 +01:00
9a5e2b1fa3 speed smoothing patch courtesy of Aegon 2019-07-02 14:46:40 +01:00
cafc5f582e subscribe button 2019-07-02 14:35:52 +01:00
a89b423dfc simpler speed calculation 2019-07-02 13:05:06 +01:00
79e8438941 always assume interval is at least 1 second 2019-07-02 12:49:00 +01:00
19c2c46491 prevent NPE on startup 2019-07-02 12:27:15 +01:00
78f1d54b69 add new host cache 2019-07-02 10:04:24 +01:00
9461649ed4 change sig type 2019-07-02 09:49:13 +01:00
8573ab2850 work on trust list UI 2019-07-02 09:35:21 +01:00
8b3d752727 add status to the trust list object 2019-07-02 08:59:30 +01:00
7c54bd8966 start work on sharing of trust lists 2019-07-01 23:33:39 +01:00
5d0fcb7027 start work on sharing of trust lists 2019-07-01 23:15:13 +01:00
3ec9654d3c start work on sharing of trust lists 2019-07-01 22:05:43 +01:00
7c8d64b462 start work on sharing of trust lists 2019-07-01 21:40:07 +01:00
31e30e3d31 excludePeerCaps 2019-07-01 18:31:58 +01:00
8caf6e99b0 show floodfill status 2019-07-01 13:18:31 +01:00
624155debd update todo 2019-07-01 06:17:46 +01:00
4468a262ae actually add timestamps to the list 2019-06-30 21:40:18 +01:00
1780901cb0 throttle connections to 10 searches per second 2019-06-30 21:22:49 +01:00
d830d9261f canonicalize before checking if file is already shared 2019-06-30 17:12:25 +01:00
f5e1833a48 Release 0.4.4 2019-06-30 15:55:23 +01:00
9feb2a3c8f fix NPE on update search 2019-06-30 15:11:13 +01:00
b27665f5dd Merge pull request #5 from 0rC0/patch-1
code markdown for commands and paths in README.md
2019-06-30 13:45:36 +01:00
4465aa4134 code markdown for commands and paths in README.md
... instead of quotes
2019-06-30 14:27:33 +02:00
ad766ac748 try to unmap files when done 2019-06-30 13:20:26 +01:00
d9e7d67d86 javadoc 2019-06-30 12:51:34 +01:00
3fefbc94b3 utility to decode personas 2019-06-30 10:41:42 +01:00
21034209a5 add ? to split pattern 2019-06-30 06:29:46 +01:00
7c04c0f83c unshare individual file 2019-06-30 05:44:08 +01:00
f5293d65dd update todo 2019-06-29 16:00:49 +01:00
8191bf6066 Release 0.4.3 2019-06-29 10:44:15 +01:00
29b6bfd463 support different update types 2019-06-29 10:31:27 +01:00
2f3d23bc34 fixes 2019-06-29 10:12:50 +01:00
98dd80c4b8 fix 2019-06-29 10:03:58 +01:00
d9edb2e128 ability to download updates automatically 2019-06-29 09:23:27 +01:00
de04b40b86 Release 0.4.2 2019-06-29 07:17:45 +01:00
7206a3d926 more i2p metrics 2019-06-29 07:07:48 +01:00
98b98d8938 I2P status panel 2019-06-29 06:33:53 +01:00
294b8fcc2f MW status window 2019-06-29 05:58:46 +01:00
32f601a1b1 add ability to change i2p port 2019-06-28 23:53:22 +01:00
8e3a398080 Release 0.4.1 2019-06-28 16:42:37 +01:00
720b9688b4 Add unsharing of directories 2019-06-28 16:08:04 +01:00
e3066161c5 do not perform filesystem operations in the UI thread 2019-06-27 23:29:48 +01:00
a9aa3a524f disable i2cp interface on embedded router 2019-06-27 09:56:18 +01:00
92848e818a on empty properties source from java props 2019-06-27 03:47:56 +01:00
a7aa3008c0 bandwidth settings 2019-06-27 00:42:27 +01:00
485325e824 embedded router except for logs 2019-06-26 23:25:22 +01:00
0df2a0e039 start work on embedded router 2019-06-26 22:39:25 +01:00
fb7b4466c2 update readme 2019-06-26 22:05:04 +01:00
53105245f4 Release 0.4.0 2019-06-26 21:59:28 +01:00
b68eab91e0 Release 0.3.10 2019-06-25 22:39:43 +01:00
f72cf91462 wait for files to be loaded before sharing watched directories 2019-06-25 22:24:32 +01:00
a655c4ef50 add toString 2019-06-25 22:24:15 +01:00
5d46e9b796 switch 4_ to INFO 2019-06-25 21:50:15 +01:00
642e6e67b3 wait for all files loaded before watching dirs 2019-06-25 21:43:07 +01:00
2b6b86f903 show how many pieces the remote side already has 2019-06-25 17:44:05 +01:00
f2706a4426 clarify upload column 2019-06-25 17:24:42 +01:00
1af75413aa update for brackets 2019-06-25 16:27:02 +01:00
adc4077b1a filter asterix 2019-06-25 15:54:30 +01:00
01f4e2453b limit search length to 128 characters 2019-06-25 15:53:53 +01:00
61267374dd move button around 2019-06-25 08:10:20 +01:00
970f814685 make mesh expiration configurable 2019-06-25 08:04:57 +01:00
4fd9fc1991 add option to change download location 2019-06-25 07:59:30 +01:00
26207ffd1b add constructor 2019-06-25 07:53:24 +01:00
2614cfbe5f make host clear interval configurable 2019-06-25 07:41:20 +01:00
f11d461ec0 make download sequential ratio a property 2019-06-25 07:34:26 +01:00
b2eb2d2755 show hidden files in file choosers 2019-06-24 23:09:20 +01:00
ea46a54f19 enable AA by default 2019-06-24 22:55:26 +01:00
627add45ad remove griffon icons 2019-06-24 22:51:43 +01:00
d364855459 logo 2019-06-24 22:13:03 +01:00
14ee35e77a Release 0.3.9 2019-06-24 18:39:59 +01:00
8773eb4ee0 fix piece size calculation 2019-06-24 18:29:00 +01:00
51425bbfd9 Release 0.3.8 2019-06-24 07:38:39 +01:00
6a4879bc0b always save pieces 2019-06-24 07:29:49 +01:00
e7fe56439b persist X-Have, fix flickering bug 2019-06-24 07:20:53 +01:00
2886feab4a do not modify the set of available pieces 2019-06-23 17:08:07 +01:00
fb91194026 even noisier log 2019-06-23 16:39:38 +01:00
4527478b0d even noisier 4_ 2019-06-23 12:42:44 +01:00
b0062f146e log roots of download exceptions 2019-06-23 12:10:19 +01:00
bf16561170 Release 0.3.7 2019-06-23 11:25:19 +01:00
3b23dc29c4 if all sources are expired forget mesh 2019-06-23 11:21:39 +01:00
c0645b670e no split on list 2019-06-23 10:50:19 +01:00
30613fe530 update todo 2019-06-23 09:56:51 +01:00
e7822f6edc expire sources, fix compilation 2019-06-23 09:43:56 +01:00
7e5c9ba115 actually save 2019-06-23 09:41:20 +01:00
647fa3a481 persist download mesh 2019-06-23 09:38:42 +01:00
538eca9297 Release 0.3.6 2019-06-23 08:54:28 +01:00
e73a23d4a4 fix space not showing 2019-06-23 08:44:51 +01:00
76e41a0383 fix restoring paused downloads 2019-06-23 08:42:45 +01:00
7045927666 hide monitor options from gui 2019-06-23 08:02:28 +01:00
5fb3086b42 update faq 2019-06-23 07:52:01 +01:00
2de18227c1 persist pause state 2019-06-23 07:48:49 +01:00
bd12a1de3d pause/resume downloads 2019-06-23 06:59:52 +01:00
a3a91050c8 update todo 2019-06-23 01:50:30 +01:00
6c1cc28e49 shutdown if connection to I2P router is lost 2019-06-22 17:32:12 +01:00
b6e5b54f05 do not show monitor by default 2019-06-22 14:51:26 +01:00
a6e559ec67 change some defaults 2019-06-22 06:54:49 +01:00
f11badb824 update todo 2019-06-21 22:43:46 +01:00
44da44ff6f Release 0.3.5 2019-06-21 22:35:54 +01:00
aae3fc29ca add logging.properties with various degree of noisiness 2019-06-21 22:28:57 +01:00
c30aa19d8b Merge branch 'download-mesh' 2019-06-21 22:26:17 +01:00
c79e8712d0 correctly determine if uploader has requested piece 2019-06-21 20:36:33 +01:00
ed12d78a48 clear pieces on cancel 2019-06-21 17:22:55 +01:00
d27872cc8b investigate StringIndexOutOfBounds 2019-06-21 16:29:52 +01:00
f794c39760 personas not destinations 2019-06-21 16:15:35 +01:00
2be9c425f7 compute which pieces are requested 2019-06-21 16:09:57 +01:00
ab5fea9216 416 if piece not downloaded 2019-06-21 16:03:20 +01:00
d1c8328080 do not send alts if there aren't any 2019-06-21 15:39:00 +01:00
89e761f53b write personas on the wire part1 2019-06-21 15:26:18 +01:00
40410eba63 fix constructor 2019-06-21 14:57:53 +01:00
85466a8e80 fix npe 2019-06-21 14:45:14 +01:00
c210af7870 source partial uploads from incompletes file 2019-06-21 14:39:20 +01:00
38ff49d28f downloaders get pieces from mesh manager 2019-06-21 14:17:10 +01:00
710f9f52a8 send X-Have and X-Alts from uploader 2019-06-21 13:58:21 +01:00
1b6eda5a40 skeleton of mesh manager 2019-06-21 13:34:00 +01:00
1ee9ccf098 parse X-Have on uploader side 2019-06-21 12:55:25 +01:00
0f07562de3 pass new sources to active downloaders 2019-06-21 12:39:16 +01:00
6eb1aa07f5 key downloaders by infohash 2019-06-21 12:29:32 +01:00
05b02834af parse X-Alt 2019-06-21 12:25:04 +01:00
56125f6df8 refactor X-Have decoding logic 2019-06-21 09:32:10 +01:00
8f9996848b send X-Have from downloader too 2019-06-21 09:25:28 +01:00
dd655ed60f test for re-requesting available pieces 2019-06-21 09:12:42 +01:00
8923c6ff7d exclude local results by default 2019-06-21 08:15:20 +01:00
807ab22f8e test parsing of X-Have 2019-06-21 06:43:48 +01:00
a26ad229ee more tests 2019-06-21 05:56:42 +01:00
5504dd2251 tighten conditions 2019-06-21 05:45:11 +01:00
f9777d29f4 get existing tests to pass 2019-06-21 05:41:49 +01:00
b23226e8c6 wip on parsing X-Have from uploader 2019-06-21 05:30:56 +01:00
1249ad29e0 claim pieces from list of available pieces 2019-06-21 04:42:02 +01:00
7bb5e5b632 Release 0.3.4 2019-06-20 21:07:50 +01:00
b2e43f9765 update split pattern and add unit test 2019-06-20 21:06:39 +01:00
2aa73c203a Release 0.3.3 2019-06-20 18:08:02 +01:00
18d2b56563 fix indexing 2019-06-20 17:57:36 +01:00
a455b4ad6e redirect exceptions in result sender to log 2019-06-20 17:22:59 +01:00
761b683a81 Release 0.3.2 2019-06-20 16:04:46 +01:00
1d41bcd825 prevent empty tokens in search index 2019-06-20 16:02:48 +01:00
f1ac038b55 update split pattern 2019-06-20 15:47:00 +01:00
396c636e42 prevent empty search terms 2019-06-20 15:29:27 +01:00
e32c858e90 update README with quick FAQ 2019-06-20 14:18:37 +01:00
821555f3f1 Release 0.3.1 2019-06-20 14:02:22 +01:00
089ab4f0d9 do not retry downloads if core is shut(ting) down 2019-06-20 13:40:04 +01:00
948b6292fe add shutdown hook to shutdown core on SIGTERM 2019-06-20 13:29:15 +01:00
4e2a530a13 Release 0.3.0 2019-06-20 07:04:45 +01:00
03646e2b90 Document download mesh 2019-06-20 01:19:15 +01:00
3dce228bbb always clean 2019-06-19 22:42:05 +01:00
15a49ad550 show git revision in title 2019-06-19 22:36:22 +01:00
3d91c0f4c7 increase default tunnel count 2019-06-19 22:24:04 +01:00
2825a8d9a4 Release 0.2.10 2019-06-19 17:18:30 +01:00
8dcce9bda6 Merge branch 'connection-logic' 2019-06-19 17:16:13 +01:00
d8d3e2cd58 update tests 2019-06-19 15:54:35 +01:00
51d5dbe47e Prevent rare exception on changing trust when result tabs are open 2019-06-19 12:23:18 +01:00
84cee0aa43 retry failed hosts after one hour 2019-06-19 08:35:31 +01:00
162844787f explicitly set java versions 2019-06-19 02:11:00 +01:00
d8a2b59055 tool to print out contents of files.json 2019-06-18 22:08:33 +01:00
67a0939de4 Release 0.2.9 2019-06-18 20:15:53 +01:00
37ca922a2c reduce default retry interval 2019-06-18 20:07:20 +01:00
1d6781819b ignore CWSE if shutting down 2019-06-18 19:44:22 +01:00
64d45da94a show version on title 2019-06-18 18:57:44 +01:00
59c84d8a5e Release 0.2.8 2019-06-18 17:48:07 +01:00
8b55021a4b fix 2019-06-18 17:23:18 +01:00
8bd3ebfaf5 timestamp entries 2019-06-18 17:17:03 +01:00
526ec45da3 Release 0.2.7 2019-06-18 15:53:54 +01:00
deb7c0b4b0 exclude files present locally from search results 2019-06-18 15:45:27 +01:00
e85a0c7b2c Merge branch 'source-tracking' 2019-06-18 12:22:46 +01:00
7b021a47eb fix detection of moving files into a watched dir on Linux 2019-06-18 12:20:10 +01:00
0c21d4d6c1 implement source tracking 2019-06-18 11:34:19 +01:00
8e9f79d404 update TODO 2019-06-18 09:43:22 +01:00
bf33a6ff61 Release 0.2.6 2019-06-18 09:07:27 +01:00
19c8d84afd Merge branch 'file-monitor' 2019-06-18 09:01:09 +01:00
6a40787863 fine log 2019-06-18 05:46:16 +01:00
c698cbd737 register created directories recursively 2019-06-18 05:43:41 +01:00
9c049b9301 special case mac 2019-06-18 05:26:41 +01:00
84a9bb9482 watch deleting of files 2019-06-18 04:15:44 +01:00
0c1008d6b3 update readme 2019-06-18 04:01:04 +01:00
c46f1b1ccd delay processing of files until after 1 second after the last MODIFY event 2019-06-17 23:08:16 +01:00
7e2c4d48c6 wait for UI to load before loading files 2019-06-17 22:34:19 +01:00
71a919e62b shut down watcher before connection manager 2019-06-17 22:15:50 +01:00
d5eb65bdc2 do not print stacktrace on clean shutdown 2019-06-17 21:58:44 +01:00
aef7533bd5 make watcher thread daemon 2019-06-17 19:58:57 +01:00
e78016ead4 ui panel for managing watched directories 2019-06-17 19:23:04 +01:00
52ced669dd basic watching of directories 2019-06-17 16:36:12 +01:00
b52fb38ede fix disabling of buttons on search tab close 2019-06-17 13:43:11 +01:00
5dcef3ca05 Release 0.2.5 2019-06-17 12:53:58 +01:00
eaa0e46ce5 Merge branch 'separate-incomplete-files' 2019-06-17 12:45:51 +01:00
c4f48c02b6 delete incomplete file on cancel 2019-06-17 12:33:44 +01:00
5c16335969 if no row is selected do not enable buttons 2019-06-17 12:26:28 +01:00
546eb4e9d3 only allow one download per infohash from gui 2019-06-17 11:25:21 +01:00
c3d9e852ba separate incomplete files 2019-06-17 07:49:06 +01:00
0db7077a45 Release 0.2.4 2019-06-17 03:22:52 +01:00
614ecc85fe new piece selection logic to avoid high cpu bug 2019-06-17 03:21:37 +01:00
af66a79376 fix sorting by progress 2019-06-17 00:56:16 +01:00
465171c81d prevent multiple identical shared files 2019-06-17 00:38:05 +01:00
b507361c58 close the file before marking pieces complete 2019-06-16 23:45:23 +01:00
4d001ae74b thread-safe access to the pieces file 2019-06-16 22:56:09 +01:00
36a6e2769f Release 0.2.3 2019-06-16 19:05:12 +01:00
69eeb7d77a fix 2019-06-16 18:58:52 +01:00
551982b72a batch results sent to the GUI to prevent freeze 2019-06-16 18:51:07 +01:00
8d808f0b8f Release 0.2.2 2019-06-16 13:30:11 +01:00
7833a83c87 mark hash queries for V2 results 2019-06-16 13:17:32 +01:00
3160c1a8f3 fix for silent uploader exceptions 2019-06-16 13:01:14 +01:00
e295aa67d5 proper log statement 2019-06-16 10:59:11 +01:00
a9f5625dc3 fix popup menu on failed downloads 2019-06-16 10:50:21 +01:00
cc0af5b9ed add context menu to downloads table 2019-06-16 10:29:28 +01:00
041fc3bef3 Release 0.2.1 2019-06-16 09:37:53 +01:00
03c3b1ebf1 fix copying of hash if search results are sorted 2019-06-16 09:30:52 +01:00
aece390daa right-click menu on the search results tab 2019-06-16 09:17:17 +01:00
cf63be68e8 copy search to clipboard 2019-06-16 08:38:47 +01:00
88ece4dc23 add option to show search hashes in monitor 2019-06-16 08:29:03 +01:00
13767d58f2 detect if a query is hash, get rid of radio buttons 2019-06-16 08:09:51 +01:00
05a1ccd3d8 update todo 2019-06-16 07:31:01 +01:00
6807c14a5f add copy hash to clipboard 2019-06-16 07:23:22 +01:00
684be0c50e start of work on directory watcher 2019-06-16 07:03:16 +01:00
6655c262c6 more todo items 2019-06-16 07:01:50 +01:00
b1ccd55030 more todo items 2019-06-16 06:26:03 +01:00
a3becd0f7e update TODO 2019-06-16 06:19:28 +01:00
af2f3e0ebf in/out direction done 2019-06-16 05:56:56 +01:00
e2b7ffa1db direction in monitor tab 2019-06-16 05:52:23 +01:00
0e0176acfc add web UI to TODO list 2019-06-16 05:35:05 +01:00
7f09bb079c Beginnings of a TODO list 2019-06-16 05:28:42 +01:00
77e48b01bb Release 0.2.0 2019-06-15 21:10:11 +01:00
12db6857c1 disable unshare files popup until implemented 2019-06-15 12:12:08 +01:00
acd67733a5 sort the downloads table on updates 2019-06-15 12:08:29 +01:00
8d3ce7aa8e use the same sorted row selection logic in downloads table 2019-06-15 09:57:12 +01:00
0eb5870e9b Release 0.1.13 2019-06-15 09:19:19 +01:00
051efbfaba prevent empty searches 2019-06-15 09:11:42 +01:00
6b38d7bffb fix sorting bug try 2 2019-06-15 08:58:51 +01:00
5778d537ce Release 0.1.12 2019-06-15 08:39:19 +01:00
93664a7985 update readme 2019-06-15 08:37:29 +01:00
edd58e0c90 allow cancelling of downloads while hashlist is being fetched 2019-06-15 08:35:23 +01:00
9ac52b61dc sort results table on update 2019-06-15 08:33:22 +01:00
0a4b9c7029 shut down connection manager last 2019-06-15 08:20:10 +01:00
87b366a205 add ability to cancel failed downloads 2019-06-14 22:49:56 +01:00
040248560a Release 0.1.11 2019-06-14 22:26:28 +01:00
77caaf83de reset instead of close 2019-06-14 22:08:25 +01:00
cc5ece5103 do not throw exception on shutdown 2019-06-14 21:36:50 +01:00
db7e21e343 close connections in parallel, more shutdown fixes 2019-06-14 21:25:22 +01:00
a388eaec1d shutdown all connections on shutdown 2019-06-14 20:53:54 +01:00
8ff39072c7 download file on double-clicking a result 2019-06-14 20:42:26 +01:00
55d2ac9b24 delete partial files and pieces file on cancel 2019-06-14 20:27:14 +01:00
6ebe492fd8 if nothing is enabled cancel and retry buttons are disabled 2019-06-14 18:37:18 +01:00
165cd542ec work around not having a selected row while cancelling a download 2019-06-14 18:28:00 +01:00
5ca0c8b00d wip on unshare selected files popup menu 2019-06-14 18:08:56 +01:00
b6a38e3f23 revert to default lnf if the desired one fails 2019-06-14 18:01:14 +01:00
34d9165bd5 Release 0.1.10 2019-06-14 16:43:28 +01:00
2e52dd5c49 fix overwriting of custom nickname 2019-06-14 16:20:21 +01:00
2a315dd734 add option to exclude local results from searches 2019-06-14 14:48:01 +01:00
6b661b99c5 fix sorting by size in shared files table 2019-06-14 13:47:35 +01:00
5dacd60bbb hook up cleaning up of cancelled/finished downloads 2019-06-14 13:11:20 +01:00
f8f7cfe836 UI options panel 2019-06-14 12:51:27 +01:00
0b4f261bc1 ability to not show monitor panel 2019-06-14 12:21:14 +01:00
042d67d784 fix selection of size column 2019-06-14 11:46:31 +01:00
800df88f14 proper sorting by size 2019-06-14 11:10:19 +01:00
4d1eac50a0 update readme for sorting bug 2019-06-14 10:39:58 +01:00
c48df7f14b Release 0.1.9 2019-06-13 22:57:08 +01:00
9d04148001 remember loaded downloads from previous sessions 2019-06-13 22:53:23 +01:00
bb4d522572 Release 0.1.8 2019-06-13 15:27:06 +01:00
8052501e52 increase persistence interval to 15 seconds 2019-06-13 15:25:30 +01:00
66cc6d8ab7 reduce piece size by factor of 8 2019-06-13 15:24:26 +01:00
a45e57f5ec Release 0.1.7 2019-06-13 10:28:44 +01:00
7d8ca55d87 fix emiting of download finished event 2019-06-13 10:27:18 +01:00
de22f3c6b9 use metal lnf on java 9 or newer 2019-06-13 05:02:11 +01:00
3b0eb5678d update wire protocol 2019-06-12 23:46:48 +01:00
5a1f32e40b Release 0.1.6 2019-06-12 22:42:34 +01:00
ca3f2513e1 sync persisting of hashlist or hashroot for active downloads 2019-06-12 22:39:00 +01:00
658d9cf5a8 serialize downloads that do not have a hashlist 2019-06-12 22:22:20 +01:00
e389090b7e download side of oob hashlist 2019-06-12 22:13:16 +01:00
04ceaba514 do not persist downloaders until they have a hashlist 2019-06-12 21:02:01 +01:00
6a01d97a8d enable oob infohash in queries; send V2 search results 2019-06-12 20:55:13 +01:00
747663e1dc fix pieece size of shared downloaded files 2019-06-12 18:22:53 +01:00
e426b3ccbd refactoring to enable hashlist uploads 2019-06-12 17:33:43 +01:00
5172e19627 font-ize more elements 2019-06-12 16:34:24 +01:00
e826cfd8d5 start work on ability to configure font 2019-06-12 16:26:40 +01:00
51004f6fe9 wip on adding UI options 2019-06-11 08:04:26 +01:00
08bb2b614d load some gui props from a separate config file 2019-06-11 02:17:58 +01:00
d0e5d0ce8a set default i2cp options if none present 2019-06-10 08:55:44 +01:00
9e05802d1b Merge pull request #4 from mikalv/master
Fixes i2cp bug while connecting to remote router
2019-06-10 08:48:27 +01:00
fb4f56eec9 Remove debug message 2019-06-10 09:40:32 +02:00
be2083d430 Fixes i2cp bug while connecting to remote router 2019-06-10 09:39:46 +02:00
af6275d0a3 prevent Cli from hanging if there are no shared files 2019-06-10 07:04:01 +01:00
5269815329 update readme 2019-06-10 04:49:09 +01:00
bd21cf65ea Release 0.1.5 2019-06-09 20:37:39 +01:00
dea592eb27 do not resume cancelled downloads on restart 2019-06-09 20:36:14 +01:00
c81f963e0a Release 0.1.4 2019-06-09 17:37:10 +01:00
dc6b1199f3 implement resume across restart 2019-06-09 17:35:32 +01:00
42621a2dfb wip on persisting downloads between restarts 2019-06-09 16:26:00 +01:00
a7125963a7 DownloadManager listens to events, not FileManager 2019-06-09 16:19:35 +01:00
f39d7f4fa8 emit an event when the UI loads 2019-06-09 15:44:06 +01:00
b88334f19a Release 0.1.3 for sorting fixes 2019-06-08 17:57:36 +01:00
81e186ad1f fix sorting by download status and trust, fix events on downloads table 2019-06-08 17:55:39 +01:00
33a45c3835 fix buttons when tables are sorted 2019-06-08 17:09:44 +01:00
32b7867e44 Release 0.1.2 for search index test 2019-06-08 13:09:28 +01:00
5b313276f4 fix tests broken by piece size change 2019-06-08 13:08:20 +01:00
abba4cc6fa fix a bug where multi-term search modifies the index 2019-06-08 12:55:47 +01:00
15b4804968 update wire protocol with originator and oobHashlist fields 2019-06-08 12:40:38 +01:00
942a01a501 forgot to commit 2019-06-08 09:33:16 +01:00
502a8d91da print only the root 2019-06-08 09:30:01 +01:00
5414e8679b update readme 2019-06-08 09:07:13 +01:00
14e42dd7c2 correct element 2019-06-08 08:46:28 +01:00
1299fb2512 Release 0.1.1 for fixes and reduced piece size 2019-06-08 08:04:35 +01:00
9bafdfe0b1 reduce piece size 2019-06-08 07:57:36 +01:00
36eb632756 do not set the flag until it is implemented 2019-06-08 07:53:33 +01:00
83ee620402 sort by columns 2019-06-08 07:45:07 +01:00
3fe40d317d update readme for custom host:port 2019-06-08 07:28:23 +01:00
e9703a2652 support for custom i2cp host:port 2019-06-08 07:23:14 +01:00
a3fe89851f OS-specific home dir 2019-06-08 07:10:24 +01:00
b9ea0128cd add oobInfohash flag, filter results by that flag 2019-06-08 02:44:49 +01:00
53c6db4ec8 de-hardcode piece sizes in results 2019-06-08 01:48:07 +01:00
60776829b9 fix disabling sharing of downloaded files 2019-06-08 01:35:03 +01:00
b5cb31c23d proposed infohash upgrade document 2019-06-08 01:04:56 +01:00
5052c0c993 note about downloads in progress 2019-06-07 21:52:38 +01:00
06de007866 update readme 2019-06-07 21:22:49 +01:00
7c8a0c9ad9 update readme for 0.1.0 2019-06-07 19:24:13 +01:00
cda81a89a2 Release 0.1.0 2019-06-07 18:39:39 +01:00
483773422c fix remaining tests 2019-06-07 18:23:16 +01:00
1e1e6d0bb0 fix test 2019-06-07 18:17:16 +01:00
668d6e087d fix test 2019-06-07 18:15:03 +01:00
49af412b96 status update and auto-retry 2019-06-07 16:13:35 +01:00
d5513021ed Release 0.0.14 for split search 2019-06-07 15:00:16 +01:00
c3154cf717 stray println 2019-06-07 14:58:03 +01:00
114940c4c1 fix searches with spaces 2019-06-07 14:51:09 +01:00
d4336e9b5d outbound nickname 2019-06-07 14:24:45 +01:00
2c1d5508ed outbound nickname 2019-06-07 14:21:03 +01:00
1cebf6c7bd cli downloader 2019-06-07 14:02:10 +01:00
e12924a207 shadow jar for cli 2019-06-07 14:01:28 +01:00
f3b11895e4 utility for hashing files 2019-06-07 12:10:18 +01:00
1e084820fb log tweak 2019-06-07 11:55:17 +01:00
2198b4846d change wording 2019-06-07 11:43:02 +01:00
a5d442d320 Release 0.0.13 for keyword search fix 2019-06-07 06:37:23 +01:00
3f9ee887d6 prevent NPE in toString 2019-06-07 06:31:29 +01:00
4a9e6d3b6b prevent npe in keyword searches 2019-06-07 06:14:40 +01:00
80f2cc5f99 logging and toString() 2019-06-07 06:07:02 +01:00
12283dba9d Release 0.0.12 for search by hash 2019-06-06 22:22:43 +01:00
5c959bc8b7 name update search tab 2019-06-06 22:07:20 +01:00
f3712fe7af delay initial update check a minute 2019-06-06 21:52:35 +01:00
3e49b0ec66 infohash may be null 2019-06-06 21:40:44 +01:00
f90beb8e3d encode infohash 2019-06-06 21:31:00 +01:00
fbad7b6c7e searchHash 2019-06-06 21:27:07 +01:00
ec2d89c18c serialize infohash 2019-06-06 21:21:40 +01:00
c27fc0a515 update from infohash 2019-06-06 21:08:58 +01:00
14681c2060 search by hash ui 2019-06-06 20:30:15 +01:00
1aeb230ea8 catch exceptions in event dispatch thread 2019-06-06 19:31:10 +01:00
d1dfc73f5a decode infohash 2019-06-06 19:28:29 +01:00
0cebe4119c update list of limitations 2019-06-06 14:19:43 +01:00
9f21120ec8 print periodic stats 2019-06-06 13:59:05 +01:00
7eea8be67d Release 0.0.11 for file loading bug 2019-06-06 09:22:16 +01:00
f114302bdb hopefully fix the shared file loss 2019-06-06 09:19:00 +01:00
05b9b37488 emit an event when all files are loaded 2019-06-06 09:10:09 +01:00
52f317a5b7 prevent division by zero 2019-06-06 07:09:54 +01:00
fb8227a1f3 prevent division by zero 2019-06-06 07:09:05 +01:00
5677d9f46a release 0.0.10 2019-06-06 00:23:59 +01:00
c5192e3845 update readme for fix 2019-06-06 00:21:41 +01:00
43c2a55cb8 0 not null 2019-06-06 00:03:22 +01:00
94f6de6bea do not create new objects because that clears the successes 2019-06-05 21:07:23 +01:00
6782849a12 retry hosts received from hostcache even if marked as failed 2019-06-05 20:58:28 +01:00
c07d351c5d switch to jul, reduce aging interval 2019-06-05 20:14:38 +01:00
dc2f675dd3 delete pieces file when download finishes 2019-06-05 19:52:50 +01:00
a8e795ec51 do not accept connections if already try to connect to them 2019-06-05 19:07:36 +01:00
33c5b3b18e option to disable sharing of downloaded files 2019-06-05 17:46:55 +01:00
581fce4643 share downloaded files 2019-06-05 17:33:34 +01:00
7fe78a0719 more clear name 2019-06-05 16:47:10 +01:00
cdb6e22522 ui option for allowing untrusted connections 2019-06-05 15:47:44 +01:00
2edeb046be drop neutral queries if configured 2019-06-05 15:38:39 +01:00
4021f3c244 fix jullog 2019-06-05 13:04:46 +01:00
9008fac24d shutdown cleanly on exit 2019-06-05 12:38:56 +01:00
e2f92c5c5e print reported version 2019-06-05 10:07:04 +01:00
7b33a16fd8 update list of known issues 2019-06-05 09:22:56 +01:00
9a2531b264 release 0.0.9 2019-06-05 09:04:52 +01:00
9a8dadff57 center the sources column 2019-06-05 08:43:58 +01:00
4a274010f9 fix close tab button not appearing on duplicate searches 2019-06-05 08:34:09 +01:00
1eb930435b fix hashing errors in large files 2019-06-05 00:34:38 +01:00
9df28552ad try to load persisted files before hashing new ones 2019-06-05 00:22:36 +01:00
ac0204dffc hopefully more accurate bandwidth gauge 2019-06-04 23:50:36 +01:00
e5c402a400 retry download workers on resume 2019-06-04 23:36:57 +01:00
7704c73b68 pass logging.properties to cli 2019-06-04 22:19:19 +01:00
a9aa8dd840 do not count finished downloaders towards bandwidth 2019-06-04 21:55:59 +01:00
de682a802a options panel for i2p tunnel options 2019-06-04 21:14:23 +01:00
5435518212 core-side i2cp options 2019-06-04 20:20:25 +01:00
bd01f983c9 break html in search results 2019-06-04 19:27:22 +01:00
8b63864b90 utility to share files in headless mode 2019-06-04 18:58:02 +01:00
ed3943c1af 0.0.8 for UI tweaks and sanitization 2019-06-04 18:01:08 +01:00
e195141a27 simpler sanitization 2019-06-04 17:58:19 +01:00
bb02fdbee9 do not use regex in sanitization 2019-06-04 17:46:41 +01:00
6e3a2c0d08 update split pattern 2019-06-04 17:30:55 +01:00
bd5fecc19d fix 2019-06-04 17:04:24 +01:00
d5db49fa79 initialize core 2019-06-04 16:56:58 +01:00
f2ea8619bb CLI project 2019-06-04 16:46:32 +01:00
b129e79196 do not count finished workers in total count 2019-06-04 16:22:48 +01:00
404d5b60bc format length in shared file stable an resize columns 2019-06-04 14:05:33 +01:00
de2753ac50 preferred sizes for download table columns 2019-06-04 13:35:18 +01:00
2d53999c8e only show download speed if downloading 2019-06-04 13:23:48 +01:00
5aecf72d6f format download speed 2019-06-04 13:19:14 +01:00
a574a67ec6 format file size 2019-06-04 13:15:24 +01:00
6b5ad969b7 pass logging properties 2019-06-04 13:00:10 +01:00
617209c4e4 column widths tweaks 2019-06-04 12:46:48 +01:00
309 changed files with 37786 additions and 4840 deletions

View File

@ -1,39 +1,41 @@
# MuWire - Easy Anonymous File-Sharing
MuWire is an easy to use file-sharing program which offers anonymity using [I2P technology](http://geti2p.net).
MuWire is an easy to use file-sharing program which offers anonymity using [I2P technology](http://geti2p.net). It works on any platform Java works on, including Windows, MacOS, Linux.
It is inspired by the LimeWire Gnutella client and developed by a former LimeWire developer.
The project is in development. You can find technical documentation in the "doc" folder.
The current stable release - 0.4.15 - is available for download at https://muwire.com. You can find technical documentation in the "doc" folder.
### Building
You need JDK 8 or newer. After installing that and setting up the appropriate paths, just type
```
./gradlew assemble
./gradlew clean assemble
```
If you want to run the unit tests, type
```
./gradlew build
./gradlew clean build
```
Some of the UI tests will fail because they haven't been written yet :-/
If you want to build binary bundles that do not depend on Java or I2P, see the https://github.com/zlatinb/muwire-pkg project
### Running
You need to have an I2P router up and running on the same machine. After you build the application, look inside "gui/build/distributions". Untar/unzip one of the "shadow" files and then run the jar contained inside.
After you build the application, look inside `gui/build/distributions`. Untar/unzip one of the `shadow` files and then run the jar contained inside by typing `java -jar gui-x.y.z.jar` in a terminal or command prompt.
The first time you run MuWire it will ask you to select a nickname. This nickname will be displayed with search results, so that others can verify the file was shared by you.
If you have an I2P router running on the same machine that is all you need to do. If you use a custom I2CP host and port, create a file `i2p.properties` and put `i2cp.tcp.host=<host>` and `i2cp.tcp.port=<port>` in there. On Windows that file should go into `%HOME%\AppData\Roaming\MuWire`, on Mac into `$HOME/Library/Application Support/MuWire` and on Linux `$HOME/.MuWire`
At the moment there are very few nodes on the network, so you will see very few connections and search results. It is best to leave MuWire running all the time, just like I2P.
### Known bugs and limitations
* Any shared files get re-hashed on startup
* Sometimes the list of shared files gets lost
* Many UI features you would expect are not there yet
[Default I2CP port]\: `7654`
### GPG Fingerprint
```
471B 9FD4 5517 A5ED 101F C57D A728 3207 2D52 5E41
```
You can find the full key at https://keybase.io/zlatinb
[Default I2CP port]: https://geti2p.net/en/docs/ports

26
TODO.md Normal file
View File

@ -0,0 +1,26 @@
# TODO List
Not in any particular order yet
### Big Items
##### Bloom Filters
This reduces query traffic by not sending last hop queries to peers that definitely do not have the file
##### Two-tier Topology
This helps with scalability
##### Web UI, REST Interface, etc.
Basically any non-gui non-cli user interface
##### Metadata editing and search
To enable parsing of metadata from known file types and the user editing it or adding manual metadata
### Small Items
* Wrapper of some kind for in-place upgrades
* Automatic adjustment of number of I2P tunnels

View File

@ -2,7 +2,7 @@ subprojects {
apply plugin: 'groovy'
dependencies {
compile 'net.i2p:i2p:0.9.40'
compile 'net.i2p:i2p:0.9.42'
compile 'org.codehaus.groovy:groovy-all:2.4.15'
}

22
cli/build.gradle Normal file
View File

@ -0,0 +1,22 @@
// Build configuration for the headless CLI module.
buildscript {
repositories {
jcenter()
mavenLocal()
}
dependencies {
// shadow plugin produces a single self-contained ("fat") jar for the CLI
classpath 'com.github.jengelman.gradle.plugins:shadow:2.0.4'
}
}
apply plugin : 'application'
mainClassName = 'com.muwire.cli.Cli'
apply plugin : 'com.github.johnrengelman.shadow'
// route java.util.logging configuration through a local logging.properties file
applicationDefaultJvmArgs = ['-Djava.util.logging.config.file=logging.properties']
dependencies {
// the CLI only depends on the core module; all UI code lives elsewhere
compile project(":core")
}

View File

@ -0,0 +1,144 @@
package com.muwire.cli
import java.util.concurrent.CountDownLatch
import com.muwire.core.Core
import com.muwire.core.MuWireSettings
import com.muwire.core.UILoadedEvent
import com.muwire.core.connection.ConnectionAttemptStatus
import com.muwire.core.connection.ConnectionEvent
import com.muwire.core.connection.DisconnectionEvent
import com.muwire.core.files.AllFilesLoadedEvent
import com.muwire.core.files.FileHashedEvent
import com.muwire.core.files.FileLoadedEvent
import com.muwire.core.files.FileSharedEvent
import com.muwire.core.upload.UploadEvent
import com.muwire.core.upload.UploadFinishedEvent
/**
 * Headless CLI entry point for MuWire: loads settings from
 * ~/.MuWire/MuWire.properties, starts the core, shares every path listed
 * in a text file (one path per line) and then prints a status summary
 * every minute until the process is killed.
 */
class Cli {
    public static void main(String[] args) {
        // MuWire home directory; created on first run
        def home = System.getProperty("user.home") + File.separator + ".MuWire"
        home = new File(home)
        if (!home.exists())
            home.mkdirs()

        // settings must already exist - the CLI does no first-run setup
        def propsFile = new File(home,"MuWire.properties")
        if (!propsFile.exists()) {
            println "create props file ${propsFile.getAbsoluteFile()} before launching MuWire"
            System.exit(1)
        }
        def props = new Properties()
        propsFile.withInputStream { props.load(it) }
        props = new MuWireSettings(props)

        Core core
        try {
            core = new Core(props, home, "0.5.0")
        } catch (Exception bad) {
            bad.printStackTrace(System.out)
            println "Failed to initialize core, exiting"
            System.exit(1)
        }

        // path of the file listing what to share: argv[0] or read from stdin
        def filesList
        if (args.length == 0) {
            println "Enter a file containing list of files to share"
            def reader = new BufferedReader(new InputStreamReader(System.in))
            filesList = reader.readLine()
        } else
            filesList = args[0]

        Thread.sleep(1000)
        println "loading shared files from $filesList"

        // listener for shared files
        def sharedListener = new SharedListener()
        core.eventBus.register(FileHashedEvent.class, sharedListener)
        core.eventBus.register(FileLoadedEvent.class, sharedListener)

        // for connections
        def connectionsListener = new ConnectionListener()
        core.eventBus.register(ConnectionEvent.class, connectionsListener)
        core.eventBus.register(DisconnectionEvent.class, connectionsListener)

        // for uploads
        def uploadsListener = new UploadsListener()
        core.eventBus.register(UploadEvent.class, uploadsListener)
        core.eventBus.register(UploadFinishedEvent.class, uploadsListener)

        // one-line status summary every minute on a daemon timer
        Timer timer = new Timer("status-printer", true)
        timer.schedule({
            println String.valueOf(new Date()) + " Connections $connectionsListener.connections Uploads $uploadsListener.uploads Shared $sharedListener.shared"
        } as TimerTask, 60000, 60000)

        // block until previously persisted shared files have been re-loaded
        def latch = new CountDownLatch(1)
        def fileLoader = new Object() {
            public void onAllFilesLoadedEvent(AllFilesLoadedEvent e) {
                latch.countDown()
            }
        }
        core.eventBus.register(AllFilesLoadedEvent.class, fileLoader)
        core.startServices()
        core.eventBus.publish(new UILoadedEvent())
        println "waiting for files to load"
        latch.await()

        // now we begin
        println "MuWire is ready"
        filesList = new File(filesList)
        // FIX: the original read only the first line of the list file via
        // withReader { it.readLine() }; share every non-blank entry instead.
        filesList.eachLine { toShare ->
            toShare = toShare.trim()
            if (!toShare.isEmpty())
                core.eventBus.publish(new FileSharedEvent(file : new File(toShare)))
        }

        Runtime.getRuntime().addShutdownHook({
            println "shutting down.."
            core.shutdown()
            println "shutdown."
        })
        Thread.sleep(Integer.MAX_VALUE)
    }

    /** Tracks the number of currently established connections. */
    static class ConnectionListener {
        volatile int connections
        public void onConnectionEvent(ConnectionEvent e) {
            if (e.status == ConnectionAttemptStatus.SUCCESSFUL)
                connections++
        }
        public void onDisconnectionEvent(DisconnectionEvent e) {
            connections--
        }
    }

    /** Tracks the number of uploads in progress and logs start/finish. */
    static class UploadsListener {
        volatile int uploads
        public void onUploadEvent(UploadEvent e) {
            uploads++
            println String.valueOf(new Date()) + " Starting upload of ${e.uploader.file.getName()} to ${e.uploader.request.downloader.getHumanReadableName()}"
        }
        public void onUploadFinishedEvent(UploadFinishedEvent e) {
            uploads--
            println String.valueOf(new Date()) + " Finished upload of ${e.uploader.file.getName()} to ${e.uploader.request.downloader.getHumanReadableName()}"
        }
    }

    /** Counts files as they are freshly hashed or loaded from persisted state. */
    static class SharedListener {
        volatile int shared
        void onFileHashedEvent(FileHashedEvent e) {
            if (e.error != null)
                println "ERROR $e.error"
            else {
                println "Shared file : $e.sharedFile.file"
                shared++
            }
        }
        void onFileLoadedEvent(FileLoadedEvent e) {
            shared++
        }
    }
}

View File

@ -0,0 +1,166 @@
package com.muwire.cli
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.CountDownLatch
import com.muwire.core.Core
import com.muwire.core.MuWireSettings
import com.muwire.core.connection.ConnectionAttemptStatus
import com.muwire.core.connection.ConnectionEvent
import com.muwire.core.download.DownloadStartedEvent
import com.muwire.core.download.Downloader
import com.muwire.core.download.UIDownloadEvent
import com.muwire.core.search.QueryEvent
import com.muwire.core.search.SearchEvent
import com.muwire.core.search.UIResultEvent
import net.i2p.data.Base64
/**
 * Headless downloader: reads "base64hash,filename" pairs from a file,
 * waits for a given number of connections, searches the network for each
 * hash, waits for results, then starts downloads from all responding
 * hosts and prints progress every minute until everything finishes.
 */
class CliDownloader {

    private static final List<Downloader> downloaders = Collections.synchronizedList(new ArrayList<>())
    // search uuid -> accumulated results for that search
    private static final Map<UUID,ResultsHolder> resultsListeners = new ConcurrentHashMap<>()

    public static void main(String []args) {
        def home = System.getProperty("user.home") + File.separator + ".MuWire"
        home = new File(home)
        if (!home.exists())
            home.mkdirs()
        def propsFile = new File(home,"MuWire.properties")
        if (!propsFile.exists()) {
            println "create props file ${propsFile.getAbsoluteFile()} before launching MuWire"
            System.exit(1)
        }
        def props = new Properties()
        propsFile.withInputStream { props.load(it) }
        props = new MuWireSettings(props)

        def filesList
        int connections
        int resultWait
        if (args.length != 3) {
            // FIX: added the missing space between the concatenated fragments
            println "Enter a file containing list of hashes of files to download, " +
                "how many connections you want before searching " +
                "and how long to wait for results to arrive"
            System.exit(1)
        } else {
            filesList = args[0]
            connections = Integer.parseInt(args[1])
            resultWait = Integer.parseInt(args[2])
        }

        Core core
        try {
            core = new Core(props, home, "0.5.0")
        } catch (Exception bad) {
            bad.printStackTrace(System.out)
            println "Failed to initialize core, exiting"
            System.exit(1)
        }

        // wait until we have enough connections before querying
        def latch = new CountDownLatch(connections)
        def connectionListener = new ConnectionWaiter(latch : latch)
        core.eventBus.register(ConnectionEvent.class, connectionListener)
        core.startServices()
        println "starting to wait until there are $connections connections"
        latch.await()
        println "connected, searching for files"

        def file = new File(filesList)
        file.eachLine {
            String[] split = it.split(",")
            UUID uuid = UUID.randomUUID()
            // FIX: scope each listener to its search uuid; previously every
            // listener recorded every result, duplicating entries and racing
            // over which fileName the holder was created with.
            core.eventBus.register(UIResultEvent.class, new ResultsListener(uuid : uuid, fileName : split[1]))
            def hash = Base64.decode(split[0])
            def searchEvent = new SearchEvent(searchHash : hash, uuid : uuid)
            core.eventBus.publish(new QueryEvent(searchEvent : searchEvent, firstHop:true,
                replyTo: core.me.destination, receivedOn : core.me.destination, originator: core.me))
        }

        println "waiting for results to arrive"
        Thread.sleep(resultWait * 1000)

        core.eventBus.register(DownloadStartedEvent.class, new DownloadListener())
        resultsListeners.each { uuid, resultsListener ->
            println "starting download of $resultsListener.fileName from ${resultsListener.getResults().size()} hosts"
            File target = new File(resultsListener.fileName)
            core.eventBus.publish(new UIDownloadEvent(target : target, result : resultsListener.getResults()))
        }
        Thread.sleep(1000)

        // periodic progress report; resume() also nudges stalled downloads
        Timer timer = new Timer("stats-printer")
        timer.schedule({
            println "==== STATUS UPDATE ==="
            downloaders.each {
                int donePieces = it.donePieces()
                int totalPieces = it.nPieces
                int sources = it.activeWorkers.size()
                def root = Base64.encode(it.infoHash.getRoot())
                def state = it.getCurrentState()
                // FIX: removed the stray '}' that used to trail the status field
                println "file $it.file hash: $root progress: $donePieces/$totalPieces sources: $sources status: $state"
                it.resume()
            }
            println "==== END ==="
        } as TimerTask, 60000, 60000)

        println "waiting for downloads to finish"
        while(true) {
            boolean allFinished = true
            for (Downloader d : downloaders) {
                allFinished &= d.getCurrentState() == Downloader.DownloadState.FINISHED
            }
            if (allFinished)
                break
            Thread.sleep(1000)
        }
        println "all downloads finished"
    }

    /** Thread-safe accumulator of the results for one search. */
    static class ResultsHolder {
        final List<UIResultEvent> results = Collections.synchronizedList(new ArrayList<>())
        String fileName
        void add(UIResultEvent e) {
            results.add(e)
        }
        List getResults() {
            results
        }
    }

    /** Records incoming search results belonging to its own search uuid. */
    static class ResultsListener {
        UUID uuid
        String fileName
        public onUIResultEvent(UIResultEvent e) {
            // every registered listener sees every result; only handle ours
            if (e.uuid != uuid)
                return
            println "got a result for $fileName from ${e.sender.getHumanReadableName()}"
            ResultsHolder listener = resultsListeners.get(e.uuid)
            if (listener == null) {
                listener = new ResultsHolder(fileName : fileName)
                resultsListeners.put(e.uuid, listener)
            }
            listener.add(e)
        }
    }

    /** Counts down the startup latch once per successful connection. */
    static class ConnectionWaiter {
        CountDownLatch latch
        public void onConnectionEvent(ConnectionEvent e) {
            if (e.status == ConnectionAttemptStatus.SUCCESSFUL)
                latch.countDown()
        }
    }

    /** Collects started downloaders for the progress loop above. */
    static class DownloadListener {
        public void onDownloadStartedEvent(DownloadStartedEvent e) {
            downloaders.add(e.downloader)
        }
    }
}

View File

@ -0,0 +1,23 @@
package com.muwire.cli
import com.muwire.core.util.DataUtil
import groovy.json.JsonSlurper
import net.i2p.data.Base64
/**
 * Utility: dumps a files.json (one JSON object per line) as CSV rows of
 * name,length,pieceSize,infoHash on standard output.
 */
class FileList {
    public static void main(String [] args) {
        if (args.length < 1) {
            println "pass files.json as argument"
            System.exit(1)
        }
        def parser = new JsonSlurper()
        new File(args[0]).eachLine { line ->
            def record = parser.parseText(line)
            // file names are stored base64-encoded; decode to a display string
            def decodedName = DataUtil.readi18nString(Base64.decode(record.file))
            println "$decodedName,$record.length,$record.pieceSize,$record.infoHash"
        }
    }
}

View File

@ -2,8 +2,9 @@ apply plugin : 'application'
mainClassName = 'com.muwire.core.Core'
applicationDefaultJvmArgs = ['-Djava.util.logging.config.file=logging.properties']
dependencies {
compile 'net.i2p.client:mstreaming:0.9.40'
compile 'net.i2p.client:streaming:0.9.40'
compile 'net.i2p:router:0.9.42'
compile 'net.i2p.client:mstreaming:0.9.42'
compile 'net.i2p.client:streaming:0.9.42'
testCompile 'org.junit.jupiter:junit-jupiter-api:5.4.2'
testCompile 'junit:junit:4.12'

View File

@ -1,15 +0,0 @@
package com.muwire.core
import net.i2p.crypto.SigType
/**
 * Protocol-wide constants shared across the core.
 * NOTE(review): usages are outside this view; per-constant descriptions
 * below are inferred from the names — confirm against the call sites.
 */
class Constants {
// version byte for serialized Persona blobs, presumably
public static final byte PERSONA_VERSION = (byte)1
// signature algorithm used for destinations/personas
public static final SigType SIG_TYPE = SigType.EdDSA_SHA512_Ed25519
// 16 KiB — likely a cap on a single protocol header's size
public static final int MAX_HEADER_SIZE = 0x1 << 14
public static final int MAX_HEADERS = 16
// fraction of pieces fetched sequentially vs randomly — TODO confirm semantics
public static final float DOWNLOAD_SEQUENTIAL_RATIO = 0.8f
// regex used to split file names into keywords: dot, comma, underscore, dash
public static final String SPLIT_PATTERN = "[\\.,_-]"
}

View File

@ -1,6 +1,7 @@
package com.muwire.core
import java.nio.charset.StandardCharsets
import java.util.concurrent.atomic.AtomicBoolean
import com.muwire.core.connection.ConnectionAcceptor
import com.muwire.core.connection.ConnectionEstablisher
@ -12,9 +13,14 @@ import com.muwire.core.connection.I2PConnector
import com.muwire.core.connection.LeafConnectionManager
import com.muwire.core.connection.UltrapeerConnectionManager
import com.muwire.core.download.DownloadManager
import com.muwire.core.download.SourceDiscoveredEvent
import com.muwire.core.download.UIDownloadCancelledEvent
import com.muwire.core.download.UIDownloadEvent
import com.muwire.core.download.UIDownloadPausedEvent
import com.muwire.core.download.UIDownloadResumedEvent
import com.muwire.core.files.FileDownloadedEvent
import com.muwire.core.files.FileHashedEvent
import com.muwire.core.files.FileHashingEvent
import com.muwire.core.files.FileHasher
import com.muwire.core.files.FileLoadedEvent
import com.muwire.core.files.FileManager
@ -22,19 +28,32 @@ import com.muwire.core.files.FileSharedEvent
import com.muwire.core.files.FileUnsharedEvent
import com.muwire.core.files.HasherService
import com.muwire.core.files.PersisterService
import com.muwire.core.files.UICommentEvent
import com.muwire.core.files.UIPersistFilesEvent
import com.muwire.core.files.AllFilesLoadedEvent
import com.muwire.core.files.DirectoryUnsharedEvent
import com.muwire.core.files.DirectoryWatcher
import com.muwire.core.hostcache.CacheClient
import com.muwire.core.hostcache.HostCache
import com.muwire.core.hostcache.HostDiscoveredEvent
import com.muwire.core.mesh.MeshManager
import com.muwire.core.search.BrowseManager
import com.muwire.core.search.QueryEvent
import com.muwire.core.search.ResultsEvent
import com.muwire.core.search.ResultsSender
import com.muwire.core.search.SearchEvent
import com.muwire.core.search.SearchManager
import com.muwire.core.search.UIBrowseEvent
import com.muwire.core.search.UIResultBatchEvent
import com.muwire.core.trust.TrustEvent
import com.muwire.core.trust.TrustService
import com.muwire.core.trust.TrustSubscriber
import com.muwire.core.trust.TrustSubscriptionEvent
import com.muwire.core.update.UpdateClient
import com.muwire.core.upload.UploadManager
import com.muwire.core.util.MuWireLogManager
import com.muwire.core.content.ContentControlEvent
import com.muwire.core.content.ContentManager
import groovy.util.logging.Log
import net.i2p.I2PAppContext
@ -43,6 +62,7 @@ import net.i2p.client.I2PSession
import net.i2p.client.streaming.I2PSocketManager
import net.i2p.client.streaming.I2PSocketManagerFactory
import net.i2p.client.streaming.I2PSocketOptions
import net.i2p.client.streaming.I2PSocketManager.DisconnectListener
import net.i2p.crypto.DSAEngine
import net.i2p.crypto.SigType
import net.i2p.data.Destination
@ -50,14 +70,20 @@ import net.i2p.data.PrivateKey
import net.i2p.data.Signature
import net.i2p.data.SigningPrivateKey
import net.i2p.router.Router
import net.i2p.router.RouterContext
@Log
public class Core {
final EventBus eventBus
final Persona me
final File home
final Properties i2pOptions
final MuWireSettings muOptions
private final TrustService trustService
private final TrustSubscriber trustSubscriber
private final PersisterService persisterService
private final HostCache hostCache
private final ConnectionManager connectionManager
@ -66,34 +92,91 @@ public class Core {
private final ConnectionAcceptor connectionAcceptor
private final ConnectionEstablisher connectionEstablisher
private final HasherService hasherService
private final DownloadManager downloadManager
private final DirectoryWatcher directoryWatcher
final FileManager fileManager
final UploadManager uploadManager
final ContentManager contentManager
private final Router router
final AtomicBoolean shutdown = new AtomicBoolean()
public Core(MuWireSettings props, File home, String myVersion) {
this.home = home
log.info "Initializing I2P context"
I2PAppContext.getGlobalContext().logManager()
I2PAppContext.getGlobalContext()._logManager = new MuWireLogManager()
log.info("initializing I2P socket manager")
def i2pClient = new I2PClientFactory().createClient()
File keyDat = new File(home, "key.dat")
if (!keyDat.exists()) {
log.info("Creating new key.dat")
keyDat.withOutputStream {
i2pClient.createDestination(it, Constants.SIG_TYPE)
}
}
def sysProps = System.getProperties().clone()
sysProps["inbound.nickname"] = "MuWire"
I2PSession i2pSession
I2PSocketManager socketManager
keyDat.withInputStream {
socketManager = new I2PSocketManagerFactory().createManager(it, sysProps)
}
socketManager.getDefaultOptions().setReadTimeout(60000)
socketManager.getDefaultOptions().setConnectTimeout(30000)
i2pSession = socketManager.getSession()
this.home = home
this.muOptions = props
i2pOptions = new Properties()
def i2pOptionsFile = new File(home,"i2p.properties")
if (i2pOptionsFile.exists()) {
i2pOptionsFile.withInputStream { i2pOptions.load(it) }
if (!i2pOptions.containsKey("inbound.nickname"))
i2pOptions["inbound.nickname"] = "MuWire"
if (!i2pOptions.containsKey("outbound.nickname"))
i2pOptions["outbound.nickname"] = "MuWire"
} else {
i2pOptions["inbound.nickname"] = "MuWire"
i2pOptions["outbound.nickname"] = "MuWire"
i2pOptions["inbound.length"] = "3"
i2pOptions["inbound.quantity"] = "4"
i2pOptions["outbound.length"] = "3"
i2pOptions["outbound.quantity"] = "4"
i2pOptions["i2cp.tcp.host"] = "127.0.0.1"
i2pOptions["i2cp.tcp.port"] = "7654"
Random r = new Random()
int port = r.nextInt(60000) + 4000
i2pOptions["i2np.ntcp.port"] = String.valueOf(port)
i2pOptions["i2np.udp.port"] = String.valueOf(port)
i2pOptionsFile.withOutputStream { i2pOptions.store(it, "") }
}
if (!props.embeddedRouter) {
log.info "Initializing I2P context"
I2PAppContext.getGlobalContext().logManager()
I2PAppContext.getGlobalContext()._logManager = new MuWireLogManager()
router = null
} else {
log.info("launching embedded router")
Properties routerProps = new Properties()
routerProps.setProperty("i2p.dir.base", home.getAbsolutePath())
routerProps.setProperty("i2p.dir.config", home.getAbsolutePath())
routerProps.setProperty("router.excludePeerCaps", "KLM")
routerProps.setProperty("i2np.inboundKBytesPerSecond", String.valueOf(props.inBw))
routerProps.setProperty("i2np.outboundKBytesPerSecond", String.valueOf(props.outBw))
routerProps.setProperty("i2cp.disableInterface", "true")
routerProps.setProperty("i2np.ntcp.port", i2pOptions["i2np.ntcp.port"])
routerProps.setProperty("i2np.udp.port", i2pOptions["i2np.udp.port"])
routerProps.setProperty("i2np.udp.internalPort", i2pOptions["i2np.udp.port"])
router = new Router(routerProps)
router.getContext().setLogManager(new MuWireLogManager())
router.runRouter()
while(!router.isRunning())
Thread.sleep(100)
}
log.info("initializing I2P socket manager")
def i2pClient = new I2PClientFactory().createClient()
File keyDat = new File(home, "key.dat")
if (!keyDat.exists()) {
log.info("Creating new key.dat")
keyDat.withOutputStream {
i2pClient.createDestination(it, Constants.SIG_TYPE)
}
}
// options like tunnel length and quantity
I2PSession i2pSession
I2PSocketManager socketManager
keyDat.withInputStream {
socketManager = new I2PSocketManagerFactory().createManager(it, i2pOptions["i2cp.tcp.host"], i2pOptions["i2cp.tcp.port"].toInteger(), i2pOptions)
}
socketManager.getDefaultOptions().setReadTimeout(60000)
socketManager.getDefaultOptions().setConnectTimeout(30000)
socketManager.addDisconnectListener({eventBus.publish(new RouterDisconnectedEvent())} as DisconnectListener)
i2pSession = socketManager.getSession()
def destination = new Destination()
def spk = new SigningPrivateKey(Constants.SIG_TYPE)
keyDat.withInputStream {
@ -101,8 +184,8 @@ public class Core {
def privateKey = new PrivateKey()
privateKey.readBytes(it)
spk.readBytes(it)
}
}
def baos = new ByteArrayOutputStream()
def daos = new DataOutputStream(baos)
daos.write(Constants.PERSONA_VERSION)
@ -119,83 +202,121 @@ public class Core {
me = new Persona(new ByteArrayInputStream(baos.toByteArray()))
log.info("Loaded myself as "+me.getHumanReadableName())
eventBus = new EventBus()
log.info("initializing trust service")
File goodTrust = new File(home, "trusted")
File badTrust = new File(home, "distrusted")
trustService = new TrustService(goodTrust, badTrust, 5000)
eventBus.register(TrustEvent.class, trustService)
log.info "initializing file manager"
FileManager fileManager = new FileManager(eventBus)
eventBus.register(FileHashedEvent.class, fileManager)
eventBus.register(FileLoadedEvent.class, fileManager)
eventBus.register(FileDownloadedEvent.class, fileManager)
eventBus.register(FileUnsharedEvent.class, fileManager)
eventBus.register(SearchEvent.class, fileManager)
log.info "initializing persistence service"
persisterService = new PersisterService(new File(home, "files.json"), eventBus, 5000, fileManager)
log.info("initializing host cache")
File hostStorage = new File(home, "hosts.json")
eventBus = new EventBus()
log.info("initializing trust service")
File goodTrust = new File(home, "trusted")
File badTrust = new File(home, "distrusted")
trustService = new TrustService(goodTrust, badTrust, 5000)
eventBus.register(TrustEvent.class, trustService)
log.info "initializing file manager"
fileManager = new FileManager(eventBus, props)
eventBus.register(FileHashedEvent.class, fileManager)
eventBus.register(FileLoadedEvent.class, fileManager)
eventBus.register(FileDownloadedEvent.class, fileManager)
eventBus.register(FileUnsharedEvent.class, fileManager)
eventBus.register(SearchEvent.class, fileManager)
eventBus.register(DirectoryUnsharedEvent.class, fileManager)
eventBus.register(UICommentEvent.class, fileManager)
log.info("initializing mesh manager")
MeshManager meshManager = new MeshManager(fileManager, home, props)
eventBus.register(SourceDiscoveredEvent.class, meshManager)
log.info "initializing persistence service"
persisterService = new PersisterService(new File(home, "files.json"), eventBus, 60000, fileManager)
eventBus.register(UILoadedEvent.class, persisterService)
eventBus.register(UIPersistFilesEvent.class, persisterService)
log.info("initializing host cache")
File hostStorage = new File(home, "hosts.json")
hostCache = new HostCache(trustService,hostStorage, 30000, props, i2pSession.getMyDestination())
eventBus.register(HostDiscoveredEvent.class, hostCache)
eventBus.register(ConnectionEvent.class, hostCache)
log.info("initializing connection manager")
connectionManager = props.isLeaf() ?
new LeafConnectionManager(eventBus, me, 3, hostCache) : new UltrapeerConnectionManager(eventBus, me, 512, 512, hostCache, trustService)
eventBus.register(TrustEvent.class, connectionManager)
eventBus.register(ConnectionEvent.class, connectionManager)
eventBus.register(DisconnectionEvent.class, connectionManager)
eventBus.register(HostDiscoveredEvent.class, hostCache)
eventBus.register(ConnectionEvent.class, hostCache)
log.info("initializing connection manager")
connectionManager = props.isLeaf() ?
new LeafConnectionManager(eventBus, me, 3, hostCache, props) :
new UltrapeerConnectionManager(eventBus, me, 512, 512, hostCache, trustService, props)
eventBus.register(TrustEvent.class, connectionManager)
eventBus.register(ConnectionEvent.class, connectionManager)
eventBus.register(DisconnectionEvent.class, connectionManager)
eventBus.register(QueryEvent.class, connectionManager)
log.info("initializing cache client")
cacheClient = new CacheClient(eventBus,hostCache, connectionManager, i2pSession, props, 10000)
log.info("initializing cache client")
cacheClient = new CacheClient(eventBus,hostCache, connectionManager, i2pSession, props, 10000)
log.info("initializing update client")
updateClient = new UpdateClient(eventBus, i2pSession, myVersion, props)
log.info("initializing connector")
I2PConnector i2pConnector = new I2PConnector(socketManager)
log.info "initializing results sender"
ResultsSender resultsSender = new ResultsSender(eventBus, i2pConnector, me)
log.info "initializing search manager"
SearchManager searchManager = new SearchManager(eventBus, me, resultsSender)
eventBus.register(QueryEvent.class, searchManager)
eventBus.register(ResultsEvent.class, searchManager)
updateClient = new UpdateClient(eventBus, i2pSession, myVersion, props, fileManager, me)
eventBus.register(FileDownloadedEvent.class, updateClient)
eventBus.register(UIResultBatchEvent.class, updateClient)
log.info("initializing connector")
I2PConnector i2pConnector = new I2PConnector(socketManager)
log.info "initializing results sender"
ResultsSender resultsSender = new ResultsSender(eventBus, i2pConnector, me, props)
log.info "initializing search manager"
SearchManager searchManager = new SearchManager(eventBus, me, resultsSender)
eventBus.register(QueryEvent.class, searchManager)
eventBus.register(ResultsEvent.class, searchManager)
log.info("initializing download manager")
DownloadManager downloadManager = new DownloadManager(eventBus, i2pConnector, new File(home, "incompletes"), me)
downloadManager = new DownloadManager(eventBus, trustService, meshManager, props, i2pConnector, home, me)
eventBus.register(UIDownloadEvent.class, downloadManager)
eventBus.register(UILoadedEvent.class, downloadManager)
eventBus.register(FileDownloadedEvent.class, downloadManager)
eventBus.register(UIDownloadCancelledEvent.class, downloadManager)
eventBus.register(SourceDiscoveredEvent.class, downloadManager)
eventBus.register(UIDownloadPausedEvent.class, downloadManager)
eventBus.register(UIDownloadResumedEvent.class, downloadManager)
log.info("initializing upload manager")
UploadManager uploadManager = new UploadManager(eventBus, fileManager)
log.info("initializing acceptor")
I2PAcceptor i2pAcceptor = new I2PAcceptor(socketManager)
connectionAcceptor = new ConnectionAcceptor(eventBus, connectionManager, props,
i2pAcceptor, hostCache, trustService, searchManager, uploadManager)
uploadManager = new UploadManager(eventBus, fileManager, meshManager, downloadManager)
log.info("initializing connection establisher")
connectionEstablisher = new ConnectionEstablisher(eventBus, i2pConnector, props, connectionManager, hostCache)
log.info("initializing acceptor")
I2PAcceptor i2pAcceptor = new I2PAcceptor(socketManager)
connectionAcceptor = new ConnectionAcceptor(eventBus, connectionManager, props,
i2pAcceptor, hostCache, trustService, searchManager, uploadManager, fileManager, connectionEstablisher)
log.info("initializing directory watcher")
directoryWatcher = new DirectoryWatcher(eventBus, fileManager, home, props)
eventBus.register(FileSharedEvent.class, directoryWatcher)
eventBus.register(AllFilesLoadedEvent.class, directoryWatcher)
eventBus.register(DirectoryUnsharedEvent.class, directoryWatcher)
log.info("initializing hasher service")
hasherService = new HasherService(new FileHasher(), eventBus)
hasherService = new HasherService(new FileHasher(), eventBus, fileManager, props)
eventBus.register(FileSharedEvent.class, hasherService)
}
eventBus.register(FileUnsharedEvent.class, hasherService)
eventBus.register(DirectoryUnsharedEvent.class, hasherService)
log.info("initializing trust subscriber")
trustSubscriber = new TrustSubscriber(eventBus, i2pConnector, props)
eventBus.register(UILoadedEvent.class, trustSubscriber)
eventBus.register(TrustSubscriptionEvent.class, trustSubscriber)
log.info("initializing content manager")
contentManager = new ContentManager()
eventBus.register(ContentControlEvent.class, contentManager)
eventBus.register(QueryEvent.class, contentManager)
log.info("initializing browse manager")
BrowseManager browseManager = new BrowseManager(i2pConnector, eventBus)
eventBus.register(UIBrowseEvent.class, browseManager)
}
public void startServices() {
hasherService.start()
trustService.start()
trustService.waitForLoad()
persisterService.start()
hostCache.start()
connectionManager.start()
cacheClient.start()
@ -204,9 +325,30 @@ public class Core {
hostCache.waitForLoad()
updateClient.start()
}
public void shutdown() {
if (!shutdown.compareAndSet(false, true)) {
log.info("already shutting down")
return
}
log.info("shutting down trust subscriber")
trustSubscriber.stop()
log.info("shutting down download manageer")
downloadManager.shutdown()
log.info("shutting down connection acceeptor")
connectionAcceptor.stop()
log.info("shutting down connection establisher")
connectionEstablisher.stop()
log.info("shutting down directory watcher")
directoryWatcher.stop()
log.info("shutting down cache client")
cacheClient.stop()
log.info("shutting down connection manager")
connectionManager.shutdown()
if (router != null) {
log.info("shutting down embedded router")
router.shutdown(0)
}
}
static main(args) {
@ -216,7 +358,7 @@ public class Core {
log.info("creating home dir")
home.mkdir()
}
def props = new Properties()
def propsFile = new File(home, "MuWire.properties")
if (propsFile.exists()) {
@ -232,10 +374,10 @@ public class Core {
props.write(it)
}
}
Core core = new Core(props, home, "0.0.7")
Core core = new Core(props, home, "0.5.0")
core.startServices()
// ... at the end, sleep or execute script
if (args.length == 0) {
log.info("initialized everything, sleeping")

View File

@ -4,17 +4,17 @@ import java.util.concurrent.atomic.AtomicLong
class Event {
private static final AtomicLong SEQ_NO = new AtomicLong();
final long seqNo
final long timestamp
Event() {
seqNo = SEQ_NO.getAndIncrement()
timestamp = System.currentTimeMillis()
}
@Override
public String toString() {
"seqNo $seqNo timestamp $timestamp"
}
private static final AtomicLong SEQ_NO = new AtomicLong();
final long seqNo
final long timestamp
Event() {
seqNo = SEQ_NO.getAndIncrement()
timestamp = System.currentTimeMillis()
}
@Override
public String toString() {
"seqNo $seqNo timestamp $timestamp"
}
}

View File

@ -3,44 +3,54 @@ package com.muwire.core
import java.util.concurrent.CopyOnWriteArrayList
import java.util.concurrent.Executor
import java.util.concurrent.Executors
import java.util.logging.Level
import com.muwire.core.files.FileSharedEvent
import groovy.util.logging.Log
@Log
class EventBus {
private Map handlers = new HashMap()
private final Executor executor = Executors.newSingleThreadExecutor {r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("event-bus")
rv
}
void publish(Event e) {
executor.execute({publishInternal(e)} as Runnable)
}
private void publishInternal(Event e) {
log.fine "publishing event $e of type ${e.getClass().getSimpleName()}"
def currentHandlers
final def clazz = e.getClass()
synchronized(this) {
currentHandlers = handlers.getOrDefault(clazz, [])
}
currentHandlers.each {
it."on${clazz.getSimpleName()}"(e)
}
}
synchronized void register(Class<? extends Event> eventType, def handler) {
log.info "Registering $handler for type $eventType"
def currentHandlers = handlers.get(eventType)
if (currentHandlers == null) {
currentHandlers = new CopyOnWriteArrayList()
handlers.put(eventType, currentHandlers)
}
currentHandlers.add handler
}
private Map handlers = new HashMap()
private final Executor executor = Executors.newSingleThreadExecutor {r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("event-bus")
rv
}
void publish(Event e) {
executor.execute({publishInternal(e)} as Runnable)
}
private void publishInternal(Event e) {
log.fine "publishing event $e of type ${e.getClass().getSimpleName()} event $e"
def currentHandlers
final def clazz = e.getClass()
synchronized(this) {
currentHandlers = handlers.getOrDefault(clazz, [])
}
currentHandlers.each {
try {
it."on${clazz.getSimpleName()}"(e)
} catch (Exception bad) {
log.log(Level.SEVERE, "exception dispatching event",bad)
}
}
}
synchronized void register(Class<? extends Event> eventType, def handler) {
log.info "Registering $handler for type $eventType"
def currentHandlers = handlers.get(eventType)
if (currentHandlers == null) {
currentHandlers = new CopyOnWriteArrayList()
handlers.put(eventType, currentHandlers)
}
currentHandlers.add handler
}
synchronized void unregister(Class<? extends Event> eventType, def handler) {
log.info("Unregistering $handler for type $eventType")
handlers[eventType]?.remove(handler)
}
}

View File

@ -13,5 +13,5 @@ class InvalidSignatureException extends Exception {
public InvalidSignatureException(Throwable cause) {
super(cause);
}
}

View File

@ -1,68 +1,165 @@
package com.muwire.core
import java.util.stream.Collectors
import com.muwire.core.hostcache.CrawlerResponse
import com.muwire.core.util.DataUtil
import net.i2p.data.Base64
import net.i2p.util.ConcurrentHashSet
class MuWireSettings {
final boolean isLeaf
boolean allowUntrusted
boolean searchExtraHop
boolean allowTrustLists
int trustListInterval
Set<Persona> trustSubscriptions
int downloadRetryInterval
int updateCheckInterval
boolean autoDownloadUpdate
String updateType
String nickname
File downloadLocation
String sharedFiles
CrawlerResponse crawlerResponse
MuWireSettings() {
boolean shareDownloadedFiles
boolean shareHiddenFiles
boolean searchComments
boolean browseFiles
Set<String> watchedDirectories
float downloadSequentialRatio
int hostClearInterval, hostHopelessInterval, hostRejectInterval
int meshExpiration
boolean embeddedRouter
int inBw, outBw
Set<String> watchedKeywords
Set<String> watchedRegexes
MuWireSettings() {
this(new Properties())
}
MuWireSettings(Properties props) {
isLeaf = Boolean.valueOf(props.get("leaf","false"))
allowUntrusted = Boolean.valueOf(props.get("allowUntrusted","true"))
crawlerResponse = CrawlerResponse.valueOf(props.get("crawlerResponse","REGISTERED"))
MuWireSettings(Properties props) {
isLeaf = Boolean.valueOf(props.get("leaf","false"))
allowUntrusted = Boolean.valueOf(props.getProperty("allowUntrusted","true"))
searchExtraHop = Boolean.valueOf(props.getProperty("searchExtraHop","false"))
allowTrustLists = Boolean.valueOf(props.getProperty("allowTrustLists","true"))
trustListInterval = Integer.valueOf(props.getProperty("trustListInterval","1"))
crawlerResponse = CrawlerResponse.valueOf(props.get("crawlerResponse","REGISTERED"))
nickname = props.getProperty("nickname","MuWireUser")
downloadLocation = new File((String)props.getProperty("downloadLocation",
downloadLocation = new File((String)props.getProperty("downloadLocation",
System.getProperty("user.home")))
sharedFiles = props.getProperty("sharedFiles")
downloadRetryInterval = Integer.parseInt(props.getProperty("downloadRetryInterval","15"))
updateCheckInterval = Integer.parseInt(props.getProperty("updateCheckInterval","36"))
}
downloadRetryInterval = Integer.parseInt(props.getProperty("downloadRetryInterval","60"))
updateCheckInterval = Integer.parseInt(props.getProperty("updateCheckInterval","24"))
autoDownloadUpdate = Boolean.parseBoolean(props.getProperty("autoDownloadUpdate","true"))
updateType = props.getProperty("updateType","jar")
shareDownloadedFiles = Boolean.parseBoolean(props.getProperty("shareDownloadedFiles","true"))
shareHiddenFiles = Boolean.parseBoolean(props.getProperty("shareHiddenFiles","false"))
downloadSequentialRatio = Float.valueOf(props.getProperty("downloadSequentialRatio","0.8"))
hostClearInterval = Integer.valueOf(props.getProperty("hostClearInterval","15"))
hostHopelessInterval = Integer.valueOf(props.getProperty("hostHopelessInterval", "1440"))
hostRejectInterval = Integer.valueOf(props.getProperty("hostRejectInterval", "1"))
meshExpiration = Integer.valueOf(props.getProperty("meshExpiration","60"))
embeddedRouter = Boolean.valueOf(props.getProperty("embeddedRouter","false"))
inBw = Integer.valueOf(props.getProperty("inBw","256"))
outBw = Integer.valueOf(props.getProperty("outBw","128"))
searchComments = Boolean.valueOf(props.getProperty("searchComments","true"))
browseFiles = Boolean.valueOf(props.getProperty("browseFiles","true"))
watchedDirectories = readEncodedSet(props, "watchedDirectories")
watchedKeywords = readEncodedSet(props, "watchedKeywords")
watchedRegexes = readEncodedSet(props, "watchedRegexes")
trustSubscriptions = new HashSet<>()
if (props.containsKey("trustSubscriptions")) {
props.getProperty("trustSubscriptions").split(",").each {
trustSubscriptions.add(new Persona(new ByteArrayInputStream(Base64.decode(it))))
}
}
}
void write(OutputStream out) throws IOException {
Properties props = new Properties()
props.setProperty("leaf", isLeaf.toString())
props.setProperty("allowUntrusted", allowUntrusted.toString())
props.setProperty("searchExtraHop", String.valueOf(searchExtraHop))
props.setProperty("allowTrustLists", String.valueOf(allowTrustLists))
props.setProperty("trustListInterval", String.valueOf(trustListInterval))
props.setProperty("crawlerResponse", crawlerResponse.toString())
props.setProperty("nickname", nickname)
props.setProperty("downloadLocation", downloadLocation.getAbsolutePath())
props.setProperty("downloadRetryInterval", String.valueOf(downloadRetryInterval))
props.setProperty("updateCheckInterval", String.valueOf(updateCheckInterval))
if (sharedFiles != null)
props.setProperty("sharedFiles", sharedFiles)
props.setProperty("autoDownloadUpdate", String.valueOf(autoDownloadUpdate))
props.setProperty("updateType",String.valueOf(updateType))
props.setProperty("shareDownloadedFiles", String.valueOf(shareDownloadedFiles))
props.setProperty("shareHiddenFiles", String.valueOf(shareHiddenFiles))
props.setProperty("downloadSequentialRatio", String.valueOf(downloadSequentialRatio))
props.setProperty("hostClearInterval", String.valueOf(hostClearInterval))
props.setProperty("hostHopelessInterval", String.valueOf(hostHopelessInterval))
props.setProperty("hostRejectInterval", String.valueOf(hostRejectInterval))
props.setProperty("meshExpiration", String.valueOf(meshExpiration))
props.setProperty("embeddedRouter", String.valueOf(embeddedRouter))
props.setProperty("inBw", String.valueOf(inBw))
props.setProperty("outBw", String.valueOf(outBw))
props.setProperty("searchComments", String.valueOf(searchComments))
props.setProperty("browseFiles", String.valueOf(browseFiles))
writeEncodedSet(watchedDirectories, "watchedDirectories", props)
writeEncodedSet(watchedKeywords, "watchedKeywords", props)
writeEncodedSet(watchedRegexes, "watchedRegexes", props)
if (!trustSubscriptions.isEmpty()) {
String encoded = trustSubscriptions.stream().
map({it.toBase64()}).
collect(Collectors.joining(","))
props.setProperty("trustSubscriptions", encoded)
}
props.store(out, "")
}
boolean isLeaf() {
isLeaf
}
boolean allowUntrusted() {
allowUntrusted
}
void setAllowUntrusted(boolean allowUntrusted) {
this.allowUntrusted = allowUntrusted
}
CrawlerResponse getCrawlerResponse() {
crawlerResponse
}
void setCrawlerResponse(CrawlerResponse crawlerResponse) {
this.crawlerResponse = crawlerResponse
}
private static Set<String> readEncodedSet(Properties props, String property) {
Set<String> rv = new ConcurrentHashSet<>()
if (props.containsKey(property)) {
String[] encoded = props.getProperty(property).split(",")
encoded.each { rv << DataUtil.readi18nString(Base64.decode(it)) }
}
rv
}
private static void writeEncodedSet(Set<String> set, String property, Properties props) {
if (set.isEmpty())
return
String encoded = set.stream().
map({Base64.encode(DataUtil.encodei18nString(it))}).
collect(Collectors.joining(","))
props.setProperty(property, encoded)
}
boolean isLeaf() {
isLeaf
}
boolean allowUntrusted() {
allowUntrusted
}
void setAllowUntrusted(boolean allowUntrusted) {
this.allowUntrusted = allowUntrusted
}
CrawlerResponse getCrawlerResponse() {
crawlerResponse
}
void setCrawlerResponse(CrawlerResponse crawlerResponse) {
this.crawlerResponse = crawlerResponse
}
String getNickname() {
nickname
}

View File

@ -7,11 +7,11 @@ import java.nio.charset.StandardCharsets
*/
public class Name {
final String name
Name(String name) {
this.name = name
}
Name(InputStream nameStream) throws IOException {
DataInputStream dis = new DataInputStream(nameStream)
int length = dis.readUnsignedShort()
@ -19,22 +19,22 @@ public class Name {
dis.readFully(nameBytes)
this.name = new String(nameBytes, StandardCharsets.UTF_8)
}
public void write(OutputStream out) throws IOException {
DataOutputStream dos = new DataOutputStream(out)
dos.writeShort(name.length())
dos.write(name.getBytes(StandardCharsets.UTF_8))
}
public getName() {
name
}
@Override
public int hashCode() {
name.hashCode()
}
@Override
public boolean equals(Object o) {
if (!(o instanceof Name))

View File

@ -9,7 +9,7 @@ import net.i2p.data.SigningPublicKey
public class Persona {
private static final int SIG_LEN = Constants.SIG_TYPE.getSigLen()
private final byte version
private final Name name
private final Destination destination
@ -17,12 +17,12 @@ public class Persona {
private volatile String humanReadableName
private volatile String base64
private volatile byte[] payload
public Persona(InputStream personaStream) throws IOException, InvalidSignatureException {
version = (byte) (personaStream.read() & 0xFF)
if (version != Constants.PERSONA_VERSION)
throw new IOException("Unknown version "+version)
name = new Name(personaStream)
destination = Destination.create(personaStream)
sig = new byte[SIG_LEN]
@ -31,7 +31,7 @@ public class Persona {
if (!verify(version, name, destination, sig))
throw new InvalidSignatureException(getHumanReadableName() + " didn't verify")
}
private static boolean verify(byte version, Name name, Destination destination, byte [] sig) {
ByteArrayOutputStream baos = new ByteArrayOutputStream()
baos.write(version)
@ -42,7 +42,7 @@ public class Persona {
Signature signature = new Signature(Constants.SIG_TYPE, sig)
DSAEngine.getInstance().verifySignature(signature, payload, spk)
}
public void write(OutputStream out) throws IOException {
if (payload == null) {
ByteArrayOutputStream baos = new ByteArrayOutputStream()
@ -54,13 +54,13 @@ public class Persona {
}
out.write(payload)
}
public String getHumanReadableName() {
if (humanReadableName == null)
if (humanReadableName == null)
humanReadableName = name.getName() + "@" + destination.toBase32().substring(0,32)
humanReadableName
}
public String toBase64() {
if (base64 == null) {
def baos = new ByteArrayOutputStream()
@ -69,12 +69,12 @@ public class Persona {
}
base64
}
@Override
public int hashCode() {
name.hashCode() ^ destination.hashCode()
}
@Override
public boolean equals(Object o) {
if (!(o instanceof Persona))
@ -82,4 +82,13 @@ public class Persona {
Persona other = (Persona)o
name.equals(other.name) && destination.equals(other.destination)
}
public static void main(String []args) {
if (args.length != 1) {
println "This utility decodes a bas64-encoded persona"
System.exit(1)
}
Persona p = new Persona(new ByteArrayInputStream(Base64.decode(args[0])))
println p.getHumanReadableName()
}
}

View File

@ -0,0 +1,4 @@
package com.muwire.core
class RouterDisconnectedEvent extends Event {
}

View File

@ -2,12 +2,12 @@ package com.muwire.core
abstract class Service {
volatile boolean loaded
abstract void load()
void waitForLoad() {
while (!loaded)
Thread.sleep(10)
}
volatile boolean loaded
abstract void load()
void waitForLoad() {
while (!loaded)
Thread.sleep(10)
}
}

View File

@ -0,0 +1,7 @@
package com.muwire.core
class SplitPattern {
public static final String SPLIT_PATTERN = "[\\*\\+\\-,\\.:;\\(\\)=_/\\\\\\!\\\"\\\'\\\$%\\|\\[\\]\\{\\}\\?]";
}

View File

@ -0,0 +1,4 @@
package com.muwire.core
class UILoadedEvent extends Event {
}

View File

@ -6,6 +6,7 @@ import java.util.concurrent.atomic.AtomicBoolean
import java.util.logging.Level
import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.hostcache.HostCache
import com.muwire.core.hostcache.HostDiscoveredEvent
@ -21,101 +22,108 @@ import net.i2p.data.Destination
@Log
abstract class Connection implements Closeable {
final EventBus eventBus
final Endpoint endpoint
final boolean incoming
final HostCache hostCache
private static final int SEARCHES = 10
private static final long INTERVAL = 1000
final EventBus eventBus
final Endpoint endpoint
final boolean incoming
final HostCache hostCache
final TrustService trustService
private final AtomicBoolean running = new AtomicBoolean()
private final BlockingQueue messages = new LinkedBlockingQueue()
private final Thread reader, writer
protected final String name
long lastPingSentTime, lastPongReceivedTime
Connection(EventBus eventBus, Endpoint endpoint, boolean incoming, HostCache hostCache, TrustService trustService) {
this.eventBus = eventBus
this.incoming = incoming
this.endpoint = endpoint
this.hostCache = hostCache
final MuWireSettings settings
private final AtomicBoolean running = new AtomicBoolean()
private final BlockingQueue messages = new LinkedBlockingQueue()
private final Thread reader, writer
private final LinkedList<Long> searchTimestamps = new LinkedList<>()
protected final String name
long lastPingSentTime, lastPongReceivedTime
Connection(EventBus eventBus, Endpoint endpoint, boolean incoming,
HostCache hostCache, TrustService trustService, MuWireSettings settings) {
this.eventBus = eventBus
this.incoming = incoming
this.endpoint = endpoint
this.hostCache = hostCache
this.trustService = trustService
this.name = endpoint.destination.toBase32().substring(0,8)
this.reader = new Thread({readLoop()} as Runnable)
this.reader.setName("reader-$name")
this.reader.setDaemon(true)
this.writer = new Thread({writeLoop()} as Runnable)
this.writer.setName("writer-$name")
this.writer.setDaemon(true)
}
/**
* starts the connection threads
*/
void start() {
if (!running.compareAndSet(false, true)) {
log.log(Level.WARNING,"$name already running", new Exception())
return
}
reader.start()
writer.start()
}
@Override
public void close() {
if (!running.compareAndSet(true, false)) {
log.log(Level.WARNING, "$name already closed", new Exception() )
return
}
this.settings = settings
this.name = endpoint.destination.toBase32().substring(0,8)
this.reader = new Thread({readLoop()} as Runnable)
this.reader.setName("reader-$name")
this.reader.setDaemon(true)
this.writer = new Thread({writeLoop()} as Runnable)
this.writer.setName("writer-$name")
this.writer.setDaemon(true)
}
/**
* starts the connection threads
*/
void start() {
if (!running.compareAndSet(false, true)) {
log.log(Level.WARNING,"$name already running", new Exception())
return
}
reader.start()
writer.start()
}
@Override
public void close() {
if (!running.compareAndSet(true, false)) {
log.log(Level.WARNING, "$name already closed", new Exception() )
return
}
log.info("closing $name")
reader.interrupt()
writer.interrupt()
endpoint.close()
reader.interrupt()
writer.interrupt()
eventBus.publish(new DisconnectionEvent(destination: endpoint.destination))
}
protected void readLoop() {
try {
while(running.get()) {
read()
}
} catch (SocketTimeoutException e) {
eventBus.publish(new DisconnectionEvent(destination: endpoint.destination))
}
protected void readLoop() {
try {
while(running.get()) {
read()
}
} catch (SocketTimeoutException e) {
} catch (Exception e) {
log.log(Level.WARNING,"unhandled exception in reader",e)
} finally {
close()
}
}
protected abstract void read()
protected void writeLoop() {
try {
while(running.get()) {
def message = messages.take()
write(message)
}
} catch (Exception e) {
}
protected abstract void read()
protected void writeLoop() {
try {
while(running.get()) {
def message = messages.take()
write(message)
}
} catch (Exception e) {
log.log(Level.WARNING, "unhandled exception in writer",e)
} finally {
close()
}
}
protected abstract void write(def message);
void sendPing() {
def ping = [:]
ping.type = "Ping"
ping.version = 1
messages.put(ping)
lastPingSentTime = System.currentTimeMillis()
}
}
protected abstract void write(def message);
void sendPing() {
def ping = [:]
ping.type = "Ping"
ping.version = 1
messages.put(ping)
lastPingSentTime = System.currentTimeMillis()
}
void sendQuery(QueryEvent e) {
def query = [:]
query.type = "Search"
@ -123,44 +131,73 @@ abstract class Connection implements Closeable {
query.uuid = e.searchEvent.getUuid()
query.firstHop = e.firstHop
query.keywords = e.searchEvent.getSearchTerms()
query.oobInfohash = e.searchEvent.oobInfohash
query.searchComments = e.searchEvent.searchComments
if (e.searchEvent.searchHash != null)
query.infohash = Base64.encode(e.searchEvent.searchHash)
query.replyTo = e.replyTo.toBase64()
if (e.originator != null)
query.originator = e.originator.toBase64()
messages.put(query)
}
protected void handlePing() {
log.fine("$name received ping")
def pong = [:]
pong.type = "Pong"
pong.version = 1
pong.pongs = hostCache.getGoodHosts(10).collect { d -> d.toBase64() }
messages.put(pong)
}
protected void handlePong(def pong) {
log.fine("$name received pong")
lastPongReceivedTime = System.currentTimeMillis()
if (pong.pongs == null)
throw new Exception("Pong doesn't have pongs")
pong.pongs.each {
def dest = new Destination(it)
eventBus.publish(new HostDiscoveredEvent(destination: dest))
}
}
protected void handlePing() {
log.fine("$name received ping")
def pong = [:]
pong.type = "Pong"
pong.version = 1
pong.pongs = hostCache.getGoodHosts(10).collect { d -> d.toBase64() }
messages.put(pong)
}
protected void handlePong(def pong) {
log.fine("$name received pong")
lastPongReceivedTime = System.currentTimeMillis()
if (pong.pongs == null)
throw new Exception("Pong doesn't have pongs")
pong.pongs.each {
def dest = new Destination(it)
eventBus.publish(new HostDiscoveredEvent(destination: dest))
}
}
private boolean throttleSearch() {
final long now = System.currentTimeMillis()
if (searchTimestamps.size() < SEARCHES) {
searchTimestamps.addLast(now)
return false
}
Long oldest = searchTimestamps.getFirst()
if (now - oldest.longValue() < INTERVAL)
return true
searchTimestamps.addLast(now)
searchTimestamps.removeFirst()
false
}
protected void handleSearch(def search) {
if (throttleSearch()) {
log.info("dropping excessive search")
return
}
UUID uuid = UUID.fromString(search.uuid)
if (search.infohash != null)
byte [] infohash = null
if (search.infohash != null) {
search.keywords = null
infohash = Base64.decode(search.infohash)
}
Destination replyTo = new Destination(search.replyTo)
if (trustService.getLevel(replyTo) == TrustLevel.DISTRUSTED) {
TrustLevel trustLevel = trustService.getLevel(replyTo)
if (trustLevel == TrustLevel.DISTRUSTED) {
log.info "dropping search from distrusted peer"
return
}
// TODO: add option to respond only to trusted peers
if (trustLevel == TrustLevel.NEUTRAL && !settings.allowUntrusted()) {
log.info("dropping search from neutral peer")
return
}
Persona originator = null
if (search.originator != null) {
originator = new Persona(new ByteArrayInputStream(Base64.decode(search.originator)))
@ -169,17 +206,25 @@ abstract class Connection implements Closeable {
return
}
}
boolean oob = false
if (search.oobInfohash != null)
oob = search.oobInfohash
boolean searchComments = false
if (search.searchComments != null)
searchComments = search.searchComments
SearchEvent searchEvent = new SearchEvent(searchTerms : search.keywords,
searchHash : search.infohash,
uuid : uuid)
searchHash : infohash,
uuid : uuid,
oobInfohash : oob,
searchComments : searchComments)
QueryEvent event = new QueryEvent ( searchEvent : searchEvent,
replyTo : replyTo,
originator : originator,
receivedOn : endpoint.destination,
firstHop : search.firstHop )
eventBus.publish(event)
}
}

View File

@ -10,13 +10,18 @@ import java.util.zip.InflaterInputStream
import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.files.FileManager
import com.muwire.core.hostcache.HostCache
import com.muwire.core.trust.TrustLevel
import com.muwire.core.trust.TrustService
import com.muwire.core.upload.UploadManager
import com.muwire.core.util.DataUtil
import com.muwire.core.search.InvalidSearchResultException
import com.muwire.core.search.ResultsParser
import com.muwire.core.search.ResultsSender
import com.muwire.core.search.SearchManager
import com.muwire.core.search.UIResultBatchEvent
import com.muwire.core.search.UIResultEvent
import com.muwire.core.search.UnexpectedResultsException
import groovy.json.JsonOutput
@ -26,156 +31,190 @@ import groovy.util.logging.Log
@Log
class ConnectionAcceptor {
final EventBus eventBus
final UltrapeerConnectionManager manager
final MuWireSettings settings
final I2PAcceptor acceptor
final HostCache hostCache
final TrustService trustService
final SearchManager searchManager
final EventBus eventBus
final UltrapeerConnectionManager manager
final MuWireSettings settings
final I2PAcceptor acceptor
final HostCache hostCache
final TrustService trustService
final SearchManager searchManager
final UploadManager uploadManager
final ExecutorService acceptorThread
final ExecutorService handshakerThreads
ConnectionAcceptor(EventBus eventBus, UltrapeerConnectionManager manager,
MuWireSettings settings, I2PAcceptor acceptor, HostCache hostCache,
TrustService trustService, SearchManager searchManager, UploadManager uploadManager) {
this.eventBus = eventBus
this.manager = manager
this.settings = settings
this.acceptor = acceptor
this.hostCache = hostCache
this.trustService = trustService
final FileManager fileManager
final ConnectionEstablisher establisher
final ExecutorService acceptorThread
final ExecutorService handshakerThreads
private volatile shutdown
ConnectionAcceptor(EventBus eventBus, UltrapeerConnectionManager manager,
MuWireSettings settings, I2PAcceptor acceptor, HostCache hostCache,
TrustService trustService, SearchManager searchManager, UploadManager uploadManager,
FileManager fileManager, ConnectionEstablisher establisher) {
this.eventBus = eventBus
this.manager = manager
this.settings = settings
this.acceptor = acceptor
this.hostCache = hostCache
this.trustService = trustService
this.searchManager = searchManager
this.fileManager = fileManager
this.uploadManager = uploadManager
acceptorThread = Executors.newSingleThreadExecutor { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("acceptor")
rv
}
handshakerThreads = Executors.newCachedThreadPool { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("acceptor-processor-${System.currentTimeMillis()}")
rv
}
}
void start() {
acceptorThread.execute({acceptLoop()} as Runnable)
}
void stop() {
acceptorThread.shutdownNow()
handshakerThreads.shutdownNow()
}
private void acceptLoop() {
while(true) {
def incoming = acceptor.accept()
log.info("accepted connection from ${incoming.destination.toBase32()}")
switch(trustService.getLevel(incoming.destination)) {
case TrustLevel.TRUSTED : break
case TrustLevel.NEUTRAL :
if (settings.allowUntrusted())
break
case TrustLevel.DISTRUSTED :
log.info("Disallowing distrusted connection")
incoming.close()
continue
}
handshakerThreads.execute({processIncoming(incoming)} as Runnable)
}
}
private void processIncoming(Endpoint e) {
InputStream is = e.inputStream
try {
int read = is.read()
switch(read) {
case (byte)'M':
this.establisher = establisher
acceptorThread = Executors.newSingleThreadExecutor { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("acceptor")
rv
}
handshakerThreads = Executors.newCachedThreadPool { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("acceptor-processor-${System.currentTimeMillis()}")
rv
}
}
void start() {
acceptorThread.execute({acceptLoop()} as Runnable)
}
void stop() {
shutdown = true
acceptorThread.shutdownNow()
handshakerThreads.shutdownNow()
}
private void acceptLoop() {
try {
while(true) {
def incoming = acceptor.accept()
log.info("accepted connection from ${incoming.destination.toBase32()}")
switch(trustService.getLevel(incoming.destination)) {
case TrustLevel.TRUSTED : break
case TrustLevel.NEUTRAL :
if (settings.allowUntrusted())
break
case TrustLevel.DISTRUSTED :
log.info("Disallowing distrusted connection")
incoming.close()
continue
}
handshakerThreads.execute({processIncoming(incoming)} as Runnable)
}
} catch (Exception e) {
log.log(Level.WARNING, "exception in accept loop",e)
if (!shutdown)
throw e
}
}
private void processIncoming(Endpoint e) {
InputStream is = e.inputStream
try {
int read = is.read()
switch(read) {
case (byte)'M':
if (settings.isLeaf())
throw new IOException("Incoming connection as leaf")
processMuWire(e)
break
case (byte)'G':
processGET(e)
break
processMuWire(e)
break
case (byte)'G':
processGET(e)
break
case (byte)'H':
processHashList(e)
break
case (byte)'P':
processPOST(e)
break
default:
throw new Exception("Invalid read $read")
}
} catch (Exception ex) {
log.log(Level.WARNING, "incoming connection failed",ex)
e.close()
eventBus.publish new ConnectionEvent(endpoint: e, incoming: true, leaf: null, status: ConnectionAttemptStatus.FAILED)
}
}
private void processMuWire(Endpoint e) {
byte[] uWire = "uWire ".bytes
for (int i = 0; i < uWire.length; i++) {
int read = e.inputStream.read()
if (read != uWire[i]) {
throw new IOException("unexpected value $read at position $i")
}
}
byte[] type = new byte[4]
DataInputStream dis = new DataInputStream(e.inputStream)
dis.readFully(type)
case (byte)'T':
processTRUST(e)
break
case (byte)'B':
processBROWSE(e)
break
default:
throw new Exception("Invalid read $read")
}
} catch (Exception ex) {
log.log(Level.WARNING, "incoming connection failed",ex)
e.close()
eventBus.publish new ConnectionEvent(endpoint: e, incoming: true, leaf: null, status: ConnectionAttemptStatus.FAILED)
}
}
private void processMuWire(Endpoint e) {
byte[] uWire = "uWire ".bytes
for (int i = 0; i < uWire.length; i++) {
int read = e.inputStream.read()
if (read != uWire[i]) {
throw new IOException("unexpected value $read at position $i")
}
}
byte[] type = new byte[4]
DataInputStream dis = new DataInputStream(e.inputStream)
dis.readFully(type)
if (type == "leaf".bytes)
handleIncoming(e, true)
else if (type == "peer".bytes)
handleIncoming(e, false)
else
else
throw new IOException("unknown connection type $type")
}
private void handleIncoming(Endpoint e, boolean leaf) {
boolean accept = !manager.isConnected(e.destination) && (leaf ? manager.hasLeafSlots() : manager.hasPeerSlots())
if (accept) {
log.info("accepting connection, leaf:$leaf")
e.outputStream.write("OK".bytes)
e.outputStream.flush()
def wrapped = new Endpoint(e.destination, new InflaterInputStream(e.inputStream), new DeflaterOutputStream(e.outputStream, true), e.toClose)
eventBus.publish(new ConnectionEvent(endpoint: wrapped, incoming: true, leaf: leaf, status: ConnectionAttemptStatus.SUCCESSFUL))
} else {
log.info("rejecting connection, leaf:$leaf")
e.outputStream.write("REJECT".bytes)
def hosts = hostCache.getGoodHosts(10)
if (!hosts.isEmpty()) {
def json = [:]
json.tryHosts = hosts.collect { d -> d.toBase64() }
json = JsonOutput.toJson(json)
def os = new DataOutputStream(e.outputStream)
os.writeShort(json.bytes.length)
os.write(json.bytes)
}
e.outputStream.flush()
e.close()
eventBus.publish(new ConnectionEvent(endpoint: e, incoming: true, leaf: leaf, status: ConnectionAttemptStatus.REJECTED))
}
}
private void processGET(Endpoint e) {
private void handleIncoming(Endpoint e, boolean leaf) {
boolean accept = !manager.isConnected(e.destination) &&
!establisher.isInProgress(e.destination) &&
(leaf ? manager.hasLeafSlots() : manager.hasPeerSlots())
if (accept) {
log.info("accepting connection, leaf:$leaf")
e.outputStream.write("OK".bytes)
e.outputStream.flush()
def wrapped = new Endpoint(e.destination, new InflaterInputStream(e.inputStream), new DeflaterOutputStream(e.outputStream, true), e.toClose)
eventBus.publish(new ConnectionEvent(endpoint: wrapped, incoming: true, leaf: leaf, status: ConnectionAttemptStatus.SUCCESSFUL))
} else {
log.info("rejecting connection, leaf:$leaf")
e.outputStream.write("REJECT".bytes)
def hosts = hostCache.getGoodHosts(10)
if (!hosts.isEmpty()) {
def json = [:]
json.tryHosts = hosts.collect { d -> d.toBase64() }
json = JsonOutput.toJson(json)
def os = new DataOutputStream(e.outputStream)
os.writeShort(json.bytes.length)
os.write(json.bytes)
}
e.outputStream.flush()
e.close()
eventBus.publish(new ConnectionEvent(endpoint: e, incoming: true, leaf: leaf, status: ConnectionAttemptStatus.REJECTED))
}
}
private void processGET(Endpoint e) {
byte[] et = new byte[3]
final DataInputStream dis = new DataInputStream(e.getInputStream())
dis.readFully(et)
if (et != "ET ".getBytes(StandardCharsets.US_ASCII))
throw new IOException("Invalid GET connection")
uploadManager.processEndpoint(e)
}
uploadManager.processGET(e)
}
private void processHashList(Endpoint e) {
byte[] ashList = new byte[8]
final DataInputStream dis = new DataInputStream(e.getInputStream())
dis.readFully(ashList)
if (ashList != "ASHLIST ".getBytes(StandardCharsets.US_ASCII))
throw new IOException("Invalid HASHLIST connection")
uploadManager.processHashList(e)
}
private void processPOST(final Endpoint e) throws IOException {
byte [] ost = new byte[4]
final DataInputStream dis = new DataInputStream(e.getInputStream())
@ -199,18 +238,101 @@ class ConnectionAcceptor {
if (sender.destination != e.getDestination())
throw new IOException("Sender destination mismatch expected $e.getDestination(), got $sender.destination")
int nResults = dis.readUnsignedShort()
UIResultEvent[] results = new UIResultEvent[nResults]
for (int i = 0; i < nResults; i++) {
int jsonSize = dis.readUnsignedShort()
byte [] payload = new byte[jsonSize]
dis.readFully(payload)
def json = slurper.parse(payload)
eventBus.publish(ResultsParser.parse(sender, resultsUUID, json))
results[i] = ResultsParser.parse(sender, resultsUUID, json)
}
eventBus.publish(new UIResultBatchEvent(uuid: resultsUUID, results: results))
} catch (IOException | UnexpectedResultsException | InvalidSearchResultException bad) {
log.log(Level.WARNING, "failed to process POST", bad)
} finally {
e.close()
}
}
private void processBROWSE(Endpoint e) {
try {
byte [] rowse = new byte[7]
DataInputStream dis = new DataInputStream(e.getInputStream())
dis.readFully(rowse)
if (rowse != "ROWSE\r\n".getBytes(StandardCharsets.US_ASCII))
throw new IOException("Invalid BROWSE connection")
String header
while ((header = DataUtil.readTillRN(dis)) != ""); // ignore headers for now
OutputStream os = e.getOutputStream()
if (!settings.browseFiles) {
os.write("403 Not Allowed\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
os.flush()
e.close()
return
}
os.write("200 OK\r\n".getBytes(StandardCharsets.US_ASCII))
def sharedFiles = fileManager.getSharedFiles().values()
os.write("Count: ${sharedFiles.size()}\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
DataOutputStream dos = new DataOutputStream(os)
JsonOutput jsonOutput = new JsonOutput()
sharedFiles.each {
def obj = ResultsSender.sharedFileToObj(it, false)
def json = jsonOutput.toJson(obj)
dos.writeShort((short)json.length())
dos.write(json.getBytes(StandardCharsets.US_ASCII))
}
dos.flush()
} finally {
e.close()
}
}
private void processTRUST(Endpoint e) {
try {
byte[] RUST = new byte[6]
DataInputStream dis = new DataInputStream(e.getInputStream())
dis.readFully(RUST)
if (RUST != "RUST\r\n".getBytes(StandardCharsets.US_ASCII))
throw new IOException("Invalid TRUST connection")
String header
while ((header = DataUtil.readTillRN(dis)) != ""); // ignore headers for now
OutputStream os = e.getOutputStream()
if (!settings.allowTrustLists) {
os.write("403 Not Allowed\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
os.flush()
e.close()
return
}
os.write("200 OK\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
List<Persona> good = new ArrayList<>(trustService.good.values())
int size = Math.min(Short.MAX_VALUE * 2, good.size())
good = good.subList(0, size)
DataOutputStream dos = new DataOutputStream(os)
dos.writeShort(size)
good.each {
it.write(dos)
}
List<Persona> bad = new ArrayList<>(trustService.bad.values())
size = Math.min(Short.MAX_VALUE * 2, bad.size())
bad = bad.subList(0, size)
dos.writeShort(size)
bad.each {
it.write(dos)
}
dos.flush()
} finally {
e.close()
}
}
}

View File

@ -21,159 +21,170 @@ import net.i2p.util.ConcurrentHashSet
@Log
class ConnectionEstablisher {
private static final int CONCURRENT = 4
final EventBus eventBus
final I2PConnector i2pConnector
final MuWireSettings settings
final ConnectionManager connectionManager
final HostCache hostCache
final Timer timer
final ExecutorService executor
final Set inProgress = new ConcurrentHashSet()
ConnectionEstablisher(EventBus eventBus, I2PConnector i2pConnector, MuWireSettings settings,
ConnectionManager connectionManager, HostCache hostCache) {
this.eventBus = eventBus
this.i2pConnector = i2pConnector
this.settings = settings
this.connectionManager = connectionManager
this.hostCache = hostCache
timer = new Timer("connection-timer",true)
executor = Executors.newFixedThreadPool(CONCURRENT, { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("connector-${System.currentTimeMillis()}")
rv
} as ThreadFactory)
}
void start() {
timer.schedule({connectIfNeeded()} as TimerTask, 100, 1000)
}
void stop() {
timer.cancel()
executor.shutdownNow()
}
private void connectIfNeeded() {
if (!connectionManager.needsConnections())
return
if (inProgress.size() >= CONCURRENT)
return
private static final int CONCURRENT = 4
def toTry = null
for (int i = 0; i < 5; i++) {
toTry = hostCache.getHosts(1)
if (toTry.isEmpty())
return
toTry = toTry[0]
if (!connectionManager.isConnected(toTry) &&
!inProgress.contains(toTry)) {
break
}
}
if (toTry == null)
return
if (!connectionManager.isConnected(toTry) && inProgress.add(toTry))
executor.execute({connect(toTry)} as Runnable)
}
private void connect(Destination toTry) {
log.info("starting connect to ${toTry.toBase32()}")
try {
def endpoint = i2pConnector.connect(toTry)
log.info("successful transport connect to ${toTry.toBase32()}")
// outgoing handshake
endpoint.outputStream.write("MuWire ".bytes)
def type = settings.isLeaf() ? "leaf" : "peer"
endpoint.outputStream.write(type.bytes)
endpoint.outputStream.flush()
InputStream is = endpoint.inputStream
int read = is.read()
if (read == -1) {
fail endpoint
return
}
switch(read) {
case (byte)'O': readK(endpoint); break
case (byte)'R': readEJECT(endpoint); break
default :
log.warning("unknown response $read")
fail endpoint
}
} catch (Exception e) {
log.log(Level.WARNING, "Couldn't connect to ${toTry.toBase32()}", e)
def endpoint = new Endpoint(toTry, null, null, null)
fail(endpoint)
} finally {
inProgress.remove(toTry)
}
}
private void fail(Endpoint endpoint) {
endpoint.close()
eventBus.publish(new ConnectionEvent(endpoint: endpoint, incoming: false, leaf: false, status: ConnectionAttemptStatus.FAILED))
}
private void readK(Endpoint e) {
int read = e.inputStream.read()
if (read != 'K') {
log.warning("unknown response after O: $read")
fail e
return
}
log.info("connection to ${e.destination.toBase32()} established")
// wrap into deflater / inflater streams and publish
def wrapped = new Endpoint(e.destination, new InflaterInputStream(e.inputStream), new DeflaterOutputStream(e.outputStream, true), e.toClose)
eventBus.publish(new ConnectionEvent(endpoint: wrapped, incoming: false, leaf: false, status: ConnectionAttemptStatus.SUCCESSFUL))
}
private void readEJECT(Endpoint e) {
byte[] eject = "EJECT".bytes
for (int i = 0; i < eject.length; i++) {
int read = e.inputStream.read()
if (read != eject[i]) {
log.warning("Unknown response after R at position $i")
fail e
return
}
}
log.info("connection to ${e.destination.toBase32()} rejected")
eventBus.publish(new ConnectionEvent(endpoint: e, incoming: false, leaf: false, status: ConnectionAttemptStatus.REJECTED))
try {
DataInputStream dais = new DataInputStream(e.inputStream)
int payloadSize = dais.readUnsignedShort()
byte[] payload = new byte[payloadSize]
dais.readFully(payload)
final EventBus eventBus
final I2PConnector i2pConnector
final MuWireSettings settings
final ConnectionManager connectionManager
final HostCache hostCache
def json = new JsonSlurper()
json = json.parse(payload)
final Timer timer
final ExecutorService executor, closer
if (json.tryHosts == null) {
log.warning("post-rejection json didn't contain hosts to try")
return
}
final Set inProgress = new ConcurrentHashSet()
json.tryHosts.asList().each {
Destination suggested = new Destination(it)
eventBus.publish(new HostDiscoveredEvent(destination: suggested))
}
} catch (Exception ignore) {
log.log(Level.WARNING,"Problem parsing post-rejection payload",ignore)
} finally {
// the end
e.close()
}
}
ConnectionEstablisher(){}
ConnectionEstablisher(EventBus eventBus, I2PConnector i2pConnector, MuWireSettings settings,
ConnectionManager connectionManager, HostCache hostCache) {
this.eventBus = eventBus
this.i2pConnector = i2pConnector
this.settings = settings
this.connectionManager = connectionManager
this.hostCache = hostCache
timer = new Timer("connection-timer",true)
executor = Executors.newFixedThreadPool(CONCURRENT, { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("connector-${System.currentTimeMillis()}")
rv
} as ThreadFactory)
closer = Executors.newSingleThreadExecutor()
}
void start() {
timer.schedule({connectIfNeeded()} as TimerTask, 100, 1000)
}
void stop() {
timer.cancel()
executor.shutdownNow()
closer.shutdown()
}
private void connectIfNeeded() {
if (!connectionManager.needsConnections())
return
if (inProgress.size() >= CONCURRENT)
return
def toTry = null
for (int i = 0; i < 5; i++) {
toTry = hostCache.getHosts(1)
if (toTry.isEmpty())
return
toTry = toTry[0]
if (!connectionManager.isConnected(toTry) &&
!inProgress.contains(toTry)) {
break
}
}
if (toTry == null)
return
if (!connectionManager.isConnected(toTry) && inProgress.add(toTry))
executor.execute({connect(toTry)} as Runnable)
}
private void connect(Destination toTry) {
log.info("starting connect to ${toTry.toBase32()}")
try {
def endpoint = i2pConnector.connect(toTry)
log.info("successful transport connect to ${toTry.toBase32()}")
// outgoing handshake
endpoint.outputStream.write("MuWire ".bytes)
def type = settings.isLeaf() ? "leaf" : "peer"
endpoint.outputStream.write(type.bytes)
endpoint.outputStream.flush()
InputStream is = endpoint.inputStream
int read = is.read()
if (read == -1) {
fail endpoint
return
}
switch(read) {
case (byte)'O': readK(endpoint); break
case (byte)'R': readEJECT(endpoint); break
default :
log.warning("unknown response $read")
fail endpoint
}
} catch (Exception e) {
log.log(Level.WARNING, "Couldn't connect to ${toTry.toBase32()}", e)
def endpoint = new Endpoint(toTry, null, null, null)
fail(endpoint)
} finally {
inProgress.remove(toTry)
}
}
private void fail(Endpoint endpoint) {
closer.execute {
endpoint.close()
eventBus.publish(new ConnectionEvent(endpoint: endpoint, incoming: false, leaf: false, status: ConnectionAttemptStatus.FAILED))
} as Runnable
}
private void readK(Endpoint e) {
int read = e.inputStream.read()
if (read != 'K') {
log.warning("unknown response after O: $read")
fail e
return
}
log.info("connection to ${e.destination.toBase32()} established")
// wrap into deflater / inflater streams and publish
def wrapped = new Endpoint(e.destination, new InflaterInputStream(e.inputStream), new DeflaterOutputStream(e.outputStream, true), e.toClose)
eventBus.publish(new ConnectionEvent(endpoint: wrapped, incoming: false, leaf: false, status: ConnectionAttemptStatus.SUCCESSFUL))
}
private void readEJECT(Endpoint e) {
byte[] eject = "EJECT".bytes
for (int i = 0; i < eject.length; i++) {
int read = e.inputStream.read()
if (read != eject[i]) {
log.warning("Unknown response after R at position $i")
fail e
return
}
}
log.info("connection to ${e.destination.toBase32()} rejected")
eventBus.publish(new ConnectionEvent(endpoint: e, incoming: false, leaf: false, status: ConnectionAttemptStatus.REJECTED))
try {
DataInputStream dais = new DataInputStream(e.inputStream)
int payloadSize = dais.readUnsignedShort()
byte[] payload = new byte[payloadSize]
dais.readFully(payload)
def json = new JsonSlurper()
json = json.parse(payload)
if (json.tryHosts == null) {
log.warning("post-rejection json didn't contain hosts to try")
return
}
json.tryHosts.asList().each {
Destination suggested = new Destination(it)
eventBus.publish(new HostDiscoveredEvent(destination: suggested))
}
} catch (Exception ignore) {
log.log(Level.WARNING,"Problem parsing post-rejection payload",ignore)
} finally {
// the end
closer.execute({e.close()} as Runnable)
}
}
public boolean isInProgress(Destination d) {
inProgress.contains(d)
}
}

View File

@ -6,14 +6,14 @@ import net.i2p.data.Destination
class ConnectionEvent extends Event {
Endpoint endpoint
boolean incoming
Boolean leaf // can be null if uknown
ConnectionAttemptStatus status
@Override
public String toString() {
"ConnectionEvent ${super.toString()} endpoint: $endpoint incoming: $incoming leaf : $leaf status : $status"
}
Endpoint endpoint
boolean incoming
Boolean leaf // can be null if uknown
ConnectionAttemptStatus status
@Override
public String toString() {
"ConnectionEvent ${super.toString()} endpoint: $endpoint incoming: $incoming leaf : $leaf status : $status"
}
}

View File

@ -1,6 +1,7 @@
package com.muwire.core.connection
import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.hostcache.HostCache
import com.muwire.core.search.QueryEvent
@ -10,62 +11,64 @@ import com.muwire.core.trust.TrustLevel
import net.i2p.data.Destination
abstract class ConnectionManager {
private static final int PING_TIME = 20000
final EventBus eventBus
private final Timer timer
protected final HostCache hostCache
private static final int PING_TIME = 20000
final EventBus eventBus
private final Timer timer
protected final HostCache hostCache
protected final Persona me
ConnectionManager() {}
ConnectionManager(EventBus eventBus, Persona me, HostCache hostCache) {
this.eventBus = eventBus
protected final MuWireSettings settings
ConnectionManager() {}
ConnectionManager(EventBus eventBus, Persona me, HostCache hostCache, MuWireSettings settings) {
this.eventBus = eventBus
this.me = me
this.hostCache = hostCache
this.timer = new Timer("connections-pinger",true)
}
void start() {
timer.schedule({sendPings()} as TimerTask, 1000,1000)
}
void stop() {
timer.cancel()
getConnections().each { it.close() }
}
void onTrustEvent(TrustEvent e) {
if (e.level == TrustLevel.DISTRUSTED)
drop(e.persona.destination)
}
abstract void drop(Destination d)
abstract Collection<Connection> getConnections()
protected abstract int getDesiredConnections()
boolean needsConnections() {
return getConnections().size() < getDesiredConnections()
}
abstract boolean isConnected(Destination d)
abstract void onConnectionEvent(ConnectionEvent e)
abstract void onDisconnectionEvent(DisconnectionEvent e)
this.hostCache = hostCache
this.settings = settings
this.timer = new Timer("connections-pinger",true)
}
void start() {
timer.schedule({sendPings()} as TimerTask, 1000,1000)
}
void stop() {
timer.cancel()
getConnections().each { it.close() }
}
void onTrustEvent(TrustEvent e) {
if (e.level == TrustLevel.DISTRUSTED)
drop(e.persona.destination)
}
abstract void drop(Destination d)
abstract Collection<Connection> getConnections()
protected abstract int getDesiredConnections()
boolean needsConnections() {
return getConnections().size() < getDesiredConnections()
}
abstract boolean isConnected(Destination d)
abstract void onConnectionEvent(ConnectionEvent e)
abstract void onDisconnectionEvent(DisconnectionEvent e)
abstract void shutdown()
protected void sendPings() {
final long now = System.currentTimeMillis()
getConnections().each {
if (now - it.lastPingSentTime > PING_TIME)
it.sendPing()
}
}
protected void sendPings() {
final long now = System.currentTimeMillis()
getConnections().each {
if (now - it.lastPingSentTime > PING_TIME)
it.sendPing()
}
}
}

View File

@ -5,11 +5,11 @@ import com.muwire.core.Event
import net.i2p.data.Destination
class DisconnectionEvent extends Event {
Destination destination
@Override
public String toString() {
"DisconnectionEvent ${super.toString()} destination:${destination.toBase32()}"
}
Destination destination
@Override
public String toString() {
"DisconnectionEvent ${super.toString()} destination:${destination.toBase32()}"
}
}

View File

@ -1,45 +1,46 @@
package com.muwire.core.connection
import java.util.concurrent.atomic.AtomicBoolean
import java.util.logging.Level
import groovy.util.logging.Log
import net.i2p.data.Destination
@Log
class Endpoint implements Closeable {
final Destination destination
final InputStream inputStream
final OutputStream outputStream
final Destination destination
final InputStream inputStream
final OutputStream outputStream
final def toClose
private final AtomicBoolean closed = new AtomicBoolean()
Endpoint(Destination destination, InputStream inputStream, OutputStream outputStream, def toClose) {
this.destination = destination
this.inputStream = inputStream
this.outputStream = outputStream
private final AtomicBoolean closed = new AtomicBoolean()
Endpoint(Destination destination, InputStream inputStream, OutputStream outputStream, def toClose) {
this.destination = destination
this.inputStream = inputStream
this.outputStream = outputStream
this.toClose = toClose
}
@Override
public void close() {
if (!closed.compareAndSet(false, true)) {
log.warning("Close loop detected for ${destination.toBase32()}", new Exception())
return
}
if (inputStream != null) {
try {inputStream.close()} catch (Exception ignore) {}
}
if (outputStream != null) {
try {outputStream.close()} catch (Exception ignore) {}
}
if (toClose != null) {
try {toClose.close()} catch (Exception ignore) {}
}
@Override
public void close() {
if (!closed.compareAndSet(false, true)) {
log.log(Level.WARNING,"Close loop detected for ${destination.toBase32()}", new Exception())
return
}
}
@Override
public String toString() {
"destination: ${destination.toBase32()}"
}
}
if (inputStream != null) {
try {inputStream.close()} catch (Exception ignore) {}
}
if (outputStream != null) {
try {outputStream.close()} catch (Exception ignore) {}
}
if (toClose != null) {
try {toClose.reset()} catch (Exception ignore) {}
}
}
@Override
public String toString() {
"destination: ${destination.toBase32()}"
}
}

View File

@ -5,18 +5,18 @@ import net.i2p.client.streaming.I2PSocketManager
class I2PAcceptor {
final I2PSocketManager socketManager
final I2PServerSocket serverSocket
I2PAcceptor() {}
I2PAcceptor(I2PSocketManager socketManager) {
this.socketManager = socketManager
this.serverSocket = socketManager.getServerSocket()
}
Endpoint accept() {
def socket = serverSocket.accept()
new Endpoint(socket.getPeerDestination(), socket.getInputStream(), socket.getOutputStream(), socket)
}
final I2PSocketManager socketManager
final I2PServerSocket serverSocket
I2PAcceptor() {}
I2PAcceptor(I2PSocketManager socketManager) {
this.socketManager = socketManager
this.serverSocket = socketManager.getServerSocket()
}
Endpoint accept() {
def socket = serverSocket.accept()
new Endpoint(socket.getPeerDestination(), socket.getInputStream(), socket.getOutputStream(), socket)
}
}

View File

@ -4,18 +4,18 @@ import net.i2p.client.streaming.I2PSocketManager
import net.i2p.data.Destination
class I2PConnector {
final I2PSocketManager socketManager
I2PConnector() {}
I2PConnector(I2PSocketManager socketManager) {
this.socketManager = socketManager
}
Endpoint connect(Destination dest) {
def socket = socketManager.connect(dest)
new Endpoint(dest, socket.getInputStream(), socket.getOutputStream(), socket)
}
final I2PSocketManager socketManager
I2PConnector() {}
I2PConnector(I2PSocketManager socketManager) {
this.socketManager = socketManager
}
Endpoint connect(Destination dest) {
def socket = socketManager.connect(dest)
new Endpoint(dest, socket.getInputStream(), socket.getOutputStream(), socket)
}
}

View File

@ -4,32 +4,34 @@ import java.io.InputStream
import java.io.OutputStream
import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.hostcache.HostCache
import com.muwire.core.trust.TrustService
import net.i2p.data.Destination
/**
* Connection where the other side is a leaf.
* Connection where the other side is a leaf.
* Such connections can only be incoming.
* @author zab
*/
class LeafConnection extends Connection {
public LeafConnection(EventBus eventBus, Endpoint endpoint, HostCache hostCache, TrustService trustService) {
super(eventBus, endpoint, true, hostCache, trustService);
}
public LeafConnection(EventBus eventBus, Endpoint endpoint, HostCache hostCache,
TrustService trustService, MuWireSettings settings) {
super(eventBus, endpoint, true, hostCache, trustService, settings);
}
@Override
protected void read() {
// TODO Auto-generated method stub
}
@Override
protected void read() {
// TODO Auto-generated method stub
@Override
protected void write(Object message) {
// TODO Auto-generated method stub
}
}
@Override
protected void write(Object message) {
// TODO Auto-generated method stub
}
}

View File

@ -3,6 +3,7 @@ package com.muwire.core.connection
import java.util.concurrent.ConcurrentHashMap
import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.hostcache.HostCache
import com.muwire.core.search.QueryEvent
@ -12,67 +13,68 @@ import net.i2p.data.Destination
@Log
class LeafConnectionManager extends ConnectionManager {
final int maxConnections
final Map<Destination, UltrapeerConnection> connections = new ConcurrentHashMap()
public LeafConnectionManager(EventBus eventBus, Persona me, int maxConnections, HostCache hostCache) {
super(eventBus, me, hostCache)
this.maxConnections = maxConnections
}
@Override
public void drop(Destination d) {
// TODO Auto-generated method stub
}
final int maxConnections
final Map<Destination, UltrapeerConnection> connections = new ConcurrentHashMap()
public LeafConnectionManager(EventBus eventBus, Persona me, int maxConnections,
HostCache hostCache, MuWireSettings settings) {
super(eventBus, me, hostCache, settings)
this.maxConnections = maxConnections
}
@Override
public void drop(Destination d) {
// TODO Auto-generated method stub
}
void onQueryEvent(QueryEvent e) {
if (me.destination == e.receivedOn) {
connections.values().each { it.sendQuery(e) }
}
}
@Override
public Collection<Connection> getConnections() {
connections.values()
}
@Override
public Collection<Connection> getConnections() {
connections.values()
}
@Override
protected int getDesiredConnections() {
return maxConnections;
}
@Override
protected int getDesiredConnections() {
return maxConnections;
}
@Override
public boolean isConnected(Destination d) {
connections.containsKey(d)
}
@Override
public boolean isConnected(Destination d) {
connections.containsKey(d)
}
@Override
public void onConnectionEvent(ConnectionEvent e) {
if (e.incoming || e.leaf) {
log.severe("Got inconsistent event as a leaf! $e")
return
}
if (e.status != ConnectionAttemptStatus.SUCCESSFUL)
return
Connection c = new UltrapeerConnection(eventBus, e.endpoint)
connections.put(e.endpoint.destination, c)
c.start()
}
@Override
public void onDisconnectionEvent(DisconnectionEvent e) {
def removed = connections.remove(e.destination)
if (removed == null)
log.severe("removed destination not present in connection manager ${e.destination.toBase32()}")
}
@Override
public void onConnectionEvent(ConnectionEvent e) {
if (e.incoming || e.leaf) {
log.severe("Got inconsistent event as a leaf! $e")
return
}
if (e.status != ConnectionAttemptStatus.SUCCESSFUL)
return
Connection c = new UltrapeerConnection(eventBus, e.endpoint)
connections.put(e.endpoint.destination, c)
c.start()
}
@Override
public void onDisconnectionEvent(DisconnectionEvent e) {
def removed = connections.remove(e.destination)
if (removed == null)
log.severe("removed destination not present in connection manager ${e.destination.toBase32()}")
}
@Override
void shutdown() {
}
}

View File

@ -4,6 +4,7 @@ import java.io.InputStream
import java.io.OutputStream
import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.hostcache.HostCache
import com.muwire.core.trust.TrustService
import com.muwire.core.util.DataUtil
@ -19,62 +20,63 @@ import net.i2p.data.Destination
*/
@Log
class PeerConnection extends Connection {
private final DataInputStream dis
private final DataOutputStream dos
private final byte[] readHeader = new byte[3]
private final byte[] writeHeader = new byte[3]
private final JsonSlurper slurper = new JsonSlurper()
public PeerConnection(EventBus eventBus, Endpoint endpoint,
boolean incoming, HostCache hostCache, TrustService trustService) {
super(eventBus, endpoint, incoming, hostCache, trustService)
this.dis = new DataInputStream(endpoint.inputStream)
this.dos = new DataOutputStream(endpoint.outputStream)
}
private final DataInputStream dis
private final DataOutputStream dos
@Override
protected void read() {
dis.readFully(readHeader)
int length = DataUtil.readLength(readHeader)
log.fine("$name read length $length")
byte[] payload = new byte[length]
dis.readFully(payload)
if ((readHeader[0] & (byte)0x80) == 0x80) {
// TODO process binary
} else {
def json = slurper.parse(payload)
if (json.type == null)
throw new Exception("missing json type")
switch(json.type) {
case "Ping" : handlePing(); break;
case "Pong" : handlePong(json); break;
private final byte[] readHeader = new byte[3]
private final byte[] writeHeader = new byte[3]
private final JsonSlurper slurper = new JsonSlurper()
public PeerConnection(EventBus eventBus, Endpoint endpoint,
boolean incoming, HostCache hostCache, TrustService trustService,
MuWireSettings settings) {
super(eventBus, endpoint, incoming, hostCache, trustService, settings)
this.dis = new DataInputStream(endpoint.inputStream)
this.dos = new DataOutputStream(endpoint.outputStream)
}
@Override
protected void read() {
dis.readFully(readHeader)
int length = DataUtil.readLength(readHeader)
log.fine("$name read length $length")
byte[] payload = new byte[length]
dis.readFully(payload)
if ((readHeader[0] & (byte)0x80) == 0x80) {
// TODO process binary
} else {
def json = slurper.parse(payload)
if (json.type == null)
throw new Exception("missing json type")
switch(json.type) {
case "Ping" : handlePing(); break;
case "Pong" : handlePong(json); break;
case "Search": handleSearch(json); break
default :
throw new Exception("unknown json type ${json.type}")
}
}
}
default :
throw new Exception("unknown json type ${json.type}")
}
}
}
@Override
protected void write(Object message) {
byte[] payload
if (message instanceof Map) {
payload = JsonOutput.toJson(message).bytes
DataUtil.packHeader(payload.length, writeHeader)
log.fine "$name writing message type ${message.type} length $payload.length"
writeHeader[0] &= (byte)0x7F
} else {
// TODO: write binary
}
dos.write(writeHeader)
dos.write(payload)
dos.flush()
}
@Override
protected void write(Object message) {
byte[] payload
if (message instanceof Map) {
payload = JsonOutput.toJson(message).bytes
DataUtil.packHeader(payload.length, writeHeader)
log.fine "$name writing message type ${message.type} length $payload.length"
writeHeader[0] &= (byte)0x7F
} else {
// TODO: write binary
}
dos.write(writeHeader)
dos.write(payload)
dos.flush()
}
}

View File

@ -17,30 +17,30 @@ import net.i2p.data.Destination
*/
class UltrapeerConnection extends Connection {
public UltrapeerConnection(EventBus eventBus, Endpoint endpoint, HostCache hostCache, TrustService trustService) {
super(eventBus, endpoint, false, hostCache, trustService)
}
public UltrapeerConnection(EventBus eventBus, Endpoint endpoint, HostCache hostCache, TrustService trustService) {
super(eventBus, endpoint, false, hostCache, trustService)
}
@Override
protected void read() {
// TODO Auto-generated method stub
}
@Override
protected void read() {
// TODO Auto-generated method stub
@Override
protected void write(Object message) {
if (message instanceof Map) {
writeJsonMessage(message)
} else {
writeBinaryMessage(message)
}
}
}
private void writeJsonMessage(def message) {
}
private void writeBinaryMessage(def message) {
}
@Override
protected void write(Object message) {
if (message instanceof Map) {
writeJsonMessage(message)
} else {
writeBinaryMessage(message)
}
}
private void writeJsonMessage(def message) {
}
private void writeBinaryMessage(def message) {
}
}

View File

@ -4,6 +4,7 @@ import java.util.Collection
import java.util.concurrent.ConcurrentHashMap
import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.hostcache.HostCache
import com.muwire.core.search.QueryEvent
@ -14,28 +15,28 @@ import net.i2p.data.Destination
@Log
class UltrapeerConnectionManager extends ConnectionManager {
final int maxPeers, maxLeafs
final TrustService trustService
final Map<Destination, PeerConnection> peerConnections = new ConcurrentHashMap()
final Map<Destination, LeafConnection> leafConnections = new ConcurrentHashMap()
UltrapeerConnectionManager() {}
public UltrapeerConnectionManager(EventBus eventBus, Persona me, int maxPeers, int maxLeafs,
HostCache hostCache, TrustService trustService) {
super(eventBus, me, hostCache)
this.maxPeers = maxPeers
this.maxLeafs = maxLeafs
final int maxPeers, maxLeafs
final TrustService trustService
final Map<Destination, PeerConnection> peerConnections = new ConcurrentHashMap()
final Map<Destination, LeafConnection> leafConnections = new ConcurrentHashMap()
UltrapeerConnectionManager() {}
public UltrapeerConnectionManager(EventBus eventBus, Persona me, int maxPeers, int maxLeafs,
HostCache hostCache, TrustService trustService, MuWireSettings settings) {
super(eventBus, me, hostCache, settings)
this.maxPeers = maxPeers
this.maxLeafs = maxLeafs
this.trustService = trustService
}
@Override
public void drop(Destination d) {
peerConnections.get(d)?.close()
}
@Override
public void drop(Destination d) {
peerConnections.get(d)?.close()
leafConnections.get(d)?.close()
}
}
void onQueryEvent(QueryEvent e) {
forwardQueryToLeafs(e)
if (!e.firstHop)
@ -49,67 +50,67 @@ class UltrapeerConnectionManager extends ConnectionManager {
}
}
@Override
public Collection<Connection> getConnections() {
def rv = new ArrayList(peerConnections.size() + leafConnections.size())
rv.addAll(peerConnections.values())
rv.addAll(leafConnections.values())
rv
}
boolean hasLeafSlots() {
leafConnections.size() < maxLeafs
}
boolean hasPeerSlots() {
peerConnections.size() < maxPeers
}
@Override
protected int getDesiredConnections() {
return maxPeers / 2;
}
@Override
public boolean isConnected(Destination d) {
peerConnections.containsKey(d) || leafConnections.containsKey(d)
}
@Override
public Collection<Connection> getConnections() {
def rv = new ArrayList(peerConnections.size() + leafConnections.size())
rv.addAll(peerConnections.values())
rv.addAll(leafConnections.values())
rv
}
boolean hasLeafSlots() {
leafConnections.size() < maxLeafs
}
boolean hasPeerSlots() {
peerConnections.size() < maxPeers
}
@Override
protected int getDesiredConnections() {
return maxPeers / 2;
}
@Override
public boolean isConnected(Destination d) {
peerConnections.containsKey(d) || leafConnections.containsKey(d)
}
@Override
public void onConnectionEvent(ConnectionEvent e) {
if (!e.incoming && e.leaf) {
log.severe("Inconsistent event $e")
return
}
if (e.status != ConnectionAttemptStatus.SUCCESSFUL)
return
Connection c = e.leaf ?
new LeafConnection(eventBus, e.endpoint, hostCache, trustService, settings) :
new PeerConnection(eventBus, e.endpoint, e.incoming, hostCache, trustService, settings)
def map = e.leaf ? leafConnections : peerConnections
map.put(e.endpoint.destination, c)
c.start()
}
@Override
public void onDisconnectionEvent(DisconnectionEvent e) {
def removed = peerConnections.remove(e.destination)
if (removed == null)
removed = leafConnections.remove(e.destination)
if (removed == null)
log.severe("Removed connection not present in either leaf or peer map ${e.destination.toBase32()}")
}
@Override
public void onConnectionEvent(ConnectionEvent e) {
if (!e.incoming && e.leaf) {
log.severe("Inconsistent event $e")
return
}
if (e.status != ConnectionAttemptStatus.SUCCESSFUL)
return
Connection c = e.leaf ?
new LeafConnection(eventBus, e.endpoint, hostCache, trustService) :
new PeerConnection(eventBus, e.endpoint, e.incoming, hostCache, trustService)
def map = e.leaf ? leafConnections : peerConnections
map.put(e.endpoint.destination, c)
c.start()
}
@Override
public void onDisconnectionEvent(DisconnectionEvent e) {
def removed = peerConnections.remove(e.destination)
if (removed == null)
removed = leafConnections.remove(e.destination)
if (removed == null)
log.severe("Removed connection not present in either leaf or peer map ${e.destination.toBase32()}")
}
@Override
void shutdown() {
peerConnections.each {k,v -> v.close() }
leafConnections.each {k,v -> v.close() }
peerConnections.values().stream().parallel().forEach({v -> v.close()})
leafConnections.values().stream().parallel().forEach({v -> v.close()})
peerConnections.clear()
leafConnections.clear()
}
void forwardQueryToLeafs(QueryEvent e) {
}
void forwardQueryToLeafs(QueryEvent e) {
}
}

View File

@ -0,0 +1,9 @@
package com.muwire.core.content
import com.muwire.core.Event
/** Event raised to install or remove a content-matching rule. */
class ContentControlEvent extends Event {
    String term   // the keyword or regular-expression text to match against queries
    boolean regex // true: treat term as a regex (RegexMatcher); false: exact keyword match
    boolean add   // true: install the matcher; false: remove a previously installed one
}

View File

@ -0,0 +1,30 @@
package com.muwire.core.content
import java.util.concurrent.ConcurrentHashMap
import com.muwire.core.search.QueryEvent
import net.i2p.util.ConcurrentHashSet
/**
 * Keeps the set of active content matchers and feeds every incoming
 * query through them.
 */
class ContentManager {

    Set<Matcher> matchers = new ConcurrentHashSet()

    /**
     * Installs or removes the matcher described by the event.
     * The matcher flavor is chosen by the event's regex flag.
     */
    void onContentControlEvent(ContentControlEvent e) {
        Matcher matcher = e.regex ? new RegexMatcher(e.term) : new KeywordMatcher(e.term)
        if (e.add)
            matchers.add(matcher)
        else
            matchers.remove(matcher)
    }

    /**
     * Runs the query through every installed matcher.
     * Queries without search terms (e.g. hash-only searches) are ignored.
     */
    void onQueryEvent(QueryEvent e) {
        if (e.searchEvent.searchTerms == null)
            return
        matchers.each { it.process(e) }
    }
}

View File

@ -0,0 +1,36 @@
package com.muwire.core.content
/**
 * Matcher that fires when the configured keyword appears verbatim
 * among a query's search terms.
 */
class KeywordMatcher extends Matcher {
    private final String keyword

    KeywordMatcher(String keyword) {
        this.keyword = keyword
    }

    /**
     * @return true if the keyword is one of the search terms.
     * Uses List.contains for an early exit instead of scanning the
     * whole list with a flag variable.
     */
    @Override
    protected boolean match(List<String> searchTerms) {
        searchTerms.contains(keyword)
    }

    @Override
    public String getTerm() {
        keyword
    }

    @Override
    public int hashCode() {
        keyword.hashCode()
    }

    @Override
    public boolean equals(Object o) {
        if (!(o instanceof KeywordMatcher))
            return false
        KeywordMatcher other = (KeywordMatcher) o
        keyword.equals(other.keyword)
    }
}

View File

@ -0,0 +1,9 @@
package com.muwire.core.content
import com.muwire.core.Persona
/** A single recorded hit against a matcher: who searched, for what, and when. */
class Match {
    Persona persona    // originator of the query that matched
    String [] keywords // the search terms of the matching query
    long timestamp     // epoch millis when the match was recorded
}

View File

@ -0,0 +1,20 @@
package com.muwire.core.content
import com.muwire.core.search.QueryEvent
/**
 * Base class for content matchers.  Records a Match for every query that
 * satisfies the subclass-defined predicate, de-duplicated by query UUID.
 */
abstract class Matcher {
    /** Recorded hits; synchronized because events may arrive concurrently. */
    final List<Match> matches = Collections.synchronizedList(new ArrayList<>())
    // UUIDs already counted, so repeated/forwarded queries register once.
    // Wrapped in synchronizedSet for consistency with matches above — the
    // plain HashSet was the only unsynchronized piece of shared state here.
    final Set<UUID> uuids = Collections.synchronizedSet(new HashSet<>())

    /** @return whether the given search terms satisfy this matcher */
    protected abstract boolean match(List<String> searchTerms);

    /** @return the human-readable term this matcher was built from */
    public abstract String getTerm();

    /**
     * Records a Match if the query's terms satisfy match() and this
     * query UUID has not been seen before.  Set.add is atomic on the
     * synchronized set, so each UUID is counted exactly once.
     */
    public void process(QueryEvent qe) {
        def terms = qe.searchEvent.searchTerms
        if (match(terms) && uuids.add(qe.searchEvent.uuid)) {
            long now = System.currentTimeMillis()
            matches << new Match(persona : qe.originator, keywords : terms, timestamp : now)
        }
    }
}

View File

@ -0,0 +1,35 @@
package com.muwire.core.content
import java.util.regex.Pattern
import java.util.stream.Collectors
class RegexMatcher extends Matcher {
private final Pattern pattern
RegexMatcher(String pattern) {
this.pattern = Pattern.compile(pattern)
}
@Override
protected boolean match(List<String> keywords) {
String combined = keywords.join(" ")
return pattern.matcher(combined).find()
}
@Override
public String getTerm() {
pattern.pattern()
}
@Override
public int hashCode() {
pattern.pattern().hashCode()
}
@Override
public boolean equals(Object o) {
if (!(o instanceof RegexMatcher))
return false
RegexMatcher other = (RegexMatcher) o
pattern.pattern() == other.pattern.pattern()
}
}

View File

@ -21,5 +21,5 @@ class BadHashException extends Exception {
public BadHashException(Throwable cause) {
super(cause);
}
}

View File

@ -1,32 +1,57 @@
package com.muwire.core.download
import com.muwire.core.connection.I2PConnector
import com.muwire.core.files.FileDownloadedEvent
import com.muwire.core.files.FileHasher
import com.muwire.core.mesh.Mesh
import com.muwire.core.mesh.MeshManager
import com.muwire.core.trust.TrustLevel
import com.muwire.core.trust.TrustService
import com.muwire.core.util.DataUtil
import groovy.json.JsonBuilder
import groovy.json.JsonOutput
import groovy.json.JsonSlurper
import net.i2p.data.Base64
import net.i2p.data.Destination
import net.i2p.util.ConcurrentHashSet
import com.muwire.core.EventBus
import com.muwire.core.InfoHash
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.UILoadedEvent
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.Executor
import java.util.concurrent.Executors
public class DownloadManager {
private final EventBus eventBus
private final TrustService trustService
private final MeshManager meshManager
private final MuWireSettings muSettings
private final I2PConnector connector
private final Executor executor
private final File incompletes
private final File incompletes, home
private final Persona me
public DownloadManager(EventBus eventBus, I2PConnector connector, File incompletes, Persona me) {
private final Map<InfoHash, Downloader> downloaders = new ConcurrentHashMap<>()
public DownloadManager(EventBus eventBus, TrustService trustService, MeshManager meshManager, MuWireSettings muSettings,
I2PConnector connector, File home, Persona me) {
this.eventBus = eventBus
this.trustService = trustService
this.meshManager = meshManager
this.muSettings = muSettings
this.connector = connector
this.incompletes = incompletes
this.incompletes = new File(home,"incompletes")
this.home = home
this.me = me
incompletes.mkdir()
this.executor = Executors.newCachedThreadPool({ r ->
Thread rv = new Thread(r)
rv.setName("download-worker")
@ -34,27 +59,150 @@ public class DownloadManager {
rv
})
}
public void onUIDownloadEvent(UIDownloadEvent e) {
def size = e.result[0].size
def infohash = e.result[0].infohash
def pieceSize = e.result[0].pieceSize
Set<Destination> destinations = new HashSet<>()
e.result.each {
e.result.each {
destinations.add(it.sender.destination)
}
def downloader = new Downloader(this, me, e.target, size,
destinations.addAll(e.sources)
destinations.remove(me.destination)
Pieces pieces = getPieces(infohash, size, pieceSize, e.sequential)
def downloader = new Downloader(eventBus, this, me, e.target, size,
infohash, pieceSize, connector, destinations,
incompletes)
incompletes, pieces)
downloaders.put(infohash, downloader)
persistDownloaders()
executor.execute({downloader.download()} as Runnable)
eventBus.publish(new DownloadStartedEvent(downloader : downloader))
}
public void onUIDownloadCancelledEvent(UIDownloadCancelledEvent e) {
downloaders.remove(e.downloader.infoHash)
persistDownloaders()
}
public void onUIDownloadPausedEvent(UIDownloadPausedEvent e) {
persistDownloaders()
}
public void onUIDownloadResumedEvent(UIDownloadResumedEvent e) {
persistDownloaders()
}
void resume(Downloader downloader) {
executor.execute({downloader.download() as Runnable})
}
void onUILoadedEvent(UILoadedEvent e) {
File downloadsFile = new File(home, "downloads.json")
if (!downloadsFile.exists())
return
def slurper = new JsonSlurper()
downloadsFile.eachLine {
def json = slurper.parseText(it)
File file = new File(DataUtil.readi18nString(Base64.decode(json.file)))
def destinations = new HashSet<>()
json.destinations.each { destination ->
destinations.add new Destination(destination)
}
InfoHash infoHash
if (json.hashList != null) {
byte[] hashList = Base64.decode(json.hashList)
infoHash = InfoHash.fromHashList(hashList)
} else {
byte [] root = Base64.decode(json.hashRoot)
infoHash = new InfoHash(root)
}
boolean sequential = false
if (json.sequential != null)
sequential = json.sequential
Pieces pieces = getPieces(infoHash, (long)json.length, json.pieceSizePow2, sequential)
def downloader = new Downloader(eventBus, this, me, file, (long)json.length,
infoHash, json.pieceSizePow2, connector, destinations, incompletes, pieces)
if (json.paused != null)
downloader.paused = json.paused
downloaders.put(infoHash, downloader)
downloader.readPieces()
if (!downloader.paused)
downloader.download()
eventBus.publish(new DownloadStartedEvent(downloader : downloader))
}
}
private Pieces getPieces(InfoHash infoHash, long length, int pieceSizePow2, boolean sequential) {
int pieceSize = 0x1 << pieceSizePow2
int nPieces = (int)(length / pieceSize)
if (length % pieceSize != 0)
nPieces++
Mesh mesh = meshManager.getOrCreate(infoHash, nPieces, sequential)
mesh.pieces
}
void onSourceDiscoveredEvent(SourceDiscoveredEvent e) {
Downloader downloader = downloaders.get(e.infoHash)
if (downloader == null)
return
boolean ok = false
switch(trustService.getLevel(e.source.destination)) {
case TrustLevel.TRUSTED: ok = true; break
case TrustLevel.NEUTRAL: ok = muSettings.allowUntrusted; break
case TrustLevel.DISTRUSTED: ok = false; break
}
if (ok)
downloader.addSource(e.source.destination)
}
void onFileDownloadedEvent(FileDownloadedEvent e) {
downloaders.remove(e.downloader.infoHash)
persistDownloaders()
}
private void persistDownloaders() {
File downloadsFile = new File(home,"downloads.json")
downloadsFile.withPrintWriter { writer ->
downloaders.values().each { downloader ->
if (!downloader.cancelled) {
def json = [:]
json.file = Base64.encode(DataUtil.encodei18nString(downloader.file.getAbsolutePath()))
json.length = downloader.length
json.pieceSizePow2 = downloader.pieceSizePow2
def destinations = []
downloader.destinations.each {
destinations << it.toBase64()
}
json.destinations = destinations
InfoHash infoHash = downloader.getInfoHash()
if (infoHash.hashList != null)
json.hashList = Base64.encode(infoHash.hashList)
else
json.hashRoot = Base64.encode(infoHash.getRoot())
json.paused = downloader.paused
json.sequential = downloader.pieces.ratio == 0f
writer.println(JsonOutput.toJson(json))
}
}
}
}
public void shutdown() {
downloaders.values().each { it.stop() }
Downloader.executorService.shutdownNow()
}
}

View File

@ -3,49 +3,56 @@ package com.muwire.core.download;
import net.i2p.data.Base64
import com.muwire.core.Constants
import com.muwire.core.EventBus
import com.muwire.core.InfoHash
import com.muwire.core.Persona
import com.muwire.core.connection.Endpoint
import com.muwire.core.util.DataUtil
import static com.muwire.core.util.DataUtil.readTillRN
import groovy.util.logging.Log
import java.nio.ByteBuffer
import java.nio.MappedByteBuffer
import java.nio.channels.FileChannel
import java.nio.charset.StandardCharsets
import java.nio.file.Files
import java.nio.file.StandardOpenOption
import java.security.MessageDigest
import java.security.NoSuchAlgorithmException
import java.util.logging.Level
@Log
class DownloadSession {
private static int SAMPLES = 10
private final EventBus eventBus
private final String meB64
private final Pieces downloaded, claimed
private final Pieces pieces
private final InfoHash infoHash
private final Endpoint endpoint
private final File file
private final int pieceSize
private final long fileLength
private final Set<Integer> available
private final MessageDigest digest
private final ArrayDeque<Long> timestamps = new ArrayDeque<>(SAMPLES)
private final ArrayDeque<Integer> reads = new ArrayDeque<>(SAMPLES)
private ByteBuffer mapped
DownloadSession(String meB64, Pieces downloaded, Pieces claimed, InfoHash infoHash, Endpoint endpoint, File file,
int pieceSize, long fileLength) {
private long lastSpeedRead = System.currentTimeMillis()
private long dataSinceLastRead
private MappedByteBuffer mapped
DownloadSession(EventBus eventBus, String meB64, Pieces pieces, InfoHash infoHash, Endpoint endpoint, File file,
int pieceSize, long fileLength, Set<Integer> available) {
this.eventBus = eventBus
this.meB64 = meB64
this.downloaded = downloaded
this.claimed = claimed
this.pieces = pieces
this.endpoint = endpoint
this.infoHash = infoHash
this.file = file
this.pieceSize = pieceSize
this.fileLength = fileLength
this.available = available
try {
digest = MessageDigest.getInstance("SHA-256")
} catch (NoSuchAlgorithmException impossible) {
@ -53,7 +60,7 @@ class DownloadSession {
System.exit(1)
}
}
/**
* @return if the request will proceed. The only time it may not
* is if all the pieces have been claimed by other sessions.
@ -62,130 +69,164 @@ class DownloadSession {
public boolean request() throws IOException {
OutputStream os = endpoint.getOutputStream()
InputStream is = endpoint.getInputStream()
int piece
while(true) {
piece = downloaded.getRandomPiece()
if (claimed.isMarked(piece)) {
if (downloaded.donePieces() + claimed.donePieces() == downloaded.nPieces) {
log.info("all pieces claimed")
return false
}
continue
}
break
}
claimed.markDownloaded(piece)
log.info("will download piece $piece")
long start = piece * pieceSize
long end = Math.min(fileLength, start + pieceSize) - 1
long length = end - start + 1
int[] pieceAndPosition
if (available.isEmpty())
pieceAndPosition = pieces.claim()
else
pieceAndPosition = pieces.claim(new HashSet<>(available))
if (pieceAndPosition == null)
return false
int piece = pieceAndPosition[0]
int position = pieceAndPosition[1]
boolean steal = pieceAndPosition[2] == 1
boolean unclaim = true
log.info("will download piece $piece from position $position steal $steal")
long pieceStart = piece * ((long)pieceSize)
long end = Math.min(fileLength, pieceStart + pieceSize) - 1
long start = pieceStart + position
String root = Base64.encode(infoHash.getRoot())
FileChannel channel
try {
os.write("GET $root\r\n".getBytes(StandardCharsets.US_ASCII))
os.write("Range: $start-$end\r\n".getBytes(StandardCharsets.US_ASCII))
os.write("X-Persona: $meB64\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
os.write("X-Persona: $meB64\r\n".getBytes(StandardCharsets.US_ASCII))
String xHave = DataUtil.encodeXHave(pieces.getDownloaded(), pieces.nPieces)
os.write("X-Have: $xHave\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
os.flush()
String code = readTillRN(is)
if (code.startsWith("404 ")) {
String codeString = readTillRN(is)
int space = codeString.indexOf(' ')
if (space > 0)
codeString = codeString.substring(0, space)
int code = Integer.parseInt(codeString.trim())
if (code == 404) {
log.warning("file not found")
endpoint.close()
return
return false
}
if (code.startsWith("416 ")) {
log.warning("range $start-$end cannot be satisfied")
return // leave endpoint open
}
if (!code.startsWith("200 ")) {
if (!(code == 200 || code == 416)) {
log.warning("unknown code $code")
endpoint.close()
return
return false
}
// parse all headers
Set<String> headers = new HashSet<>()
Map<String,String> headers = new HashMap<>()
String header
while((header = readTillRN(is)) != "" && headers.size() < Constants.MAX_HEADERS)
headers.add(header)
long receivedStart = -1
long receivedEnd = -1
for (String receivedHeader : headers) {
def group = (receivedHeader =~ /^Content-Range: (\d+)-(\d+)$/)
if (group.size() != 1) {
log.info("ignoring header $receivedHeader")
continue
}
receivedStart = Long.parseLong(group[0][1])
receivedEnd = Long.parseLong(group[0][2])
while((header = readTillRN(is)) != "" && headers.size() < Constants.MAX_HEADERS) {
int colon = header.indexOf(':')
if (colon == -1 || colon == header.length() - 1)
throw new IOException("invalid header $header")
String key = header.substring(0, colon)
String value = header.substring(colon + 1)
headers[key] = value.trim()
}
// parse X-Alt if present
if (headers.containsKey("X-Alt")) {
headers["X-Alt"].split(",").each {
if (it.length() > 0) {
byte [] raw = Base64.decode(it)
Persona source = new Persona(new ByteArrayInputStream(raw))
eventBus.publish(new SourceDiscoveredEvent(infoHash : infoHash, source : source))
}
}
}
// parse X-Have if present
if (headers.containsKey("X-Have")) {
DataUtil.decodeXHave(headers["X-Have"]).each {
available.add(it)
}
if (!available.contains(piece))
return true // try again next time
} else {
if (code != 200)
throw new IOException("Code $code but no X-Have")
available.clear()
}
if (code != 200)
return true
String range = headers["Content-Range"]
if (range == null)
throw new IOException("Code 200 but no Content-Range")
def group = (range =~ /^(\d+)-(\d+)$/)
if (group.size() != 1)
throw new IOException("invalid Content-Range header $range")
long receivedStart = Long.parseLong(group[0][1])
long receivedEnd = Long.parseLong(group[0][2])
if (receivedStart != start || receivedEnd != end) {
log.warning("We don't support mismatching ranges yet")
endpoint.close()
return
return false
}
// start the download
channel = Files.newByteChannel(file.toPath(), EnumSet.of(StandardOpenOption.READ, StandardOpenOption.WRITE,
StandardOpenOption.SPARSE, StandardOpenOption.CREATE)) // TODO: double-check, maybe CREATE_NEW
mapped = channel.map(FileChannel.MapMode.READ_WRITE, start, end - start + 1)
byte[] tmp = new byte[0x1 << 13]
while(mapped.hasRemaining()) {
if (mapped.remaining() < tmp.length)
tmp = new byte[mapped.remaining()]
int read = is.read(tmp)
if (read == -1)
throw new IOException()
synchronized(this) {
mapped.put(tmp, 0, read)
if (timestamps.size() == SAMPLES) {
timestamps.removeFirst()
reads.removeFirst()
FileChannel channel
try {
channel = Files.newByteChannel(file.toPath(), EnumSet.of(StandardOpenOption.READ, StandardOpenOption.WRITE,
StandardOpenOption.SPARSE, StandardOpenOption.CREATE))
mapped = channel.map(FileChannel.MapMode.READ_WRITE, pieceStart, end - pieceStart + 1)
mapped.position(position)
byte[] tmp = new byte[0x1 << 13]
while(mapped.hasRemaining()) {
if (mapped.remaining() < tmp.length)
tmp = new byte[mapped.remaining()]
int read = is.read(tmp)
if (read == -1)
throw new IOException()
synchronized(this) {
mapped.put(tmp, 0, read)
dataSinceLastRead += read
pieces.markPartial(piece, mapped.position())
}
timestamps.addLast(System.currentTimeMillis())
reads.addLast(read)
}
mapped.clear()
digest.update(mapped)
byte [] hash = digest.digest()
byte [] expected = new byte[32]
System.arraycopy(infoHash.getHashList(), piece * 32, expected, 0, 32)
if (hash != expected) {
pieces.markPartial(piece, 0)
throw new BadHashException("bad hash on piece $piece")
}
} finally {
try { channel?.close() } catch (IOException ignore) {}
DataUtil.tryUnmap(mapped)
}
mapped.clear()
digest.update(mapped)
byte [] hash = digest.digest()
byte [] expected = new byte[32]
System.arraycopy(infoHash.getHashList(), piece * 32, expected, 0, 32)
if (hash != expected)
throw new BadHashException()
downloaded.markDownloaded(piece)
pieces.markDownloaded(piece)
unclaim = false
} finally {
claimed.clear(piece)
try { channel?.close() } catch (IOException ignore) {}
if (unclaim && !steal)
pieces.unclaim(piece)
}
return true
}
synchronized int positionInPiece() {
if (mapped == null)
return 0
mapped.position()
}
synchronized int speed() {
if (timestamps.size() < SAMPLES)
return 0
long interval = timestamps.last - timestamps.first
int totalRead = 0
reads.each { totalRead += it }
(int)(totalRead * 1000.0 / interval)
final long now = System.currentTimeMillis()
long interval = Math.max(1000, now - lastSpeedRead)
lastSpeedRead = now;
int rv = (int) (dataSinceLastRead * 1000.0 / interval)
dataSinceLastRead = 0
rv
}
}

View File

@ -4,22 +4,32 @@ import com.muwire.core.InfoHash
import com.muwire.core.Persona
import com.muwire.core.connection.Endpoint
import java.nio.file.AtomicMoveNotSupportedException
import java.nio.file.Files
import java.nio.file.StandardCopyOption
import java.time.Instant
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicBoolean
import java.util.logging.Level
import com.muwire.core.Constants
import com.muwire.core.DownloadedFile
import com.muwire.core.EventBus
import com.muwire.core.connection.I2PConnector
import com.muwire.core.files.FileDownloadedEvent
import com.muwire.core.util.DataUtil
import groovy.util.logging.Log
import net.i2p.data.Destination
import net.i2p.util.ConcurrentHashSet
@Log
public class Downloader {
public enum DownloadState { CONNECTING, DOWNLOADING, FAILED, CANCELLED, FINISHED }
private enum WorkerState { CONNECTING, DOWNLOADING, FINISHED}
public enum DownloadState { CONNECTING, HASHLIST, DOWNLOADING, FAILED, CANCELLED, PAUSED, FINISHED }
private enum WorkerState { CONNECTING, HASHLIST, DOWNLOADING, FINISHED}
private static final ExecutorService executorService = Executors.newCachedThreadPool({r ->
Thread rv = new Thread(r)
rv.setName("download worker")
@ -27,25 +37,38 @@ public class Downloader {
rv
})
private final DownloadManager downloadManager
private final Persona me
private final EventBus eventBus
private final DownloadManager downloadManager
private final Persona me
private final File file
private final Pieces downloaded, claimed
private final Pieces pieces
private final long length
private final InfoHash infoHash
private InfoHash infoHash
private final int pieceSize
private final I2PConnector connector
private final Set<Destination> destinations
private final int nPieces
private final File piecesFile
private final File incompleteFile
final int pieceSizePow2
private final Map<Destination, DownloadWorker> activeWorkers = new ConcurrentHashMap<>()
private volatile boolean cancelled
private final Set<Destination> successfulDestinations = new ConcurrentHashSet<>()
public Downloader(DownloadManager downloadManager, Persona me, File file, long length, InfoHash infoHash,
private volatile boolean cancelled, paused
private final AtomicBoolean eventFired = new AtomicBoolean()
private boolean piecesFileClosed
private ArrayList speedArr = new ArrayList<Integer>()
private int speedPos = 0
private int speedAvg = 0
private long timestamp = Instant.now().toEpochMilli()
public Downloader(EventBus eventBus, DownloadManager downloadManager,
Persona me, File file, long length, InfoHash infoHash,
int pieceSizePow2, I2PConnector connector, Set<Destination> destinations,
File incompletes) {
File incompletes, Pieces pieces) {
this.eventBus = eventBus
this.me = me
this.downloadManager = downloadManager
this.file = file
@ -54,19 +77,25 @@ public class Downloader {
this.connector = connector
this.destinations = destinations
this.piecesFile = new File(incompletes, file.getName()+".pieces")
this.incompleteFile = new File(incompletes, file.getName()+".part")
this.pieceSizePow2 = pieceSizePow2
this.pieceSize = 1 << pieceSizePow2
int nPieces
if (length % pieceSize == 0)
nPieces = length / pieceSize
else
nPieces = length / pieceSize + 1
this.nPieces = nPieces
downloaded = new Pieces(nPieces, Constants.DOWNLOAD_SEQUENTIAL_RATIO)
claimed = new Pieces(nPieces)
this.pieces = pieces
this.nPieces = pieces.nPieces
// default size suitable for an average of 5 seconds / 5 elements / 5 interval units
// it's easily adjustable by resizing the size of speedArr
this.speedArr = [ 0, 0, 0, 0, 0 ]
}
public synchronized InfoHash getInfoHash() {
infoHash
}
private synchronized void setInfoHash(InfoHash infoHash) {
this.infoHash = infoHash
}
void download() {
readPieces()
destinations.each {
@ -77,116 +106,240 @@ public class Downloader {
}
}
}
void readPieces() {
if (!piecesFile.exists())
return
piecesFile.withReader {
int piece = Integer.parseInt(it.readLine())
downloaded.markDownloaded(piece)
}
}
void writePieces() {
piecesFile.withPrintWriter { writer ->
downloaded.getDownloaded().each { piece ->
writer.println(piece)
piecesFile.eachLine {
String [] split = it.split(",")
int piece = Integer.parseInt(split[0])
if (split.length == 1)
pieces.markDownloaded(piece)
else {
int position = Integer.parseInt(split[1])
pieces.markPartial(piece, position)
}
}
}
public long donePieces() {
downloaded.donePieces()
}
public int speed() {
int total = 0
activeWorkers.values().each {
total += it.speed()
void writePieces() {
synchronized(piecesFile) {
if (piecesFileClosed)
return
piecesFile.withPrintWriter { writer ->
pieces.write(writer)
}
}
total
}
public long donePieces() {
pieces.donePieces()
}
public int speed() {
int currSpeed = 0
if (getCurrentState() == DownloadState.DOWNLOADING) {
activeWorkers.values().each {
if (it.currentState == WorkerState.DOWNLOADING)
currSpeed += it.speed()
}
}
// normalize to speedArr.size
currSpeed /= speedArr.size()
// compute new speedAvg and update speedArr
if ( speedArr[speedPos] > speedAvg ) {
speedAvg = 0
} else {
speedAvg -= speedArr[speedPos]
}
speedAvg += currSpeed
speedArr[speedPos] = currSpeed
// this might be necessary due to rounding errors
if (speedAvg < 0)
speedAvg = 0
// rolling index over the speedArr
speedPos++
if (speedPos >= speedArr.size())
speedPos=0
speedAvg
}
public DownloadState getCurrentState() {
if (cancelled)
return DownloadState.CANCELLED
if (paused)
return DownloadState.PAUSED
boolean allFinished = true
activeWorkers.values().each {
activeWorkers.values().each {
allFinished &= it.currentState == WorkerState.FINISHED
}
if (allFinished) {
if (downloaded.isComplete())
if (pieces.isComplete())
return DownloadState.FINISHED
return DownloadState.FAILED
}
// if at least one is downloading...
boolean oneDownloading = false
activeWorkers.values().each {
activeWorkers.values().each {
if (it.currentState == WorkerState.DOWNLOADING) {
oneDownloading = true
return
return
}
}
if (oneDownloading)
return DownloadState.DOWNLOADING
// at least one is requesting hashlist
boolean oneHashlist = false
activeWorkers.values().each {
if (it.currentState == WorkerState.HASHLIST) {
oneHashlist = true
return
}
}
if (oneHashlist)
return DownloadState.HASHLIST
return DownloadState.CONNECTING
}
public void cancel() {
cancelled = true
activeWorkers.values().each {
stop()
synchronized(piecesFile) {
piecesFileClosed = true
piecesFile.delete()
}
incompleteFile.delete()
pieces.clearAll()
}
public void pause() {
paused = true
stop()
}
void stop() {
activeWorkers.values().each {
it.cancel()
}
}
public void resume() {
downloadManager.resume(this)
public int activeWorkers() {
int active = 0
activeWorkers.values().each {
if (it.currentState != WorkerState.FINISHED)
active++
}
active
}
public void resume() {
paused = false
readPieces()
destinations.each { destination ->
def worker = activeWorkers.get(destination)
if (worker != null) {
if (worker.currentState == WorkerState.FINISHED) {
def newWorker = new DownloadWorker(destination)
activeWorkers.put(destination, newWorker)
executorService.submit(newWorker)
}
} else {
worker = new DownloadWorker(destination)
activeWorkers.put(destination, worker)
executorService.submit(worker)
}
}
}
void addSource(Destination d) {
if (activeWorkers.containsKey(d))
return
DownloadWorker newWorker = new DownloadWorker(d)
activeWorkers.put(d, newWorker)
executorService.submit(newWorker)
}
class DownloadWorker implements Runnable {
private final Destination destination
private volatile WorkerState currentState
private volatile Thread downloadThread
private Endpoint endpoint
private volatile DownloadSession currentSession
private final Set<Integer> available = new HashSet<>()
DownloadWorker(Destination destination) {
this.destination = destination
}
public void run() {
downloadThread = Thread.currentThread()
currentState = WorkerState.CONNECTING
Endpoint endpoint = null
try {
endpoint = connector.connect(destination)
while(getInfoHash().hashList == null) {
currentState = WorkerState.HASHLIST
HashListSession session = new HashListSession(me.toBase64(), infoHash, endpoint)
InfoHash received = session.request()
setInfoHash(received)
}
currentState = WorkerState.DOWNLOADING
boolean requestPerformed
while(!downloaded.isComplete()) {
currentSession = new DownloadSession(me.toBase64(), downloaded, claimed, infoHash, endpoint, file, pieceSize, length)
while(!pieces.isComplete()) {
currentSession = new DownloadSession(eventBus, me.toBase64(), pieces, getInfoHash(),
endpoint, incompleteFile, pieceSize, length, available)
requestPerformed = currentSession.request()
if (!requestPerformed)
break
successfulDestinations.add(endpoint.destination)
writePieces()
}
} catch (Exception bad) {
log.log(Level.WARNING,"Exception while downloading",bad)
log.log(Level.WARNING,"Exception while downloading",DataUtil.findRoot(bad))
} finally {
writePieces()
currentState = WorkerState.FINISHED
if (pieces.isComplete() && eventFired.compareAndSet(false, true)) {
synchronized(piecesFile) {
piecesFileClosed = true
piecesFile.delete()
}
activeWorkers.values().each {
if (it.destination != destination)
it.cancel()
}
try {
Files.move(incompleteFile.toPath(), file.toPath(), StandardCopyOption.ATOMIC_MOVE)
} catch (AtomicMoveNotSupportedException e) {
Files.copy(incompleteFile.toPath(), file.toPath(), StandardCopyOption.REPLACE_EXISTING)
incompleteFile.delete()
}
eventBus.publish(
new FileDownloadedEvent(
downloadedFile : new DownloadedFile(file.getCanonicalFile(), getInfoHash(), pieceSizePow2, successfulDestinations),
downloader : Downloader.this))
}
endpoint?.close()
}
}
int speed() {
if (currentSession == null)
return 0
currentSession.speed()
}
void cancel() {
downloadThread?.interrupt()
}

View File

@ -0,0 +1,82 @@
package com.muwire.core.download
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.security.MessageDigest
import java.security.NoSuchAlgorithmException
import com.muwire.core.Constants
import com.muwire.core.InfoHash
import com.muwire.core.connection.Endpoint
import groovy.util.logging.Log
import static com.muwire.core.util.DataUtil.readTillRN
import net.i2p.data.Base64
/**
 * One-shot client-side session that fetches the full hash list for an
 * infohash root from a remote endpoint, using the plain-text
 * "HASHLIST" wire request.
 *
 * Not thread-safe; intended to be created, used once via {@link #request()}
 * and discarded.
 */
@Log
class HashListSession {
    private final String meB64       // our persona, Base64-encoded, sent as X-Persona
    private final InfoHash infoHash  // the root we want the hash list for
    private final Endpoint endpoint  // established connection to the remote peer

    HashListSession(String meB64, InfoHash infoHash, Endpoint endpoint) {
        this.meB64 = meB64
        this.infoHash = infoHash
        this.endpoint = endpoint
    }

    /**
     * Performs the request/response exchange and returns an InfoHash
     * reconstructed from the fetched hash list.
     *
     * @return InfoHash built from the received hash list; its root is
     *         verified against the requested root
     * @throws IOException on a non-200 response, a missing/invalid
     *         Content-Range header, premature EOF, or a root mismatch
     */
    InfoHash request() throws IOException {
        InputStream is = endpoint.getInputStream()
        OutputStream os = endpoint.getOutputStream()

        // request: "HASHLIST <base64 root>\r\nX-Persona: <us>\r\n\r\n"
        String root = Base64.encode(infoHash.getRoot())
        os.write("HASHLIST $root\r\n".getBytes(StandardCharsets.US_ASCII))
        os.write("X-Persona: $meB64\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
        os.flush()

        String code = readTillRN(is)
        if (!code.startsWith("200"))
            throw new IOException("unknown code $code")

        // parse all headers; bounded by MAX_HEADERS so a hostile peer
        // cannot make us buffer headers forever
        Set<String> headers = new HashSet<>()
        String header
        while((header = readTillRN(is)) != "" && headers.size() < Constants.MAX_HEADERS)
            headers.add(header)

        // Content-Range must start at 0; the end value is treated as the
        // total byte length of the hash list.  If no Content-Range header
        // was present, receivedStart stays -1 and we fail below.
        long receivedStart = -1
        long receivedEnd = -1
        for (String receivedHeader : headers) {
            def group = (receivedHeader =~ /^Content-Range: (\d+)-(\d+)$/)
            if (group.size() != 1) {
                log.info("ignoring header $receivedHeader")
                continue
            }
            receivedStart = Long.parseLong(group[0][1])
            receivedEnd = Long.parseLong(group[0][2])
        }

        if (receivedStart != 0)
            throw new IOException("hashlist started at $receivedStart")

        // read exactly receivedEnd bytes of hash list
        byte[] hashList = new byte[receivedEnd]
        ByteBuffer hashListBuf = ByteBuffer.wrap(hashList)
        byte[] tmp = new byte[0x1 << 13]
        while(hashListBuf.hasRemaining()) {
            if (hashListBuf.remaining() > tmp.length)
                tmp = new byte[hashListBuf.remaining()]
            // bound the read to the bytes still expected: an unbounded
            // read(tmp) could return more than remaining() if the peer
            // pipelines data after the hash list, and the subsequent put()
            // would then throw BufferOverflowException
            int read = is.read(tmp, 0, Math.min(tmp.length, hashListBuf.remaining()))
            if (read == -1)
                throw new IOException()
            hashListBuf.put(tmp, 0, read)
        }

        InfoHash received = InfoHash.fromHashList(hashList)
        // Groovy's != on arrays compares element-wise, so this verifies the
        // recomputed root matches the one we asked for
        if (received.getRoot() != infoHash.getRoot())
            throw new IOException("fetched list doesn't match root")
        received
    }
}

View File

@ -1,64 +1,117 @@
package com.muwire.core.download
class Pieces {
private final BitSet bitSet
private final BitSet done, claimed
private final int nPieces
private final float ratio
private final Random random = new Random()
private final Map<Integer,Integer> partials = new HashMap<>()
Pieces(int nPieces) {
this(nPieces, 1.0f)
}
Pieces(int nPieces, float ratio) {
this.nPieces = nPieces
this.ratio = ratio
bitSet = new BitSet(nPieces)
done = new BitSet(nPieces)
claimed = new BitSet(nPieces)
}
synchronized int getRandomPiece() {
int cardinality = bitSet.cardinality()
if (cardinality == nPieces)
return -1
// if fuller than ratio just do sequential
if ( (1.0f * cardinality) / nPieces > ratio) {
return bitSet.nextClearBit(0)
synchronized int[] claim() {
int claimedCardinality = claimed.cardinality()
if (claimedCardinality == nPieces) {
// steal
int downloadedCardinality = done.cardinality()
if (downloadedCardinality == nPieces)
return null
int rv = done.nextClearBit(0)
return [rv, partials.getOrDefault(rv, 0), 1]
}
// if fuller than ratio just do sequential
if ( (1.0f * claimedCardinality) / nPieces >= ratio) {
int rv = claimed.nextClearBit(0)
claimed.set(rv)
return [rv, partials.getOrDefault(rv, 0), 0]
}
while(true) {
int start = random.nextInt(nPieces)
if (bitSet.get(start))
if (claimed.get(start))
continue
return start
claimed.set(start)
return [start, partials.getOrDefault(start,0), 0]
}
}
def getDownloaded() {
synchronized int[] claim(Set<Integer> available) {
for (int i = done.nextSetBit(0); i >= 0; i = done.nextSetBit(i+1))
available.remove(i)
if (available.isEmpty())
return null
Set<Integer> availableCopy = new HashSet<>(available)
for (int i = claimed.nextSetBit(0); i >= 0; i = claimed.nextSetBit(i+1))
availableCopy.remove(i)
if (availableCopy.isEmpty()) {
// steal
int rv = available.first()
return [rv, partials.getOrDefault(rv, 0), 1]
}
List<Integer> toList = availableCopy.toList()
if (ratio > 0f)
Collections.shuffle(toList)
int rv = toList[0]
claimed.set(rv)
[rv, partials.getOrDefault(rv, 0), 0]
}
synchronized def getDownloaded() {
def rv = []
for (int i = bitSet.nextSetBit(0); i >= 0; i = bitSet.nextSetBit(i+1)) {
for (int i = done.nextSetBit(0); i >= 0; i = done.nextSetBit(i+1)) {
rv << i
}
rv
}
synchronized void markDownloaded(int piece) {
bitSet.set(piece)
done.set(piece)
claimed.set(piece)
partials.remove(piece)
}
synchronized void clear(int piece) {
bitSet.clear(piece)
synchronized void markPartial(int piece, int position) {
partials.put(piece, position)
}
synchronized void unclaim(int piece) {
claimed.clear(piece)
}
synchronized boolean isComplete() {
bitSet.cardinality() == nPieces
done.cardinality() == nPieces
}
synchronized boolean isMarked(int piece) {
bitSet.get(piece)
}
synchronized int donePieces() {
bitSet.cardinality()
done.cardinality()
}
synchronized boolean isDownloaded(int piece) {
done.get(piece)
}
synchronized void clearAll() {
done.clear()
claimed.clear()
partials.clear()
}
synchronized void write(PrintWriter writer) {
for (int i = done.nextSetBit(0); i >= 0; i = done.nextSetBit(i+1)) {
writer.println(i)
}
partials.each { piece, position ->
writer.println("$piece,$position")
}
}
}

View File

@ -0,0 +1,10 @@
package com.muwire.core.download
import com.muwire.core.Event
import com.muwire.core.InfoHash
import com.muwire.core.Persona
/**
 * Event carrying a newly discovered source for a download.
 * Presumably published when a peer reports another persona that has the
 * given infohash — TODO confirm against the publisher of this event.
 */
class SourceDiscoveredEvent extends Event {
    InfoHash infoHash   // infohash the source was discovered for
    Persona source      // the persona offering the file
}

View File

@ -0,0 +1,7 @@
package com.muwire.core.download
import com.muwire.core.Event
/**
 * Event published when a download is cancelled from the UI.
 * Carries the downloader being cancelled.
 */
class UIDownloadCancelledEvent extends Event {
    Downloader downloader   // the download that was cancelled
}

View File

@ -3,8 +3,12 @@ package com.muwire.core.download
import com.muwire.core.Event
import com.muwire.core.search.UIResultEvent
import net.i2p.data.Destination
/**
 * Event published by the UI to start a new download.
 */
class UIDownloadEvent extends Event {
    UIResultEvent[] result      // search result(s) describing the file to fetch
    Set<Destination> sources    // known destinations to download from
    File target                 // local file the download should be saved to
    boolean sequential          // NOTE(review): presumably requests in-order piece selection — confirm against Downloader
}

View File

@ -0,0 +1,6 @@
package com.muwire.core.download
import com.muwire.core.Event
/**
 * Marker event (no payload) published when a download is paused via the UI.
 */
class UIDownloadPausedEvent extends Event {
}

View File

@ -0,0 +1,6 @@
package com.muwire.core.download
import com.muwire.core.Event
/**
 * Marker event (no payload) published when a paused download is resumed via the UI.
 */
class UIDownloadResumedEvent extends Event {
}

View File

@ -0,0 +1,6 @@
package com.muwire.core.files
import com.muwire.core.Event
/**
 * Marker event published once the persisted shared-file list has finished
 * loading.  Consumers use it to defer startup work until all files are known
 * (e.g. DirectoryWatcher starts its watch service and threads on this event).
 */
class AllFilesLoadedEvent extends Event {
}

View File

@ -0,0 +1,7 @@
package com.muwire.core.files
import com.muwire.core.Event
/**
 * Event published when a directory stops being shared.  Handlers cancel its
 * watch registration (DirectoryWatcher) and recursively unshare its contents
 * (FileManager).
 */
class DirectoryUnsharedEvent extends Event {
    File directory   // the directory being unshared
}

View File

@ -0,0 +1,166 @@
package com.muwire.core.files
import java.nio.file.FileSystem
import java.nio.file.FileSystems
import java.nio.file.Path
import java.nio.file.Paths
import static java.nio.file.StandardWatchEventKinds.*
import java.nio.file.ClosedWatchServiceException
import java.nio.file.WatchEvent
import java.nio.file.WatchKey
import java.nio.file.WatchService
import java.util.concurrent.ConcurrentHashMap
import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.SharedFile
import groovy.util.logging.Log
import net.i2p.util.SystemVersion
/**
 * Watches shared directories for file-system changes and translates them
 * into share/unshare events on the event bus.
 *
 * Two daemon threads are used: one blocks on the WatchService
 * ("directory-watcher"), the other periodically publishes files that have
 * been quiet for WAIT_TIME ms ("watched-files-publisher") so that files
 * still being written are not shared prematurely.
 */
@Log
class DirectoryWatcher {

    // quiet period (ms) a file must be unmodified before it is published,
    // and also the publisher thread's polling interval
    private static final long WAIT_TIME = 1000

    private static final WatchEvent.Kind[] kinds
    static {
        // ENTRY_CREATE is omitted on Mac — NOTE(review): presumably the
        // polling WatchService there surfaces new files via modify events;
        // confirm before changing
        if (SystemVersion.isMac())
            kinds = [ENTRY_MODIFY, ENTRY_DELETE]
        else
            kinds = [ENTRY_CREATE, ENTRY_MODIFY, ENTRY_DELETE]
    }

    private final File home                  // MuWire home dir, used to persist settings
    private final MuWireSettings muOptions
    private final EventBus eventBus
    private final FileManager fileManager
    private final Thread watcherThread, publisherThread
    // file -> last-event timestamp; files awaiting the quiet period
    private final Map<File, Long> waitingFiles = new ConcurrentHashMap<>()
    // canonical directory -> its WatchKey, so unsharing can cancel it
    private final Map<File, WatchKey> watchedDirectories = new ConcurrentHashMap<>()
    private WatchService watchService
    private volatile boolean shutdown

    DirectoryWatcher(EventBus eventBus, FileManager fileManager, File home, MuWireSettings muOptions) {
        this.home = home
        this.muOptions = muOptions
        this.eventBus = eventBus
        this.fileManager = fileManager
        this.watcherThread = new Thread({watch() } as Runnable, "directory-watcher")
        watcherThread.setDaemon(true)
        this.publisherThread = new Thread({publish()} as Runnable, "watched-files-publisher")
        publisherThread.setDaemon(true)
    }

    /** Starts watching only after all persisted files have been loaded. */
    void onAllFilesLoadedEvent(AllFilesLoadedEvent e) {
        watchService = FileSystems.getDefault().newWatchService()
        watcherThread.start()
        publisherThread.start()
    }

    /** Stops both threads and closes the watch service (if ever started). */
    void stop() {
        shutdown = true
        watcherThread?.interrupt()
        publisherThread?.interrupt()
        watchService?.close()
    }

    /**
     * Registers shared directories with the watch service (plain files are
     * ignored here).  Keys the registration by canonical path and persists
     * the directory into settings if it wasn't already there.
     */
    void onFileSharedEvent(FileSharedEvent e) {
        if (!e.file.isDirectory())
            return
        File canonical = e.file.getCanonicalFile()
        Path path = canonical.toPath()
        WatchKey wk = path.register(watchService, kinds)
        watchedDirectories.put(canonical, wk)

        if (muOptions.watchedDirectories.add(canonical.toString()))
            saveMuSettings()
    }

    /**
     * Cancels the watch on an unshared directory and removes it from settings.
     * NOTE(review): e.directory is used as the map key directly, while
     * registration keys on the canonical file — confirm callers always pass
     * canonical paths, otherwise the lookup misses.
     */
    void onDirectoryUnsharedEvent(DirectoryUnsharedEvent e) {
        WatchKey wk = watchedDirectories.remove(e.directory)
        wk?.cancel()

        if (muOptions.watchedDirectories.remove(e.directory.toString()))
            saveMuSettings()
    }

    /** Writes current settings to MuWire.properties in the home directory. */
    private void saveMuSettings() {
        File muSettingsFile = new File(home, "MuWire.properties")
        muSettingsFile.withOutputStream {
            muOptions.write(it)
        }
    }

    /**
     * Watch loop: blocks on the watch service and dispatches each event by
     * kind.  Interruption / service closure is expected during shutdown and
     * only rethrown if it happens while still running.
     */
    private void watch() {
        try {
            while(!shutdown) {
                WatchKey key = watchService.take()
                key.pollEvents().each {
                    switch(it.kind()) {
                        case ENTRY_CREATE: processCreated(key.watchable(), it.context()); break
                        case ENTRY_MODIFY: processModified(key.watchable(), it.context()); break
                        case ENTRY_DELETE: processDeleted(key.watchable(), it.context()); break
                    }
                }
                key.reset()
            }
        } catch (InterruptedException|ClosedWatchServiceException e) {
            if (!shutdown)
                throw e
        }
    }

    /**
     * New directory: register it for watching too.
     * NOTE(review): the new WatchKey is not recorded in watchedDirectories
     * or in settings, so it cannot be cancelled later — confirm intended.
     * New file: queue it for publishing after the quiet period.
     */
    private void processCreated(Path parent, Path path) {
        File f= join(parent, path)
        log.fine("created entry $f")
        if (f.isDirectory())
            f.toPath().register(watchService, kinds)
        else
            waitingFiles.put(f, System.currentTimeMillis())
    }

    /** Modified file: reset its quiet-period timestamp. */
    private void processModified(Path parent, Path path) {
        File f = join(parent, path)
        log.fine("modified entry $f")
        waitingFiles.put(f, System.currentTimeMillis())
    }

    /** Deleted file: if it was shared, publish an unshare event. */
    private void processDeleted(Path parent, Path path) {
        File f = join(parent, path)
        log.fine("deleted entry $f")
        SharedFile sf = fileManager.fileToSharedFile.get(f)
        if (sf != null)
            eventBus.publish(new FileUnsharedEvent(unsharedFile : sf))
    }

    /** Resolves a watch-event context against its watched parent, canonicalized. */
    private static File join(Path parent, Path path) {
        File parentFile = parent.toFile().getCanonicalFile()
        new File(parentFile, path.toFile().getName()).getCanonicalFile()
    }

    /**
     * Publisher loop: every WAIT_TIME ms, publishes a FileSharedEvent for
     * each waiting file that has been quiet longer than WAIT_TIME, then
     * removes it from the waiting map.
     */
    private void publish() {
        try {
            while(!shutdown) {
                Thread.sleep(WAIT_TIME)
                long now = System.currentTimeMillis()
                def published = []
                waitingFiles.each { file, timestamp ->
                    if (now - timestamp > WAIT_TIME) {
                        log.fine("publishing file $file")
                        eventBus.publish new FileSharedEvent(file : file)
                        published << file
                    }
                }
                published.each {
                    waitingFiles.remove(it)
                }
            }
        } catch (InterruptedException e) {
            if (!shutdown)
                throw e
        }
    }
}

View File

@ -2,10 +2,11 @@ package com.muwire.core.files
import com.muwire.core.DownloadedFile
import com.muwire.core.Event
import com.muwire.core.download.Downloader
import net.i2p.data.Destination
class FileDownloadedEvent extends Event {
DownloadedFile downloadedFile
Downloader downloader
DownloadedFile downloadedFile
}

View File

@ -5,6 +5,12 @@ import com.muwire.core.SharedFile
class FileHashedEvent extends Event {
SharedFile sharedFile
String error
SharedFile sharedFile
String error
@Override
public String toString() {
super.toString() + " sharedFile " + sharedFile?.file.getAbsolutePath() + " error: $error"
}
}

View File

@ -1,6 +1,10 @@
package com.muwire.core.files
import com.muwire.core.InfoHash
import com.muwire.core.util.DataUtil
import net.i2p.data.Base64
import java.nio.MappedByteBuffer
import java.nio.channels.FileChannel
import java.nio.channels.FileChannel.MapMode
@ -9,62 +13,79 @@ import java.security.NoSuchAlgorithmException
class FileHasher {
/** max size of shared file is 128 GB */
public static final long MAX_SIZE = 0x1L << 37
/**
* @param size of the file to be shared
* @return the size of each piece in power of 2
*/
static int getPieceSize(long size) {
if (size <= 0x1 << 25)
return 18
for (int i = 26; i <= 37; i++) {
if (size <= 0x1L << i) {
return i-7
}
}
throw new IllegalArgumentException("File too large $size")
}
final MessageDigest digest
FileHasher() {
try {
digest = MessageDigest.getInstance("SHA-256")
} catch (NoSuchAlgorithmException impossible) {
digest = null
System.exit(1)
}
}
InfoHash hashFile(File file) {
final long length = file.length()
final int size = 0x1 << getPieceSize(length)
int numPieces = (int) (length / size)
if (numPieces * size < length)
numPieces++
def output = new ByteArrayOutputStream()
RandomAccessFile raf = new RandomAccessFile(file, "r")
try {
MappedByteBuffer buf
for (int i = 0; i < numPieces - 1; i++) {
buf = raf.getChannel().map(MapMode.READ_ONLY, size * i, size)
digest.update buf
output.write(digest.digest(), 0, 32)
}
def lastPieceLength = length - (numPieces - 1) * size
buf = raf.getChannel().map(MapMode.READ_ONLY, length - lastPieceLength, lastPieceLength)
digest.update buf
output.write(digest.digest(), 0, 32)
} finally {
raf.close()
}
byte [] hashList = output.toByteArray()
InfoHash.fromHashList(hashList)
}
/** max size of shared file is 128 GB */
public static final long MAX_SIZE = 0x1L << 37
/**
* @param size of the file to be shared
* @return the size of each piece in power of 2
* piece size is minimum 128 KBytees and maximum 16 MBytes in power of 2 steps (2^17 - 2^24)
* there can be up to 8192 pieces maximum per file
*/
static int getPieceSize(long size) {
if (size <= 0x1 << 30)
return 17
for (int i = 31; i <= 37; i++) {
if (size <= 0x1L << i) {
return i-13
}
}
throw new IllegalArgumentException("File too large $size")
}
final MessageDigest digest
FileHasher() {
try {
digest = MessageDigest.getInstance("SHA-256")
} catch (NoSuchAlgorithmException impossible) {
digest = null
System.exit(1)
}
}
InfoHash hashFile(File file) {
final long length = file.length()
final int size = 0x1 << getPieceSize(length)
int numPieces = (int) (length / size)
if (numPieces * size < length)
numPieces++
def output = new ByteArrayOutputStream()
RandomAccessFile raf = new RandomAccessFile(file, "r")
try {
MappedByteBuffer buf
for (int i = 0; i < numPieces - 1; i++) {
buf = raf.getChannel().map(MapMode.READ_ONLY, ((long)size) * i, size)
digest.update buf
DataUtil.tryUnmap(buf)
output.write(digest.digest(), 0, 32)
}
def lastPieceLength = length - (numPieces - 1) * ((long)size)
buf = raf.getChannel().map(MapMode.READ_ONLY, length - lastPieceLength, lastPieceLength)
digest.update buf
output.write(digest.digest(), 0, 32)
} finally {
raf.close()
}
byte [] hashList = output.toByteArray()
InfoHash.fromHashList(hashList)
}
public static void main(String[] args) {
if (args.length != 1) {
println "This utility computes an infohash of a file"
println "Pass absolute path to a file as an argument"
System.exit(1)
}
def file = new File(args[0])
file = file.getAbsoluteFile()
def hasher = new FileHasher()
def infohash = hasher.hashFile(file)
println Base64.encode(infohash.getRoot())
}
}

View File

@ -0,0 +1,15 @@
package com.muwire.core.files
import com.muwire.core.Event
import com.muwire.core.SharedFile
/**
 * Event published just before a file is hashed for sharing, so the UI can
 * show hashing progress.
 */
class FileHashingEvent extends Event {

    File hashingFile   // the file currently being hashed

    @Override
    public String toString() {
        // NOTE(review): no null guard on hashingFile (unlike FileHashedEvent,
        // which uses ?.) — NPEs if the field was never set
        super.toString() + " hashingFile " + hashingFile.getAbsolutePath()
    }
}

View File

@ -5,5 +5,5 @@ import com.muwire.core.SharedFile
class FileLoadedEvent extends Event {
SharedFile loadedFile
SharedFile loadedFile
}

View File

@ -2,118 +2,205 @@ package com.muwire.core.files
import com.muwire.core.EventBus
import com.muwire.core.InfoHash
import com.muwire.core.MuWireSettings
import com.muwire.core.SharedFile
import com.muwire.core.UILoadedEvent
import com.muwire.core.search.ResultsEvent
import com.muwire.core.search.SearchEvent
import com.muwire.core.search.SearchIndex
import com.muwire.core.util.DataUtil
import groovy.util.logging.Log
import net.i2p.data.Base64
@Log
class FileManager {
final EventBus eventBus
final Map<InfoHash, Set<SharedFile>> rootToFiles = Collections.synchronizedMap(new HashMap<>())
final Map<File, SharedFile> fileToSharedFile = Collections.synchronizedMap(new HashMap<>())
final Map<String, Set<File>> nameToFiles = new HashMap<>()
final SearchIndex index = new SearchIndex()
FileManager(EventBus eventBus) {
this.eventBus = eventBus
}
void onFileHashedEvent(FileHashedEvent e) {
if (e.sharedFile != null)
addToIndex(e.sharedFile)
}
void onFileLoadedEvent(FileLoadedEvent e) {
addToIndex(e.loadedFile)
}
void onFileDownloadedEvent(FileDownloadedEvent e) {
addToIndex(e.downloadedFile)
}
private void addToIndex(SharedFile sf) {
final EventBus eventBus
final MuWireSettings settings
final Map<InfoHash, Set<SharedFile>> rootToFiles = Collections.synchronizedMap(new HashMap<>())
final Map<File, SharedFile> fileToSharedFile = Collections.synchronizedMap(new HashMap<>())
final Map<String, Set<File>> nameToFiles = new HashMap<>()
final Map<String, Set<File>> commentToFile = new HashMap<>()
final SearchIndex index = new SearchIndex()
FileManager(EventBus eventBus, MuWireSettings settings) {
this.settings = settings
this.eventBus = eventBus
}
void onFileHashedEvent(FileHashedEvent e) {
if (e.sharedFile != null)
addToIndex(e.sharedFile)
}
void onFileLoadedEvent(FileLoadedEvent e) {
addToIndex(e.loadedFile)
}
void onFileDownloadedEvent(FileDownloadedEvent e) {
if (settings.shareDownloadedFiles) {
addToIndex(e.downloadedFile)
}
}
private void addToIndex(SharedFile sf) {
log.info("Adding shared file " + sf.getFile())
InfoHash infoHash = sf.getInfoHash()
Set<SharedFile> existing = rootToFiles.get(infoHash)
if (existing == null) {
InfoHash infoHash = sf.getInfoHash()
Set<SharedFile> existing = rootToFiles.get(infoHash)
if (existing == null) {
log.info("adding new root")
existing = new HashSet<>()
rootToFiles.put(infoHash, existing);
}
existing.add(sf)
fileToSharedFile.put(sf.file, sf)
String name = sf.getFile().getName()
Set<File> existingFiles = nameToFiles.get(name)
if (existingFiles == null) {
existingFiles = new HashSet<>()
nameToFiles.put(name, existingFiles)
}
existingFiles.add(sf.getFile())
index.add(name)
}
void onFileUnsharedEvent(FileUnsharedEvent e) {
SharedFile sf = e.unsharedFile
InfoHash infoHash = sf.getInfoHash()
Set<SharedFile> existing = rootToFiles.get(infoHash)
if (existing != null) {
existing.remove(sf)
if (existing.isEmpty()) {
rootToFiles.remove(infoHash)
}
}
fileToSharedFile.remove(sf.file)
String name = sf.getFile().getName()
Set<File> existingFiles = nameToFiles.get(name)
if (existingFiles != null) {
existingFiles.remove(sf.file)
if (existingFiles.isEmpty()) {
nameToFiles.remove(name)
}
}
index.remove(name)
}
Map<File, SharedFile> getSharedFiles() {
existing = new HashSet<>()
rootToFiles.put(infoHash, existing);
}
existing.add(sf)
fileToSharedFile.put(sf.file, sf)
String name = sf.getFile().getName()
Set<File> existingFiles = nameToFiles.get(name)
if (existingFiles == null) {
existingFiles = new HashSet<>()
nameToFiles.put(name, existingFiles)
}
existingFiles.add(sf.getFile())
String comment = sf.getComment()
if (comment != null) {
comment = DataUtil.readi18nString(Base64.decode(comment))
index.add(comment)
Set<File> existingComment = commentToFile.get(comment)
if(existingComment == null) {
existingComment = new HashSet<>()
commentToFile.put(comment, existingComment)
}
existingComment.add(sf.getFile())
}
index.add(name)
}
void onFileUnsharedEvent(FileUnsharedEvent e) {
SharedFile sf = e.unsharedFile
InfoHash infoHash = sf.getInfoHash()
Set<SharedFile> existing = rootToFiles.get(infoHash)
if (existing != null) {
existing.remove(sf)
if (existing.isEmpty()) {
rootToFiles.remove(infoHash)
}
}
fileToSharedFile.remove(sf.file)
String name = sf.getFile().getName()
Set<File> existingFiles = nameToFiles.get(name)
if (existingFiles != null) {
existingFiles.remove(sf.file)
if (existingFiles.isEmpty()) {
nameToFiles.remove(name)
}
}
String comment = sf.getComment()
if (comment != null) {
Set<File> existingComment = commentToFile.get(comment)
if (existingComment != null) {
existingComment.remove(sf.getFile())
if (existingComment.isEmpty()) {
commentToFile.remove(comment)
index.remove(comment)
}
}
}
index.remove(name)
}
void onUICommentEvent(UICommentEvent e) {
if (e.oldComment != null) {
def comment = DataUtil.readi18nString(Base64.decode(e.oldComment))
Set<File> existingFiles = commentToFile.get(comment)
existingFiles.remove(e.sharedFile.getFile())
if (existingFiles.isEmpty()) {
commentToFile.remove(comment)
index.remove(comment)
}
}
String comment = e.sharedFile.getComment()
comment = DataUtil.readi18nString(Base64.decode(comment))
if (comment != null) {
index.add(comment)
Set<File> existingComment = commentToFile.get(comment)
if(existingComment == null) {
existingComment = new HashSet<>()
commentToFile.put(comment, existingComment)
}
existingComment.add(e.sharedFile.getFile())
}
}
Map<File, SharedFile> getSharedFiles() {
synchronized(fileToSharedFile) {
return new HashMap<>(fileToSharedFile)
}
}
}
Set<SharedFile> getSharedFiles(byte []root) {
return rootToFiles.get(new InfoHash(root))
}
void onSearchEvent(SearchEvent e) {
// hash takes precedence
ResultsEvent re = null
if (e.searchHash != null) {
void onSearchEvent(SearchEvent e) {
// hash takes precedence
ResultsEvent re = null
if (e.searchHash != null) {
Set<SharedFile> found
found = rootToFiles.get new InfoHash(e.searchHash)
if (found != null && !found.isEmpty())
re = new ResultsEvent(results: found.asList(), uuid: e.uuid)
} else {
def names = index.search e.searchTerms
Set<File> files = new HashSet<>()
names.each { files.addAll nameToFiles.getOrDefault(it, []) }
Set<SharedFile> sharedFiles = new HashSet<>()
files.each { sharedFiles.add fileToSharedFile[it] }
if (!sharedFiles.isEmpty())
re = new ResultsEvent(results: sharedFiles.asList(), uuid: e.uuid)
}
if (re != null)
eventBus.publish(re)
}
found = filter(found, e.oobInfohash)
if (found != null && !found.isEmpty())
re = new ResultsEvent(results: found.asList(), uuid: e.uuid, searchEvent: e)
} else {
def names = index.search e.searchTerms
Set<File> files = new HashSet<>()
names.each {
files.addAll nameToFiles.getOrDefault(it, [])
if (e.searchComments)
files.addAll commentToFile.getOrDefault(it, [])
}
Set<SharedFile> sharedFiles = new HashSet<>()
files.each { sharedFiles.add fileToSharedFile[it] }
files = filter(sharedFiles, e.oobInfohash)
if (!sharedFiles.isEmpty())
re = new ResultsEvent(results: sharedFiles.asList(), uuid: e.uuid, searchEvent: e)
}
if (re != null)
eventBus.publish(re)
}
private static Set<SharedFile> filter(Set<SharedFile> files, boolean oob) {
if (!oob)
return files
Set<SharedFile> rv = new HashSet<>()
files.each {
if (it.getPieceSize() != 0)
rv.add(it)
}
rv
}
void onDirectoryUnsharedEvent(DirectoryUnsharedEvent e) {
e.directory.listFiles().each {
if (it.isDirectory())
eventBus.publish(new DirectoryUnsharedEvent(directory : it))
else {
SharedFile sf = fileToSharedFile.get(it)
if (sf != null)
eventBus.publish(new FileUnsharedEvent(unsharedFile : sf))
}
}
}
}

View File

@ -4,5 +4,10 @@ import com.muwire.core.Event
class FileSharedEvent extends Event {
File file
File file
@Override
public String toString() {
return super.toString() + " file: "+file.getAbsolutePath()
}
}

View File

@ -4,5 +4,5 @@ import com.muwire.core.Event
import com.muwire.core.SharedFile
class FileUnsharedEvent extends Event {
SharedFile unsharedFile
SharedFile unsharedFile
}

View File

@ -4,40 +4,60 @@ import java.util.concurrent.Executor
import java.util.concurrent.Executors
import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.SharedFile
class HasherService {
final FileHasher hasher
final EventBus eventBus
Executor executor
HasherService(FileHasher hasher, EventBus eventBus) {
this.hasher = hasher
this.eventBus = eventBus
}
void start() {
executor = Executors.newSingleThreadExecutor()
}
void onFileSharedEvent(FileSharedEvent evt) {
executor.execute( { -> process(evt.file) } as Runnable)
}
private void process(File f) {
f = f.getCanonicalFile()
if (f.isDirectory()) {
f.listFiles().each {onFileSharedEvent new FileSharedEvent(file: it) }
} else {
if (f.length() == 0) {
eventBus.publish new FileHashedEvent(error: "Not sharing empty file $f")
} else if (f.length() > FileHasher.MAX_SIZE) {
eventBus.publish new FileHashedEvent(error: "$f is too large to be shared ${f.length()}")
} else {
def hash = hasher.hashFile f
eventBus.publish new FileHashedEvent(sharedFile: new SharedFile(f, hash))
}
}
}
final FileHasher hasher
final EventBus eventBus
final FileManager fileManager
final Set<File> hashed = new HashSet<>()
final MuWireSettings settings
Executor executor
HasherService(FileHasher hasher, EventBus eventBus, FileManager fileManager, MuWireSettings settings) {
this.hasher = hasher
this.eventBus = eventBus
this.fileManager = fileManager
this.settings = settings
}
void start() {
executor = Executors.newSingleThreadExecutor()
}
void onFileSharedEvent(FileSharedEvent evt) {
File canonical = evt.file.getCanonicalFile()
if (!settings.shareHiddenFiles && canonical.isHidden())
return
if (fileManager.fileToSharedFile.containsKey(canonical))
return
if (hashed.add(canonical))
executor.execute( { -> process(canonical) } as Runnable)
}
void onFileUnsharedEvent(FileUnsharedEvent evt) {
hashed.remove(evt.unsharedFile.file)
}
void onDirectoryUnsharedEvent(DirectoryUnsharedEvent evt) {
hashed.remove(evt.directory)
}
private void process(File f) {
if (f.isDirectory()) {
f.listFiles().each {eventBus.publish new FileSharedEvent(file: it) }
} else {
if (f.length() == 0) {
eventBus.publish new FileHashedEvent(error: "Not sharing empty file $f")
} else if (f.length() > FileHasher.MAX_SIZE) {
eventBus.publish new FileHashedEvent(error: "$f is too large to be shared ${f.length()}")
} else {
eventBus.publish new FileHashingEvent(hashingFile: f)
def hash = hasher.hashFile f
eventBus.publish new FileHashedEvent(sharedFile: new SharedFile(f, hash, FileHasher.getPieceSize(f.length())))
}
}
}
}

View File

@ -1,5 +1,11 @@
package com.muwire.core.files
import java.nio.file.CopyOption
import java.nio.file.Files
import java.nio.file.StandardCopyOption
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.concurrent.ThreadFactory
import java.util.logging.Level
import java.util.stream.Collectors
@ -8,6 +14,7 @@ import com.muwire.core.EventBus
import com.muwire.core.InfoHash
import com.muwire.core.Service
import com.muwire.core.SharedFile
import com.muwire.core.UILoadedEvent
import com.muwire.core.util.DataUtil
import groovy.json.JsonOutput
@ -19,122 +26,148 @@ import net.i2p.data.Destination
@Log
class PersisterService extends Service {
final File location
final EventBus listener
final int interval
final Timer timer
final FileManager fileManager
PersisterService(File location, EventBus listener, int interval, FileManager fileManager) {
this.location = location
this.listener = listener
this.interval = interval
this.fileManager = fileManager
timer = new Timer("file persister", true)
}
void start() {
timer.schedule({load()} as TimerTask, 1000)
}
void stop() {
timer.cancel()
}
void load() {
if (location.exists() && location.isFile()) {
def slurper = new JsonSlurper()
try {
location.eachLine {
if (it.trim().length() > 0) {
def parsed = slurper.parseText it
def event = fromJson parsed
if (event != null) {
// Location of the persisted shared-file list and collaborators.
final File location
final EventBus listener
final int interval
final Timer timer
final FileManager fileManager
// dedicated single thread so disk writes never run on the event-dispatch path
final ExecutorService persisterExecutor = Executors.newSingleThreadExecutor({ r ->
new Thread(r, "file persister")
} as ThreadFactory)
// interval: period in ms between periodic persistFiles() runs (scheduled in load())
PersisterService(File location, EventBus listener, int interval, FileManager fileManager) {
this.location = location
this.listener = listener
this.interval = interval
this.fileManager = fileManager
timer = new Timer("file persister timer", true)
}
void stop() {
timer.cancel()
}
// loading of the persisted file list starts only once the UI reports loaded
void onUILoadedEvent(UILoadedEvent e) {
timer.schedule({load()} as TimerTask, 1)
}
// forces an immediate persist, e.g. right after something is unshared
void onUIPersistFilesEvent(UIPersistFilesEvent e) {
persistFiles()
}
void load() {
Thread.currentThread().setPriority(Thread.MIN_PRIORITY)
if (location.exists() && location.isFile()) {
int loaded = 0
def slurper = new JsonSlurper()
try {
location.eachLine {
if (it.trim().length() > 0) {
def parsed = slurper.parseText it
def event = fromJson parsed
if (event != null) {
log.fine("loaded file $event.loadedFile.file")
listener.publish event
}
}
}
} catch (IllegalArgumentException|NumberFormatException e) {
listener.publish event
loaded++
if (loaded % 10 == 0)
Thread.sleep(20)
}
}
}
listener.publish(new AllFilesLoadedEvent())
} catch (IllegalArgumentException|NumberFormatException e) {
log.log(Level.WARNING, "couldn't load files",e)
}
}
timer.schedule({persistFiles()} as TimerTask, 0, interval)
loaded = true
}
private static FileLoadedEvent fromJson(def json) {
if (json.file == null || json.length == null || json.infoHash == null || json.hashList == null)
throw new IllegalArgumentException()
if (!(json.hashList instanceof List))
throw new IllegalArgumentException()
def file = new File(DataUtil.readi18nString(Base64.decode(json.file)))
file = file.getCanonicalFile()
if (!file.exists() || file.isDirectory())
return null
long length = Long.valueOf(json.length)
if (length != file.length())
return null
List hashList = (List) json.hashList
ByteArrayOutputStream baos = new ByteArrayOutputStream()
hashList.each {
byte [] hash = Base64.decode it.toString()
if (hash == null)
throw new IllegalArgumentException()
baos.write hash
}
byte[] hashListBytes = baos.toByteArray()
InfoHash ih = InfoHash.fromHashList(hashListBytes)
byte [] root = Base64.decode(json.infoHash.toString())
if (root == null)
throw new IllegalArgumentException()
if (!Arrays.equals(root, ih.getRoot()))
return null
if (json.sources != null) {
List sources = (List)json.sources
Set<Destination> sourceSet = sources.stream().map({d -> new Destination(d.toString())}).collect Collectors.toSet()
DownloadedFile df = new DownloadedFile(file, ih, sourceSet)
return new FileLoadedEvent(loadedFile : df)
}
SharedFile sf = new SharedFile(file, ih)
return new FileLoadedEvent(loadedFile: sf)
}
private void persistFiles() {
location.delete()
def sharedFiles = fileManager.getSharedFiles()
location.withPrintWriter { writer ->
sharedFiles.each { k, v ->
def json = toJson(k,v)
json = JsonOutput.toJson(json)
writer.println json
}
}
}
private def toJson(File f, SharedFile sf) {
def json = [:]
json.file = Base64.encode DataUtil.encodei18nString(f.getCanonicalFile().toString())
json.length = f.length()
InfoHash ih = sf.getInfoHash()
json.infoHash = Base64.encode ih.getRoot()
byte [] tmp = new byte [32]
json.hashList = []
for (int i = 0;i < ih.getHashList().length / 32; i++) {
System.arraycopy(ih.getHashList(), i * 32, tmp, 0, 32)
json.hashList.add Base64.encode(tmp)
}
if (sf instanceof DownloadedFile) {
json.sources = sf.sources.stream().map( {d -> d.toBase64()}).collect(Collectors.toList())
}
json
}
}
} else {
listener.publish(new AllFilesLoadedEvent())
}
timer.schedule({persistFiles()} as TimerTask, 0, interval)
loaded = true
}
// Reconstructs a FileLoadedEvent from one persisted JSON record.
// Returns null when the record is stale (file missing, length changed, or
// hash-root mismatch); throws IllegalArgumentException on malformed JSON.
private static FileLoadedEvent fromJson(def json) {
if (json.file == null || json.length == null || json.infoHash == null || json.hashList == null)
throw new IllegalArgumentException()
if (!(json.hashList instanceof List))
throw new IllegalArgumentException()
// the file path is stored base64-encoded to survive non-ASCII names
def file = new File(DataUtil.readi18nString(Base64.decode(json.file)))
file = file.getCanonicalFile()
if (!file.exists() || file.isDirectory())
return null
long length = Long.valueOf(json.length)
if (length != file.length())
return null
// rebuild the concatenated piece-hash list and verify it still matches
// the stored root hash
List hashList = (List) json.hashList
ByteArrayOutputStream baos = new ByteArrayOutputStream()
hashList.each {
byte [] hash = Base64.decode it.toString()
if (hash == null)
throw new IllegalArgumentException()
baos.write hash
}
byte[] hashListBytes = baos.toByteArray()
InfoHash ih = InfoHash.fromHashList(hashListBytes)
byte [] root = Base64.decode(json.infoHash.toString())
if (root == null)
throw new IllegalArgumentException()
if (!Arrays.equals(root, ih.getRoot()))
return null
// pieceSize may be missing from records written before it existed; 0 then
int pieceSize = 0
if (json.pieceSize != null)
pieceSize = json.pieceSize
if (json.sources != null) {
// a record with sources represents a file we downloaded; keep its sources
List sources = (List)json.sources
Set<Destination> sourceSet = sources.stream().map({d -> new Destination(d.toString())}).collect Collectors.toSet()
DownloadedFile df = new DownloadedFile(file, ih, pieceSize, sourceSet)
df.setComment(json.comment)
return new FileLoadedEvent(loadedFile : df)
}
SharedFile sf = new SharedFile(file, ih, pieceSize)
sf.setComment(json.comment)
return new FileLoadedEvent(loadedFile: sf)
}
/**
 * Writes the current shared-file list to disk on the persister thread.
 * The list is first written to a temp file and then copied over the real
 * location, so a crash mid-write cannot truncate the existing file.
 *
 * Fix: the returned Future was discarded, so any exception thrown inside
 * the task (temp-file creation, serialization, the copy) vanished silently
 * and persisting would stop without a trace.  Failures are now logged.
 */
private void persistFiles() {
    persisterExecutor.submit( {
        try {
            def sharedFiles = fileManager.getSharedFiles()
            File tmp = File.createTempFile("muwire-files", "tmp")
            tmp.deleteOnExit()
            tmp.withPrintWriter { writer ->
                sharedFiles.each { k, v ->
                    def json = toJson(k,v)
                    json = JsonOutput.toJson(json)
                    writer.println json
                }
            }
            Files.copy(tmp.toPath(), location.toPath(), StandardCopyOption.REPLACE_EXISTING)
            tmp.delete()
        } catch (Exception bad) {
            log.log(Level.WARNING, "failed to persist files", bad)
        }
    } as Runnable)
}
/**
 * Serializes one shared file to a JSON-ready map.  All values (name,
 * length, root, hash list) come pre-encoded from the SharedFile itself.
 *
 * Fix: removed the unused local `InfoHash ih = sf.getInfoHash()` — a
 * leftover from the old path that encoded the root from the InfoHash
 * directly; the root now comes from getB64EncodedHashRoot().
 */
private def toJson(File f, SharedFile sf) {
    def json = [:]
    json.file = sf.getB64EncodedFileName()
    json.length = sf.getCachedLength()
    json.infoHash = sf.getB64EncodedHashRoot()
    json.pieceSize = sf.getPieceSize()
    json.hashList = sf.getB64EncodedHashList()
    json.comment = sf.getComment()
    if (sf instanceof DownloadedFile) {
        json.sources = sf.sources.stream().map( {d -> d.toBase64()}).collect(Collectors.toList())
    }
    json
}
}

View File

@ -0,0 +1,9 @@
package com.muwire.core.files
import com.muwire.core.Event
import com.muwire.core.SharedFile
// UI -> core event: the user edited the comment attached to a shared file.
// oldComment carries the previous value; the new value is presumably read
// from sharedFile itself -- TODO confirm against the handler.
class UICommentEvent extends Event {
// the file whose comment was edited
SharedFile sharedFile
// the comment text before the edit
String oldComment
}

View File

@ -0,0 +1,6 @@
package com.muwire.core.files
import com.muwire.core.Event
// UI -> core event: request that the shared-file list be persisted to disk
// immediately (handled by PersisterService.onUIPersistFilesEvent).
class UIPersistFilesEvent extends Event {
}

View File

@ -17,177 +17,177 @@ import net.i2p.data.Destination
@Log
class CacheClient {
private static final int CRAWLER_RETURN = 10
final EventBus eventBus
final HostCache cache
final ConnectionManager manager
final I2PSession session
final long interval
final MuWireSettings settings
final Timer timer
public CacheClient(EventBus eventBus, HostCache cache,
ConnectionManager manager, I2PSession session,
MuWireSettings settings, long interval) {
this.eventBus = eventBus
this.cache = cache
this.manager = manager
this.session = session
this.settings = settings
this.interval = interval
this.timer = new Timer("hostcache-client",true)
}
void start() {
session.addMuxedSessionListener(new Listener(), I2PSession.PROTO_DATAGRAM, 0)
timer.schedule({queryIfNeeded()} as TimerTask, 1, interval)
}
void stop() {
timer.cancel()
}
private void queryIfNeeded() {
if (!manager.getConnections().isEmpty())
return
if (!cache.getHosts(1).isEmpty())
return
log.info "Will query hostcaches"
def ping = [type: "Ping", version: 1, leaf: settings.isLeaf()]
ping = JsonOutput.toJson(ping)
def maker = new I2PDatagramMaker(session)
ping = maker.makeI2PDatagram(ping.bytes)
def options = new SendMessageOptions()
options.setSendLeaseSet(true)
CacheServers.getCacheServers().each {
log.info "Querying hostcache ${it.toBase32()}"
session.sendMessage(it, ping, 0, ping.length, I2PSession.PROTO_DATAGRAM, 1, 0, options)
}
}
class Listener implements I2PSessionMuxedListener {
private final JsonSlurper slurper = new JsonSlurper()
private static final int CRAWLER_RETURN = 10
@Override
public void messageAvailable(I2PSession session, int msgId, long size) {
}
final EventBus eventBus
final HostCache cache
final ConnectionManager manager
final I2PSession session
final long interval
final MuWireSettings settings
final Timer timer
@Override
public void messageAvailable(I2PSession session, int msgId, long size, int proto, int fromport, int toport) {
if (proto != I2PSession.PROTO_DATAGRAM) {
log.warning "Received unexpected protocol $proto"
return
}
def payload = session.receiveMessage(msgId)
def dissector = new I2PDatagramDissector()
try {
dissector.loadI2PDatagram(payload)
def sender = dissector.getSender()
log.info("Received something from ${sender.toBase32()}")
payload = dissector.getPayload()
payload = slurper.parse(payload)
if (payload.type == null) {
log.warning("type missing")
return
}
switch(payload.type) {
case "Pong" : handlePong(sender, payload); break
case "CrawlerPing": handleCrawlerPing(session, sender, payload); break
default : log.warning("unknown type ${payload.type}")
}
} catch (Exception e) {
log.warning("Invalid datagram $e")
}
}
public CacheClient(EventBus eventBus, HostCache cache,
ConnectionManager manager, I2PSession session,
MuWireSettings settings, long interval) {
this.eventBus = eventBus
this.cache = cache
this.manager = manager
this.session = session
this.settings = settings
this.interval = interval
this.timer = new Timer("hostcache-client",true)
}
@Override
public void reportAbuse(I2PSession session, int severity) {
}
void start() {
session.addMuxedSessionListener(new Listener(), I2PSession.PROTO_DATAGRAM, 0)
timer.schedule({queryIfNeeded()} as TimerTask, 1, interval)
}
@Override
public void disconnected(I2PSession session) {
log.severe "I2P session disconnected"
}
void stop() {
timer.cancel()
}
@Override
public void errorOccurred(I2PSession session, String message, Throwable error) {
log.severe "I2P error occured $message $error"
}
}
private void handlePong(Destination from, def pong) {
if (!CacheServers.isRegistered(from)) {
log.warning("received pong from non-registered destination")
return
}
if (pong.pongs == null) {
log.warning("malformed pong - no pongs")
return
}
pong.pongs.asList().each {
Destination dest = new Destination(it)
if (!session.getMyDestination().equals(dest))
eventBus.publish(new HostDiscoveredEvent(destination: dest))
}
}
private void handleCrawlerPing(I2PSession session, Destination from, def ping) {
if (settings.isLeaf()) {
log.warning("Received crawler ping but I'm a leaf")
return
}
switch(settings.getCrawlerResponse()) {
case CrawlerResponse.NONE:
log.info("Responding to crawlers is disabled by user")
break
case CrawlerResponse.ALL:
respondToCrawler(session, from, ping)
break;
case CrawlerResponse.REGISTERED:
if (CacheServers.isRegistered(from))
respondToCrawler(session, from, ping)
else
log.warning("Ignoring crawler ping from non-registered crawler")
break
}
}
private void respondToCrawler(I2PSession session, Destination from, def ping) {
log.info "responding to crawler ping"
def neighbors = manager.getConnections().collect { c -> c.endpoint.destination.toBase64() }
Collections.shuffle(neighbors)
if (neighbors.size() > CRAWLER_RETURN)
neighbors = neighbors[0..CRAWLER_RETURN - 1]
def upManager = (UltrapeerConnectionManager) manager;
def pong = [:]
pong.peers = neighbors
pong.uuid = ping.uuid
pong.type = "CrawlerPong"
pong.version = 1
pong.leafSlots = upManager.hasLeafSlots()
pong.peerSlots = upManager.hasPeerSlots()
pong = JsonOutput.toJson(pong)
def maker = new I2PDatagramMaker(session)
pong = maker.makeI2PDatagram(pong.bytes)
session.sendMessage(from, pong, I2PSession.PROTO_DATAGRAM, 0, 0)
}
/**
 * Periodic task: pings the known hostcache servers, but only when we have
 * neither live connections nor any known hosts to try.
 */
private void queryIfNeeded() {
    // nothing to do while we are connected or still have candidates
    if (!manager.getConnections().isEmpty() || !cache.getHosts(1).isEmpty())
        return
    log.info "Will query hostcaches"
    String json = JsonOutput.toJson([type: "Ping", version: 1, leaf: settings.isLeaf()])
    def maker = new I2PDatagramMaker(session)
    byte[] datagram = maker.makeI2PDatagram(json.bytes)
    def options = new SendMessageOptions()
    options.setSendLeaseSet(true)
    CacheServers.getCacheServers().each {
        log.info "Querying hostcache ${it.toBase32()}"
        session.sendMessage(it, datagram, 0, datagram.length, I2PSession.PROTO_DATAGRAM, 1, 0, options)
    }
}
// Receives raw I2P datagrams for this client and dispatches hostcache
// "Pong" and crawler "CrawlerPing" messages.  Malformed datagrams are
// logged and dropped.
class Listener implements I2PSessionMuxedListener {
private final JsonSlurper slurper = new JsonSlurper()
@Override
public void messageAvailable(I2PSession session, int msgId, long size) {
// intentionally empty: only the proto-aware overload below is used
}
@Override
public void messageAvailable(I2PSession session, int msgId, long size, int proto, int fromport, int toport) {
if (proto != I2PSession.PROTO_DATAGRAM) {
log.warning "Received unexpected protocol $proto"
return
}
def payload = session.receiveMessage(msgId)
def dissector = new I2PDatagramDissector()
try {
// the dissector authenticates the datagram and exposes the sender
dissector.loadI2PDatagram(payload)
def sender = dissector.getSender()
log.info("Received something from ${sender.toBase32()}")
payload = dissector.getPayload()
payload = slurper.parse(payload)
if (payload.type == null) {
log.warning("type missing")
return
}
switch(payload.type) {
case "Pong" : handlePong(sender, payload); break
case "CrawlerPing": handleCrawlerPing(session, sender, payload); break
default : log.warning("unknown type ${payload.type}")
}
} catch (Exception e) {
// anything unparseable is treated as a hostile/corrupt datagram
log.warning("Invalid datagram $e")
}
}
@Override
public void reportAbuse(I2PSession session, int severity) {
// intentionally empty
}
@Override
public void disconnected(I2PSession session) {
log.severe "I2P session disconnected"
}
@Override
public void errorOccurred(I2PSession session, String message, Throwable error) {
log.severe "I2P error occured $message $error"
}
}
// Handles a "Pong" reply from a hostcache: every advertised destination is
// republished as a HostDiscoveredEvent flagged fromHostcache, which lets
// HostCache clear failure counters for hosts it already knows.
private void handlePong(Destination from, def pong) {
// only the hardcoded, registered cache servers may feed us hosts
if (!CacheServers.isRegistered(from)) {
log.warning("received pong from non-registered destination")
return
}
if (pong.pongs == null) {
log.warning("malformed pong - no pongs")
return
}
pong.pongs.asList().each {
Destination dest = new Destination(it)
// never add our own destination
if (!session.getMyDestination().equals(dest))
eventBus.publish(new HostDiscoveredEvent(destination: dest, fromHostcache : true))
}
}
// Handles a crawler's ping.  Leaves never respond; otherwise the user's
// CrawlerResponse setting decides: never, always, or registered crawlers only.
private void handleCrawlerPing(I2PSession session, Destination from, def ping) {
if (settings.isLeaf()) {
log.warning("Received crawler ping but I'm a leaf")
return
}
switch(settings.getCrawlerResponse()) {
case CrawlerResponse.NONE:
log.info("Responding to crawlers is disabled by user")
break
case CrawlerResponse.ALL:
respondToCrawler(session, from, ping)
break;
case CrawlerResponse.REGISTERED:
if (CacheServers.isRegistered(from))
respondToCrawler(session, from, ping)
else
log.warning("Ignoring crawler ping from non-registered crawler")
break
}
}
// Sends a CrawlerPong datagram back: up to CRAWLER_RETURN randomly chosen
// neighbor destinations plus our current leaf/peer slot availability,
// echoing the ping's uuid so the crawler can correlate the reply.
private void respondToCrawler(I2PSession session, Destination from, def ping) {
log.info "responding to crawler ping"
def neighbors = manager.getConnections().collect { c -> c.endpoint.destination.toBase64() }
Collections.shuffle(neighbors)
if (neighbors.size() > CRAWLER_RETURN)
neighbors = neighbors[0..CRAWLER_RETURN - 1]
// only ultrapeers reach this point (leaves bail out in handleCrawlerPing)
def upManager = (UltrapeerConnectionManager) manager;
def pong = [:]
pong.peers = neighbors
pong.uuid = ping.uuid
pong.type = "CrawlerPong"
pong.version = 1
pong.leafSlots = upManager.hasLeafSlots()
pong.peerSlots = upManager.hasPeerSlots()
pong = JsonOutput.toJson(pong)
def maker = new I2PDatagramMaker(session)
pong = maker.makeI2PDatagram(pong.bytes)
session.sendMessage(from, pong, I2PSession.PROTO_DATAGRAM, 0, 0)
}
}

View File

@ -4,20 +4,25 @@ import net.i2p.data.Destination
class CacheServers {
private static final int TO_GIVE = 3
private static Set<Destination> CACHES = [
new Destination("Wddh2E6FyyXBF7SvUYHKdN-vjf3~N6uqQWNeBDTM0P33YjiQCOsyedrjmDZmWFrXUJfJLWnCb5bnKezfk4uDaMyj~uvDG~yvLVcFgcPWSUd7BfGgym-zqcG1q1DcM8vfun-US7YamBlmtC6MZ2j-~Igqzmgshita8aLPCfNAA6S6e2UMjjtG7QIXlxpMec75dkHdJlVWbzrk9z8Qgru3YIk0UztYgEwDNBbm9wInsbHhr3HtAfa02QcgRVqRN2PnQXuqUJs7R7~09FZPEviiIcUpkY3FeyLlX1sgQFBeGeA96blaPvZNGd6KnNdgfLgMebx5SSxC-N4KZMSMBz5cgonQF3~m2HHFRSI85zqZNG5X9bJN85t80ltiv1W1es8ZnQW4es11r7MrvJNXz5bmSH641yJIvS6qI8OJJNpFVBIQSXLD-96TayrLQPaYw~uNZ-eXaE6G5dYhiuN8xHsFI1QkdaUaVZnvDGfsRbpS5GtpUbBDbyLkdPurG0i7dN1wAAAA")
]
private static final int TO_GIVE = 3
private static Set<Destination> CACHES = [
// zlatinb
new Destination("Wddh2E6FyyXBF7SvUYHKdN-vjf3~N6uqQWNeBDTM0P33YjiQCOsyedrjmDZmWFrXUJfJLWnCb5bnKezfk4uDaMyj~uvDG~yvLVcFgcPWSUd7BfGgym-zqcG1q1DcM8vfun-US7YamBlmtC6MZ2j-~Igqzmgshita8aLPCfNAA6S6e2UMjjtG7QIXlxpMec75dkHdJlVWbzrk9z8Qgru3YIk0UztYgEwDNBbm9wInsbHhr3HtAfa02QcgRVqRN2PnQXuqUJs7R7~09FZPEviiIcUpkY3FeyLlX1sgQFBeGeA96blaPvZNGd6KnNdgfLgMebx5SSxC-N4KZMSMBz5cgonQF3~m2HHFRSI85zqZNG5X9bJN85t80ltiv1W1es8ZnQW4es11r7MrvJNXz5bmSH641yJIvS6qI8OJJNpFVBIQSXLD-96TayrLQPaYw~uNZ-eXaE6G5dYhiuN8xHsFI1QkdaUaVZnvDGfsRbpS5GtpUbBDbyLkdPurG0i7dN1wAAAA"),
// sNL
new Destination("JC63wJNOqSJmymkj4~UJWywBTvDGikKMoYP0HX2Wz9c5l3otXSkwnxWAFL4cKr~Ygh3BNNi2t93vuLIiI1W8AsE42kR~PwRx~Y-WvIHXR6KUejRmOp-n8WidtjKg9k4aDy428uSOedqXDxys5mpoeQXwDsv1CoPTTwnmb1GWFy~oTGIsCguCl~aJWGnqiKarPO3GJQ~ev-NbvAQzUfC3HeP1e6pdI5CGGjExahTCID5UjpJw8GaDXWlGmYWWH303Xu4x-vAHQy1dJLsOBCn8dZravsn5BKJk~j0POUon45CCx-~NYtaPe0Itt9cMdD2ciC76Rep1D0X0sm1SjlSs8sZ52KmF3oaLZ6OzgI9QLMIyBUrfi41sK5I0qTuUVBAkvW1xr~L-20dYJ9TrbOaOb2-vDIfKaxVi6xQOuhgQDiSBhd3qv2m0xGu-BM9DQYfNA0FdMjnZmqjmji9RMavzQSsVFIbQGLbrLepiEFlb7TseCK5UtRp8TxnG7L4gbYevBQAEAAcAAA=="),
// dark_trion
new Destination("Gec9L29FVcQvYDgpcYuEYdltJn06PPoOWAcAM8Af-gDm~ehlrJcwlLXXs0hidq~yP2A0X7QcDi6i6shAfuEofTchxGJl8LRNqj9lio7WnB7cIixXWL~uCkD7Np5LMX0~akNX34oOb9RcBYVT2U5rFGJmJ7OtBv~IBkGeLhsMrqaCjahd0jdBO~QJ-t82ZKZhh044d24~JEfF9zSJxdBoCdAcXzryGNy7sYtFVDFsPKJudAxSW-UsSQiGw2~k-TxyF0r-iAt1IdzfNu8Lu0WPqLdhDYJWcPldx2PR5uJorI~zo~z3I5RX3NwzarlbD4nEP5s65ahPSfVCEkzmaJUBgP8DvBqlFaX89K4nGRYc7jkEjJ8cX4L6YPXUpTPWcfKkW259WdQY3YFh6x7rzijrGZewpczOLCrt-bZRYgDrUibmZxKZmNhy~lQu4gYVVjkz1i4tL~DWlhIc4y0x2vItwkYLArPPi~ejTnt-~Lhb7oPMXRcWa3UrwGKpFvGZY4NXBQAEAAcAAA==")
]
static List<Destination> getCacheServers() {
List<Destination> allCaches = new ArrayList<>(CACHES)
Collections.shuffle(allCaches)
if (allCaches.size() <= TO_GIVE)
return allCaches
allCaches[0..TO_GIVE-1]
}
static boolean isRegistered(Destination d) {
return CACHES.contains(d)
}
/**
 * Returns up to TO_GIVE hostcache servers, chosen uniformly at random.
 */
static List<Destination> getCacheServers() {
    List<Destination> shuffled = new ArrayList<>(CACHES)
    Collections.shuffle(shuffled)
    // take() returns the whole list when it holds fewer than TO_GIVE entries
    shuffled.take(TO_GIVE)
}
// True iff the destination is one of the hardcoded hostcache servers.
static boolean isRegistered(Destination d) {
return CACHES.contains(d)
}
}

View File

@ -4,30 +4,67 @@ import net.i2p.data.Destination
class Host {
private static final int MAX_FAILURES = 3
final Destination destination
int failures,successes
public Host(Destination destination) {
this.destination = destination
}
private static final int MAX_FAILURES = 3
synchronized void onConnect() {
failures = 0
successes++
}
synchronized void onFailure() {
failures++
successes = 0
}
synchronized boolean isFailed() {
failures >= MAX_FAILURES
}
synchronized boolean hasSucceeded() {
successes > 0
}
final Destination destination
private final int clearInterval, hopelessInterval, rejectionInterval
int failures,successes
long lastAttempt
long lastSuccessfulAttempt
long lastRejection
// All three intervals are in minutes (converted via *60*1000 in the
// predicates below): clearInterval gates retry of failed hosts,
// hopelessInterval marks a host as not worth keeping, rejectionInterval
// is the cool-down after a rejection.
public Host(Destination destination, int clearInterval, int hopelessInterval, int rejectionInterval) {
this.destination = destination
this.clearInterval = clearInterval
this.hopelessInterval = hopelessInterval
this.rejectionInterval = rejectionInterval
}
// Shared bookkeeping for any attempt that reached the host (accepted or
// rejected).  Not synchronized itself: only called from the synchronized
// onConnect()/onReject() below.
private void connectSuccessful() {
failures = 0
successes++
lastAttempt = System.currentTimeMillis()
}
// Full success: the host accepted our connection.
synchronized void onConnect() {
connectSuccessful()
lastSuccessfulAttempt = lastAttempt
}
// The host was reachable but refused us: counts as alive (resets failures)
// but starts the rejection cool-down instead of the success timestamp.
synchronized void onReject() {
connectSuccessful()
lastRejection = lastAttempt;
}
// A failed attempt wipes the success streak and counts toward MAX_FAILURES.
synchronized void onFailure() {
failures++
successes = 0
lastAttempt = System.currentTimeMillis()
}
// Failed after MAX_FAILURES consecutive failures with no success in between.
synchronized boolean isFailed() {
failures >= MAX_FAILURES
}
// True while the latest attempts are successes (successes resets on failure).
synchronized boolean hasSucceeded() {
successes > 0
}
// Forgive past failures, e.g. when a hostcache re-advertises this host
// (see HostCache.onHostDiscoveredEvent).
synchronized void clearFailures() {
failures = 0
}
// A host that succeeded at least once may be retried after clearInterval
// minutes have passed since the last attempt of any kind.
synchronized boolean canTryAgain() {
lastSuccessfulAttempt > 0 &&
System.currentTimeMillis() - lastAttempt > (clearInterval * 60 * 1000)
}
// Failed AND no success for hopelessInterval minutes: such hosts are not
// persisted (see HostCache.save()).
synchronized boolean isHopeless() {
isFailed() &&
System.currentTimeMillis() - lastSuccessfulAttempt > (hopelessInterval * 60 * 1000)
}
// True while the rejection cool-down (rejectionInterval minutes) is active.
synchronized boolean isRecentlyRejected() {
System.currentTimeMillis() - lastRejection < (rejectionInterval * 60 * 1000)
}
}

View File

@ -15,134 +15,151 @@ import net.i2p.data.Destination
class HostCache extends Service {
final TrustService trustService
final File storage
final int interval
final Timer timer
final MuWireSettings settings
final Destination myself
final Map<Destination, Host> hosts = new ConcurrentHashMap<>()
HostCache(){}
public HostCache(TrustService trustService, File storage, int interval,
MuWireSettings settings, Destination myself) {
this.trustService = trustService
this.storage = storage
this.interval = interval
this.settings = settings
this.myself = myself
this.timer = new Timer("host-persister",true)
}
final TrustService trustService
final File storage
final int interval
final Timer timer
final MuWireSettings settings
final Destination myself
final Map<Destination, Host> hosts = new ConcurrentHashMap<>()
void start() {
timer.schedule({load()} as TimerTask, 1)
}
void stop() {
timer.cancel()
}
void onHostDiscoveredEvent(HostDiscoveredEvent e) {
if (myself == e.destination)
return
if (hosts.containsKey(e.destination))
return
Host host = new Host(e.destination)
if (allowHost(host)) {
hosts.put(e.destination, host)
}
}
void onConnectionEvent(ConnectionEvent e) {
if (e.leaf)
return
Destination dest = e.endpoint.destination
Host host = hosts.get(dest)
if (host == null) {
host = new Host(dest)
hosts.put(dest, host)
}
HostCache(){}
switch(e.status) {
case ConnectionAttemptStatus.SUCCESSFUL:
case ConnectionAttemptStatus.REJECTED:
host.onConnect()
break
case ConnectionAttemptStatus.FAILED:
host.onFailure()
break
}
}
List<Destination> getHosts(int n) {
List<Destination> rv = new ArrayList<>(hosts.keySet())
rv.retainAll {allowHost(hosts[it])}
if (rv.size() <= n)
return rv
Collections.shuffle(rv)
rv[0..n-1]
}
List<Destination> getGoodHosts(int n) {
List<Destination> rv = new ArrayList<>(hosts.keySet())
rv.retainAll {
Host host = hosts[it]
allowHost(host) && host.hasSucceeded()
}
if (rv.size() <= n)
return rv
Collections.shuffle(rv)
rv[0..n-1]
}
void load() {
if (storage.exists()) {
JsonSlurper slurper = new JsonSlurper()
storage.eachLine {
def entry = slurper.parseText(it)
Destination dest = new Destination(entry.destination)
Host host = new Host(dest)
host.failures = Integer.valueOf(String.valueOf(entry.failures))
host.successes = Integer.valueOf(String.valueOf(entry.successes))
if (allowHost(host))
hosts.put(dest, host)
}
}
timer.schedule({save()} as TimerTask, interval, interval)
loaded = true
}
private boolean allowHost(Host host) {
if (host.isFailed())
return false
if (host.destination == myself)
return false
TrustLevel trust = trustService.getLevel(host.destination)
switch(trust) {
case TrustLevel.DISTRUSTED :
return false
case TrustLevel.TRUSTED :
return true
case TrustLevel.NEUTRAL :
return settings.allowUntrusted()
}
false
}
private void save() {
storage.delete()
storage.withPrintWriter { writer ->
hosts.each { dest, host ->
if (allowHost(host)) {
def map = [:]
map.destination = dest.toBase64()
map.failures = host.failures
map.successes = host.successes
def json = JsonOutput.toJson(map)
writer.println json
}
}
}
}
// interval is the period between saves of the host list -- passed straight
// to Timer.schedule in load(), so it is in milliseconds.  Persisting runs
// on the daemon "host-persister" timer thread.
public HostCache(TrustService trustService, File storage, int interval,
MuWireSettings settings, Destination myself) {
this.trustService = trustService
this.storage = storage
this.interval = interval
this.settings = settings
this.myself = myself
this.timer = new Timer("host-persister",true)
}
void start() {
timer.schedule({load()} as TimerTask, 1)
}
void stop() {
timer.cancel()
}
// Adds a newly discovered host.  For hosts we already know, only a
// hostcache-sourced rediscovery has an effect: it clears the failure
// counter, giving the host another chance.
void onHostDiscoveredEvent(HostDiscoveredEvent e) {
if (myself == e.destination)
return
if (hosts.containsKey(e.destination)) {
if (!e.fromHostcache)
return
hosts.get(e.destination).clearFailures()
return
}
Host host = new Host(e.destination, settings.hostClearInterval, settings.hostHopelessInterval, settings.hostRejectInterval)
if (allowHost(host)) {
hosts.put(e.destination, host)
}
}
// Updates per-host stats after a connection attempt.  Leaf connections are
// not tracked.  A REJECTED attempt still proves the host is alive, so it is
// recorded via onReject (resets failures) rather than onFailure.
void onConnectionEvent(ConnectionEvent e) {
if (e.leaf)
return
Destination dest = e.endpoint.destination
Host host = hosts.get(dest)
if (host == null) {
host = new Host(dest, settings.hostClearInterval, settings.hostHopelessInterval, settings.hostRejectInterval)
hosts.put(dest, host)
}
switch(e.status) {
case ConnectionAttemptStatus.SUCCESSFUL:
host.onConnect()
break
case ConnectionAttemptStatus.REJECTED:
host.onReject()
break
case ConnectionAttemptStatus.FAILED:
host.onFailure()
break
}
}
/**
 * Returns up to n random connectable hosts: allowed by the trust/self
 * filter, not stuck in failure backoff, and not recently rejected.
 */
List<Destination> getHosts(int n) {
    List<Destination> candidates = new ArrayList<>(hosts.keySet())
    // single filtering pass combining the allow, backoff and rejection checks
    candidates.retainAll {
        Host h = hosts[it]
        allowHost(h) && !(h.isFailed() && !h.canTryAgain()) && !h.isRecentlyRejected()
    }
    if (candidates.size() <= n)
        return candidates
    Collections.shuffle(candidates)
    candidates[0..n-1]
}
// Like getHosts but restricted to hosts that pass the trust filter AND have
// connected successfully at least once.
List<Destination> getGoodHosts(int n) {
List<Destination> rv = new ArrayList<>(hosts.keySet())
rv.retainAll {
Host host = hosts[it]
allowHost(host) && host.hasSucceeded()
}
if (rv.size() <= n)
return rv
Collections.shuffle(rv)
rv[0..n-1]
}
// Loads persisted hosts from storage (one JSON object per line) and then
// schedules periodic saves.  The three timestamp fields are optional so
// files written before they existed still load.
void load() {
if (storage.exists()) {
JsonSlurper slurper = new JsonSlurper()
storage.eachLine {
def entry = slurper.parseText(it)
Destination dest = new Destination(entry.destination)
Host host = new Host(dest, settings.hostClearInterval, settings.hostHopelessInterval, settings.hostRejectInterval)
host.failures = Integer.valueOf(String.valueOf(entry.failures))
host.successes = Integer.valueOf(String.valueOf(entry.successes))
if (entry.lastAttempt != null)
host.lastAttempt = entry.lastAttempt
if (entry.lastSuccessfulAttempt != null)
host.lastSuccessfulAttempt = entry.lastSuccessfulAttempt
if (entry.lastRejection != null)
host.lastRejection = entry.lastRejection
if (allowHost(host))
hosts.put(dest, host)
}
}
timer.schedule({save()} as TimerTask, interval, interval)
loaded = true
}
// Trust/self filter only: ourselves and distrusted hosts are never allowed;
// neutral hosts depend on the allowUntrusted setting.  Failure state is
// deliberately NOT checked here -- failed hosts are filtered in getHosts()
// and hopeless ones in save().
private boolean allowHost(Host host) {
if (host.destination == myself)
return false
TrustLevel trust = trustService.getLevel(host.destination)
switch(trust) {
case TrustLevel.DISTRUSTED :
return false
case TrustLevel.TRUSTED :
return true
case TrustLevel.NEUTRAL :
return settings.allowUntrusted()
}
false
}
// Rewrites storage with all allowed, non-hopeless hosts as JSON lines,
// including attempt timestamps so backoff state survives restarts.
private void save() {
storage.delete()
storage.withPrintWriter { writer ->
hosts.each { dest, host ->
if (allowHost(host) && !host.isHopeless()) {
def map = [:]
map.destination = dest.toBase64()
map.failures = host.failures
map.successes = host.successes
map.lastAttempt = host.lastAttempt
map.lastSuccessfulAttempt = host.lastSuccessfulAttempt
map.lastRejection = host.lastRejection
def json = JsonOutput.toJson(map)
writer.println json
}
}
}
}
}

View File

@ -6,10 +6,11 @@ import net.i2p.data.Destination
class HostDiscoveredEvent extends Event {
Destination destination
@Override
public String toString() {
"HostDiscoveredEvent ${super.toString()} destination:${destination.toBase32()}"
}
Destination destination
boolean fromHostcache
@Override
public String toString() {
"HostDiscoveredEvent ${super.toString()} destination:${destination.toBase32()} from hostcache $fromHostcache"
}
}

View File

@ -0,0 +1,28 @@
package com.muwire.core.mesh
import com.muwire.core.InfoHash
import com.muwire.core.Persona
import com.muwire.core.download.Pieces
import net.i2p.data.Destination
import net.i2p.util.ConcurrentHashSet
/**
 * A download mesh for a single infohash: the set of known sources plus the
 * shared piece-availability state.
 */
class Mesh {
    private final InfoHash infoHash
    private final Set<Persona> sources = new ConcurrentHashSet<>()
    private final Pieces pieces

    Mesh(InfoHash infoHash, Pieces pieces) {
        this.infoHash = infoHash
        this.pieces = pieces
    }

    /**
     * Returns up to n randomly chosen sources, never including the excluded
     * persona.
     *
     * Fix: the original evaluated tmp[0..n-1] when n == 0; in Groovy the
     * range 0..-1 indexes from the end, returning the FULL list for a
     * non-empty one and throwing for an empty one.  n <= 0 now always
     * yields an empty collection.
     */
    Set<Persona> getRandom(int n, Persona exclude) {
        if (n <= 0)
            return Collections.emptySet()
        List<Persona> tmp = new ArrayList<>(sources)
        tmp.remove(exclude)
        Collections.shuffle(tmp)
        if (tmp.size() < n)
            return tmp
        tmp[0..n-1]
    }
}

View File

@ -0,0 +1,103 @@
package com.muwire.core.mesh
import java.util.stream.Collectors
import com.muwire.core.Constants
import com.muwire.core.InfoHash
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.download.Pieces
import com.muwire.core.download.SourceDiscoveredEvent
import com.muwire.core.files.FileManager
import com.muwire.core.util.DataUtil
import groovy.json.JsonOutput
import groovy.json.JsonSlurper
import net.i2p.data.Base64
// Tracks download meshes per InfoHash: the known sources and the piece
// bitmap.  Meshes are persisted to mesh.json in the home directory so that
// sources survive restarts, and entries older than settings.meshExpiration
// minutes are dropped on load.
class MeshManager {
private final Map<InfoHash, Mesh> meshes = Collections.synchronizedMap(new HashMap<>())
private final FileManager fileManager
private final File home
private final MuWireSettings settings
MeshManager(FileManager fileManager, File home, MuWireSettings settings) {
this.fileManager = fileManager
this.home = home
this.settings = settings
load()
}
Mesh get(InfoHash infoHash) {
meshes.get(infoHash)
}
// Returns the existing mesh or creates one.  If we already share the file
// locally (present in fileManager.rootToFiles) every piece is marked
// downloaded immediately.
Mesh getOrCreate(InfoHash infoHash, int nPieces, boolean sequential) {
synchronized(meshes) {
if (meshes.containsKey(infoHash))
return meshes.get(infoHash)
// sequential downloads force ratio 0f -- assumes Pieces treats 0 as
// strictly in-order selection; TODO confirm Pieces semantics
float ratio = sequential ? 0f : settings.downloadSequentialRatio
Pieces pieces = new Pieces(nPieces, ratio)
if (fileManager.rootToFiles.containsKey(infoHash)) {
for (int i = 0; i < nPieces; i++)
pieces.markDownloaded(i)
}
Mesh rv = new Mesh(infoHash, pieces)
meshes.put(infoHash, rv)
return rv
}
}
// NOTE(review): persists the whole mesh file on every discovered source;
// fine while meshes are small, may warrant batching if they grow.
void onSourceDiscoveredEvent(SourceDiscoveredEvent e) {
Mesh mesh = meshes.get(e.infoHash)
if (mesh == null)
return
mesh.sources.add(e.source)
save()
}
// Writes one JSON object per mesh: timestamp, infohash root, sources,
// piece count and the X-Have piece bitmap.
private void save() {
File meshFile = new File(home, "mesh.json")
synchronized(meshes) {
meshFile.withPrintWriter { writer ->
meshes.values().each { mesh ->
def json = [:]
json.timestamp = System.currentTimeMillis()
json.infoHash = Base64.encode(mesh.infoHash.getRoot())
json.sources = mesh.sources.stream().map({it.toBase64()}).collect(Collectors.toList())
json.nPieces = mesh.pieces.nPieces
json.xHave = DataUtil.encodeXHave(mesh.pieces.downloaded, mesh.pieces.nPieces)
writer.println(JsonOutput.toJson(json))
}
}
}
}
private void load() {
File meshFile = new File(home, "mesh.json")
if (!meshFile.exists())
return
long now = System.currentTimeMillis()
JsonSlurper slurper = new JsonSlurper()
meshFile.eachLine {
def json = slurper.parseText(it)
// skip expired entries (meshExpiration is in minutes)
if (now - json.timestamp > settings.meshExpiration * 60 * 1000)
return
InfoHash infoHash = new InfoHash(Base64.decode(json.infoHash))
Pieces pieces = new Pieces(json.nPieces, settings.downloadSequentialRatio)
Mesh mesh = new Mesh(infoHash, pieces)
json.sources.each { source ->
Persona persona = new Persona(new ByteArrayInputStream(Base64.decode(source)))
mesh.sources.add(persona)
}
if (json.xHave != null)
DataUtil.decodeXHave(json.xHave).each { pieces.markDownloaded(it) }
// meshes with no usable sources are not worth keeping
if (!mesh.sources.isEmpty())
meshes.put(infoHash, mesh)
}
}
}

View File

@ -0,0 +1,86 @@
package com.muwire.core.search
import com.muwire.core.Constants
import com.muwire.core.EventBus
import com.muwire.core.connection.Endpoint
import com.muwire.core.connection.I2PConnector
import com.muwire.core.util.DataUtil
import groovy.json.JsonSlurper
import groovy.util.logging.Log
import java.nio.charset.StandardCharsets
import java.util.concurrent.Executor
import java.util.concurrent.Executors
import java.util.logging.Level
@Log
// Client side of browse-host: connects to a remote host, sends a BROWSE
// request and republishes each returned JSON record as a UIResultEvent.
// Progress and failure are reported through BrowseStatusEvents.  All work
// runs on a single dedicated browser thread, so browses queue up.
class BrowseManager {
private final I2PConnector connector
private final EventBus eventBus
private final Executor browserThread = Executors.newSingleThreadExecutor()
BrowseManager(I2PConnector connector, EventBus eventBus) {
this.connector = connector
this.eventBus = eventBus
}
void onUIBrowseEvent(UIBrowseEvent e) {
browserThread.execute({
Endpoint endpoint = null
try {
eventBus.publish(new BrowseStatusEvent(status : BrowseStatus.CONNECTING))
endpoint = connector.connect(e.host.destination)
OutputStream os = endpoint.getOutputStream()
os.write("BROWSE\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
InputStream is = endpoint.getInputStream()
String code = DataUtil.readTillRN(is)
// expect an HTTP-like "200 ..." status line
if (!code.startsWith("200"))
throw new IOException("Invalid code")
// parse all headers
Map<String,String> headers = new HashMap<>()
String header
while((header = DataUtil.readTillRN(is)) != "" && headers.size() < Constants.MAX_HEADERS) {
int colon = header.indexOf(':')
if (colon == -1 || colon == header.length() - 1)
throw new IOException("invalid header $header")
String key = header.substring(0, colon)
String value = header.substring(colon + 1)
headers[key] = value.trim()
}
// Count header tells us how many result records follow
if (!headers.containsKey("Count"))
throw new IOException("No count header")
int results = Integer.parseInt(headers['Count'])
// at this stage, start pulling the results
eventBus.publish(new BrowseStatusEvent(status : BrowseStatus.FETCHING))
JsonSlurper slurper = new JsonSlurper()
DataInputStream dis = new DataInputStream(is)
UUID uuid = UUID.randomUUID()
// each record is a 2-byte unsigned length followed by that many
// bytes of JSON; all results share one freshly generated uuid
for (int i = 0; i < results; i++) {
int size = dis.readUnsignedShort()
byte [] tmp = new byte[size]
dis.readFully(tmp)
def json = slurper.parse(tmp)
UIResultEvent result = ResultsParser.parse(e.host, uuid, json)
eventBus.publish(result)
}
eventBus.publish(new BrowseStatusEvent(status : BrowseStatus.FINISHED))
} catch (Exception bad) {
log.log(Level.WARNING, "browse failed", bad)
eventBus.publish(new BrowseStatusEvent(status : BrowseStatus.FAILED))
} finally {
// always release the I2P endpoint, even on failure
endpoint?.close()
}
} as Runnable)
}
}

View File

@ -0,0 +1,5 @@
package com.muwire.core.search;
/** Lifecycle states of a "browse host" operation, in chronological order. */
public enum BrowseStatus {
    /** Establishing the I2P connection to the remote host. */
    CONNECTING,
    /** Connected; results are being streamed and parsed. */
    FETCHING,
    /** Every advertised result was received. */
    FINISHED,
    /** The browse aborted due to a connection or protocol error. */
    FAILED
}

View File

@ -0,0 +1,7 @@
package com.muwire.core.search
import com.muwire.core.Event
// Published by BrowseManager to report the progress of a browse operation
// (CONNECTING -> FETCHING -> FINISHED, or FAILED on error).
class BrowseStatusEvent extends Event {
// the stage the browse has reached
BrowseStatus status
}

View File

@ -6,11 +6,11 @@ import net.i2p.data.Base32
import net.i2p.data.Destination
class DeleteEvent extends Event {
byte [] infoHash
Destination leaf
@Override
public String toString() {
"DeleteEvent ${super.toString()} infoHash:${Base32.encode(infoHash)} leaf:${leaf.toBase32()}"
}
byte [] infoHash
Destination leaf
@Override
public String toString() {
"DeleteEvent ${super.toString()} infoHash:${Base32.encode(infoHash)} leaf:${leaf.toBase32()}"
}
}

View File

@ -21,5 +21,5 @@ class InvalidSearchResultException extends Exception {
super(cause);
// TODO Auto-generated constructor stub
}
}

View File

@ -6,33 +6,33 @@ import com.muwire.core.connection.UltrapeerConnectionManager
import net.i2p.data.Destination
class LeafSearcher {
final UltrapeerConnectionManager connectionManager
final SearchIndex searchIndex = new SearchIndex()
final Map<String, Set<byte[]>> fileNameToHashes = new HashMap<>()
final Map<byte[], Set<Destination>> hashToLeafs = new HashMap<>()
final Map<Destination, Map<byte[], Set<String>>> leafToFiles = new HashMap<>()
LeafSearcher(UltrapeerConnectionManager connectionManager) {
this.connectionManager = connectionManager
}
void onUpsertEvent(UpsertEvent e) {
// TODO: implement
}
void onDeleteEvent(DeleteEvent e) {
// TODO: implement
}
void onDisconnectionEvent(DisconnectionEvent e) {
// TODO: implement
}
void onQueryEvent(QueryEvent e) {
// TODO: implement
}
final UltrapeerConnectionManager connectionManager
final SearchIndex searchIndex = new SearchIndex()
final Map<String, Set<byte[]>> fileNameToHashes = new HashMap<>()
final Map<byte[], Set<Destination>> hashToLeafs = new HashMap<>()
final Map<Destination, Map<byte[], Set<String>>> leafToFiles = new HashMap<>()
LeafSearcher(UltrapeerConnectionManager connectionManager) {
this.connectionManager = connectionManager
}
void onUpsertEvent(UpsertEvent e) {
// TODO: implement
}
void onDeleteEvent(DeleteEvent e) {
// TODO: implement
}
void onDisconnectionEvent(DisconnectionEvent e) {
// TODO: implement
}
void onQueryEvent(QueryEvent e) {
// TODO: implement
}
}

View File

@ -6,11 +6,15 @@ import com.muwire.core.Persona
import net.i2p.data.Destination
class QueryEvent extends Event {
SearchEvent searchEvent
boolean firstHop
Destination replyTo
Persona originator
Destination receivedOn
SearchEvent searchEvent
boolean firstHop
Destination replyTo
Persona originator
Destination receivedOn
String toString() {
"searchEvent: $searchEvent firstHop:$firstHop, replyTo:${replyTo.toBase32()}" +
"originator: ${originator.getHumanReadableName()} receivedOn: ${receivedOn.toBase32()}"
}
}

View File

@ -5,6 +5,7 @@ import com.muwire.core.SharedFile
class ResultsEvent extends Event {
SharedFile[] results
UUID uuid
SearchEvent searchEvent
SharedFile[] results
UUID uuid
}

View File

@ -1,5 +1,7 @@
package com.muwire.core.search
import java.util.stream.Collectors
import javax.naming.directory.InvalidSearchControlsException
import com.muwire.core.InfoHash
@ -7,13 +9,25 @@ import com.muwire.core.Persona
import com.muwire.core.util.DataUtil
import net.i2p.data.Base64
import net.i2p.data.Destination
class ResultsParser {
public static UIResultEvent parse(Persona p, UUID uuid, def json) throws InvalidSearchResultException {
if (json.type != "Result")
throw new InvalidSearchResultException("not a result json")
if (json.version != 1)
throw new InvalidSearchResultException("unknown version $json.version")
switch(json.version) {
case 1:
return parseV1(p, uuid, json)
case 2:
return parseV2(p, uuid, json)
default:
throw new InvalidSearchResultException("unknown version $json.version")
}
}
private static parseV1(Persona p, UUID uuid, def json) {
if (json.name == null)
throw new InvalidSearchResultException("name missing")
if (json.size == null)
@ -41,15 +55,61 @@ class ResultsParser {
InfoHash parsedIH = InfoHash.fromHashList(hashList)
if (parsedIH.getRoot() != infoHash)
throw new InvalidSearchControlsException("infohash root doesn't match")
return new UIResultEvent( sender : p,
name : name,
size : size,
infohash : parsedIH,
pieceSize : pieceSize,
sources : Collections.emptySet(),
uuid : uuid)
} catch (Exception e) {
throw new InvalidSearchResultException("parsing search result failed",e)
}
}
/**
 * Parses a version 2 search result.  V2 results carry only the root
 * infohash (no hash list) and may additionally carry sources, a comment
 * and a browse flag.
 *
 * @param p    sender of the result
 * @param uuid search this result answers
 * @param json deserialized result object
 * @throws InvalidSearchResultException if a mandatory field is missing or malformed
 */
private static UIResultEvent parseV2(Persona p, UUID uuid, def json) {
    if (json.name == null)
        throw new InvalidSearchResultException("name missing")
    if (json.size == null)
        throw new InvalidSearchResultException("length missing")
    if (json.infohash == null)
        throw new InvalidSearchResultException("infohash missing")
    if (json.pieceSize == null)
        throw new InvalidSearchResultException("pieceSize missing")
    if (json.hashList != null)
        throw new InvalidSearchResultException("V2 result with hashlist")
    try {
        String name = DataUtil.readi18nString(Base64.decode(json.name))
        long size = json.size
        byte [] infoHash = Base64.decode(json.infohash)
        if (infoHash.length != InfoHash.SIZE)
            throw new InvalidSearchResultException("invalid infohash size $infoHash.length")
        int pieceSize = json.pieceSize
        Set<Destination> sources = Collections.emptySet()
        if (json.sources != null)
            sources = json.sources.stream().map({new Destination(it)}).collect(Collectors.toSet())
        String comment = null
        if (json.comment != null)
            comment = DataUtil.readi18nString(Base64.decode(json.comment))
        boolean browse = false
        if (json.browse != null)
            browse = json.browse // FIX: honor the advertised value; previously the mere
                                 // presence of the key (even "browse":false) enabled browsing
        return new UIResultEvent( sender : p,
            name : name,
            size : size,
            infohash : new InfoHash(infoHash),
            pieceSize : pieceSize,
            sources : sources,
            comment : comment,
            browse : browse,
            uuid: uuid)
    } catch (Exception e) {
        throw new InvalidSearchResultException("parsing search result failed",e)
    }
}
}

View File

@ -4,6 +4,7 @@ import com.muwire.core.SharedFile
import com.muwire.core.connection.Endpoint
import com.muwire.core.connection.I2PConnector
import com.muwire.core.files.FileHasher
import com.muwire.core.util.DataUtil
import com.muwire.core.Persona
import java.nio.charset.StandardCharsets
@ -11,9 +12,13 @@ import java.util.concurrent.Executor
import java.util.concurrent.Executors
import java.util.concurrent.ThreadFactory
import java.util.concurrent.atomic.AtomicInteger
import java.util.logging.Level
import java.util.stream.Collectors
import com.muwire.core.DownloadedFile
import com.muwire.core.EventBus
import com.muwire.core.InfoHash
import com.muwire.core.MuWireSettings
import groovy.json.JsonOutput
import groovy.util.logging.Log
@ -22,9 +27,9 @@ import net.i2p.data.Destination
@Log
class ResultsSender {
private static final AtomicInteger THREAD_NO = new AtomicInteger()
private final Executor executor = Executors.newCachedThreadPool(
new ThreadFactory() {
@Override
@ -35,83 +40,107 @@ class ResultsSender {
rv
}
})
private final I2PConnector connector
private final Persona me
private final EventBus eventBus
ResultsSender(EventBus eventBus, I2PConnector connector, Persona me) {
private final MuWireSettings settings
ResultsSender(EventBus eventBus, I2PConnector connector, Persona me, MuWireSettings settings) {
this.connector = connector;
this.eventBus = eventBus
this.me = me
this.settings = settings
}
void sendResults(UUID uuid, SharedFile[] results, Destination target) {
log.info("Sending $results.length results for uuid $uuid to ${target.toBase32()}")
void sendResults(UUID uuid, SharedFile[] results, Destination target, boolean oobInfohash) {
log.info("Sending $results.length results for uuid $uuid to ${target.toBase32()} oobInfohash : $oobInfohash")
if (target.equals(me.destination)) {
results.each {
results.each {
long length = it.getFile().length()
int pieceSize = it.getPieceSize()
if (pieceSize == 0)
pieceSize = FileHasher.getPieceSize(length)
Set<Destination> suggested = Collections.emptySet()
if (it instanceof DownloadedFile)
suggested = it.sources
def comment = null
if (it.getComment() != null) {
comment = DataUtil.readi18nString(Base64.decode(it.getComment()))
}
def uiResultEvent = new UIResultEvent( sender : me,
name : it.getFile().getName(),
size : length,
infohash : it.getInfoHash(),
pieceSize : FileHasher.getPieceSize(length),
uuid : uuid
pieceSize : pieceSize,
uuid : uuid,
sources : suggested,
comment : comment
)
eventBus.publish(uiResultEvent)
}
} else {
executor.execute(new ResultSendJob(uuid : uuid, results : results, target: target))
executor.execute(new ResultSendJob(uuid : uuid, results : results,
target: target, oobInfohash : oobInfohash))
}
}
private class ResultSendJob implements Runnable {
UUID uuid
SharedFile [] results
Destination target
boolean oobInfohash
@Override
public void run() {
byte [] tmp = new byte[InfoHash.SIZE]
JsonOutput jsonOutput = new JsonOutput()
Endpoint endpoint = null;
try {
endpoint = connector.connect(target)
DataOutputStream os = new DataOutputStream(endpoint.getOutputStream())
os.write("POST $uuid\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
me.write(os)
os.writeShort((short)results.length)
results.each {
byte [] name = it.getFile().getName().getBytes(StandardCharsets.UTF_8)
def baos = new ByteArrayOutputStream()
def daos = new DataOutputStream(baos)
daos.writeShort((short) name.length)
daos.write(name)
daos.flush()
String encodedName = Base64.encode(baos.toByteArray())
def obj = [:]
obj.type = "Result"
obj.version = 1
obj.name = encodedName
obj.infohash = Base64.encode(it.getInfoHash().getRoot())
obj.size = it.getFile().length()
obj.pieceSize = FileHasher.getPieceSize(it.getFile().length())
byte [] hashList = it.getInfoHash().getHashList()
def hashListB64 = []
for (int i = 0; i < hashList.length / InfoHash.SIZE; i++) {
System.arraycopy(hashList, InfoHash.SIZE * i, tmp, 0, InfoHash.SIZE)
hashListB64 << Base64.encode(tmp)
JsonOutput jsonOutput = new JsonOutput()
Endpoint endpoint = null;
try {
endpoint = connector.connect(target)
DataOutputStream os = new DataOutputStream(endpoint.getOutputStream())
os.write("POST $uuid\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
me.write(os)
os.writeShort((short)results.length)
results.each {
def obj = sharedFileToObj(it, settings.browseFiles)
def json = jsonOutput.toJson(obj)
os.writeShort((short)json.length())
os.write(json.getBytes(StandardCharsets.US_ASCII))
}
obj.hashList = hashListB64
def json = jsonOutput.toJson(obj)
os.writeShort((short)json.length())
os.write(json.getBytes(StandardCharsets.US_ASCII))
os.flush()
} finally {
endpoint?.close()
}
os.flush()
} finally {
endpoint?.close()
} catch (Exception e) {
log.log(Level.WARNING, "problem sending results",e)
}
}
}
// Converts a SharedFile into the map that is serialized as a version 2
// search result.  browseFiles is this node's setting on whether its files
// may be browsed; it is advertised verbatim in the result.
public static def sharedFileToObj(SharedFile sf, boolean browseFiles) {
// file name is written length-prefixed (unsigned short) and then Base64-encoded
byte [] name = sf.getFile().getName().getBytes(StandardCharsets.UTF_8)
def baos = new ByteArrayOutputStream()
def daos = new DataOutputStream(baos)
daos.writeShort((short) name.length)
daos.write(name)
daos.flush()
String encodedName = Base64.encode(baos.toByteArray())
def obj = [:]
obj.type = "Result"
obj.version = 2
obj.name = encodedName
obj.infohash = Base64.encode(sf.getInfoHash().getRoot())
obj.size = sf.getCachedLength()
obj.pieceSize = sf.getPieceSize()
// known alternate sources exist only for files this node downloaded itself
if (sf instanceof DownloadedFile)
obj.sources = sf.sources.stream().map({dest -> dest.toBase64()}).collect(Collectors.toSet())
if (sf.getComment() != null)
obj.comment = sf.getComment()
obj.browse = browseFiles
obj
}
}

View File

@ -1,10 +1,20 @@
package com.muwire.core.search
import com.muwire.core.Event
import com.muwire.core.InfoHash
class SearchEvent extends Event {
List<String> searchTerms
byte [] searchHash
UUID uuid
List<String> searchTerms
byte [] searchHash
UUID uuid
boolean oobInfohash
boolean searchComments
String toString() {
def infoHash = null
if (searchHash != null)
infoHash = new InfoHash(searchHash)
"searchTerms: $searchTerms searchHash:$infoHash, uuid:$uuid oobInfohash:$oobInfohash searchComments:$searchComments"
}
}

View File

@ -1,56 +1,59 @@
package com.muwire.core.search
import com.muwire.core.Constants
import com.muwire.core.SplitPattern
class SearchIndex {
final Map<String, Set<String>> keywords = new HashMap<>()
void add(String string) {
String [] split = split(string)
split.each {
Set<String> existing = keywords.get(it)
if (existing == null) {
existing = new HashSet<>()
keywords.put(it, existing)
}
existing.add(string)
}
}
void remove(String string) {
String [] split = split(string)
split.each {
Set<String> existing = keywords.get it
if (existing != null) {
existing.remove(string)
if (existing.isEmpty()) {
keywords.remove(it)
}
}
}
}
private static String[] split(String source) {
source = source.replaceAll(Constants.SPLIT_PATTERN, " ").toLowerCase()
source.split(" ")
}
String[] search(List<String> terms) {
Set<String> rv = null;
terms.each {
Set<String> forWord = keywords.getOrDefault(it,[])
if (rv == null) {
rv = forWord
} else {
rv.retainAll(forWord)
}
}
if (rv != null)
return rv.asList()
[]
}
final Map<String, Set<String>> keywords = new HashMap<>()
void add(String string) {
String [] split = split(string)
split.each {
Set<String> existing = keywords.get(it)
if (existing == null) {
existing = new HashSet<>()
keywords.put(it, existing)
}
existing.add(string)
}
}
void remove(String string) {
String [] split = split(string)
split.each {
Set<String> existing = keywords.get it
if (existing != null) {
existing.remove(string)
if (existing.isEmpty()) {
keywords.remove(it)
}
}
}
}
private static String[] split(String source) {
source = source.replaceAll(SplitPattern.SPLIT_PATTERN, " ").toLowerCase()
String [] split = source.split(" ")
def rv = []
split.each { if (it.length() > 0) rv << it }
rv.toArray(new String[0])
}
String[] search(List<String> terms) {
Set<String> rv = null;
terms.each {
Set<String> forWord = keywords.getOrDefault(it,[])
if (rv == null) {
rv = new HashSet<>(forWord)
} else {
rv.retainAll(forWord)
}
}
if (rv != null)
return rv.asList()
[]
}
}

View File

@ -8,17 +8,17 @@ import net.i2p.data.Destination
@Log
public class SearchManager {
private static final int EXPIRE_TIME = 60 * 1000 * 1000
private static final int CHECK_INTERVAL = 60 * 1000
private final EventBus eventBus
private final Persona me
private final ResultsSender resultsSender
private final Map<UUID, QueryEvent> responderAddress = Collections.synchronizedMap(new HashMap<>())
SearchManager(){}
SearchManager(EventBus eventBus, Persona me, ResultsSender resultsSender) {
this.eventBus = eventBus
this.me = me
@ -26,7 +26,7 @@ public class SearchManager {
Timer timer = new Timer("query-expirer", true)
timer.schedule({cleanup()} as TimerTask, CHECK_INTERVAL, CHECK_INTERVAL)
}
void onQueryEvent(QueryEvent event) {
if (responderAddress.containsKey(event.searchEvent.uuid)) {
log.info("Dropping duplicate search uuid $event.searchEvent.uuid")
@ -35,7 +35,7 @@ public class SearchManager {
responderAddress.put(event.searchEvent.uuid, event)
eventBus.publish(event.searchEvent)
}
void onResultsEvent(ResultsEvent event) {
Destination target = responderAddress.get(event.uuid)?.replyTo
if (target == null)
@ -44,13 +44,13 @@ public class SearchManager {
log.info("No results for search uuid $event.uuid")
return
}
resultsSender.sendResults(event.uuid, event.results, target)
resultsSender.sendResults(event.uuid, event.results, target, event.searchEvent.oobInfohash)
}
boolean hasLocalSearch(UUID uuid) {
me.destination.equals(responderAddress.get(uuid)?.replyTo)
}
private void cleanup() {
final long now = System.currentTimeMillis()
synchronized(responderAddress) {

View File

@ -0,0 +1,8 @@
package com.muwire.core.search
import com.muwire.core.Event
import com.muwire.core.Persona
// Fired by the UI to request browsing the shared files of the given host.
class UIBrowseEvent extends Event {
// the remote persona whose files should be browsed
Persona host
}

View File

@ -0,0 +1,8 @@
package com.muwire.core.search
import com.muwire.core.Event
// Carries a batch of search results that arrived together for one search.
class UIResultBatchEvent extends Event {
// uuid of the search these results answer
UUID uuid
UIResultEvent[] results
}

View File

@ -4,11 +4,21 @@ import com.muwire.core.Event
import com.muwire.core.InfoHash
import com.muwire.core.Persona
import net.i2p.data.Destination
// A single search (or browse) result as displayed by the UI.
class UIResultEvent extends Event {
// who sent the result
Persona sender
// additional known sources for the file; may be empty
Set<Destination> sources
// uuid of the search this result answers
UUID uuid
String name
// file size in bytes
long size
InfoHash infohash
int pieceSize
// optional comment attached by the sender, null if none
String comment
// whether the sender allows its files to be browsed
boolean browse
@Override
public String toString() {
super.toString() + "name:$name size:$size sender:${sender.getHumanReadableName()} pieceSize $pieceSize"
}
}

View File

@ -18,5 +18,5 @@ class UnexpectedResultsException extends Exception {
public UnexpectedResultsException(String message) {
super(message);
}
}

View File

@ -7,12 +7,12 @@ import net.i2p.data.Destination
class UpsertEvent extends Event {
Set<String> names
byte [] infoHash
Destination leaf
@Override
public String toString() {
"UpsertEvent ${super.toString()} names:$names infoHash:${Base32.encode(infoHash)} leaf:${leaf.toBase32()}"
}
Set<String> names
byte [] infoHash
Destination leaf
@Override
public String toString() {
"UpsertEvent ${super.toString()} names:$names infoHash:${Base32.encode(infoHash)} leaf:${leaf.toBase32()}"
}
}

View File

@ -0,0 +1,31 @@
package com.muwire.core.trust
import java.util.concurrent.ConcurrentHashMap
import com.muwire.core.Persona
import net.i2p.util.ConcurrentHashSet
/**
 * Tracks the trust list published by a remote persona, together with the
 * fetch status and the time of the last successful update.  Identity is
 * defined solely by the owning persona.
 */
class RemoteTrustList {
    public enum Status { NEW, UPDATING, UPDATED, UPDATE_FAILED }

    private final Persona persona
    private final Set<Persona> good, bad
    // millis timestamp of the last successful fetch; 0 until the first one
    volatile long timestamp
    // set to request an immediate refresh regardless of age
    volatile boolean forceUpdate
    Status status = Status.NEW

    RemoteTrustList(Persona persona) {
        this.persona = persona
        good = new ConcurrentHashSet<>()
        bad = new ConcurrentHashSet<>()
    }

    @Override
    public boolean equals(Object o) {
        if (!(o instanceof RemoteTrustList))
            return false
        RemoteTrustList other = (RemoteTrustList)o
        persona == other.persona
    }

    @Override
    public int hashCode() {
        // FIX: equals() was overridden without hashCode(), breaking the
        // equals/hashCode contract and any use in hash-based collections.
        // Hash on the same field that equality is based on.
        persona.hashCode()
    }
}

View File

@ -5,6 +5,6 @@ import com.muwire.core.Persona
class TrustEvent extends Event {
Persona persona
TrustLevel level
Persona persona
TrustLevel level
}

View File

@ -11,87 +11,87 @@ import net.i2p.util.ConcurrentHashSet
class TrustService extends Service {
final File persistGood, persistBad
final long persistInterval
final Map<Destination, Persona> good = new ConcurrentHashMap<>()
final Map<Destination, Persona> bad = new ConcurrentHashMap<>()
final Timer timer
TrustService() {}
TrustService(File persistGood, File persistBad, long persistInterval) {
this.persistBad = persistBad
this.persistGood = persistGood
this.persistInterval = persistInterval
this.timer = new Timer("trust-persister",true)
}
void start() {
timer.schedule({load()} as TimerTask, 1)
}
void stop() {
timer.cancel()
}
void load() {
if (persistGood.exists()) {
persistGood.eachLine {
final File persistGood, persistBad
final long persistInterval
final Map<Destination, Persona> good = new ConcurrentHashMap<>()
final Map<Destination, Persona> bad = new ConcurrentHashMap<>()
final Timer timer
TrustService() {}
TrustService(File persistGood, File persistBad, long persistInterval) {
this.persistBad = persistBad
this.persistGood = persistGood
this.persistInterval = persistInterval
this.timer = new Timer("trust-persister",true)
}
void start() {
timer.schedule({load()} as TimerTask, 1)
}
void stop() {
timer.cancel()
}
void load() {
if (persistGood.exists()) {
persistGood.eachLine {
byte [] decoded = Base64.decode(it)
Persona persona = new Persona(new ByteArrayInputStream(decoded))
good.put(persona.destination, persona)
}
}
if (persistBad.exists()) {
persistBad.eachLine {
good.put(persona.destination, persona)
}
}
if (persistBad.exists()) {
persistBad.eachLine {
byte [] decoded = Base64.decode(it)
Persona persona = new Persona(new ByteArrayInputStream(decoded))
bad.put(persona.destination, persona)
}
}
timer.schedule({persist()} as TimerTask, persistInterval, persistInterval)
loaded = true
}
private void persist() {
persistGood.delete()
persistGood.withPrintWriter { writer ->
good.each {k,v ->
writer.println v.toBase64()
}
}
persistBad.delete()
persistBad.withPrintWriter { writer ->
bad.each { k,v ->
writer.println v.toBase64()
}
}
}
TrustLevel getLevel(Destination dest) {
if (good.containsKey(dest))
return TrustLevel.TRUSTED
else if (bad.containsKey(dest))
return TrustLevel.DISTRUSTED
TrustLevel.NEUTRAL
}
void onTrustEvent(TrustEvent e) {
switch(e.level) {
case TrustLevel.TRUSTED:
bad.remove(e.persona.destination)
good.put(e.persona.destination, e.persona)
break
case TrustLevel.DISTRUSTED:
good.remove(e.persona.destination)
bad.put(e.persona.destination, e.persona)
break
case TrustLevel.NEUTRAL:
good.remove(e.persona.destination)
bad.remove(e.persona.destination)
break
}
}
}
}
timer.schedule({persist()} as TimerTask, persistInterval, persistInterval)
loaded = true
}
private void persist() {
persistGood.delete()
persistGood.withPrintWriter { writer ->
good.each {k,v ->
writer.println v.toBase64()
}
}
persistBad.delete()
persistBad.withPrintWriter { writer ->
bad.each { k,v ->
writer.println v.toBase64()
}
}
}
TrustLevel getLevel(Destination dest) {
if (good.containsKey(dest))
return TrustLevel.TRUSTED
else if (bad.containsKey(dest))
return TrustLevel.DISTRUSTED
TrustLevel.NEUTRAL
}
void onTrustEvent(TrustEvent e) {
switch(e.level) {
case TrustLevel.TRUSTED:
bad.remove(e.persona.destination)
good.put(e.persona.destination, e.persona)
break
case TrustLevel.DISTRUSTED:
good.remove(e.persona.destination)
bad.put(e.persona.destination, e.persona)
break
case TrustLevel.NEUTRAL:
good.remove(e.persona.destination)
bad.remove(e.persona.destination)
break
}
}
}

View File

@ -0,0 +1,161 @@
package com.muwire.core.trust
import java.nio.charset.StandardCharsets
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.logging.Level
import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.UILoadedEvent
import com.muwire.core.connection.Endpoint
import com.muwire.core.connection.I2PConnector
import com.muwire.core.util.DataUtil
import groovy.util.logging.Log
import net.i2p.data.Destination
// Maintains subscriptions to other users' trust lists: periodically
// re-fetches each subscribed list over I2P and publishes
// TrustSubscriptionUpdatedEvents around every fetch.
@Log
class TrustSubscriber {
private final EventBus eventBus
private final I2PConnector i2pConnector
private final MuWireSettings settings
// subscribed lists keyed by the publisher's destination
private final Map<Destination, RemoteTrustList> remoteTrustLists = new ConcurrentHashMap<>()
// signaled to wake the check loop early when a subscription changes
private final Object waitLock = new Object()
private volatile boolean shutdown
private volatile Thread thread
// one thread per in-flight list fetch
private final ExecutorService updateThreads = Executors.newCachedThreadPool()
TrustSubscriber(EventBus eventBus, I2PConnector i2pConnector, MuWireSettings settings) {
this.eventBus = eventBus
this.i2pConnector = i2pConnector
this.settings = settings
}
// starts the background check loop once the UI has loaded
void onUILoadedEvent(UILoadedEvent e) {
thread = new Thread({checkLoop()} as Runnable, "trust-subscriber")
thread.setDaemon(true)
thread.start()
}
void stop() {
shutdown = true
thread?.interrupt()
updateThreads.shutdownNow()
}
// handles subscribe/unsubscribe requests for a persona's trust list
void onTrustSubscriptionEvent(TrustSubscriptionEvent e) {
if (!e.subscribe) {
remoteTrustLists.remove(e.persona.destination)
} else {
// putIfAbsent returns the existing list, or null if this is a new one.
// An existing list is flagged for immediate refresh; a brand-new list
// (timestamp 0) is picked up by the next pass of the check loop.
RemoteTrustList trustList = remoteTrustLists.putIfAbsent(e.persona.destination, new RemoteTrustList(e.persona))
trustList?.forceUpdate = true
synchronized(waitLock) {
waitLock.notify()
}
}
}
// wakes up every minute (or when notified) and schedules an update job for
// every list that is stale or flagged for a forced refresh
private void checkLoop() {
try {
while(!shutdown) {
synchronized(waitLock) {
waitLock.wait(60 * 1000)
}
final long now = System.currentTimeMillis()
remoteTrustLists.values().each { trustList ->
if (trustList.status == RemoteTrustList.Status.UPDATING)
return
// trustListInterval is expressed in hours
if (!trustList.forceUpdate &&
now - trustList.timestamp < settings.trustListInterval * 60 * 60 * 1000)
return
trustList.forceUpdate = false
updateThreads.submit(new UpdateJob(trustList))
}
}
} catch (InterruptedException e) {
// interruption is the normal shutdown path; rethrow otherwise
if (!shutdown)
throw e
}
}
// fetches one trust list, publishing status transitions before and after
private class UpdateJob implements Runnable {
private final RemoteTrustList trustList
UpdateJob(RemoteTrustList trustList) {
this.trustList = trustList
}
public void run() {
trustList.status = RemoteTrustList.Status.UPDATING
eventBus.publish(new TrustSubscriptionUpdatedEvent(trustList : trustList))
if (check(trustList, System.currentTimeMillis()))
trustList.status = RemoteTrustList.Status.UPDATED
else
trustList.status = RemoteTrustList.Status.UPDATE_FAILED
eventBus.publish(new TrustSubscriptionUpdatedEvent(trustList : trustList))
}
}
// Performs the TRUST protocol exchange; returns true on success.
// On success the list's good/bad sets are replaced via clear + addAll
// (not atomic: concurrent readers may briefly see a partially updated set).
private boolean check(RemoteTrustList trustList, long now) {
log.info("fetching trust list from ${trustList.persona.getHumanReadableName()}")
Endpoint endpoint = null
try {
endpoint = i2pConnector.connect(trustList.persona.destination)
OutputStream os = endpoint.getOutputStream()
InputStream is = endpoint.getInputStream()
os.write("TRUST\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
os.flush()
// response line is "<code> [reason]"; keep only the numeric code
String codeString = DataUtil.readTillRN(is)
int space = codeString.indexOf(' ')
if (space > 0)
codeString = codeString.substring(0,space)
int code = Integer.parseInt(codeString.trim())
if (code != 200) {
log.info("couldn't fetch trust list, code $code")
return false
}
// swallow any headers
String header
while (( header = DataUtil.readTillRN(is)) != "");
// payload: unsigned-short count of good personas followed by the
// personas themselves, then the same layout for bad personas
DataInputStream dis = new DataInputStream(is)
Set<Persona> good = new HashSet<>()
int nGood = dis.readUnsignedShort()
for (int i = 0; i < nGood; i++) {
Persona p = new Persona(dis)
good.add(p)
}
Set<Persona> bad = new HashSet<>()
int nBad = dis.readUnsignedShort()
for (int i = 0; i < nBad; i++) {
Persona p = new Persona(dis)
bad.add(p)
}
trustList.timestamp = now
trustList.good.clear()
trustList.good.addAll(good)
trustList.bad.clear()
trustList.bad.addAll(bad)
return true
} catch (Exception e) {
log.log(Level.WARNING,"exception fetching trust list from ${trustList.persona.getHumanReadableName()}",e)
return false
} finally {
endpoint?.close()
}
}
}

View File

@ -0,0 +1,9 @@
package com.muwire.core.trust
import com.muwire.core.Event
import com.muwire.core.Persona
// Requests subscribing to (or unsubscribing from) a persona's trust list.
class TrustSubscriptionEvent extends Event {
Persona persona
// true to subscribe, false to unsubscribe
boolean subscribe
}

View File

@ -0,0 +1,7 @@
package com.muwire.core.trust
import com.muwire.core.Event
// Published by TrustSubscriber whenever the status of a remote trust list
// changes (fetch started, succeeded, or failed).
class TrustSubscriptionUpdatedEvent extends Event {
RemoteTrustList trustList
}

View File

@ -1,8 +1,10 @@
package com.muwire.core.update
import com.muwire.core.Event
import com.muwire.core.InfoHash
// Announces that a newer version of the application is available.
class UpdateAvailableEvent extends Event {
String version
// identity of the update's signer
String signer
// infohash of the update payload to search for and download
String infoHash
}

View File

@ -3,7 +3,15 @@ package com.muwire.core.update
import java.util.logging.Level
import com.muwire.core.EventBus
import com.muwire.core.InfoHash
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.download.UIDownloadEvent
import com.muwire.core.files.FileDownloadedEvent
import com.muwire.core.files.FileManager
import com.muwire.core.search.QueryEvent
import com.muwire.core.search.SearchEvent
import com.muwire.core.search.UIResultBatchEvent
import groovy.json.JsonOutput
import groovy.json.JsonSlurper
@ -13,6 +21,7 @@ import net.i2p.client.I2PSessionMuxedListener
import net.i2p.client.SendMessageOptions
import net.i2p.client.datagram.I2PDatagramDissector
import net.i2p.client.datagram.I2PDatagramMaker
import net.i2p.data.Base64
import net.i2p.util.VersionComparator
@Log
@ -21,28 +30,54 @@ class UpdateClient {
final I2PSession session
final String myVersion
final MuWireSettings settings
final FileManager fileManager
final Persona me
private final Timer timer
private long lastUpdateCheckTime
UpdateClient(EventBus eventBus, I2PSession session, String myVersion, MuWireSettings settings) {
private volatile InfoHash updateInfoHash
private volatile String version, signer
private volatile boolean updateDownloading
UpdateClient(EventBus eventBus, I2PSession session, String myVersion, MuWireSettings settings, FileManager fileManager, Persona me) {
this.eventBus = eventBus
this.session = session
this.myVersion = myVersion
this.settings = settings
this.fileManager = fileManager
this.me = me
timer = new Timer("update-client",true)
}
void start() {
session.addMuxedSessionListener(new Listener(), I2PSession.PROTO_DATAGRAM, 2)
timer.schedule({checkUpdate()} as TimerTask, 30000, 60 * 60 * 1000)
timer.schedule({checkUpdate()} as TimerTask, 60000, 60 * 60 * 1000)
}
void stop() {
timer.cancel()
}
void onUIResultBatchEvent(UIResultBatchEvent results) {
if (results.results[0].infohash != updateInfoHash)
return
if (updateDownloading)
return
updateDownloading = true
def file = new File(settings.downloadLocation, results.results[0].name)
def downloadEvent = new UIDownloadEvent(result: results.results[0], sources : results.results[0].sources, target : file)
eventBus.publish(downloadEvent)
}
void onFileDownloadedEvent(FileDownloadedEvent e) {
if (e.downloadedFile.infoHash != updateInfoHash)
return
updateDownloading = false
eventBus.publish(new UpdateDownloadedEvent(version : version, signer : signer))
}
private void checkUpdate() {
final long now = System.currentTimeMillis()
if (lastUpdateCheckTime > 0) {
@ -50,20 +85,20 @@ class UpdateClient {
return
}
lastUpdateCheckTime = now
log.info("checking for update")
def ping = [version : 1, myVersion : myVersion]
ping = JsonOutput.toJson(ping)
def maker = new I2PDatagramMaker(session)
ping = maker.makeI2PDatagram(ping.bytes)
def options = new SendMessageOptions()
def options = new SendMessageOptions()
options.setSendLeaseSet(true)
session.sendMessage(UpdateServers.UPDATE_SERVER, ping, 0, ping.length, I2PSession.PROTO_DATAGRAM, 2, 0, options)
session.sendMessage(UpdateServers.UPDATE_SERVER, ping, 0, ping.length, I2PSession.PROTO_DATAGRAM, 2, 0, options)
}
class Listener implements I2PSessionMuxedListener {
final JsonSlurper slurper = new JsonSlurper()
@Override
@ -76,7 +111,7 @@ class UpdateClient {
log.warning "Received unexpected protocol $proto"
return
}
def payload = session.receiveMessage(msgId)
def dissector = new I2PDatagramDissector()
try {
@ -86,29 +121,53 @@ class UpdateClient {
log.warning("received something not from update server " + sender.toBase32())
return
}
log.info("Received something from update server")
payload = dissector.getPayload()
payload = slurper.parse(payload)
if (payload.version == null) {
log.warning("version missing")
return
}
if (payload.signer == null) {
log.warning("signer missing")
}
if (VersionComparator.comp(myVersion, payload.version) >= 0) {
log.info("no new version available")
return
}
log.info("new version $payload.version available, publishing event")
eventBus.publish(new UpdateAvailableEvent(version : payload.version, signer : payload.signer))
String infoHash
if (settings.updateType == "jar") {
infoHash = payload.infoHash
} else
infoHash = payload[settings.updateType]
if (!settings.autoDownloadUpdate) {
log.info("new version $payload.version available, publishing event")
eventBus.publish(new UpdateAvailableEvent(version : payload.version, signer : payload.signer, infoHash : infoHash))
} else {
log.info("new version $payload.version available")
updateInfoHash = new InfoHash(Base64.decode(infoHash))
if (fileManager.rootToFiles.containsKey(updateInfoHash))
eventBus.publish(new UpdateDownloadedEvent(version : payload.version, signer : payload.signer))
else {
updateDownloading = false
version = payload.version
signer = payload.signer
log.info("starting search for new version hash $payload.infoHash")
def searchEvent = new SearchEvent(searchHash : updateInfoHash.getRoot(), uuid : UUID.randomUUID(), oobInfohash : true)
def queryEvent = new QueryEvent(searchEvent : searchEvent, firstHop : true, replyTo : me.destination,
receivedOn : me.destination, originator : me)
eventBus.publish(queryEvent)
}
}
} catch (Exception e) {
log.log(Level.WARNING,"Invalid datagram",e)
}
@ -127,6 +186,6 @@ class UpdateClient {
public void errorOccurred(I2PSession session, String message, Throwable error) {
log.log(Level.SEVERE, message, error)
}
}
}

View File

@ -0,0 +1,8 @@
package com.muwire.core.update
import com.muwire.core.Event
// Announces that an application update has finished downloading.
class UpdateDownloadedEvent extends Event {
String version
// identity of the update's signer
String signer
}

Some files were not shown because too many files have changed in this diff Show More