Compare commits

..

510 Commits

Author SHA1 Message Date
Zlatin Balevsky
6bad67c1bf Release 0.4.8 2019-07-08 18:30:19 +01:00
Zlatin Balevsky
c76e6dc99f Merge pull request #9 from zetok/backticks
Replace deprecated backticks with $() for command substitution
2019-07-08 08:24:37 +01:00
Zetok Zalbavar
acf9db0db3 Replace deprecated backticks with $() for command substitution
Although it's a Bash FAQ, the point also applies to POSIX-compatible
shells: https://mywiki.wooledge.org/BashFAQ/082
2019-07-08 06:29:33 +01:00
Zlatin Balevsky
69b4f0b547 Add trust/distrust action from monitor window. Thanks Aegon 2019-07-07 15:31:21 +01:00
Zlatin Balevsky
80e165b505 fix download size in renderer, thanks Aegon 2019-07-07 11:17:56 +01:00
Zlatin Balevsky
bcce55b873 fix integer overflow 2019-07-07 10:58:39 +01:00
Zlatin Balevsky
d5c92560db fix integer overflow 2019-07-07 10:56:14 +01:00
Zlatin Balevsky
f827c1c9bf Home directories for different OSes 2019-07-07 09:14:13 +01:00
Zlatin Balevsky
88c5f1a02d Add GPG key link 2019-07-07 09:04:52 +01:00
Zlatin Balevsky
d8e44f5f39 kill other workers if download is finished 2019-07-06 22:21:13 +01:00
Zlatin Balevsky
72ff47ffe5 use custom renderer and comparator for download progress 2019-07-06 12:53:49 +01:00
Zlatin Balevsky
066ee2c96d wrong list 2019-07-06 11:28:04 +01:00
Zlatin Balevsky
0a8016dea7 enable stealing of pieces from other download workers 2019-07-06 11:26:18 +01:00
Zlatin Balevsky
db36367b11 avoid AIOOBE 2019-07-06 11:00:31 +01:00
Zlatin Balevsky
b6c9ccb7f6 return up to 9 X-Alts 2019-07-06 09:03:27 +01:00
Zlatin Balevsky
a9dc636bce write pieces every time a downloader finishes 2019-07-06 00:52:49 +01:00
Zlatin Balevsky
3cc0574d11 working partial pieces 2019-07-06 00:47:45 +01:00
Zlatin Balevsky
20fab9b16d work on partial piece persistence 2019-07-06 00:17:46 +01:00
Zlatin Balevsky
4015818323 center buttons 2019-07-05 17:15:50 +01:00
Zlatin Balevsky
f569d45c8c reallign tables 2019-07-05 17:07:14 +01:00
Zlatin Balevsky
3773647869 remove diff rejects 2019-07-05 16:24:57 +01:00
Zlatin Balevsky
29cdbf018c remove trailing spaces 2019-07-05 16:24:19 +01:00
Zlatin Balevsky
94bb7022eb tabs -> spaces 2019-07-05 16:22:34 +01:00
Zlatin Balevsky
39808302df Show which file is hashing, thanks to Aegon 2019-07-05 16:20:03 +01:00
Zlatin Balevsky
2d22f9c39e override router log manager 2019-07-05 12:32:23 +01:00
Zlatin Balevsky
ee8f80bab6 up i2p to 0.9.41 2019-07-05 12:26:48 +01:00
Zlatin Balevsky
3e6242e583 break when matching search is found 2019-07-04 18:12:22 +01:00
Zlatin Balevsky
41181616ee compact display of incoming searches, thanks Aegon 2019-07-04 17:59:53 +01:00
Zlatin Balevsky
eb2530ca32 fix sorting of download/upload tables thanks Aegon 2019-07-04 17:58:06 +01:00
Zlatin Balevsky
b5233780ef Release 0.4.7 2019-07-03 20:36:54 +01:00
Zlatin Balevsky
78753d7538 shut down cache client on shutdown 2019-07-03 19:50:00 +01:00
Zlatin Balevsky
4740e8b4f5 log hostcache stats 2019-07-03 19:46:24 +01:00
Zlatin Balevsky
ad5b00fc90 prettier progress status thanks to Aegon 2019-07-03 12:50:24 +01:00
Zlatin Balevsky
d6c6880848 update readme 2019-07-03 07:27:48 +01:00
Zlatin Balevsky
4f948c1b9e Release 0.4.6 2019-07-03 07:11:59 +01:00
Zlatin Balevsky
2b68c24f9c use switch 2019-07-03 07:01:27 +01:00
Zlatin Balevsky
bcdf0422db update for embedded router 2019-07-03 07:00:04 +01:00
Zlatin Balevsky
f6434b478d remove FAQ 2019-07-03 06:56:20 +01:00
Zlatin Balevsky
e979fdd26f update list view tables 2019-07-03 06:51:21 +01:00
Zlatin Balevsky
e6bfcaaab9 size columns, center integers 2019-07-03 06:11:02 +01:00
Zlatin Balevsky
9780108e8a disable trust buttons on action 2019-07-03 06:00:09 +01:00
Zlatin Balevsky
697c7d2d6d enable/disable trust panel buttons 2019-07-03 05:41:17 +01:00
Zlatin Balevsky
887d10c8bf move buttons around 2019-07-03 05:30:39 +01:00
Zlatin Balevsky
ef6b8fe458 add a state for failed updates 2019-07-03 05:12:00 +01:00
Zlatin Balevsky
20ab55d763 update todo 2019-07-03 00:23:21 +01:00
Zlatin Balevsky
eda58c9e0d Merge branch 'trust-lists' 2019-07-03 00:04:50 +01:00
Zlatin Balevsky
fb42fc0e35 add trust panel in options 2019-07-03 00:04:08 +01:00
Zlatin Balevsky
35cabc47ad hook up trust and distrust buttons 2019-07-02 23:44:43 +01:00
Zlatin Balevsky
5be97d0404 show something when review button is pressed 2019-07-02 22:51:04 +01:00
Zlatin Balevsky
82b0fa253c enable update and unsubscribe buttons 2019-07-02 22:26:29 +01:00
Zlatin Balevsky
011a4d5766 prevent duplicate updates and zero timestamps 2019-07-02 22:02:15 +01:00
Zlatin Balevsky
5cd1ca88c1 do actual updating on in a threadpool 2019-07-02 21:34:29 +01:00
Zlatin Balevsky
44c880d911 store subscriber list upon subscription 2019-07-02 20:53:29 +01:00
Zlatin Balevsky
14857cb5ad swallow headers in trust list response 2019-07-02 20:35:50 +01:00
Zlatin Balevsky
7daf981f1a fix NPE 2019-07-02 20:24:51 +01:00
Zlatin Balevsky
b99bc0ea32 fix 2019-07-02 20:12:22 +01:00
Zlatin Balevsky
1ccf6fbdfa participating bandwidth grid cell 2019-07-02 15:35:42 +01:00
Zlatin Balevsky
5711979272 Release 0.4.5 2019-07-02 15:01:51 +01:00
Zlatin Balevsky
9a5e2b1fa3 speed smoothing patch courtesy of Aegon 2019-07-02 14:46:40 +01:00
Zlatin Balevsky
cafc5f582e subscribe button 2019-07-02 14:35:52 +01:00
Zlatin Balevsky
a89b423dfc simpler speed calculation 2019-07-02 13:05:06 +01:00
Zlatin Balevsky
79e8438941 always assume interval is at least 1 second 2019-07-02 12:49:00 +01:00
Zlatin Balevsky
19c2c46491 prevent NPE on startup 2019-07-02 12:27:15 +01:00
Zlatin Balevsky
78f1d54b69 add new host cache 2019-07-02 10:04:24 +01:00
Zlatin Balevsky
9461649ed4 change sig type 2019-07-02 09:49:13 +01:00
Zlatin Balevsky
8573ab2850 work on trust list UI 2019-07-02 09:35:21 +01:00
Zlatin Balevsky
8b3d752727 add status to the trust list object 2019-07-02 08:59:30 +01:00
Zlatin Balevsky
7c54bd8966 start work on sharing of trust lists 2019-07-01 23:33:39 +01:00
Zlatin Balevsky
5d0fcb7027 start work on sharing of trust lists 2019-07-01 23:15:13 +01:00
Zlatin Balevsky
3ec9654d3c start work on sharing of trust lists 2019-07-01 22:05:43 +01:00
Zlatin Balevsky
7c8d64b462 start work on sharing of trust lists 2019-07-01 21:40:07 +01:00
Zlatin Balevsky
31e30e3d31 excludePeerCaps 2019-07-01 18:31:58 +01:00
Zlatin Balevsky
8caf6e99b0 show floodfill status 2019-07-01 13:18:31 +01:00
Zlatin Balevsky
624155debd update todo 2019-07-01 06:17:46 +01:00
Zlatin Balevsky
4468a262ae actually add timestamps to the list 2019-06-30 21:40:18 +01:00
Zlatin Balevsky
1780901cb0 throttle connections to 10 searches per second 2019-06-30 21:22:49 +01:00
Zlatin Balevsky
d830d9261f canonicalize before checking if file is already shared 2019-06-30 17:12:25 +01:00
Zlatin Balevsky
f5e1833a48 Release 0.4.4 2019-06-30 15:55:23 +01:00
Zlatin Balevsky
9feb2a3c8f fix NPE on update search 2019-06-30 15:11:13 +01:00
Zlatin Balevsky
b27665f5dd Merge pull request #5 from 0rC0/patch-1
code markdown for commands and paths in README.md
2019-06-30 13:45:36 +01:00
orco
4465aa4134 code markdown for commands and paths in README.md
... instead of quotes
2019-06-30 14:27:33 +02:00
Zlatin Balevsky
ad766ac748 try to unmap files when done 2019-06-30 13:20:26 +01:00
Zlatin Balevsky
d9e7d67d86 javadoc 2019-06-30 12:51:34 +01:00
Zlatin Balevsky
3fefbc94b3 utility to decode personas 2019-06-30 10:41:42 +01:00
Zlatin Balevsky
21034209a5 add ? to split pattern 2019-06-30 06:29:46 +01:00
Zlatin Balevsky
7c04c0f83c unshare individual file 2019-06-30 05:44:08 +01:00
Zlatin Balevsky
f5293d65dd update todo 2019-06-29 16:00:49 +01:00
Zlatin Balevsky
8191bf6066 Release 0.4.3 2019-06-29 10:44:15 +01:00
Zlatin Balevsky
29b6bfd463 support different update types 2019-06-29 10:31:27 +01:00
Zlatin Balevsky
2f3d23bc34 fixes 2019-06-29 10:12:50 +01:00
Zlatin Balevsky
98dd80c4b8 fix 2019-06-29 10:03:58 +01:00
Zlatin Balevsky
d9edb2e128 ability to download updates automatically 2019-06-29 09:23:27 +01:00
Zlatin Balevsky
de04b40b86 Release 0.4.2 2019-06-29 07:17:45 +01:00
Zlatin Balevsky
7206a3d926 more i2p metrics 2019-06-29 07:07:48 +01:00
Zlatin Balevsky
98b98d8938 I2P status panel 2019-06-29 06:33:53 +01:00
Zlatin Balevsky
294b8fcc2f MW status window 2019-06-29 05:58:46 +01:00
Zlatin Balevsky
32f601a1b1 add ability to change i2p port 2019-06-28 23:53:22 +01:00
Zlatin Balevsky
8e3a398080 Release 0.4.1 2019-06-28 16:42:37 +01:00
Zlatin Balevsky
720b9688b4 Add unsharing of directories 2019-06-28 16:08:04 +01:00
Zlatin Balevsky
e3066161c5 do not perform filesystem operations in the UI thread 2019-06-27 23:29:48 +01:00
Zlatin Balevsky
a9aa3a524f disable i2cp interface on embedded router 2019-06-27 09:56:18 +01:00
Zlatin Balevsky
92848e818a on empty properties source from java props 2019-06-27 03:47:56 +01:00
Zlatin Balevsky
a7aa3008c0 bandwidth settings 2019-06-27 00:42:27 +01:00
Zlatin Balevsky
485325e824 embedded router except for logs 2019-06-26 23:25:22 +01:00
Zlatin Balevsky
0df2a0e039 start work on embedded router 2019-06-26 22:39:25 +01:00
Zlatin Balevsky
fb7b4466c2 update readme 2019-06-26 22:05:04 +01:00
Zlatin Balevsky
53105245f4 Release 0.4.0 2019-06-26 21:59:28 +01:00
Zlatin Balevsky
b68eab91e0 Release 0.3.10 2019-06-25 22:39:43 +01:00
Zlatin Balevsky
f72cf91462 wait for files to be loaded before sharing watched directories 2019-06-25 22:24:32 +01:00
Zlatin Balevsky
a655c4ef50 add toString 2019-06-25 22:24:15 +01:00
Zlatin Balevsky
5d46e9b796 switch 4_ to INFO 2019-06-25 21:50:15 +01:00
Zlatin Balevsky
642e6e67b3 wait for all files loaded before watching dirs 2019-06-25 21:43:07 +01:00
Zlatin Balevsky
2b6b86f903 show how many pieces the remote side already has 2019-06-25 17:44:05 +01:00
Zlatin Balevsky
f2706a4426 clarify upload column 2019-06-25 17:24:42 +01:00
Zlatin Balevsky
1af75413aa update for brackets 2019-06-25 16:27:02 +01:00
Zlatin Balevsky
adc4077b1a filter asterix 2019-06-25 15:54:30 +01:00
Zlatin Balevsky
01f4e2453b limit search length to 128 characters 2019-06-25 15:53:53 +01:00
Zlatin Balevsky
61267374dd move button around 2019-06-25 08:10:20 +01:00
Zlatin Balevsky
970f814685 make mesh expiration configurable 2019-06-25 08:04:57 +01:00
Zlatin Balevsky
4fd9fc1991 add option to change download location 2019-06-25 07:59:30 +01:00
Zlatin Balevsky
26207ffd1b add constructor 2019-06-25 07:53:24 +01:00
Zlatin Balevsky
2614cfbe5f make host clear interval configurable 2019-06-25 07:41:20 +01:00
Zlatin Balevsky
f11d461ec0 make download sequential ratio a property 2019-06-25 07:34:26 +01:00
Zlatin Balevsky
b2eb2d2755 show hidden files in file choosers 2019-06-24 23:09:20 +01:00
Zlatin Balevsky
ea46a54f19 enable AA by default 2019-06-24 22:55:26 +01:00
Zlatin Balevsky
627add45ad remove griffon icons 2019-06-24 22:51:43 +01:00
Zlatin Balevsky
d364855459 logo 2019-06-24 22:13:03 +01:00
Zlatin Balevsky
14ee35e77a Release 0.3.9 2019-06-24 18:39:59 +01:00
Zlatin Balevsky
8773eb4ee0 fix piece size calculation 2019-06-24 18:29:00 +01:00
Zlatin Balevsky
51425bbfd9 Release 0.3.8 2019-06-24 07:38:39 +01:00
Zlatin Balevsky
6a4879bc0b always save pieces 2019-06-24 07:29:49 +01:00
Zlatin Balevsky
e7fe56439b persist X-Have, fix flickering bug 2019-06-24 07:20:53 +01:00
Zlatin Balevsky
2886feab4a do not modify the set of available pieces 2019-06-23 17:08:07 +01:00
Zlatin Balevsky
fb91194026 even noisier log 2019-06-23 16:39:38 +01:00
Zlatin Balevsky
4527478b0d even noisier 4_ 2019-06-23 12:42:44 +01:00
Zlatin Balevsky
b0062f146e log roots of download exceptions 2019-06-23 12:10:19 +01:00
Zlatin Balevsky
bf16561170 Release 0.3.7 2019-06-23 11:25:19 +01:00
Zlatin Balevsky
3b23dc29c4 if all sources are expired forget mesh 2019-06-23 11:21:39 +01:00
Zlatin Balevsky
c0645b670e no split on list 2019-06-23 10:50:19 +01:00
Zlatin Balevsky
30613fe530 update todo 2019-06-23 09:56:51 +01:00
Zlatin Balevsky
e7822f6edc expire sources, fix compilation 2019-06-23 09:43:56 +01:00
Zlatin Balevsky
7e5c9ba115 actually save 2019-06-23 09:41:20 +01:00
Zlatin Balevsky
647fa3a481 persist download mesh 2019-06-23 09:38:42 +01:00
Zlatin Balevsky
538eca9297 Release 0.3.6 2019-06-23 08:54:28 +01:00
Zlatin Balevsky
e73a23d4a4 fix space not showing 2019-06-23 08:44:51 +01:00
Zlatin Balevsky
76e41a0383 fix restoring paused downloads 2019-06-23 08:42:45 +01:00
Zlatin Balevsky
7045927666 hide monitor options from gui 2019-06-23 08:02:28 +01:00
Zlatin Balevsky
5fb3086b42 update faq 2019-06-23 07:52:01 +01:00
Zlatin Balevsky
2de18227c1 persist pause state 2019-06-23 07:48:49 +01:00
Zlatin Balevsky
bd12a1de3d pause/resume downloads 2019-06-23 06:59:52 +01:00
Zlatin Balevsky
a3a91050c8 update todo 2019-06-23 01:50:30 +01:00
Zlatin Balevsky
6c1cc28e49 shutdown if connection to I2P router is lost 2019-06-22 17:32:12 +01:00
Zlatin Balevsky
b6e5b54f05 do not show monitor by default 2019-06-22 14:51:26 +01:00
Zlatin Balevsky
a6e559ec67 change some defaults 2019-06-22 06:54:49 +01:00
Zlatin Balevsky
f11badb824 update todo 2019-06-21 22:43:46 +01:00
Zlatin Balevsky
44da44ff6f Release 0.3.5 2019-06-21 22:35:54 +01:00
Zlatin Balevsky
aae3fc29ca add logging.properties with various degree of noisiness 2019-06-21 22:28:57 +01:00
Zlatin Balevsky
c30aa19d8b Merge branch 'download-mesh' 2019-06-21 22:26:17 +01:00
Zlatin Balevsky
c79e8712d0 correctly determine if uploader has requested piece 2019-06-21 20:36:33 +01:00
Zlatin Balevsky
ed12d78a48 clear pieces on cancel 2019-06-21 17:22:55 +01:00
Zlatin Balevsky
d27872cc8b investigate StringIndexOutOfBounds 2019-06-21 16:29:52 +01:00
Zlatin Balevsky
f794c39760 personas not destinations 2019-06-21 16:15:35 +01:00
Zlatin Balevsky
2be9c425f7 compute which pieces are requested 2019-06-21 16:09:57 +01:00
Zlatin Balevsky
ab5fea9216 416 if piece not downloaded 2019-06-21 16:03:20 +01:00
Zlatin Balevsky
d1c8328080 do not send alts if there aren't any 2019-06-21 15:39:00 +01:00
Zlatin Balevsky
89e761f53b write personas on the wire part1 2019-06-21 15:26:18 +01:00
Zlatin Balevsky
40410eba63 fix constructor 2019-06-21 14:57:53 +01:00
Zlatin Balevsky
85466a8e80 fix npe 2019-06-21 14:45:14 +01:00
Zlatin Balevsky
c210af7870 source partial uploads from incompletes file 2019-06-21 14:39:20 +01:00
Zlatin Balevsky
38ff49d28f downloaders get pieces from mesh manager 2019-06-21 14:17:10 +01:00
Zlatin Balevsky
710f9f52a8 send X-Have and X-Alts from uploader 2019-06-21 13:58:21 +01:00
Zlatin Balevsky
1b6eda5a40 skeleton of mesh manager 2019-06-21 13:34:00 +01:00
Zlatin Balevsky
1ee9ccf098 parse X-Have on uploader side 2019-06-21 12:55:25 +01:00
Zlatin Balevsky
0f07562de3 pass new sources to active downloaders 2019-06-21 12:39:16 +01:00
Zlatin Balevsky
6eb1aa07f5 key downloaders by infohash 2019-06-21 12:29:32 +01:00
Zlatin Balevsky
05b02834af parse X-Alt 2019-06-21 12:25:04 +01:00
Zlatin Balevsky
56125f6df8 refactor X-Have decoding logic 2019-06-21 09:32:10 +01:00
Zlatin Balevsky
8f9996848b send X-Have from downloader too 2019-06-21 09:25:28 +01:00
Zlatin Balevsky
dd655ed60f test for re-requesting available pieces 2019-06-21 09:12:42 +01:00
Zlatin Balevsky
8923c6ff7d exclude local results by default 2019-06-21 08:15:20 +01:00
Zlatin Balevsky
807ab22f8e test parsing of X-Have 2019-06-21 06:43:48 +01:00
Zlatin Balevsky
a26ad229ee more tests 2019-06-21 05:56:42 +01:00
Zlatin Balevsky
5504dd2251 tighten conditions 2019-06-21 05:45:11 +01:00
Zlatin Balevsky
f9777d29f4 get existing tests to pass 2019-06-21 05:41:49 +01:00
Zlatin Balevsky
b23226e8c6 wip on parsing X-Have from uploader 2019-06-21 05:30:56 +01:00
Zlatin Balevsky
1249ad29e0 claim pieces from list of available pieces 2019-06-21 04:42:02 +01:00
Zlatin Balevsky
7bb5e5b632 Release 0.3.4 2019-06-20 21:07:50 +01:00
Zlatin Balevsky
b2e43f9765 update split pattern and add unit test 2019-06-20 21:06:39 +01:00
Zlatin Balevsky
2aa73c203a Release 0.3.3 2019-06-20 18:08:02 +01:00
Zlatin Balevsky
18d2b56563 fix indexing 2019-06-20 17:57:36 +01:00
Zlatin Balevsky
a455b4ad6e redirect exceptions in result sender to log 2019-06-20 17:22:59 +01:00
Zlatin Balevsky
761b683a81 Release 0.3.2 2019-06-20 16:04:46 +01:00
Zlatin Balevsky
1d41bcd825 prevent empty tokens in search index 2019-06-20 16:02:48 +01:00
Zlatin Balevsky
f1ac038b55 update split pattern 2019-06-20 15:47:00 +01:00
Zlatin Balevsky
396c636e42 prevent empty search terms 2019-06-20 15:29:27 +01:00
Zlatin Balevsky
e32c858e90 update README with quick FAQ 2019-06-20 14:18:37 +01:00
Zlatin Balevsky
821555f3f1 Release 0.3.1 2019-06-20 14:02:22 +01:00
Zlatin Balevsky
089ab4f0d9 do not retry downloads if core is shut(ting) down 2019-06-20 13:40:04 +01:00
Zlatin Balevsky
948b6292fe add shutdown hook to shutdown core on SIGTERM 2019-06-20 13:29:15 +01:00
Zlatin Balevsky
4e2a530a13 Release 0.3.0 2019-06-20 07:04:45 +01:00
Zlatin Balevsky
03646e2b90 Document download mesh 2019-06-20 01:19:15 +01:00
Zlatin Balevsky
3dce228bbb always clean 2019-06-19 22:42:05 +01:00
Zlatin Balevsky
15a49ad550 show git revision in title 2019-06-19 22:36:22 +01:00
Zlatin Balevsky
3d91c0f4c7 increase default tunnel count 2019-06-19 22:24:04 +01:00
Zlatin Balevsky
2825a8d9a4 Release 0.2.10 2019-06-19 17:18:30 +01:00
Zlatin Balevsky
8dcce9bda6 Merge branch 'connection-logic' 2019-06-19 17:16:13 +01:00
Zlatin Balevsky
d8d3e2cd58 update tests 2019-06-19 15:54:35 +01:00
Zlatin Balevsky
51d5dbe47e Prevent rare exception on changing trust when result tabs are open 2019-06-19 12:23:18 +01:00
Zlatin Balevsky
84cee0aa43 retry failed hosts after one hour 2019-06-19 08:35:31 +01:00
Zlatin Balevsky
162844787f explicitly set java versions 2019-06-19 02:11:00 +01:00
Zlatin Balevsky
d8a2b59055 tool to print out contents of files.json 2019-06-18 22:08:33 +01:00
Zlatin Balevsky
67a0939de4 Release 0.2.9 2019-06-18 20:15:53 +01:00
Zlatin Balevsky
37ca922a2c reduce default retry interval 2019-06-18 20:07:20 +01:00
Zlatin Balevsky
1d6781819b ignore CWSE if shutting down 2019-06-18 19:44:22 +01:00
Zlatin Balevsky
64d45da94a show version on title 2019-06-18 18:57:44 +01:00
Zlatin Balevsky
59c84d8a5e Release 0.2.8 2019-06-18 17:48:07 +01:00
Zlatin Balevsky
8b55021a4b fix 2019-06-18 17:23:18 +01:00
Zlatin Balevsky
8bd3ebfaf5 timestamp entries 2019-06-18 17:17:03 +01:00
Zlatin Balevsky
526ec45da3 Release 0.2.7 2019-06-18 15:53:54 +01:00
Zlatin Balevsky
deb7c0b4b0 exclude files present locally from search results 2019-06-18 15:45:27 +01:00
Zlatin Balevsky
e85a0c7b2c Merge branch 'source-tracking' 2019-06-18 12:22:46 +01:00
Zlatin Balevsky
7b021a47eb fix detection of moving files into a watched dir on Linux 2019-06-18 12:20:10 +01:00
Zlatin Balevsky
0c21d4d6c1 implement source tracking 2019-06-18 11:34:19 +01:00
Zlatin Balevsky
8e9f79d404 update TODO 2019-06-18 09:43:22 +01:00
Zlatin Balevsky
bf33a6ff61 Release 0.2.6 2019-06-18 09:07:27 +01:00
Zlatin Balevsky
19c8d84afd Merge branch 'file-monitor' 2019-06-18 09:01:09 +01:00
Zlatin Balevsky
6a40787863 fine log 2019-06-18 05:46:16 +01:00
Zlatin Balevsky
c698cbd737 register created directories recursively 2019-06-18 05:43:41 +01:00
Zlatin Balevsky
9c049b9301 special case mac 2019-06-18 05:26:41 +01:00
Zlatin Balevsky
84a9bb9482 watch deleting of files 2019-06-18 04:15:44 +01:00
Zlatin Balevsky
0c1008d6b3 update readme 2019-06-18 04:01:04 +01:00
Zlatin Balevsky
c46f1b1ccd delay processing of files until after 1 second after the last MODIFY event 2019-06-17 23:08:16 +01:00
Zlatin Balevsky
7e2c4d48c6 wait for UI to load before loading files 2019-06-17 22:34:19 +01:00
Zlatin Balevsky
71a919e62b shut down watcher before connection manager 2019-06-17 22:15:50 +01:00
Zlatin Balevsky
d5eb65bdc2 do not print stacktrace on clean shutdown 2019-06-17 21:58:44 +01:00
Zlatin Balevsky
aef7533bd5 make watcher thread daemon 2019-06-17 19:58:57 +01:00
Zlatin Balevsky
e78016ead4 ui panel for managing watched directories 2019-06-17 19:23:04 +01:00
Zlatin Balevsky
52ced669dd basic watching of directories 2019-06-17 16:36:12 +01:00
Zlatin Balevsky
b52fb38ede fix disabling of buttons on search tab close 2019-06-17 13:43:11 +01:00
Zlatin Balevsky
5dcef3ca05 Release 0.2.5 2019-06-17 12:53:58 +01:00
Zlatin Balevsky
eaa0e46ce5 Merge branch 'separate-incomplete-files' 2019-06-17 12:45:51 +01:00
Zlatin Balevsky
c4f48c02b6 delete incomplete file on cancel 2019-06-17 12:33:44 +01:00
Zlatin Balevsky
5c16335969 if no row is selected do not enable buttons 2019-06-17 12:26:28 +01:00
Zlatin Balevsky
546eb4e9d3 only allow one download per infohash from gui 2019-06-17 11:25:21 +01:00
Zlatin Balevsky
c3d9e852ba separate incomplete files 2019-06-17 07:49:06 +01:00
Zlatin Balevsky
0db7077a45 Release 0.2.4 2019-06-17 03:22:52 +01:00
Zlatin Balevsky
614ecc85fe new piece selection logic to avoid high cpu bug 2019-06-17 03:21:37 +01:00
Zlatin Balevsky
af66a79376 fix sorting by progress 2019-06-17 00:56:16 +01:00
Zlatin Balevsky
465171c81d prevent multiple identical shared files 2019-06-17 00:38:05 +01:00
Zlatin Balevsky
b507361c58 close the file before marking pieces complete 2019-06-16 23:45:23 +01:00
Zlatin Balevsky
4d001ae74b thread-safe access to the pieces file 2019-06-16 22:56:09 +01:00
Zlatin Balevsky
36a6e2769f Release 0.2.3 2019-06-16 19:05:12 +01:00
Zlatin Balevsky
69eeb7d77a fix 2019-06-16 18:58:52 +01:00
Zlatin Balevsky
551982b72a batch results sent to the GUI to prevent freeze 2019-06-16 18:51:07 +01:00
Zlatin Balevsky
8d808f0b8f Release 0.2.2 2019-06-16 13:30:11 +01:00
Zlatin Balevsky
7833a83c87 mark hash queries for V2 results 2019-06-16 13:17:32 +01:00
Zlatin Balevsky
3160c1a8f3 fix for silent uploader exceptions 2019-06-16 13:01:14 +01:00
Zlatin Balevsky
e295aa67d5 proper log statement 2019-06-16 10:59:11 +01:00
Zlatin Balevsky
a9f5625dc3 fix popup menu on failed downloads 2019-06-16 10:50:21 +01:00
Zlatin Balevsky
cc0af5b9ed add context menu to downloads table 2019-06-16 10:29:28 +01:00
Zlatin Balevsky
041fc3bef3 Release 0.2.1 2019-06-16 09:37:53 +01:00
Zlatin Balevsky
03c3b1ebf1 fix copying of hash if search results are sorted 2019-06-16 09:30:52 +01:00
Zlatin Balevsky
aece390daa right-click menu on the search results tab 2019-06-16 09:17:17 +01:00
Zlatin Balevsky
cf63be68e8 copy search to clipboard 2019-06-16 08:38:47 +01:00
Zlatin Balevsky
88ece4dc23 add option to show search hashes in monitor 2019-06-16 08:29:03 +01:00
Zlatin Balevsky
13767d58f2 detect if a query is hash, get rid of radio buttons 2019-06-16 08:09:51 +01:00
Zlatin Balevsky
05a1ccd3d8 update todo 2019-06-16 07:31:01 +01:00
Zlatin Balevsky
6807c14a5f add copy hash to clipboard 2019-06-16 07:23:22 +01:00
Zlatin Balevsky
684be0c50e start of work on directory watcher 2019-06-16 07:03:16 +01:00
Zlatin Balevsky
6655c262c6 more todo items 2019-06-16 07:01:50 +01:00
Zlatin Balevsky
b1ccd55030 more todo items 2019-06-16 06:26:03 +01:00
Zlatin Balevsky
a3becd0f7e update TODO 2019-06-16 06:19:28 +01:00
Zlatin Balevsky
af2f3e0ebf in/out direction done 2019-06-16 05:56:56 +01:00
Zlatin Balevsky
e2b7ffa1db direction in monitor tab 2019-06-16 05:52:23 +01:00
Zlatin Balevsky
0e0176acfc add web UI to TODO list 2019-06-16 05:35:05 +01:00
Zlatin Balevsky
7f09bb079c Beginnings of a TODO list 2019-06-16 05:28:42 +01:00
Zlatin Balevsky
77e48b01bb Release 0.2.0 2019-06-15 21:10:11 +01:00
Zlatin Balevsky
12db6857c1 disable unshare files popup until implemented 2019-06-15 12:12:08 +01:00
Zlatin Balevsky
acd67733a5 sort the downloads table on updates 2019-06-15 12:08:29 +01:00
Zlatin Balevsky
8d3ce7aa8e use the same sorted row selection logic in downloads table 2019-06-15 09:57:12 +01:00
Zlatin Balevsky
0eb5870e9b Release 0.1.13 2019-06-15 09:19:19 +01:00
Zlatin Balevsky
051efbfaba prevent empty searches 2019-06-15 09:11:42 +01:00
Zlatin Balevsky
6b38d7bffb fix sorting bug try 2 2019-06-15 08:58:51 +01:00
Zlatin Balevsky
5778d537ce Release 0.1.12 2019-06-15 08:39:19 +01:00
Zlatin Balevsky
93664a7985 update readme 2019-06-15 08:37:29 +01:00
Zlatin Balevsky
edd58e0c90 allow cancelling of downloads while hashlist is being fetched 2019-06-15 08:35:23 +01:00
Zlatin Balevsky
9ac52b61dc sort results table on update 2019-06-15 08:33:22 +01:00
Zlatin Balevsky
0a4b9c7029 shut down connection manager last 2019-06-15 08:20:10 +01:00
Zlatin Balevsky
87b366a205 add ability to cancel failed downloads 2019-06-14 22:49:56 +01:00
Zlatin Balevsky
040248560a Release 0.1.11 2019-06-14 22:26:28 +01:00
Zlatin Balevsky
77caaf83de reset instead of close 2019-06-14 22:08:25 +01:00
Zlatin Balevsky
cc5ece5103 do not throw exception on shutdown 2019-06-14 21:36:50 +01:00
Zlatin Balevsky
db7e21e343 close connections in parallel, more shutdown fixes 2019-06-14 21:25:22 +01:00
Zlatin Balevsky
a388eaec1d shutdown all connections on shutdown 2019-06-14 20:53:54 +01:00
Zlatin Balevsky
8ff39072c7 download file on double-clicking a result 2019-06-14 20:42:26 +01:00
Zlatin Balevsky
55d2ac9b24 delete partial files and pieces file on cancel 2019-06-14 20:27:14 +01:00
Zlatin Balevsky
6ebe492fd8 if nothing is enabled cancel and retry buttons are disabled 2019-06-14 18:37:18 +01:00
Zlatin Balevsky
165cd542ec work around not having a selected row while cancelling a download 2019-06-14 18:28:00 +01:00
Zlatin Balevsky
5ca0c8b00d wip on unshare selected files popup menu 2019-06-14 18:08:56 +01:00
Zlatin Balevsky
b6a38e3f23 revert to default lnf if the desired one fails 2019-06-14 18:01:14 +01:00
Zlatin Balevsky
34d9165bd5 Release 0.1.10 2019-06-14 16:43:28 +01:00
Zlatin Balevsky
2e52dd5c49 fix overwriting of custom nickname 2019-06-14 16:20:21 +01:00
Zlatin Balevsky
2a315dd734 add option to exclude local results from searches 2019-06-14 14:48:01 +01:00
Zlatin Balevsky
6b661b99c5 fix sorting by size in shared files table 2019-06-14 13:47:35 +01:00
Zlatin Balevsky
5dacd60bbb hook up cleaning up of cancelled/finished downloads 2019-06-14 13:11:20 +01:00
Zlatin Balevsky
f8f7cfe836 UI options panel 2019-06-14 12:51:27 +01:00
Zlatin Balevsky
0b4f261bc1 ability to not show monitor panel 2019-06-14 12:21:14 +01:00
Zlatin Balevsky
042d67d784 fix selection of size column 2019-06-14 11:46:31 +01:00
Zlatin Balevsky
800df88f14 proper sorting by size 2019-06-14 11:10:19 +01:00
Zlatin Balevsky
4d1eac50a0 update readme for sorting bug 2019-06-14 10:39:58 +01:00
Zlatin Balevsky
c48df7f14b Release 0.1.9 2019-06-13 22:57:08 +01:00
Zlatin Balevsky
9d04148001 remember loaded downloads from previous sessions 2019-06-13 22:53:23 +01:00
Zlatin Balevsky
bb4d522572 Release 0.1.8 2019-06-13 15:27:06 +01:00
Zlatin Balevsky
8052501e52 increase persistence interval to 15 seconds 2019-06-13 15:25:30 +01:00
Zlatin Balevsky
66cc6d8ab7 reduce piece size by factor of 8 2019-06-13 15:24:26 +01:00
Zlatin Balevsky
a45e57f5ec Release 0.1.7 2019-06-13 10:28:44 +01:00
Zlatin Balevsky
7d8ca55d87 fix emiting of download finished event 2019-06-13 10:27:18 +01:00
Zlatin Balevsky
de22f3c6b9 use metal lnf on java 9 or newer 2019-06-13 05:02:11 +01:00
Zlatin Balevsky
3b0eb5678d update wire protocol 2019-06-12 23:46:48 +01:00
Zlatin Balevsky
5a1f32e40b Release 0.1.6 2019-06-12 22:42:34 +01:00
Zlatin Balevsky
ca3f2513e1 sync persisting of hashlist or hashroot for active downloads 2019-06-12 22:39:00 +01:00
Zlatin Balevsky
658d9cf5a8 serialize downloads that do not have a hashlist 2019-06-12 22:22:20 +01:00
Zlatin Balevsky
e389090b7e download side of oob hashlist 2019-06-12 22:13:16 +01:00
Zlatin Balevsky
04ceaba514 do not persist downloaders until they have a hashlist 2019-06-12 21:02:01 +01:00
Zlatin Balevsky
6a01d97a8d enable oob infohash in queries; send V2 search results 2019-06-12 20:55:13 +01:00
Zlatin Balevsky
747663e1dc fix pieece size of shared downloaded files 2019-06-12 18:22:53 +01:00
Zlatin Balevsky
e426b3ccbd refactoring to enable hashlist uploads 2019-06-12 17:33:43 +01:00
Zlatin Balevsky
5172e19627 font-ize more elements 2019-06-12 16:34:24 +01:00
Zlatin Balevsky
e826cfd8d5 start work on ability to configure font 2019-06-12 16:26:40 +01:00
Zlatin Balevsky
51004f6fe9 wip on adding UI options 2019-06-11 08:04:26 +01:00
Zlatin Balevsky
08bb2b614d load some gui props from a separate config file 2019-06-11 02:17:58 +01:00
Zlatin Balevsky
d0e5d0ce8a set default i2cp options if none present 2019-06-10 08:55:44 +01:00
Zlatin Balevsky
9e05802d1b Merge pull request #4 from mikalv/master
Fixes i2cp bug while connecting to remote router
2019-06-10 08:48:27 +01:00
Mikal Villa
fb4f56eec9 Remove debug message 2019-06-10 09:40:32 +02:00
Mikal Villa
be2083d430 Fixes i2cp bug while connecting to remote router 2019-06-10 09:39:46 +02:00
Zlatin Balevsky
af6275d0a3 prevent Cli from hanging if there are no shared files 2019-06-10 07:04:01 +01:00
Zlatin Balevsky
5269815329 update readme 2019-06-10 04:49:09 +01:00
Zlatin Balevsky
bd21cf65ea Release 0.1.5 2019-06-09 20:37:39 +01:00
Zlatin Balevsky
dea592eb27 do not resume cancelled downloads on restart 2019-06-09 20:36:14 +01:00
Zlatin Balevsky
c81f963e0a Release 0.1.4 2019-06-09 17:37:10 +01:00
Zlatin Balevsky
dc6b1199f3 implement resume across restart 2019-06-09 17:35:32 +01:00
Zlatin Balevsky
42621a2dfb wip on persisting downloads between restarts 2019-06-09 16:26:00 +01:00
Zlatin Balevsky
a7125963a7 DownloadManager listens to events, not FileManager 2019-06-09 16:19:35 +01:00
Zlatin Balevsky
f39d7f4fa8 emit an event when the UI loads 2019-06-09 15:44:06 +01:00
Zlatin Balevsky
b88334f19a Release 0.1.3 for sorting fixes 2019-06-08 17:57:36 +01:00
Zlatin Balevsky
81e186ad1f fix sorting by download status and trust, fix events on downloads table 2019-06-08 17:55:39 +01:00
Zlatin Balevsky
33a45c3835 fix buttons when tables are sorted 2019-06-08 17:09:44 +01:00
Zlatin Balevsky
32b7867e44 Release 0.1.2 for search index test 2019-06-08 13:09:28 +01:00
Zlatin Balevsky
5b313276f4 fix tests broken by piece size change 2019-06-08 13:08:20 +01:00
Zlatin Balevsky
abba4cc6fa fix a bug where multi-term search modifies the index 2019-06-08 12:55:47 +01:00
Zlatin Balevsky
15b4804968 update wire protocol with originator and oobHashlist fields 2019-06-08 12:40:38 +01:00
Zlatin Balevsky
942a01a501 forgot to commit 2019-06-08 09:33:16 +01:00
Zlatin Balevsky
502a8d91da print only the root 2019-06-08 09:30:01 +01:00
Zlatin Balevsky
5414e8679b update readme 2019-06-08 09:07:13 +01:00
Zlatin Balevsky
14e42dd7c2 correct element 2019-06-08 08:46:28 +01:00
Zlatin Balevsky
1299fb2512 Release 0.1.1 for fixes and reduced piece size 2019-06-08 08:04:35 +01:00
Zlatin Balevsky
9bafdfe0b1 reduce piece size 2019-06-08 07:57:36 +01:00
Zlatin Balevsky
36eb632756 do not set the flag until it is implemented 2019-06-08 07:53:33 +01:00
Zlatin Balevsky
83ee620402 sort by columns 2019-06-08 07:45:07 +01:00
Zlatin Balevsky
3fe40d317d update readme for custom host:port 2019-06-08 07:28:23 +01:00
Zlatin Balevsky
e9703a2652 support for custom i2cp host:port 2019-06-08 07:23:14 +01:00
Zlatin Balevsky
a3fe89851f OS-specific home dir 2019-06-08 07:10:24 +01:00
Zlatin Balevsky
b9ea0128cd add oobInfohash flag, filter results by that flag 2019-06-08 02:44:49 +01:00
Zlatin Balevsky
53c6db4ec8 de-hardcode piece sizes in results 2019-06-08 01:48:07 +01:00
Zlatin Balevsky
60776829b9 fix disabling sharing of downloaded files 2019-06-08 01:35:03 +01:00
Zlatin Balevsky
b5cb31c23d proposed infohash upgrade document 2019-06-08 01:04:56 +01:00
Zlatin Balevsky
5052c0c993 note about downloads in progress 2019-06-07 21:52:38 +01:00
Zlatin Balevsky
06de007866 update readme 2019-06-07 21:22:49 +01:00
Zlatin Balevsky
7c8a0c9ad9 update readme for 0.1.0 2019-06-07 19:24:13 +01:00
Zlatin Balevsky
cda81a89a2 Release 0.1.0 2019-06-07 18:39:39 +01:00
Zlatin Balevsky
483773422c fix remaining tests 2019-06-07 18:23:16 +01:00
Zlatin Balevsky
1e1e6d0bb0 fix test 2019-06-07 18:17:16 +01:00
Zlatin Balevsky
668d6e087d fix test 2019-06-07 18:15:03 +01:00
Zlatin Balevsky
49af412b96 status update and auto-retry 2019-06-07 16:13:35 +01:00
Zlatin Balevsky
d5513021ed Release 0.0.14 for split search 2019-06-07 15:00:16 +01:00
Zlatin Balevsky
c3154cf717 stray println 2019-06-07 14:58:03 +01:00
Zlatin Balevsky
114940c4c1 fix searches with spaces 2019-06-07 14:51:09 +01:00
Zlatin Balevsky
d4336e9b5d outbound nickname 2019-06-07 14:24:45 +01:00
Zlatin Balevsky
2c1d5508ed outbound nickname 2019-06-07 14:21:03 +01:00
Zlatin Balevsky
1cebf6c7bd cli downloader 2019-06-07 14:02:10 +01:00
Zlatin Balevsky
e12924a207 shadow jar for cli 2019-06-07 14:01:28 +01:00
Zlatin Balevsky
f3b11895e4 utility for hashing files 2019-06-07 12:10:18 +01:00
Zlatin Balevsky
1e084820fb log tweak 2019-06-07 11:55:17 +01:00
Zlatin Balevsky
2198b4846d change wording 2019-06-07 11:43:02 +01:00
Zlatin Balevsky
a5d442d320 Release 0.0.13 for keyword search fix 2019-06-07 06:37:23 +01:00
Zlatin Balevsky
3f9ee887d6 prevent NPE in toString 2019-06-07 06:31:29 +01:00
Zlatin Balevsky
4a9e6d3b6b prevent npe in keyword searches 2019-06-07 06:14:40 +01:00
Zlatin Balevsky
80f2cc5f99 logging and toString() 2019-06-07 06:07:02 +01:00
Zlatin Balevsky
12283dba9d Release 0.0.12 for search by hash 2019-06-06 22:22:43 +01:00
Zlatin Balevsky
5c959bc8b7 name update search tab 2019-06-06 22:07:20 +01:00
Zlatin Balevsky
f3712fe7af delay initial update check a minute 2019-06-06 21:52:35 +01:00
Zlatin Balevsky
3e49b0ec66 infohash may be null 2019-06-06 21:40:44 +01:00
Zlatin Balevsky
f90beb8e3d encode infohash 2019-06-06 21:31:00 +01:00
Zlatin Balevsky
fbad7b6c7e searchHash 2019-06-06 21:27:07 +01:00
Zlatin Balevsky
ec2d89c18c serialize infohash 2019-06-06 21:21:40 +01:00
Zlatin Balevsky
c27fc0a515 update from infohash 2019-06-06 21:08:58 +01:00
Zlatin Balevsky
14681c2060 search by hash ui 2019-06-06 20:30:15 +01:00
Zlatin Balevsky
1aeb230ea8 catch exceptions in event dispatch thread 2019-06-06 19:31:10 +01:00
Zlatin Balevsky
d1dfc73f5a decode infohash 2019-06-06 19:28:29 +01:00
Zlatin Balevsky
0cebe4119c update list of limitations 2019-06-06 14:19:43 +01:00
Zlatin Balevsky
9f21120ec8 print periodic stats 2019-06-06 13:59:05 +01:00
Zlatin Balevsky
7eea8be67d Release 0.0.11 for file loading bug 2019-06-06 09:22:16 +01:00
Zlatin Balevsky
f114302bdb hopefully fix the shared file loss 2019-06-06 09:19:00 +01:00
Zlatin Balevsky
05b9b37488 emit an event when all files are loaded 2019-06-06 09:10:09 +01:00
Zlatin Balevsky
52f317a5b7 prevent division by zero 2019-06-06 07:09:54 +01:00
Zlatin Balevsky
fb8227a1f3 prevent division by zero 2019-06-06 07:09:05 +01:00
Zlatin Balevsky
5677d9f46a release 0.0.10 2019-06-06 00:23:59 +01:00
Zlatin Balevsky
c5192e3845 update readme for fix 2019-06-06 00:21:41 +01:00
Zlatin Balevsky
43c2a55cb8 0 not null 2019-06-06 00:03:22 +01:00
Zlatin Balevsky
94f6de6bea do not create new objects because that clears the successes 2019-06-05 21:07:23 +01:00
Zlatin Balevsky
6782849a12 retry hosts received from hostcache even if marked as failed 2019-06-05 20:58:28 +01:00
Zlatin Balevsky
c07d351c5d switch to jul, reduce aging interval 2019-06-05 20:14:38 +01:00
Zlatin Balevsky
dc2f675dd3 delete pieces file when download finishes 2019-06-05 19:52:50 +01:00
Zlatin Balevsky
a8e795ec51 do not accept connections if already try to connect to them 2019-06-05 19:07:36 +01:00
Zlatin Balevsky
33c5b3b18e option to disable sharing of downloaded files 2019-06-05 17:46:55 +01:00
Zlatin Balevsky
581fce4643 share downloaded files 2019-06-05 17:33:34 +01:00
Zlatin Balevsky
7fe78a0719 more clear name 2019-06-05 16:47:10 +01:00
Zlatin Balevsky
cdb6e22522 ui option for allowing untrusted connections 2019-06-05 15:47:44 +01:00
Zlatin Balevsky
2edeb046be drop neutral queries if configured 2019-06-05 15:38:39 +01:00
Zlatin Balevsky
4021f3c244 fix jullog 2019-06-05 13:04:46 +01:00
Zlatin Balevsky
9008fac24d shutdown cleanly on exit 2019-06-05 12:38:56 +01:00
Zlatin Balevsky
e2f92c5c5e print reported version 2019-06-05 10:07:04 +01:00
Zlatin Balevsky
7b33a16fd8 update list of known issues 2019-06-05 09:22:56 +01:00
Zlatin Balevsky
9a2531b264 release 0.0.9 2019-06-05 09:04:52 +01:00
Zlatin Balevsky
9a8dadff57 center the sources column 2019-06-05 08:43:58 +01:00
Zlatin Balevsky
4a274010f9 fix close tab button not appearing on duplicate searches 2019-06-05 08:34:09 +01:00
Zlatin Balevsky
1eb930435b fix hashing errors in large files 2019-06-05 00:34:38 +01:00
Zlatin Balevsky
9df28552ad try to load persisted files before hashing new ones 2019-06-05 00:22:36 +01:00
Zlatin Balevsky
ac0204dffc hopefully more accurate bandwidth gauge 2019-06-04 23:50:36 +01:00
Zlatin Balevsky
e5c402a400 retry download workers on resume 2019-06-04 23:36:57 +01:00
Zlatin Balevsky
7704c73b68 pass logging.properties to cli 2019-06-04 22:19:19 +01:00
Zlatin Balevsky
a9aa8dd840 do not count finished downloaders towards bandwidth 2019-06-04 21:55:59 +01:00
Zlatin Balevsky
de682a802a options panel for i2p tunnel options 2019-06-04 21:14:23 +01:00
Zlatin Balevsky
5435518212 core-side i2cp options 2019-06-04 20:20:25 +01:00
Zlatin Balevsky
bd01f983c9 break html in search results 2019-06-04 19:27:22 +01:00
Zlatin Balevsky
8b63864b90 utility to share files in headless mode 2019-06-04 18:58:02 +01:00
Zlatin Balevsky
ed3943c1af 0.0.8 for UI tweaks and sanitization 2019-06-04 18:01:08 +01:00
Zlatin Balevsky
e195141a27 simpler sanitization 2019-06-04 17:58:19 +01:00
Zlatin Balevsky
bb02fdbee9 do not use regex in sanitization 2019-06-04 17:46:41 +01:00
Zlatin Balevsky
6e3a2c0d08 update split pattern 2019-06-04 17:30:55 +01:00
Zlatin Balevsky
bd5fecc19d fix 2019-06-04 17:04:24 +01:00
Zlatin Balevsky
d5db49fa79 initialize core 2019-06-04 16:56:58 +01:00
Zlatin Balevsky
f2ea8619bb CLI project 2019-06-04 16:46:32 +01:00
Zlatin Balevsky
b129e79196 do not count finished workers in total count 2019-06-04 16:22:48 +01:00
Zlatin Balevsky
404d5b60bc format length in shared file stable an resize columns 2019-06-04 14:05:33 +01:00
Zlatin Balevsky
de2753ac50 preferred sizes for download table columns 2019-06-04 13:35:18 +01:00
Zlatin Balevsky
2d53999c8e only show download speed if downloading 2019-06-04 13:23:48 +01:00
Zlatin Balevsky
5aecf72d6f format download speed 2019-06-04 13:19:14 +01:00
Zlatin Balevsky
a574a67ec6 format file size 2019-06-04 13:15:24 +01:00
Zlatin Balevsky
6b5ad969b7 pass logging properties 2019-06-04 13:00:10 +01:00
Zlatin Balevsky
617209c4e4 column widths tweaks 2019-06-04 12:46:48 +01:00
Zlatin Balevsky
16b475bd9a 0.0.7 for multi-source downloads 2019-06-04 04:17:29 +01:00
Zlatin Balevsky
3cea1870cd multisource downloads, untested 2019-06-04 03:30:55 +01:00
Zlatin Balevsky
e7240dcb6f keep track of claimed pieces in preparation for multi-source downloads 2019-06-04 02:18:30 +01:00
Zlatin Balevsky
c91440cbfc config option for update check interval 2019-06-03 23:30:39 +01:00
Zlatin Balevsky
294605f5c7 basic update notification 2019-06-03 23:23:07 +01:00
Zlatin Balevsky
986caf3a75 backend for checking updates 2019-06-03 23:11:03 +01:00
Zlatin Balevsky
8524d5309f typo 2019-06-03 21:53:51 +01:00
Zlatin Balevsky
48b3ac2b4a wip on update server 2019-06-03 21:50:46 +01:00
Zlatin Balevsky
18f21dc247 update server 2019-06-03 21:47:31 +01:00
Zlatin Balevsky
e69a5eac18 0.0.6 2019-06-03 18:30:27 +01:00
Zlatin Balevsky
6e0f1778b7 rudimentary speed gauge 2019-06-03 18:02:10 +01:00
Zlatin Balevsky
abbb741d73 show the number of sources for a result, counted by infohash 2019-06-03 17:21:08 +01:00
Zlatin Balevsky
07dfc0a1d1 destroy mvc group on options window close 2019-06-03 15:33:16 +01:00
Zlatin Balevsky
00c12cfd49 hook up download retry logic 2019-06-03 15:02:04 +01:00
Zlatin Balevsky
1ee389ff91 options dialog 2019-06-03 14:40:32 +01:00
Zlatin Balevsky
3642736cfe options dialog, wip 2019-06-03 11:32:34 +01:00
Zlatin Balevsky
b6f7f51476 verify X-Persona header if present 2019-06-03 08:12:33 +01:00
Zlatin Balevsky
4c21f2d5ae show full persona in searches 2019-06-03 08:06:51 +01:00
Zlatin Balevsky
9e0d52d548 show source in incoming searches 2019-06-03 07:43:28 +01:00
Zlatin Balevsky
fad01603de fix replyTo field 2019-06-03 07:35:09 +01:00
Zlatin Balevsky
da007795fb learn about new hosts from incoming connections too 2019-06-03 07:27:12 +01:00
Zlatin Balevsky
881d755dd3 update test work with personas 2019-06-02 22:47:43 +01:00
Zlatin Balevsky
bc3b6f500f 0.0.5 for trust panel 2019-06-02 12:18:44 +01:00
Zlatin Balevsky
8f8710801c update any result tabs on trust events 2019-06-02 12:16:28 +01:00
Zlatin Balevsky
43f3cf9b7a small ui tweak 2019-06-02 12:00:14 +01:00
Zlatin Balevsky
6fe4155678 delete accidental commit 2019-06-02 11:57:15 +01:00
Zlatin Balevsky
32f944a089 trust panel ui 2019-06-02 11:56:19 +01:00
Zlatin Balevsky
b19b5ef315 Fix for java 9+ #1 2019-06-02 10:04:27 +01:00
Zlatin Balevsky
5138935c20 add options for portable installation, issue #2 2019-06-02 09:33:28 +01:00
Zlatin Balevsky
ba596af778 Trust panel, wip 2019-06-02 05:40:44 +01:00
Zlatin Balevsky
0f4533c867 persist personas in trust files instead of destinations 2019-06-02 05:12:14 +01:00
Zlatin Balevsky
727834390c slightly better looking message 2019-06-02 04:18:15 +01:00
Zlatin Balevsky
c51e3874da show a message instead of search bar while disconnected 2019-06-02 04:12:11 +01:00
Zlatin Balevsky
d18a618575 focus on the tab of the new search 2019-06-02 03:54:34 +01:00
Zlatin Balevsky
15508f417d hack to add some horizontal space 2019-06-02 01:33:53 +01:00
Zlatin Balevsky
44dad55178 update test 2019-06-02 01:28:00 +01:00
Zlatin Balevsky
5c17e77190 change groovy version to match griffon 2019-06-02 01:20:55 +01:00
Zlatin Balevsky
de856cd085 canonize search terms 2019-06-02 00:42:18 +01:00
Zlatin Balevsky
d2533cc4d6 retry failed downloads, every 15 minutes by default 2019-06-02 00:22:33 +01:00
Zlatin Balevsky
f41cc39659 show who is downloading 2019-06-01 21:53:14 +01:00
Zlatin Balevsky
656b62fc2e 0.0.4 with download retry 2019-06-01 18:31:36 +01:00
Zlatin Balevsky
13b3f0f63b retry implemented 2019-06-01 18:30:30 +01:00
Zlatin Balevsky
98ea8154a5 store done pieces on disk to enable resume 2019-06-01 18:09:14 +01:00
Zlatin Balevsky
82377aa9df hook up cancel button 2019-06-01 17:44:52 +01:00
Zlatin Balevsky
bd2368e23a cancelled downloader state 2019-06-01 17:31:18 +01:00
Zlatin Balevsky
70078c309b add cancel and retry buttons, not hooked up yet 2019-06-01 17:30:29 +01:00
Zlatin Balevsky
15a0eda713 preserve selection in downloads table 2019-06-01 17:09:23 +01:00
Zlatin Balevsky
9645716e18 prevent rare stacktraces on shutdown 2019-06-01 16:55:37 +01:00
Zlatin Balevsky
03d6af39ed icon for closing tabs 2019-06-01 16:43:05 +01:00
Zlatin Balevsky
9435cb003b Show warning if cannot find I2P router 2019-06-01 16:36:23 +01:00
Zlatin Balevsky
63399803d5 ui tweaks 2019-06-01 15:59:55 +01:00
Zlatin Balevsky
4d6541030f disable system l&f on osx 2019-06-01 14:55:17 +01:00
Zlatin Balevsky
16c51e7cd6 add a failed download state 2019-06-01 14:14:20 +01:00
Zlatin Balevsky
9d75550b6f do not show local searches in monitor 2019-06-01 13:48:12 +01:00
Zlatin Balevsky
1996681677 incoming searches monitor 2019-06-01 13:44:46 +01:00
Zlatin Balevsky
9dac1891b2 connection monitor 2019-06-01 13:32:40 +01:00
Zlatin Balevsky
1255ac936b close connections on shutdown 2019-06-01 13:04:22 +01:00
Zlatin Balevsky
2db3276b07 fix rare NPE on shutdown 2019-06-01 13:03:42 +01:00
Zlatin Balevsky
7e3b0795af disable buttons if no row is selected 2019-06-01 12:23:20 +01:00
197 changed files with 10777 additions and 4406 deletions


@@ -1,39 +1,35 @@
 # MuWire - Easy Anonymous File-Sharing
-MuWire is an easy to use file-sharing program which offers anonymity using [I2P technology](http://geti2p.net).
+MuWire is an easy to use file-sharing program which offers anonymity using [I2P technology](http://geti2p.net). It works on any platform Java works on, including Windows,MacOS,Linux.
 It is inspired by the LimeWire Gnutella client and developped by a former LimeWire developer.
-The project is in development. You can find technical documentation in the "doc" folder.
+The current stable release - 0.4.6 is avaiable for download at https://muwire.com. You can find technical documentation in the "doc" folder.
 ### Building
-You need JDK 8 or newer. After installing that and setting up the appropriate paths, just type
+You need JRE 8 or newer. After installing that and setting up the appropriate paths, just type
 ```
-./gradlew assemble
+./gradlew clean assemble
 ```
 If you want to run the unit tests, type
 ```
-./gradlew build
+./gradlew clean build
 ```
 Some of the UI tests will fail because they haven't been written yet :-/
 ### Running
-You need to have an I2P router up and running on the same machine. After you build the application, look inside "gui/build/distributions". Untar/unzip one of the "shadow" files and then run the jar contained inside.
+After you build the application, look inside `gui/build/distributions`. Untar/unzip one of the `shadow` files and then run the jar contained inside by typing `java -jar MuWire-x.y.z.jar` in a terminal or command prompt.
-The first time you run MuWire it will ask you to select a nickname. This nickname will be displayed with search results, so that others can verify the file was shared by you.
+If you have an I2P router running on the same machine that is all you need to do. If you use a custom I2CP host and port, create a file `i2p.properties` and put `i2cp.tcp.host=<host>` and `i2cp.tcp.port=<port>` in there. On Windows that file should go into `%HOME%\AppData\Roaming\MuWire`, on Mac into `$HOME/Library/Application Support/MuWire` and on Linux `$HOME/.MuWire`
-At the moment there are very few nodes on the network, so you will see very few connections and search results. It is best to leave MuWire running all the time, just like I2P.
+If you do not have an I2P router, pass the following switch to the Java process: `-DembeddedRouter=true`. This will launch MuWire's embedded router. Be aware that this causes startup to take a lot longer.
-### Known bugs and limitations
-* Any shared files get re-hashed on startup
-* Sometimes the list of shared files gets lost
-* Many UI features you would expect are not there yet
+### GPG Fingerprint
+471B 9FD4 5517 A5ED 101F C57D A728 3207 2D52 5E41
+You can find the full key at https://keybase.io/zlatinb
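As a concrete illustration of the I2CP setting described in the new Running section (an aside, not part of the diff): a minimal `i2p.properties` might look like the snippet below. The values are examples only; 127.0.0.1 and 7654 are I2P's customary I2CP host and port, so substitute whatever your router actually uses.
```
i2cp.tcp.host=127.0.0.1
i2cp.tcp.port=7654
```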

TODO.md (new file, +31)

@@ -0,0 +1,31 @@
# TODO List
Not in any particular order yet
### Big Items
##### Bloom Filters
This reduces query traffic by not sending last hop queries to peers that definitely do not have the file
##### Two-tier Topology
This helps with scalability
##### Content Control Panel
To allow every user to not route queries for content they do not like. This is mostly GUI work, the backend part is simple
##### Web UI, REST Interface, etc.
Basically any non-gui non-cli user interface
##### Metadata editing and search
To enable parsing of metadata from known file types and the user editing it or adding manual metadata
### Small Items
* Wrapper of some kind for in-place upgrades
* Download file sequentially
* Multiple-selection download, Ctrl-A


@@ -2,8 +2,8 @@ subprojects {
 apply plugin: 'groovy'
 dependencies {
-    compile 'net.i2p:i2p:0.9.40'
-    compile 'org.codehaus.groovy:groovy-all:2.5.7'
+    compile 'net.i2p:i2p:0.9.41'
+    compile 'org.codehaus.groovy:groovy-all:2.4.15'
 }
 compileGroovy {

cli/build.gradle (new file, +22)

@@ -0,0 +1,22 @@
buildscript {
    repositories {
        jcenter()
        mavenLocal()
    }
    dependencies {
        classpath 'com.github.jengelman.gradle.plugins:shadow:2.0.4'
    }
}

apply plugin : 'application'
mainClassName = 'com.muwire.cli.Cli'
apply plugin : 'com.github.johnrengelman.shadow'

applicationDefaultJvmArgs = ['-Djava.util.logging.config.file=logging.properties']

dependencies {
    compile project(":core")
}
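With the Shadow plugin applied above, a runnable fat jar for the CLI can typically be produced with the plugin's standard task. The task name and output location below are the plugin's defaults and an assumption that the subproject keeps the directory name `cli`; this build file does not spell them out.
```
# output normally lands under cli/build/libs/
./gradlew :cli:shadowJar
```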


@@ -0,0 +1,144 @@
package com.muwire.cli

import java.util.concurrent.CountDownLatch

import com.muwire.core.Core
import com.muwire.core.MuWireSettings
import com.muwire.core.UILoadedEvent
import com.muwire.core.connection.ConnectionAttemptStatus
import com.muwire.core.connection.ConnectionEvent
import com.muwire.core.connection.DisconnectionEvent
import com.muwire.core.files.AllFilesLoadedEvent
import com.muwire.core.files.FileHashedEvent
import com.muwire.core.files.FileLoadedEvent
import com.muwire.core.files.FileSharedEvent
import com.muwire.core.upload.UploadEvent
import com.muwire.core.upload.UploadFinishedEvent

class Cli {
    public static void main(String[] args) {
        def home = System.getProperty("user.home") + File.separator + ".MuWire"
        home = new File(home)
        if (!home.exists())
            home.mkdirs()

        def propsFile = new File(home, "MuWire.properties")
        if (!propsFile.exists()) {
            println "create props file ${propsFile.getAbsoluteFile()} before launching MuWire"
            System.exit(1)
        }

        def props = new Properties()
        propsFile.withInputStream { props.load(it) }
        props = new MuWireSettings(props)

        Core core
        try {
            core = new Core(props, home, "0.4.8")
        } catch (Exception bad) {
            bad.printStackTrace(System.out)
            println "Failed to initialize core, exiting"
            System.exit(1)
        }

        def filesList
        if (args.length == 0) {
            println "Enter a file containing list of files to share"
            def reader = new BufferedReader(new InputStreamReader(System.in))
            filesList = reader.readLine()
        } else
            filesList = args[0]

        Thread.sleep(1000)
        println "loading shared files from $filesList"

        // listener for shared files
        def sharedListener = new SharedListener()
        core.eventBus.register(FileHashedEvent.class, sharedListener)
        core.eventBus.register(FileLoadedEvent.class, sharedListener)

        // for connections
        def connectionsListener = new ConnectionListener()
        core.eventBus.register(ConnectionEvent.class, connectionsListener)
        core.eventBus.register(DisconnectionEvent.class, connectionsListener)

        // for uploads
        def uploadsListener = new UploadsListener()
        core.eventBus.register(UploadEvent.class, uploadsListener)
        core.eventBus.register(UploadFinishedEvent.class, uploadsListener)

        Timer timer = new Timer("status-printer", true)
        timer.schedule({
            println String.valueOf(new Date()) + " Connections $connectionsListener.connections Uploads $uploadsListener.uploads Shared $sharedListener.shared"
        } as TimerTask, 60000, 60000)

        def latch = new CountDownLatch(1)
        def fileLoader = new Object() {
            public void onAllFilesLoadedEvent(AllFilesLoadedEvent e) {
                latch.countDown()
            }
        }
        core.eventBus.register(AllFilesLoadedEvent.class, fileLoader)
        core.startServices()
        core.eventBus.publish(new UILoadedEvent())
        println "waiting for files to load"
        latch.await()

        // now we begin
        println "MuWire is ready"
        filesList = new File(filesList)
        filesList.withReader {
            def toShare = it.readLine()
            core.eventBus.publish(new FileSharedEvent(file : new File(toShare)))
        }

        Runtime.getRuntime().addShutdownHook({
            println "shutting down.."
            core.shutdown()
            println "shutdown."
        })

        Thread.sleep(Integer.MAX_VALUE)
    }

    static class ConnectionListener {
        volatile int connections
        public void onConnectionEvent(ConnectionEvent e) {
            if (e.status == ConnectionAttemptStatus.SUCCESSFUL)
                connections++
        }
        public void onDisconnectionEvent(DisconnectionEvent e) {
            connections--
        }
    }

    static class UploadsListener {
        volatile int uploads
        public void onUploadEvent(UploadEvent e) {
            uploads++
            println String.valueOf(new Date()) + " Starting upload of ${e.uploader.file.getName()} to ${e.uploader.request.downloader.getHumanReadableName()}"
        }
        public void onUploadFinishedEvent(UploadFinishedEvent e) {
            uploads--
            println String.valueOf(new Date()) + " Finished upload of ${e.uploader.file.getName()} to ${e.uploader.request.downloader.getHumanReadableName()}"
        }
    }

    static class SharedListener {
        volatile int shared
        void onFileHashedEvent(FileHashedEvent e) {
            if (e.error != null)
                println "ERROR $e.error"
            else {
                println "Shared file : $e.sharedFile.file"
                shared++
            }
        }
        void onFileLoadedEvent(FileLoadedEvent e) {
            shared++
        }
    }
}
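For orientation, a plausible way to drive this class once the CLI shadow jar is built; the jar name is illustrative, not taken from this diff. The single argument is a text file naming what to share, and as written the block above reads only the first line of that file and publishes a FileSharedEvent for it.
```
# list.txt: path(s) to share, one per line (the code above reads the first line)
java -Djava.util.logging.config.file=logging.properties -jar muwire-cli-all.jar list.txt
```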


@@ -0,0 +1,166 @@
package com.muwire.cli

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.CountDownLatch

import com.muwire.core.Core
import com.muwire.core.MuWireSettings
import com.muwire.core.connection.ConnectionAttemptStatus
import com.muwire.core.connection.ConnectionEvent
import com.muwire.core.download.DownloadStartedEvent
import com.muwire.core.download.Downloader
import com.muwire.core.download.UIDownloadEvent
import com.muwire.core.search.QueryEvent
import com.muwire.core.search.SearchEvent
import com.muwire.core.search.UIResultEvent

import net.i2p.data.Base64

class CliDownloader {

    private static final List<Downloader> downloaders = Collections.synchronizedList(new ArrayList<>())
    private static final Map<UUID,ResultsHolder> resultsListeners = new ConcurrentHashMap<>()

    public static void main(String []args) {
        def home = System.getProperty("user.home") + File.separator + ".MuWire"
        home = new File(home)
        if (!home.exists())
            home.mkdirs()

        def propsFile = new File(home,"MuWire.properties")
        if (!propsFile.exists()) {
            println "create props file ${propsFile.getAbsoluteFile()} before launching MuWire"
            System.exit(1)
        }

        def props = new Properties()
        propsFile.withInputStream { props.load(it) }
        props = new MuWireSettings(props)

        def filesList
        int connections
        int resultWait
        if (args.length != 3) {
            println "Enter a file containing list of hashes of files to download, " +
                "how many connections you want before searching" +
                "and how long to wait for results to arrive"
            System.exit(1)
        } else {
            filesList = args[0]
            connections = Integer.parseInt(args[1])
            resultWait = Integer.parseInt(args[2])
        }

        Core core
        try {
            core = new Core(props, home, "0.4.8")
        } catch (Exception bad) {
            bad.printStackTrace(System.out)
            println "Failed to initialize core, exiting"
            System.exit(1)
        }

        def latch = new CountDownLatch(connections)
        def connectionListener = new ConnectionWaiter(latch : latch)
        core.eventBus.register(ConnectionEvent.class, connectionListener)

        core.startServices()
        println "starting to wait until there are $connections connections"
        latch.await()

        println "connected, searching for files"

        def file = new File(filesList)
        file.eachLine {
            String[] split = it.split(",")
            UUID uuid = UUID.randomUUID()
            core.eventBus.register(UIResultEvent.class, new ResultsListener(fileName : split[1]))
            def hash = Base64.decode(split[0])
            def searchEvent = new SearchEvent(searchHash : hash, uuid : uuid)
            core.eventBus.publish(new QueryEvent(searchEvent : searchEvent, firstHop:true,
                replyTo: core.me.destination, receivedOn : core.me.destination, originator: core.me))
        }

        println "waiting for results to arrive"
        Thread.sleep(resultWait * 1000)

        core.eventBus.register(DownloadStartedEvent.class, new DownloadListener())

        resultsListeners.each { uuid, resultsListener ->
            println "starting download of $resultsListener.fileName from ${resultsListener.getResults().size()} hosts"
            File target = new File(resultsListener.fileName)
            core.eventBus.publish(new UIDownloadEvent(target : target, result : resultsListener.getResults()))
        }

        Thread.sleep(1000)

        Timer timer = new Timer("stats-printer")
        timer.schedule({
            println "==== STATUS UPDATE ==="
            downloaders.each {
                int donePieces = it.donePieces()
                int totalPieces = it.nPieces
                int sources = it.activeWorkers.size()
                def root = Base64.encode(it.infoHash.getRoot())
                def state = it.getCurrentState()
                println "file $it.file hash: $root progress: $donePieces/$totalPieces sources: $sources status: $state}"
                it.resume()
            }
            println "==== END ==="
        } as TimerTask, 60000, 60000)

        println "waiting for downloads to finish"
        while(true) {
            boolean allFinished = true
            for (Downloader d : downloaders) {
                allFinished &= d.getCurrentState() == Downloader.DownloadState.FINISHED
            }
            if (allFinished)
                break
            Thread.sleep(1000)
        }

        println "all downloads finished"
    }

    static class ResultsHolder {
        final List<UIResultEvent> results = Collections.synchronizedList(new ArrayList<>())
        String fileName
        void add(UIResultEvent e) {
            results.add(e)
        }
        List getResults() {
            results
        }
    }

    static class ResultsListener {
        UUID uuid
        String fileName
        public onUIResultEvent(UIResultEvent e) {
            println "got a result for $fileName from ${e.sender.getHumanReadableName()}"
            ResultsHolder listener = resultsListeners.get(e.uuid)
            if (listener == null) {
                listener = new ResultsHolder(fileName : fileName)
                resultsListeners.put(e.uuid, listener)
            }
            listener.add(e)
        }
    }

    static class ConnectionWaiter {
        CountDownLatch latch
        public void onConnectionEvent(ConnectionEvent e) {
            if (e.status == ConnectionAttemptStatus.SUCCESSFUL)
                latch.countDown()
        }
    }

    static class DownloadListener {
        public void onDownloadStartedEvent(DownloadStartedEvent e) {
            downloaders.add(e.downloader)
        }
    }
}
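The expected inputs follow directly from main() above: the arguments are the hash-list file, the number of connections to wait for, and how many seconds to wait for results, and each line of the hash list is a Base64 infohash and a target file name separated by a comma. An illustrative invocation (jar name assumed, not stated in the diff):
```
# hashes.txt lines look like: <Base64 infohash>,<file name to save as>
java -cp muwire-cli-all.jar com.muwire.cli.CliDownloader hashes.txt 5 30
```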


@@ -0,0 +1,23 @@
package com.muwire.cli

import com.muwire.core.util.DataUtil

import groovy.json.JsonSlurper
import net.i2p.data.Base64

class FileList {
    public static void main(String [] args) {
        if (args.length < 1) {
            println "pass files.json as argument"
            System.exit(1)
        }

        def slurper = new JsonSlurper()
        File filesJson = new File(args[0])
        filesJson.eachLine {
            def json = slurper.parseText(it)
            String name = DataUtil.readi18nString(Base64.decode(json.file))
            println "$name,$json.length,$json.pieceSize,$json.infoHash"
        }
    }
}
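A sketch of how this utility would be invoked; the jar name and the location of files.json inside the MuWire home directory are assumptions, not stated in the diff. The output is one CSV row per shared file in the form name,length,pieceSize,infoHash.
```
java -cp muwire-cli-all.jar com.muwire.cli.FileList ~/.MuWire/files.json
```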


@@ -2,8 +2,9 @@ apply plugin : 'application'
mainClassName = 'com.muwire.core.Core'
applicationDefaultJvmArgs = ['-Djava.util.logging.config.file=logging.properties']
dependencies {
-compile 'net.i2p.client:mstreaming:0.9.40'
-compile 'net.i2p.client:streaming:0.9.40'
+compile 'net.i2p:router:0.9.41'
+compile 'net.i2p.client:mstreaming:0.9.41'
+compile 'net.i2p.client:streaming:0.9.41'
testCompile 'org.junit.jupiter:junit-jupiter-api:5.4.2'
testCompile 'junit:junit:4.12'

View File

@@ -4,10 +4,10 @@ import net.i2p.crypto.SigType
class Constants {
public static final byte PERSONA_VERSION = (byte)1
public static final SigType SIG_TYPE = SigType.EdDSA_SHA512_Ed25519
public static final int MAX_HEADER_SIZE = 0x1 << 14
public static final int MAX_HEADERS = 16
-public static final float DOWNLOAD_SEQUENTIAL_RATIO = 0.8f
+public static final String SPLIT_PATTERN = "[\\*\\+\\-,\\.:;\\(\\)=_/\\\\\\!\\\"\\\'\\\$%\\|\\[\\]\\{\\}\\?]"
}

View File

@@ -1,6 +1,7 @@
package com.muwire.core
import java.nio.charset.StandardCharsets
+import java.util.concurrent.atomic.AtomicBoolean
import com.muwire.core.connection.ConnectionAcceptor
import com.muwire.core.connection.ConnectionEstablisher
@@ -12,9 +13,14 @@ import com.muwire.core.connection.I2PConnector
import com.muwire.core.connection.LeafConnectionManager
import com.muwire.core.connection.UltrapeerConnectionManager
import com.muwire.core.download.DownloadManager
+import com.muwire.core.download.SourceDiscoveredEvent
+import com.muwire.core.download.UIDownloadCancelledEvent
import com.muwire.core.download.UIDownloadEvent
+import com.muwire.core.download.UIDownloadPausedEvent
+import com.muwire.core.download.UIDownloadResumedEvent
import com.muwire.core.files.FileDownloadedEvent
import com.muwire.core.files.FileHashedEvent
+import com.muwire.core.files.FileHashingEvent
import com.muwire.core.files.FileHasher
import com.muwire.core.files.FileLoadedEvent
import com.muwire.core.files.FileManager
@@ -22,16 +28,24 @@ import com.muwire.core.files.FileSharedEvent
import com.muwire.core.files.FileUnsharedEvent
import com.muwire.core.files.HasherService
import com.muwire.core.files.PersisterService
+import com.muwire.core.files.AllFilesLoadedEvent
+import com.muwire.core.files.DirectoryUnsharedEvent
+import com.muwire.core.files.DirectoryWatcher
import com.muwire.core.hostcache.CacheClient
import com.muwire.core.hostcache.HostCache
import com.muwire.core.hostcache.HostDiscoveredEvent
+import com.muwire.core.mesh.MeshManager
import com.muwire.core.search.QueryEvent
import com.muwire.core.search.ResultsEvent
import com.muwire.core.search.ResultsSender
import com.muwire.core.search.SearchEvent
import com.muwire.core.search.SearchManager
+import com.muwire.core.search.UIResultBatchEvent
import com.muwire.core.trust.TrustEvent
import com.muwire.core.trust.TrustService
+import com.muwire.core.trust.TrustSubscriber
+import com.muwire.core.trust.TrustSubscriptionEvent
+import com.muwire.core.update.UpdateClient
import com.muwire.core.upload.UploadManager
import com.muwire.core.util.MuWireLogManager
@@ -42,6 +56,7 @@ import net.i2p.client.I2PSession
import net.i2p.client.streaming.I2PSocketManager
import net.i2p.client.streaming.I2PSocketManagerFactory
import net.i2p.client.streaming.I2PSocketOptions
+import net.i2p.client.streaming.I2PSocketManager.DisconnectListener
import net.i2p.crypto.DSAEngine
import net.i2p.crypto.SigType
import net.i2p.data.Destination
@@ -49,47 +64,111 @@ import net.i2p.data.PrivateKey
import net.i2p.data.Signature
import net.i2p.data.SigningPrivateKey
+import net.i2p.router.Router
+import net.i2p.router.RouterContext
@Log
public class Core {
final EventBus eventBus
final Persona me
+final File home
+final Properties i2pOptions
+final MuWireSettings muOptions
private final TrustService trustService
+private final TrustSubscriber trustSubscriber
private final PersisterService persisterService
private final HostCache hostCache
private final ConnectionManager connectionManager
private final CacheClient cacheClient
+private final UpdateClient updateClient
private final ConnectionAcceptor connectionAcceptor
private final ConnectionEstablisher connectionEstablisher
private final HasherService hasherService
+private final DownloadManager downloadManager
+private final DirectoryWatcher directoryWatcher
+final FileManager fileManager
+final UploadManager uploadManager
+private final Router router
+final AtomicBoolean shutdown = new AtomicBoolean()
-public Core(MuWireSettings props, File home) {
-log.info "Initializing I2P context"
-I2PAppContext.getGlobalContext().logManager()
-I2PAppContext.getGlobalContext()._logManager = new MuWireLogManager()
+public Core(MuWireSettings props, File home, String myVersion) {
+this.home = home
+this.muOptions = props
+i2pOptions = new Properties()
+def i2pOptionsFile = new File(home,"i2p.properties")
+if (i2pOptionsFile.exists()) {
+i2pOptionsFile.withInputStream { i2pOptions.load(it) }
+if (!i2pOptions.containsKey("inbound.nickname"))
+i2pOptions["inbound.nickname"] = "MuWire"
+if (!i2pOptions.containsKey("outbound.nickname"))
+i2pOptions["outbound.nickname"] = "MuWire"
+} else {
+i2pOptions["inbound.nickname"] = "MuWire"
+i2pOptions["outbound.nickname"] = "MuWire"
+i2pOptions["inbound.length"] = "3"
+i2pOptions["inbound.quantity"] = "4"
+i2pOptions["outbound.length"] = "3"
+i2pOptions["outbound.quantity"] = "4"
+i2pOptions["i2cp.tcp.host"] = "127.0.0.1"
+i2pOptions["i2cp.tcp.port"] = "7654"
+Random r = new Random()
+int port = r.nextInt(60000) + 4000
+i2pOptions["i2np.ntcp.port"] = String.valueOf(port)
+i2pOptions["i2np.udp.port"] = String.valueOf(port)
+i2pOptionsFile.withOutputStream { i2pOptions.store(it, "") }
+}
+if (!props.embeddedRouter) {
+log.info "Initializing I2P context"
+I2PAppContext.getGlobalContext().logManager()
+I2PAppContext.getGlobalContext()._logManager = new MuWireLogManager()
+router = null
+} else {
+log.info("launching embedded router")
+Properties routerProps = new Properties()
+routerProps.setProperty("i2p.dir.config", home.getAbsolutePath())
+routerProps.setProperty("router.excludePeerCaps", "KLM")
+routerProps.setProperty("i2np.inboundKBytesPerSecond", String.valueOf(props.inBw))
+routerProps.setProperty("i2np.outboundKBytesPerSecond", String.valueOf(props.outBw))
+routerProps.setProperty("i2cp.disableInterface", "true")
+routerProps.setProperty("i2np.ntcp.port", i2pOptions["i2np.ntcp.port"])
+routerProps.setProperty("i2np.udp.port", i2pOptions["i2np.udp.port"])
+routerProps.setProperty("i2np.udp.internalPort", i2pOptions["i2np.udp.port"])
+router = new Router(routerProps)
+router.getContext().setLogManager(new MuWireLogManager())
+router.runRouter()
+while(!router.isRunning())
+Thread.sleep(100)
+}
log.info("initializing I2P socket manager")
def i2pClient = new I2PClientFactory().createClient()
File keyDat = new File(home, "key.dat")
if (!keyDat.exists()) {
log.info("Creating new key.dat")
keyDat.withOutputStream {
i2pClient.createDestination(it, Constants.SIG_TYPE)
}
}
-def sysProps = System.getProperties().clone()
-sysProps["inbound.nickname"] = "MuWire"
+// options like tunnel length and quantity
I2PSession i2pSession
I2PSocketManager socketManager
keyDat.withInputStream {
-socketManager = new I2PSocketManagerFactory().createManager(it, sysProps)
+socketManager = new I2PSocketManagerFactory().createManager(it, i2pOptions["i2cp.tcp.host"], i2pOptions["i2cp.tcp.port"].toInteger(), i2pOptions)
}
socketManager.getDefaultOptions().setReadTimeout(60000)
socketManager.getDefaultOptions().setConnectTimeout(30000)
+socketManager.addDisconnectListener({eventBus.publish(new RouterDisconnectedEvent())} as DisconnectListener)
i2pSession = socketManager.getSession()
def destination = new Destination()
def spk = new SigningPrivateKey(Constants.SIG_TYPE)
keyDat.withInputStream {
@@ -97,8 +176,8 @@ public class Core {
def privateKey = new PrivateKey()
privateKey.readBytes(it)
spk.readBytes(it)
}
def baos = new ByteArrayOutputStream()
def daos = new DataOutputStream(baos)
daos.write(Constants.PERSONA_VERSION)
@@ -115,86 +194,139 @@ public class Core {
me = new Persona(new ByteArrayInputStream(baos.toByteArray()))
log.info("Loaded myself as "+me.getHumanReadableName())
eventBus = new EventBus()
log.info("initializing trust service")
-File goodTrust = new File(home, "trust.good")
-File badTrust = new File(home, "trust.bad")
+File goodTrust = new File(home, "trusted")
+File badTrust = new File(home, "distrusted")
trustService = new TrustService(goodTrust, badTrust, 5000)
eventBus.register(TrustEvent.class, trustService)
log.info "initializing file manager"
-FileManager fileManager = new FileManager(eventBus)
+fileManager = new FileManager(eventBus, props)
eventBus.register(FileHashedEvent.class, fileManager)
eventBus.register(FileLoadedEvent.class, fileManager)
eventBus.register(FileDownloadedEvent.class, fileManager)
eventBus.register(FileUnsharedEvent.class, fileManager)
eventBus.register(SearchEvent.class, fileManager)
+eventBus.register(DirectoryUnsharedEvent.class, fileManager)
+log.info("initializing mesh manager")
+MeshManager meshManager = new MeshManager(fileManager, home, props)
+eventBus.register(SourceDiscoveredEvent.class, meshManager)
log.info "initializing persistence service"
-persisterService = new PersisterService(new File(home, "files.json"), eventBus, 5000, fileManager)
+persisterService = new PersisterService(new File(home, "files.json"), eventBus, 15000, fileManager)
+eventBus.register(UILoadedEvent.class, persisterService)
log.info("initializing host cache")
File hostStorage = new File(home, "hosts.json")
hostCache = new HostCache(trustService,hostStorage, 30000, props, i2pSession.getMyDestination())
eventBus.register(HostDiscoveredEvent.class, hostCache)
eventBus.register(ConnectionEvent.class, hostCache)
log.info("initializing connection manager")
connectionManager = props.isLeaf() ?
-new LeafConnectionManager(eventBus, me, 3, hostCache) : new UltrapeerConnectionManager(eventBus, me, 512, 512, hostCache, trustService)
+new LeafConnectionManager(eventBus, me, 3, hostCache, props) :
+new UltrapeerConnectionManager(eventBus, me, 512, 512, hostCache, trustService, props)
eventBus.register(TrustEvent.class, connectionManager)
eventBus.register(ConnectionEvent.class, connectionManager)
eventBus.register(DisconnectionEvent.class, connectionManager)
eventBus.register(QueryEvent.class, connectionManager)
log.info("initializing cache client")
cacheClient = new CacheClient(eventBus,hostCache, connectionManager, i2pSession, props, 10000)
+log.info("initializing update client")
+updateClient = new UpdateClient(eventBus, i2pSession, myVersion, props, fileManager, me)
+eventBus.register(FileDownloadedEvent.class, updateClient)
+eventBus.register(UIResultBatchEvent.class, updateClient)
log.info("initializing connector")
I2PConnector i2pConnector = new I2PConnector(socketManager)
log.info "initializing results sender"
ResultsSender resultsSender = new ResultsSender(eventBus, i2pConnector, me)
log.info "initializing search manager"
SearchManager searchManager = new SearchManager(eventBus, me, resultsSender)
eventBus.register(QueryEvent.class, searchManager)
eventBus.register(ResultsEvent.class, searchManager)
log.info("initializing download manager")
-DownloadManager downloadManager = new DownloadManager(eventBus, i2pConnector)
+downloadManager = new DownloadManager(eventBus, trustService, meshManager, props, i2pConnector, home, me)
eventBus.register(UIDownloadEvent.class, downloadManager)
+eventBus.register(UILoadedEvent.class, downloadManager)
+eventBus.register(FileDownloadedEvent.class, downloadManager)
+eventBus.register(UIDownloadCancelledEvent.class, downloadManager)
+eventBus.register(SourceDiscoveredEvent.class, downloadManager)
+eventBus.register(UIDownloadPausedEvent.class, downloadManager)
+eventBus.register(UIDownloadResumedEvent.class, downloadManager)
log.info("initializing upload manager")
-UploadManager uploadManager = new UploadManager(eventBus, fileManager)
+uploadManager = new UploadManager(eventBus, fileManager, meshManager, downloadManager)
log.info("initializing connection establisher")
-log.info("initializing acceptor")
-I2PAcceptor i2pAcceptor = new I2PAcceptor(socketManager)
-connectionAcceptor = new ConnectionAcceptor(eventBus, connectionManager, props,
-i2pAcceptor, hostCache, trustService, searchManager, uploadManager)
connectionEstablisher = new ConnectionEstablisher(eventBus, i2pConnector, props, connectionManager, hostCache)
+log.info("initializing acceptor")
+I2PAcceptor i2pAcceptor = new I2PAcceptor(socketManager)
+connectionAcceptor = new ConnectionAcceptor(eventBus, connectionManager, props,
+i2pAcceptor, hostCache, trustService, searchManager, uploadManager, connectionEstablisher)
+log.info("initializing directory watcher")
+directoryWatcher = new DirectoryWatcher(eventBus, fileManager)
+eventBus.register(FileSharedEvent.class, directoryWatcher)
+eventBus.register(AllFilesLoadedEvent.class, directoryWatcher)
+eventBus.register(DirectoryUnsharedEvent.class, directoryWatcher)
log.info("initializing hasher service")
-hasherService = new HasherService(new FileHasher(), eventBus)
+hasherService = new HasherService(new FileHasher(), eventBus, fileManager)
eventBus.register(FileSharedEvent.class, hasherService)
+log.info("initializing trust subscriber")
+trustSubscriber = new TrustSubscriber(eventBus, i2pConnector, props)
+eventBus.register(UILoadedEvent.class, trustSubscriber)
+eventBus.register(TrustSubscriptionEvent.class, trustSubscriber)
}
public void startServices() {
hasherService.start()
trustService.start()
trustService.waitForLoad()
-persisterService.start()
hostCache.start()
connectionManager.start()
cacheClient.start()
connectionAcceptor.start()
connectionEstablisher.start()
hostCache.waitForLoad()
+updateClient.start()
}
+public void shutdown() {
+if (!shutdown.compareAndSet(false, true)) {
+log.info("already shutting down")
+return
+}
+log.info("shutting down trust subscriber")
+trustSubscriber.stop()
+log.info("shutting down download manager")
+downloadManager.shutdown()
+log.info("shutting down connection acceptor")
+connectionAcceptor.stop()
+log.info("shutting down connection establisher")
+connectionEstablisher.stop()
+log.info("shutting down directory watcher")
+directoryWatcher.stop()
+log.info("shutting down cache client")
+cacheClient.stop()
+log.info("shutting down connection manager")
+connectionManager.shutdown()
+if (router != null) {
+log.info("shutting down embedded router")
+router.shutdown(0)
+}
+}
static main(args) {
@@ -204,7 +336,7 @@ public class Core {
log.info("creating home dir") log.info("creating home dir")
home.mkdir() home.mkdir()
} }
def props = new Properties() def props = new Properties()
def propsFile = new File(home, "MuWire.properties") def propsFile = new File(home, "MuWire.properties")
if (propsFile.exists()) { if (propsFile.exists()) {
@@ -220,10 +352,10 @@ public class Core {
props.write(it)
}
}
-Core core = new Core(props, home)
+Core core = new Core(props, home, "0.4.8")
core.startServices()
// ... at the end, sleep or execute script
if (args.length == 0) {
log.info("initialized everything, sleeping")

View File

@@ -4,17 +4,17 @@ import java.util.concurrent.atomic.AtomicLong
class Event {
private static final AtomicLong SEQ_NO = new AtomicLong();
final long seqNo
final long timestamp
Event() {
seqNo = SEQ_NO.getAndIncrement()
timestamp = System.currentTimeMillis()
}
@Override
public String toString() {
"seqNo $seqNo timestamp $timestamp"
}
}

View File

@@ -3,44 +3,49 @@ package com.muwire.core
import java.util.concurrent.CopyOnWriteArrayList
import java.util.concurrent.Executor
import java.util.concurrent.Executors
+import java.util.logging.Level
import com.muwire.core.files.FileSharedEvent
import groovy.util.logging.Log
@Log
class EventBus {
private Map handlers = new HashMap()
private final Executor executor = Executors.newSingleThreadExecutor {r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("event-bus")
rv
}
void publish(Event e) {
executor.execute({publishInternal(e)} as Runnable)
}
private void publishInternal(Event e) {
-log.fine "publishing event $e of type ${e.getClass().getSimpleName()}"
+log.fine "publishing event $e of type ${e.getClass().getSimpleName()} event $e"
def currentHandlers
final def clazz = e.getClass()
synchronized(this) {
currentHandlers = handlers.getOrDefault(clazz, [])
}
currentHandlers.each {
-it."on${clazz.getSimpleName()}"(e)
+try {
+it."on${clazz.getSimpleName()}"(e)
+} catch (Exception bad) {
+log.log(Level.SEVERE, "exception dispatching event",bad)
+}
}
}
synchronized void register(Class<? extends Event> eventType, def handler) {
log.info "Registering $handler for type $eventType"
def currentHandlers = handlers.get(eventType)
if (currentHandlers == null) {
currentHandlers = new CopyOnWriteArrayList()
handlers.put(eventType, currentHandlers)
}
currentHandlers.add handler
}
}

View File

@@ -13,5 +13,5 @@ class InvalidSignatureException extends Exception {
public InvalidSignatureException(Throwable cause) {
super(cause);
}
}

View File

@@ -1,62 +1,131 @@
package com.muwire.core
+import java.util.stream.Collectors
import com.muwire.core.hostcache.CrawlerResponse
+import com.muwire.core.util.DataUtil
+import net.i2p.data.Base64
class MuWireSettings {
final boolean isLeaf
boolean allowUntrusted
+boolean allowTrustLists
+int trustListInterval
+Set<Persona> trustSubscriptions
+int downloadRetryInterval
+int updateCheckInterval
+boolean autoDownloadUpdate
+String updateType
String nickname
File downloadLocation
-String sharedFiles
CrawlerResponse crawlerResponse
+boolean shareDownloadedFiles
+Set<String> watchedDirectories
+float downloadSequentialRatio
+int hostClearInterval
+int meshExpiration
+boolean embeddedRouter
+int inBw, outBw
MuWireSettings() {
this(new Properties())
}
MuWireSettings(Properties props) {
isLeaf = Boolean.valueOf(props.get("leaf","false"))
-allowUntrusted = Boolean.valueOf(props.get("allowUntrusted","true"))
+allowUntrusted = Boolean.valueOf(props.getProperty("allowUntrusted","true"))
+allowTrustLists = Boolean.valueOf(props.getProperty("allowTrustLists","true"))
+trustListInterval = Integer.valueOf(props.getProperty("trustListInterval","1"))
crawlerResponse = CrawlerResponse.valueOf(props.get("crawlerResponse","REGISTERED"))
nickname = props.getProperty("nickname","MuWireUser")
downloadLocation = new File((String)props.getProperty("downloadLocation",
System.getProperty("user.home")))
-sharedFiles = props.getProperty("sharedFiles")
+downloadRetryInterval = Integer.parseInt(props.getProperty("downloadRetryInterval","1"))
+updateCheckInterval = Integer.parseInt(props.getProperty("updateCheckInterval","24"))
+autoDownloadUpdate = Boolean.parseBoolean(props.getProperty("autoDownloadUpdate","true"))
+updateType = props.getProperty("updateType","jar")
+shareDownloadedFiles = Boolean.parseBoolean(props.getProperty("shareDownloadedFiles","true"))
+downloadSequentialRatio = Float.valueOf(props.getProperty("downloadSequentialRatio","0.8"))
+hostClearInterval = Integer.valueOf(props.getProperty("hostClearInterval","60"))
+meshExpiration = Integer.valueOf(props.getProperty("meshExpiration","60"))
+embeddedRouter = Boolean.valueOf(props.getProperty("embeddedRouter","false"))
+inBw = Integer.valueOf(props.getProperty("inBw","256"))
+outBw = Integer.valueOf(props.getProperty("outBw","128"))
+watchedDirectories = new HashSet<>()
+if (props.containsKey("watchedDirectories")) {
+String[] encoded = props.getProperty("watchedDirectories").split(",")
+encoded.each { watchedDirectories << DataUtil.readi18nString(Base64.decode(it)) }
+}
+trustSubscriptions = new HashSet<>()
+if (props.containsKey("trustSubscriptions")) {
+props.getProperty("trustSubscriptions").split(",").each {
+trustSubscriptions.add(new Persona(new ByteArrayInputStream(Base64.decode(it))))
+}
+}
}
void write(OutputStream out) throws IOException {
Properties props = new Properties()
props.setProperty("leaf", isLeaf.toString())
props.setProperty("allowUntrusted", allowUntrusted.toString())
+props.setProperty("allowTrustLists", String.valueOf(allowTrustLists))
+props.setProperty("trustListInterval", String.valueOf(trustListInterval))
props.setProperty("crawlerResponse", crawlerResponse.toString())
props.setProperty("nickname", nickname)
props.setProperty("downloadLocation", downloadLocation.getAbsolutePath())
-if (sharedFiles != null)
-props.setProperty("sharedFiles", sharedFiles)
+props.setProperty("downloadRetryInterval", String.valueOf(downloadRetryInterval))
+props.setProperty("updateCheckInterval", String.valueOf(updateCheckInterval))
+props.setProperty("autoDownloadUpdate", String.valueOf(autoDownloadUpdate))
+props.setProperty("updateType",String.valueOf(updateType))
+props.setProperty("shareDownloadedFiles", String.valueOf(shareDownloadedFiles))
+props.setProperty("downloadSequentialRatio", String.valueOf(downloadSequentialRatio))
+props.setProperty("hostClearInterval", String.valueOf(hostClearInterval))
+props.setProperty("meshExpiration", String.valueOf(meshExpiration))
+props.setProperty("embeddedRouter", String.valueOf(embeddedRouter))
+props.setProperty("inBw", String.valueOf(inBw))
+props.setProperty("outBw", String.valueOf(outBw))
+if (!watchedDirectories.isEmpty()) {
+String encoded = watchedDirectories.stream().
+map({Base64.encode(DataUtil.encodei18nString(it))}).
+collect(Collectors.joining(","))
+props.setProperty("watchedDirectories", encoded)
+}
+if (!trustSubscriptions.isEmpty()) {
+String encoded = trustSubscriptions.stream().
+map({it.toBase64()}).
+collect(Collectors.joining(","))
+props.setProperty("trustSubscriptions", encoded)
+}
props.store(out, "")
}
boolean isLeaf() {
isLeaf
}
boolean allowUntrusted() {
allowUntrusted
}
void setAllowUntrusted(boolean allowUntrusted) {
this.allowUntrusted = allowUntrusted
}
CrawlerResponse getCrawlerResponse() {
crawlerResponse
}
void setCrawlerResponse(CrawlerResponse crawlerResponse) {
this.crawlerResponse = crawlerResponse
}
String getNickname() {
nickname
}

View File

@@ -7,11 +7,11 @@ import java.nio.charset.StandardCharsets
*/
public class Name {
final String name
Name(String name) {
this.name = name
}
Name(InputStream nameStream) throws IOException {
DataInputStream dis = new DataInputStream(nameStream)
int length = dis.readUnsignedShort()
@@ -19,22 +19,22 @@ public class Name {
dis.readFully(nameBytes)
this.name = new String(nameBytes, StandardCharsets.UTF_8)
}
public void write(OutputStream out) throws IOException {
DataOutputStream dos = new DataOutputStream(out)
dos.writeShort(name.length())
dos.write(name.getBytes(StandardCharsets.UTF_8))
}
public getName() {
name
}
@Override
public int hashCode() {
name.hashCode()
}
@Override
public boolean equals(Object o) {
if (!(o instanceof Name))

View File

@@ -2,25 +2,27 @@ package com.muwire.core
import net.i2p.crypto.DSAEngine
import net.i2p.crypto.SigType
+import net.i2p.data.Base64
import net.i2p.data.Destination
import net.i2p.data.Signature
import net.i2p.data.SigningPublicKey
public class Persona {
private static final int SIG_LEN = Constants.SIG_TYPE.getSigLen()
private final byte version
private final Name name
private final Destination destination
private final byte[] sig
private volatile String humanReadableName
+private volatile String base64
private volatile byte[] payload
public Persona(InputStream personaStream) throws IOException, InvalidSignatureException {
version = (byte) (personaStream.read() & 0xFF)
if (version != Constants.PERSONA_VERSION)
throw new IOException("Unknown version "+version)
name = new Name(personaStream)
destination = Destination.create(personaStream)
sig = new byte[SIG_LEN]
@@ -29,7 +31,7 @@ public class Persona {
if (!verify(version, name, destination, sig))
throw new InvalidSignatureException(getHumanReadableName() + " didn't verify")
}
private static boolean verify(byte version, Name name, Destination destination, byte [] sig) {
ByteArrayOutputStream baos = new ByteArrayOutputStream()
baos.write(version)
@@ -40,7 +42,7 @@ public class Persona {
Signature signature = new Signature(Constants.SIG_TYPE, sig)
DSAEngine.getInstance().verifySignature(signature, payload, spk)
}
public void write(OutputStream out) throws IOException {
if (payload == null) {
ByteArrayOutputStream baos = new ByteArrayOutputStream()
@@ -52,18 +54,27 @@ public class Persona {
}
out.write(payload)
}
public String getHumanReadableName() {
if (humanReadableName == null)
humanReadableName = name.getName() + "@" + destination.toBase32().substring(0,32)
humanReadableName
}
+public String toBase64() {
+if (base64 == null) {
+def baos = new ByteArrayOutputStream()
+write(baos)
+base64 = Base64.encode(baos.toByteArray())
+}
+base64
+}
@Override
public int hashCode() {
name.hashCode() ^ destination.hashCode()
}
@Override
public boolean equals(Object o) {
if (!(o instanceof Persona))
@@ -71,4 +82,13 @@ public class Persona {
Persona other = (Persona)o
name.equals(other.name) && destination.equals(other.destination)
}
+public static void main(String []args) {
+if (args.length != 1) {
+println "This utility decodes a base64-encoded persona"
+System.exit(1)
+}
+Persona p = new Persona(new ByteArrayInputStream(Base64.decode(args[0])))
+println p.getHumanReadableName()
+}
}

View File

@@ -0,0 +1,4 @@
package com.muwire.core
class RouterDisconnectedEvent extends Event {
}

View File

@@ -2,12 +2,12 @@ package com.muwire.core
abstract class Service {
volatile boolean loaded
abstract void load()
void waitForLoad() {
while (!loaded)
Thread.sleep(10)
}
}

View File

@@ -0,0 +1,4 @@
package com.muwire.core
class UILoadedEvent extends Event {
}

View File

@@ -6,6 +6,8 @@ import java.util.concurrent.atomic.AtomicBoolean
import java.util.logging.Level
import com.muwire.core.EventBus
+import com.muwire.core.MuWireSettings
+import com.muwire.core.Persona
import com.muwire.core.hostcache.HostCache
import com.muwire.core.hostcache.HostDiscoveredEvent
import com.muwire.core.search.QueryEvent
@@ -14,159 +16,210 @@ import com.muwire.core.trust.TrustLevel
import com.muwire.core.trust.TrustService
import groovy.util.logging.Log
+import net.i2p.data.Base64
import net.i2p.data.Destination
@Log
abstract class Connection implements Closeable {
+private static final int SEARCHES = 10
+private static final long INTERVAL = 1000
final EventBus eventBus
final Endpoint endpoint
final boolean incoming
final HostCache hostCache
final TrustService trustService
+final MuWireSettings settings
private final AtomicBoolean running = new AtomicBoolean()
private final BlockingQueue messages = new LinkedBlockingQueue()
private final Thread reader, writer
+private final LinkedList<Long> searchTimestamps = new LinkedList<>()
protected final String name
long lastPingSentTime, lastPongReceivedTime
-Connection(EventBus eventBus, Endpoint endpoint, boolean incoming, HostCache hostCache, TrustService trustService) {
+Connection(EventBus eventBus, Endpoint endpoint, boolean incoming,
+HostCache hostCache, TrustService trustService, MuWireSettings settings) {
this.eventBus = eventBus
this.incoming = incoming
this.endpoint = endpoint
this.hostCache = hostCache
this.trustService = trustService
+this.settings = settings
this.name = endpoint.destination.toBase32().substring(0,8)
this.reader = new Thread({readLoop()} as Runnable)
this.reader.setName("reader-$name")
this.reader.setDaemon(true)
this.writer = new Thread({writeLoop()} as Runnable)
this.writer.setName("writer-$name")
this.writer.setDaemon(true)
}
/**
* starts the connection threads
*/
void start() {
if (!running.compareAndSet(false, true)) {
log.log(Level.WARNING,"$name already running", new Exception())
return
}
reader.start()
writer.start()
}
@Override
public void close() {
if (!running.compareAndSet(true, false)) {
log.log(Level.WARNING, "$name already closed", new Exception() )
return
}
log.info("closing $name")
+reader.interrupt()
+writer.interrupt()
endpoint.close()
-reader.interrupt()
-writer.interrupt()
eventBus.publish(new DisconnectionEvent(destination: endpoint.destination))
}
protected void readLoop() {
try {
while(running.get()) {
read()
}
} catch (SocketTimeoutException e) {
close()
} catch (Exception e) {
log.log(Level.WARNING,"unhandled exception in reader",e)
} finally {
close()
}
}
protected abstract void read()
protected void writeLoop() {
try {
while(running.get()) {
def message = messages.take()
write(message)
}
} catch (Exception e) {
log.log(Level.WARNING, "unhandled exception in writer",e)
} finally {
close()
}
}
protected abstract void write(def message);
void sendPing() {
def ping = [:]
ping.type = "Ping"
ping.version = 1
messages.put(ping)
lastPingSentTime = System.currentTimeMillis()
}
void sendQuery(QueryEvent e) {
def query = [:]
query.type = "Search"
query.version = 1
query.uuid = e.searchEvent.getUuid()
query.firstHop = e.firstHop
-// TODO: first hop figure out
query.keywords = e.searchEvent.getSearchTerms()
-query.replyTo = e.getReceivedOn().toBase64()
+query.oobInfohash = e.searchEvent.oobInfohash
+if (e.searchEvent.searchHash != null)
+query.infohash = Base64.encode(e.searchEvent.searchHash)
+query.replyTo = e.replyTo.toBase64()
+if (e.originator != null)
+query.originator = e.originator.toBase64()
messages.put(query)
}
protected void handlePing() {
log.fine("$name received ping")
def pong = [:]
pong.type = "Pong"
pong.version = 1
pong.pongs = hostCache.getGoodHosts(10).collect { d -> d.toBase64() }
messages.put(pong)
}
protected void handlePong(def pong) {
log.fine("$name received pong")
lastPongReceivedTime = System.currentTimeMillis()
if (pong.pongs == null)
throw new Exception("Pong doesn't have pongs")
pong.pongs.each {
def dest = new Destination(it)
eventBus.publish(new HostDiscoveredEvent(destination: dest))
}
}
+private boolean throttleSearch() {
+final long now = System.currentTimeMillis()
+if (searchTimestamps.size() < SEARCHES) {
+searchTimestamps.addLast(now)
+return false
+}
+Long oldest = searchTimestamps.getFirst()
+if (now - oldest.longValue() < INTERVAL)
+return true
+searchTimestamps.addLast(now)
+searchTimestamps.removeFirst()
+false
+}
protected void handleSearch(def search) {
+if (throttleSearch()) {
+log.info("dropping excessive search")
+return
+}
UUID uuid = UUID.fromString(search.uuid)
-if (search.infohash != null)
+byte [] infohash = null
+if (search.infohash != null) {
search.keywords = null
+infohash = Base64.decode(search.infohash)
+}
Destination replyTo = new Destination(search.replyTo)
-if (trustService.getLevel(replyTo) == TrustLevel.DISTRUSTED) {
+TrustLevel trustLevel = trustService.getLevel(replyTo)
+if (trustLevel == TrustLevel.DISTRUSTED) {
log.info "dropping search from distrusted peer"
return
}
-// TODO: add option to respond only to trusted peers
+if (trustLevel == TrustLevel.NEUTRAL && !settings.allowUntrusted()) {
+log.info("dropping search from neutral peer")
+return
+}
+Persona originator = null
+if (search.originator != null) {
+originator = new Persona(new ByteArrayInputStream(Base64.decode(search.originator)))
+if (originator.destination != replyTo) {
+log.info("originator doesn't match destination")
+return
+}
+}
+boolean oob = false
+if (search.oobInfohash != null)
+oob = search.oobInfohash
SearchEvent searchEvent = new SearchEvent(searchTerms : search.keywords,
-searchHash : search.infohash,
-uuid : uuid)
+searchHash : infohash,
+uuid : uuid,
+oobInfohash : oob)
QueryEvent event = new QueryEvent ( searchEvent : searchEvent,
replyTo : replyTo,
+originator : originator,
receivedOn : endpoint.destination,
firstHop : search.firstHop )
eventBus.publish(event)
}
}

View File

@@ -14,9 +14,12 @@ import com.muwire.core.hostcache.HostCache
import com.muwire.core.trust.TrustLevel
import com.muwire.core.trust.TrustService
import com.muwire.core.upload.UploadManager
+import com.muwire.core.util.DataUtil
import com.muwire.core.search.InvalidSearchResultException
import com.muwire.core.search.ResultsParser
import com.muwire.core.search.SearchManager
+import com.muwire.core.search.UIResultBatchEvent
+import com.muwire.core.search.UIResultEvent
import com.muwire.core.search.UnexpectedResultsException
import groovy.json.JsonOutput
@@ -26,156 +29,185 @@ import groovy.util.logging.Log
@Log
class ConnectionAcceptor {
final EventBus eventBus
final UltrapeerConnectionManager manager
final MuWireSettings settings
final I2PAcceptor acceptor
final HostCache hostCache
final TrustService trustService
final SearchManager searchManager
final UploadManager uploadManager
+final ConnectionEstablisher establisher
final ExecutorService acceptorThread
final ExecutorService handshakerThreads
+private volatile shutdown
ConnectionAcceptor(EventBus eventBus, UltrapeerConnectionManager manager,
MuWireSettings settings, I2PAcceptor acceptor, HostCache hostCache,
-TrustService trustService, SearchManager searchManager, UploadManager uploadManager) {
+TrustService trustService, SearchManager searchManager, UploadManager uploadManager,
+ConnectionEstablisher establisher) {
this.eventBus = eventBus
this.manager = manager
this.settings = settings
this.acceptor = acceptor
this.hostCache = hostCache
this.trustService = trustService
this.searchManager = searchManager
this.uploadManager = uploadManager
+this.establisher = establisher
acceptorThread = Executors.newSingleThreadExecutor { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("acceptor")
rv
}
handshakerThreads = Executors.newCachedThreadPool { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("acceptor-processor-${System.currentTimeMillis()}")
rv
}
}
void start() {
acceptorThread.execute({acceptLoop()} as Runnable)
}
void stop() {
+shutdown = true
acceptorThread.shutdownNow()
handshakerThreads.shutdownNow()
}
private void acceptLoop() {
+try {
while(true) {
def incoming = acceptor.accept()
log.info("accepted connection from ${incoming.destination.toBase32()}")
switch(trustService.getLevel(incoming.destination)) {
case TrustLevel.TRUSTED : break
case TrustLevel.NEUTRAL :
if (settings.allowUntrusted())
break
case TrustLevel.DISTRUSTED :
log.info("Disallowing distrusted connection")
incoming.close()
continue
}
handshakerThreads.execute({processIncoming(incoming)} as Runnable)
}
+} catch (Exception e) {
+log.log(Level.WARNING, "exception in accept loop",e)
+if (!shutdown)
+throw e
+}
}
private void processIncoming(Endpoint e) {
InputStream is = e.inputStream
try {
int read = is.read()
switch(read) {
case (byte)'M':
if (settings.isLeaf())
throw new IOException("Incoming connection as leaf")
processMuWire(e)
break
case (byte)'G':
processGET(e)
break
+case (byte)'H':
+processHashList(e)
+break
case (byte)'P':
processPOST(e)
break
+case (byte)'T':
+processTRUST(e)
+break
default:
throw new Exception("Invalid read $read")
}
} catch (Exception ex) {
log.log(Level.WARNING, "incoming connection failed",ex)
e.close()
eventBus.publish new ConnectionEvent(endpoint: e, incoming: true, leaf: null, status: ConnectionAttemptStatus.FAILED)
}
}
private void processMuWire(Endpoint e) {
byte[] uWire = "uWire ".bytes
for (int i = 0; i < uWire.length; i++) {
int read = e.inputStream.read()
if (read != uWire[i]) {
throw new IOException("unexpected value $read at position $i")
}
}
byte[] type = new byte[4]
DataInputStream dis = new DataInputStream(e.inputStream)
dis.readFully(type)
if (type == "leaf".bytes)
handleIncoming(e, true)
else if (type == "peer".bytes)
handleIncoming(e, false)
else
throw new IOException("unknown connection type $type")
}
private void handleIncoming(Endpoint e, boolean leaf) {
-boolean accept = !manager.isConnected(e.destination) && (leaf ? manager.hasLeafSlots() : manager.hasPeerSlots())
+boolean accept = !manager.isConnected(e.destination) &&
+!establisher.isInProgress(e.destination) &&
+(leaf ? manager.hasLeafSlots() : manager.hasPeerSlots())
if (accept) {
log.info("accepting connection, leaf:$leaf")
e.outputStream.write("OK".bytes)
e.outputStream.flush()
def wrapped = new Endpoint(e.destination, new InflaterInputStream(e.inputStream), new DeflaterOutputStream(e.outputStream, true), e.toClose)
eventBus.publish(new ConnectionEvent(endpoint: wrapped, incoming: true, leaf: leaf, status: ConnectionAttemptStatus.SUCCESSFUL))
} else {
log.info("rejecting connection, leaf:$leaf")
e.outputStream.write("REJECT".bytes)
def hosts = hostCache.getGoodHosts(10)
if (!hosts.isEmpty()) {
def json = [:]
json.tryHosts = hosts.collect { d -> d.toBase64() }
json = JsonOutput.toJson(json)
def os = new DataOutputStream(e.outputStream)
os.writeShort(json.bytes.length)
os.write(json.bytes)
}
e.outputStream.flush()
e.close()
eventBus.publish(new ConnectionEvent(endpoint: e, incoming: true, leaf: leaf, status: ConnectionAttemptStatus.REJECTED))
}
}
private void processGET(Endpoint e) {
byte[] et = new byte[3]
final DataInputStream dis = new DataInputStream(e.getInputStream())
dis.readFully(et)
if (et != "ET ".getBytes(StandardCharsets.US_ASCII))
throw new IOException("Invalid GET connection")
-uploadManager.processEndpoint(e)
+uploadManager.processGET(e)
}
+private void processHashList(Endpoint e) {
+byte[] ashList = new byte[8]
+final DataInputStream dis = new DataInputStream(e.getInputStream())
+dis.readFully(ashList)
+if (ashList != "ASHLIST ".getBytes(StandardCharsets.US_ASCII))
+throw new IOException("Invalid HASHLIST connection")
+uploadManager.processHashList(e)
+}
private void processPOST(final Endpoint e) throws IOException {
byte [] ost = new byte[4]
final DataInputStream dis = new DataInputStream(e.getInputStream())
@@ -199,18 +231,59 @@ class ConnectionAcceptor {
if (sender.destination != e.getDestination()) if (sender.destination != e.getDestination())
throw new IOException("Sender destination mismatch expected $e.getDestination(), got $sender.destination") throw new IOException("Sender destination mismatch expected $e.getDestination(), got $sender.destination")
int nResults = dis.readUnsignedShort() int nResults = dis.readUnsignedShort()
UIResultEvent[] results = new UIResultEvent[nResults]
for (int i = 0; i < nResults; i++) { for (int i = 0; i < nResults; i++) {
int jsonSize = dis.readUnsignedShort() int jsonSize = dis.readUnsignedShort()
byte [] payload = new byte[jsonSize] byte [] payload = new byte[jsonSize]
dis.readFully(payload) dis.readFully(payload)
def json = slurper.parse(payload) def json = slurper.parse(payload)
eventBus.publish(ResultsParser.parse(sender, resultsUUID, json)) results[i] = ResultsParser.parse(sender, resultsUUID, json)
} }
eventBus.publish(new UIResultBatchEvent(uuid: resultsUUID, results: results))
} catch (IOException | UnexpectedResultsException | InvalidSearchResultException bad) { } catch (IOException | UnexpectedResultsException | InvalidSearchResultException bad) {
log.log(Level.WARNING, "failed to process POST", bad) log.log(Level.WARNING, "failed to process POST", bad)
} finally { } finally {
e.close() e.close()
} }
} }
private void processTRUST(Endpoint e) {
byte[] RUST = new byte[6]
DataInputStream dis = new DataInputStream(e.getInputStream())
dis.readFully(RUST)
if (RUST != "RUST\r\n".getBytes(StandardCharsets.US_ASCII))
throw new IOException("Invalid TRUST connection")
String header
while ((header = DataUtil.readTillRN(dis)) != ""); // ignore headers for now
OutputStream os = e.getOutputStream()
if (!settings.allowTrustLists) {
os.write("403 Not Allowed\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
os.flush()
e.close()
return
}
os.write("200 OK\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
List<Persona> good = new ArrayList<>(trustService.good.values())
int size = Math.min(Short.MAX_VALUE * 2, good.size())
good = good.subList(0, size)
DataOutputStream dos = new DataOutputStream(os)
dos.writeShort(size)
good.each {
it.write(dos)
}
List<Persona> bad = new ArrayList<>(trustService.bad.values())
size = Math.min(Short.MAX_VALUE * 2, bad.size())
bad = bad.subList(0, size)
dos.writeShort(size)
bad.each {
it.write(dos)
}
dos.flush()
e.close()
}
}
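The reject path above writes the literal bytes "REJECT" followed by an optional payload: an unsigned-short length and a JSON object whose tryHosts field carries Base64-encoded destinations. A minimal client-side sketch of reading that payload (hedged, not code from this repository; it assumes the "REJECT" bytes were already consumed and uses only JDK and Groovy classes):

import groovy.json.JsonSlurper

List readTryHosts(InputStream rawIn) {
    def dis = new DataInputStream(rawIn)
    int length = dis.readUnsignedShort()   // unsigned-short length prefix, as written by the acceptor
    byte[] payload = new byte[length]
    dis.readFully(payload)
    def json = new JsonSlurper().parse(payload)
    json.tryHosts ?: []                    // Base64 destinations suggested by the rejecting peer
}

This mirrors what readEJECT() in ConnectionEstablisher below does with the same payload.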

View File

@@ -21,159 +21,165 @@ import net.i2p.util.ConcurrentHashSet
@Log
class ConnectionEstablisher {

private static final int CONCURRENT = 4

final EventBus eventBus
final I2PConnector i2pConnector
final MuWireSettings settings
final ConnectionManager connectionManager
final HostCache hostCache

final Timer timer
final ExecutorService executor

final Set inProgress = new ConcurrentHashSet()

ConnectionEstablisher(){}

ConnectionEstablisher(EventBus eventBus, I2PConnector i2pConnector, MuWireSettings settings,
ConnectionManager connectionManager, HostCache hostCache) {
this.eventBus = eventBus
this.i2pConnector = i2pConnector
this.settings = settings
this.connectionManager = connectionManager
this.hostCache = hostCache
timer = new Timer("connection-timer",true)
executor = Executors.newFixedThreadPool(CONCURRENT, { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("connector-${System.currentTimeMillis()}")
rv
} as ThreadFactory)
}
void start() {
timer.schedule({connectIfNeeded()} as TimerTask, 100, 1000)
}
void stop() {
timer.cancel()
executor.shutdownNow()
}
private void connectIfNeeded() {
if (!connectionManager.needsConnections())
return
if (inProgress.size() >= CONCURRENT)
return
def toTry = null
for (int i = 0; i < 5; i++) {
toTry = hostCache.getHosts(1)
if (toTry.isEmpty())
return
toTry = toTry[0]
if (!connectionManager.isConnected(toTry) &&
!inProgress.contains(toTry)) {
break
}
}
if (toTry == null)
return
if (!connectionManager.isConnected(toTry) && inProgress.add(toTry))
executor.execute({connect(toTry)} as Runnable)
}
private void connect(Destination toTry) {
log.info("starting connect to ${toTry.toBase32()}")
try {
def endpoint = i2pConnector.connect(toTry)
log.info("successful transport connect to ${toTry.toBase32()}")
// outgoing handshake
endpoint.outputStream.write("MuWire ".bytes)
def type = settings.isLeaf() ? "leaf" : "peer"
endpoint.outputStream.write(type.bytes)
endpoint.outputStream.flush()
InputStream is = endpoint.inputStream
int read = is.read()
if (read == -1) {
fail endpoint
return
}
switch(read) {
case (byte)'O': readK(endpoint); break
case (byte)'R': readEJECT(endpoint); break
default :
log.warning("unknown response $read")
fail endpoint
}
} catch (Exception e) {
log.log(Level.WARNING, "Couldn't connect to ${toTry.toBase32()}", e)
def endpoint = new Endpoint(toTry, null, null, null)
fail(endpoint)
} finally {
inProgress.remove(toTry)
}
}
private void fail(Endpoint endpoint) {
endpoint.close()
eventBus.publish(new ConnectionEvent(endpoint: endpoint, incoming: false, leaf: false, status: ConnectionAttemptStatus.FAILED))
}
private void readK(Endpoint e) {
int read = e.inputStream.read()
if (read != 'K') {
log.warning("unknown response after O: $read")
fail e
return
}
log.info("connection to ${e.destination.toBase32()} established")
// wrap into deflater / inflater streams and publish
def wrapped = new Endpoint(e.destination, new InflaterInputStream(e.inputStream), new DeflaterOutputStream(e.outputStream, true), e.toClose)
eventBus.publish(new ConnectionEvent(endpoint: wrapped, incoming: false, leaf: false, status: ConnectionAttemptStatus.SUCCESSFUL))
}
private void readEJECT(Endpoint e) {
byte[] eject = "EJECT".bytes
for (int i = 0; i < eject.length; i++) {
int read = e.inputStream.read()
if (read != eject[i]) {
log.warning("Unknown response after R at position $i")
fail e
return
}
}
log.info("connection to ${e.destination.toBase32()} rejected")
eventBus.publish(new ConnectionEvent(endpoint: e, incoming: false, leaf: false, status: ConnectionAttemptStatus.REJECTED))
try {
DataInputStream dais = new DataInputStream(e.inputStream)
int payloadSize = dais.readUnsignedShort()
byte[] payload = new byte[payloadSize]
dais.readFully(payload)
def json = new JsonSlurper()
json = json.parse(payload)
if (json.tryHosts == null) {
log.warning("post-rejection json didn't contain hosts to try")
return
}
json.tryHosts.asList().each {
Destination suggested = new Destination(it)
eventBus.publish(new HostDiscoveredEvent(destination: suggested))
}
} catch (Exception ignore) {
log.log(Level.WARNING,"Problem parsing post-rejection payload",ignore)
} finally {
// the end
e.close()
}
}
public boolean isInProgress(Destination d) {
inProgress.contains(d)
}
}
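Read together with the acceptor above, the connect-side handshake is a short plain-byte exchange. Sketched roughly (the reply bytes are inferred from readK()/readEJECT(); the acceptor's side of the "OK" write is not shown in this hunk):

connector -> acceptor: "MuWire peer"   (or "MuWire leaf", depending on settings.isLeaf())
acceptor  -> connector: "OK"       -> both ends wrap the streams in InflaterInputStream / DeflaterOutputStream
acceptor  -> connector: "REJECT"   -> optionally followed by a 2-byte length and {"tryHosts":[...]}, published as HostDiscoveredEvents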

View File

@@ -6,14 +6,14 @@ import net.i2p.data.Destination
class ConnectionEvent extends Event {

Endpoint endpoint
boolean incoming
Boolean leaf // can be null if unknown
ConnectionAttemptStatus status

@Override
public String toString() {
"ConnectionEvent ${super.toString()} endpoint: $endpoint incoming: $incoming leaf : $leaf status : $status"
}
}

View File

@@ -1,6 +1,7 @@
package com.muwire.core.connection

import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.hostcache.HostCache
import com.muwire.core.search.QueryEvent
@@ -10,60 +11,64 @@ import com.muwire.core.trust.TrustLevel
import net.i2p.data.Destination

abstract class ConnectionManager {

private static final int PING_TIME = 20000

final EventBus eventBus
private final Timer timer
protected final HostCache hostCache
protected final Persona me
protected final MuWireSettings settings

ConnectionManager() {}

ConnectionManager(EventBus eventBus, Persona me, HostCache hostCache, MuWireSettings settings) {
this.eventBus = eventBus
this.me = me
this.hostCache = hostCache
this.settings = settings
this.timer = new Timer("connections-pinger",true)
}

void start() {
timer.schedule({sendPings()} as TimerTask, 1000,1000)
}

void stop() {
timer.cancel()
getConnections().each { it.close() }
}

void onTrustEvent(TrustEvent e) {
if (e.level == TrustLevel.DISTRUSTED)
drop(e.persona.destination)
}

abstract void drop(Destination d)

abstract Collection<Connection> getConnections()

protected abstract int getDesiredConnections()

boolean needsConnections() {
return getConnections().size() < getDesiredConnections()
}

abstract boolean isConnected(Destination d)

abstract void onConnectionEvent(ConnectionEvent e)

abstract void onDisconnectionEvent(DisconnectionEvent e)

abstract void shutdown()

protected void sendPings() {
final long now = System.currentTimeMillis()
getConnections().each {
if (now - it.lastPingSentTime > PING_TIME)
it.sendPing()
}
}
}

View File

@@ -5,11 +5,11 @@ import com.muwire.core.Event
import net.i2p.data.Destination

class DisconnectionEvent extends Event {

Destination destination

@Override
public String toString() {
"DisconnectionEvent ${super.toString()} destination:${destination.toBase32()}"
}
}

View File

@@ -1,45 +1,46 @@
package com.muwire.core.connection

import java.util.concurrent.atomic.AtomicBoolean
import java.util.logging.Level

import groovy.util.logging.Log
import net.i2p.data.Destination

@Log
class Endpoint implements Closeable {
final Destination destination
final InputStream inputStream
final OutputStream outputStream
final def toClose

private final AtomicBoolean closed = new AtomicBoolean()

Endpoint(Destination destination, InputStream inputStream, OutputStream outputStream, def toClose) {
this.destination = destination
this.inputStream = inputStream
this.outputStream = outputStream
this.toClose = toClose
}

@Override
public void close() {
if (!closed.compareAndSet(false, true)) {
log.log(Level.WARNING,"Close loop detected for ${destination.toBase32()}", new Exception())
return
}
if (inputStream != null) {
try {inputStream.close()} catch (Exception ignore) {}
}
if (outputStream != null) {
try {outputStream.close()} catch (Exception ignore) {}
}
if (toClose != null) {
try {toClose.reset()} catch (Exception ignore) {}
}
}

@Override
public String toString() {
"destination: ${destination.toBase32()}"
}
}

View File

@@ -5,18 +5,18 @@ import net.i2p.client.streaming.I2PSocketManager
class I2PAcceptor {

final I2PSocketManager socketManager
final I2PServerSocket serverSocket

I2PAcceptor() {}

I2PAcceptor(I2PSocketManager socketManager) {
this.socketManager = socketManager
this.serverSocket = socketManager.getServerSocket()
}

Endpoint accept() {
def socket = serverSocket.accept()
new Endpoint(socket.getPeerDestination(), socket.getInputStream(), socket.getOutputStream(), socket)
}
}

View File

@@ -4,18 +4,18 @@ import net.i2p.client.streaming.I2PSocketManager
import net.i2p.data.Destination

class I2PConnector {

final I2PSocketManager socketManager

I2PConnector() {}

I2PConnector(I2PSocketManager socketManager) {
this.socketManager = socketManager
}

Endpoint connect(Destination dest) {
def socket = socketManager.connect(dest)
new Endpoint(dest, socket.getInputStream(), socket.getOutputStream(), socket)
}
}

View File

@@ -4,32 +4,34 @@ import java.io.InputStream
import java.io.OutputStream

import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.hostcache.HostCache
import com.muwire.core.trust.TrustService

import net.i2p.data.Destination

/**
 * Connection where the other side is a leaf.
 * Such connections can only be incoming.
 * @author zab
 */
class LeafConnection extends Connection {

public LeafConnection(EventBus eventBus, Endpoint endpoint, HostCache hostCache,
TrustService trustService, MuWireSettings settings) {
super(eventBus, endpoint, true, hostCache, trustService, settings);
}

@Override
protected void read() {
// TODO Auto-generated method stub

}

@Override
protected void write(Object message) {
// TODO Auto-generated method stub

}
}

View File

@@ -3,6 +3,7 @@ package com.muwire.core.connection
import java.util.concurrent.ConcurrentHashMap

import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.hostcache.HostCache
import com.muwire.core.search.QueryEvent
@@ -12,63 +13,68 @@ import net.i2p.data.Destination
@Log
class LeafConnectionManager extends ConnectionManager {

final int maxConnections

final Map<Destination, UltrapeerConnection> connections = new ConcurrentHashMap()

public LeafConnectionManager(EventBus eventBus, Persona me, int maxConnections,
HostCache hostCache, MuWireSettings settings) {
super(eventBus, me, hostCache, settings)
this.maxConnections = maxConnections
}

@Override
public void drop(Destination d) {
// TODO Auto-generated method stub

}

void onQueryEvent(QueryEvent e) {
if (me.destination == e.receivedOn) {
connections.values().each { it.sendQuery(e) }
}
}

@Override
public Collection<Connection> getConnections() {
connections.values()
}

@Override
protected int getDesiredConnections() {
return maxConnections;
}

@Override
public boolean isConnected(Destination d) {
connections.containsKey(d)
}

@Override
public void onConnectionEvent(ConnectionEvent e) {
if (e.incoming || e.leaf) {
log.severe("Got inconsistent event as a leaf! $e")
return
}
if (e.status != ConnectionAttemptStatus.SUCCESSFUL)
return
Connection c = new UltrapeerConnection(eventBus, e.endpoint)
connections.put(e.endpoint.destination, c)
c.start()
}

@Override
public void onDisconnectionEvent(DisconnectionEvent e) {
def removed = connections.remove(e.destination)
if (removed == null)
log.severe("removed destination not present in connection manager ${e.destination.toBase32()}")
}

@Override
void shutdown() {
}
}

View File

@@ -4,6 +4,7 @@ import java.io.InputStream
import java.io.OutputStream

import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.hostcache.HostCache
import com.muwire.core.trust.TrustService
import com.muwire.core.util.DataUtil
@@ -19,62 +20,63 @@ import net.i2p.data.Destination
*/
@Log
class PeerConnection extends Connection {

private final DataInputStream dis
private final DataOutputStream dos

private final byte[] readHeader = new byte[3]
private final byte[] writeHeader = new byte[3]

private final JsonSlurper slurper = new JsonSlurper()

public PeerConnection(EventBus eventBus, Endpoint endpoint,
boolean incoming, HostCache hostCache, TrustService trustService,
MuWireSettings settings) {
super(eventBus, endpoint, incoming, hostCache, trustService, settings)
this.dis = new DataInputStream(endpoint.inputStream)
this.dos = new DataOutputStream(endpoint.outputStream)
}

@Override
protected void read() {
dis.readFully(readHeader)
int length = DataUtil.readLength(readHeader)
log.fine("$name read length $length")

byte[] payload = new byte[length]
dis.readFully(payload)

if ((readHeader[0] & (byte)0x80) == 0x80) {
// TODO process binary
} else {
def json = slurper.parse(payload)
if (json.type == null)
throw new Exception("missing json type")
switch(json.type) {
case "Ping" : handlePing(); break;
case "Pong" : handlePong(json); break;
case "Search": handleSearch(json); break
default :
throw new Exception("unknown json type ${json.type}")
}
}
}

@Override
protected void write(Object message) {
byte[] payload
if (message instanceof Map) {
payload = JsonOutput.toJson(message).bytes
DataUtil.packHeader(payload.length, writeHeader)
log.fine "$name writing message type ${message.type} length $payload.length"
writeHeader[0] &= (byte)0x7F
} else {
// TODO: write binary
}
dos.write(writeHeader)
dos.write(payload)
dos.flush()
}
}

View File

@@ -17,30 +17,30 @@ import net.i2p.data.Destination
*/
class UltrapeerConnection extends Connection {

public UltrapeerConnection(EventBus eventBus, Endpoint endpoint, HostCache hostCache, TrustService trustService) {
super(eventBus, endpoint, false, hostCache, trustService)
}

@Override
protected void read() {
// TODO Auto-generated method stub

}

@Override
protected void write(Object message) {
if (message instanceof Map) {
writeJsonMessage(message)
} else {
writeBinaryMessage(message)
}
}

private void writeJsonMessage(def message) {

}

private void writeBinaryMessage(def message) {

}
}

View File

@@ -4,6 +4,7 @@ import java.util.Collection
import java.util.concurrent.ConcurrentHashMap

import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.hostcache.HostCache
import com.muwire.core.search.QueryEvent
@@ -14,28 +15,28 @@ import net.i2p.data.Destination
@Log
class UltrapeerConnectionManager extends ConnectionManager {

final int maxPeers, maxLeafs
final TrustService trustService

final Map<Destination, PeerConnection> peerConnections = new ConcurrentHashMap()
final Map<Destination, LeafConnection> leafConnections = new ConcurrentHashMap()

UltrapeerConnectionManager() {}

public UltrapeerConnectionManager(EventBus eventBus, Persona me, int maxPeers, int maxLeafs,
HostCache hostCache, TrustService trustService, MuWireSettings settings) {
super(eventBus, me, hostCache, settings)
this.maxPeers = maxPeers
this.maxLeafs = maxLeafs
this.trustService = trustService
}

@Override
public void drop(Destination d) {
peerConnections.get(d)?.close()
leafConnections.get(d)?.close()
}

void onQueryEvent(QueryEvent e) {
forwardQueryToLeafs(e)
if (!e.firstHop)
@@ -49,59 +50,67 @@ class UltrapeerConnectionManager extends ConnectionManager {
}
}

@Override
public Collection<Connection> getConnections() {
def rv = new ArrayList(peerConnections.size() + leafConnections.size())
rv.addAll(peerConnections.values())
rv.addAll(leafConnections.values())
rv
}

boolean hasLeafSlots() {
leafConnections.size() < maxLeafs
}

boolean hasPeerSlots() {
peerConnections.size() < maxPeers
}

@Override
protected int getDesiredConnections() {
return maxPeers / 2;
}

@Override
public boolean isConnected(Destination d) {
peerConnections.containsKey(d) || leafConnections.containsKey(d)
}

@Override
public void onConnectionEvent(ConnectionEvent e) {
if (!e.incoming && e.leaf) {
log.severe("Inconsistent event $e")
return
}

if (e.status != ConnectionAttemptStatus.SUCCESSFUL)
return

Connection c = e.leaf ?
new LeafConnection(eventBus, e.endpoint, hostCache, trustService, settings) :
new PeerConnection(eventBus, e.endpoint, e.incoming, hostCache, trustService, settings)
def map = e.leaf ? leafConnections : peerConnections
map.put(e.endpoint.destination, c)
c.start()
}

@Override
public void onDisconnectionEvent(DisconnectionEvent e) {
def removed = peerConnections.remove(e.destination)
if (removed == null)
removed = leafConnections.remove(e.destination)
if (removed == null)
log.severe("Removed connection not present in either leaf or peer map ${e.destination.toBase32()}")
}

@Override
void shutdown() {
peerConnections.values().stream().parallel().forEach({v -> v.close()})
leafConnections.values().stream().parallel().forEach({v -> v.close()})
peerConnections.clear()
leafConnections.clear()
}

void forwardQueryToLeafs(QueryEvent e) {

}
}

View File

@@ -0,0 +1,25 @@
package com.muwire.core.download
class BadHashException extends Exception {
public BadHashException() {
super();
}
public BadHashException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) {
super(message, cause, enableSuppression, writableStackTrace);
}
public BadHashException(String message, Throwable cause) {
super(message, cause);
}
public BadHashException(String message) {
super(message);
}
public BadHashException(Throwable cause) {
super(cause);
}
}

View File

@@ -1,20 +1,57 @@
package com.muwire.core.download

import com.muwire.core.connection.I2PConnector
import com.muwire.core.files.FileDownloadedEvent
import com.muwire.core.files.FileHasher
import com.muwire.core.mesh.Mesh
import com.muwire.core.mesh.MeshManager
import com.muwire.core.trust.TrustLevel
import com.muwire.core.trust.TrustService
import com.muwire.core.util.DataUtil

import groovy.json.JsonBuilder
import groovy.json.JsonOutput
import groovy.json.JsonSlurper
import net.i2p.data.Base64
import net.i2p.data.Destination
import net.i2p.util.ConcurrentHashSet

import com.muwire.core.EventBus
import com.muwire.core.InfoHash
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.UILoadedEvent

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.Executor
import java.util.concurrent.Executors

public class DownloadManager {

private final EventBus eventBus
private final TrustService trustService
private final MeshManager meshManager
private final MuWireSettings muSettings
private final I2PConnector connector
private final Executor executor
private final File incompletes, home
private final Persona me

private final Map<InfoHash, Downloader> downloaders = new ConcurrentHashMap<>()

public DownloadManager(EventBus eventBus, TrustService trustService, MeshManager meshManager, MuWireSettings muSettings,
I2PConnector connector, File home, Persona me) {
this.eventBus = eventBus
this.trustService = trustService
this.meshManager = meshManager
this.muSettings = muSettings
this.connector = connector
this.incompletes = new File(home,"incompletes")
this.home = home
this.me = me

incompletes.mkdir()

this.executor = Executors.newCachedThreadPool({ r ->
Thread rv = new Thread(r)
rv.setName("download-worker")
@@ -22,12 +59,143 @@ public class DownloadManager {
rv
})
}

public void onUIDownloadEvent(UIDownloadEvent e) {

def size = e.result[0].size
def infohash = e.result[0].infohash
def pieceSize = e.result[0].pieceSize
Set<Destination> destinations = new HashSet<>()
e.result.each {
destinations.add(it.sender.destination)
}
destinations.addAll(e.sources)
destinations.remove(me.destination)
Pieces pieces = getPieces(infohash, size, pieceSize)
def downloader = new Downloader(eventBus, this, me, e.target, size,
infohash, pieceSize, connector, destinations,
incompletes, pieces)
downloaders.put(infohash, downloader)
persistDownloaders()
executor.execute({downloader.download()} as Runnable)
eventBus.publish(new DownloadStartedEvent(downloader : downloader))
}
public void onUIDownloadCancelledEvent(UIDownloadCancelledEvent e) {
downloaders.remove(e.downloader.infoHash)
persistDownloaders()
}
public void onUIDownloadPausedEvent(UIDownloadPausedEvent e) {
persistDownloaders()
}
public void onUIDownloadResumedEvent(UIDownloadResumedEvent e) {
persistDownloaders()
}
void resume(Downloader downloader) {
executor.execute({downloader.download() as Runnable})
}
void onUILoadedEvent(UILoadedEvent e) {
File downloadsFile = new File(home, "downloads.json")
if (!downloadsFile.exists())
return
def slurper = new JsonSlurper()
downloadsFile.eachLine {
def json = slurper.parseText(it)
File file = new File(DataUtil.readi18nString(Base64.decode(json.file)))
def destinations = new HashSet<>()
json.destinations.each { destination ->
destinations.add new Destination(destination)
}
InfoHash infoHash
if (json.hashList != null) {
byte[] hashList = Base64.decode(json.hashList)
infoHash = InfoHash.fromHashList(hashList)
} else {
byte [] root = Base64.decode(json.hashRoot)
infoHash = new InfoHash(root)
}
Pieces pieces = getPieces(infoHash, (long)json.length, json.pieceSizePow2)
def downloader = new Downloader(eventBus, this, me, file, (long)json.length,
infoHash, json.pieceSizePow2, connector, destinations, incompletes, pieces)
if (json.paused != null)
downloader.paused = json.paused
downloaders.put(infoHash, downloader)
downloader.readPieces()
if (!downloader.paused)
downloader.download()
eventBus.publish(new DownloadStartedEvent(downloader : downloader))
}
}
private Pieces getPieces(InfoHash infoHash, long length, int pieceSizePow2) {
int pieceSize = 0x1 << pieceSizePow2
int nPieces = (int)(length / pieceSize)
if (length % pieceSize != 0)
nPieces++
Mesh mesh = meshManager.getOrCreate(infoHash, nPieces)
mesh.pieces
}
void onSourceDiscoveredEvent(SourceDiscoveredEvent e) {
Downloader downloader = downloaders.get(e.infoHash)
if (downloader == null)
return
boolean ok = false
switch(trustService.getLevel(e.source.destination)) {
case TrustLevel.TRUSTED: ok = true; break
case TrustLevel.NEUTRAL: ok = muSettings.allowUntrusted; break
case TrustLevel.DISTRUSTED: ok = false; break
}
if (ok)
downloader.addSource(e.source.destination)
}
void onFileDownloadedEvent(FileDownloadedEvent e) {
downloaders.remove(e.downloader.infoHash)
persistDownloaders()
}
private void persistDownloaders() {
File downloadsFile = new File(home,"downloads.json")
downloadsFile.withPrintWriter { writer ->
downloaders.values().each { downloader ->
if (!downloader.cancelled) {
def json = [:]
json.file = Base64.encode(DataUtil.encodei18nString(downloader.file.getAbsolutePath()))
json.length = downloader.length
json.pieceSizePow2 = downloader.pieceSizePow2
def destinations = []
downloader.destinations.each {
destinations << it.toBase64()
}
json.destinations = destinations
InfoHash infoHash = downloader.getInfoHash()
if (infoHash.hashList != null)
json.hashList = Base64.encode(infoHash.hashList)
else
json.hashRoot = Base64.encode(infoHash.getRoot())
json.paused = downloader.paused
writer.println(JsonOutput.toJson(json))
}
}
}
}
public void shutdown() {
downloaders.values().each { it.stop() }
Downloader.executorService.shutdownNow()
}
}
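For reference, each line that persistDownloaders() writes to downloads.json is one self-contained JSON object per active download. A hypothetical entry (field names taken from the code above, values are placeholders only) would look like:

{"file":"<Base64 of the i18n-encoded absolute path>","length":1048576,"pieceSizePow2":17,"destinations":["<Base64 destination>","<Base64 destination>"],"hashList":"<Base64 of the concatenated piece hashes>","paused":false}

When the full hash list is not yet known, hashRoot is written instead of hashList; onUILoadedEvent() uses the same distinction when it rebuilds the InfoHash on startup.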

View File

@@ -3,41 +3,56 @@ package com.muwire.core.download;
import net.i2p.data.Base64

import com.muwire.core.Constants
import com.muwire.core.EventBus
import com.muwire.core.InfoHash
import com.muwire.core.Persona
import com.muwire.core.connection.Endpoint
import com.muwire.core.util.DataUtil

import static com.muwire.core.util.DataUtil.readTillRN
import groovy.util.logging.Log

import java.nio.ByteBuffer
import java.nio.MappedByteBuffer
import java.nio.channels.FileChannel
import java.nio.charset.StandardCharsets
import java.nio.file.Files
import java.nio.file.StandardOpenOption
import java.security.MessageDigest
import java.security.NoSuchAlgorithmException
import java.util.logging.Level

@Log
class DownloadSession {

private final EventBus eventBus
private final String meB64
private final Pieces pieces
private final InfoHash infoHash
private final Endpoint endpoint
private final File file
private final int pieceSize
private final long fileLength
private final Set<Integer> available
private final MessageDigest digest

private long lastSpeedRead = System.currentTimeMillis()
private long dataSinceLastRead

private MappedByteBuffer mapped

DownloadSession(EventBus eventBus, String meB64, Pieces pieces, InfoHash infoHash, Endpoint endpoint, File file,
int pieceSize, long fileLength, Set<Integer> available) {
this.eventBus = eventBus
this.meB64 = meB64
this.pieces = pieces
this.endpoint = endpoint
this.infoHash = infoHash
this.file = file
this.pieceSize = pieceSize
this.fileLength = fileLength
this.available = available
try {
digest = MessageDigest.getInstance("SHA-256")
} catch (NoSuchAlgorithmException impossible) {
@@ -45,103 +60,173 @@ class DownloadSession {
System.exit(1)
}
}

/**
 * @return if the request will proceed. The only time it may not
 * is if all the pieces have been claimed by other sessions.
 * @throws IOException
 */
public boolean request() throws IOException {
OutputStream os = endpoint.getOutputStream()
InputStream is = endpoint.getInputStream()

int[] pieceAndPosition
if (available.isEmpty())
pieceAndPosition = pieces.claim()
else
pieceAndPosition = pieces.claim(new HashSet<>(available))
if (pieceAndPosition == null)
return false
int piece = pieceAndPosition[0]
int position = pieceAndPosition[1]
boolean steal = pieceAndPosition[2] == 1
boolean unclaim = true

log.info("will download piece $piece from position $position steal $steal")

long pieceStart = piece * ((long)pieceSize)
long end = Math.min(fileLength, pieceStart + pieceSize) - 1
long start = pieceStart + position
String root = Base64.encode(infoHash.getRoot())

try {
os.write("GET $root\r\n".getBytes(StandardCharsets.US_ASCII))
os.write("Range: $start-$end\r\n".getBytes(StandardCharsets.US_ASCII))
os.write("X-Persona: $meB64\r\n".getBytes(StandardCharsets.US_ASCII))
String xHave = DataUtil.encodeXHave(pieces.getDownloaded(), pieces.nPieces)
os.write("X-Have: $xHave\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
os.flush()
String codeString = readTillRN(is)
int space = codeString.indexOf(' ')
if (space > 0)
codeString = codeString.substring(0, space)
int code = Integer.parseInt(codeString.trim())

if (code == 404) {
log.warning("file not found")
endpoint.close()
return false
}

if (!(code == 200 || code == 416)) {
log.warning("unknown code $code")
endpoint.close()
return false
}

// parse all headers
Map<String,String> headers = new HashMap<>()
String header
while((header = readTillRN(is)) != "" && headers.size() < Constants.MAX_HEADERS) {
int colon = header.indexOf(':')
if (colon == -1 || colon == header.length() - 1)
throw new IOException("invalid header $header")
String key = header.substring(0, colon)
String value = header.substring(colon + 1)
headers[key] = value.trim()
}

// parse X-Alt if present
if (headers.containsKey("X-Alt")) {
headers["X-Alt"].split(",").each {
if (it.length() > 0) {
byte [] raw = Base64.decode(it)
Persona source = new Persona(new ByteArrayInputStream(raw))
eventBus.publish(new SourceDiscoveredEvent(infoHash : infoHash, source : source))
}
}
}

// parse X-Have if present
if (headers.containsKey("X-Have")) {
DataUtil.decodeXHave(headers["X-Have"]).each {
available.add(it)
}
if (!available.contains(piece))
return true // try again next time
} else {
if (code != 200)
throw new IOException("Code $code but no X-Have")
available.clear()
}

if (code != 200)
return true

String range = headers["Content-Range"]
if (range == null)
throw new IOException("Code 200 but no Content-Range")
def group = (range =~ /^(\d+)-(\d+)$/)
if (group.size() != 1)
throw new IOException("invalid Content-Range header $range")

long receivedStart = Long.parseLong(group[0][1])
long receivedEnd = Long.parseLong(group[0][2])

if (receivedStart != start || receivedEnd != end) {
log.warning("We don't support mismatching ranges yet")
endpoint.close()
return false
}

// start the download
FileChannel channel
try {
channel = Files.newByteChannel(file.toPath(), EnumSet.of(StandardOpenOption.READ, StandardOpenOption.WRITE,
StandardOpenOption.SPARSE, StandardOpenOption.CREATE))
mapped = channel.map(FileChannel.MapMode.READ_WRITE, pieceStart, end - pieceStart + 1)
mapped.position(position)

byte[] tmp = new byte[0x1 << 13]
while(mapped.hasRemaining()) {
if (mapped.remaining() < tmp.length)
tmp = new byte[mapped.remaining()]
int read = is.read(tmp)
if (read == -1)
throw new IOException()
synchronized(this) {
mapped.put(tmp, 0, read)
dataSinceLastRead += read
pieces.markPartial(piece, mapped.position())
}
}
mapped.clear()
digest.update(mapped)
byte [] hash = digest.digest()
byte [] expected = new byte[32]
System.arraycopy(infoHash.getHashList(), piece * 32, expected, 0, 32)
if (hash != expected) {
pieces.markPartial(piece, 0)
throw new BadHashException("bad hash on piece $piece")
}
} finally {
try { channel?.close() } catch (IOException ignore) {}
DataUtil.tryUnmap(mapped)
}
pieces.markDownloaded(piece)
unclaim = false
} finally {
if (unclaim && !steal)
pieces.unclaim(piece)
}
return true
}

synchronized int positionInPiece() {
if (mapped == null)
return 0
mapped.position()
}
synchronized int speed() {
final long now = System.currentTimeMillis()
long interval = Math.max(1000, now - lastSpeedRead)
lastSpeedRead = now;
int rv = (int) (dataSinceLastRead * 1000.0 / interval)
dataSinceLastRead = 0
rv
}
}
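As a quick reference, the exchange that request() performs is plain ASCII followed by raw piece data. A hypothetical request/response for one piece (values illustrative; the real Range depends on the claimed piece and position, and only the leading integer of the status line is actually parsed):

GET <Base64 file root>
Range: 393216-524287
X-Persona: <Base64 persona of the downloader>
X-Have: <encoded list of pieces the downloader already has>

200 OK
Content-Range: 393216-524287
X-Alt: <Base64 persona>,<Base64 persona>
X-Have: <encoded list of pieces the uploader has>

<raw piece bytes>

Each line ends with \r\n. A 416 response leaves the endpoint open so the uploader's X-Have header can still be consumed, and every X-Alt entry is published as a SourceDiscoveredEvent.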

View File

@@ -1,69 +1,347 @@
package com.muwire.core.download

import com.muwire.core.InfoHash
import com.muwire.core.Persona
import com.muwire.core.connection.Endpoint

import java.nio.file.AtomicMoveNotSupportedException
import java.nio.file.Files
import java.nio.file.StandardCopyOption
import java.time.Instant
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicBoolean
import java.util.logging.Level

import com.muwire.core.Constants
import com.muwire.core.DownloadedFile
import com.muwire.core.EventBus
import com.muwire.core.connection.I2PConnector
import com.muwire.core.files.FileDownloadedEvent
import com.muwire.core.util.DataUtil

import groovy.util.logging.Log
import net.i2p.data.Destination
import net.i2p.util.ConcurrentHashSet

@Log
public class Downloader {

public enum DownloadState { CONNECTING, HASHLIST, DOWNLOADING, FAILED, CANCELLED, PAUSED, FINISHED }
private enum WorkerState { CONNECTING, HASHLIST, DOWNLOADING, FINISHED}

private static final ExecutorService executorService = Executors.newCachedThreadPool({r ->
Thread rv = new Thread(r)
rv.setName("download worker")
rv.setDaemon(true)
rv
})

private final EventBus eventBus
private final DownloadManager downloadManager
private final Persona me
private final File file
private final Pieces pieces
private final long length
private InfoHash infoHash
private final int pieceSize
private final I2PConnector connector
private final Set<Destination> destinations
private final int nPieces
private final File piecesFile
private final File incompleteFile
final int pieceSizePow2
private final Map<Destination, DownloadWorker> activeWorkers = new ConcurrentHashMap<>()
private final Set<Destination> successfulDestinations = new ConcurrentHashSet<>()

private volatile boolean cancelled, paused
private final AtomicBoolean eventFired = new AtomicBoolean()
private boolean piecesFileClosed

private ArrayList speedArr = new ArrayList<Integer>()
private int speedPos = 0
private int speedAvg = 0
private long timestamp = Instant.now().toEpochMilli()

public Downloader(EventBus eventBus, DownloadManager downloadManager,
Persona me, File file, long length, InfoHash infoHash,
int pieceSizePow2, I2PConnector connector, Set<Destination> destinations,
File incompletes, Pieces pieces) {
this.eventBus = eventBus
this.me = me
this.downloadManager = downloadManager
this.file = file
this.infoHash = infoHash
this.length = length
this.connector = connector
this.destinations = destinations
this.piecesFile = new File(incompletes, file.getName()+".pieces")
this.incompleteFile = new File(incompletes, file.getName()+".part")
this.pieceSizePow2 = pieceSizePow2
this.pieceSize = 1 << pieceSizePow2
this.pieces = pieces
this.nPieces = pieces.nPieces

// default size suitable for an average of 5 seconds / 5 elements / 5 interval units
// it's easily adjustable by resizing the size of speedArr
this.speedArr = [ 0, 0, 0, 0, 0 ]
}

public synchronized InfoHash getInfoHash() {
infoHash
}

private synchronized void setInfoHash(InfoHash infoHash) {
this.infoHash = infoHash
}

void download() {
readPieces()
destinations.each {
if (it != me.destination) {
def worker = new DownloadWorker(it)
activeWorkers.put(it, worker)
executorService.submit(worker)
}
}
}
void readPieces() {
if (!piecesFile.exists())
return
piecesFile.eachLine {
String [] split = it.split(",")
int piece = Integer.parseInt(split[0])
if (split.length == 1)
pieces.markDownloaded(piece)
else {
int position = Integer.parseInt(split[1])
pieces.markPartial(piece, position)
}
}
}
void writePieces() {
synchronized(piecesFile) {
if (piecesFileClosed)
return
piecesFile.withPrintWriter { writer ->
pieces.write(writer)
}
}
}
public long donePieces() {
pieces.donePieces()
}

public int speed() {
int currSpeed = 0
if (getCurrentState() == DownloadState.DOWNLOADING) {
activeWorkers.values().each {
if (it.currentState == WorkerState.DOWNLOADING)
currSpeed += it.speed()
}
}
// normalize to speedArr.size
currSpeed /= speedArr.size()
// compute new speedAvg and update speedArr
if ( speedArr[speedPos] > speedAvg ) {
speedAvg = 0
} else {
speedAvg -= speedArr[speedPos]
}
speedAvg += currSpeed
speedArr[speedPos] = currSpeed
// this might be necessary due to rounding errors
if (speedAvg < 0)
speedAvg = 0
// rolling index over the speedArr
speedPos++
if (speedPos >= speedArr.size())
speedPos=0
speedAvg
} }
public DownloadState getCurrentState() {
if (cancelled)
return DownloadState.CANCELLED
if (paused)
return DownloadState.PAUSED
boolean allFinished = true
activeWorkers.values().each {
allFinished &= it.currentState == WorkerState.FINISHED
}
if (allFinished) {
if (pieces.isComplete())
return DownloadState.FINISHED
return DownloadState.FAILED
}
// if at least one is downloading...
boolean oneDownloading = false
activeWorkers.values().each {
if (it.currentState == WorkerState.DOWNLOADING) {
oneDownloading = true
return
}
}
if (oneDownloading)
return DownloadState.DOWNLOADING
// at least one is requesting hashlist
boolean oneHashlist = false
activeWorkers.values().each {
if (it.currentState == WorkerState.HASHLIST) {
oneHashlist = true
return
}
}
if (oneHashlist)
return DownloadState.HASHLIST
return DownloadState.CONNECTING
}
public void cancel() {
cancelled = true
stop()
synchronized(piecesFile) {
piecesFileClosed = true
piecesFile.delete()
}
incompleteFile.delete()
pieces.clearAll()
}
public void pause() {
paused = true
stop()
}
void stop() {
activeWorkers.values().each {
it.cancel()
}
}
public int activeWorkers() {
int active = 0
activeWorkers.values().each {
if (it.currentState != WorkerState.FINISHED)
active++
}
active
}
public void resume() {
paused = false
readPieces()
destinations.each { destination ->
def worker = activeWorkers.get(destination)
if (worker != null) {
if (worker.currentState == WorkerState.FINISHED) {
def newWorker = new DownloadWorker(destination)
activeWorkers.put(destination, newWorker)
executorService.submit(newWorker)
}
} else {
worker = new DownloadWorker(destination)
activeWorkers.put(destination, worker)
executorService.submit(worker)
}
}
}
void addSource(Destination d) {
if (activeWorkers.containsKey(d))
return
DownloadWorker newWorker = new DownloadWorker(d)
activeWorkers.put(d, newWorker)
executorService.submit(newWorker)
}
class DownloadWorker implements Runnable {
private final Destination destination
private volatile WorkerState currentState
private volatile Thread downloadThread
private Endpoint endpoint
private volatile DownloadSession currentSession
private final Set<Integer> available = new HashSet<>()
DownloadWorker(Destination destination) {
this.destination = destination
}
public void run() {
downloadThread = Thread.currentThread()
currentState = WorkerState.CONNECTING
Endpoint endpoint = null
try {
endpoint = connector.connect(destination)
while(getInfoHash().hashList == null) {
currentState = WorkerState.HASHLIST
HashListSession session = new HashListSession(me.toBase64(), infoHash, endpoint)
InfoHash received = session.request()
setInfoHash(received)
}
currentState = WorkerState.DOWNLOADING
boolean requestPerformed
while(!pieces.isComplete()) {
currentSession = new DownloadSession(eventBus, me.toBase64(), pieces, getInfoHash(),
endpoint, incompleteFile, pieceSize, length, available)
requestPerformed = currentSession.request()
if (!requestPerformed)
break
successfulDestinations.add(endpoint.destination)
writePieces()
}
} catch (Exception bad) {
log.log(Level.WARNING,"Exception while downloading",DataUtil.findRoot(bad))
} finally {
writePieces()
currentState = WorkerState.FINISHED
if (pieces.isComplete() && eventFired.compareAndSet(false, true)) {
synchronized(piecesFile) {
piecesFileClosed = true
piecesFile.delete()
}
activeWorkers.values().each {
if (it.destination != destination)
it.cancel()
}
try {
Files.move(incompleteFile.toPath(), file.toPath(), StandardCopyOption.ATOMIC_MOVE)
} catch (AtomicMoveNotSupportedException e) {
Files.copy(incompleteFile.toPath(), file.toPath(), StandardCopyOption.REPLACE_EXISTING)
incompleteFile.delete()
}
eventBus.publish(
new FileDownloadedEvent(
downloadedFile : new DownloadedFile(file, getInfoHash(), pieceSizePow2, successfulDestinations),
downloader : Downloader.this))
}
endpoint?.close()
}
}
int speed() {
if (currentSession == null)
return 0
currentSession.speed()
}
void cancel() {
downloadThread?.interrupt()
}
}
}
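The speed() method above reports a rolling average: it keeps the last five readings in speedArr and maintains speedAvg incrementally instead of re-summing the window on every call. A small standalone Groovy sketch of the same bookkeeping, with made-up worker readings:

// Standalone sketch of the rolling average kept by Downloader.speed(); the readings are made up.
def speedArr = [0, 0, 0, 0, 0]
int speedPos = 0
int speedAvg = 0
[50000, 80000, 65000, 0, 70000, 90000].each { int reading ->
    int currSpeed = reading.intdiv(speedArr.size())  // normalize to speedArr.size, as speed() does
    if (speedArr[speedPos] > speedAvg)
        speedAvg = 0                                 // same guard as the original
    else
        speedAvg -= speedArr[speedPos]               // forget the sample about to be overwritten
    speedAvg += currSpeed
    speedArr[speedPos] = currSpeed
    if (speedAvg < 0)
        speedAvg = 0                                 // rounding-error guard
    speedPos = (speedPos + 1) % speedArr.size()      // rolling index
    println "reported speed: $speedAvg bytes/sec"
}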

View File

@@ -0,0 +1,82 @@
package com.muwire.core.download
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.security.MessageDigest
import java.security.NoSuchAlgorithmException
import com.muwire.core.Constants
import com.muwire.core.InfoHash
import com.muwire.core.connection.Endpoint
import groovy.util.logging.Log
import static com.muwire.core.util.DataUtil.readTillRN
import net.i2p.data.Base64
@Log
class HashListSession {
private final String meB64
private final InfoHash infoHash
private final Endpoint endpoint
HashListSession(String meB64, InfoHash infoHash, Endpoint endpoint) {
this.meB64 = meB64
this.infoHash = infoHash
this.endpoint = endpoint
}
InfoHash request() throws IOException {
InputStream is = endpoint.getInputStream()
OutputStream os = endpoint.getOutputStream()
String root = Base64.encode(infoHash.getRoot())
os.write("HASHLIST $root\r\n".getBytes(StandardCharsets.US_ASCII))
os.write("X-Persona: $meB64\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
os.flush()
String code = readTillRN(is)
if (!code.startsWith("200"))
throw new IOException("unknown code $code")
// parse all headers
Set<String> headers = new HashSet<>()
String header
while((header = readTillRN(is)) != "" && headers.size() < Constants.MAX_HEADERS)
headers.add(header)
long receivedStart = -1
long receivedEnd = -1
for (String receivedHeader : headers) {
def group = (receivedHeader =~ /^Content-Range: (\d+)-(\d+)$/)
if (group.size() != 1) {
log.info("ignoring header $receivedHeader")
continue
}
receivedStart = Long.parseLong(group[0][1])
receivedEnd = Long.parseLong(group[0][2])
}
if (receivedStart != 0)
throw new IOException("hashlist started at $receivedStart")
byte[] hashList = new byte[receivedEnd]
ByteBuffer hashListBuf = ByteBuffer.wrap(hashList)
byte[] tmp = new byte[0x1 << 13]
while(hashListBuf.hasRemaining()) {
if (hashListBuf.remaining() > tmp.length)
tmp = new byte[hashListBuf.remaining()]
int read = is.read(tmp)
if (read == -1)
throw new IOException()
hashListBuf.put(tmp, 0, read)
}
InfoHash received = InfoHash.fromHashList(hashList)
if (received.getRoot() != infoHash.getRoot())
throw new IOException("fetched list doesn't match root")
received
}
}
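For orientation, a minimal sketch of how a download worker is expected to drive this session; the Persona and the already-connected Endpoint are supplied by the caller, and error handling is trimmed:

import com.muwire.core.InfoHash
import com.muwire.core.Persona
import com.muwire.core.connection.Endpoint

// Hedged sketch: the caller supplies its own Persona and a connected Endpoint;
// DownloadWorker keeps the endpoint open for the piece requests that follow.
InfoHash fetchHashList(Persona me, InfoHash rootOnly, Endpoint endpoint) {
    def session = new HashListSession(me.toBase64(), rootOnly, endpoint)
    session.request()   // throws IOException if the fetched list does not match the root
}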

View File

@@ -1,47 +1,116 @@
package com.muwire.core.download
class Pieces {
private final BitSet done, claimed
private final int nPieces
private final float ratio
private final Random random = new Random()
private final Map<Integer,Integer> partials = new HashMap<>()
Pieces(int nPieces) {
this(nPieces, 1.0f)
}
Pieces(int nPieces, float ratio) {
this.nPieces = nPieces
this.ratio = ratio
done = new BitSet(nPieces)
claimed = new BitSet(nPieces)
}
synchronized int[] claim() {
int claimedCardinality = claimed.cardinality()
if (claimedCardinality == nPieces) {
// steal
int downloadedCardinality = done.cardinality()
if (downloadedCardinality == nPieces)
return null
int rv = done.nextClearBit(0)
return [rv, partials.getOrDefault(rv, 0), 1]
}
// if fuller than ratio just do sequential
if ( (1.0f * claimedCardinality) / nPieces > ratio) {
int rv = claimed.nextClearBit(0)
claimed.set(rv)
return [rv, partials.getOrDefault(rv, 0), 0]
}
while(true) {
int start = random.nextInt(nPieces)
if (claimed.get(start))
continue
claimed.set(start)
return [start, partials.getOrDefault(start, 0), 0]
}
}
synchronized int[] claim(Set<Integer> available) {
for (int i = done.nextSetBit(0); i >= 0; i = done.nextSetBit(i+1))
available.remove(i)
if (available.isEmpty())
return null
Set<Integer> availableCopy = new HashSet<>(available)
for (int i = claimed.nextSetBit(0); i >= 0; i = claimed.nextSetBit(i+1))
availableCopy.remove(i)
if (availableCopy.isEmpty()) {
// steal
int rv = available.first()
return [rv, partials.getOrDefault(rv, 0), 1]
}
List<Integer> toList = availableCopy.toList()
Collections.shuffle(toList)
int rv = toList[0]
claimed.set(rv)
[rv, partials.getOrDefault(rv, 0), 0]
}
synchronized def getDownloaded() {
def rv = []
for (int i = done.nextSetBit(0); i >= 0; i = done.nextSetBit(i+1)) {
rv << i
}
rv
}
synchronized void markDownloaded(int piece) {
done.set(piece)
claimed.set(piece)
partials.remove(piece)
}
synchronized void markPartial(int piece, int position) {
partials.put(piece, position)
}
synchronized void unclaim(int piece) {
claimed.clear(piece)
}
synchronized boolean isComplete() {
done.cardinality() == nPieces
}
synchronized int donePieces() {
done.cardinality()
}
synchronized boolean isDownloaded(int piece) {
done.get(piece)
}
synchronized void clearAll() {
done.clear()
claimed.clear()
partials.clear()
}
synchronized void write(PrintWriter writer) {
for (int i = done.nextSetBit(0); i >= 0; i = done.nextSetBit(i+1)) {
writer.println(i)
}
partials.each { piece, position ->
writer.println("$piece,$position")
}
}
}
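To make the claim contract concrete, a minimal sketch with arbitrary values: claim() hands back [piece, resumePosition, stealFlag], markPartial() records how far an interrupted piece got, and write() persists done pieces as bare numbers and partials as "piece,position" pairs, matching what Downloader.readPieces() parses.

import com.muwire.core.download.Pieces

// Illustrative values only: 8 pieces, 50% sequential ratio.
def pieces = new Pieces(8, 0.5f)

int[] claimed = pieces.claim()             // [pieceNumber, resumePosition, stealFlag]
assert claimed[2] == 0                     // nothing needs stealing yet

pieces.markPartial(claimed[0], 12345)      // worker interrupted mid-piece
pieces.markDownloaded(claimed[0])          // ...or it finished the piece

new File("example.pieces").withPrintWriter { writer ->
    pieces.write(writer)                   // done pieces plus "piece,position" partials
}
println "done ${pieces.donePieces()}/8, complete: ${pieces.isComplete()}"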

View File

@@ -0,0 +1,10 @@
package com.muwire.core.download
import com.muwire.core.Event
import com.muwire.core.InfoHash
import com.muwire.core.Persona
class SourceDiscoveredEvent extends Event {
InfoHash infoHash
Persona source
}

View File

@@ -0,0 +1,7 @@
package com.muwire.core.download
import com.muwire.core.Event
class UIDownloadCancelledEvent extends Event {
Downloader downloader
}

View File

@@ -3,8 +3,11 @@ package com.muwire.core.download
import com.muwire.core.Event
import com.muwire.core.search.UIResultEvent
import net.i2p.data.Destination
class UIDownloadEvent extends Event {
UIResultEvent[] result
Set<Destination> sources
File target
}

View File

@@ -0,0 +1,6 @@
package com.muwire.core.download
import com.muwire.core.Event
class UIDownloadPausedEvent extends Event {
}

View File

@@ -0,0 +1,6 @@
package com.muwire.core.download
import com.muwire.core.Event
class UIDownloadResumedEvent extends Event {
}

View File

@@ -0,0 +1,6 @@
package com.muwire.core.files
import com.muwire.core.Event
class AllFilesLoadedEvent extends Event {
}

View File

@@ -0,0 +1,7 @@
package com.muwire.core.files
import com.muwire.core.Event
class DirectoryUnsharedEvent extends Event {
File directory
}

View File

@@ -0,0 +1,148 @@
package com.muwire.core.files
import java.nio.file.FileSystem
import java.nio.file.FileSystems
import java.nio.file.Path
import java.nio.file.Paths
import static java.nio.file.StandardWatchEventKinds.*
import java.nio.file.ClosedWatchServiceException
import java.nio.file.WatchEvent
import java.nio.file.WatchKey
import java.nio.file.WatchService
import java.util.concurrent.ConcurrentHashMap
import com.muwire.core.EventBus
import com.muwire.core.SharedFile
import groovy.util.logging.Log
import net.i2p.util.SystemVersion
@Log
class DirectoryWatcher {
private static final long WAIT_TIME = 1000
private static final WatchEvent.Kind[] kinds
static {
if (SystemVersion.isMac())
kinds = [ENTRY_MODIFY, ENTRY_DELETE]
else
kinds = [ENTRY_CREATE, ENTRY_MODIFY, ENTRY_DELETE]
}
private final EventBus eventBus
private final FileManager fileManager
private final Thread watcherThread, publisherThread
private final Map<File, Long> waitingFiles = new ConcurrentHashMap<>()
private final Map<File, WatchKey> watchedDirectories = new ConcurrentHashMap<>()
private WatchService watchService
private volatile boolean shutdown
DirectoryWatcher(EventBus eventBus, FileManager fileManager) {
this.eventBus = eventBus
this.fileManager = fileManager
this.watcherThread = new Thread({watch() } as Runnable, "directory-watcher")
watcherThread.setDaemon(true)
this.publisherThread = new Thread({publish()} as Runnable, "watched-files-publisher")
publisherThread.setDaemon(true)
}
void onAllFilesLoadedEvent(AllFilesLoadedEvent e) {
watchService = FileSystems.getDefault().newWatchService()
watcherThread.start()
publisherThread.start()
}
void stop() {
shutdown = true
watcherThread?.interrupt()
publisherThread?.interrupt()
watchService?.close()
}
void onFileSharedEvent(FileSharedEvent e) {
if (!e.file.isDirectory())
return
Path path = e.file.getCanonicalFile().toPath()
WatchKey wk = path.register(watchService, kinds)
watchedDirectories.put(e.file, wk)
}
void onDirectoryUnsharedEvent(DirectoryUnsharedEvent e) {
WatchKey wk = watchedDirectories.remove(e.directory)
wk?.cancel()
}
private void watch() {
try {
while(!shutdown) {
WatchKey key = watchService.take()
key.pollEvents().each {
switch(it.kind()) {
case ENTRY_CREATE: processCreated(key.watchable(), it.context()); break
case ENTRY_MODIFY: processModified(key.watchable(), it.context()); break
case ENTRY_DELETE: processDeleted(key.watchable(), it.context()); break
}
}
key.reset()
}
} catch (InterruptedException|ClosedWatchServiceException e) {
if (!shutdown)
throw e
}
}
private void processCreated(Path parent, Path path) {
File f = join(parent, path)
log.fine("created entry $f")
if (f.isDirectory())
f.toPath().register(watchService, kinds)
else
waitingFiles.put(f, System.currentTimeMillis())
}
private void processModified(Path parent, Path path) {
File f = join(parent, path)
log.fine("modified entry $f")
waitingFiles.put(f, System.currentTimeMillis())
}
private void processDeleted(Path parent, Path path) {
File f = join(parent, path)
log.fine("deleted entry $f")
SharedFile sf = fileManager.fileToSharedFile.get(f)
if (sf != null)
eventBus.publish(new FileUnsharedEvent(unsharedFile : sf))
}
private static File join(Path parent, Path path) {
File parentFile = parent.toFile().getCanonicalFile()
new File(parentFile, path.toFile().getName()).getCanonicalFile()
}
private void publish() {
try {
while(!shutdown) {
Thread.sleep(WAIT_TIME)
long now = System.currentTimeMillis()
def published = []
waitingFiles.each { file, timestamp ->
if (now - timestamp > WAIT_TIME) {
log.fine("publishing file $file")
eventBus.publish new FileSharedEvent(file : file)
published << file
}
}
published.each {
waitingFiles.remove(it)
}
}
} catch (InterruptedException e) {
if (!shutdown)
throw e
}
}
}
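A rough usage sketch of the watcher; in the application these handlers are invoked by the EventBus rather than directly, and the EventBus, FileManager and directory below are caller-supplied placeholders:

import com.muwire.core.EventBus
import com.muwire.core.files.*

// Hedged sketch: collaborators are passed in; 'dir' is any shared directory.
void watchSharedDirectory(EventBus eventBus, FileManager fileManager, File dir) {
    def watcher = new DirectoryWatcher(eventBus, fileManager)
    watcher.onAllFilesLoadedEvent(new AllFilesLoadedEvent())    // opens the WatchService, starts both threads
    watcher.onFileSharedEvent(new FileSharedEvent(file: dir))   // register the directory for watching
    // changes under 'dir' are republished as FileSharedEvents once quiet for WAIT_TIME (1000 ms)
    watcher.onDirectoryUnsharedEvent(new DirectoryUnsharedEvent(directory: dir))
    watcher.stop()
}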

View File

@@ -2,10 +2,11 @@ package com.muwire.core.files
import com.muwire.core.DownloadedFile
import com.muwire.core.Event
import com.muwire.core.download.Downloader
import net.i2p.data.Destination
class FileDownloadedEvent extends Event {
Downloader downloader
DownloadedFile downloadedFile
}

View File

@@ -5,6 +5,12 @@ import com.muwire.core.SharedFile
class FileHashedEvent extends Event {
SharedFile sharedFile
String error
@Override
public String toString() {
super.toString() + " sharedFile " + sharedFile?.file.getAbsolutePath() + " error: $error"
}
}

View File

@@ -1,6 +1,10 @@
package com.muwire.core.files
import com.muwire.core.InfoHash
import com.muwire.core.util.DataUtil
import net.i2p.data.Base64
import java.nio.MappedByteBuffer
import java.nio.channels.FileChannel
import java.nio.channels.FileChannel.MapMode
@@ -9,62 +13,79 @@ import java.security.NoSuchAlgorithmException
class FileHasher {
/** max size of shared file is 128 GB */
public static final long MAX_SIZE = 0x1L << 37
/**
* @param size of the file to be shared
* @return the size of each piece in power of 2
* piece size is minimum 128 KBytes and maximum 16 MBytes in power of 2 steps (2^17 - 2^24)
* there can be up to 8192 pieces maximum per file
*/
static int getPieceSize(long size) {
if (size <= 0x1 << 30)
return 17
for (int i = 31; i <= 37; i++) {
if (size <= 0x1L << i) {
return i-13
}
}
throw new IllegalArgumentException("File too large $size")
}
final MessageDigest digest
FileHasher() {
try {
digest = MessageDigest.getInstance("SHA-256")
} catch (NoSuchAlgorithmException impossible) {
digest = null
System.exit(1)
}
}
InfoHash hashFile(File file) {
final long length = file.length()
final int size = 0x1 << getPieceSize(length)
int numPieces = (int) (length / size)
if (numPieces * size < length)
numPieces++
def output = new ByteArrayOutputStream()
RandomAccessFile raf = new RandomAccessFile(file, "r")
try {
MappedByteBuffer buf
for (int i = 0; i < numPieces - 1; i++) {
buf = raf.getChannel().map(MapMode.READ_ONLY, ((long)size) * i, size)
digest.update buf
DataUtil.tryUnmap(buf)
output.write(digest.digest(), 0, 32)
}
def lastPieceLength = length - (numPieces - 1) * ((long)size)
buf = raf.getChannel().map(MapMode.READ_ONLY, length - lastPieceLength, lastPieceLength)
digest.update buf
output.write(digest.digest(), 0, 32)
} finally {
raf.close()
}
byte [] hashList = output.toByteArray()
InfoHash.fromHashList(hashList)
}
public static void main(String[] args) {
if (args.length != 1) {
println "This utility computes an infohash of a file"
println "Pass absolute path to a file as an argument"
System.exit(1)
}
def file = new File(args[0])
file = file.getAbsoluteFile()
def hasher = new FileHasher()
def infohash = hasher.hashFile(file)
println Base64.encode(infohash.getRoot())
}
}
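The new schedule keeps every file at or under 8192 pieces: up to 1 GB the piece size is 2^17 (128 KB), and each further doubling of the file size doubles the piece size, up to 2^24 (16 MB) at the 128 GB limit. A worked sketch with arbitrary file sizes:

import com.muwire.core.files.FileHasher

// Arbitrary example sizes; prints the piece size exponent and resulting piece count.
[
    700L * 1024 * 1024,           // ~700 MB -> 2^17 (128 KB) pieces
    4L * 1024 * 1024 * 1024,      // 4 GB    -> 2^19 (512 KB) pieces
    100L * 1024 * 1024 * 1024     // 100 GB  -> 2^24 (16 MB) pieces
].each { long size ->
    int pow = FileHasher.getPieceSize(size)
    long pieceSize = 1L << pow
    long nPieces = size.intdiv(pieceSize) + (size % pieceSize == 0 ? 0 : 1)
    println "$size bytes -> 2^$pow byte pieces, $nPieces pieces"
}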

View File

@@ -0,0 +1,15 @@
package com.muwire.core.files
import com.muwire.core.Event
import com.muwire.core.SharedFile
class FileHashingEvent extends Event {
File hashingFile
@Override
public String toString() {
super.toString() + " hashingFile " + hashingFile.getAbsolutePath()
}
}

View File

@@ -5,5 +5,5 @@ import com.muwire.core.SharedFile
class FileLoadedEvent extends Event {
SharedFile loadedFile
}

View File

@@ -2,7 +2,9 @@ package com.muwire.core.files
import com.muwire.core.EventBus
import com.muwire.core.InfoHash
import com.muwire.core.MuWireSettings
import com.muwire.core.SharedFile
import com.muwire.core.UILoadedEvent
import com.muwire.core.search.ResultsEvent
import com.muwire.core.search.SearchEvent
import com.muwire.core.search.SearchIndex
@@ -13,107 +15,136 @@ import groovy.util.logging.Log
class FileManager {
final EventBus eventBus
final MuWireSettings settings
final Map<InfoHash, Set<SharedFile>> rootToFiles = Collections.synchronizedMap(new HashMap<>())
final Map<File, SharedFile> fileToSharedFile = Collections.synchronizedMap(new HashMap<>())
final Map<String, Set<File>> nameToFiles = new HashMap<>()
final SearchIndex index = new SearchIndex()
FileManager(EventBus eventBus, MuWireSettings settings) {
this.settings = settings
this.eventBus = eventBus
}
void onFileHashedEvent(FileHashedEvent e) {
if (e.sharedFile != null)
addToIndex(e.sharedFile)
}
void onFileLoadedEvent(FileLoadedEvent e) {
addToIndex(e.loadedFile)
}
void onFileDownloadedEvent(FileDownloadedEvent e) {
if (settings.shareDownloadedFiles) {
addToIndex(e.downloadedFile)
}
}
private void addToIndex(SharedFile sf) {
log.info("Adding shared file " + sf.getFile())
InfoHash infoHash = sf.getInfoHash()
Set<SharedFile> existing = rootToFiles.get(infoHash)
if (existing == null) {
log.info("adding new root")
existing = new HashSet<>()
rootToFiles.put(infoHash, existing);
}
existing.add(sf)
fileToSharedFile.put(sf.file, sf)
String name = sf.getFile().getName()
Set<File> existingFiles = nameToFiles.get(name)
if (existingFiles == null) {
existingFiles = new HashSet<>()
nameToFiles.put(name, existingFiles)
}
existingFiles.add(sf.getFile())
index.add(name)
}
void onFileUnsharedEvent(FileUnsharedEvent e) {
SharedFile sf = e.unsharedFile
InfoHash infoHash = sf.getInfoHash()
Set<SharedFile> existing = rootToFiles.get(infoHash)
if (existing != null) {
existing.remove(sf)
if (existing.isEmpty()) {
rootToFiles.remove(infoHash)
}
}
fileToSharedFile.remove(sf.file)
String name = sf.getFile().getName()
Set<File> existingFiles = nameToFiles.get(name)
if (existingFiles != null) {
existingFiles.remove(sf.file)
if (existingFiles.isEmpty()) {
nameToFiles.remove(name)
}
}
index.remove(name)
}
Map<File, SharedFile> getSharedFiles() {
synchronized(fileToSharedFile) {
return new HashMap<>(fileToSharedFile)
}
}
Set<SharedFile> getSharedFiles(byte []root) {
return rootToFiles.get(new InfoHash(root))
}
void onSearchEvent(SearchEvent e) {
// hash takes precedence
ResultsEvent re = null
if (e.searchHash != null) {
Set<SharedFile> found
found = rootToFiles.get new InfoHash(e.searchHash)
found = filter(found, e.oobInfohash)
if (found != null && !found.isEmpty())
re = new ResultsEvent(results: found.asList(), uuid: e.uuid, searchEvent: e)
} else {
def names = index.search e.searchTerms
Set<File> files = new HashSet<>()
names.each { files.addAll nameToFiles.getOrDefault(it, []) }
Set<SharedFile> sharedFiles = new HashSet<>()
files.each { sharedFiles.add fileToSharedFile[it] }
files = filter(sharedFiles, e.oobInfohash)
if (!sharedFiles.isEmpty())
re = new ResultsEvent(results: sharedFiles.asList(), uuid: e.uuid, searchEvent: e)
}
if (re != null)
eventBus.publish(re)
}
private static Set<SharedFile> filter(Set<SharedFile> files, boolean oob) {
if (!oob)
return files
Set<SharedFile> rv = new HashSet<>()
files.each {
if (it.getPieceSize() != 0)
rv.add(it)
}
rv
}
void onDirectoryUnsharedEvent(DirectoryUnsharedEvent e) {
e.directory.listFiles().each {
if (it.isDirectory())
eventBus.publish(new DirectoryUnsharedEvent(directory : it))
else {
SharedFile sf = fileToSharedFile.get(it)
if (sf != null)
eventBus.publish(new FileUnsharedEvent(unsharedFile : sf))
}
}
}
}
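A sketch of the keyword-search path through the new class, assuming SearchEvent exposes the searchTerms, uuid and oobInfohash properties that onSearchEvent() reads; the EventBus and settings are supplied by the caller, and matches come back on the bus as a ResultsEvent that now carries the originating SearchEvent:

import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.search.SearchEvent

// Hedged sketch: collaborators are passed in, not constructed here.
void keywordSearch(EventBus eventBus, MuWireSettings settings) {
    def fileManager = new FileManager(eventBus, settings)
    fileManager.onSearchEvent(new SearchEvent(
        searchTerms : ["muwire", "release"],
        uuid        : UUID.randomUUID(),
        oobInfohash : false))   // true would drop files shared before piece sizes were recorded
    // results arrive on the EventBus as ResultsEvent(results, uuid, searchEvent)
}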

View File

@@ -4,5 +4,10 @@ import com.muwire.core.Event
class FileSharedEvent extends Event {
File file
@Override
public String toString() {
return super.toString() + " file: "+file.getAbsolutePath()
}
}

View File

@@ -4,5 +4,5 @@ import com.muwire.core.Event
import com.muwire.core.SharedFile
class FileUnsharedEvent extends Event {
SharedFile unsharedFile
}

View File

@@ -8,36 +8,41 @@ import com.muwire.core.SharedFile
class HasherService {
final FileHasher hasher
final EventBus eventBus
final FileManager fileManager
Executor executor
HasherService(FileHasher hasher, EventBus eventBus, FileManager fileManager) {
this.hasher = hasher
this.eventBus = eventBus
this.fileManager = fileManager
}
void start() {
executor = Executors.newSingleThreadExecutor()
}
void onFileSharedEvent(FileSharedEvent evt) {
if (fileManager.fileToSharedFile.containsKey(evt.file.getCanonicalFile()))
return
executor.execute( { -> process(evt.file) } as Runnable)
}
private void process(File f) {
f = f.getCanonicalFile()
if (f.isDirectory()) {
f.listFiles().each {eventBus.publish new FileSharedEvent(file: it) }
} else {
if (f.length() == 0) {
eventBus.publish new FileHashedEvent(error: "Not sharing empty file $f")
} else if (f.length() > FileHasher.MAX_SIZE) {
eventBus.publish new FileHashedEvent(error: "$f is too large to be shared ${f.length()}")
} else {
eventBus.publish new FileHashingEvent(hashingFile: f)
def hash = hasher.hashFile f
eventBus.publish new FileHashedEvent(sharedFile: new SharedFile(f, hash, FileHasher.getPieceSize(f.length())))
}
}
}
}

View File

@@ -1,5 +1,8 @@
package com.muwire.core.files
import java.nio.file.CopyOption
import java.nio.file.Files
import java.nio.file.StandardCopyOption
import java.util.logging.Level
import java.util.stream.Collectors
@@ -8,6 +11,7 @@ import com.muwire.core.EventBus
import com.muwire.core.InfoHash
import com.muwire.core.Service
import com.muwire.core.SharedFile
import com.muwire.core.UILoadedEvent
import com.muwire.core.util.DataUtil
import groovy.json.JsonOutput
@@ -19,122 +23,135 @@ import net.i2p.data.Destination
@Log
class PersisterService extends Service {
final File location
final EventBus listener
final int interval
final Timer timer
final FileManager fileManager
PersisterService(File location, EventBus listener, int interval, FileManager fileManager) {
this.location = location
this.listener = listener
this.interval = interval
this.fileManager = fileManager
timer = new Timer("file persister", true)
}
void stop() {
timer.cancel()
}
void onUILoadedEvent(UILoadedEvent e) {
timer.schedule({load()} as TimerTask, 1)
}
void load() {
if (location.exists() && location.isFile()) {
def slurper = new JsonSlurper()
try {
location.eachLine {
if (it.trim().length() > 0) {
def parsed = slurper.parseText it
def event = fromJson parsed
if (event != null) {
log.fine("loaded file $event.loadedFile.file")
listener.publish event
}
}
}
listener.publish(new AllFilesLoadedEvent())
} catch (IllegalArgumentException|NumberFormatException e) {
log.log(Level.WARNING, "couldn't load files",e)
}
} else {
listener.publish(new AllFilesLoadedEvent())
}
timer.schedule({persistFiles()} as TimerTask, 0, interval)
loaded = true
}
private static FileLoadedEvent fromJson(def json) {
if (json.file == null || json.length == null || json.infoHash == null || json.hashList == null)
throw new IllegalArgumentException()
if (!(json.hashList instanceof List))
throw new IllegalArgumentException()
def file = new File(DataUtil.readi18nString(Base64.decode(json.file)))
file = file.getCanonicalFile()
if (!file.exists() || file.isDirectory())
return null
long length = Long.valueOf(json.length)
if (length != file.length())
return null
List hashList = (List) json.hashList
ByteArrayOutputStream baos = new ByteArrayOutputStream()
hashList.each {
byte [] hash = Base64.decode it.toString()
if (hash == null)
throw new IllegalArgumentException()
baos.write hash
}
byte[] hashListBytes = baos.toByteArray()
InfoHash ih = InfoHash.fromHashList(hashListBytes)
byte [] root = Base64.decode(json.infoHash.toString())
if (root == null)
throw new IllegalArgumentException()
if (!Arrays.equals(root, ih.getRoot()))
return null
int pieceSize = 0
if (json.pieceSize != null)
pieceSize = json.pieceSize
if (json.sources != null) {
List sources = (List)json.sources
Set<Destination> sourceSet = sources.stream().map({d -> new Destination(d.toString())}).collect Collectors.toSet()
DownloadedFile df = new DownloadedFile(file, ih, pieceSize, sourceSet)
return new FileLoadedEvent(loadedFile : df)
}
SharedFile sf = new SharedFile(file, ih, pieceSize)
return new FileLoadedEvent(loadedFile: sf)
}
private void persistFiles() {
def sharedFiles = fileManager.getSharedFiles()
File tmp = File.createTempFile("muwire-files", "tmp")
tmp.deleteOnExit()
tmp.withPrintWriter { writer ->
sharedFiles.each { k, v ->
def json = toJson(k,v)
json = JsonOutput.toJson(json)
writer.println json
}
}
Files.copy(tmp.toPath(), location.toPath(), StandardCopyOption.REPLACE_EXISTING)
tmp.delete()
}
private def toJson(File f, SharedFile sf) {
def json = [:]
json.file = Base64.encode DataUtil.encodei18nString(f.getCanonicalFile().toString())
json.length = f.length()
InfoHash ih = sf.getInfoHash()
json.infoHash = Base64.encode ih.getRoot()
json.pieceSize = sf.getPieceSize()
byte [] tmp = new byte [32]
json.hashList = []
for (int i = 0;i < ih.getHashList().length / 32; i++) {
System.arraycopy(ih.getHashList(), i * 32, tmp, 0, 32)
json.hashList.add Base64.encode(tmp)
}
if (sf instanceof DownloadedFile) {
json.sources = sf.sources.stream().map( {d -> d.toBase64()}).collect(Collectors.toList())
}
json
}
}

View File

@@ -17,177 +17,177 @@ import net.i2p.data.Destination
@Log
class CacheClient {
private static final int CRAWLER_RETURN = 10
final EventBus eventBus
final HostCache cache
final ConnectionManager manager
final I2PSession session
final long interval
final MuWireSettings settings
final Timer timer
public CacheClient(EventBus eventBus, HostCache cache,
ConnectionManager manager, I2PSession session,
MuWireSettings settings, long interval) {
this.eventBus = eventBus
this.cache = cache
this.manager = manager
this.session = session
this.settings = settings
this.interval = interval
this.timer = new Timer("hostcache-client",true)
}
void start() {
session.addMuxedSessionListener(new Listener(), I2PSession.PROTO_DATAGRAM, 0)
timer.schedule({queryIfNeeded()} as TimerTask, 1, interval)
}
void stop() {
timer.cancel()
}
private void queryIfNeeded() {
if (!manager.getConnections().isEmpty())
return
if (!cache.getHosts(1).isEmpty())
return
log.info "Will query hostcaches"
def ping = [type: "Ping", version: 1, leaf: settings.isLeaf()]
ping = JsonOutput.toJson(ping)
def maker = new I2PDatagramMaker(session)
ping = maker.makeI2PDatagram(ping.bytes)
def options = new SendMessageOptions()
options.setSendLeaseSet(true)
CacheServers.getCacheServers().each {
log.info "Querying hostcache ${it.toBase32()}"
session.sendMessage(it, ping, 0, ping.length, I2PSession.PROTO_DATAGRAM, 1, 0, options)
}
}
class Listener implements I2PSessionMuxedListener {
private final JsonSlurper slurper = new JsonSlurper()
@Override
public void messageAvailable(I2PSession session, int msgId, long size) {
}
@Override
public void messageAvailable(I2PSession session, int msgId, long size, int proto, int fromport, int toport) {
if (proto != I2PSession.PROTO_DATAGRAM) {
log.warning "Received unexpected protocol $proto"
return
}
def payload = session.receiveMessage(msgId)
def dissector = new I2PDatagramDissector()
try {
dissector.loadI2PDatagram(payload)
def sender = dissector.getSender()
log.info("Received something from ${sender.toBase32()}")
payload = dissector.getPayload()
payload = slurper.parse(payload)
if (payload.type == null) {
log.warning("type missing")
return
}
switch(payload.type) {
case "Pong" : handlePong(sender, payload); break
case "CrawlerPing": handleCrawlerPing(session, sender, payload); break
default : log.warning("unknown type ${payload.type}")
}
} catch (Exception e) {
log.warning("Invalid datagram $e")
}
}
@Override
public void reportAbuse(I2PSession session, int severity) {
}
@Override
public void disconnected(I2PSession session) {
log.severe "I2P session disconnected"
}
@Override
public void errorOccurred(I2PSession session, String message, Throwable error) {
log.severe "I2P error occured $message $error"
}
}
private void handlePong(Destination from, def pong) {
if (!CacheServers.isRegistered(from)) {
log.warning("received pong from non-registered destination")
return
}
if (pong.pongs == null) {
log.warning("malformed pong - no pongs")
return
}
pong.pongs.asList().each {
Destination dest = new Destination(it)
if (!session.getMyDestination().equals(dest))
eventBus.publish(new HostDiscoveredEvent(destination: dest, fromHostcache : true))
}
}
private void handleCrawlerPing(I2PSession session, Destination from, def ping) {
if (settings.isLeaf()) {
log.warning("Received crawler ping but I'm a leaf")
return
}
switch(settings.getCrawlerResponse()) {
case CrawlerResponse.NONE:
log.info("Responding to crawlers is disabled by user")
break
case CrawlerResponse.ALL:
respondToCrawler(session, from, ping)
break;
case CrawlerResponse.REGISTERED:
if (CacheServers.isRegistered(from))
respondToCrawler(session, from, ping)
else
log.warning("Ignoring crawler ping from non-registered crawler")
break
}
}
private void respondToCrawler(I2PSession session, Destination from, def ping) {
log.info "responding to crawler ping"
def neighbors = manager.getConnections().collect { c -> c.endpoint.destination.toBase64() }
Collections.shuffle(neighbors)
if (neighbors.size() > CRAWLER_RETURN)
neighbors = neighbors[0..CRAWLER_RETURN - 1]
def upManager = (UltrapeerConnectionManager) manager;
def pong = [:]
pong.peers = neighbors
pong.uuid = ping.uuid
pong.type = "CrawlerPong"
pong.version = 1
pong.leafSlots = upManager.hasLeafSlots()
pong.peerSlots = upManager.hasPeerSlots()
pong = JsonOutput.toJson(pong)
def maker = new I2PDatagramMaker(session)
pong = maker.makeI2PDatagram(pong.bytes)
session.sendMessage(from, pong, I2PSession.PROTO_DATAGRAM, 0, 0)
}
}

View File

@@ -4,20 +4,21 @@ import net.i2p.data.Destination
class CacheServers {
private static final int TO_GIVE = 3
private static Set<Destination> CACHES = [
new Destination("Wddh2E6FyyXBF7SvUYHKdN-vjf3~N6uqQWNeBDTM0P33YjiQCOsyedrjmDZmWFrXUJfJLWnCb5bnKezfk4uDaMyj~uvDG~yvLVcFgcPWSUd7BfGgym-zqcG1q1DcM8vfun-US7YamBlmtC6MZ2j-~Igqzmgshita8aLPCfNAA6S6e2UMjjtG7QIXlxpMec75dkHdJlVWbzrk9z8Qgru3YIk0UztYgEwDNBbm9wInsbHhr3HtAfa02QcgRVqRN2PnQXuqUJs7R7~09FZPEviiIcUpkY3FeyLlX1sgQFBeGeA96blaPvZNGd6KnNdgfLgMebx5SSxC-N4KZMSMBz5cgonQF3~m2HHFRSI85zqZNG5X9bJN85t80ltiv1W1es8ZnQW4es11r7MrvJNXz5bmSH641yJIvS6qI8OJJNpFVBIQSXLD-96TayrLQPaYw~uNZ-eXaE6G5dYhiuN8xHsFI1QkdaUaVZnvDGfsRbpS5GtpUbBDbyLkdPurG0i7dN1wAAAA"),
new Destination("JC63wJNOqSJmymkj4~UJWywBTvDGikKMoYP0HX2Wz9c5l3otXSkwnxWAFL4cKr~Ygh3BNNi2t93vuLIiI1W8AsE42kR~PwRx~Y-WvIHXR6KUejRmOp-n8WidtjKg9k4aDy428uSOedqXDxys5mpoeQXwDsv1CoPTTwnmb1GWFy~oTGIsCguCl~aJWGnqiKarPO3GJQ~ev-NbvAQzUfC3HeP1e6pdI5CGGjExahTCID5UjpJw8GaDXWlGmYWWH303Xu4x-vAHQy1dJLsOBCn8dZravsn5BKJk~j0POUon45CCx-~NYtaPe0Itt9cMdD2ciC76Rep1D0X0sm1SjlSs8sZ52KmF3oaLZ6OzgI9QLMIyBUrfi41sK5I0qTuUVBAkvW1xr~L-20dYJ9TrbOaOb2-vDIfKaxVi6xQOuhgQDiSBhd3qv2m0xGu-BM9DQYfNA0FdMjnZmqjmji9RMavzQSsVFIbQGLbrLepiEFlb7TseCK5UtRp8TxnG7L4gbYevBQAEAAcAAA==")
]
static List<Destination> getCacheServers() {
List<Destination> allCaches = new ArrayList<>(CACHES)
Collections.shuffle(allCaches)
if (allCaches.size() <= TO_GIVE)
return allCaches
allCaches[0..TO_GIVE-1]
}
static boolean isRegistered(Destination d) {
return CACHES.contains(d)
}
}

View File

@@ -4,30 +4,43 @@ import net.i2p.data.Destination
class Host {
private static final int MAX_FAILURES = 3
final Destination destination
private final int clearInterval
int failures,successes
long lastAttempt
public Host(Destination destination, int clearInterval) {
this.destination = destination
this.clearInterval = clearInterval
}
synchronized void onConnect() {
failures = 0
successes++
lastAttempt = System.currentTimeMillis()
}
synchronized void onFailure() {
failures++
successes = 0
lastAttempt = System.currentTimeMillis()
}
synchronized boolean isFailed() {
failures >= MAX_FAILURES
}
synchronized boolean hasSucceeded() {
successes > 0
}
synchronized void clearFailures() {
failures = 0
}
synchronized boolean canTryAgain() {
System.currentTimeMillis() - lastAttempt > (clearInterval * 60 * 1000)
}
}
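A small sketch of the retry rule this adds: three failures mark a host as failed, but once clearInterval minutes have passed since the last attempt, canTryAgain() lets HostCache.allowHost() consider it again. A null placeholder stands in for a real Destination and 60 is an arbitrary interval:

// Sketch: null stands in for a real Destination; 60 is a placeholder clear interval in minutes.
def host = new Host(null, 60)
3.times { host.onFailure() }
assert host.isFailed()
assert !host.canTryAgain()   // too soon after the last failed attempt
// once 60+ minutes pass with no attempts, canTryAgain() turns true
// and HostCache.allowHost() will give the host another chance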

View File

@@ -15,134 +15,141 @@ import net.i2p.data.Destination
class HostCache extends Service {
final TrustService trustService
final File storage
final int interval
final Timer timer
final MuWireSettings settings
final Destination myself
final Map<Destination, Host> hosts = new ConcurrentHashMap<>()
HostCache(){}
public HostCache(TrustService trustService, File storage, int interval,
MuWireSettings settings, Destination myself) {
this.trustService = trustService
this.storage = storage
this.interval = interval
this.settings = settings
this.myself = myself
this.timer = new Timer("host-persister",true)
}
void start() {
timer.schedule({load()} as TimerTask, 1)
}
void stop() {
timer.cancel()
}
void onHostDiscoveredEvent(HostDiscoveredEvent e) {
if (myself == e.destination)
return
if (hosts.containsKey(e.destination)) {
if (!e.fromHostcache)
return
hosts.get(e.destination).clearFailures()
return
}
Host host = new Host(e.destination, settings.hostClearInterval)
if (allowHost(host)) {
hosts.put(e.destination, host)
}
}
void onConnectionEvent(ConnectionEvent e) {
if (e.leaf)
return
Destination dest = e.endpoint.destination
Host host = hosts.get(dest)
if (host == null) {
host = new Host(dest, settings.hostClearInterval)
hosts.put(dest, host)
}
switch(e.status) {
case ConnectionAttemptStatus.SUCCESSFUL:
case ConnectionAttemptStatus.REJECTED:
host.onConnect()
break
case ConnectionAttemptStatus.FAILED:
host.onFailure()
break
}
}
List<Destination> getHosts(int n) {
List<Destination> rv = new ArrayList<>(hosts.keySet())
rv.retainAll {allowHost(hosts[it])}
if (rv.size() <= n)
return rv
Collections.shuffle(rv)
rv[0..n-1]
}
List<Destination> getGoodHosts(int n) {
List<Destination> rv = new ArrayList<>(hosts.keySet())
rv.retainAll {
Host host = hosts[it]
allowHost(host) && host.hasSucceeded()
}
if (rv.size() <= n)
return rv
Collections.shuffle(rv)
rv[0..n-1]
}
void load() {
if (storage.exists()) {
JsonSlurper slurper = new JsonSlurper()
storage.eachLine {
def entry = slurper.parseText(it)
Destination dest = new Destination(entry.destination)
Host host = new Host(dest, settings.hostClearInterval)
host.failures = Integer.valueOf(String.valueOf(entry.failures))
host.successes = Integer.valueOf(String.valueOf(entry.successes))
if (entry.lastAttempt != null)
host.lastAttempt = entry.lastAttempt
if (allowHost(host))
hosts.put(dest, host)
}
}
timer.schedule({save()} as TimerTask, interval, interval)
loaded = true
}
private boolean allowHost(Host host) {
if (host.isFailed() && !host.canTryAgain())
return false
if (host.destination == myself)
return false
TrustLevel trust = trustService.getLevel(host.destination)
switch(trust) {
case TrustLevel.DISTRUSTED :
return false
case TrustLevel.TRUSTED :
return true
case TrustLevel.NEUTRAL :
return settings.allowUntrusted()
}
false
}
private void save() {
storage.delete()
storage.withPrintWriter { writer ->
hosts.each { dest, host ->
if (allowHost(host)) {
def map = [:]
map.destination = dest.toBase64()
map.failures = host.failures
map.successes = host.successes
map.lastAttempt = host.lastAttempt
def json = JsonOutput.toJson(map)
writer.println json
}
}
}
}
}

View File

@@ -6,10 +6,11 @@ import net.i2p.data.Destination
class HostDiscoveredEvent extends Event {
Destination destination
boolean fromHostcache
@Override
public String toString() {
"HostDiscoveredEvent ${super.toString()} destination:${destination.toBase32()} from hostcache $fromHostcache"
}
}

View File

@@ -0,0 +1,28 @@
package com.muwire.core.mesh
import com.muwire.core.InfoHash
import com.muwire.core.Persona
import com.muwire.core.download.Pieces
import net.i2p.data.Destination
import net.i2p.util.ConcurrentHashSet
class Mesh {
private final InfoHash infoHash
private final Set<Persona> sources = new ConcurrentHashSet<>()
private final Pieces pieces
Mesh(InfoHash infoHash, Pieces pieces) {
this.infoHash = infoHash
this.pieces = pieces
}
Set<Persona> getRandom(int n, Persona exclude) {
List<Persona> tmp = new ArrayList<>(sources)
tmp.remove(exclude)
Collections.shuffle(tmp)
if (tmp.size() < n)
return tmp
tmp[0..n-1]
}
}

View File

@@ -0,0 +1,102 @@
package com.muwire.core.mesh
import java.util.stream.Collectors
import com.muwire.core.Constants
import com.muwire.core.InfoHash
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.download.Pieces
import com.muwire.core.download.SourceDiscoveredEvent
import com.muwire.core.files.FileManager
import com.muwire.core.util.DataUtil
import groovy.json.JsonOutput
import groovy.json.JsonSlurper
import net.i2p.data.Base64
class MeshManager {
private final Map<InfoHash, Mesh> meshes = Collections.synchronizedMap(new HashMap<>())
private final FileManager fileManager
private final File home
private final MuWireSettings settings
MeshManager(FileManager fileManager, File home, MuWireSettings settings) {
this.fileManager = fileManager
this.home = home
this.settings = settings
load()
}
Mesh get(InfoHash infoHash) {
meshes.get(infoHash)
}
Mesh getOrCreate(InfoHash infoHash, int nPieces) {
synchronized(meshes) {
if (meshes.containsKey(infoHash))
return meshes.get(infoHash)
Pieces pieces = new Pieces(nPieces, settings.downloadSequentialRatio)
if (fileManager.rootToFiles.containsKey(infoHash)) {
for (int i = 0; i < nPieces; i++)
pieces.markDownloaded(i)
}
Mesh rv = new Mesh(infoHash, pieces)
meshes.put(infoHash, rv)
return rv
}
}
void onSourceDiscoveredEvent(SourceDiscoveredEvent e) {
Mesh mesh = meshes.get(e.infoHash)
if (mesh == null)
return
mesh.sources.add(e.source)
save()
}
private void save() {
File meshFile = new File(home, "mesh.json")
synchronized(meshes) {
meshFile.withPrintWriter { writer ->
meshes.values().each { mesh ->
def json = [:]
json.timestamp = System.currentTimeMillis()
json.infoHash = Base64.encode(mesh.infoHash.getRoot())
json.sources = mesh.sources.stream().map({it.toBase64()}).collect(Collectors.toList())
json.nPieces = mesh.pieces.nPieces
json.xHave = DataUtil.encodeXHave(mesh.pieces.downloaded, mesh.pieces.nPieces)
writer.println(JsonOutput.toJson(json))
}
}
}
}
private void load() {
File meshFile = new File(home, "mesh.json")
if (!meshFile.exists())
return
long now = System.currentTimeMillis()
JsonSlurper slurper = new JsonSlurper()
meshFile.eachLine {
def json = slurper.parseText(it)
if (now - json.timestamp > settings.meshExpiration * 60 * 1000)
return
InfoHash infoHash = new InfoHash(Base64.decode(json.infoHash))
Pieces pieces = new Pieces(json.nPieces, settings.downloadSequentialRatio)
Mesh mesh = new Mesh(infoHash, pieces)
json.sources.each { source ->
Persona persona = new Persona(new ByteArrayInputStream(Base64.decode(source)))
mesh.sources.add(persona)
}
if (json.xHave != null)
DataUtil.decodeXHave(json.xHave).each { pieces.markDownloaded(it) }
if (!mesh.sources.isEmpty())
meshes.put(infoHash, mesh)
}
}
}
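For illustration only (not part of the commits above): a single line of mesh.json in the shape save() writes, built with Groovy's JsonOutput; every value below is invented.
import groovy.json.JsonOutput
def line = [timestamp: System.currentTimeMillis(),
            infoHash : "cm9vdEhhc2g=",                    // Base64 of the root hash
            sources  : ["cGVyc29uYTE=", "cGVyc29uYTI="],  // Base64-encoded personas
            nPieces  : 42,
            xHave    : "Dw=="]                            // Base64 X-Have bitfield
println JsonOutput.toJson(line)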

View File

@@ -6,11 +6,11 @@ import net.i2p.data.Base32
import net.i2p.data.Destination
class DeleteEvent extends Event {
byte [] infoHash
Destination leaf
@Override
public String toString() {
"DeleteEvent ${super.toString()} infoHash:${Base32.encode(infoHash)} leaf:${leaf.toBase32()}"
}
}

View File

@@ -21,5 +21,5 @@ class InvalidSearchResultException extends Exception {
super(cause);
// TODO Auto-generated constructor stub
}
}

View File

@@ -6,33 +6,33 @@ import com.muwire.core.connection.UltrapeerConnectionManager
import net.i2p.data.Destination
class LeafSearcher {
final UltrapeerConnectionManager connectionManager
final SearchIndex searchIndex = new SearchIndex()
final Map<String, Set<byte[]>> fileNameToHashes = new HashMap<>()
final Map<byte[], Set<Destination>> hashToLeafs = new HashMap<>()
final Map<Destination, Map<byte[], Set<String>>> leafToFiles = new HashMap<>()
LeafSearcher(UltrapeerConnectionManager connectionManager) {
this.connectionManager = connectionManager
}
void onUpsertEvent(UpsertEvent e) {
// TODO: implement
}
void onDeleteEvent(DeleteEvent e) {
// TODO: implement
}
void onDisconnectionEvent(DisconnectionEvent e) {
// TODO: implement
}
void onQueryEvent(QueryEvent e) {
// TODO: implement
}
}

View File

@@ -1,14 +1,20 @@
package com.muwire.core.search
import com.muwire.core.Event
import com.muwire.core.Persona
import net.i2p.data.Destination
class QueryEvent extends Event {
SearchEvent searchEvent
boolean firstHop
Destination replyTo
Persona originator
Destination receivedOn
String toString() {
"searchEvent: $searchEvent firstHop:$firstHop, replyTo:${replyTo.toBase32()}" +
"originator: ${originator.getHumanReadableName()} receivedOn: ${receivedOn.toBase32()}"
}
}

View File

@@ -5,6 +5,7 @@ import com.muwire.core.SharedFile
class ResultsEvent extends Event {
SearchEvent searchEvent
SharedFile[] results
UUID uuid
}

View File

@@ -1,5 +1,7 @@
package com.muwire.core.search
import java.util.stream.Collectors
import javax.naming.directory.InvalidSearchControlsException
import com.muwire.core.InfoHash
@@ -7,13 +9,25 @@ import com.muwire.core.Persona
import com.muwire.core.util.DataUtil
import net.i2p.data.Base64
import net.i2p.data.Destination
class ResultsParser {
public static UIResultEvent parse(Persona p, UUID uuid, def json) throws InvalidSearchResultException {
if (json.type != "Result")
throw new InvalidSearchResultException("not a result json")
switch(json.version) {
case 1:
return parseV1(p, uuid, json)
case 2:
return parseV2(p, uuid, json)
default:
throw new InvalidSearchResultException("unknown version $json.version")
}
}
private static parseV1(Persona p, UUID uuid, def json) {
if (json.name == null)
throw new InvalidSearchResultException("name missing")
if (json.size == null)
@@ -41,15 +55,51 @@ class ResultsParser {
InfoHash parsedIH = InfoHash.fromHashList(hashList)
if (parsedIH.getRoot() != infoHash)
throw new InvalidSearchControlsException("infohash root doesn't match")
return new UIResultEvent( sender : p,
name : name,
size : size,
infohash : parsedIH,
pieceSize : pieceSize,
sources : Collections.emptySet(),
uuid : uuid)
} catch (Exception e) {
throw new InvalidSearchResultException("parsing search result failed",e)
}
}
private static UIResultEvent parseV2(Persona p, UUID uuid, def json) {
if (json.name == null)
throw new InvalidSearchResultException("name missing")
if (json.size == null)
throw new InvalidSearchResultException("length missing")
if (json.infohash == null)
throw new InvalidSearchResultException("infohash missing")
if (json.pieceSize == null)
throw new InvalidSearchResultException("pieceSize missing")
if (json.hashList != null)
throw new InvalidSearchResultException("V2 result with hashlist")
try {
String name = DataUtil.readi18nString(Base64.decode(json.name))
long size = json.size
byte [] infoHash = Base64.decode(json.infohash)
if (infoHash.length != InfoHash.SIZE)
throw new InvalidSearchResultException("invalid infohash size $infoHash.length")
int pieceSize = json.pieceSize
Set<Destination> sources = Collections.emptySet()
if (json.sources != null)
sources = json.sources.stream().map({new Destination(it)}).collect(Collectors.toSet())
return new UIResultEvent( sender : p,
name : name,
size : size,
infohash : new InfoHash(infoHash),
pieceSize : pieceSize,
sources : sources,
uuid: uuid)
} catch (Exception e) {
throw new InvalidSearchResultException("parsing search result failed",e)
}
}
}
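For illustration only (not part of the commits above): a minimal version-2 result in the shape parseV2() expects, with a root infohash only and no hashList; the values are dummies, and a real infohash must be the Base64 of the full root hash.
import groovy.json.JsonSlurper
def text = '{"type":"Result","version":2,"name":"AAhzb21lLnR4dA==",' +
           '"size":12345,"infohash":"cm9vdEhhc2g=","pieceSize":17,"sources":[]}'
def json = new JsonSlurper().parseText(text)
assert json.version == 2 && json.hashList == null   // such a result is routed to parseV2()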

View File

@@ -11,7 +11,10 @@ import java.util.concurrent.Executor
import java.util.concurrent.Executors
import java.util.concurrent.ThreadFactory
import java.util.concurrent.atomic.AtomicInteger
import java.util.logging.Level
import java.util.stream.Collectors
import com.muwire.core.DownloadedFile
import com.muwire.core.EventBus
import com.muwire.core.InfoHash
@@ -22,9 +25,9 @@ import net.i2p.data.Destination
@Log
class ResultsSender {
private static final AtomicInteger THREAD_NO = new AtomicInteger()
private final Executor executor = Executors.newCachedThreadPool(
new ThreadFactory() {
@Override
@@ -35,82 +38,100 @@ class ResultsSender {
rv
}
})
private final I2PConnector connector
private final Persona me
private final EventBus eventBus
ResultsSender(EventBus eventBus, I2PConnector connector, Persona me) {
this.connector = connector;
this.eventBus = eventBus
this.me = me
}
void sendResults(UUID uuid, SharedFile[] results, Destination target, boolean oobInfohash) {
log.info("Sending $results.length results for uuid $uuid to ${target.toBase32()} oobInfohash : $oobInfohash")
if (target.equals(me.destination)) {
results.each {
long length = it.getFile().length()
int pieceSize = it.getPieceSize()
if (pieceSize == 0)
pieceSize = FileHasher.getPieceSize(length)
Set<Destination> suggested = Collections.emptySet()
if (it instanceof DownloadedFile)
suggested = it.sources
def uiResultEvent = new UIResultEvent( sender : me,
name : it.getFile().getName(),
size : length,
infohash : it.getInfoHash(),
pieceSize : pieceSize,
uuid : uuid,
sources : suggested
)
eventBus.publish(uiResultEvent)
}
} else {
executor.execute(new ResultSendJob(uuid : uuid, results : results,
target: target, oobInfohash : oobInfohash))
}
}
private class ResultSendJob implements Runnable {
UUID uuid
SharedFile [] results
Destination target
boolean oobInfohash
@Override
public void run() {
try {
byte [] tmp = new byte[InfoHash.SIZE]
JsonOutput jsonOutput = new JsonOutput()
Endpoint endpoint = null;
try {
endpoint = connector.connect(target)
DataOutputStream os = new DataOutputStream(endpoint.getOutputStream())
os.write("POST $uuid\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
me.write(os)
os.writeShort((short)results.length)
results.each {
byte [] name = it.getFile().getName().getBytes(StandardCharsets.UTF_8)
def baos = new ByteArrayOutputStream()
def daos = new DataOutputStream(baos)
daos.writeShort((short) name.length)
daos.write(name)
daos.flush()
String encodedName = Base64.encode(baos.toByteArray())
def obj = [:]
obj.type = "Result"
obj.version = oobInfohash ? 2 : 1
obj.name = encodedName
obj.infohash = Base64.encode(it.getInfoHash().getRoot())
obj.size = it.getFile().length()
obj.pieceSize = it.getPieceSize()
if (!oobInfohash) {
byte [] hashList = it.getInfoHash().getHashList()
def hashListB64 = []
for (int i = 0; i < hashList.length / InfoHash.SIZE; i++) {
System.arraycopy(hashList, InfoHash.SIZE * i, tmp, 0, InfoHash.SIZE)
hashListB64 << Base64.encode(tmp)
}
obj.hashList = hashListB64
}
if (it instanceof DownloadedFile)
obj.sources = it.sources.stream().map({dest -> dest.toBase64()}).collect(Collectors.toSet())
def json = jsonOutput.toJson(obj)
os.writeShort((short)json.length())
os.write(json.getBytes(StandardCharsets.US_ASCII))
}
os.flush()
} finally {
endpoint?.close()
}
} catch (Exception e) {
log.log(Level.WARNING, "problem sending results",e)
}
}
}

View File

@@ -1,10 +1,19 @@
package com.muwire.core.search
import com.muwire.core.Event
import com.muwire.core.InfoHash
class SearchEvent extends Event {
List<String> searchTerms
byte [] searchHash
UUID uuid
boolean oobInfohash
String toString() {
def infoHash = null
if (searchHash != null)
infoHash = new InfoHash(searchHash)
"searchTerms: $searchTerms searchHash:$infoHash, uuid:$uuid oobInfohash:$oobInfohash"
}
}

View File

@@ -1,55 +1,59 @@
package com.muwire.core.search
import com.muwire.core.Constants
class SearchIndex {
final Map<String, Set<String>> keywords = new HashMap<>()
void add(String string) {
String [] split = split(string)
split.each {
Set<String> existing = keywords.get(it)
if (existing == null) {
existing = new HashSet<>()
keywords.put(it, existing)
}
existing.add(string)
}
}
void remove(String string) {
String [] split = split(string)
split.each {
Set<String> existing = keywords.get it
if (existing != null) {
existing.remove(string)
if (existing.isEmpty()) {
keywords.remove(it)
}
}
}
}
private static String[] split(String source) {
source = source.replaceAll(Constants.SPLIT_PATTERN, " ").toLowerCase()
String [] split = source.split(" ")
def rv = []
split.each { if (it.length() > 0) rv << it }
rv.toArray(new String[0])
}
String[] search(List<String> terms) {
Set<String> rv = null;
terms.each {
Set<String> forWord = keywords.getOrDefault(it,[])
if (rv == null) {
rv = new HashSet<>(forWord)
} else {
rv.retainAll(forWord)
}
}
if (rv != null)
return rv.asList()
[]
}
}
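For illustration only (not part of the commits above), and assuming Constants.SPLIT_PATTERN still treats '.', '_' and '-' as separators: the rewritten split()/search() lower-case their tokens, drop empty ones, and intersect the per-keyword sets.
def index = new SearchIndex()               // assumes the class above is on the classpath
index.add("Holiday_Photos-2019.zip")
index.add("holiday notes")
println index.search(["holiday"])           // both entries, thanks to lower-casing
println index.search(["holiday", "notes"])  // intersection: only "holiday notes"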

View File

@@ -8,17 +8,17 @@ import net.i2p.data.Destination
@Log
public class SearchManager {
private static final int EXPIRE_TIME = 60 * 1000 * 1000
private static final int CHECK_INTERVAL = 60 * 1000
private final EventBus eventBus
private final Persona me
private final ResultsSender resultsSender
private final Map<UUID, QueryEvent> responderAddress = Collections.synchronizedMap(new HashMap<>())
SearchManager(){}
SearchManager(EventBus eventBus, Persona me, ResultsSender resultsSender) {
this.eventBus = eventBus
this.me = me
@@ -26,7 +26,7 @@ public class SearchManager {
Timer timer = new Timer("query-expirer", true)
timer.schedule({cleanup()} as TimerTask, CHECK_INTERVAL, CHECK_INTERVAL)
}
void onQueryEvent(QueryEvent event) {
if (responderAddress.containsKey(event.searchEvent.uuid)) {
log.info("Dropping duplicate search uuid $event.searchEvent.uuid")
@@ -35,7 +35,7 @@ public class SearchManager {
responderAddress.put(event.searchEvent.uuid, event)
eventBus.publish(event.searchEvent)
}
void onResultsEvent(ResultsEvent event) {
Destination target = responderAddress.get(event.uuid)?.replyTo
if (target == null)
@@ -44,13 +44,13 @@ public class SearchManager {
log.info("No results for search uuid $event.uuid")
return
}
resultsSender.sendResults(event.uuid, event.results, target, event.searchEvent.oobInfohash)
}
boolean hasLocalSearch(UUID uuid) {
me.destination.equals(responderAddress.get(uuid)?.replyTo)
}
private void cleanup() {
final long now = System.currentTimeMillis()
synchronized(responderAddress) {

View File

@@ -0,0 +1,8 @@
package com.muwire.core.search
import com.muwire.core.Event
class UIResultBatchEvent extends Event {
UUID uuid
UIResultEvent[] results
}

View File

@@ -4,11 +4,19 @@ import com.muwire.core.Event
import com.muwire.core.InfoHash
import com.muwire.core.Persona
import net.i2p.data.Destination
class UIResultEvent extends Event {
Persona sender
Set<Destination> sources
UUID uuid
String name
long size
InfoHash infohash
int pieceSize
@Override
public String toString() {
super.toString() + "name:$name size:$size sender:${sender.getHumanReadableName()} pieceSize $pieceSize"
}
}

View File

@@ -18,5 +18,5 @@ class UnexpectedResultsException extends Exception {
public UnexpectedResultsException(String message) {
super(message);
}
}

View File

@@ -7,12 +7,12 @@ import net.i2p.data.Destination
class UpsertEvent extends Event {
Set<String> names
byte [] infoHash
Destination leaf
@Override
public String toString() {
"UpsertEvent ${super.toString()} names:$names infoHash:${Base32.encode(infoHash)} leaf:${leaf.toBase32()}"
}
}

View File

@@ -0,0 +1,31 @@
package com.muwire.core.trust
import java.util.concurrent.ConcurrentHashMap
import com.muwire.core.Persona
import net.i2p.util.ConcurrentHashSet
class RemoteTrustList {
public enum Status { NEW, UPDATING, UPDATED, UPDATE_FAILED }
private final Persona persona
private final Set<Persona> good, bad
volatile long timestamp
volatile boolean forceUpdate
Status status = Status.NEW
RemoteTrustList(Persona persona) {
this.persona = persona
good = new ConcurrentHashSet<>()
bad = new ConcurrentHashSet<>()
}
@Override
public boolean equals(Object o) {
if (!(o instanceof RemoteTrustList))
return false
RemoteTrustList other = (RemoteTrustList)o
persona == other.persona
}
}

View File

@@ -1,11 +1,10 @@
package com.muwire.core.trust
import com.muwire.core.Event
import com.muwire.core.Persona
class TrustEvent extends Event {
Persona persona
TrustLevel level
}

View File

@@ -1,89 +1,97 @@
package com.muwire.core.trust
import java.util.concurrent.ConcurrentHashMap
import com.muwire.core.Persona
import com.muwire.core.Service
import net.i2p.data.Base64
import net.i2p.data.Destination
import net.i2p.util.ConcurrentHashSet
class TrustService extends Service {
final File persistGood, persistBad
final long persistInterval
final Map<Destination, Persona> good = new ConcurrentHashMap<>()
final Map<Destination, Persona> bad = new ConcurrentHashMap<>()
final Timer timer
TrustService() {}
TrustService(File persistGood, File persistBad, long persistInterval) {
this.persistBad = persistBad
this.persistGood = persistGood
this.persistInterval = persistInterval
this.timer = new Timer("trust-persister",true)
}
void start() {
timer.schedule({load()} as TimerTask, 1)
}
void stop() {
timer.cancel()
}
void load() {
if (persistGood.exists()) {
persistGood.eachLine {
byte [] decoded = Base64.decode(it)
Persona persona = new Persona(new ByteArrayInputStream(decoded))
good.put(persona.destination, persona)
}
}
if (persistBad.exists()) {
persistBad.eachLine {
byte [] decoded = Base64.decode(it)
Persona persona = new Persona(new ByteArrayInputStream(decoded))
bad.put(persona.destination, persona)
}
}
timer.schedule({persist()} as TimerTask, persistInterval, persistInterval)
loaded = true
}
private void persist() {
persistGood.delete()
persistGood.withPrintWriter { writer ->
good.each {k,v ->
writer.println v.toBase64()
}
}
persistBad.delete()
persistBad.withPrintWriter { writer ->
bad.each { k,v ->
writer.println v.toBase64()
}
}
}
TrustLevel getLevel(Destination dest) {
if (good.containsKey(dest))
return TrustLevel.TRUSTED
else if (bad.containsKey(dest))
return TrustLevel.DISTRUSTED
TrustLevel.NEUTRAL
}
void onTrustEvent(TrustEvent e) {
switch(e.level) {
case TrustLevel.TRUSTED:
bad.remove(e.persona.destination)
good.put(e.persona.destination, e.persona)
break
case TrustLevel.DISTRUSTED:
good.remove(e.persona.destination)
bad.put(e.persona.destination, e.persona)
break
case TrustLevel.NEUTRAL:
good.remove(e.persona.destination)
bad.remove(e.persona.destination)
break
}
}
}
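For illustration only (not part of the commits above), assuming the I2P Base64 utility is on the classpath: the persisted trust files now hold one Base64-encoded Persona per line instead of bare destinations, and the sketch below round-trips a dummy line the way persist() and load() do.
def persistGood = File.createTempFile("trust", "good")
persistGood.withPrintWriter { writer -> writer.println "cGVyc29uYQ==" }   // stand-in for persona.toBase64()
persistGood.eachLine {
    byte [] decoded = net.i2p.data.Base64.decode(it)   // load() would build a Persona from this stream
    println "decoded ${decoded.length} bytes"
}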

View File

@@ -0,0 +1,161 @@
package com.muwire.core.trust
import java.nio.charset.StandardCharsets
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.logging.Level
import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.UILoadedEvent
import com.muwire.core.connection.Endpoint
import com.muwire.core.connection.I2PConnector
import com.muwire.core.util.DataUtil
import groovy.util.logging.Log
import net.i2p.data.Destination
@Log
class TrustSubscriber {
private final EventBus eventBus
private final I2PConnector i2pConnector
private final MuWireSettings settings
private final Map<Destination, RemoteTrustList> remoteTrustLists = new ConcurrentHashMap<>()
private final Object waitLock = new Object()
private volatile boolean shutdown
private volatile Thread thread
private final ExecutorService updateThreads = Executors.newCachedThreadPool()
TrustSubscriber(EventBus eventBus, I2PConnector i2pConnector, MuWireSettings settings) {
this.eventBus = eventBus
this.i2pConnector = i2pConnector
this.settings = settings
}
void onUILoadedEvent(UILoadedEvent e) {
thread = new Thread({checkLoop()} as Runnable, "trust-subscriber")
thread.setDaemon(true)
thread.start()
}
void stop() {
shutdown = true
thread?.interrupt()
updateThreads.shutdownNow()
}
void onTrustSubscriptionEvent(TrustSubscriptionEvent e) {
if (!e.subscribe) {
remoteTrustLists.remove(e.persona.destination)
} else {
RemoteTrustList trustList = remoteTrustLists.putIfAbsent(e.persona.destination, new RemoteTrustList(e.persona))
trustList?.forceUpdate = true
synchronized(waitLock) {
waitLock.notify()
}
}
}
private void checkLoop() {
try {
while(!shutdown) {
synchronized(waitLock) {
waitLock.wait(60 * 1000)
}
final long now = System.currentTimeMillis()
remoteTrustLists.values().each { trustList ->
if (trustList.status == RemoteTrustList.Status.UPDATING)
return
if (!trustList.forceUpdate &&
now - trustList.timestamp < settings.trustListInterval * 60 * 60 * 1000)
return
trustList.forceUpdate = false
updateThreads.submit(new UpdateJob(trustList))
}
}
} catch (InterruptedException e) {
if (!shutdown)
throw e
}
}
private class UpdateJob implements Runnable {
private final RemoteTrustList trustList
UpdateJob(RemoteTrustList trustList) {
this.trustList = trustList
}
public void run() {
trustList.status = RemoteTrustList.Status.UPDATING
eventBus.publish(new TrustSubscriptionUpdatedEvent(trustList : trustList))
if (check(trustList, System.currentTimeMillis()))
trustList.status = RemoteTrustList.Status.UPDATED
else
trustList.status = RemoteTrustList.Status.UPDATE_FAILED
eventBus.publish(new TrustSubscriptionUpdatedEvent(trustList : trustList))
}
}
private boolean check(RemoteTrustList trustList, long now) {
log.info("fetching trust list from ${trustList.persona.getHumanReadableName()}")
Endpoint endpoint = null
try {
endpoint = i2pConnector.connect(trustList.persona.destination)
OutputStream os = endpoint.getOutputStream()
InputStream is = endpoint.getInputStream()
os.write("TRUST\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
os.flush()
String codeString = DataUtil.readTillRN(is)
int space = codeString.indexOf(' ')
if (space > 0)
codeString = codeString.substring(0,space)
int code = Integer.parseInt(codeString.trim())
if (code != 200) {
log.info("couldn't fetch trust list, code $code")
return false
}
// swallow any headers
String header
while (( header = DataUtil.readTillRN(is)) != "");
DataInputStream dis = new DataInputStream(is)
Set<Persona> good = new HashSet<>()
int nGood = dis.readUnsignedShort()
for (int i = 0; i < nGood; i++) {
Persona p = new Persona(dis)
good.add(p)
}
Set<Persona> bad = new HashSet<>()
int nBad = dis.readUnsignedShort()
for (int i = 0; i < nBad; i++) {
Persona p = new Persona(dis)
bad.add(p)
}
trustList.timestamp = now
trustList.good.clear()
trustList.good.addAll(good)
trustList.bad.clear()
trustList.bad.addAll(bad)
return true
} catch (Exception e) {
log.log(Level.WARNING,"exception fetching trust list from ${trustList.persona.getHumanReadableName()}",e)
return false
} finally {
endpoint?.close()
}
}
}
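For illustration only (not part of the commits above): the response body check() reads after connecting, namely a status line, any headers, then an unsigned-short count of good personas followed by the personas themselves, and the same again for the bad list; the sketch writes an empty list of each.
import java.nio.charset.StandardCharsets
def baos = new ByteArrayOutputStream()
baos.write("200 OK\r\n\r\n".getBytes(StandardCharsets.US_ASCII))   // status line, then end of headers
def daos = new DataOutputStream(baos)
daos.writeShort(0)   // nGood: no trusted personas in this toy response
daos.writeShort(0)   // nBad: none distrusted either
daos.flush()         // a real responder would serialize each Persona after its count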

View File

@@ -0,0 +1,9 @@
package com.muwire.core.trust
import com.muwire.core.Event
import com.muwire.core.Persona
class TrustSubscriptionEvent extends Event {
Persona persona
boolean subscribe
}

View File

@@ -0,0 +1,7 @@
package com.muwire.core.trust
import com.muwire.core.Event
class TrustSubscriptionUpdatedEvent extends Event {
RemoteTrustList trustList
}

View File

@@ -0,0 +1,10 @@
package com.muwire.core.update
import com.muwire.core.Event
import com.muwire.core.InfoHash
class UpdateAvailableEvent extends Event {
String version
String signer
String infoHash
}

View File

@@ -0,0 +1,191 @@
package com.muwire.core.update
import java.util.logging.Level
import com.muwire.core.EventBus
import com.muwire.core.InfoHash
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.download.UIDownloadEvent
import com.muwire.core.files.FileDownloadedEvent
import com.muwire.core.files.FileManager
import com.muwire.core.search.QueryEvent
import com.muwire.core.search.SearchEvent
import com.muwire.core.search.UIResultBatchEvent
import groovy.json.JsonOutput
import groovy.json.JsonSlurper
import groovy.util.logging.Log
import net.i2p.client.I2PSession
import net.i2p.client.I2PSessionMuxedListener
import net.i2p.client.SendMessageOptions
import net.i2p.client.datagram.I2PDatagramDissector
import net.i2p.client.datagram.I2PDatagramMaker
import net.i2p.data.Base64
import net.i2p.util.VersionComparator
@Log
class UpdateClient {
final EventBus eventBus
final I2PSession session
final String myVersion
final MuWireSettings settings
final FileManager fileManager
final Persona me
private final Timer timer
private long lastUpdateCheckTime
private volatile InfoHash updateInfoHash
private volatile String version, signer
private volatile boolean updateDownloading
UpdateClient(EventBus eventBus, I2PSession session, String myVersion, MuWireSettings settings, FileManager fileManager, Persona me) {
this.eventBus = eventBus
this.session = session
this.myVersion = myVersion
this.settings = settings
this.fileManager = fileManager
this.me = me
timer = new Timer("update-client",true)
}
void start() {
session.addMuxedSessionListener(new Listener(), I2PSession.PROTO_DATAGRAM, 2)
timer.schedule({checkUpdate()} as TimerTask, 60000, 60 * 60 * 1000)
}
void stop() {
timer.cancel()
}
void onUIResultBatchEvent(UIResultBatchEvent results) {
if (results.results[0].infohash != updateInfoHash)
return
if (updateDownloading)
return
updateDownloading = true
def file = new File(settings.downloadLocation, results.results[0].name)
def downloadEvent = new UIDownloadEvent(result: results.results[0], sources : results.results[0].sources, target : file)
eventBus.publish(downloadEvent)
}
void onFileDownloadedEvent(FileDownloadedEvent e) {
if (e.downloadedFile.infoHash != updateInfoHash)
return
updateDownloading = false
eventBus.publish(new UpdateDownloadedEvent(version : version, signer : signer))
}
private void checkUpdate() {
final long now = System.currentTimeMillis()
if (lastUpdateCheckTime > 0) {
if (now - lastUpdateCheckTime < settings.updateCheckInterval * 60 * 60 * 1000)
return
}
lastUpdateCheckTime = now
log.info("checking for update")
def ping = [version : 1, myVersion : myVersion]
ping = JsonOutput.toJson(ping)
def maker = new I2PDatagramMaker(session)
ping = maker.makeI2PDatagram(ping.bytes)
def options = new SendMessageOptions()
options.setSendLeaseSet(true)
session.sendMessage(UpdateServers.UPDATE_SERVER, ping, 0, ping.length, I2PSession.PROTO_DATAGRAM, 2, 0, options)
}
class Listener implements I2PSessionMuxedListener {
final JsonSlurper slurper = new JsonSlurper()
@Override
public void messageAvailable(I2PSession session, int msgId, long size) {
}
@Override
public void messageAvailable(I2PSession session, int msgId, long size, int proto, int fromport, int toport) {
if (proto != I2PSession.PROTO_DATAGRAM) {
log.warning "Received unexpected protocol $proto"
return
}
def payload = session.receiveMessage(msgId)
def dissector = new I2PDatagramDissector()
try {
dissector.loadI2PDatagram(payload)
def sender = dissector.getSender()
if (sender != UpdateServers.UPDATE_SERVER) {
log.warning("received something not from update server " + sender.toBase32())
return
}
log.info("Received something from update server")
payload = dissector.getPayload()
payload = slurper.parse(payload)
if (payload.version == null) {
log.warning("version missing")
return
}
if (payload.signer == null) {
log.warning("signer missing")
}
if (VersionComparator.comp(myVersion, payload.version) >= 0) {
log.info("no new version available")
return
}
String infoHash
if (settings.updateType == "jar") {
infoHash = payload.infoHash
} else
infoHash = payload[settings.updateType]
if (!settings.autoDownloadUpdate) {
log.info("new version $payload.version available, publishing event")
eventBus.publish(new UpdateAvailableEvent(version : payload.version, signer : payload.signer, infoHash : infoHash))
} else {
log.info("new version $payload.version available")
updateInfoHash = new InfoHash(Base64.decode(infoHash))
if (fileManager.rootToFiles.containsKey(updateInfoHash))
eventBus.publish(new UpdateDownloadedEvent(version : payload.version, signer : payload.signer))
else {
updateDownloading = false
version = payload.version
signer = payload.signer
log.info("starting search for new version hash $payload.infoHash")
def searchEvent = new SearchEvent(searchHash : updateInfoHash.getRoot(), uuid : UUID.randomUUID(), oobInfohash : true)
def queryEvent = new QueryEvent(searchEvent : searchEvent, firstHop : true, replyTo : me.destination,
receivedOn : me.destination, originator : me)
eventBus.publish(queryEvent)
}
}
} catch (Exception e) {
log.log(Level.WARNING,"Invalid datagram",e)
}
}
@Override
public void reportAbuse(I2PSession session, int severity) {
}
@Override
public void disconnected(I2PSession session) {
log.severe("I2P session disconnected")
}
@Override
public void errorOccurred(I2PSession session, String message, Throwable error) {
log.log(Level.SEVERE, message, error)
}
}
}
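For illustration only (not part of the commits above): an update announcement in the shape the Listener accepts; the values are invented, and the real datagram arrives from UPDATE_SERVER and may carry additional per-package keys selected via settings.updateType.
import groovy.json.JsonOutput
def announcement = [version : "0.4.9",            // compared against myVersion
                    signer  : "example signer",
                    infoHash: "cm9vdEhhc2g="]     // Base64 root hash of the update file
println JsonOutput.toJson(announcement)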

View File

@@ -0,0 +1,8 @@
package com.muwire.core.update
import com.muwire.core.Event
class UpdateDownloadedEvent extends Event {
String version
String signer
}

View File

@@ -0,0 +1,7 @@
package com.muwire.core.update
import net.i2p.data.Destination
class UpdateServers {
static final Destination UPDATE_SERVER = new Destination("pSWieSRB3czCl3Zz4WpKp4Z8tjv-05zbogRDS7SEnKcSdWOupVwjzQ92GsgQh1VqgoSRk1F8dpZOnHxxz5HFy9D7ri0uFdkMyXdSKoB7IgkkvCfTAyEmeaPwSYnurF3Zk7u286E7YG2rZkQZgJ77tow7ZS0mxFB7Z0Ti-VkZ9~GeGePW~howwNm4iSQACZA0DyTpI8iv5j4I0itPCQRgaGziob~Vfvjk49nd8N4jtaDGo9cEcafikVzQ2OgBgYWL6LRbrrItwuGqsDvITUHWaElUYIDhRQYUq8gYiUA6rwAJputfhFU0J7lIxFR9vVY7YzRvcFckfr0DNI4VQVVlPnRPkUxQa--BlldMaCIppWugjgKLwqiSiHywKpSMlBWgY2z1ry4ueEBo1WEP-mEf88wRk4cFQBCKtctCQnIG2GsnATqTl-VGUAsuzeNWZiFSwXiTy~gQ094yWx-K06fFZUDt4CMiLZVhGlixiInD~34FCRC9LVMtFcqiFB2M-Ql2AAAA")
}

View File

@@ -0,0 +1,6 @@
package com.muwire.core.upload
class ContentRequest extends Request {
Range range
int have
}

View File

@@ -0,0 +1,126 @@
package com.muwire.core.upload
import java.nio.ByteBuffer
import java.nio.channels.FileChannel
import java.nio.charset.StandardCharsets
import java.nio.file.Files
import java.nio.file.StandardOpenOption
import java.util.stream.Collectors
import com.muwire.core.Persona
import com.muwire.core.connection.Endpoint
import com.muwire.core.mesh.Mesh
import com.muwire.core.util.DataUtil
import net.i2p.data.Destination
class ContentUploader extends Uploader {
private final File file
private final ContentRequest request
private final Mesh mesh
private final int pieceSize
ContentUploader(File file, ContentRequest request, Endpoint endpoint, Mesh mesh, int pieceSize) {
super(endpoint)
this.file = file
this.request = request
this.mesh = mesh
this.pieceSize = pieceSize
}
@Override
void respond() {
OutputStream os = endpoint.getOutputStream()
Range range = request.getRange()
boolean satisfiable = true
final long length = file.length()
if (range.start >= length || range.end >= length)
satisfiable = false
if (satisfiable) {
int startPiece = range.start / (0x1 << pieceSize)
int endPiece = range.end / (0x1 << pieceSize)
for (int i = startPiece; i <= endPiece; i++)
satisfiable &= mesh.pieces.isDownloaded(i)
}
if (!satisfiable) {
os.write("416 Range Not Satisfiable\r\n".getBytes(StandardCharsets.US_ASCII))
writeMesh(request.downloader)
os.write("\r\n".getBytes(StandardCharsets.US_ASCII))
os.flush()
return
}
os.write("200 OK\r\n".getBytes(StandardCharsets.US_ASCII))
os.write("Content-Range: $range.start-$range.end\r\n".getBytes(StandardCharsets.US_ASCII))
writeMesh(request.downloader)
os.write("\r\n".getBytes(StandardCharsets.US_ASCII))
FileChannel channel = null
try {
channel = Files.newByteChannel(file.toPath(), EnumSet.of(StandardOpenOption.READ))
mapped = channel.map(FileChannel.MapMode.READ_ONLY, range.start, range.end - range.start + 1)
byte [] tmp = new byte[0x1 << 13]
while(mapped.hasRemaining()) {
int start = mapped.position()
synchronized(this) {
mapped.get(tmp, 0, Math.min(tmp.length, mapped.remaining()))
}
int read = mapped.position() - start
endpoint.getOutputStream().write(tmp, 0, read)
}
} finally {
try {channel?.close() } catch (IOException ignored) {}
endpoint.getOutputStream().flush()
synchronized(this) {
DataUtil.tryUnmap(mapped)
mapped = null
}
}
}
private void writeMesh(Persona toExclude) {
String xHave = DataUtil.encodeXHave(mesh.pieces.getDownloaded(), mesh.pieces.nPieces)
endpoint.getOutputStream().write("X-Have: $xHave\r\n".getBytes(StandardCharsets.US_ASCII))
Set<Persona> sources = mesh.getRandom(9, toExclude)
if (!sources.isEmpty()) {
String xAlts = sources.stream().map({ it.toBase64() }).collect(Collectors.joining(","))
endpoint.getOutputStream().write("X-Alt: $xAlts\r\n".getBytes(StandardCharsets.US_ASCII))
}
}
@Override
public String getName() {
return file.getName();
}
@Override
public synchronized int getProgress() {
if (mapped == null)
return 0
int position = mapped.position()
int total = request.getRange().end - request.getRange().start
(int)(position * 100.0 / total)
}
@Override
public String getDownloader() {
request.downloader.getHumanReadableName()
}
@Override
public int getDonePieces() {
return request.have;
}
@Override
public int getTotalPieces() {
return mesh.pieces.nPieces;
}
@Override
public long getTotalSize() {
return file.length();
}
}
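For illustration only (not part of the commits above): the header block a ContentUploader writes before streaming the requested bytes; the range, bitfield and personas below are made up.
def headers = "200 OK\r\n" +
    "Content-Range: 0-131071\r\n" +               // inclusive byte range being served
    "X-Have: Dw==\r\n" +                          // Base64 bitfield of pieces this side has
    "X-Alt: cGVyc29uYTE=,cGVyc29uYTI=\r\n" +      // up to 9 random alternate sources
    "\r\n"
print headers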

View File

@@ -0,0 +1,4 @@
package com.muwire.core.upload
class HashListRequest extends Request {
}

View File

@@ -0,0 +1,68 @@
package com.muwire.core.upload
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import com.muwire.core.InfoHash
import com.muwire.core.connection.Endpoint
import net.i2p.data.Base64
class HashListUploader extends Uploader {
private final InfoHash infoHash
private final HashListRequest request
HashListUploader(Endpoint endpoint, InfoHash infoHash, HashListRequest request) {
super(endpoint)
this.infoHash = infoHash
mapped = ByteBuffer.wrap(infoHash.getHashList())
this.request = request
}
void respond() {
OutputStream os = endpoint.getOutputStream()
os.write("200 OK\r\n".getBytes(StandardCharsets.US_ASCII))
os.write("Content-Range: 0-${mapped.remaining()}\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
byte[]tmp = new byte[0x1 << 13]
while(mapped.hasRemaining()) {
int start = mapped.position()
synchronized(this) {
mapped.get(tmp, 0, Math.min(tmp.length, mapped.remaining()))
}
int read = mapped.position() - start
endpoint.getOutputStream().write(tmp, 0, read)
}
endpoint.getOutputStream().flush()
}
@Override
public String getName() {
return "Hash list for " + Base64.encode(infoHash.getRoot());
}
@Override
public synchronized int getProgress() {
(int)(mapped.position() * 100.0 / mapped.capacity())
}
@Override
public String getDownloader() {
request.downloader.getHumanReadableName()
}
@Override
public int getDonePieces() {
return 0;
}
@Override
public int getTotalPieces() {
return 1;
}
@Override
public long getTotalSize() {
return -1;
}
}

View File

@@ -2,7 +2,7 @@ package com.muwire.core.upload
class Range {
final long start, end
Range(long start, long end) {
this.start = start
this.end = end
View File

@@ -4,71 +4,29 @@ import java.nio.charset.StandardCharsets
import com.muwire.core.Constants
import com.muwire.core.InfoHash
import com.muwire.core.Persona
import com.muwire.core.util.DataUtil
import groovy.util.logging.Log
import net.i2p.data.Base64
@Log
class Request {
private static final byte R = "\r".getBytes(StandardCharsets.US_ASCII)[0]
private static final byte N = "\n".getBytes(StandardCharsets.US_ASCII)[0]
InfoHash infoHash
Persona downloader
Map<String, String> headers
static Request parseContentRequest(InfoHash infoHash, InputStream is) throws IOException {
Map<String, String> headers = parseHeaders(is)
if (!headers.containsKey("Range"))
throw new IOException("Range header not found")
String range = headers.get("Range").trim()
String[] split = range.split("-")
if (split.length != 2)
@@ -81,11 +39,85 @@ class Request {
} catch (NumberFormatException nfe) {
throw new IOException(nfe)
}
if (start < 0 || end < start)
throw new IOException("Invalid range $start - $end")
Persona downloader = null
if (headers.containsKey("X-Persona")) {
def encoded = headers["X-Persona"].trim()
def decoded = Base64.decode(encoded)
downloader = new Persona(new ByteArrayInputStream(decoded))
}
int have = 0
if (headers.containsKey("X-Have")) {
def encoded = headers["X-Have"].trim()
have = DataUtil.decodeXHave(encoded).size()
}
new ContentRequest( infoHash : infoHash, range : new Range(start, end),
headers : headers, downloader : downloader, have : have)
}
static Request parseHashListRequest(InfoHash infoHash, InputStream is) throws IOException {
Map<String,String> headers = parseHeaders(is)
Persona downloader = null
if (headers.containsKey("X-Persona")) {
def encoded = headers["X-Persona"].trim()
def decoded = Base64.decode(encoded)
downloader = new Persona(new ByteArrayInputStream(decoded))
}
new HashListRequest(infoHash : infoHash, headers : headers, downloader : downloader)
}
private static Map<String, String> parseHeaders(InputStream is) {
Map<String,String> headers = new HashMap<>()
byte [] tmp = new byte[Constants.MAX_HEADER_SIZE]
while(headers.size() < Constants.MAX_HEADERS) {
boolean r = false
boolean n = false
int idx = 0
while (true) {
byte read = is.read()
if (read == -1)
throw new IOException("Stream closed")
if (!r && read == N)
throw new IOException("Received N before R")
if (read == R) {
if (r)
throw new IOException("double R")
r = true
continue
}
if (r && !n) {
if (read != N)
throw new IOException("R not followed by N")
n = true
break
}
if (idx == 0x1 << 14)
throw new IOException("Header too long")
tmp[idx++] = read
}
if (idx == 0)
break
String header = new String(tmp, 0, idx, StandardCharsets.US_ASCII)
log.fine("Read header $header")
int keyIdx = header.indexOf(":")
if (keyIdx < 1)
throw new IOException("Header key not found")
if (keyIdx == header.length())
throw new IOException("Header value not found")
String key = header.substring(0, keyIdx)
String value = header.substring(keyIdx + 1)
headers.put(key, value)
}
headers
}
}
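For illustration only (not part of the commits above): a request header block parseContentRequest() would accept once real Base64 values are substituted for the placeholders; only Range is mandatory, X-Persona and X-Have are optional.
def block = "Range: 0-131071\r\n" +
    "X-Persona: <Base64 persona of the downloader>\r\n" +
    "X-Have: <Base64 bitfield of pieces the downloader already has>\r\n" +
    "\r\n"
print block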

View File

@@ -6,7 +6,12 @@ import com.muwire.core.EventBus
import com.muwire.core.InfoHash
import com.muwire.core.SharedFile
import com.muwire.core.connection.Endpoint
import com.muwire.core.download.DownloadManager
import com.muwire.core.download.Downloader
import com.muwire.core.download.SourceDiscoveredEvent
import com.muwire.core.files.FileManager
import com.muwire.core.mesh.Mesh
import com.muwire.core.mesh.MeshManager
import groovy.util.logging.Log
import net.i2p.data.Base64
@@ -15,17 +20,22 @@ import net.i2p.data.Base64
public class UploadManager {
private final EventBus eventBus
private final FileManager fileManager
private final MeshManager meshManager
private final DownloadManager downloadManager
public UploadManager() {}
public UploadManager(EventBus eventBus, FileManager fileManager,
MeshManager meshManager, DownloadManager downloadManager) {
this.eventBus = eventBus
this.fileManager = fileManager
this.meshManager = meshManager
this.downloadManager = downloadManager
}
public void processGET(Endpoint e) throws IOException {
byte [] infoHashStringBytes = new byte[44]
DataInputStream dis = new DataInputStream(e.getInputStream())
boolean first = true
while(true) {
if (first)
@@ -44,8 +54,10 @@ public class UploadManager {
log.info("Responding to upload request for root $infoHashString")
byte [] infoHashRoot = Base64.decode(infoHashString)
InfoHash infoHash = new InfoHash(infoHashRoot)
Set<SharedFile> sharedFiles = fileManager.getSharedFiles(infoHashRoot)
Downloader downloader = downloadManager.downloaders.get(infoHash)
if (downloader == null && (sharedFiles == null || sharedFiles.isEmpty())) {
log.info "file not found"
e.getOutputStream().write("404 File Not Found\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
e.getOutputStream().flush()
@@ -61,8 +73,156 @@ public class UploadManager {
return
}
ContentRequest request = Request.parseContentRequest(infoHash, e.getInputStream())
if (request.downloader != null && request.downloader.destination != e.destination) {
log.info("Downloader persona doesn't match their destination")
e.close()
return
}
if (request.have > 0)
eventBus.publish(new SourceDiscoveredEvent(infoHash : request.infoHash, source : request.downloader))
Mesh mesh
File file
int pieceSize
if (downloader != null) {
mesh = meshManager.get(infoHash)
file = downloader.incompleteFile
pieceSize = downloader.pieceSizePow2
} else {
SharedFile sharedFile = sharedFiles.iterator().next();
mesh = meshManager.getOrCreate(request.infoHash, sharedFile.NPieces)
file = sharedFile.file
pieceSize = sharedFile.pieceSize
}
Uploader uploader = new ContentUploader(file, request, e, mesh, pieceSize)
eventBus.publish(new UploadEvent(uploader : uploader))
try {
uploader.respond()
} finally {
eventBus.publish(new UploadFinishedEvent(uploader : uploader))
}
}
}
public void processHashList(Endpoint e) {
byte [] infoHashStringBytes = new byte[44]
DataInputStream dis = new DataInputStream(e.getInputStream())
dis.readFully(infoHashStringBytes)
String infoHashString = new String(infoHashStringBytes, StandardCharsets.US_ASCII)
log.info("Responding to hashlist request for root $infoHashString")
byte [] infoHashRoot = Base64.decode(infoHashString)
InfoHash infoHash = new InfoHash(infoHashRoot)
Downloader downloader = downloadManager.downloaders.get(infoHash)
Set<SharedFile> sharedFiles = fileManager.getSharedFiles(infoHashRoot)
if (downloader == null && (sharedFiles == null || sharedFiles.isEmpty())) {
log.info "file not found"
e.getOutputStream().write("404 File Not Found\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
e.getOutputStream().flush()
e.close()
return
}
byte [] rn = new byte[2]
dis.readFully(rn)
if (rn != "\r\n".getBytes(StandardCharsets.US_ASCII)) {
log.warning("Malformed HASHLIST header")
e.close()
return
}
Request request = Request.parseHashListRequest(infoHash, e.getInputStream())
if (request.downloader != null && request.downloader.destination != e.destination) {
log.info("Downloader persona doesn't match their destination")
e.close()
return
}
InfoHash fullInfoHash
if (downloader == null) {
fullInfoHash = sharedFiles.iterator().next().infoHash
} else {
byte [] hashList = downloader.getInfoHash().getHashList()
if (hashList != null && hashList.length > 0)
fullInfoHash = downloader.getInfoHash()
else {
log.info("infohash not found in downloader")
e.getOutputStream().write("404 File Not Found\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
e.getOutputStream().flush()
e.close()
return
}
}
Uploader uploader = new HashListUploader(e, fullInfoHash, request)
eventBus.publish(new UploadEvent(uploader : uploader))
try {
uploader.respond()
} finally {
eventBus.publish(new UploadFinishedEvent(uploader : uploader))
}
// proceed with content
while(true) {
byte[] get = new byte[4]
dis.readFully(get)
if (get != "GET ".getBytes(StandardCharsets.US_ASCII)) {
log.warning("received a method other than GET on subsequent call")
e.close()
return
}
dis.readFully(infoHashStringBytes)
infoHashString = new String(infoHashStringBytes, StandardCharsets.US_ASCII)
log.info("Responding to upload request for root $infoHashString")
infoHashRoot = Base64.decode(infoHashString)
infoHash = new InfoHash(infoHashRoot)
sharedFiles = fileManager.getSharedFiles(infoHashRoot)
downloader = downloadManager.downloaders.get(infoHash)
if (downloader == null && (sharedFiles == null || sharedFiles.isEmpty())) {
log.info "file not found"
e.getOutputStream().write("404 File Not Found\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
e.getOutputStream().flush()
e.close()
return
}
rn = new byte[2]
dis.readFully(rn)
if (rn != "\r\n".getBytes(StandardCharsets.US_ASCII)) {
log.warning("Malformed GET header")
e.close()
return
}
request = Request.parseContentRequest(new InfoHash(infoHashRoot), e.getInputStream())
if (request.downloader != null && request.downloader.destination != e.destination) {
log.info("Downloader persona doesn't match their destination")
e.close()
return
}
if (request.have > 0)
eventBus.publish(new SourceDiscoveredEvent(infoHash : request.infoHash, source : request.downloader))
Mesh mesh
File file
int pieceSize
if (downloader != null) {
mesh = meshManager.get(infoHash)
file = downloader.incompleteFile
pieceSize = downloader.pieceSizePow2
} else {
SharedFile sharedFile = sharedFiles.iterator().next();
mesh = meshManager.getOrCreate(request.infoHash, sharedFile.NPieces)
file = sharedFile.file
pieceSize = sharedFile.pieceSize
}
uploader = new ContentUploader(file, request, e, mesh, pieceSize)
eventBus.publish(new UploadEvent(uploader : uploader))
try {
uploader.respond()
@@ -70,7 +230,6 @@ public class UploadManager {
eventBus.publish(new UploadFinishedEvent(uploader : uploader))
}
}
}
}

View File

@@ -8,52 +8,34 @@ import java.nio.file.StandardOpenOption
import com.muwire.core.connection.Endpoint
abstract class Uploader {
protected final Endpoint endpoint
protected ByteBuffer mapped
Uploader(Endpoint endpoint) {
this.endpoint = endpoint
}
abstract void respond()
public synchronized int getPosition() {
if (mapped == null)
return -1
mapped.position()
}
abstract String getName();
/**
* @return an integer between 0 and 100
*/
abstract int getProgress();
abstract String getDownloader();
abstract int getDonePieces();
abstract int getTotalPieces();
abstract long getTotalSize();
}

View File

@@ -1,46 +1,51 @@
package com.muwire.core.util
import java.lang.reflect.Field
import java.lang.reflect.Method
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import com.muwire.core.Constants
import net.i2p.data.Base64
class DataUtil {
private final static int MAX_SHORT = (0x1 << 16) - 1
static void writeUnsignedShort(int value, OutputStream os) {
if (value > MAX_SHORT || value < 0)
throw new IllegalArgumentException("$value invalid")
byte lsb = (byte) (value & 0xFF)
byte msb = (byte) (value >> 8)
os.write(msb)
os.write(lsb)
}
private final static int MAX_HEADER = 0x7FFFFF
static void packHeader(int length, byte [] header) {
if (header.length != 3)
throw new IllegalArgumentException("header length $header.length")
if (length < 0 || length > MAX_HEADER)
throw new IllegalArgumentException("length $length")
header[2] = (byte) (length & 0xFF)
header[1] = (byte) ((length >> 8) & 0xFF)
header[0] = (byte) ((length >> 16) & 0x7F)
}
static int readLength(byte [] header) {
if (header.length != 3)
throw new IllegalArgumentException("header length $header.length")
return (((int)(header[0] & 0x7F)) << 16) |
(((int)(header[1] & 0xFF) << 8)) |
((int)header[2] & 0xFF)
}
static String readi18nString(byte [] encoded) {
if (encoded.length < 2)
throw new IllegalArgumentException("encoding too short $encoded.length")
@@ -49,9 +54,9 @@ class DataUtil {
throw new IllegalArgumentException("encoding doesn't match length, expected $length found $encoded.length") throw new IllegalArgumentException("encoding doesn't match length, expected $length found $encoded.length")
byte [] string = new byte[length] byte [] string = new byte[length]
System.arraycopy(encoded, 2, string, 0, length) System.arraycopy(encoded, 2, string, 0, length)
new String(string, StandardCharsets.UTF_8) new String(string, StandardCharsets.UTF_8)
} }
static byte[] encodei18nString(String string) { static byte[] encodei18nString(String string) {
byte [] utf8 = string.getBytes(StandardCharsets.UTF_8) byte [] utf8 = string.getBytes(StandardCharsets.UTF_8)
if (utf8.length > Short.MAX_VALUE) if (utf8.length > Short.MAX_VALUE)
@@ -59,11 +64,11 @@ class DataUtil {
        def baos = new ByteArrayOutputStream()
        def daos = new DataOutputStream(baos)
        daos.writeShort((short) utf8.length)
        daos.write(utf8)
        daos.close()
        baos.toByteArray()
    }
    public static String readTillRN(InputStream is) {
        def baos = new ByteArrayOutputStream()
        while(baos.size() < (Constants.MAX_HEADER_SIZE)) {
@@ -79,4 +84,73 @@ class DataUtil {
        }
        new String(baos.toByteArray(), StandardCharsets.US_ASCII)
    }
+    public static String encodeXHave(List<Integer> pieces, int totalPieces) {
+        int bytes = totalPieces / 8
+        if (totalPieces % 8 != 0)
+            bytes++
+        byte[] raw = new byte[bytes]
+        pieces.each {
+            int byteIdx = it / 8
+            int offset = it % 8
+            int mask = 0x80 >>> offset
+            raw[byteIdx] |= mask
+        }
+        Base64.encode(raw)
+    }
+    public static List<Integer> decodeXHave(String xHave) {
+        byte [] availablePieces = Base64.decode(xHave)
+        List<Integer> available = new ArrayList<>()
+        availablePieces.eachWithIndex {b, i ->
+            for (int j = 0; j < 8 ; j++) {
+                byte mask = 0x80 >>> j
+                if ((b & mask) == mask) {
+                    available.add(i * 8 + j)
+                }
+            }
+        }
+        available
+    }
+    public static Exception findRoot(Exception e) {
+        while(e.getCause() != null)
+            e = e.getCause()
+        e
+    }
+    public static void tryUnmap(ByteBuffer cb) {
+        if (cb==null || !cb.isDirect()) return;
+        // we could use this type cast and call functions without reflection code,
+        // but static import from sun.* package is risky for non-SUN virtual machine.
+        //try { ((sun.nio.ch.DirectBuffer)cb).cleaner().clean(); } catch (Exception ex) { }
+        // JavaSpecVer: 1.6, 1.7, 1.8, 9, 10
+        boolean isOldJDK = System.getProperty("java.specification.version","99").startsWith("1.");
+        try {
+            if (isOldJDK) {
+                Method cleaner = cb.getClass().getMethod("cleaner");
+                cleaner.setAccessible(true);
+                Method clean = Class.forName("sun.misc.Cleaner").getMethod("clean");
+                clean.setAccessible(true);
+                clean.invoke(cleaner.invoke(cb));
+            } else {
+                Class unsafeClass;
+                try {
+                    unsafeClass = Class.forName("sun.misc.Unsafe");
+                } catch(Exception ex) {
+                    // jdk.internal.misc.Unsafe doesn't yet have an invokeCleaner() method,
+                    // but that method should be added if sun.misc.Unsafe is removed.
+                    unsafeClass = Class.forName("jdk.internal.misc.Unsafe");
+                }
+                Method clean = unsafeClass.getMethod("invokeCleaner", ByteBuffer.class);
+                clean.setAccessible(true);
+                Field theUnsafeField = unsafeClass.getDeclaredField("theUnsafe");
+                theUnsafeField.setAccessible(true);
+                Object theUnsafe = theUnsafeField.get(null);
+                clean.invoke(theUnsafe, cb);
+            }
+        } catch(Exception ex) { }
+        cb = null;
+    }
}
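
The new encodeXHave/decodeXHave helpers pack the indices of locally available pieces into a bitfield, most significant bit first within each byte, and Base64-encode it for the X-Have header so partial downloads can advertise which pieces they already hold. A minimal round-trip sketch in plain Java follows; it uses java.util.Base64 for brevity, whereas the code above uses I2P's net.i2p.data.Base64 alphabet:

import java.util.ArrayList;
import java.util.Base64;
import java.util.List;

// Hedged sketch of the X-Have bitfield round trip, not the MuWire implementation.
// Piece i maps to bit (0x80 >>> (i % 8)) of byte i / 8, matching encodeXHave above.
public class XHaveSketch {
    static String encode(List<Integer> pieces, int totalPieces) {
        byte[] raw = new byte[(totalPieces + 7) / 8];
        for (int piece : pieces)
            raw[piece / 8] |= (byte) (0x80 >>> (piece % 8));
        return Base64.getEncoder().encodeToString(raw);
    }

    static List<Integer> decode(String xHave) {
        byte[] raw = Base64.getDecoder().decode(xHave);
        List<Integer> available = new ArrayList<>();
        for (int i = 0; i < raw.length; i++)
            for (int j = 0; j < 8; j++)
                if ((raw[i] & (0x80 >>> j)) != 0)
                    available.add(i * 8 + j);
        return available;
    }

    public static void main(String[] args) {
        // Pieces 0, 3 and 9 of a 12-piece file -> two-byte bitfield 0x90 0x40.
        String header = encode(List.of(0, 3, 9), 12);
        System.out.println(header + " -> " + decode(header)); // prints [0, 3, 9]
    }
}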

View File

@@ -16,10 +16,10 @@ class JULLog extends Log {
        I2P_TO_JUL.put(Log.ERROR, Level.SEVERE)
        I2P_TO_JUL.put(Log.CRIT, Level.SEVERE)
    }
    private final Logger delegate
    private final Level level
    public JULLog(Class<?> cls) {
        super(cls)
        delegate = Logger.getLogger(cls.getName())
@@ -31,7 +31,7 @@ class JULLog extends Log {
        delegate = Logger.getLogger(name)
        level = findLevel(delegate)
    }
    private static Level findLevel(Logger log) {
        while (log.getLevel() == null)
            log = log.getParent()
@@ -55,21 +55,21 @@ class JULLog extends Log {
    @Override
    public boolean shouldDebug() {
-        level.intValue().intValue() >= Level.FINE.intValue()
+        level.intValue().intValue() <= Level.FINE.intValue()
    }
    @Override
    public boolean shouldInfo() {
-        level.intValue().intValue() >= Level.INFO.intValue()
+        level.intValue().intValue() <= Level.INFO.intValue()
    }
    @Override
    public boolean shouldWarn() {
-        level.intValue().intValue() >= Level.WARNING.intValue()
+        level.intValue().intValue() <= Level.WARNING.intValue()
    }
    @Override
    public boolean shouldError() {
-        level.intValue().intValue() >= Level.SEVERE.intValue()
+        level.intValue().intValue() <= Level.SEVERE.intValue()
    }
}
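
The flipped comparisons are the substance of this fix: in java.util.logging, more verbose levels have smaller numeric values (FINE is 500, INFO 800, WARNING 900, SEVERE 1000), so a logger configured at INFO must report shouldDebug() as false and shouldWarn() as true, which requires <= rather than >=. A small standalone Java check of that rule, independent of the MuWire classes:

import java.util.logging.Level;

public class LevelCheck {
    // Mirrors the fixed JULLog checks: a message at "requested" severity is logged
    // only if the configured level is numerically <= the requested one.
    static boolean enabled(Level configured, Level requested) {
        return configured.intValue() <= requested.intValue();
    }

    public static void main(String[] args) {
        Level configured = Level.INFO;                           // intValue() == 800
        System.out.println(enabled(configured, Level.FINE));     // false: 800 <= 500 fails
        System.out.println(enabled(configured, Level.INFO));     // true
        System.out.println(enabled(configured, Level.WARNING));  // true
        System.out.println(enabled(configured, Level.SEVERE));   // true
    }
}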

View File

@@ -5,14 +5,14 @@ import net.i2p.util.Log
import net.i2p.util.LogManager
class MuWireLogManager extends LogManager {
    private static final Map<Class<?>, Log> classLogs = new HashMap<>()
    private static final Map<String, Log> stringLogs = new HashMap<>()
    MuWireLogManager() {
        super(I2PAppContext.getGlobalContext())
    }
    @Override
    public synchronized Log getLog(Class<?> cls, String name) {
@@ -24,7 +24,7 @@ class MuWireLogManager extends LogManager {
            }
            return rv
        }
        Log rv = stringLogs.get(name)
        if (rv == null) {
            rv = new JULLog(name)
@@ -32,5 +32,5 @@ class MuWireLogManager extends LogManager {
        }
        rv
    }
}

View File

@@ -1,21 +1,23 @@
package com.muwire.core;
import java.io.File;
+import java.io.IOException;
import java.util.Set;
import net.i2p.data.Destination;
public class DownloadedFile extends SharedFile {
    private final Set<Destination> sources;
-    public DownloadedFile(File file, InfoHash infoHash, Set<Destination> sources) {
-        super(file, infoHash);
+    public DownloadedFile(File file, InfoHash infoHash, int pieceSize, Set<Destination> sources)
+            throws IOException {
+        super(file, infoHash, pieceSize);
        this.sources = sources;
    }
    public Set<Destination> getSources() {
        return sources;
    }
}
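
DownloadedFile now forwards a piece size to SharedFile (super(file, infoHash, pieceSize)) and declares IOException, so the piece layout travels with the file record itself. A hedged, self-contained Java sketch of why that is convenient; the pieceSizePow2 naming and the power-of-two interpretation are assumptions borrowed from the downloader field seen in the UploadManager hunk, not guaranteed by this diff:

import java.io.File;

// Hedged sketch, not MuWire code: a file record that carries its piece size can
// derive the piece count and the length of the final, shorter piece locally.
public class FileRecordSketch {
    private final File file;
    private final int pieceSizePow2;   // assumed: piece size = 1 << pieceSizePow2

    public FileRecordSketch(File file, int pieceSizePow2) {
        this.file = file;
        this.pieceSizePow2 = pieceSizePow2;
    }

    public int getNumPieces() {
        long pieceSize = 1L << pieceSizePow2;
        long length = file.length();
        int pieces = (int) (length / pieceSize);
        if (length % pieceSize != 0)
            pieces++;                  // account for the trailing partial piece
        return pieces;
    }
}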

Some files were not shown because too many files have changed in this diff.