Compare commits

...

254 Commits

Author SHA1 Message Date
Zlatin Balevsky
b412f9fb0c Release 0.5.3 2019-10-23 09:01:19 +01:00
Zlatin Balevsky
b24d04811d set apple quit strategy 2019-10-23 08:55:10 +01:00
Zlatin Balevsky
771f645df0 proper close 2019-10-23 08:48:53 +01:00
Zlatin Balevsky
b6483ad0f4 add an exit menu 2019-10-23 08:45:03 +01:00
Zlatin Balevsky
decb72c8ef show a warning that MW will continue running 2019-10-23 08:31:23 +01:00
Zlatin Balevsky
439b3bf18b fixes 2019-10-23 06:46:20 +01:00
Zlatin Balevsky
06679ffee0 only show MW if the core has loaded 2019-10-23 06:39:25 +01:00
Zlatin Balevsky
1d5b12e2d7 if core is not initialized, just shutdown 2019-10-23 06:31:08 +01:00
Zlatin Balevsky
4e6e1b6f5b Do not show warnings if core is already shutting down 2019-10-23 06:15:34 +01:00
Zlatin Balevsky
f0b5361d7b smaller icon 2019-10-23 06:06:37 +01:00
Zlatin Balevsky
e0c6bfbf51 show the clsoing window if tray is disabled 2019-10-23 06:01:21 +01:00
Zlatin Balevsky
2a0ecd8a47 fix constructor 2019-10-23 05:48:14 +01:00
Zlatin Balevsky
fb1804e849 Use explicit event to shutdown the application. This fixes closing on Linux 2019-10-23 05:45:50 +01:00
Zlatin Balevsky
d4eaa0df8d do not shutdown core on awt thread 2019-10-22 23:37:44 +01:00
Zlatin Balevsky
ffde6ac86f show a window while MW is shutting down 2019-10-22 23:26:54 +01:00
Zlatin Balevsky
7ad677ead2 add an explicit menu to show MW 2019-10-22 21:48:51 +01:00
Zlatin Balevsky
ddb0568aab do not auto-shutdown 2019-10-22 21:40:47 +01:00
Zlatin Balevsky
ff50a84a48 try to get a tray icon working 2019-10-22 21:34:50 +01:00
Zlatin Balevsky
770396ba41 update test 2019-10-22 10:31:28 +01:00
Zlatin Balevsky
b55852e993 typo 2019-10-22 10:16:41 +01:00
Zlatin Balevsky
a6945275a4 i2p 0.9.43 2019-10-22 08:27:08 +01:00
Zlatin Balevsky
7241809e55 update readme 2019-10-22 00:42:18 +01:00
Zlatin Balevsky
54073af933 Release 0.5.2 2019-10-22 00:28:53 +01:00
Zlatin Balevsky
a32903fc8c prettier i2p status panel 2019-10-22 00:11:57 +01:00
Zlatin Balevsky
e40520be46 count hopeless and failing hosts, prettier status panel 2019-10-21 23:57:15 +01:00
Zlatin Balevsky
97482b949a de-capitalize for consistency 2019-10-21 22:50:21 +01:00
Zlatin Balevsky
92ee107312 remove duplicate variable 2019-10-21 22:23:29 +01:00
Zlatin Balevsky
2e8082af64 use titled borders everywhere for consistency 2019-10-21 22:12:39 +01:00
Zlatin Balevsky
8da5a428c9 make the i2p version a variable 2019-10-21 21:02:37 +01:00
Zlatin Balevsky
fd46b3c7d6 do not display fractions in percentage 2019-10-21 20:37:30 +01:00
Zlatin Balevsky
eea3b2563b allign router-specific settings 2019-10-21 20:16:36 +01:00
Zlatin Balevsky
50719f3828 move settings to top of panel 2019-10-21 20:12:08 +01:00
Zlatin Balevsky
01a45a89a8 reorganize the options view 2019-10-21 19:44:33 +01:00
Zlatin Balevsky
66bd249ed3 show percentage of fetched results 2019-10-21 18:28:37 +01:00
Zlatin Balevsky
265cd6ee15 more accurate description 2019-10-20 20:19:47 +01:00
Zlatin Balevsky
1dc88cb96b make speed smoothing interval configurable 2019-10-20 20:09:24 +01:00
Zlatin Balevsky
3e10d497b1 add an ETA column to downloads table 2019-10-20 19:11:32 +01:00
Zlatin Balevsky
9a0b3bb9d6 fix download table selection when sorted 2019-10-20 18:47:48 +01:00
Zlatin Balevsky
a1fe3c01b9 if no incompletes are in serialized json, use the default one, assuming an upgrade 2019-10-20 18:24:16 +01:00
Zlatin Balevsky
ab323db62a add ability to choose the incompletes location 2019-10-20 18:16:07 +01:00
Zlatin Balevsky
d954387e41 fix showing of local files in results 2019-10-20 11:59:48 +01:00
Zlatin Balevsky
ea9db21a18 wip on compressed results 2019-10-20 01:01:34 +01:00
Zlatin Balevsky
136cf89c9b groovy != java 2019-10-20 00:55:31 +01:00
Zlatin Balevsky
46de1baf88 compressed results 2019-10-20 00:54:32 +01:00
Zlatin Balevsky
13f7b8563c fix a bug where disabled browsing was shown as browsable. Log the response code if it's not 200 2019-10-19 22:33:47 +01:00
Zlatin Balevsky
9c15208f3a Release 0.5.1 2019-10-19 19:11:04 +01:00
Zlatin Balevsky
a9ce9d96b3 wip on menu; close zlib stream 2019-10-19 18:54:58 +01:00
Zlatin Balevsky
4d2a5a8018 MainFrameModel doesn't need to listen to single result events anymore 2019-10-19 18:12:30 +01:00
Zlatin Balevsky
8395047386 compress results in browse connections 2019-10-19 17:59:08 +01:00
Zlatin Balevsky
cb23aa44f0 enable SEVERE log messages if no config file specified 2019-10-19 05:53:33 +01:00
Zlatin Balevsky
dbcb8508b8 add a view comment button 2019-10-19 05:35:04 +01:00
Zlatin Balevsky
47d406d93b add a border around the two panels 2019-10-19 04:59:37 +01:00
Zlatin Balevsky
e06f1805c2 redirect griffon logging to jul 2019-10-19 04:45:45 +01:00
Zlatin Balevsky
2b04374e23 add option to disable browsing of files, make the dialog bigger 2019-10-19 00:53:13 +01:00
Zlatin Balevsky
383addbc37 implement view comment from browse window 2019-10-19 00:30:03 +01:00
Zlatin Balevsky
cc39cd7f8e implement downloading from browse window 2019-10-19 00:23:43 +01:00
Zlatin Balevsky
83665d7524 wip on browse host 2019-10-18 23:55:07 +01:00
Zlatin Balevsky
94340480b4 wip on browse host 2019-10-18 23:25:26 +01:00
Zlatin Balevsky
8850d49c63 wip on browse host 2019-10-18 23:16:37 +01:00
Zlatin Balevsky
f0f9d840f0 wip on browse host 2019-10-18 22:35:17 +01:00
Zlatin Balevsky
7f4cd4f331 wip on browse host 2019-10-18 21:17:34 +01:00
Zlatin Balevsky
e6162503f6 wip on browse host 2019-10-18 20:29:39 +01:00
Zlatin Balevsky
7a5d71dc36 add copy name to clipboard option 2019-10-17 19:01:53 +01:00
Zlatin Balevsky
6fa39a5e35 turn off logging if there is no config file 2019-10-17 18:39:28 +01:00
Zlatin Balevsky
c5ae804f61 Implement automatic font sizing; set all font properties on change of font 2019-10-17 18:15:04 +01:00
Zlatin Balevsky
d7695b448d remove my DS_Store 2019-10-17 05:50:29 +01:00
Zlatin Balevsky
946d9c8f32 disable sharing of hidden files by default, add option to enable 2019-10-17 05:46:27 +01:00
Zlatin Balevsky
02441ca1e3 add option to disable searching in comments 2019-10-16 19:57:18 +01:00
Zlatin Balevsky
5fa21b2360 keep tree expanded on modifications 2019-10-16 14:42:40 +01:00
Zlatin Balevsky
d4c08f4fe6 only remove from index if no more files have the same comment pt.2 2019-10-16 14:23:12 +01:00
Zlatin Balevsky
942de287c6 only remove from index if no more files have the same comment 2019-10-16 14:21:50 +01:00
Zlatin Balevsky
d0299f80c6 search through comments 2019-10-16 14:06:11 +01:00
Zlatin Balevsky
1227cf9263 Release 0.5.0 2019-10-15 12:38:25 +01:00
Zlatin Balevsky
a05575485f move things around 2019-10-15 10:40:50 +01:00
Zlatin Balevsky
f5bccd8126 All shared directories are watched directories. Fix manipulation of tree structure 2019-10-15 08:38:23 +01:00
Zlatin Balevsky
70fb789abf remove the watched directories table 2019-10-15 04:51:21 +01:00
Zlatin Balevsky
feb712c253 Move persisting of files on dedicated thread. Introduce an event to forcefully persist files. Do that immediately after unsharing anything 2019-10-15 04:21:40 +01:00
Zlatin Balevsky
d22b403e2a stop watching multiple directories at once 2019-10-14 23:16:05 +01:00
Zlatin Balevsky
a24982e0df fix comments for local results 2019-10-14 22:47:52 +01:00
Zlatin Balevsky
6c26019164 allow switching without restart 2019-10-14 21:40:03 +01:00
Zlatin Balevsky
965fa79bbf fix count of shared files in tree view mode 2019-10-14 20:57:50 +01:00
Zlatin Balevsky
60ddb85461 Tree view of the shared files. The count is wrong for some reason 2019-10-14 20:13:25 +01:00
Zlatin Balevsky
c7284623bc Release 0.4.16 2019-10-13 22:14:33 +01:00
Zlatin Balevsky
3e7f2aa70a Add a note about DND, automatically watch shared directories 2019-10-13 20:21:28 +01:00
Zlatin Balevsky
4f436a636c implement drop on MW -> share files/directories 2019-10-13 20:00:08 +01:00
Zlatin Balevsky
b49dbc30c3 comment already decoded by the time it gets to the gui 2019-10-11 19:01:40 +01:00
Zlatin Balevsky
c25d314e1c typo 2019-10-11 18:56:46 +01:00
Zlatin Balevsky
b28587a275 wip on file comments 2019-10-11 18:42:02 +01:00
Zlatin Balevsky
8b8e5d59be Silence an IllegalArgumentException while sorting downloads table 2019-10-11 11:21:56 +01:00
Zlatin Balevsky
70bbe1f636 update version 2019-10-10 17:33:07 +01:00
Zlatin Balevsky
337605dc0f Release 0.4.15 2019-10-10 16:48:10 +01:00
Zlatin Balevsky
14bdfa6b2e throttle even further - 500/s 2019-10-09 17:34:54 +01:00
Zlatin Balevsky
ed3f9da773 throttle loading even further, to 1000/sec 2019-10-09 16:46:17 +01:00
Zlatin Balevsky
251080d08f throttle loading of files to 500/s 2019-10-09 16:34:09 +01:00
Zlatin Balevsky
f530ab999d operations on multiple selection in shared files table 2019-10-09 03:38:08 +01:00
Zlatin Balevsky
4133384e48 ability to share multiple files and directories 2019-10-08 21:30:34 +01:00
Zlatin Balevsky
600fc98868 update TODO 2019-10-07 12:38:26 +01:00
Zlatin Balevsky
129eeb3b88 JDK needed, not JRE 2019-10-07 12:38:09 +01:00
Zlatin Balevsky
20b51b78a0 reduce priority of file persister thread 2019-10-07 11:59:51 +01:00
Zlatin Balevsky
33fe755b60 implement multiple-selection on downloads table 2019-10-07 04:26:35 +01:00
Zlatin Balevsky
8b0668a134 Rewrite utils into Java, cache the persistable data of shared files to reduce object churn 2019-10-05 22:50:32 +01:00
Zlatin Balevsky
730d2202fd bundles for linux available now 2019-10-05 18:53:43 +01:00
Zlatin Balevsky
69906a986d set i2p.dir.base to prevent router creating files in PWD 2019-10-05 15:03:59 +01:00
Zlatin Balevsky
5bc8fa8633 Preserve selection on refresh #18 2019-10-05 05:13:49 +01:00
Zlatin Balevsky
7de7c9d8f3 Add 'Clear Hits' button to content control panel #18 2019-10-05 05:03:25 +01:00
Zlatin Balevsky
e943f6019d disable all GUI unit tests, enable host-cache unit tests. The 'build' target now succeeds 2019-10-05 04:31:11 +01:00
Zlatin Balevsky
2eec7bec5b fix most core tests 2019-10-05 04:20:14 +01:00
Zlatin Balevsky
c36110cf76 update readme 2019-10-04 16:41:07 +01:00
Zlatin Balevsky
abe28517bc Release 0.4.14 2019-10-04 13:00:57 +01:00
Zlatin Balevsky
15bc4c064d center the button 2019-10-03 21:32:32 +01:00
Zlatin Balevsky
91d771944b add option for sequential download 2019-10-03 20:45:22 +01:00
Zlatin Balevsky
e09c456a13 make the download retry interval in seconds, default still 1 minute 2019-10-03 19:31:15 +01:00
Zlatin Balevsky
d9c1067226 Add Neutral button to search tab, issue #17 2019-10-02 06:02:06 +01:00
Zlatin Balevsky
eda3e7ad3a Add option to not search extra hop, only considered if connecting only to trusted peers, issue #6 2019-10-02 05:45:46 +01:00
Zlatin Balevsky
e9798c7eaa remember last rejection and back off from hosts that reject us. Fix return value of retry and hopelessness predicates 2019-10-01 08:34:43 +01:00
Zlatin Balevsky
66bb4eef5b close outbound establishments on a separate thread 2019-10-01 07:50:29 +01:00
Zlatin Balevsky
55f260b3f4 update version 2019-09-29 19:21:06 +01:00
Zlatin Balevsky
32d4c3965e Release 0.4.13 2019-09-29 19:00:20 +01:00
Zlatin Balevsky
de1534d837 reduce the default host retry interval 2019-09-29 18:45:09 +01:00
Zlatin Balevsky
7b58e8a88a separate setting for the interval after which a host is considered hopeless 2019-09-29 18:43:39 +01:00
Zlatin Balevsky
8a03b89985 clean up the filtering logic; allow serialization of hosts that can be retried 2019-09-29 16:49:02 +01:00
Zlatin Balevsky
1d97374857 track last successful attempt. Only re-attempt hosts if they have ever been successful. Do not serialize hosts considered hopeless 2019-09-29 16:19:19 +01:00
Zlatin Balevsky
549e8c2d98 Release 0.4.12 2019-09-22 16:55:04 +01:00
Zlatin Balevsky
b54d24db0d new update server destination 2019-09-22 16:47:35 +01:00
Zlatin Balevsky
fa12e84345 stronger sig type 2019-09-22 16:23:01 +01:00
Zlatin Balevsky
6430ff2691 bump i2p libs version 2019-09-22 16:13:12 +01:00
Zlatin Balevsky
591313c81c point to the pkg project 2019-09-20 21:09:53 +01:00
Zlatin Balevsky
ce7b6a0c65 change to gasp AA font table, try metal lnf if the others fail 2019-09-16 15:06:45 +01:00
Zlatin Balevsky
5c4d4c4580 embedded router will not work without reseed certificates, so remove it 2019-09-16 15:04:34 +01:00
Zlatin Balevsky
4cb864ff9f update version 2019-09-16 15:03:20 +01:00
Zlatin Balevsky
417675ad07 update dark_trion's hostcache address 2019-07-22 21:48:29 +01:00
Zlatin Balevsky
9513e5ba3c update todo 2019-07-20 13:15:44 +01:00
Zlatin Balevsky
85610cf169 add new host-cache 2019-07-15 22:05:09 +01:00
Zlatin Balevsky
e8322384b8 Release 0.4.11 2019-07-15 14:28:21 +01:00
Zlatin Balevsky
179279ed30 Merge branch 'master' of https://github.com/zlatinb/muwire 2019-07-14 06:19:18 +01:00
Zlatin Balevsky
ae79f0fded Clear Done button, thanks to Aegon 2019-07-14 06:19:05 +01:00
Zlatin Balevsky
ed878b3762 Merge pull request #11 from zetok/readme
Add info about the default I2CP port to README.md
2019-07-12 09:17:24 +01:00
Zetok Zalbavar
623cca0ef2 Add info about the default I2CP port to README.md
Also:
 - improved formatting a bit
 - removed trailing whitespaces
2019-07-12 07:28:12 +01:00
Zlatin Balevsky
eaa883c3ba count duplicate files towards total in Uploads panel 2019-07-11 23:28:12 +01:00
Zlatin Balevsky
7ae8076865 disable webui for now 2019-07-11 22:29:47 +01:00
Zlatin Balevsky
b1aa92661c do not pack200 some jars because of duplicate entries 2019-07-11 20:42:24 +01:00
Zlatin Balevsky
9ed94c8376 do not include tomcat runtime 2019-07-11 20:41:57 +01:00
Zlatin Balevsky
fa6aea1abe attempt to produce an I2P plugin 2019-07-11 19:49:04 +01:00
Zlatin Balevsky
0de84e704b hello webui 2019-07-11 18:34:27 +01:00
Zlatin Balevsky
a767dda044 add empty grails project for a web ui 2019-07-11 17:56:42 +01:00
Zlatin Balevsky
56e9235d7b avoid FS call to get file length 2019-07-11 15:28:25 +01:00
Zlatin Balevsky
2fba9a74ce persist files.json every minute 2019-07-11 14:32:57 +01:00
Zlatin Balevsky
2bb6826906 canonicalize all files before they enter FileManager and do not look for absolute path on persistence 2019-07-11 14:32:12 +01:00
Zlatin Balevsky
9f339629a9 remove unnecessary canonicalization 2019-07-11 11:58:20 +01:00
Zlatin Balevsky
58d4207f94 Release 0.4.10 2019-07-11 05:09:05 +01:00
Zlatin Balevsky
32577a28dc some download stats 2019-07-11 05:00:25 +01:00
Zlatin Balevsky
f7b43304d4 use split pane in downloads tab as well 2019-07-11 03:57:49 +01:00
Zlatin Balevsky
dcbe09886d split pane instead of gridlayout 2019-07-11 03:48:05 +01:00
Zlatin Balevsky
5a54b2dcda shift focus to search pane on search 2019-07-10 22:33:21 +01:00
Zlatin Balevsky
581293b24f column sizes 2019-07-10 22:27:07 +01:00
Zlatin Balevsky
cd072b9f76 enable/disable download button correctly 2019-07-10 22:23:20 +01:00
Zlatin Balevsky
6b74fc5956 fix trust/distrust buttons 2019-07-10 22:17:32 +01:00
Zlatin Balevsky
3de2f872bb show results per sender 2019-07-10 22:08:18 +01:00
Zlatin Balevsky
fcde917d08 fix context menu and double-click 2019-07-10 21:26:13 +01:00
Zlatin Balevsky
4ded065010 move buttons onto search result tab 2019-07-10 21:23:00 +01:00
Zlatin Balevsky
18a1c7091a move downloads to their own pane 2019-07-10 20:54:45 +01:00
Zlatin Balevsky
46aee19f80 disable the button of the currently open pane 2019-07-10 20:37:09 +01:00
Zlatin Balevsky
92dd7064c6 Release 0.4.9 2019-07-10 12:02:36 +01:00
Zlatin Balevsky
b2e4dda677 rearrange tables 2019-07-10 11:55:06 +01:00
Zlatin Balevsky
e77a2c8961 clear hits table on refresh 2019-07-09 21:42:52 +01:00
Zlatin Balevsky
ee2fd2ef68 single hit per search uuid 2019-07-09 21:22:31 +01:00
Zlatin Balevsky
3f95d2bf1d trust and distrust buttons 2019-07-09 21:15:08 +01:00
Zlatin Balevsky
1390983732 populate hits table 2019-07-09 21:05:49 +01:00
Zlatin Balevsky
ce660cefe9 deleting of rules 2019-07-09 20:50:07 +01:00
Zlatin Balevsky
72b81eb886 fix matching 2019-07-09 20:27:28 +01:00
Zlatin Balevsky
57d593a68a persist watched keywords and regexes 2019-07-09 20:11:29 +01:00
Zlatin Balevsky
39a81a3376 hook up rule creation 2019-07-09 19:53:40 +01:00
Zlatin Balevsky
fd0bf17c24 add ability to unregister event listeners 2019-07-09 19:53:08 +01:00
Zlatin Balevsky
ac12bff69b wip on content control panel ui 2019-07-09 19:20:06 +01:00
Zlatin Balevsky
feef773bac hook up content control panel to rest of UI 2019-07-09 17:55:36 +01:00
Zlatin Balevsky
239d8f12a7 wip on core side of content management 2019-07-09 17:13:09 +01:00
Zlatin Balevsky
8bbc61a7cb add settings for watched keywords and regexes 2019-07-09 16:50:51 +01:00
Zlatin Balevsky
7f31c4477f matchers for keywords 2019-07-09 11:47:55 +01:00
Zlatin Balevsky
6bad67c1bf Release 0.4.8 2019-07-08 18:30:19 +01:00
Zlatin Balevsky
c76e6dc99f Merge pull request #9 from zetok/backticks
Replace deprecated backticks with $() for command substitution
2019-07-08 08:24:37 +01:00
Zetok Zalbavar
acf9db0db3 Replace deprecated backticks with $() for command substitution
Although it's a Bash FAQ, the point also applies to POSIX-compatible
shells: https://mywiki.wooledge.org/BashFAQ/082
2019-07-08 06:29:33 +01:00
Zlatin Balevsky
69b4f0b547 Add trust/distrust action from monitor window. Thanks Aegon 2019-07-07 15:31:21 +01:00
Zlatin Balevsky
80e165b505 fix download size in renderer, thanks Aegon 2019-07-07 11:17:56 +01:00
Zlatin Balevsky
bcce55b873 fix integer overflow 2019-07-07 10:58:39 +01:00
Zlatin Balevsky
d5c92560db fix integer overflow 2019-07-07 10:56:14 +01:00
Zlatin Balevsky
f827c1c9bf Home directories for different OSes 2019-07-07 09:14:13 +01:00
Zlatin Balevsky
88c5f1a02d Add GPG key link 2019-07-07 09:04:52 +01:00
Zlatin Balevsky
d8e44f5f39 kill other workers if download is finished 2019-07-06 22:21:13 +01:00
Zlatin Balevsky
72ff47ffe5 use custom renderer and comparator for download progress 2019-07-06 12:53:49 +01:00
Zlatin Balevsky
066ee2c96d wrong list 2019-07-06 11:28:04 +01:00
Zlatin Balevsky
0a8016dea7 enable stealing of pieces from other download workers 2019-07-06 11:26:18 +01:00
Zlatin Balevsky
db36367b11 avoid AIOOBE 2019-07-06 11:00:31 +01:00
Zlatin Balevsky
b6c9ccb7f6 return up to 9 X-Alts 2019-07-06 09:03:27 +01:00
Zlatin Balevsky
a9dc636bce write pieces every time a downloader finishes 2019-07-06 00:52:49 +01:00
Zlatin Balevsky
3cc0574d11 working partial pieces 2019-07-06 00:47:45 +01:00
Zlatin Balevsky
20fab9b16d work on partial piece persistence 2019-07-06 00:17:46 +01:00
Zlatin Balevsky
4015818323 center buttons 2019-07-05 17:15:50 +01:00
Zlatin Balevsky
f569d45c8c reallign tables 2019-07-05 17:07:14 +01:00
Zlatin Balevsky
3773647869 remove diff rejects 2019-07-05 16:24:57 +01:00
Zlatin Balevsky
29cdbf018c remove trailing spaces 2019-07-05 16:24:19 +01:00
Zlatin Balevsky
94bb7022eb tabs -> spaces 2019-07-05 16:22:34 +01:00
Zlatin Balevsky
39808302df Show which file is hashing, thanks to Aegon 2019-07-05 16:20:03 +01:00
Zlatin Balevsky
2d22f9c39e override router log manager 2019-07-05 12:32:23 +01:00
Zlatin Balevsky
ee8f80bab6 up i2p to 0.9.41 2019-07-05 12:26:48 +01:00
Zlatin Balevsky
3e6242e583 break when matching search is found 2019-07-04 18:12:22 +01:00
Zlatin Balevsky
41181616ee compact display of incoming searches, thanks Aegon 2019-07-04 17:59:53 +01:00
Zlatin Balevsky
eb2530ca32 fix sorting of download/upload tables thanks Aegon 2019-07-04 17:58:06 +01:00
Zlatin Balevsky
b5233780ef Release 0.4.7 2019-07-03 20:36:54 +01:00
Zlatin Balevsky
78753d7538 shut down cache client on shutdown 2019-07-03 19:50:00 +01:00
Zlatin Balevsky
4740e8b4f5 log hostcache stats 2019-07-03 19:46:24 +01:00
Zlatin Balevsky
ad5b00fc90 prettier progress status thanks to Aegon 2019-07-03 12:50:24 +01:00
Zlatin Balevsky
d6c6880848 update readme 2019-07-03 07:27:48 +01:00
Zlatin Balevsky
4f948c1b9e Release 0.4.6 2019-07-03 07:11:59 +01:00
Zlatin Balevsky
2b68c24f9c use switch 2019-07-03 07:01:27 +01:00
Zlatin Balevsky
bcdf0422db update for embedded router 2019-07-03 07:00:04 +01:00
Zlatin Balevsky
f6434b478d remove FAQ 2019-07-03 06:56:20 +01:00
Zlatin Balevsky
e979fdd26f update list view tables 2019-07-03 06:51:21 +01:00
Zlatin Balevsky
e6bfcaaab9 size columns, center integers 2019-07-03 06:11:02 +01:00
Zlatin Balevsky
9780108e8a disable trust buttons on action 2019-07-03 06:00:09 +01:00
Zlatin Balevsky
697c7d2d6d enable/disable trust panel buttons 2019-07-03 05:41:17 +01:00
Zlatin Balevsky
887d10c8bf move buttons around 2019-07-03 05:30:39 +01:00
Zlatin Balevsky
ef6b8fe458 add a state for failed updates 2019-07-03 05:12:00 +01:00
Zlatin Balevsky
20ab55d763 update todo 2019-07-03 00:23:21 +01:00
Zlatin Balevsky
eda58c9e0d Merge branch 'trust-lists' 2019-07-03 00:04:50 +01:00
Zlatin Balevsky
fb42fc0e35 add trust panel in options 2019-07-03 00:04:08 +01:00
Zlatin Balevsky
35cabc47ad hook up trust and distrust buttons 2019-07-02 23:44:43 +01:00
Zlatin Balevsky
5be97d0404 show something when review button is pressed 2019-07-02 22:51:04 +01:00
Zlatin Balevsky
82b0fa253c enable update and unsubscribe buttons 2019-07-02 22:26:29 +01:00
Zlatin Balevsky
011a4d5766 prevent duplicate updates and zero timestamps 2019-07-02 22:02:15 +01:00
Zlatin Balevsky
5cd1ca88c1 do actual updating on in a threadpool 2019-07-02 21:34:29 +01:00
Zlatin Balevsky
44c880d911 store subscriber list upon subscription 2019-07-02 20:53:29 +01:00
Zlatin Balevsky
14857cb5ad swallow headers in trust list response 2019-07-02 20:35:50 +01:00
Zlatin Balevsky
7daf981f1a fix NPE 2019-07-02 20:24:51 +01:00
Zlatin Balevsky
b99bc0ea32 fix 2019-07-02 20:12:22 +01:00
Zlatin Balevsky
1ccf6fbdfa participating bandwidth grid cell 2019-07-02 15:35:42 +01:00
Zlatin Balevsky
5711979272 Release 0.4.5 2019-07-02 15:01:51 +01:00
Zlatin Balevsky
9a5e2b1fa3 speed smoothing patch courtesy of Aegon 2019-07-02 14:46:40 +01:00
Zlatin Balevsky
cafc5f582e subscribe button 2019-07-02 14:35:52 +01:00
Zlatin Balevsky
a89b423dfc simpler speed calculation 2019-07-02 13:05:06 +01:00
Zlatin Balevsky
79e8438941 always assume interval is at least 1 second 2019-07-02 12:49:00 +01:00
Zlatin Balevsky
19c2c46491 prevent NPE on startup 2019-07-02 12:27:15 +01:00
Zlatin Balevsky
78f1d54b69 add new host cache 2019-07-02 10:04:24 +01:00
Zlatin Balevsky
9461649ed4 change sig type 2019-07-02 09:49:13 +01:00
Zlatin Balevsky
8573ab2850 work on trust list UI 2019-07-02 09:35:21 +01:00
Zlatin Balevsky
8b3d752727 add status to the trust list object 2019-07-02 08:59:30 +01:00
Zlatin Balevsky
7c54bd8966 start work on sharing of trust lists 2019-07-01 23:33:39 +01:00
Zlatin Balevsky
5d0fcb7027 start work on sharing of trust lists 2019-07-01 23:15:13 +01:00
Zlatin Balevsky
3ec9654d3c start work on sharing of trust lists 2019-07-01 22:05:43 +01:00
Zlatin Balevsky
7c8d64b462 start work on sharing of trust lists 2019-07-01 21:40:07 +01:00
Zlatin Balevsky
31e30e3d31 excludePeerCaps 2019-07-01 18:31:58 +01:00
Zlatin Balevsky
8caf6e99b0 show floodfill status 2019-07-01 13:18:31 +01:00
Zlatin Balevsky
624155debd update todo 2019-07-01 06:17:46 +01:00
Zlatin Balevsky
4468a262ae actually add timestamps to the list 2019-06-30 21:40:18 +01:00
Zlatin Balevsky
1780901cb0 throttle connections to 10 searches per second 2019-06-30 21:22:49 +01:00
278 changed files with 34854 additions and 5424 deletions

View File

@@ -4,11 +4,11 @@ MuWire is an easy to use file-sharing program which offers anonymity using [I2P
It is inspired by the LimeWire Gnutella client and developed by a former LimeWire developer.
The current stable release - 0.4.0 is available for download at https://muwire.com. You can find technical documentation in the "doc" folder.
The current stable release - 0.5.2 is available for download at https://muwire.com. You can find technical documentation in the "doc" folder.
### Building
You need JRE 8 or newer. After installing that and setting up the appropriate paths, just type
You need JDK 8 or newer. After installing that and setting up the appropriate paths, just type
```
./gradlew clean assemble
@@ -19,38 +19,23 @@ If you want to run the unit tests, type
./gradlew clean build
```
Some of the UI tests will fail because they haven't been written yet :-/
If you want to build binary bundles that do not depend on Java or I2P, see the https://github.com/zlatinb/muwire-pkg project
### Running
You need to have an I2P router up and running on the same machine. After you build the application, look inside `gui/build/distributions`. Untar/unzip one of the `shadow` files and then run the jar contained inside by typing `java -jar MuWire-x.y.z.jar` in a terminal or command prompt. If you use a custom I2CP host and port, create a file `$HOME/.MuWire/i2p.properties` and put `i2cp.tcp.host=<host>` and `i2cp.tcp.port=<port>` in there.
After you build the application, look inside `gui/build/distributions`. Untar/unzip one of the `shadow` files and then run the jar contained inside by typing `java -jar gui-x.y.z.jar` in a terminal or command prompt.
The first time you run MuWire it will ask you to select a nickname. This nickname will be displayed with search results, so that others can verify the file was shared by you. It is best to leave MuWire running all the time, just like I2P.
If you have an I2P router running on the same machine that is all you need to do. If you use a custom I2CP host and port, create a file `i2p.properties` and put `i2cp.tcp.host=<host>` and `i2cp.tcp.port=<port>` in there. On Windows that file should go into `%HOME%\AppData\Roaming\MuWire`, on Mac into `$HOME/Library/Application Support/MuWire` and on Linux `$HOME/.MuWire`
[Default I2CP port]\: `7654`
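As a concrete illustration of the paragraph above, a minimal `i2p.properties` for a router reachable on another machine could look like this (the host value is a placeholder, not taken from the source; `7654` is the default I2CP port noted above):
```
i2cp.tcp.host=192.168.1.10
i2cp.tcp.port=7654
```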
### GPG Fingerprint
```
471B 9FD4 5517 A5ED 101F C57D A728 3207 2D52 5E41
```
You can find the full key at https://keybase.io/zlatinb
### Known bugs and limitations
* Many UI features you would expect are not there yet
### Quick FAQ
* why is MuWire slow ?
- too few sources you're downloading from
- you can increase the number of tunnels by using more tunnels via Options->I2P Inbound/Outbound Quantity
the default is 4 and you could raise up to as high as 16 ( Caution !!!!)
* my search is not returning (enough) results !
- search is keyword or hash based
- keywords and hash(es) are NOT regexed or wildcarded so they have to be complete
so searching for 'musi' will not return results with 'music' - you have to search for 'music'
- ALL keywords have to match
- only use space for keyword separation
- if you already have the file in question it is not displayed ( can be changed via Options )
* what's this right click -> 'Copy hash to clipboard' for ?
- if you have a specific file you wish to share or download you can use the hash as a unique identifier
to make sure you have exactly the right file.
- you can share this hash with others to ensure they are getting the right file
[Default I2CP port]: https://geti2p.net/en/docs/ports

12
TODO.md
View File

@@ -12,14 +12,6 @@ This reduces query traffic by not sending last hop queries to peers that definit
This helps with scalability
##### Trust List Sharing
For helping users make better decisions whom to trust
##### Content Control Panel
To allow every user to not route queries for content they do not like. This is mostly GUI work, the backend part is simple
##### Web UI, REST Interface, etc.
Basically any non-gui non-cli user interface
@@ -31,6 +23,4 @@ To enable parsing of metadata from known file types and the user editing it or a
### Small Items
* Wrapper of some kind for in-place upgrades
* Download file sequentially
* Unsharing of files (half done)
* Multiple-selection download, Ctrl-A
* Automatic adjustment of number of I2P tunnels

View File

@@ -2,7 +2,7 @@ subprojects {
apply plugin: 'groovy'
dependencies {
compile 'net.i2p:i2p:0.9.40'
compile "net.i2p:i2p:${i2pVersion}"
compile 'org.codehaus.groovy:groovy-all:2.4.15'
}
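The `${i2pVersion}` placeholder above assumes the I2P version is declared once as a Gradle project property. Where it is declared is not shown in this diff; a root `gradle.properties` entry is one common way to do it (value taken from the "i2p 0.9.43" commit in the log above):
```
i2pVersion=0.9.43
```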

View File

@@ -35,7 +35,7 @@ class Cli {
Core core
try {
core = new Core(props, home, "0.4.4")
core = new Core(props, home, "0.5.3")
} catch (Exception bad) {
bad.printStackTrace(System.out)
println "Failed to initialize core, exiting"

View File

@@ -53,7 +53,7 @@ class CliDownloader {
Core core
try {
core = new Core(props, home, "0.4.4")
core = new Core(props, home, "0.5.3")
} catch (Exception bad) {
bad.printStackTrace(System.out)
println "Failed to initialize core, exiting"

View File

@@ -2,9 +2,9 @@ apply plugin : 'application'
mainClassName = 'com.muwire.core.Core'
applicationDefaultJvmArgs = ['-Djava.util.logging.config.file=logging.properties']
dependencies {
compile 'net.i2p:router:0.9.40'
compile 'net.i2p.client:mstreaming:0.9.40'
compile 'net.i2p.client:streaming:0.9.40'
compile "net.i2p:router:${i2pVersion}"
compile "net.i2p.client:mstreaming:${i2pVersion}"
compile "net.i2p.client:streaming:${i2pVersion}"
testCompile 'org.junit.jupiter:junit-jupiter-api:5.4.2'
testCompile 'junit:junit:4.12'

View File

@@ -1,13 +0,0 @@
package com.muwire.core
import net.i2p.crypto.SigType
class Constants {
public static final byte PERSONA_VERSION = (byte)1
public static final SigType SIG_TYPE = SigType.EdDSA_SHA512_Ed25519
public static final int MAX_HEADER_SIZE = 0x1 << 14
public static final int MAX_HEADERS = 16
public static final String SPLIT_PATTERN = "[\\*\\+\\-,\\.:;\\(\\)=_/\\\\\\!\\\"\\\'\\\$%\\|\\[\\]\\{\\}\\?]"
}

View File

@@ -20,6 +20,7 @@ import com.muwire.core.download.UIDownloadPausedEvent
import com.muwire.core.download.UIDownloadResumedEvent
import com.muwire.core.files.FileDownloadedEvent
import com.muwire.core.files.FileHashedEvent
import com.muwire.core.files.FileHashingEvent
import com.muwire.core.files.FileHasher
import com.muwire.core.files.FileLoadedEvent
import com.muwire.core.files.FileManager
@@ -27,6 +28,8 @@ import com.muwire.core.files.FileSharedEvent
import com.muwire.core.files.FileUnsharedEvent
import com.muwire.core.files.HasherService
import com.muwire.core.files.PersisterService
import com.muwire.core.files.UICommentEvent
import com.muwire.core.files.UIPersistFilesEvent
import com.muwire.core.files.AllFilesLoadedEvent
import com.muwire.core.files.DirectoryUnsharedEvent
import com.muwire.core.files.DirectoryWatcher
@@ -34,17 +37,23 @@ import com.muwire.core.hostcache.CacheClient
import com.muwire.core.hostcache.HostCache
import com.muwire.core.hostcache.HostDiscoveredEvent
import com.muwire.core.mesh.MeshManager
import com.muwire.core.search.BrowseManager
import com.muwire.core.search.QueryEvent
import com.muwire.core.search.ResultsEvent
import com.muwire.core.search.ResultsSender
import com.muwire.core.search.SearchEvent
import com.muwire.core.search.SearchManager
import com.muwire.core.search.UIBrowseEvent
import com.muwire.core.search.UIResultBatchEvent
import com.muwire.core.trust.TrustEvent
import com.muwire.core.trust.TrustService
import com.muwire.core.trust.TrustSubscriber
import com.muwire.core.trust.TrustSubscriptionEvent
import com.muwire.core.update.UpdateClient
import com.muwire.core.upload.UploadManager
import com.muwire.core.util.MuWireLogManager
import com.muwire.core.content.ContentControlEvent
import com.muwire.core.content.ContentManager
import groovy.util.logging.Log
import net.i2p.I2PAppContext
@@ -74,6 +83,7 @@ public class Core {
final MuWireSettings muOptions
private final TrustService trustService
private final TrustSubscriber trustSubscriber
private final PersisterService persisterService
private final HostCache hostCache
private final ConnectionManager connectionManager
@@ -86,6 +96,7 @@ public class Core {
private final DirectoryWatcher directoryWatcher
final FileManager fileManager
final UploadManager uploadManager
final ContentManager contentManager
private final Router router
@@ -128,7 +139,9 @@ public class Core {
} else {
log.info("launching embedded router")
Properties routerProps = new Properties()
routerProps.setProperty("i2p.dir.base", home.getAbsolutePath())
routerProps.setProperty("i2p.dir.config", home.getAbsolutePath())
routerProps.setProperty("router.excludePeerCaps", "KLM")
routerProps.setProperty("i2np.inboundKBytesPerSecond", String.valueOf(props.inBw))
routerProps.setProperty("i2np.outboundKBytesPerSecond", String.valueOf(props.outBw))
routerProps.setProperty("i2cp.disableInterface", "true")
@@ -136,33 +149,33 @@ public class Core {
routerProps.setProperty("i2np.udp.port", i2pOptions["i2np.udp.port"])
routerProps.setProperty("i2np.udp.internalPort", i2pOptions["i2np.udp.port"])
router = new Router(routerProps)
I2PAppContext.getGlobalContext().metaClass = new RouterContextMetaClass()
router.getContext().setLogManager(new MuWireLogManager())
router.runRouter()
while(!router.isRunning())
Thread.sleep(100)
}
log.info("initializing I2P socket manager")
def i2pClient = new I2PClientFactory().createClient()
File keyDat = new File(home, "key.dat")
if (!keyDat.exists()) {
log.info("Creating new key.dat")
keyDat.withOutputStream {
i2pClient.createDestination(it, Constants.SIG_TYPE)
}
}
log.info("initializing I2P socket manager")
def i2pClient = new I2PClientFactory().createClient()
File keyDat = new File(home, "key.dat")
if (!keyDat.exists()) {
log.info("Creating new key.dat")
keyDat.withOutputStream {
i2pClient.createDestination(it, Constants.SIG_TYPE)
}
}
// options like tunnel length and quantity
I2PSession i2pSession
I2PSocketManager socketManager
keyDat.withInputStream {
socketManager = new I2PSocketManagerFactory().createManager(it, i2pOptions["i2cp.tcp.host"], i2pOptions["i2cp.tcp.port"].toInteger(), i2pOptions)
}
socketManager.getDefaultOptions().setReadTimeout(60000)
socketManager.getDefaultOptions().setConnectTimeout(30000)
I2PSession i2pSession
I2PSocketManager socketManager
keyDat.withInputStream {
socketManager = new I2PSocketManagerFactory().createManager(it, i2pOptions["i2cp.tcp.host"], i2pOptions["i2cp.tcp.port"].toInteger(), i2pOptions)
}
socketManager.getDefaultOptions().setReadTimeout(60000)
socketManager.getDefaultOptions().setConnectTimeout(30000)
socketManager.addDisconnectListener({eventBus.publish(new RouterDisconnectedEvent())} as DisconnectListener)
i2pSession = socketManager.getSession()
i2pSession = socketManager.getSession()
def destination = new Destination()
def spk = new SigningPrivateKey(Constants.SIG_TYPE)
@@ -171,7 +184,7 @@ public class Core {
def privateKey = new PrivateKey()
privateKey.readBytes(it)
spk.readBytes(it)
}
}
def baos = new ByteArrayOutputStream()
def daos = new DataOutputStream(baos)
@@ -189,65 +202,67 @@ public class Core {
me = new Persona(new ByteArrayInputStream(baos.toByteArray()))
log.info("Loaded myself as "+me.getHumanReadableName())
eventBus = new EventBus()
eventBus = new EventBus()
log.info("initializing trust service")
File goodTrust = new File(home, "trusted")
File badTrust = new File(home, "distrusted")
trustService = new TrustService(goodTrust, badTrust, 5000)
eventBus.register(TrustEvent.class, trustService)
log.info("initializing trust service")
File goodTrust = new File(home, "trusted")
File badTrust = new File(home, "distrusted")
trustService = new TrustService(goodTrust, badTrust, 5000)
eventBus.register(TrustEvent.class, trustService)
log.info "initializing file manager"
fileManager = new FileManager(eventBus, props)
eventBus.register(FileHashedEvent.class, fileManager)
eventBus.register(FileLoadedEvent.class, fileManager)
eventBus.register(FileDownloadedEvent.class, fileManager)
eventBus.register(FileUnsharedEvent.class, fileManager)
eventBus.register(SearchEvent.class, fileManager)
log.info "initializing file manager"
fileManager = new FileManager(eventBus, props)
eventBus.register(FileHashedEvent.class, fileManager)
eventBus.register(FileLoadedEvent.class, fileManager)
eventBus.register(FileDownloadedEvent.class, fileManager)
eventBus.register(FileUnsharedEvent.class, fileManager)
eventBus.register(SearchEvent.class, fileManager)
eventBus.register(DirectoryUnsharedEvent.class, fileManager)
eventBus.register(UICommentEvent.class, fileManager)
log.info("initializing mesh manager")
MeshManager meshManager = new MeshManager(fileManager, home, props)
eventBus.register(SourceDiscoveredEvent.class, meshManager)
log.info "initializing persistence service"
persisterService = new PersisterService(new File(home, "files.json"), eventBus, 15000, fileManager)
log.info "initializing persistence service"
persisterService = new PersisterService(new File(home, "files.json"), eventBus, 60000, fileManager)
eventBus.register(UILoadedEvent.class, persisterService)
eventBus.register(UIPersistFilesEvent.class, persisterService)
log.info("initializing host cache")
File hostStorage = new File(home, "hosts.json")
log.info("initializing host cache")
File hostStorage = new File(home, "hosts.json")
hostCache = new HostCache(trustService,hostStorage, 30000, props, i2pSession.getMyDestination())
eventBus.register(HostDiscoveredEvent.class, hostCache)
eventBus.register(ConnectionEvent.class, hostCache)
eventBus.register(HostDiscoveredEvent.class, hostCache)
eventBus.register(ConnectionEvent.class, hostCache)
log.info("initializing connection manager")
connectionManager = props.isLeaf() ?
new LeafConnectionManager(eventBus, me, 3, hostCache, props) :
log.info("initializing connection manager")
connectionManager = props.isLeaf() ?
new LeafConnectionManager(eventBus, me, 3, hostCache, props) :
new UltrapeerConnectionManager(eventBus, me, 512, 512, hostCache, trustService, props)
eventBus.register(TrustEvent.class, connectionManager)
eventBus.register(ConnectionEvent.class, connectionManager)
eventBus.register(DisconnectionEvent.class, connectionManager)
eventBus.register(TrustEvent.class, connectionManager)
eventBus.register(ConnectionEvent.class, connectionManager)
eventBus.register(DisconnectionEvent.class, connectionManager)
eventBus.register(QueryEvent.class, connectionManager)
log.info("initializing cache client")
cacheClient = new CacheClient(eventBus,hostCache, connectionManager, i2pSession, props, 10000)
log.info("initializing cache client")
cacheClient = new CacheClient(eventBus,hostCache, connectionManager, i2pSession, props, 10000)
log.info("initializing update client")
updateClient = new UpdateClient(eventBus, i2pSession, myVersion, props, fileManager, me)
eventBus.register(FileDownloadedEvent.class, updateClient)
eventBus.register(UIResultBatchEvent.class, updateClient)
log.info("initializing connector")
I2PConnector i2pConnector = new I2PConnector(socketManager)
log.info("initializing connector")
I2PConnector i2pConnector = new I2PConnector(socketManager)
log.info "initializing results sender"
ResultsSender resultsSender = new ResultsSender(eventBus, i2pConnector, me)
log.info "initializing results sender"
ResultsSender resultsSender = new ResultsSender(eventBus, i2pConnector, me, props)
log.info "initializing search manager"
SearchManager searchManager = new SearchManager(eventBus, me, resultsSender)
eventBus.register(QueryEvent.class, searchManager)
eventBus.register(ResultsEvent.class, searchManager)
log.info "initializing search manager"
SearchManager searchManager = new SearchManager(eventBus, me, resultsSender)
eventBus.register(QueryEvent.class, searchManager)
eventBus.register(ResultsEvent.class, searchManager)
log.info("initializing download manager")
downloadManager = new DownloadManager(eventBus, trustService, meshManager, props, i2pConnector, home, me)
@@ -265,21 +280,38 @@ public class Core {
log.info("initializing connection establisher")
connectionEstablisher = new ConnectionEstablisher(eventBus, i2pConnector, props, connectionManager, hostCache)
log.info("initializing acceptor")
I2PAcceptor i2pAcceptor = new I2PAcceptor(socketManager)
connectionAcceptor = new ConnectionAcceptor(eventBus, connectionManager, props,
i2pAcceptor, hostCache, trustService, searchManager, uploadManager, connectionEstablisher)
log.info("initializing acceptor")
I2PAcceptor i2pAcceptor = new I2PAcceptor(socketManager)
connectionAcceptor = new ConnectionAcceptor(eventBus, connectionManager, props,
i2pAcceptor, hostCache, trustService, searchManager, uploadManager, fileManager, connectionEstablisher)
log.info("initializing directory watcher")
directoryWatcher = new DirectoryWatcher(eventBus, fileManager)
directoryWatcher = new DirectoryWatcher(eventBus, fileManager, home, props)
eventBus.register(FileSharedEvent.class, directoryWatcher)
eventBus.register(AllFilesLoadedEvent.class, directoryWatcher)
eventBus.register(DirectoryUnsharedEvent.class, directoryWatcher)
log.info("initializing hasher service")
hasherService = new HasherService(new FileHasher(), eventBus, fileManager)
hasherService = new HasherService(new FileHasher(), eventBus, fileManager, props)
eventBus.register(FileSharedEvent.class, hasherService)
}
eventBus.register(FileUnsharedEvent.class, hasherService)
eventBus.register(DirectoryUnsharedEvent.class, hasherService)
log.info("initializing trust subscriber")
trustSubscriber = new TrustSubscriber(eventBus, i2pConnector, props)
eventBus.register(UILoadedEvent.class, trustSubscriber)
eventBus.register(TrustSubscriptionEvent.class, trustSubscriber)
log.info("initializing content manager")
contentManager = new ContentManager()
eventBus.register(ContentControlEvent.class, contentManager)
eventBus.register(QueryEvent.class, contentManager)
log.info("initializing browse manager")
BrowseManager browseManager = new BrowseManager(i2pConnector, eventBus)
eventBus.register(UIBrowseEvent.class, browseManager)
}
public void startServices() {
hasherService.start()
@@ -299,6 +331,8 @@ public class Core {
log.info("already shutting down")
return
}
log.info("shutting down trust subscriber")
trustSubscriber.stop()
log.info("shutting down download manageer")
downloadManager.shutdown()
log.info("shutting down connection acceeptor")
@@ -307,6 +341,8 @@ public class Core {
connectionEstablisher.stop()
log.info("shutting down directory watcher")
directoryWatcher.stop()
log.info("shutting down cache client")
cacheClient.stop()
log.info("shutting down connection manager")
connectionManager.shutdown()
if (router != null) {
@@ -315,19 +351,6 @@ public class Core {
}
}
static class RouterContextMetaClass extends DelegatingMetaClass {
private final Object logManager = new MuWireLogManager()
RouterContextMetaClass() {
super(RouterContext.class)
}
Object invokeMethod(Object object, String name, Object[] args) {
if (name == "logManager")
return logManager
super.invokeMethod(object, name, args)
}
}
static main(args) {
def home = System.getProperty("user.home") + File.separator + ".MuWire"
home = new File(home)
@@ -352,7 +375,7 @@ public class Core {
}
}
Core core = new Core(props, home, "0.4.4")
Core core = new Core(props, home, "0.5.3")
core.startServices()
// ... at the end, sleep or execute script

View File

@@ -4,17 +4,17 @@ import java.util.concurrent.atomic.AtomicLong
class Event {
private static final AtomicLong SEQ_NO = new AtomicLong();
final long seqNo
final long timestamp
private static final AtomicLong SEQ_NO = new AtomicLong();
final long seqNo
final long timestamp
Event() {
seqNo = SEQ_NO.getAndIncrement()
timestamp = System.currentTimeMillis()
}
Event() {
seqNo = SEQ_NO.getAndIncrement()
timestamp = System.currentTimeMillis()
}
@Override
public String toString() {
"seqNo $seqNo timestamp $timestamp"
}
@Override
public String toString() {
"seqNo $seqNo timestamp $timestamp"
}
}

View File

@@ -11,41 +11,46 @@ import groovy.util.logging.Log
@Log
class EventBus {
private Map handlers = new HashMap()
private final Executor executor = Executors.newSingleThreadExecutor {r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("event-bus")
rv
}
private Map handlers = new HashMap()
private final Executor executor = Executors.newSingleThreadExecutor {r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("event-bus")
rv
}
void publish(Event e) {
executor.execute({publishInternal(e)} as Runnable)
}
void publish(Event e) {
executor.execute({publishInternal(e)} as Runnable)
}
private void publishInternal(Event e) {
log.fine "publishing event $e of type ${e.getClass().getSimpleName()} event $e"
def currentHandlers
final def clazz = e.getClass()
synchronized(this) {
currentHandlers = handlers.getOrDefault(clazz, [])
}
currentHandlers.each {
private void publishInternal(Event e) {
log.fine "publishing event $e of type ${e.getClass().getSimpleName()} event $e"
def currentHandlers
final def clazz = e.getClass()
synchronized(this) {
currentHandlers = handlers.getOrDefault(clazz, [])
}
currentHandlers.each {
try {
it."on${clazz.getSimpleName()}"(e)
} catch (Exception bad) {
log.log(Level.SEVERE, "exception dispatching event",bad)
}
}
}
}
}
synchronized void register(Class<? extends Event> eventType, def handler) {
log.info "Registering $handler for type $eventType"
def currentHandlers = handlers.get(eventType)
if (currentHandlers == null) {
currentHandlers = new CopyOnWriteArrayList()
handlers.put(eventType, currentHandlers)
}
currentHandlers.add handler
}
synchronized void register(Class<? extends Event> eventType, def handler) {
log.info "Registering $handler for type $eventType"
def currentHandlers = handlers.get(eventType)
if (currentHandlers == null) {
currentHandlers = new CopyOnWriteArrayList()
handlers.put(eventType, currentHandlers)
}
currentHandlers.add handler
}
synchronized void unregister(Class<? extends Event> eventType, def handler) {
log.info("Unregistering $handler for type $eventType")
handlers[eventType]?.remove(handler)
}
}
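The register/publish/unregister methods above dispatch by naming convention: publishInternal() invokes a method named "on" plus the event class's simple name on each registered handler. A minimal usage sketch follows; the SearchLogger class is hypothetical, while EventBus, SearchEvent, and the method-naming rule come from the code shown in this change set:
```
import com.muwire.core.EventBus
import com.muwire.core.search.SearchEvent

// Hypothetical handler: the bus calls "on" + event class simple name,
// so a handler registered for SearchEvent must expose onSearchEvent(SearchEvent).
class SearchLogger {
    void onSearchEvent(SearchEvent e) {
        println "search received: ${e.getSearchTerms()}"
    }
}

def bus = new EventBus()
def logger = new SearchLogger()
bus.register(SearchEvent.class, logger)                // handlers are keyed by event class
bus.publish(new SearchEvent(searchTerms: ["example"])) // dispatched on the daemon "event-bus" thread
Thread.sleep(200)                                      // give the async dispatch a moment in a script
bus.unregister(SearchEvent.class, logger)              // unregister support added in this change set
```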

View File

@@ -6,105 +6,168 @@ import com.muwire.core.hostcache.CrawlerResponse
import com.muwire.core.util.DataUtil
import net.i2p.data.Base64
import net.i2p.util.ConcurrentHashSet
class MuWireSettings {
final boolean isLeaf
boolean allowUntrusted
boolean searchExtraHop
boolean allowTrustLists
int trustListInterval
Set<Persona> trustSubscriptions
int downloadRetryInterval
int updateCheckInterval
boolean autoDownloadUpdate
String updateType
String nickname
File downloadLocation
File incompleteLocation
CrawlerResponse crawlerResponse
boolean shareDownloadedFiles
boolean shareHiddenFiles
boolean searchComments
boolean browseFiles
Set<String> watchedDirectories
float downloadSequentialRatio
int hostClearInterval
int hostClearInterval, hostHopelessInterval, hostRejectInterval
int meshExpiration
int speedSmoothSeconds
boolean embeddedRouter
int inBw, outBw
Set<String> watchedKeywords
Set<String> watchedRegexes
MuWireSettings() {
MuWireSettings() {
this(new Properties())
}
MuWireSettings(Properties props) {
isLeaf = Boolean.valueOf(props.get("leaf","false"))
allowUntrusted = Boolean.valueOf(props.get("allowUntrusted","true"))
crawlerResponse = CrawlerResponse.valueOf(props.get("crawlerResponse","REGISTERED"))
MuWireSettings(Properties props) {
isLeaf = Boolean.valueOf(props.get("leaf","false"))
allowUntrusted = Boolean.valueOf(props.getProperty("allowUntrusted","true"))
searchExtraHop = Boolean.valueOf(props.getProperty("searchExtraHop","false"))
allowTrustLists = Boolean.valueOf(props.getProperty("allowTrustLists","true"))
trustListInterval = Integer.valueOf(props.getProperty("trustListInterval","1"))
crawlerResponse = CrawlerResponse.valueOf(props.get("crawlerResponse","REGISTERED"))
nickname = props.getProperty("nickname","MuWireUser")
downloadLocation = new File((String)props.getProperty("downloadLocation",
System.getProperty("user.home")))
downloadRetryInterval = Integer.parseInt(props.getProperty("downloadRetryInterval","1"))
String incompleteLocationProp = props.getProperty("incompleteLocation")
if (incompleteLocationProp != null)
incompleteLocation = new File(incompleteLocationProp)
downloadRetryInterval = Integer.parseInt(props.getProperty("downloadRetryInterval","60"))
updateCheckInterval = Integer.parseInt(props.getProperty("updateCheckInterval","24"))
autoDownloadUpdate = Boolean.parseBoolean(props.getProperty("autoDownloadUpdate","true"))
updateType = props.getProperty("updateType","jar")
shareDownloadedFiles = Boolean.parseBoolean(props.getProperty("shareDownloadedFiles","true"))
shareHiddenFiles = Boolean.parseBoolean(props.getProperty("shareHiddenFiles","false"))
downloadSequentialRatio = Float.valueOf(props.getProperty("downloadSequentialRatio","0.8"))
hostClearInterval = Integer.valueOf(props.getProperty("hostClearInterval","60"))
hostClearInterval = Integer.valueOf(props.getProperty("hostClearInterval","15"))
hostHopelessInterval = Integer.valueOf(props.getProperty("hostHopelessInterval", "1440"))
hostRejectInterval = Integer.valueOf(props.getProperty("hostRejectInterval", "1"))
meshExpiration = Integer.valueOf(props.getProperty("meshExpiration","60"))
embeddedRouter = Boolean.valueOf(props.getProperty("embeddedRouter","false"))
inBw = Integer.valueOf(props.getProperty("inBw","256"))
outBw = Integer.valueOf(props.getProperty("outBw","128"))
searchComments = Boolean.valueOf(props.getProperty("searchComments","true"))
browseFiles = Boolean.valueOf(props.getProperty("browseFiles","true"))
speedSmoothSeconds = Integer.valueOf(props.getProperty("speedSmoothSeconds","60"))
watchedDirectories = new HashSet<>()
if (props.containsKey("watchedDirectories")) {
String[] encoded = props.getProperty("watchedDirectories").split(",")
encoded.each { watchedDirectories << DataUtil.readi18nString(Base64.decode(it)) }
watchedDirectories = readEncodedSet(props, "watchedDirectories")
watchedKeywords = readEncodedSet(props, "watchedKeywords")
watchedRegexes = readEncodedSet(props, "watchedRegexes")
trustSubscriptions = new HashSet<>()
if (props.containsKey("trustSubscriptions")) {
props.getProperty("trustSubscriptions").split(",").each {
trustSubscriptions.add(new Persona(new ByteArrayInputStream(Base64.decode(it))))
}
}
}
}
void write(OutputStream out) throws IOException {
Properties props = new Properties()
props.setProperty("leaf", isLeaf.toString())
props.setProperty("allowUntrusted", allowUntrusted.toString())
props.setProperty("searchExtraHop", String.valueOf(searchExtraHop))
props.setProperty("allowTrustLists", String.valueOf(allowTrustLists))
props.setProperty("trustListInterval", String.valueOf(trustListInterval))
props.setProperty("crawlerResponse", crawlerResponse.toString())
props.setProperty("nickname", nickname)
props.setProperty("downloadLocation", downloadLocation.getAbsolutePath())
if (incompleteLocation != null)
props.setProperty("incompleteLocation", incompleteLocation.getAbsolutePath())
props.setProperty("downloadRetryInterval", String.valueOf(downloadRetryInterval))
props.setProperty("updateCheckInterval", String.valueOf(updateCheckInterval))
props.setProperty("autoDownloadUpdate", String.valueOf(autoDownloadUpdate))
props.setProperty("updateType",updateType)
props.setProperty("updateType",String.valueOf(updateType))
props.setProperty("shareDownloadedFiles", String.valueOf(shareDownloadedFiles))
props.setProperty("shareHiddenFiles", String.valueOf(shareHiddenFiles))
props.setProperty("downloadSequentialRatio", String.valueOf(downloadSequentialRatio))
props.setProperty("hostClearInterval", String.valueOf(hostClearInterval))
props.setProperty("hostHopelessInterval", String.valueOf(hostHopelessInterval))
props.setProperty("hostRejectInterval", String.valueOf(hostRejectInterval))
props.setProperty("meshExpiration", String.valueOf(meshExpiration))
props.setProperty("embeddedRouter", String.valueOf(embeddedRouter))
props.setProperty("inBw", String.valueOf(inBw))
props.setProperty("outBw", String.valueOf(outBw))
props.setProperty("searchComments", String.valueOf(searchComments))
props.setProperty("browseFiles", String.valueOf(browseFiles))
props.setProperty("speedSmoothSeconds", String.valueOf(speedSmoothSeconds))
if (!watchedDirectories.isEmpty()) {
String encoded = watchedDirectories.stream().
map({Base64.encode(DataUtil.encodei18nString(it))}).
writeEncodedSet(watchedDirectories, "watchedDirectories", props)
writeEncodedSet(watchedKeywords, "watchedKeywords", props)
writeEncodedSet(watchedRegexes, "watchedRegexes", props)
if (!trustSubscriptions.isEmpty()) {
String encoded = trustSubscriptions.stream().
map({it.toBase64()}).
collect(Collectors.joining(","))
props.setProperty("watchedDirectories", encoded)
props.setProperty("trustSubscriptions", encoded)
}
props.store(out, "")
}
boolean isLeaf() {
isLeaf
}
private static Set<String> readEncodedSet(Properties props, String property) {
Set<String> rv = new ConcurrentHashSet<>()
if (props.containsKey(property)) {
String[] encoded = props.getProperty(property).split(",")
encoded.each { rv << DataUtil.readi18nString(Base64.decode(it)) }
}
rv
}
boolean allowUntrusted() {
allowUntrusted
}
private static void writeEncodedSet(Set<String> set, String property, Properties props) {
if (set.isEmpty())
return
String encoded = set.stream().
map({Base64.encode(DataUtil.encodei18nString(it))}).
collect(Collectors.joining(","))
props.setProperty(property, encoded)
}
void setAllowUntrusted(boolean allowUntrusted) {
this.allowUntrusted = allowUntrusted
}
boolean isLeaf() {
isLeaf
}
CrawlerResponse getCrawlerResponse() {
crawlerResponse
}
boolean allowUntrusted() {
allowUntrusted
}
void setCrawlerResponse(CrawlerResponse crawlerResponse) {
this.crawlerResponse = crawlerResponse
}
void setAllowUntrusted(boolean allowUntrusted) {
this.allowUntrusted = allowUntrusted
}
CrawlerResponse getCrawlerResponse() {
crawlerResponse
}
void setCrawlerResponse(CrawlerResponse crawlerResponse) {
this.crawlerResponse = crawlerResponse
}
String getNickname() {
nickname

View File

@@ -2,12 +2,12 @@ package com.muwire.core
abstract class Service {
volatile boolean loaded
volatile boolean loaded
abstract void load()
abstract void load()
void waitForLoad() {
while (!loaded)
Thread.sleep(10)
}
void waitForLoad() {
while (!loaded)
Thread.sleep(10)
}
}

View File

@@ -0,0 +1,7 @@
package com.muwire.core
class SplitPattern {
public static final String SPLIT_PATTERN = "[\\*\\+\\-,\\.:;\\(\\)=_/\\\\\\!\\\"\\\'\\\$%\\|\\[\\]\\{\\}\\?]";
}

View File

@@ -22,103 +22,107 @@ import net.i2p.data.Destination
@Log
abstract class Connection implements Closeable {
final EventBus eventBus
final Endpoint endpoint
final boolean incoming
final HostCache hostCache
private static final int SEARCHES = 10
private static final long INTERVAL = 1000
final EventBus eventBus
final Endpoint endpoint
final boolean incoming
final HostCache hostCache
final TrustService trustService
final MuWireSettings settings
private final AtomicBoolean running = new AtomicBoolean()
private final BlockingQueue messages = new LinkedBlockingQueue()
private final Thread reader, writer
private final AtomicBoolean running = new AtomicBoolean()
private final BlockingQueue messages = new LinkedBlockingQueue()
private final Thread reader, writer
private final LinkedList<Long> searchTimestamps = new LinkedList<>()
protected final String name
protected final String name
long lastPingSentTime, lastPongReceivedTime
long lastPingSentTime, lastPongReceivedTime
Connection(EventBus eventBus, Endpoint endpoint, boolean incoming,
Connection(EventBus eventBus, Endpoint endpoint, boolean incoming,
HostCache hostCache, TrustService trustService, MuWireSettings settings) {
this.eventBus = eventBus
this.incoming = incoming
this.endpoint = endpoint
this.hostCache = hostCache
this.eventBus = eventBus
this.incoming = incoming
this.endpoint = endpoint
this.hostCache = hostCache
this.trustService = trustService
this.settings = settings
this.name = endpoint.destination.toBase32().substring(0,8)
this.name = endpoint.destination.toBase32().substring(0,8)
this.reader = new Thread({readLoop()} as Runnable)
this.reader.setName("reader-$name")
this.reader.setDaemon(true)
this.reader = new Thread({readLoop()} as Runnable)
this.reader.setName("reader-$name")
this.reader.setDaemon(true)
this.writer = new Thread({writeLoop()} as Runnable)
this.writer.setName("writer-$name")
this.writer.setDaemon(true)
}
this.writer = new Thread({writeLoop()} as Runnable)
this.writer.setName("writer-$name")
this.writer.setDaemon(true)
}
/**
* starts the connection threads
*/
void start() {
if (!running.compareAndSet(false, true)) {
log.log(Level.WARNING,"$name already running", new Exception())
return
}
reader.start()
writer.start()
}
/**
* starts the connection threads
*/
void start() {
if (!running.compareAndSet(false, true)) {
log.log(Level.WARNING,"$name already running", new Exception())
return
}
reader.start()
writer.start()
}
@Override
public void close() {
if (!running.compareAndSet(true, false)) {
log.log(Level.WARNING, "$name already closed", new Exception() )
return
}
@Override
public void close() {
if (!running.compareAndSet(true, false)) {
log.log(Level.WARNING, "$name already closed", new Exception() )
return
}
log.info("closing $name")
reader.interrupt()
writer.interrupt()
endpoint.close()
eventBus.publish(new DisconnectionEvent(destination: endpoint.destination))
}
reader.interrupt()
writer.interrupt()
endpoint.close()
eventBus.publish(new DisconnectionEvent(destination: endpoint.destination))
}
protected void readLoop() {
try {
while(running.get()) {
read()
}
} catch (SocketTimeoutException e) {
protected void readLoop() {
try {
while(running.get()) {
read()
}
} catch (SocketTimeoutException e) {
} catch (Exception e) {
log.log(Level.WARNING,"unhandled exception in reader",e)
} finally {
close()
}
}
}
protected abstract void read()
protected abstract void read()
protected void writeLoop() {
try {
while(running.get()) {
def message = messages.take()
write(message)
}
} catch (Exception e) {
protected void writeLoop() {
try {
while(running.get()) {
def message = messages.take()
write(message)
}
} catch (Exception e) {
log.log(Level.WARNING, "unhandled exception in writer",e)
} finally {
close()
}
}
}
protected abstract void write(def message);
protected abstract void write(def message);
void sendPing() {
def ping = [:]
ping.type = "Ping"
ping.version = 1
messages.put(ping)
lastPingSentTime = System.currentTimeMillis()
}
void sendPing() {
def ping = [:]
ping.type = "Ping"
ping.version = 1
messages.put(ping)
lastPingSentTime = System.currentTimeMillis()
}
void sendQuery(QueryEvent e) {
def query = [:]
@@ -128,6 +132,8 @@ abstract class Connection implements Closeable {
query.firstHop = e.firstHop
query.keywords = e.searchEvent.getSearchTerms()
query.oobInfohash = e.searchEvent.oobInfohash
query.searchComments = e.searchEvent.searchComments
query.compressedResults = e.searchEvent.compressedResults
if (e.searchEvent.searchHash != null)
query.infohash = Base64.encode(e.searchEvent.searchHash)
query.replyTo = e.replyTo.toBase64()
@@ -136,27 +142,45 @@ abstract class Connection implements Closeable {
messages.put(query)
}
protected void handlePing() {
log.fine("$name received ping")
def pong = [:]
pong.type = "Pong"
pong.version = 1
pong.pongs = hostCache.getGoodHosts(10).collect { d -> d.toBase64() }
messages.put(pong)
}
protected void handlePing() {
log.fine("$name received ping")
def pong = [:]
pong.type = "Pong"
pong.version = 1
pong.pongs = hostCache.getGoodHosts(10).collect { d -> d.toBase64() }
messages.put(pong)
}
protected void handlePong(def pong) {
log.fine("$name received pong")
lastPongReceivedTime = System.currentTimeMillis()
if (pong.pongs == null)
throw new Exception("Pong doesn't have pongs")
pong.pongs.each {
def dest = new Destination(it)
eventBus.publish(new HostDiscoveredEvent(destination: dest))
}
}
protected void handlePong(def pong) {
log.fine("$name received pong")
lastPongReceivedTime = System.currentTimeMillis()
if (pong.pongs == null)
throw new Exception("Pong doesn't have pongs")
pong.pongs.each {
def dest = new Destination(it)
eventBus.publish(new HostDiscoveredEvent(destination: dest))
}
}
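For reference, the keep-alive messages handled here are plain JSON maps; a quick illustration of their shape (the destination strings below are made up):

    import groovy.json.JsonOutput

    // Illustration only: the Ping/Pong maps built by sendPing()/handlePing() above.
    def ping = [type: "Ping", version: 1]
    def pong = [type: "Pong", version: 1,
                pongs: ["b64-destination-1", "b64-destination-2"]] // up to 10 good hosts, Base64-encoded

    assert JsonOutput.toJson(ping) == '{"type":"Ping","version":1}'
    println JsonOutput.toJson(pong)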
private boolean throttleSearch() {
final long now = System.currentTimeMillis()
if (searchTimestamps.size() < SEARCHES) {
searchTimestamps.addLast(now)
return false
}
Long oldest = searchTimestamps.getFirst()
if (now - oldest.longValue() < INTERVAL)
return true
searchTimestamps.addLast(now)
searchTimestamps.removeFirst()
false
}
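The new throttle keeps the timestamps of the last SEARCHES queries and drops a query whenever ten arrive inside INTERVAL milliseconds. A self-contained sketch of the same sliding-window check (the class name is made up, not part of MuWire):

    // Simplified sketch of the sliding-window throttle added to Connection.
    class SearchThrottle {
        private static final int SEARCHES = 10
        private static final long INTERVAL = 1000
        private final LinkedList<Long> timestamps = new LinkedList<>()

        /** returns true if the search should be dropped */
        boolean throttle(long now) {
            if (timestamps.size() < SEARCHES) {
                timestamps.addLast(now)
                return false
            }
            // window is full: drop if the oldest of the last 10 searches is under INTERVAL old
            if (now - timestamps.getFirst() < INTERVAL)
                return true
            timestamps.addLast(now)
            timestamps.removeFirst()
            false
        }
    }

    def t = new SearchThrottle()
    assert !t.throttle(0L)                     // first searches pass
    (1..9).each { assert !t.throttle(it) }
    assert t.throttle(500L)                    // 11th search within the same second is dropped
    assert !t.throttle(2000L)                  // a second later the window has moved on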
protected void handleSearch(def search) {
if (throttleSearch()) {
log.info("dropping excessive search")
return
}
UUID uuid = UUID.fromString(search.uuid)
byte [] infohash = null
if (search.infohash != null) {
@@ -187,11 +211,19 @@ abstract class Connection implements Closeable {
boolean oob = false
if (search.oobInfohash != null)
oob = search.oobInfohash
boolean searchComments = false
if (search.searchComments != null)
searchComments = search.searchComments
boolean compressedResults = false
if (search.compressedResults != null)
compressedResults = search.compressedResults
SearchEvent searchEvent = new SearchEvent(searchTerms : search.keywords,
searchHash : infohash,
uuid : uuid,
oobInfohash : oob)
oobInfohash : oob,
searchComments : searchComments,
compressedResults : compressedResults)
QueryEvent event = new QueryEvent ( searchEvent : searchEvent,
replyTo : replyTo,
originator : originator,

View File

@@ -5,17 +5,23 @@ import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.logging.Level
import java.util.zip.DeflaterOutputStream
import java.util.zip.GZIPInputStream
import java.util.zip.GZIPOutputStream
import java.util.zip.InflaterInputStream
import com.muwire.core.Constants
import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.files.FileManager
import com.muwire.core.hostcache.HostCache
import com.muwire.core.trust.TrustLevel
import com.muwire.core.trust.TrustService
import com.muwire.core.upload.UploadManager
import com.muwire.core.util.DataUtil
import com.muwire.core.search.InvalidSearchResultException
import com.muwire.core.search.ResultsParser
import com.muwire.core.search.ResultsSender
import com.muwire.core.search.SearchManager
import com.muwire.core.search.UIResultBatchEvent
import com.muwire.core.search.UIResultEvent
@@ -24,129 +30,141 @@ import com.muwire.core.search.UnexpectedResultsException
import groovy.json.JsonOutput
import groovy.json.JsonSlurper
import groovy.util.logging.Log
import net.i2p.data.Base64
@Log
class ConnectionAcceptor {
final EventBus eventBus
final UltrapeerConnectionManager manager
final MuWireSettings settings
final I2PAcceptor acceptor
final HostCache hostCache
final TrustService trustService
final EventBus eventBus
final UltrapeerConnectionManager manager
final MuWireSettings settings
final I2PAcceptor acceptor
final HostCache hostCache
final TrustService trustService
final SearchManager searchManager
final UploadManager uploadManager
final FileManager fileManager
final ConnectionEstablisher establisher
final ExecutorService acceptorThread
final ExecutorService handshakerThreads
final ExecutorService acceptorThread
final ExecutorService handshakerThreads
private volatile shutdown
ConnectionAcceptor(EventBus eventBus, UltrapeerConnectionManager manager,
MuWireSettings settings, I2PAcceptor acceptor, HostCache hostCache,
TrustService trustService, SearchManager searchManager, UploadManager uploadManager,
ConnectionEstablisher establisher) {
this.eventBus = eventBus
this.manager = manager
this.settings = settings
this.acceptor = acceptor
this.hostCache = hostCache
this.trustService = trustService
ConnectionAcceptor(EventBus eventBus, UltrapeerConnectionManager manager,
MuWireSettings settings, I2PAcceptor acceptor, HostCache hostCache,
TrustService trustService, SearchManager searchManager, UploadManager uploadManager,
FileManager fileManager, ConnectionEstablisher establisher) {
this.eventBus = eventBus
this.manager = manager
this.settings = settings
this.acceptor = acceptor
this.hostCache = hostCache
this.trustService = trustService
this.searchManager = searchManager
this.fileManager = fileManager
this.uploadManager = uploadManager
this.establisher = establisher
this.establisher = establisher
acceptorThread = Executors.newSingleThreadExecutor { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("acceptor")
rv
}
acceptorThread = Executors.newSingleThreadExecutor { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("acceptor")
rv
}
handshakerThreads = Executors.newCachedThreadPool { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("acceptor-processor-${System.currentTimeMillis()}")
rv
}
}
handshakerThreads = Executors.newCachedThreadPool { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("acceptor-processor-${System.currentTimeMillis()}")
rv
}
}
void start() {
acceptorThread.execute({acceptLoop()} as Runnable)
}
void start() {
acceptorThread.execute({acceptLoop()} as Runnable)
}
void stop() {
void stop() {
shutdown = true
acceptorThread.shutdownNow()
handshakerThreads.shutdownNow()
}
acceptorThread.shutdownNow()
handshakerThreads.shutdownNow()
}
private void acceptLoop() {
private void acceptLoop() {
try {
while(true) {
def incoming = acceptor.accept()
log.info("accepted connection from ${incoming.destination.toBase32()}")
switch(trustService.getLevel(incoming.destination)) {
case TrustLevel.TRUSTED : break
case TrustLevel.NEUTRAL :
if (settings.allowUntrusted())
break
case TrustLevel.DISTRUSTED :
log.info("Disallowing distrusted connection")
incoming.close()
continue
}
handshakerThreads.execute({processIncoming(incoming)} as Runnable)
}
while(true) {
def incoming = acceptor.accept()
log.info("accepted connection from ${incoming.destination.toBase32()}")
switch(trustService.getLevel(incoming.destination)) {
case TrustLevel.TRUSTED : break
case TrustLevel.NEUTRAL :
if (settings.allowUntrusted())
break
case TrustLevel.DISTRUSTED :
log.info("Disallowing distrusted connection")
incoming.close()
continue
}
handshakerThreads.execute({processIncoming(incoming)} as Runnable)
}
} catch (Exception e) {
log.log(Level.WARNING, "exception in accept loop",e)
if (!shutdown)
throw e
}
}
}
private void processIncoming(Endpoint e) {
InputStream is = e.inputStream
try {
int read = is.read()
switch(read) {
case (byte)'M':
private void processIncoming(Endpoint e) {
InputStream is = e.inputStream
try {
int read = is.read()
switch(read) {
case (byte)'M':
if (settings.isLeaf())
throw new IOException("Incoming connection as leaf")
processMuWire(e)
break
case (byte)'G':
processGET(e)
break
processMuWire(e)
break
case (byte)'G':
processGET(e)
break
case (byte)'H':
processHashList(e)
break
case (byte)'P':
processPOST(e)
break
default:
throw new Exception("Invalid read $read")
}
} catch (Exception ex) {
log.log(Level.WARNING, "incoming connection failed",ex)
e.close()
eventBus.publish new ConnectionEvent(endpoint: e, incoming: true, leaf: null, status: ConnectionAttemptStatus.FAILED)
}
}
case (byte)'R':
processRESULTS(e)
break
case (byte)'T':
processTRUST(e)
break
case (byte)'B':
processBROWSE(e)
break
default:
throw new Exception("Invalid read $read")
}
} catch (Exception ex) {
log.log(Level.WARNING, "incoming connection failed",ex)
e.close()
eventBus.publish new ConnectionEvent(endpoint: e, incoming: true, leaf: null, status: ConnectionAttemptStatus.FAILED)
}
}
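Each connection type announces itself with a single leading byte, and the matching handler then verifies the rest of the greeting ('M' + "uWire ", 'G' + "ET ", 'R' + "ESULTS ", and so on); this change adds the 'R', 'T' and 'B' branches. A rough dispatch-table equivalent of the switch, with placeholder handlers rather than the real methods:

    // Illustrative dispatch equivalent to the switch above; handler closures are placeholders.
    def handlers = [
        ('M' as char): { println "MuWire handshake" },
        ('G' as char): { println "GET (upload request)" },
        ('H' as char): { println "hash-list request" },
        ('P' as char): { println "POST (pushed results)" },
        ('R' as char): { println "RESULTS connection" },
        ('T' as char): { println "TRUST list request" },
        ('B' as char): { println "BROWSE shared files" }
    ]

    char first = 'M'                  // in the real code this is endpoint.inputStream.read()
    def handler = handlers[first]
    if (handler == null)
        throw new IOException("Invalid read $first")
    handler()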
private void processMuWire(Endpoint e) {
byte[] uWire = "uWire ".bytes
for (int i = 0; i < uWire.length; i++) {
int read = e.inputStream.read()
if (read != uWire[i]) {
throw new IOException("unexpected value $read at position $i")
}
}
private void processMuWire(Endpoint e) {
byte[] uWire = "uWire ".bytes
for (int i = 0; i < uWire.length; i++) {
int read = e.inputStream.read()
if (read != uWire[i]) {
throw new IOException("unexpected value $read at position $i")
}
}
byte[] type = new byte[4]
DataInputStream dis = new DataInputStream(e.inputStream)
dis.readFully(type)
byte[] type = new byte[4]
DataInputStream dis = new DataInputStream(e.inputStream)
dis.readFully(type)
if (type == "leaf".bytes)
handleIncoming(e, true)
@@ -156,44 +174,44 @@ class ConnectionAcceptor {
throw new IOException("unknown connection type $type")
}
private void handleIncoming(Endpoint e, boolean leaf) {
boolean accept = !manager.isConnected(e.destination) &&
private void handleIncoming(Endpoint e, boolean leaf) {
boolean accept = !manager.isConnected(e.destination) &&
!establisher.isInProgress(e.destination) &&
(leaf ? manager.hasLeafSlots() : manager.hasPeerSlots())
if (accept) {
log.info("accepting connection, leaf:$leaf")
e.outputStream.write("OK".bytes)
e.outputStream.flush()
def wrapped = new Endpoint(e.destination, new InflaterInputStream(e.inputStream), new DeflaterOutputStream(e.outputStream, true), e.toClose)
eventBus.publish(new ConnectionEvent(endpoint: wrapped, incoming: true, leaf: leaf, status: ConnectionAttemptStatus.SUCCESSFUL))
} else {
log.info("rejecting connection, leaf:$leaf")
e.outputStream.write("REJECT".bytes)
def hosts = hostCache.getGoodHosts(10)
if (!hosts.isEmpty()) {
def json = [:]
json.tryHosts = hosts.collect { d -> d.toBase64() }
json = JsonOutput.toJson(json)
def os = new DataOutputStream(e.outputStream)
os.writeShort(json.bytes.length)
os.write(json.bytes)
}
e.outputStream.flush()
e.close()
eventBus.publish(new ConnectionEvent(endpoint: e, incoming: true, leaf: leaf, status: ConnectionAttemptStatus.REJECTED))
}
}
if (accept) {
log.info("accepting connection, leaf:$leaf")
e.outputStream.write("OK".bytes)
e.outputStream.flush()
def wrapped = new Endpoint(e.destination, new InflaterInputStream(e.inputStream), new DeflaterOutputStream(e.outputStream, true), e.toClose)
eventBus.publish(new ConnectionEvent(endpoint: wrapped, incoming: true, leaf: leaf, status: ConnectionAttemptStatus.SUCCESSFUL))
} else {
log.info("rejecting connection, leaf:$leaf")
e.outputStream.write("REJECT".bytes)
def hosts = hostCache.getGoodHosts(10)
if (!hosts.isEmpty()) {
def json = [:]
json.tryHosts = hosts.collect { d -> d.toBase64() }
json = JsonOutput.toJson(json)
def os = new DataOutputStream(e.outputStream)
os.writeShort(json.bytes.length)
os.write(json.bytes)
}
e.outputStream.flush()
e.close()
eventBus.publish(new ConnectionEvent(endpoint: e, incoming: true, leaf: leaf, status: ConnectionAttemptStatus.REJECTED))
}
}
private void processGET(Endpoint e) {
private void processGET(Endpoint e) {
byte[] et = new byte[3]
final DataInputStream dis = new DataInputStream(e.getInputStream())
dis.readFully(et)
if (et != "ET ".getBytes(StandardCharsets.US_ASCII))
throw new IOException("Invalid GET connection")
uploadManager.processGET(e)
}
}
private void processHashList(Endpoint e) {
byte[] ashList = new byte[8]
@@ -225,7 +243,7 @@ class ConnectionAcceptor {
Persona sender = new Persona(dis)
if (sender.destination != e.getDestination())
throw new IOException("Sender destination mismatch expected $e.getDestination(), got $sender.destination")
throw new IOException("Sender destination mismatch expected ${e.getDestination()}, got $sender.destination")
int nResults = dis.readUnsignedShort()
UIResultEvent[] results = new UIResultEvent[nResults]
for (int i = 0; i < nResults; i++) {
@@ -243,4 +261,146 @@ class ConnectionAcceptor {
}
}
private void processRESULTS(Endpoint e) {
InputStream is = e.getInputStream()
DataInputStream dis = new DataInputStream(is)
byte[] esults = new byte[7]
dis.readFully(esults)
if (esults != "ESULTS ".getBytes(StandardCharsets.US_ASCII))
throw new IOException("Invalid RESULTS connection")
JsonSlurper slurper = new JsonSlurper()
try {
String uuid = DataUtil.readTillRN(dis)
UUID resultsUUID = UUID.fromString(uuid)
if (!searchManager.hasLocalSearch(resultsUUID))
throw new UnexpectedResultsException(resultsUUID.toString())
// parse all headers
Map<String,String> headers = new HashMap<>()
String header
while((header = DataUtil.readTillRN(is)) != "" && headers.size() < Constants.MAX_HEADERS) {
int colon = header.indexOf(':')
if (colon == -1 || colon == header.length() - 1)
throw new IOException("invalid header $header")
String key = header.substring(0, colon)
String value = header.substring(colon + 1)
headers[key] = value.trim()
}
if (!headers.containsKey("Sender"))
throw new IOException("No Sender header")
if (!headers.containsKey("Count"))
throw new IOException("No Count header")
byte [] personaBytes = Base64.decode(headers['Sender'])
Persona sender = new Persona(new ByteArrayInputStream(personaBytes))
if (sender.destination != e.getDestination())
throw new IOException("Sender destination mismatch expected ${e.getDestination()}, got $sender.destination")
int nResults = Integer.parseInt(headers['Count'])
if (nResults > Constants.MAX_RESULTS)
throw new IOException("too many results $nResults")
dis = new DataInputStream(new GZIPInputStream(dis))
UIResultEvent[] results = new UIResultEvent[nResults]
for (int i = 0; i < nResults; i++) {
int jsonSize = dis.readUnsignedShort()
byte [] payload = new byte[jsonSize]
dis.readFully(payload)
def json = slurper.parse(payload)
results[i] = ResultsParser.parse(sender, resultsUUID, json)
}
eventBus.publish(new UIResultBatchEvent(uuid: resultsUUID, results: results))
} catch (IOException bad) {
log.log(Level.WARNING, "failed to process RESULTS", bad)
} finally {
e.close()
}
}
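processRESULTS reads the UUID line, then "Key: Value" headers until a blank line (requiring at least Sender and Count), and finally Count gzip-compressed, length-prefixed JSON results. A hypothetical helper showing just the header-splitting rule used above (not part of the codebase):

    // Minimal sketch of the header parsing rule: "Key: Value" per line,
    // colon required and not last, value trimmed, header count capped.
    Map<String,String> parseHeaders(List<String> lines, int maxHeaders) {
        Map<String,String> headers = [:]
        for (String header in lines) {
            if (header == "" || headers.size() >= maxHeaders)
                break
            int colon = header.indexOf(':')
            if (colon == -1 || colon == header.length() - 1)
                throw new IOException("invalid header $header")
            headers[header.substring(0, colon)] = header.substring(colon + 1).trim()
        }
        headers
    }

    def headers = parseHeaders(["Sender: abcd", "Count: 2", ""], 16)
    assert headers == [Sender: "abcd", Count: "2"]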
private void processBROWSE(Endpoint e) {
try {
byte [] rowse = new byte[7]
DataInputStream dis = new DataInputStream(e.getInputStream())
dis.readFully(rowse)
if (rowse != "ROWSE\r\n".getBytes(StandardCharsets.US_ASCII))
throw new IOException("Invalid BROWSE connection")
String header
while ((header = DataUtil.readTillRN(dis)) != ""); // ignore headers for now
OutputStream os = e.getOutputStream()
if (!settings.browseFiles) {
os.write("403 Not Allowed\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
os.flush()
e.close()
return
}
os.write("200 OK\r\n".getBytes(StandardCharsets.US_ASCII))
def sharedFiles = fileManager.getSharedFiles().values()
os.write("Count: ${sharedFiles.size()}\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
DataOutputStream dos = new DataOutputStream(new GZIPOutputStream(os))
JsonOutput jsonOutput = new JsonOutput()
sharedFiles.each {
def obj = ResultsSender.sharedFileToObj(it, false)
def json = jsonOutput.toJson(obj)
dos.writeShort((short)json.length())
dos.write(json.getBytes(StandardCharsets.US_ASCII))
}
dos.flush()
dos.close()
} finally {
e.close()
}
}
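The browse (and results) payload is a gzip stream of records, each an unsigned-short length followed by that many bytes of JSON. A minimal round-trip sketch of that framing, independent of the MuWire classes:

    import java.nio.charset.StandardCharsets
    import java.util.zip.GZIPInputStream
    import java.util.zip.GZIPOutputStream

    // Each record: unsigned-short byte length, then that many bytes of ASCII JSON.
    byte[] encode(List<String> jsons) {
        def baos = new ByteArrayOutputStream()
        def dos = new DataOutputStream(new GZIPOutputStream(baos))
        jsons.each { json ->
            byte[] bytes = json.getBytes(StandardCharsets.US_ASCII)
            dos.writeShort((short) bytes.length)
            dos.write(bytes)
        }
        dos.close()                    // finishes the gzip stream
        baos.toByteArray()
    }

    List<String> decode(byte[] payload, int count) {
        def dis = new DataInputStream(new GZIPInputStream(new ByteArrayInputStream(payload)))
        (1..count).collect {
            byte[] record = new byte[dis.readUnsignedShort()]
            dis.readFully(record)
            new String(record, StandardCharsets.US_ASCII)
        }
    }

    def payload = encode(['{"name":"a"}', '{"name":"b"}'])
    assert decode(payload, 2) == ['{"name":"a"}', '{"name":"b"}']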
private void processTRUST(Endpoint e) {
try {
byte[] RUST = new byte[6]
DataInputStream dis = new DataInputStream(e.getInputStream())
dis.readFully(RUST)
if (RUST != "RUST\r\n".getBytes(StandardCharsets.US_ASCII))
throw new IOException("Invalid TRUST connection")
String header
while ((header = DataUtil.readTillRN(dis)) != ""); // ignore headers for now
OutputStream os = e.getOutputStream()
if (!settings.allowTrustLists) {
os.write("403 Not Allowed\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
os.flush()
e.close()
return
}
os.write("200 OK\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
List<Persona> good = new ArrayList<>(trustService.good.values())
int size = Math.min(Short.MAX_VALUE * 2, good.size())
good = good.subList(0, size)
DataOutputStream dos = new DataOutputStream(os)
dos.writeShort(size)
good.each {
it.write(dos)
}
List<Persona> bad = new ArrayList<>(trustService.bad.values())
size = Math.min(Short.MAX_VALUE * 2, bad.size())
bad = bad.subList(0, size)
dos.writeShort(size)
bad.each {
it.write(dos)
}
dos.flush()
} finally {
e.close()
}
}
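Both trust lists are written as an unsigned-short count followed by that many Persona records, so each list is capped at Short.MAX_VALUE * 2 entries to keep the count representable in the 2-byte field:

    // The count travels in a DataOutputStream.writeShort() field and is read back
    // with readUnsignedShort(), hence the cap below.
    int cap = Short.MAX_VALUE * 2            // 65534
    assert cap <= 65535                      // largest value readUnsignedShort() can return
    int size = Math.min(cap, 100000)         // a hypothetical 100k-entry trust list gets truncated
    assert size == 65534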
}

View File

@@ -22,162 +22,167 @@ import net.i2p.util.ConcurrentHashSet
@Log
class ConnectionEstablisher {
private static final int CONCURRENT = 4
private static final int CONCURRENT = 4
final EventBus eventBus
final I2PConnector i2pConnector
final MuWireSettings settings
final ConnectionManager connectionManager
final HostCache hostCache
final EventBus eventBus
final I2PConnector i2pConnector
final MuWireSettings settings
final ConnectionManager connectionManager
final HostCache hostCache
final Timer timer
final ExecutorService executor
final Timer timer
final ExecutorService executor, closer
final Set inProgress = new ConcurrentHashSet()
final Set inProgress = new ConcurrentHashSet()
ConnectionEstablisher(){}
ConnectionEstablisher(EventBus eventBus, I2PConnector i2pConnector, MuWireSettings settings,
ConnectionManager connectionManager, HostCache hostCache) {
this.eventBus = eventBus
this.i2pConnector = i2pConnector
this.settings = settings
this.connectionManager = connectionManager
this.hostCache = hostCache
timer = new Timer("connection-timer",true)
executor = Executors.newFixedThreadPool(CONCURRENT, { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("connector-${System.currentTimeMillis()}")
rv
} as ThreadFactory)
}
ConnectionEstablisher(EventBus eventBus, I2PConnector i2pConnector, MuWireSettings settings,
ConnectionManager connectionManager, HostCache hostCache) {
this.eventBus = eventBus
this.i2pConnector = i2pConnector
this.settings = settings
this.connectionManager = connectionManager
this.hostCache = hostCache
timer = new Timer("connection-timer",true)
executor = Executors.newFixedThreadPool(CONCURRENT, { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("connector-${System.currentTimeMillis()}")
rv
} as ThreadFactory)
void start() {
timer.schedule({connectIfNeeded()} as TimerTask, 100, 1000)
}
closer = Executors.newSingleThreadExecutor()
}
void stop() {
timer.cancel()
executor.shutdownNow()
}
void start() {
timer.schedule({connectIfNeeded()} as TimerTask, 100, 1000)
}
private void connectIfNeeded() {
if (!connectionManager.needsConnections())
return
if (inProgress.size() >= CONCURRENT)
return
void stop() {
timer.cancel()
executor.shutdownNow()
closer.shutdown()
}
def toTry = null
for (int i = 0; i < 5; i++) {
toTry = hostCache.getHosts(1)
if (toTry.isEmpty())
return
toTry = toTry[0]
if (!connectionManager.isConnected(toTry) &&
!inProgress.contains(toTry)) {
break
}
}
if (toTry == null)
return
if (!connectionManager.isConnected(toTry) && inProgress.add(toTry))
executor.execute({connect(toTry)} as Runnable)
}
private void connectIfNeeded() {
if (!connectionManager.needsConnections())
return
if (inProgress.size() >= CONCURRENT)
return
private void connect(Destination toTry) {
log.info("starting connect to ${toTry.toBase32()}")
try {
def endpoint = i2pConnector.connect(toTry)
log.info("successful transport connect to ${toTry.toBase32()}")
def toTry = null
for (int i = 0; i < 5; i++) {
toTry = hostCache.getHosts(1)
if (toTry.isEmpty())
return
toTry = toTry[0]
if (!connectionManager.isConnected(toTry) &&
!inProgress.contains(toTry)) {
break
}
}
if (toTry == null)
return
if (!connectionManager.isConnected(toTry) && inProgress.add(toTry))
executor.execute({connect(toTry)} as Runnable)
}
// outgoing handshake
endpoint.outputStream.write("MuWire ".bytes)
def type = settings.isLeaf() ? "leaf" : "peer"
endpoint.outputStream.write(type.bytes)
endpoint.outputStream.flush()
private void connect(Destination toTry) {
log.info("starting connect to ${toTry.toBase32()}")
try {
def endpoint = i2pConnector.connect(toTry)
log.info("successful transport connect to ${toTry.toBase32()}")
InputStream is = endpoint.inputStream
int read = is.read()
if (read == -1) {
fail endpoint
return
}
switch(read) {
case (byte)'O': readK(endpoint); break
case (byte)'R': readEJECT(endpoint); break
default :
log.warning("unknown response $read")
fail endpoint
}
} catch (Exception e) {
log.log(Level.WARNING, "Couldn't connect to ${toTry.toBase32()}", e)
def endpoint = new Endpoint(toTry, null, null, null)
fail(endpoint)
} finally {
inProgress.remove(toTry)
}
}
// outgoing handshake
endpoint.outputStream.write("MuWire ".bytes)
def type = settings.isLeaf() ? "leaf" : "peer"
endpoint.outputStream.write(type.bytes)
endpoint.outputStream.flush()
private void fail(Endpoint endpoint) {
endpoint.close()
eventBus.publish(new ConnectionEvent(endpoint: endpoint, incoming: false, leaf: false, status: ConnectionAttemptStatus.FAILED))
}
InputStream is = endpoint.inputStream
int read = is.read()
if (read == -1) {
fail endpoint
return
}
switch(read) {
case (byte)'O': readK(endpoint); break
case (byte)'R': readEJECT(endpoint); break
default :
log.warning("unknown response $read")
fail endpoint
}
} catch (Exception e) {
log.log(Level.WARNING, "Couldn't connect to ${toTry.toBase32()}", e)
def endpoint = new Endpoint(toTry, null, null, null)
fail(endpoint)
} finally {
inProgress.remove(toTry)
}
}
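The outgoing handshake is the literal string "MuWire " followed by either "leaf" or "peer"; the reply is either "OK" (after which both streams are wrapped in deflate/inflate) or "REJECT", optionally followed by a length-prefixed JSON document of hosts to try. A small byte-level illustration (the streams here are stand-ins, not real I2P sockets):

    import java.nio.charset.StandardCharsets

    def out = new ByteArrayOutputStream()
    out.write("MuWire ".getBytes(StandardCharsets.US_ASCII))
    out.write("peer".getBytes(StandardCharsets.US_ASCII))        // "leaf" instead when settings.isLeaf()
    assert new String(out.toByteArray(), StandardCharsets.US_ASCII) == "MuWire peer"

    // The accepting side answers "OK" or "REJECT"; rejection may carry an
    // unsigned-short length plus a JSON document whose tryHosts lists alternatives.
    def reply = new ByteArrayInputStream("OK".getBytes(StandardCharsets.US_ASCII))
    assert ((char) reply.read()) == ('O' as char)    // readK() is entered on 'O'...
    assert ((char) reply.read()) == ('K' as char)    // ...and then requires 'K'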
private void readK(Endpoint e) {
int read = e.inputStream.read()
if (read != 'K') {
log.warning("unknown response after O: $read")
fail e
return
}
private void fail(Endpoint endpoint) {
closer.execute {
endpoint.close()
eventBus.publish(new ConnectionEvent(endpoint: endpoint, incoming: false, leaf: false, status: ConnectionAttemptStatus.FAILED))
} as Runnable
}
log.info("connection to ${e.destination.toBase32()} established")
private void readK(Endpoint e) {
int read = e.inputStream.read()
if (read != 'K') {
log.warning("unknown response after O: $read")
fail e
return
}
// wrap into deflater / inflater streams and publish
def wrapped = new Endpoint(e.destination, new InflaterInputStream(e.inputStream), new DeflaterOutputStream(e.outputStream, true), e.toClose)
eventBus.publish(new ConnectionEvent(endpoint: wrapped, incoming: false, leaf: false, status: ConnectionAttemptStatus.SUCCESSFUL))
}
log.info("connection to ${e.destination.toBase32()} established")
private void readEJECT(Endpoint e) {
byte[] eject = "EJECT".bytes
for (int i = 0; i < eject.length; i++) {
int read = e.inputStream.read()
if (read != eject[i]) {
log.warning("Unknown response after R at position $i")
fail e
return
}
}
log.info("connection to ${e.destination.toBase32()} rejected")
// wrap into deflater / inflater streams and publish
def wrapped = new Endpoint(e.destination, new InflaterInputStream(e.inputStream), new DeflaterOutputStream(e.outputStream, true), e.toClose)
eventBus.publish(new ConnectionEvent(endpoint: wrapped, incoming: false, leaf: false, status: ConnectionAttemptStatus.SUCCESSFUL))
}
private void readEJECT(Endpoint e) {
byte[] eject = "EJECT".bytes
for (int i = 0; i < eject.length; i++) {
int read = e.inputStream.read()
if (read != eject[i]) {
log.warning("Unknown response after R at position $i")
fail e
return
}
}
log.info("connection to ${e.destination.toBase32()} rejected")
eventBus.publish(new ConnectionEvent(endpoint: e, incoming: false, leaf: false, status: ConnectionAttemptStatus.REJECTED))
try {
DataInputStream dais = new DataInputStream(e.inputStream)
int payloadSize = dais.readUnsignedShort()
byte[] payload = new byte[payloadSize]
dais.readFully(payload)
eventBus.publish(new ConnectionEvent(endpoint: e, incoming: false, leaf: false, status: ConnectionAttemptStatus.REJECTED))
try {
DataInputStream dais = new DataInputStream(e.inputStream)
int payloadSize = dais.readUnsignedShort()
byte[] payload = new byte[payloadSize]
dais.readFully(payload)
def json = new JsonSlurper()
json = json.parse(payload)
def json = new JsonSlurper()
json = json.parse(payload)
if (json.tryHosts == null) {
log.warning("post-rejection json didn't contain hosts to try")
return
}
if (json.tryHosts == null) {
log.warning("post-rejection json didn't contain hosts to try")
return
}
json.tryHosts.asList().each {
Destination suggested = new Destination(it)
eventBus.publish(new HostDiscoveredEvent(destination: suggested))
}
} catch (Exception ignore) {
log.log(Level.WARNING,"Problem parsing post-rejection payload",ignore)
} finally {
// the end
e.close()
}
}
json.tryHosts.asList().each {
Destination suggested = new Destination(it)
eventBus.publish(new HostDiscoveredEvent(destination: suggested))
}
} catch (Exception ignore) {
log.log(Level.WARNING,"Problem parsing post-rejection payload",ignore)
} finally {
// the end
closer.execute({e.close()} as Runnable)
}
}
public boolean isInProgress(Destination d) {
inProgress.contains(d)

View File

@@ -6,14 +6,14 @@ import net.i2p.data.Destination
class ConnectionEvent extends Event {
Endpoint endpoint
boolean incoming
Boolean leaf // can be null if unknown
ConnectionAttemptStatus status

@Override
public String toString() {
    "ConnectionEvent ${super.toString()} endpoint: $endpoint incoming: $incoming leaf : $leaf status : $status"
}
}

View File

@@ -12,63 +12,63 @@ import net.i2p.data.Destination
abstract class ConnectionManager {
private static final int PING_TIME = 20000
private static final int PING_TIME = 20000
final EventBus eventBus
final EventBus eventBus
private final Timer timer
private final Timer timer
protected final HostCache hostCache
protected final HostCache hostCache
protected final Persona me
protected final MuWireSettings settings
ConnectionManager() {}
ConnectionManager() {}
ConnectionManager(EventBus eventBus, Persona me, HostCache hostCache, MuWireSettings settings) {
this.eventBus = eventBus
ConnectionManager(EventBus eventBus, Persona me, HostCache hostCache, MuWireSettings settings) {
this.eventBus = eventBus
this.me = me
this.hostCache = hostCache
this.hostCache = hostCache
this.settings = settings
this.timer = new Timer("connections-pinger",true)
}
this.timer = new Timer("connections-pinger",true)
}
void start() {
timer.schedule({sendPings()} as TimerTask, 1000,1000)
}
void start() {
timer.schedule({sendPings()} as TimerTask, 1000,1000)
}
void stop() {
timer.cancel()
getConnections().each { it.close() }
}
void stop() {
timer.cancel()
getConnections().each { it.close() }
}
void onTrustEvent(TrustEvent e) {
if (e.level == TrustLevel.DISTRUSTED)
drop(e.persona.destination)
}
void onTrustEvent(TrustEvent e) {
if (e.level == TrustLevel.DISTRUSTED)
drop(e.persona.destination)
}
abstract void drop(Destination d)
abstract void drop(Destination d)
abstract Collection<Connection> getConnections()
abstract Collection<Connection> getConnections()
protected abstract int getDesiredConnections()
protected abstract int getDesiredConnections()
boolean needsConnections() {
return getConnections().size() < getDesiredConnections()
}
boolean needsConnections() {
return getConnections().size() < getDesiredConnections()
}
abstract boolean isConnected(Destination d)
abstract boolean isConnected(Destination d)
abstract void onConnectionEvent(ConnectionEvent e)
abstract void onConnectionEvent(ConnectionEvent e)
abstract void onDisconnectionEvent(DisconnectionEvent e)
abstract void onDisconnectionEvent(DisconnectionEvent e)
abstract void shutdown()
protected void sendPings() {
final long now = System.currentTimeMillis()
getConnections().each {
if (now - it.lastPingSentTime > PING_TIME)
it.sendPing()
}
}
protected void sendPings() {
final long now = System.currentTimeMillis()
getConnections().each {
if (now - it.lastPingSentTime > PING_TIME)
it.sendPing()
}
}
}
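The pinger timer fires every second, but a connection is only pinged once its previous ping is more than PING_TIME (20 s) old; a trivial illustration of the check:

    final int PING_TIME = 20000
    long lastPingSentTime = 25000
    long now = 30000
    assert !(now - lastPingSentTime > PING_TIME)   // pinged 5 s ago: skip
    now = 46000
    assert now - lastPingSentTime > PING_TIME      // 21 s elapsed: send another ping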

View File

@@ -6,10 +6,10 @@ import net.i2p.data.Destination
class DisconnectionEvent extends Event {
Destination destination

@Override
public String toString() {
    "DisconnectionEvent ${super.toString()} destination:${destination.toBase32()}"
}
}

View File

@@ -8,39 +8,39 @@ import net.i2p.data.Destination
@Log
class Endpoint implements Closeable {
final Destination destination
final InputStream inputStream
final OutputStream outputStream
final def toClose

private final AtomicBoolean closed = new AtomicBoolean()

Endpoint(Destination destination, InputStream inputStream, OutputStream outputStream, def toClose) {
    this.destination = destination
    this.inputStream = inputStream
    this.outputStream = outputStream
    this.toClose = toClose
}

@Override
public void close() {
    if (!closed.compareAndSet(false, true)) {
        log.log(Level.WARNING,"Close loop detected for ${destination.toBase32()}", new Exception())
        return
    }
    if (inputStream != null) {
        try {inputStream.close()} catch (Exception ignore) {}
    }
    if (outputStream != null) {
        try {outputStream.close()} catch (Exception ignore) {}
    }
    if (toClose != null) {
        try {toClose.reset()} catch (Exception ignore) {}
    }
}

@Override
public String toString() {
    "destination: ${destination.toBase32()}"
}
}

View File

@@ -5,18 +5,18 @@ import net.i2p.client.streaming.I2PSocketManager
class I2PAcceptor {
final I2PSocketManager socketManager
final I2PServerSocket serverSocket
I2PAcceptor() {}

I2PAcceptor(I2PSocketManager socketManager) {
    this.socketManager = socketManager
    this.serverSocket = socketManager.getServerSocket()
}

Endpoint accept() {
    def socket = serverSocket.accept()
    new Endpoint(socket.getPeerDestination(), socket.getInputStream(), socket.getOutputStream(), socket)
}
}

View File

@@ -5,17 +5,17 @@ import net.i2p.data.Destination
class I2PConnector {
final I2PSocketManager socketManager
I2PConnector() {}

I2PConnector(I2PSocketManager socketManager) {
    this.socketManager = socketManager
}

Endpoint connect(Destination dest) {
    def socket = socketManager.connect(dest)
    new Endpoint(dest, socket.getInputStream(), socket.getOutputStream(), socket)
}
}

View File

@@ -17,21 +17,21 @@ import net.i2p.data.Destination
*/
class LeafConnection extends Connection {
public LeafConnection(EventBus eventBus, Endpoint endpoint, HostCache hostCache,
        TrustService trustService, MuWireSettings settings) {
    super(eventBus, endpoint, true, hostCache, trustService, settings);
}

@Override
protected void read() {
    // TODO Auto-generated method stub
}

@Override
protected void write(Object message) {
    // TODO Auto-generated method stub
}
}

View File

@@ -14,21 +14,21 @@ import net.i2p.data.Destination
@Log
class LeafConnectionManager extends ConnectionManager {
final int maxConnections
final int maxConnections
final Map<Destination, UltrapeerConnection> connections = new ConcurrentHashMap()
final Map<Destination, UltrapeerConnection> connections = new ConcurrentHashMap()
public LeafConnectionManager(EventBus eventBus, Persona me, int maxConnections,
public LeafConnectionManager(EventBus eventBus, Persona me, int maxConnections,
HostCache hostCache, MuWireSettings settings) {
super(eventBus, me, hostCache, settings)
this.maxConnections = maxConnections
}
super(eventBus, me, hostCache, settings)
this.maxConnections = maxConnections
}
@Override
public void drop(Destination d) {
// TODO Auto-generated method stub
@Override
public void drop(Destination d) {
// TODO Auto-generated method stub
}
}
void onQueryEvent(QueryEvent e) {
if (me.destination == e.receivedOn) {
@@ -37,41 +37,41 @@ class LeafConnectionManager extends ConnectionManager {
}
@Override
public Collection<Connection> getConnections() {
connections.values()
}
@Override
public Collection<Connection> getConnections() {
connections.values()
}
@Override
protected int getDesiredConnections() {
return maxConnections;
}
@Override
protected int getDesiredConnections() {
return maxConnections;
}
@Override
public boolean isConnected(Destination d) {
connections.containsKey(d)
}
@Override
public boolean isConnected(Destination d) {
connections.containsKey(d)
}
@Override
public void onConnectionEvent(ConnectionEvent e) {
if (e.incoming || e.leaf) {
log.severe("Got inconsistent event as a leaf! $e")
return
}
if (e.status != ConnectionAttemptStatus.SUCCESSFUL)
return
@Override
public void onConnectionEvent(ConnectionEvent e) {
if (e.incoming || e.leaf) {
log.severe("Got inconsistent event as a leaf! $e")
return
}
if (e.status != ConnectionAttemptStatus.SUCCESSFUL)
return
Connection c = new UltrapeerConnection(eventBus, e.endpoint)
connections.put(e.endpoint.destination, c)
c.start()
}
Connection c = new UltrapeerConnection(eventBus, e.endpoint)
connections.put(e.endpoint.destination, c)
c.start()
}
@Override
public void onDisconnectionEvent(DisconnectionEvent e) {
def removed = connections.remove(e.destination)
if (removed == null)
log.severe("removed destination not present in connection manager ${e.destination.toBase32()}")
}
@Override
public void onDisconnectionEvent(DisconnectionEvent e) {
def removed = connections.remove(e.destination)
if (removed == null)
log.severe("removed destination not present in connection manager ${e.destination.toBase32()}")
}
@Override
void shutdown() {

View File

@@ -21,62 +21,62 @@ import net.i2p.data.Destination
@Log
class PeerConnection extends Connection {
private final DataInputStream dis
private final DataOutputStream dos
private final DataInputStream dis
private final DataOutputStream dos
private final byte[] readHeader = new byte[3]
private final byte[] writeHeader = new byte[3]
private final byte[] readHeader = new byte[3]
private final byte[] writeHeader = new byte[3]
private final JsonSlurper slurper = new JsonSlurper()
private final JsonSlurper slurper = new JsonSlurper()
public PeerConnection(EventBus eventBus, Endpoint endpoint,
boolean incoming, HostCache hostCache, TrustService trustService,
public PeerConnection(EventBus eventBus, Endpoint endpoint,
boolean incoming, HostCache hostCache, TrustService trustService,
MuWireSettings settings) {
super(eventBus, endpoint, incoming, hostCache, trustService, settings)
this.dis = new DataInputStream(endpoint.inputStream)
this.dos = new DataOutputStream(endpoint.outputStream)
}
super(eventBus, endpoint, incoming, hostCache, trustService, settings)
this.dis = new DataInputStream(endpoint.inputStream)
this.dos = new DataOutputStream(endpoint.outputStream)
}
@Override
protected void read() {
dis.readFully(readHeader)
int length = DataUtil.readLength(readHeader)
log.fine("$name read length $length")
@Override
protected void read() {
dis.readFully(readHeader)
int length = DataUtil.readLength(readHeader)
log.fine("$name read length $length")
byte[] payload = new byte[length]
dis.readFully(payload)
byte[] payload = new byte[length]
dis.readFully(payload)
if ((readHeader[0] & (byte)0x80) == 0x80) {
// TODO process binary
} else {
def json = slurper.parse(payload)
if (json.type == null)
throw new Exception("missing json type")
switch(json.type) {
case "Ping" : handlePing(); break;
case "Pong" : handlePong(json); break;
if ((readHeader[0] & (byte)0x80) == 0x80) {
// TODO process binary
} else {
def json = slurper.parse(payload)
if (json.type == null)
throw new Exception("missing json type")
switch(json.type) {
case "Ping" : handlePing(); break;
case "Pong" : handlePong(json); break;
case "Search": handleSearch(json); break
default :
throw new Exception("unknown json type ${json.type}")
}
}
}
default :
throw new Exception("unknown json type ${json.type}")
}
}
}
@Override
protected void write(Object message) {
byte[] payload
if (message instanceof Map) {
payload = JsonOutput.toJson(message).bytes
DataUtil.packHeader(payload.length, writeHeader)
log.fine "$name writing message type ${message.type} length $payload.length"
writeHeader[0] &= (byte)0x7F
} else {
// TODO: write binary
}
@Override
protected void write(Object message) {
byte[] payload
if (message instanceof Map) {
payload = JsonOutput.toJson(message).bytes
DataUtil.packHeader(payload.length, writeHeader)
log.fine "$name writing message type ${message.type} length $payload.length"
writeHeader[0] &= (byte)0x7F
} else {
// TODO: write binary
}
dos.write(writeHeader)
dos.write(payload)
dos.flush()
}
dos.write(writeHeader)
dos.write(payload)
dos.flush()
}
}
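Each peer message is a 3-byte header carrying the payload length, with the top bit of the first byte distinguishing binary payloads from JSON (which is why write() clears it with 0x7F). A hedged sketch of one plausible layout consistent with that mask; this is an assumption for illustration, not necessarily DataUtil.packHeader's exact encoding:

    // Assumed layout: 23-bit big-endian length, top bit of byte 0 reserved as the binary flag.
    byte[] pack(int length, boolean binary) {
        byte[] header = new byte[3]
        header[0] = (byte) ((length >> 16) & 0x7F)
        if (binary)
            header[0] = (byte) (header[0] | 0x80)
        header[1] = (byte) ((length >> 8) & 0xFF)
        header[2] = (byte) (length & 0xFF)
        header
    }

    int readLength(byte[] header) {
        ((header[0] & 0x7F) << 16) | ((header[1] & 0xFF) << 8) | (header[2] & 0xFF)
    }

    byte[] h = pack(12345, false)
    assert readLength(h) == 12345
    assert (h[0] & 0x80) == 0          // JSON message: top bit cleared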

View File

@@ -17,30 +17,30 @@ import net.i2p.data.Destination
*/
class UltrapeerConnection extends Connection {
public UltrapeerConnection(EventBus eventBus, Endpoint endpoint, HostCache hostCache, TrustService trustService) {
super(eventBus, endpoint, false, hostCache, trustService)
}
@Override
protected void read() {
    // TODO Auto-generated method stub
}

@Override
protected void write(Object message) {
    if (message instanceof Map) {
        writeJsonMessage(message)
    } else {
        writeBinaryMessage(message)
    }
}

private void writeJsonMessage(def message) {
}

private void writeBinaryMessage(def message) {
}
}

View File

@@ -16,26 +16,26 @@ import net.i2p.data.Destination
@Log
class UltrapeerConnectionManager extends ConnectionManager {
final int maxPeers, maxLeafs
final int maxPeers, maxLeafs
final TrustService trustService
final Map<Destination, PeerConnection> peerConnections = new ConcurrentHashMap()
final Map<Destination, LeafConnection> leafConnections = new ConcurrentHashMap()
final Map<Destination, PeerConnection> peerConnections = new ConcurrentHashMap()
final Map<Destination, LeafConnection> leafConnections = new ConcurrentHashMap()
UltrapeerConnectionManager() {}
UltrapeerConnectionManager() {}
public UltrapeerConnectionManager(EventBus eventBus, Persona me, int maxPeers, int maxLeafs,
public UltrapeerConnectionManager(EventBus eventBus, Persona me, int maxPeers, int maxLeafs,
HostCache hostCache, TrustService trustService, MuWireSettings settings) {
super(eventBus, me, hostCache, settings)
this.maxPeers = maxPeers
this.maxLeafs = maxLeafs
super(eventBus, me, hostCache, settings)
this.maxPeers = maxPeers
this.maxLeafs = maxLeafs
this.trustService = trustService
}
@Override
public void drop(Destination d) {
peerConnections.get(d)?.close()
}
@Override
public void drop(Destination d) {
peerConnections.get(d)?.close()
leafConnections.get(d)?.close()
}
}
void onQueryEvent(QueryEvent e) {
forwardQueryToLeafs(e)
@@ -50,57 +50,57 @@ class UltrapeerConnectionManager extends ConnectionManager {
}
}
@Override
public Collection<Connection> getConnections() {
def rv = new ArrayList(peerConnections.size() + leafConnections.size())
rv.addAll(peerConnections.values())
rv.addAll(leafConnections.values())
rv
}
@Override
public Collection<Connection> getConnections() {
def rv = new ArrayList(peerConnections.size() + leafConnections.size())
rv.addAll(peerConnections.values())
rv.addAll(leafConnections.values())
rv
}
boolean hasLeafSlots() {
leafConnections.size() < maxLeafs
}
boolean hasLeafSlots() {
leafConnections.size() < maxLeafs
}
boolean hasPeerSlots() {
peerConnections.size() < maxPeers
}
boolean hasPeerSlots() {
peerConnections.size() < maxPeers
}
@Override
protected int getDesiredConnections() {
return maxPeers / 2;
}
@Override
public boolean isConnected(Destination d) {
peerConnections.containsKey(d) || leafConnections.containsKey(d)
}
@Override
protected int getDesiredConnections() {
return maxPeers / 2;
}
@Override
public boolean isConnected(Destination d) {
peerConnections.containsKey(d) || leafConnections.containsKey(d)
}
@Override
public void onConnectionEvent(ConnectionEvent e) {
if (!e.incoming && e.leaf) {
log.severe("Inconsistent event $e")
return
}
@Override
public void onConnectionEvent(ConnectionEvent e) {
if (!e.incoming && e.leaf) {
log.severe("Inconsistent event $e")
return
}
if (e.status != ConnectionAttemptStatus.SUCCESSFUL)
return
if (e.status != ConnectionAttemptStatus.SUCCESSFUL)
return
Connection c = e.leaf ?
new LeafConnection(eventBus, e.endpoint, hostCache, trustService, settings) :
new PeerConnection(eventBus, e.endpoint, e.incoming, hostCache, trustService, settings)
def map = e.leaf ? leafConnections : peerConnections
map.put(e.endpoint.destination, c)
c.start()
}
Connection c = e.leaf ?
new LeafConnection(eventBus, e.endpoint, hostCache, trustService, settings) :
new PeerConnection(eventBus, e.endpoint, e.incoming, hostCache, trustService, settings)
def map = e.leaf ? leafConnections : peerConnections
map.put(e.endpoint.destination, c)
c.start()
}
@Override
public void onDisconnectionEvent(DisconnectionEvent e) {
def removed = peerConnections.remove(e.destination)
if (removed == null)
removed = leafConnections.remove(e.destination)
if (removed == null)
log.severe("Removed connection not present in either leaf or peer map ${e.destination.toBase32()}")
}
@Override
public void onDisconnectionEvent(DisconnectionEvent e) {
def removed = peerConnections.remove(e.destination)
if (removed == null)
removed = leafConnections.remove(e.destination)
if (removed == null)
log.severe("Removed connection not present in either leaf or peer map ${e.destination.toBase32()}")
}
@Override
void shutdown() {
@@ -110,7 +110,7 @@ class UltrapeerConnectionManager extends ConnectionManager {
leafConnections.clear()
}
void forwardQueryToLeafs(QueryEvent e) {
void forwardQueryToLeafs(QueryEvent e) {
}
}
}

View File

@@ -0,0 +1,9 @@
package com.muwire.core.content
import com.muwire.core.Event
class ContentControlEvent extends Event {
String term
boolean regex
boolean add
}

View File

@@ -0,0 +1,30 @@
package com.muwire.core.content
import java.util.concurrent.ConcurrentHashMap
import com.muwire.core.search.QueryEvent
import net.i2p.util.ConcurrentHashSet
class ContentManager {
Set<Matcher> matchers = new ConcurrentHashSet()
void onContentControlEvent(ContentControlEvent e) {
Matcher m
if (e.regex)
m = new RegexMatcher(e.term)
else
m = new KeywordMatcher(e.term)
if (e.add)
matchers.add(m)
else
matchers.remove(m)
}
void onQueryEvent(QueryEvent e) {
if (e.searchEvent.searchTerms == null)
return
matchers.each { it.process(e) }
}
}

View File

@@ -0,0 +1,36 @@
package com.muwire.core.content
class KeywordMatcher extends Matcher {
private final String keyword
KeywordMatcher(String keyword) {
this.keyword = keyword
}
@Override
protected boolean match(List<String> searchTerms) {
boolean found = false
searchTerms.each {
if (keyword == it)
found = true
}
found
}
@Override
public String getTerm() {
keyword
}
@Override
public int hashCode() {
keyword.hashCode()
}
@Override
public boolean equals(Object o) {
if (!(o instanceof KeywordMatcher))
return false
KeywordMatcher other = (KeywordMatcher) o
keyword.equals(other.keyword)
}
}

View File

@@ -0,0 +1,9 @@
package com.muwire.core.content
import com.muwire.core.Persona
class Match {
Persona persona
String [] keywords
long timestamp
}

View File

@@ -0,0 +1,20 @@
package com.muwire.core.content
import com.muwire.core.search.QueryEvent
abstract class Matcher {
final List<Match> matches = Collections.synchronizedList(new ArrayList<>())
final Set<UUID> uuids = new HashSet<>()
protected abstract boolean match(List<String> searchTerms);
public abstract String getTerm();
public void process(QueryEvent qe) {
def terms = qe.searchEvent.searchTerms
if (match(terms) && uuids.add(qe.searchEvent.uuid)) {
long now = System.currentTimeMillis()
matches << new Match(persona : qe.originator, keywords : terms, timestamp : now)
}
}
}

View File

@@ -0,0 +1,35 @@
package com.muwire.core.content
import java.util.regex.Pattern
import java.util.stream.Collectors
class RegexMatcher extends Matcher {
private final Pattern pattern
RegexMatcher(String pattern) {
this.pattern = Pattern.compile(pattern)
}
@Override
protected boolean match(List<String> keywords) {
String combined = keywords.join(" ")
return pattern.matcher(combined).find()
}
@Override
public String getTerm() {
pattern.pattern()
}
@Override
public int hashCode() {
pattern.pattern().hashCode()
}
@Override
public boolean equals(Object o) {
if (!(o instanceof RegexMatcher))
return false
RegexMatcher other = (RegexMatcher) o
pattern.pattern() == other.pattern.pattern()
}
}
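The two matcher flavours differ only in their match rule: KeywordMatcher hits on exact term equality, RegexMatcher on a Pattern.find() over the search terms joined with spaces. A standalone sketch of both rules (plain closures here, not the real classes):

    import java.util.regex.Pattern

    def keywordMatch = { String keyword, List<String> terms -> terms.any { it == keyword } }
    def regexMatch = { String regex, List<String> terms ->
        Pattern.compile(regex).matcher(terms.join(" ")).find()
    }

    assert keywordMatch("linux", ["ubuntu", "linux", "iso"])
    assert !keywordMatch("linux", ["ubuntu", "gnu"])
    assert regexMatch(/ubuntu.*iso/, ["ubuntu", "linux", "iso"])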

View File

@@ -34,7 +34,7 @@ public class DownloadManager {
private final MuWireSettings muSettings
private final I2PConnector connector
private final Executor executor
private final File incompletes, home
private final File home
private final Persona me
private final Map<InfoHash, Downloader> downloaders = new ConcurrentHashMap<>()
@@ -46,12 +46,9 @@ public class DownloadManager {
this.meshManager = meshManager
this.muSettings = muSettings
this.connector = connector
this.incompletes = new File(home,"incompletes")
this.home = home
this.me = me
incompletes.mkdir()
this.executor = Executors.newCachedThreadPool({ r ->
Thread rv = new Thread(r)
rv.setName("download-worker")
@@ -63,6 +60,11 @@ public class DownloadManager {
public void onUIDownloadEvent(UIDownloadEvent e) {
File incompletes = muSettings.incompleteLocation
if (incompletes == null)
incompletes = new File(home, "incompletes")
incompletes.mkdirs()
def size = e.result[0].size
def infohash = e.result[0].infohash
def pieceSize = e.result[0].pieceSize
@@ -74,7 +76,7 @@ public class DownloadManager {
destinations.addAll(e.sources)
destinations.remove(me.destination)
Pieces pieces = getPieces(infohash, size, pieceSize)
Pieces pieces = getPieces(infohash, size, pieceSize, e.sequential)
def downloader = new Downloader(eventBus, this, me, e.target, size,
infohash, pieceSize, connector, destinations,
@@ -123,7 +125,17 @@ public class DownloadManager {
infoHash = new InfoHash(root)
}
Pieces pieces = getPieces(infoHash, (long)json.length, json.pieceSizePow2)
boolean sequential = false
if (json.sequential != null)
sequential = json.sequential
File incompletes
if (json.incompletes != null)
incompletes = new File(DataUtil.readi18nString(Base64.decode(json.incompletes)))
else
incompletes = new File(home, "incompletes")
Pieces pieces = getPieces(infoHash, (long)json.length, json.pieceSizePow2, sequential)
def downloader = new Downloader(eventBus, this, me, file, (long)json.length,
infoHash, json.pieceSizePow2, connector, destinations, incompletes, pieces)
@@ -137,12 +149,12 @@ public class DownloadManager {
}
}
private Pieces getPieces(InfoHash infoHash, long length, int pieceSizePow2) {
private Pieces getPieces(InfoHash infoHash, long length, int pieceSizePow2, boolean sequential) {
int pieceSize = 0x1 << pieceSizePow2
int nPieces = (int)(length / pieceSize)
if (length % pieceSize != 0)
nPieces++
Mesh mesh = meshManager.getOrCreate(infoHash, nPieces)
Mesh mesh = meshManager.getOrCreate(infoHash, nPieces, sequential)
mesh.pieces
}
@@ -188,6 +200,11 @@ public class DownloadManager {
json.hashRoot = Base64.encode(infoHash.getRoot())
json.paused = downloader.paused
json.sequential = downloader.pieces.ratio == 0f
json.incompletes = Base64.encode(DataUtil.encodei18nString(downloader.incompletes.getAbsolutePath()))
writer.println(JsonOutput.toJson(json))
}
}
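After this change each persisted download record also remembers whether it is sequential and where its incomplete files live. A rough sketch of that part of the record, limited to the fields visible in the diff; the path is hypothetical, and the real writer encodes it with DataUtil.encodei18nString plus I2P's Base64 rather than Groovy's helper:

    import groovy.json.JsonOutput

    def record = [
        paused     : false,
        sequential : false,                                   // true when pieces.ratio == 0f
        incompletes: "/home/user/.MuWire/incompletes".bytes.encodeBase64().toString()
    ]
    println JsonOutput.toJson(record)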

View File

@@ -14,6 +14,7 @@ import static com.muwire.core.util.DataUtil.readTillRN
import groovy.util.logging.Log
import java.nio.ByteBuffer
import java.nio.MappedByteBuffer
import java.nio.channels.FileChannel
import java.nio.charset.StandardCharsets
import java.nio.file.Files
@@ -25,8 +26,6 @@ import java.util.logging.Level
@Log
class DownloadSession {
private static int SAMPLES = 10
private final EventBus eventBus
private final String meB64
private final Pieces pieces
@@ -38,10 +37,10 @@ class DownloadSession {
private final Set<Integer> available
private final MessageDigest digest
private final LinkedList<Long> timestamps = new LinkedList<>()
private final LinkedList<Integer> reads = new LinkedList<>()
private long lastSpeedRead = System.currentTimeMillis()
private long dataSinceLastRead
private ByteBuffer mapped
private MappedByteBuffer mapped
DownloadSession(EventBus eventBus, String meB64, Pieces pieces, InfoHash infoHash, Endpoint endpoint, File file,
int pieceSize, long fileLength, Set<Integer> available) {
@@ -71,21 +70,23 @@ class DownloadSession {
OutputStream os = endpoint.getOutputStream()
InputStream is = endpoint.getInputStream()
int piece
int[] pieceAndPosition
if (available.isEmpty())
piece = pieces.claim()
pieceAndPosition = pieces.claim()
else
piece = pieces.claim(new HashSet<>(available))
if (piece == -1)
pieceAndPosition = pieces.claim(new HashSet<>(available))
if (pieceAndPosition == null)
return false
int piece = pieceAndPosition[0]
int position = pieceAndPosition[1]
boolean steal = pieceAndPosition[2] == 1
boolean unclaim = true
log.info("will download piece $piece")
long start = piece * pieceSize
long end = Math.min(fileLength, start + pieceSize) - 1
long length = end - start + 1
log.info("will download piece $piece from position $position steal $steal")
long pieceStart = piece * ((long)pieceSize)
long end = Math.min(fileLength, pieceStart + pieceSize) - 1
long start = pieceStart + position
String root = Base64.encode(infoHash.getRoot())
try {
@@ -174,8 +175,9 @@ class DownloadSession {
FileChannel channel
try {
channel = Files.newByteChannel(file.toPath(), EnumSet.of(StandardOpenOption.READ, StandardOpenOption.WRITE,
StandardOpenOption.SPARSE, StandardOpenOption.CREATE)) // TODO: double-check, maybe CREATE_NEW
mapped = channel.map(FileChannel.MapMode.READ_WRITE, start, end - start + 1)
StandardOpenOption.SPARSE, StandardOpenOption.CREATE))
mapped = channel.map(FileChannel.MapMode.READ_WRITE, pieceStart, end - pieceStart + 1)
mapped.position(position)
byte[] tmp = new byte[0x1 << 13]
while(mapped.hasRemaining()) {
@@ -186,31 +188,28 @@ class DownloadSession {
throw new IOException()
synchronized(this) {
mapped.put(tmp, 0, read)
if (timestamps.size() == SAMPLES) {
timestamps.removeFirst()
reads.removeFirst()
}
timestamps.addLast(System.currentTimeMillis())
reads.addLast(read)
dataSinceLastRead += read
pieces.markPartial(piece, mapped.position())
}
}
mapped.clear()
digest.update(mapped)
DataUtil.tryUnmap(mapped)
byte [] hash = digest.digest()
byte [] expected = new byte[32]
System.arraycopy(infoHash.getHashList(), piece * 32, expected, 0, 32)
if (hash != expected)
throw new BadHashException()
if (hash != expected) {
pieces.markPartial(piece, 0)
throw new BadHashException("bad hash on piece $piece")
}
} finally {
try { channel?.close() } catch (IOException ignore) {}
DataUtil.tryUnmap(mapped)
}
pieces.markDownloaded(piece)
unclaim = false
} finally {
if (unclaim)
if (unclaim && !steal)
pieces.unclaim(piece)
}
return true
@@ -223,24 +222,11 @@ class DownloadSession {
}
synchronized int speed() {
if (timestamps.size() < SAMPLES)
return 0
int totalRead = 0
int idx = 0
final long now = System.currentTimeMillis()
while(idx < SAMPLES && timestamps.get(idx) < now - 1000)
idx++
if (idx == SAMPLES)
return 0
if (idx == SAMPLES - 1)
return reads[idx]
long interval = timestamps.last - timestamps[idx]
if (interval == 0)
interval = 1
for (int i = idx; i < SAMPLES; i++)
totalRead += reads[idx]
(int)(totalRead * 1000.0 / interval)
long interval = Math.max(1000, now - lastSpeedRead)
lastSpeedRead = now;
int rv = (int) (dataSinceLastRead * 1000.0 / interval)
dataSinceLastRead = 0
rv
}
}
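The rewritten speed() drops the per-read sample bookkeeping: it divides the bytes read since the previous call by the elapsed time, floored at one second. A minimal numeric illustration:

    long lastSpeedRead = 0
    long dataSinceLastRead = 256 * 1024      // 256 KiB read since the last sample
    long now = 2000

    long interval = Math.max(1000, now - lastSpeedRead)
    int speed = (int) (dataSinceLastRead * 1000.0 / interval)
    assert speed == 128 * 1024               // 2 s elapsed -> 128 KiB/s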

View File

@@ -7,6 +7,7 @@ import com.muwire.core.connection.Endpoint
import java.nio.file.AtomicMoveNotSupportedException
import java.nio.file.Files
import java.nio.file.StandardCopyOption
import java.time.Instant
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
@@ -26,6 +27,7 @@ import net.i2p.util.ConcurrentHashSet
@Log
public class Downloader {
public enum DownloadState { CONNECTING, HASHLIST, DOWNLOADING, FAILED, CANCELLED, PAUSED, FINISHED }
private enum WorkerState { CONNECTING, HASHLIST, DOWNLOADING, FINISHED}
@@ -47,6 +49,7 @@ public class Downloader {
private final I2PConnector connector
private final Set<Destination> destinations
private final int nPieces
private final File incompletes
private final File piecesFile
private final File incompleteFile
final int pieceSizePow2
@@ -58,6 +61,11 @@ public class Downloader {
private final AtomicBoolean eventFired = new AtomicBoolean()
private boolean piecesFileClosed
private ArrayList speedArr = new ArrayList<Integer>()
private int speedPos = 0
private int speedAvg = 0
private long timestamp = Instant.now().toEpochMilli()
public Downloader(EventBus eventBus, DownloadManager downloadManager,
Persona me, File file, long length, InfoHash infoHash,
int pieceSizePow2, I2PConnector connector, Set<Destination> destinations,
@@ -70,6 +78,7 @@ public class Downloader {
this.length = length
this.connector = connector
this.destinations = destinations
this.incompletes = incompletes
this.piecesFile = new File(incompletes, file.getName()+".pieces")
this.incompleteFile = new File(incompletes, file.getName()+".part")
this.pieceSizePow2 = pieceSizePow2
@@ -101,8 +110,14 @@ public class Downloader {
if (!piecesFile.exists())
return
piecesFile.eachLine {
int piece = Integer.parseInt(it)
pieces.markDownloaded(piece)
String [] split = it.split(",")
int piece = Integer.parseInt(split[0])
if (split.length == 1)
pieces.markDownloaded(piece)
else {
int position = Integer.parseInt(split[1])
pieces.markPartial(piece, position)
}
}
}
@@ -111,9 +126,7 @@ public class Downloader {
if (piecesFileClosed)
return
piecesFile.withPrintWriter { writer ->
pieces.getDownloaded().each { piece ->
writer.println(piece)
}
pieces.write(writer)
}
}
}
@@ -124,14 +137,41 @@ public class Downloader {
public int speed() {
int total = 0
int currSpeed = 0
if (getCurrentState() == DownloadState.DOWNLOADING) {
activeWorkers.values().each {
if (it.currentState == WorkerState.DOWNLOADING)
total += it.speed()
currSpeed += it.speed()
}
}
total
if (speedArr.size() != downloadManager.muSettings.speedSmoothSeconds) {
speedArr.clear()
downloadManager.muSettings.speedSmoothSeconds.times { speedArr.add(0) }
speedPos = 0
}
// normalize to speedArr.size
currSpeed /= speedArr.size()
// compute new speedAvg and update speedArr
if ( speedArr[speedPos] > speedAvg ) {
speedAvg = 0
} else {
speedAvg -= speedArr[speedPos]
}
speedAvg += currSpeed
speedArr[speedPos] = currSpeed
// this might be necessary due to rounding errors
if (speedAvg < 0)
speedAvg = 0
// rolling index over the speedArr
speedPos++
if (speedPos >= speedArr.size())
speedPos=0
speedAvg
}
public DownloadState getCurrentState() {
@@ -272,12 +312,17 @@ public class Downloader {
} catch (Exception bad) {
log.log(Level.WARNING,"Exception while downloading",DataUtil.findRoot(bad))
} finally {
writePieces()
currentState = WorkerState.FINISHED
if (pieces.isComplete() && eventFired.compareAndSet(false, true)) {
synchronized(piecesFile) {
piecesFileClosed = true
piecesFile.delete()
}
activeWorkers.values().each {
if (it.destination != destination)
it.cancel()
}
try {
Files.move(incompleteFile.toPath(), file.toPath(), StandardCopyOption.ATOMIC_MOVE)
} catch (AtomicMoveNotSupportedException e) {
@@ -286,7 +331,7 @@ public class Downloader {
}
eventBus.publish(
new FileDownloadedEvent(
downloadedFile : new DownloadedFile(file, getInfoHash(), pieceSizePow2, successfulDestinations),
downloadedFile : new DownloadedFile(file.getCanonicalFile(), getInfoHash(), pieceSizePow2, successfulDestinations),
downloader : Downloader.this))
}
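Downloader.speed() now smooths the per-worker readings over a circular buffer sized by the new speedSmoothSeconds setting, keeping the running sum up to date as old samples rotate out. A self-contained sketch of that rolling average; the class and names below are illustrative, not part of the changeset:

// rolling average over the last N one-second samples
class RollingAverage {
    private List<Integer> samples
    private int pos = 0
    private int sum = 0

    RollingAverage(int seconds) {
        samples = new ArrayList<>(Collections.nCopies(seconds, 0))
    }

    synchronized int average(int sample) {
        sum -= samples[pos]              // drop the sample that falls out of the window
        sum += sample
        samples[pos] = sample
        pos = (pos + 1) % samples.size()
        (int) (Math.max(sum, 0) / samples.size())   // guard against drift, as above
    }
}

def avg = new RollingAverage(3)
[300, 600, 900].each { println avg.average(it) }   // prints 100, 300, 600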


@@ -5,6 +5,7 @@ class Pieces {
private final int nPieces
private final float ratio
private final Random random = new Random()
private final Map<Integer,Integer> partials = new HashMap<>()
Pieces(int nPieces) {
this(nPieces, 1.0f)
@@ -17,16 +18,22 @@ class Pieces {
claimed = new BitSet(nPieces)
}
synchronized int claim() {
synchronized int[] claim() {
int claimedCardinality = claimed.cardinality()
if (claimedCardinality == nPieces)
return -1
if (claimedCardinality == nPieces) {
// steal
int downloadedCardinality = done.cardinality()
if (downloadedCardinality == nPieces)
return null
int rv = done.nextClearBit(0)
return [rv, partials.getOrDefault(rv, 0), 1]
}
// if fuller than ratio just do sequential
if ( (1.0f * claimedCardinality) / nPieces > ratio) {
if ( (1.0f * claimedCardinality) / nPieces >= ratio) {
int rv = claimed.nextClearBit(0)
claimed.set(rv)
return rv
return [rv, partials.getOrDefault(rv, 0), 0]
}
while(true) {
@@ -34,20 +41,29 @@ class Pieces {
if (claimed.get(start))
continue
claimed.set(start)
return start
return [start, partials.getOrDefault(start,0), 0]
}
}
synchronized int claim(Set<Integer> available) {
for (int i = claimed.nextSetBit(0); i >= 0; i = claimed.nextSetBit(i+1))
synchronized int[] claim(Set<Integer> available) {
for (int i = done.nextSetBit(0); i >= 0; i = done.nextSetBit(i+1))
available.remove(i)
if (available.isEmpty())
return -1
List<Integer> toList = available.toList()
Collections.shuffle(toList)
return null
Set<Integer> availableCopy = new HashSet<>(available)
for (int i = claimed.nextSetBit(0); i >= 0; i = claimed.nextSetBit(i+1))
availableCopy.remove(i)
if (availableCopy.isEmpty()) {
// steal
int rv = available.first()
return [rv, partials.getOrDefault(rv, 0), 1]
}
List<Integer> toList = availableCopy.toList()
if (ratio > 0f)
Collections.shuffle(toList)
int rv = toList[0]
claimed.set(rv)
rv
[rv, partials.getOrDefault(rv, 0), 0]
}
synchronized def getDownloaded() {
@@ -61,6 +77,11 @@ class Pieces {
synchronized void markDownloaded(int piece) {
done.set(piece)
claimed.set(piece)
partials.remove(piece)
}
synchronized void markPartial(int piece, int position) {
partials.put(piece, position)
}
synchronized void unclaim(int piece) {
@@ -82,5 +103,15 @@ class Pieces {
synchronized void clearAll() {
done.clear()
claimed.clear()
partials.clear()
}
synchronized void write(PrintWriter writer) {
for (int i = done.nextSetBit(0); i >= 0; i = done.nextSetBit(i+1)) {
writer.println(i)
}
partials.each { piece, position ->
writer.println("$piece,$position")
}
}
}
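claim() now hands back three values instead of a single index: the piece to work on, the byte offset to resume from when a partial exists, and a flag that is 1 when the piece is being stolen from another claimant because nothing unclaimed remains. A hedged sketch of how a caller could unpack that, assuming the Pieces class as sketched in the diff above:

// sketch: consuming the new claim() result
def pieces = new Pieces(8)
int[] claimed = pieces.claim()
if (claimed == null) {
    println "nothing left to download"
} else {
    def (piece, position, steal) = [claimed[0], claimed[1], claimed[2]]
    println "download piece $piece starting at offset $position" +
            (steal == 1 ? " (stolen from another worker)" : "")
}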


@@ -10,4 +10,5 @@ class UIDownloadEvent extends Event {
UIResultEvent[] result
Set<Destination> sources
File target
boolean sequential
}


@@ -13,6 +13,7 @@ import java.nio.file.WatchService
import java.util.concurrent.ConcurrentHashMap
import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.SharedFile
import groovy.util.logging.Log
@@ -31,6 +32,8 @@ class DirectoryWatcher {
kinds = [ENTRY_CREATE, ENTRY_MODIFY, ENTRY_DELETE]
}
private final File home
private final MuWireSettings muOptions
private final EventBus eventBus
private final FileManager fileManager
private final Thread watcherThread, publisherThread
@@ -39,7 +42,9 @@ class DirectoryWatcher {
private WatchService watchService
private volatile boolean shutdown
DirectoryWatcher(EventBus eventBus, FileManager fileManager) {
DirectoryWatcher(EventBus eventBus, FileManager fileManager, File home, MuWireSettings muOptions) {
this.home = home
this.muOptions = muOptions
this.eventBus = eventBus
this.fileManager = fileManager
this.watcherThread = new Thread({watch() } as Runnable, "directory-watcher")
@@ -64,15 +69,28 @@ class DirectoryWatcher {
void onFileSharedEvent(FileSharedEvent e) {
if (!e.file.isDirectory())
return
Path path = e.file.getCanonicalFile().toPath()
File canonical = e.file.getCanonicalFile()
Path path = canonical.toPath()
WatchKey wk = path.register(watchService, kinds)
watchedDirectories.put(e.file, wk)
watchedDirectories.put(canonical, wk)
if (muOptions.watchedDirectories.add(canonical.toString()))
saveMuSettings()
}
void onDirectoryUnsharedEvent(DirectoryUnsharedEvent e) {
WatchKey wk = watchedDirectories.remove(e.directory)
wk?.cancel()
if (muOptions.watchedDirectories.remove(e.directory.toString()))
saveMuSettings()
}
private void saveMuSettings() {
File muSettingsFile = new File(home, "MuWire.properties")
muSettingsFile.withOutputStream {
muOptions.write(it)
}
}
private void watch() {


@@ -8,5 +8,5 @@ import net.i2p.data.Destination
class FileDownloadedEvent extends Event {
Downloader downloader
DownloadedFile downloadedFile
DownloadedFile downloadedFile
}


@@ -5,8 +5,8 @@ import com.muwire.core.SharedFile
class FileHashedEvent extends Event {
SharedFile sharedFile
String error
SharedFile sharedFile
String error
@Override
public String toString() {


@@ -13,67 +13,67 @@ import java.security.NoSuchAlgorithmException
class FileHasher {
/** max size of shared file is 128 GB */
public static final long MAX_SIZE = 0x1L << 37
/** max size of shared file is 128 GB */
public static final long MAX_SIZE = 0x1L << 37
/**
* @param size of the file to be shared
* @return the size of each piece in power of 2
/**
* @param size of the file to be shared
* @return the size of each piece in power of 2
* piece size is minimum 128 KBytes and maximum 16 MBytes in power of 2 steps (2^17 - 2^24)
* there can be up to 8192 pieces maximum per file
*/
static int getPieceSize(long size) {
if (size <= 0x1 << 30)
return 17
*/
static int getPieceSize(long size) {
if (size <= 0x1 << 30)
return 17
for (int i = 31; i <= 37; i++) {
if (size <= 0x1L << i) {
return i-13
}
}
for (int i = 31; i <= 37; i++) {
if (size <= 0x1L << i) {
return i-13
}
}
throw new IllegalArgumentException("File too large $size")
}
throw new IllegalArgumentException("File too large $size")
}
final MessageDigest digest
final MessageDigest digest
FileHasher() {
try {
digest = MessageDigest.getInstance("SHA-256")
} catch (NoSuchAlgorithmException impossible) {
digest = null
System.exit(1)
}
}
FileHasher() {
try {
digest = MessageDigest.getInstance("SHA-256")
} catch (NoSuchAlgorithmException impossible) {
digest = null
System.exit(1)
}
}
InfoHash hashFile(File file) {
final long length = file.length()
final int size = 0x1 << getPieceSize(length)
int numPieces = (int) (length / size)
if (numPieces * size < length)
numPieces++
InfoHash hashFile(File file) {
final long length = file.length()
final int size = 0x1 << getPieceSize(length)
int numPieces = (int) (length / size)
if (numPieces * size < length)
numPieces++
def output = new ByteArrayOutputStream()
RandomAccessFile raf = new RandomAccessFile(file, "r")
try {
MappedByteBuffer buf
for (int i = 0; i < numPieces - 1; i++) {
buf = raf.getChannel().map(MapMode.READ_ONLY, ((long)size) * i, size)
digest.update buf
def output = new ByteArrayOutputStream()
RandomAccessFile raf = new RandomAccessFile(file, "r")
try {
MappedByteBuffer buf
for (int i = 0; i < numPieces - 1; i++) {
buf = raf.getChannel().map(MapMode.READ_ONLY, ((long)size) * i, size)
digest.update buf
DataUtil.tryUnmap(buf)
output.write(digest.digest(), 0, 32)
}
def lastPieceLength = length - (numPieces - 1) * ((long)size)
buf = raf.getChannel().map(MapMode.READ_ONLY, length - lastPieceLength, lastPieceLength)
digest.update buf
output.write(digest.digest(), 0, 32)
} finally {
raf.close()
}
output.write(digest.digest(), 0, 32)
}
def lastPieceLength = length - (numPieces - 1) * ((long)size)
buf = raf.getChannel().map(MapMode.READ_ONLY, length - lastPieceLength, lastPieceLength)
digest.update buf
output.write(digest.digest(), 0, 32)
} finally {
raf.close()
}
byte [] hashList = output.toByteArray()
InfoHash.fromHashList(hashList)
}
byte [] hashList = output.toByteArray()
InfoHash.fromHashList(hashList)
}
public static void main(String[] args) {
if (args.length != 1) {


@@ -0,0 +1,15 @@
package com.muwire.core.files
import com.muwire.core.Event
import com.muwire.core.SharedFile
class FileHashingEvent extends Event {
File hashingFile
@Override
public String toString() {
super.toString() + " hashingFile " + hashingFile.getAbsolutePath()
}
}


@@ -5,5 +5,5 @@ import com.muwire.core.SharedFile
class FileLoadedEvent extends Event {
SharedFile loadedFile
SharedFile loadedFile
}


@@ -8,33 +8,36 @@ import com.muwire.core.UILoadedEvent
import com.muwire.core.search.ResultsEvent
import com.muwire.core.search.SearchEvent
import com.muwire.core.search.SearchIndex
import com.muwire.core.util.DataUtil
import groovy.util.logging.Log
import net.i2p.data.Base64
@Log
class FileManager {
final EventBus eventBus
final EventBus eventBus
final MuWireSettings settings
final Map<InfoHash, Set<SharedFile>> rootToFiles = Collections.synchronizedMap(new HashMap<>())
final Map<File, SharedFile> fileToSharedFile = Collections.synchronizedMap(new HashMap<>())
final Map<String, Set<File>> nameToFiles = new HashMap<>()
final SearchIndex index = new SearchIndex()
final Map<InfoHash, Set<SharedFile>> rootToFiles = Collections.synchronizedMap(new HashMap<>())
final Map<File, SharedFile> fileToSharedFile = Collections.synchronizedMap(new HashMap<>())
final Map<String, Set<File>> nameToFiles = new HashMap<>()
final Map<String, Set<File>> commentToFile = new HashMap<>()
final SearchIndex index = new SearchIndex()
FileManager(EventBus eventBus, MuWireSettings settings) {
FileManager(EventBus eventBus, MuWireSettings settings) {
this.settings = settings
this.eventBus = eventBus
}
this.eventBus = eventBus
}
void onFileHashedEvent(FileHashedEvent e) {
if (e.sharedFile != null)
addToIndex(e.sharedFile)
}
void onFileLoadedEvent(FileLoadedEvent e) {
addToIndex(e.loadedFile)
}
void onFileLoadedEvent(FileLoadedEvent e) {
addToIndex(e.loadedFile)
}
void onFileDownloadedEvent(FileDownloadedEvent e) {
if (settings.shareDownloadedFiles) {
@@ -42,88 +45,141 @@ class FileManager {
}
}
private void addToIndex(SharedFile sf) {
private void addToIndex(SharedFile sf) {
log.info("Adding shared file " + sf.getFile())
InfoHash infoHash = sf.getInfoHash()
Set<SharedFile> existing = rootToFiles.get(infoHash)
if (existing == null) {
InfoHash infoHash = sf.getInfoHash()
Set<SharedFile> existing = rootToFiles.get(infoHash)
if (existing == null) {
log.info("adding new root")
existing = new HashSet<>()
rootToFiles.put(infoHash, existing);
}
existing.add(sf)
fileToSharedFile.put(sf.file, sf)
existing = new HashSet<>()
rootToFiles.put(infoHash, existing);
}
existing.add(sf)
fileToSharedFile.put(sf.file, sf)
String name = sf.getFile().getName()
Set<File> existingFiles = nameToFiles.get(name)
if (existingFiles == null) {
existingFiles = new HashSet<>()
nameToFiles.put(name, existingFiles)
}
existingFiles.add(sf.getFile())
String name = sf.getFile().getName()
Set<File> existingFiles = nameToFiles.get(name)
if (existingFiles == null) {
existingFiles = new HashSet<>()
nameToFiles.put(name, existingFiles)
}
existingFiles.add(sf.getFile())
index.add(name)
}
String comment = sf.getComment()
if (comment != null) {
comment = DataUtil.readi18nString(Base64.decode(comment))
index.add(comment)
Set<File> existingComment = commentToFile.get(comment)
if(existingComment == null) {
existingComment = new HashSet<>()
commentToFile.put(comment, existingComment)
}
existingComment.add(sf.getFile())
}
void onFileUnsharedEvent(FileUnsharedEvent e) {
SharedFile sf = e.unsharedFile
InfoHash infoHash = sf.getInfoHash()
Set<SharedFile> existing = rootToFiles.get(infoHash)
if (existing != null) {
existing.remove(sf)
if (existing.isEmpty()) {
rootToFiles.remove(infoHash)
}
}
index.add(name)
}
fileToSharedFile.remove(sf.file)
void onFileUnsharedEvent(FileUnsharedEvent e) {
SharedFile sf = e.unsharedFile
InfoHash infoHash = sf.getInfoHash()
Set<SharedFile> existing = rootToFiles.get(infoHash)
if (existing != null) {
existing.remove(sf)
if (existing.isEmpty()) {
rootToFiles.remove(infoHash)
}
}
String name = sf.getFile().getName()
Set<File> existingFiles = nameToFiles.get(name)
if (existingFiles != null) {
existingFiles.remove(sf.file)
if (existingFiles.isEmpty()) {
nameToFiles.remove(name)
}
}
fileToSharedFile.remove(sf.file)
index.remove(name)
}
String name = sf.getFile().getName()
Set<File> existingFiles = nameToFiles.get(name)
if (existingFiles != null) {
existingFiles.remove(sf.file)
if (existingFiles.isEmpty()) {
nameToFiles.remove(name)
}
}
Map<File, SharedFile> getSharedFiles() {
String comment = sf.getComment()
if (comment != null) {
Set<File> existingComment = commentToFile.get(comment)
if (existingComment != null) {
existingComment.remove(sf.getFile())
if (existingComment.isEmpty()) {
commentToFile.remove(comment)
index.remove(comment)
}
}
}
index.remove(name)
}
void onUICommentEvent(UICommentEvent e) {
if (e.oldComment != null) {
def comment = DataUtil.readi18nString(Base64.decode(e.oldComment))
Set<File> existingFiles = commentToFile.get(comment)
existingFiles.remove(e.sharedFile.getFile())
if (existingFiles.isEmpty()) {
commentToFile.remove(comment)
index.remove(comment)
}
}
String comment = e.sharedFile.getComment()
comment = DataUtil.readi18nString(Base64.decode(comment))
if (comment != null) {
index.add(comment)
Set<File> existingComment = commentToFile.get(comment)
if(existingComment == null) {
existingComment = new HashSet<>()
commentToFile.put(comment, existingComment)
}
existingComment.add(e.sharedFile.getFile())
}
}
Map<File, SharedFile> getSharedFiles() {
synchronized(fileToSharedFile) {
return new HashMap<>(fileToSharedFile)
}
}
}
Set<SharedFile> getSharedFiles(byte []root) {
return rootToFiles.get(new InfoHash(root))
}
void onSearchEvent(SearchEvent e) {
// hash takes precedence
ResultsEvent re = null
if (e.searchHash != null) {
void onSearchEvent(SearchEvent e) {
// hash takes precedence
ResultsEvent re = null
if (e.searchHash != null) {
Set<SharedFile> found
found = rootToFiles.get new InfoHash(e.searchHash)
found = filter(found, e.oobInfohash)
if (found != null && !found.isEmpty())
re = new ResultsEvent(results: found.asList(), uuid: e.uuid, searchEvent: e)
} else {
def names = index.search e.searchTerms
Set<File> files = new HashSet<>()
names.each { files.addAll nameToFiles.getOrDefault(it, []) }
Set<SharedFile> sharedFiles = new HashSet<>()
files.each { sharedFiles.add fileToSharedFile[it] }
if (found != null && !found.isEmpty())
re = new ResultsEvent(results: found.asList(), uuid: e.uuid, searchEvent: e)
} else {
def names = index.search e.searchTerms
Set<File> files = new HashSet<>()
names.each {
files.addAll nameToFiles.getOrDefault(it, [])
if (e.searchComments)
files.addAll commentToFile.getOrDefault(it, [])
}
Set<SharedFile> sharedFiles = new HashSet<>()
files.each { sharedFiles.add fileToSharedFile[it] }
files = filter(sharedFiles, e.oobInfohash)
if (!sharedFiles.isEmpty())
re = new ResultsEvent(results: sharedFiles.asList(), uuid: e.uuid, searchEvent: e)
}
if (!sharedFiles.isEmpty())
re = new ResultsEvent(results: sharedFiles.asList(), uuid: e.uuid, searchEvent: e)
if (re != null)
eventBus.publish(re)
}
}
if (re != null)
eventBus.publish(re)
}
private static Set<SharedFile> filter(Set<SharedFile> files, boolean oob) {
if (!oob)


@@ -4,7 +4,7 @@ import com.muwire.core.Event
class FileSharedEvent extends Event {
File file
File file
@Override
public String toString() {


@@ -4,5 +4,5 @@ import com.muwire.core.Event
import com.muwire.core.SharedFile
class FileUnsharedEvent extends Event {
SharedFile unsharedFile
SharedFile unsharedFile
}


@@ -4,44 +4,60 @@ import java.util.concurrent.Executor
import java.util.concurrent.Executors
import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.SharedFile
class HasherService {
final FileHasher hasher
final EventBus eventBus
final FileHasher hasher
final EventBus eventBus
final FileManager fileManager
Executor executor
final Set<File> hashed = new HashSet<>()
final MuWireSettings settings
Executor executor
HasherService(FileHasher hasher, EventBus eventBus, FileManager fileManager) {
this.hasher = hasher
this.eventBus = eventBus
HasherService(FileHasher hasher, EventBus eventBus, FileManager fileManager, MuWireSettings settings) {
this.hasher = hasher
this.eventBus = eventBus
this.fileManager = fileManager
}
this.settings = settings
}
void start() {
executor = Executors.newSingleThreadExecutor()
}
void start() {
executor = Executors.newSingleThreadExecutor()
}
void onFileSharedEvent(FileSharedEvent evt) {
if (fileManager.fileToSharedFile.containsKey(evt.file.getCanonicalFile()))
void onFileSharedEvent(FileSharedEvent evt) {
File canonical = evt.file.getCanonicalFile()
if (!settings.shareHiddenFiles && canonical.isHidden())
return
executor.execute( { -> process(evt.file) } as Runnable)
}
if (fileManager.fileToSharedFile.containsKey(canonical))
return
if (hashed.add(canonical))
executor.execute( { -> process(canonical) } as Runnable)
}
private void process(File f) {
f = f.getCanonicalFile()
if (f.isDirectory()) {
f.listFiles().each {eventBus.publish new FileSharedEvent(file: it) }
} else {
if (f.length() == 0) {
eventBus.publish new FileHashedEvent(error: "Not sharing empty file $f")
} else if (f.length() > FileHasher.MAX_SIZE) {
eventBus.publish new FileHashedEvent(error: "$f is too large to be shared ${f.length()}")
} else {
def hash = hasher.hashFile f
eventBus.publish new FileHashedEvent(sharedFile: new SharedFile(f, hash, FileHasher.getPieceSize(f.length())))
}
}
}
void onFileUnsharedEvent(FileUnsharedEvent evt) {
hashed.remove(evt.unsharedFile.file)
}
void onDirectoryUnsharedEvent(DirectoryUnsharedEvent evt) {
hashed.remove(evt.directory)
}
private void process(File f) {
if (f.isDirectory()) {
f.listFiles().each {eventBus.publish new FileSharedEvent(file: it) }
} else {
if (f.length() == 0) {
eventBus.publish new FileHashedEvent(error: "Not sharing empty file $f")
} else if (f.length() > FileHasher.MAX_SIZE) {
eventBus.publish new FileHashedEvent(error: "$f is too large to be shared ${f.length()}")
} else {
eventBus.publish new FileHashingEvent(hashingFile: f)
def hash = hasher.hashFile f
eventBus.publish new FileHashedEvent(sharedFile: new SharedFile(f, hash, FileHasher.getPieceSize(f.length())))
}
}
}
}
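The shared-file gate now canonicalizes the path first, skips hidden files unless shareHiddenFiles is enabled, and remembers what it has already queued so the same file is never hashed twice. A small standalone sketch of that check; only the shareHiddenFiles name comes from the diff, the rest is illustrative:

// sketch: deciding whether a newly shared file should be queued for hashing
Set<File> alreadyQueued = new HashSet<>()
boolean shareHiddenFiles = false

def shouldQueue = { File f ->
    File canonical = f.getCanonicalFile()      // resolve symlinks and relative paths
    if (!shareHiddenFiles && canonical.isHidden())
        return false
    alreadyQueued.add(canonical)               // false if this path was queued before
}

File f = File.createTempFile("muwire-example", ".dat")
println shouldQueue(f)        // true: first time this canonical path is seen
println shouldQueue(f)        // false: already queued
f.delete()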


@@ -3,6 +3,9 @@ package com.muwire.core.files
import java.nio.file.CopyOption
import java.nio.file.Files
import java.nio.file.StandardCopyOption
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.concurrent.ThreadFactory
import java.util.logging.Level
import java.util.stream.Collectors
@@ -23,135 +26,148 @@ import net.i2p.data.Destination
@Log
class PersisterService extends Service {
final File location
final EventBus listener
final int interval
final Timer timer
final FileManager fileManager
final File location
final EventBus listener
final int interval
final Timer timer
final FileManager fileManager
final ExecutorService persisterExecutor = Executors.newSingleThreadExecutor({ r ->
new Thread(r, "file persister")
} as ThreadFactory)
PersisterService(File location, EventBus listener, int interval, FileManager fileManager) {
this.location = location
this.listener = listener
this.interval = interval
this.fileManager = fileManager
timer = new Timer("file persister", true)
}
PersisterService(File location, EventBus listener, int interval, FileManager fileManager) {
this.location = location
this.listener = listener
this.interval = interval
this.fileManager = fileManager
timer = new Timer("file persister timer", true)
}
void stop() {
timer.cancel()
}
void stop() {
timer.cancel()
}
void onUILoadedEvent(UILoadedEvent e) {
timer.schedule({load()} as TimerTask, 1)
}
void load() {
if (location.exists() && location.isFile()) {
def slurper = new JsonSlurper()
try {
location.eachLine {
if (it.trim().length() > 0) {
def parsed = slurper.parseText it
def event = fromJson parsed
if (event != null) {
void onUIPersistFilesEvent(UIPersistFilesEvent e) {
persistFiles()
}
void load() {
Thread.currentThread().setPriority(Thread.MIN_PRIORITY)
if (location.exists() && location.isFile()) {
int loaded = 0
def slurper = new JsonSlurper()
try {
location.eachLine {
if (it.trim().length() > 0) {
def parsed = slurper.parseText it
def event = fromJson parsed
if (event != null) {
log.fine("loaded file $event.loadedFile.file")
listener.publish event
}
}
}
listener.publish event
loaded++
if (loaded % 10 == 0)
Thread.sleep(20)
}
}
}
listener.publish(new AllFilesLoadedEvent())
} catch (IllegalArgumentException|NumberFormatException e) {
} catch (IllegalArgumentException|NumberFormatException e) {
log.log(Level.WARNING, "couldn't load files",e)
}
} else {
}
} else {
listener.publish(new AllFilesLoadedEvent())
}
timer.schedule({persistFiles()} as TimerTask, 0, interval)
loaded = true
}
timer.schedule({persistFiles()} as TimerTask, 0, interval)
loaded = true
}
private static FileLoadedEvent fromJson(def json) {
if (json.file == null || json.length == null || json.infoHash == null || json.hashList == null)
throw new IllegalArgumentException()
if (!(json.hashList instanceof List))
throw new IllegalArgumentException()
private static FileLoadedEvent fromJson(def json) {
if (json.file == null || json.length == null || json.infoHash == null || json.hashList == null)
throw new IllegalArgumentException()
if (!(json.hashList instanceof List))
throw new IllegalArgumentException()
def file = new File(DataUtil.readi18nString(Base64.decode(json.file)))
file = file.getCanonicalFile()
if (!file.exists() || file.isDirectory())
return null
long length = Long.valueOf(json.length)
if (length != file.length())
return null
def file = new File(DataUtil.readi18nString(Base64.decode(json.file)))
file = file.getCanonicalFile()
if (!file.exists() || file.isDirectory())
return null
long length = Long.valueOf(json.length)
if (length != file.length())
return null
List hashList = (List) json.hashList
ByteArrayOutputStream baos = new ByteArrayOutputStream()
hashList.each {
byte [] hash = Base64.decode it.toString()
if (hash == null)
throw new IllegalArgumentException()
baos.write hash
}
byte[] hashListBytes = baos.toByteArray()
List hashList = (List) json.hashList
ByteArrayOutputStream baos = new ByteArrayOutputStream()
hashList.each {
byte [] hash = Base64.decode it.toString()
if (hash == null)
throw new IllegalArgumentException()
baos.write hash
}
byte[] hashListBytes = baos.toByteArray()
InfoHash ih = InfoHash.fromHashList(hashListBytes)
byte [] root = Base64.decode(json.infoHash.toString())
if (root == null)
throw new IllegalArgumentException()
if (!Arrays.equals(root, ih.getRoot()))
return null
InfoHash ih = InfoHash.fromHashList(hashListBytes)
byte [] root = Base64.decode(json.infoHash.toString())
if (root == null)
throw new IllegalArgumentException()
if (!Arrays.equals(root, ih.getRoot()))
return null
int pieceSize = 0
if (json.pieceSize != null)
pieceSize = json.pieceSize
if (json.sources != null) {
List sources = (List)json.sources
Set<Destination> sourceSet = sources.stream().map({d -> new Destination(d.toString())}).collect Collectors.toSet()
DownloadedFile df = new DownloadedFile(file, ih, pieceSize, sourceSet)
return new FileLoadedEvent(loadedFile : df)
}
List sources = (List)json.sources
Set<Destination> sourceSet = sources.stream().map({d -> new Destination(d.toString())}).collect Collectors.toSet()
DownloadedFile df = new DownloadedFile(file, ih, pieceSize, sourceSet)
df.setComment(json.comment)
return new FileLoadedEvent(loadedFile : df)
}
SharedFile sf = new SharedFile(file, ih, pieceSize)
return new FileLoadedEvent(loadedFile: sf)
SharedFile sf = new SharedFile(file, ih, pieceSize)
sf.setComment(json.comment)
return new FileLoadedEvent(loadedFile: sf)
}
}
private void persistFiles() {
def sharedFiles = fileManager.getSharedFiles()
private void persistFiles() {
persisterExecutor.submit( {
def sharedFiles = fileManager.getSharedFiles()
File tmp = File.createTempFile("muwire-files", "tmp")
tmp.deleteOnExit()
tmp.withPrintWriter { writer ->
sharedFiles.each { k, v ->
def json = toJson(k,v)
json = JsonOutput.toJson(json)
writer.println json
}
}
Files.copy(tmp.toPath(), location.toPath(), StandardCopyOption.REPLACE_EXISTING)
tmp.delete()
}
File tmp = File.createTempFile("muwire-files", "tmp")
tmp.deleteOnExit()
tmp.withPrintWriter { writer ->
sharedFiles.each { k, v ->
def json = toJson(k,v)
json = JsonOutput.toJson(json)
writer.println json
}
}
Files.copy(tmp.toPath(), location.toPath(), StandardCopyOption.REPLACE_EXISTING)
tmp.delete()
} as Runnable)
}
private def toJson(File f, SharedFile sf) {
def json = [:]
json.file = Base64.encode DataUtil.encodei18nString(f.getCanonicalFile().toString())
json.length = f.length()
InfoHash ih = sf.getInfoHash()
json.infoHash = Base64.encode ih.getRoot()
private def toJson(File f, SharedFile sf) {
def json = [:]
json.file = sf.getB64EncodedFileName()
json.length = sf.getCachedLength()
InfoHash ih = sf.getInfoHash()
json.infoHash = sf.getB64EncodedHashRoot()
json.pieceSize = sf.getPieceSize()
byte [] tmp = new byte [32]
json.hashList = []
for (int i = 0;i < ih.getHashList().length / 32; i++) {
System.arraycopy(ih.getHashList(), i * 32, tmp, 0, 32)
json.hashList.add Base64.encode(tmp)
}
json.hashList = sf.getB64EncodedHashList()
json.comment = sf.getComment()
if (sf instanceof DownloadedFile) {
json.sources = sf.sources.stream().map( {d -> d.toBase64()}).collect(Collectors.toList())
}
if (sf instanceof DownloadedFile) {
json.sources = sf.sources.stream().map( {d -> d.toBase64()}).collect(Collectors.toList())
}
json
}
json
}
}
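persistFiles() now runs on its own single-thread executor and writes the JSON records to a temporary file that then replaces the live one, so the slow serialization never happens directly against files.json. A generic sketch of that write pattern with an invented record:

import java.nio.file.Files
import java.nio.file.StandardCopyOption

def location = File.createTempFile("files", ".json")   // stands in for the real files.json
def tmp = File.createTempFile("muwire-files", "tmp")
tmp.deleteOnExit()
tmp.withPrintWriter { writer ->
    // one JSON record per line, as in persistFiles() above (record content invented)
    writer.println '{"file":"<base64 name>","length":123,"infoHash":"<base64 root>"}'
}
// only replace the live file once the temporary copy is complete
Files.copy(tmp.toPath(), location.toPath(), StandardCopyOption.REPLACE_EXISTING)
tmp.delete()
println location.text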


@@ -0,0 +1,9 @@
package com.muwire.core.files
import com.muwire.core.Event
import com.muwire.core.SharedFile
class UICommentEvent extends Event {
SharedFile sharedFile
String oldComment
}


@@ -0,0 +1,6 @@
package com.muwire.core.files
import com.muwire.core.Event
class UIPersistFilesEvent extends Event {
}


@@ -18,176 +18,176 @@ import net.i2p.data.Destination
@Log
class CacheClient {
private static final int CRAWLER_RETURN = 10
private static final int CRAWLER_RETURN = 10
final EventBus eventBus
final HostCache cache
final ConnectionManager manager
final I2PSession session
final long interval
final MuWireSettings settings
final Timer timer
final EventBus eventBus
final HostCache cache
final ConnectionManager manager
final I2PSession session
final long interval
final MuWireSettings settings
final Timer timer
public CacheClient(EventBus eventBus, HostCache cache,
ConnectionManager manager, I2PSession session,
MuWireSettings settings, long interval) {
this.eventBus = eventBus
this.cache = cache
this.manager = manager
this.session = session
this.settings = settings
this.interval = interval
this.timer = new Timer("hostcache-client",true)
}
public CacheClient(EventBus eventBus, HostCache cache,
ConnectionManager manager, I2PSession session,
MuWireSettings settings, long interval) {
this.eventBus = eventBus
this.cache = cache
this.manager = manager
this.session = session
this.settings = settings
this.interval = interval
this.timer = new Timer("hostcache-client",true)
}
void start() {
session.addMuxedSessionListener(new Listener(), I2PSession.PROTO_DATAGRAM, 0)
timer.schedule({queryIfNeeded()} as TimerTask, 1, interval)
}
void start() {
session.addMuxedSessionListener(new Listener(), I2PSession.PROTO_DATAGRAM, 0)
timer.schedule({queryIfNeeded()} as TimerTask, 1, interval)
}
void stop() {
timer.cancel()
}
void stop() {
timer.cancel()
}
private void queryIfNeeded() {
if (!manager.getConnections().isEmpty())
return
if (!cache.getHosts(1).isEmpty())
return
private void queryIfNeeded() {
if (!manager.getConnections().isEmpty())
return
if (!cache.getHosts(1).isEmpty())
return
log.info "Will query hostcaches"
log.info "Will query hostcaches"
def ping = [type: "Ping", version: 1, leaf: settings.isLeaf()]
ping = JsonOutput.toJson(ping)
def maker = new I2PDatagramMaker(session)
ping = maker.makeI2PDatagram(ping.bytes)
def options = new SendMessageOptions()
options.setSendLeaseSet(true)
CacheServers.getCacheServers().each {
log.info "Querying hostcache ${it.toBase32()}"
session.sendMessage(it, ping, 0, ping.length, I2PSession.PROTO_DATAGRAM, 1, 0, options)
}
}
def ping = [type: "Ping", version: 1, leaf: settings.isLeaf()]
ping = JsonOutput.toJson(ping)
def maker = new I2PDatagramMaker(session)
ping = maker.makeI2PDatagram(ping.bytes)
def options = new SendMessageOptions()
options.setSendLeaseSet(true)
CacheServers.getCacheServers().each {
log.info "Querying hostcache ${it.toBase32()}"
session.sendMessage(it, ping, 0, ping.length, I2PSession.PROTO_DATAGRAM, 1, 0, options)
}
}
class Listener implements I2PSessionMuxedListener {
class Listener implements I2PSessionMuxedListener {
private final JsonSlurper slurper = new JsonSlurper()
private final JsonSlurper slurper = new JsonSlurper()
@Override
public void messageAvailable(I2PSession session, int msgId, long size) {
}
@Override
public void messageAvailable(I2PSession session, int msgId, long size) {
}
@Override
public void messageAvailable(I2PSession session, int msgId, long size, int proto, int fromport, int toport) {
@Override
public void messageAvailable(I2PSession session, int msgId, long size, int proto, int fromport, int toport) {
if (proto != I2PSession.PROTO_DATAGRAM) {
log.warning "Received unexpected protocol $proto"
return
}
if (proto != I2PSession.PROTO_DATAGRAM) {
log.warning "Received unexpected protocol $proto"
return
}
def payload = session.receiveMessage(msgId)
def dissector = new I2PDatagramDissector()
try {
dissector.loadI2PDatagram(payload)
def sender = dissector.getSender()
log.info("Received something from ${sender.toBase32()}")
def payload = session.receiveMessage(msgId)
def dissector = new I2PDatagramDissector()
try {
dissector.loadI2PDatagram(payload)
def sender = dissector.getSender()
log.info("Received something from ${sender.toBase32()}")
payload = dissector.getPayload()
payload = slurper.parse(payload)
payload = dissector.getPayload()
payload = slurper.parse(payload)
if (payload.type == null) {
log.warning("type missing")
return
}
if (payload.type == null) {
log.warning("type missing")
return
}
switch(payload.type) {
case "Pong" : handlePong(sender, payload); break
case "CrawlerPing": handleCrawlerPing(session, sender, payload); break
default : log.warning("unknown type ${payload.type}")
}
} catch (Exception e) {
log.warning("Invalid datagram $e")
}
}
switch(payload.type) {
case "Pong" : handlePong(sender, payload); break
case "CrawlerPing": handleCrawlerPing(session, sender, payload); break
default : log.warning("unknown type ${payload.type}")
}
} catch (Exception e) {
log.warning("Invalid datagram $e")
}
}
@Override
public void reportAbuse(I2PSession session, int severity) {
}
@Override
public void reportAbuse(I2PSession session, int severity) {
}
@Override
public void disconnected(I2PSession session) {
log.severe "I2P session disconnected"
}
@Override
public void disconnected(I2PSession session) {
log.severe "I2P session disconnected"
}
@Override
public void errorOccurred(I2PSession session, String message, Throwable error) {
log.severe "I2P error occured $message $error"
}
@Override
public void errorOccurred(I2PSession session, String message, Throwable error) {
log.severe "I2P error occured $message $error"
}
}
}
private void handlePong(Destination from, def pong) {
if (!CacheServers.isRegistered(from)) {
log.warning("received pong from non-registered destination")
return
}
private void handlePong(Destination from, def pong) {
if (!CacheServers.isRegistered(from)) {
log.warning("received pong from non-registered destination")
return
}
if (pong.pongs == null) {
log.warning("malformed pong - no pongs")
return
}
if (pong.pongs == null) {
log.warning("malformed pong - no pongs")
return
}
pong.pongs.asList().each {
Destination dest = new Destination(it)
if (!session.getMyDestination().equals(dest))
eventBus.publish(new HostDiscoveredEvent(destination: dest, fromHostcache : true))
}
pong.pongs.asList().each {
Destination dest = new Destination(it)
if (!session.getMyDestination().equals(dest))
eventBus.publish(new HostDiscoveredEvent(destination: dest, fromHostcache : true))
}
}
}
private void handleCrawlerPing(I2PSession session, Destination from, def ping) {
if (settings.isLeaf()) {
log.warning("Received crawler ping but I'm a leaf")
return
}
private void handleCrawlerPing(I2PSession session, Destination from, def ping) {
if (settings.isLeaf()) {
log.warning("Received crawler ping but I'm a leaf")
return
}
switch(settings.getCrawlerResponse()) {
case CrawlerResponse.NONE:
log.info("Responding to crawlers is disabled by user")
break
case CrawlerResponse.ALL:
respondToCrawler(session, from, ping)
break;
case CrawlerResponse.REGISTERED:
if (CacheServers.isRegistered(from))
respondToCrawler(session, from, ping)
else
log.warning("Ignoring crawler ping from non-registered crawler")
break
}
}
switch(settings.getCrawlerResponse()) {
case CrawlerResponse.NONE:
log.info("Responding to crawlers is disabled by user")
break
case CrawlerResponse.ALL:
respondToCrawler(session, from, ping)
break;
case CrawlerResponse.REGISTERED:
if (CacheServers.isRegistered(from))
respondToCrawler(session, from, ping)
else
log.warning("Ignoring crawler ping from non-registered crawler")
break
}
}
private void respondToCrawler(I2PSession session, Destination from, def ping) {
log.info "responding to crawler ping"
private void respondToCrawler(I2PSession session, Destination from, def ping) {
log.info "responding to crawler ping"
def neighbors = manager.getConnections().collect { c -> c.endpoint.destination.toBase64() }
Collections.shuffle(neighbors)
if (neighbors.size() > CRAWLER_RETURN)
neighbors = neighbors[0..CRAWLER_RETURN - 1]
def neighbors = manager.getConnections().collect { c -> c.endpoint.destination.toBase64() }
Collections.shuffle(neighbors)
if (neighbors.size() > CRAWLER_RETURN)
neighbors = neighbors[0..CRAWLER_RETURN - 1]
def upManager = (UltrapeerConnectionManager) manager;
def pong = [:]
pong.peers = neighbors
pong.uuid = ping.uuid
pong.type = "CrawlerPong"
pong.version = 1
pong.leafSlots = upManager.hasLeafSlots()
pong.peerSlots = upManager.hasPeerSlots()
pong = JsonOutput.toJson(pong)
def upManager = (UltrapeerConnectionManager) manager;
def pong = [:]
pong.peers = neighbors
pong.uuid = ping.uuid
pong.type = "CrawlerPong"
pong.version = 1
pong.leafSlots = upManager.hasLeafSlots()
pong.peerSlots = upManager.hasPeerSlots()
pong = JsonOutput.toJson(pong)
def maker = new I2PDatagramMaker(session)
pong = maker.makeI2PDatagram(pong.bytes)
session.sendMessage(from, pong, I2PSession.PROTO_DATAGRAM, 0, 0)
}
def maker = new I2PDatagramMaker(session)
pong = maker.makeI2PDatagram(pong.bytes)
session.sendMessage(from, pong, I2PSession.PROTO_DATAGRAM, 0, 0)
}
}
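The hostcache exchange stays JSON over repliable I2P datagrams: the client sends a Ping and each registered cache replies with a Pong whose pongs field carries base64 destinations. Illustrative payloads built the same way queryIfNeeded does (the destination strings are placeholders):

import groovy.json.JsonOutput

// what the client sends (see queryIfNeeded above)
def ping = [type: "Ping", version: 1, leaf: false]
println JsonOutput.toJson(ping)

// the shape of the answer handlePong expects; destinations are invented placeholders
def pong = [type: "Pong", pongs: ["<base64 destination 1>", "<base64 destination 2>"]]
println JsonOutput.toJson(pong)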


@@ -4,20 +4,25 @@ import net.i2p.data.Destination
class CacheServers {
private static final int TO_GIVE = 3
private static Set<Destination> CACHES = [
new Destination("Wddh2E6FyyXBF7SvUYHKdN-vjf3~N6uqQWNeBDTM0P33YjiQCOsyedrjmDZmWFrXUJfJLWnCb5bnKezfk4uDaMyj~uvDG~yvLVcFgcPWSUd7BfGgym-zqcG1q1DcM8vfun-US7YamBlmtC6MZ2j-~Igqzmgshita8aLPCfNAA6S6e2UMjjtG7QIXlxpMec75dkHdJlVWbzrk9z8Qgru3YIk0UztYgEwDNBbm9wInsbHhr3HtAfa02QcgRVqRN2PnQXuqUJs7R7~09FZPEviiIcUpkY3FeyLlX1sgQFBeGeA96blaPvZNGd6KnNdgfLgMebx5SSxC-N4KZMSMBz5cgonQF3~m2HHFRSI85zqZNG5X9bJN85t80ltiv1W1es8ZnQW4es11r7MrvJNXz5bmSH641yJIvS6qI8OJJNpFVBIQSXLD-96TayrLQPaYw~uNZ-eXaE6G5dYhiuN8xHsFI1QkdaUaVZnvDGfsRbpS5GtpUbBDbyLkdPurG0i7dN1wAAAA")
]
private static final int TO_GIVE = 3
private static Set<Destination> CACHES = [
// zlatinb
new Destination("Wddh2E6FyyXBF7SvUYHKdN-vjf3~N6uqQWNeBDTM0P33YjiQCOsyedrjmDZmWFrXUJfJLWnCb5bnKezfk4uDaMyj~uvDG~yvLVcFgcPWSUd7BfGgym-zqcG1q1DcM8vfun-US7YamBlmtC6MZ2j-~Igqzmgshita8aLPCfNAA6S6e2UMjjtG7QIXlxpMec75dkHdJlVWbzrk9z8Qgru3YIk0UztYgEwDNBbm9wInsbHhr3HtAfa02QcgRVqRN2PnQXuqUJs7R7~09FZPEviiIcUpkY3FeyLlX1sgQFBeGeA96blaPvZNGd6KnNdgfLgMebx5SSxC-N4KZMSMBz5cgonQF3~m2HHFRSI85zqZNG5X9bJN85t80ltiv1W1es8ZnQW4es11r7MrvJNXz5bmSH641yJIvS6qI8OJJNpFVBIQSXLD-96TayrLQPaYw~uNZ-eXaE6G5dYhiuN8xHsFI1QkdaUaVZnvDGfsRbpS5GtpUbBDbyLkdPurG0i7dN1wAAAA"),
// sNL
new Destination("JC63wJNOqSJmymkj4~UJWywBTvDGikKMoYP0HX2Wz9c5l3otXSkwnxWAFL4cKr~Ygh3BNNi2t93vuLIiI1W8AsE42kR~PwRx~Y-WvIHXR6KUejRmOp-n8WidtjKg9k4aDy428uSOedqXDxys5mpoeQXwDsv1CoPTTwnmb1GWFy~oTGIsCguCl~aJWGnqiKarPO3GJQ~ev-NbvAQzUfC3HeP1e6pdI5CGGjExahTCID5UjpJw8GaDXWlGmYWWH303Xu4x-vAHQy1dJLsOBCn8dZravsn5BKJk~j0POUon45CCx-~NYtaPe0Itt9cMdD2ciC76Rep1D0X0sm1SjlSs8sZ52KmF3oaLZ6OzgI9QLMIyBUrfi41sK5I0qTuUVBAkvW1xr~L-20dYJ9TrbOaOb2-vDIfKaxVi6xQOuhgQDiSBhd3qv2m0xGu-BM9DQYfNA0FdMjnZmqjmji9RMavzQSsVFIbQGLbrLepiEFlb7TseCK5UtRp8TxnG7L4gbYevBQAEAAcAAA=="),
// dark_trion
new Destination("Gec9L29FVcQvYDgpcYuEYdltJn06PPoOWAcAM8Af-gDm~ehlrJcwlLXXs0hidq~yP2A0X7QcDi6i6shAfuEofTchxGJl8LRNqj9lio7WnB7cIixXWL~uCkD7Np5LMX0~akNX34oOb9RcBYVT2U5rFGJmJ7OtBv~IBkGeLhsMrqaCjahd0jdBO~QJ-t82ZKZhh044d24~JEfF9zSJxdBoCdAcXzryGNy7sYtFVDFsPKJudAxSW-UsSQiGw2~k-TxyF0r-iAt1IdzfNu8Lu0WPqLdhDYJWcPldx2PR5uJorI~zo~z3I5RX3NwzarlbD4nEP5s65ahPSfVCEkzmaJUBgP8DvBqlFaX89K4nGRYc7jkEjJ8cX4L6YPXUpTPWcfKkW259WdQY3YFh6x7rzijrGZewpczOLCrt-bZRYgDrUibmZxKZmNhy~lQu4gYVVjkz1i4tL~DWlhIc4y0x2vItwkYLArPPi~ejTnt-~Lhb7oPMXRcWa3UrwGKpFvGZY4NXBQAEAAcAAA==")
]
static List<Destination> getCacheServers() {
List<Destination> allCaches = new ArrayList<>(CACHES)
Collections.shuffle(allCaches)
if (allCaches.size() <= TO_GIVE)
return allCaches
allCaches[0..TO_GIVE-1]
}
static List<Destination> getCacheServers() {
List<Destination> allCaches = new ArrayList<>(CACHES)
Collections.shuffle(allCaches)
if (allCaches.size() <= TO_GIVE)
return allCaches
allCaches[0..TO_GIVE-1]
}
static boolean isRegistered(Destination d) {
return CACHES.contains(d)
}
static boolean isRegistered(Destination d) {
return CACHES.contains(d)
}
}


@@ -4,43 +4,67 @@ import net.i2p.data.Destination
class Host {
private static final int MAX_FAILURES = 3
private static final int MAX_FAILURES = 3
final Destination destination
private final int clearInterval
int failures,successes
final Destination destination
private final int clearInterval, hopelessInterval, rejectionInterval
int failures,successes
long lastAttempt
long lastSuccessfulAttempt
long lastRejection
public Host(Destination destination, int clearInterval) {
this.destination = destination
public Host(Destination destination, int clearInterval, int hopelessInterval, int rejectionInterval) {
this.destination = destination
this.clearInterval = clearInterval
}
this.hopelessInterval = hopelessInterval
this.rejectionInterval = rejectionInterval
}
synchronized void onConnect() {
failures = 0
successes++
private void connectSuccessful() {
failures = 0
successes++
lastAttempt = System.currentTimeMillis()
}
}
synchronized void onFailure() {
failures++
successes = 0
synchronized void onConnect() {
connectSuccessful()
lastSuccessfulAttempt = lastAttempt
}
synchronized void onReject() {
connectSuccessful()
lastRejection = lastAttempt;
}
synchronized void onFailure() {
failures++
successes = 0
lastAttempt = System.currentTimeMillis()
}
}
synchronized boolean isFailed() {
failures >= MAX_FAILURES
}
synchronized boolean isFailed() {
failures >= MAX_FAILURES
}
synchronized boolean hasSucceeded() {
successes > 0
}
synchronized boolean hasSucceeded() {
successes > 0
}
synchronized void clearFailures() {
failures = 0
}
synchronized void canTryAgain() {
System.currentTimeMillis() - lastAttempt > (clearInterval * 60 * 1000)
synchronized boolean canTryAgain() {
lastSuccessfulAttempt > 0 &&
System.currentTimeMillis() - lastAttempt > (clearInterval * 60 * 1000)
}
synchronized boolean isHopeless() {
isFailed() &&
System.currentTimeMillis() - lastSuccessfulAttempt > (hopelessInterval * 60 * 1000)
}
synchronized boolean isRecentlyRejected() {
System.currentTimeMillis() - lastRejection < (rejectionInterval * 60 * 1000)
}
}
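The three new intervals are all expressed in minutes: a failed host becomes retryable after clearInterval as long as it has ever succeeded, counts as hopeless once its last success is older than hopelessInterval (and it is currently failed), and is skipped while a rejection is younger than rejectionInterval. A worked example with assumed interval values:

// assumed settings (minutes): clearInterval = 60, hopelessInterval = 1440, rejectionInterval = 30
long now = System.currentTimeMillis()
long minute = 60 * 1000L

long lastAttempt = now - 90 * minute              // last connection attempt 90 minutes ago
long lastSuccessfulAttempt = now - 600 * minute   // last success 10 hours ago
long lastRejection = now - 45 * minute            // last rejection 45 minutes ago

// canTryAgain: has succeeded before and the last attempt is older than clearInterval
assert lastSuccessfulAttempt > 0 && now - lastAttempt > 60 * minute
// isHopeless also requires isFailed(); here the time condition alone is not yet met
assert !(now - lastSuccessfulAttempt > 1440 * minute)
// isRecentlyRejected: the rejection is older than rejectionInterval, so the host is usable again
assert !(now - lastRejection < 30 * minute)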


@@ -15,141 +15,167 @@ import net.i2p.data.Destination
class HostCache extends Service {
final TrustService trustService
final File storage
final int interval
final Timer timer
final MuWireSettings settings
final Destination myself
final Map<Destination, Host> hosts = new ConcurrentHashMap<>()
final TrustService trustService
final File storage
final int interval
final Timer timer
final MuWireSettings settings
final Destination myself
final Map<Destination, Host> hosts = new ConcurrentHashMap<>()
HostCache(){}
HostCache(){}
public HostCache(TrustService trustService, File storage, int interval,
MuWireSettings settings, Destination myself) {
this.trustService = trustService
this.storage = storage
this.interval = interval
this.settings = settings
this.myself = myself
this.timer = new Timer("host-persister",true)
}
public HostCache(TrustService trustService, File storage, int interval,
MuWireSettings settings, Destination myself) {
this.trustService = trustService
this.storage = storage
this.interval = interval
this.settings = settings
this.myself = myself
this.timer = new Timer("host-persister",true)
}
void start() {
timer.schedule({load()} as TimerTask, 1)
}
void start() {
timer.schedule({load()} as TimerTask, 1)
}
void stop() {
timer.cancel()
}
void stop() {
timer.cancel()
}
void onHostDiscoveredEvent(HostDiscoveredEvent e) {
if (myself == e.destination)
return
if (hosts.containsKey(e.destination)) {
void onHostDiscoveredEvent(HostDiscoveredEvent e) {
if (myself == e.destination)
return
if (hosts.containsKey(e.destination)) {
if (!e.fromHostcache)
return
hosts.get(e.destination).clearFailures()
return
}
Host host = new Host(e.destination, settings.hostClearInterval)
if (allowHost(host)) {
hosts.put(e.destination, host)
}
}
}
Host host = new Host(e.destination, settings.hostClearInterval, settings.hostHopelessInterval, settings.hostRejectInterval)
if (allowHost(host)) {
hosts.put(e.destination, host)
}
}
void onConnectionEvent(ConnectionEvent e) {
if (e.leaf)
return
Destination dest = e.endpoint.destination
Host host = hosts.get(dest)
if (host == null) {
host = new Host(dest, settings.hostClearInterval)
hosts.put(dest, host)
}
void onConnectionEvent(ConnectionEvent e) {
if (e.leaf)
return
Destination dest = e.endpoint.destination
Host host = hosts.get(dest)
if (host == null) {
host = new Host(dest, settings.hostClearInterval, settings.hostHopelessInterval, settings.hostRejectInterval)
hosts.put(dest, host)
}
switch(e.status) {
case ConnectionAttemptStatus.SUCCESSFUL:
case ConnectionAttemptStatus.REJECTED:
host.onConnect()
break
case ConnectionAttemptStatus.FAILED:
host.onFailure()
break
}
}
switch(e.status) {
case ConnectionAttemptStatus.SUCCESSFUL:
host.onConnect()
break
case ConnectionAttemptStatus.REJECTED:
host.onReject()
break
case ConnectionAttemptStatus.FAILED:
host.onFailure()
break
}
}
List<Destination> getHosts(int n) {
List<Destination> rv = new ArrayList<>(hosts.keySet())
rv.retainAll {allowHost(hosts[it])}
if (rv.size() <= n)
return rv
Collections.shuffle(rv)
rv[0..n-1]
}
List<Destination> getHosts(int n) {
List<Destination> rv = new ArrayList<>(hosts.keySet())
rv.retainAll {allowHost(hosts[it])}
rv.removeAll {
def h = hosts[it];
(h.isFailed() && !h.canTryAgain()) || h.isRecentlyRejected()
}
if (rv.size() <= n)
return rv
Collections.shuffle(rv)
rv[0..n-1]
}
List<Destination> getGoodHosts(int n) {
List<Destination> rv = new ArrayList<>(hosts.keySet())
rv.retainAll {
Host host = hosts[it]
allowHost(host) && host.hasSucceeded()
}
if (rv.size() <= n)
return rv
Collections.shuffle(rv)
rv[0..n-1]
}
List<Destination> getGoodHosts(int n) {
List<Destination> rv = new ArrayList<>(hosts.keySet())
rv.retainAll {
Host host = hosts[it]
allowHost(host) && host.hasSucceeded()
}
if (rv.size() <= n)
return rv
Collections.shuffle(rv)
rv[0..n-1]
}
void load() {
if (storage.exists()) {
JsonSlurper slurper = new JsonSlurper()
storage.eachLine {
def entry = slurper.parseText(it)
Destination dest = new Destination(entry.destination)
Host host = new Host(dest, settings.hostClearInterval)
host.failures = Integer.valueOf(String.valueOf(entry.failures))
host.successes = Integer.valueOf(String.valueOf(entry.successes))
int countFailingHosts() {
List<Destination> rv = new ArrayList<>(hosts.keySet())
rv.retainAll {
hosts[it].isFailed()
}
rv.size()
}
int countHopelessHosts() {
List<Destination> rv = new ArrayList<>(hosts.keySet())
rv.retainAll {
hosts[it].isHopeless()
}
rv.size()
}
void load() {
if (storage.exists()) {
JsonSlurper slurper = new JsonSlurper()
storage.eachLine {
def entry = slurper.parseText(it)
Destination dest = new Destination(entry.destination)
Host host = new Host(dest, settings.hostClearInterval, settings.hostHopelessInterval, settings.hostRejectInterval)
host.failures = Integer.valueOf(String.valueOf(entry.failures))
host.successes = Integer.valueOf(String.valueOf(entry.successes))
if (entry.lastAttempt != null)
host.lastAttempt = entry.lastAttempt
if (allowHost(host))
hosts.put(dest, host)
}
}
timer.schedule({save()} as TimerTask, interval, interval)
loaded = true
}
if (entry.lastSuccessfulAttempt != null)
host.lastSuccessfulAttempt = entry.lastSuccessfulAttempt
if (entry.lastRejection != null)
host.lastRejection = entry.lastRejection
if (allowHost(host))
hosts.put(dest, host)
}
}
timer.schedule({save()} as TimerTask, interval, interval)
loaded = true
}
private boolean allowHost(Host host) {
if (host.isFailed() && !host.canTryAgain())
return false
if (host.destination == myself)
return false
TrustLevel trust = trustService.getLevel(host.destination)
switch(trust) {
case TrustLevel.DISTRUSTED :
return false
case TrustLevel.TRUSTED :
return true
case TrustLevel.NEUTRAL :
return settings.allowUntrusted()
}
false
}
private boolean allowHost(Host host) {
if (host.destination == myself)
return false
TrustLevel trust = trustService.getLevel(host.destination)
switch(trust) {
case TrustLevel.DISTRUSTED :
return false
case TrustLevel.TRUSTED :
return true
case TrustLevel.NEUTRAL :
return settings.allowUntrusted()
}
false
}
private void save() {
storage.delete()
storage.withPrintWriter { writer ->
hosts.each { dest, host ->
if (allowHost(host)) {
def map = [:]
map.destination = dest.toBase64()
map.failures = host.failures
map.successes = host.successes
private void save() {
storage.delete()
storage.withPrintWriter { writer ->
hosts.each { dest, host ->
if (allowHost(host) && !host.isHopeless()) {
def map = [:]
map.destination = dest.toBase64()
map.failures = host.failures
map.successes = host.successes
map.lastAttempt = host.lastAttempt
def json = JsonOutput.toJson(map)
writer.println json
}
}
}
}
map.lastSuccessfulAttempt = host.lastSuccessfulAttempt
map.lastRejection = host.lastRejection
def json = JsonOutput.toJson(map)
writer.println json
}
}
}
}
}


@@ -6,11 +6,11 @@ import net.i2p.data.Destination
class HostDiscoveredEvent extends Event {
Destination destination
Destination destination
boolean fromHostcache
@Override
public String toString() {
"HostDiscoveredEvent ${super.toString()} destination:${destination.toBase32()} from hostcache $fromHostcache"
}
@Override
public String toString() {
"HostDiscoveredEvent ${super.toString()} destination:${destination.toBase32()} from hostcache $fromHostcache"
}
}


@@ -33,11 +33,12 @@ class MeshManager {
meshes.get(infoHash)
}
Mesh getOrCreate(InfoHash infoHash, int nPieces) {
Mesh getOrCreate(InfoHash infoHash, int nPieces, boolean sequential) {
synchronized(meshes) {
if (meshes.containsKey(infoHash))
return meshes.get(infoHash)
Pieces pieces = new Pieces(nPieces, settings.downloadSequentialRatio)
float ratio = sequential ? 0f : settings.downloadSequentialRatio
Pieces pieces = new Pieces(nPieces, ratio)
if (fileManager.rootToFiles.containsKey(infoHash)) {
for (int i = 0; i < nPieces; i++)
pieces.markDownloaded(i)


@@ -0,0 +1,87 @@
package com.muwire.core.search
import com.muwire.core.Constants
import com.muwire.core.EventBus
import com.muwire.core.connection.Endpoint
import com.muwire.core.connection.I2PConnector
import com.muwire.core.util.DataUtil
import groovy.json.JsonSlurper
import groovy.util.logging.Log
import java.nio.charset.StandardCharsets
import java.util.concurrent.Executor
import java.util.concurrent.Executors
import java.util.logging.Level
import java.util.zip.GZIPInputStream
@Log
class BrowseManager {
private final I2PConnector connector
private final EventBus eventBus
private final Executor browserThread = Executors.newSingleThreadExecutor()
BrowseManager(I2PConnector connector, EventBus eventBus) {
this.connector = connector
this.eventBus = eventBus
}
void onUIBrowseEvent(UIBrowseEvent e) {
browserThread.execute({
Endpoint endpoint = null
try {
eventBus.publish(new BrowseStatusEvent(status : BrowseStatus.CONNECTING))
endpoint = connector.connect(e.host.destination)
OutputStream os = endpoint.getOutputStream()
os.write("BROWSE\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
InputStream is = endpoint.getInputStream()
String code = DataUtil.readTillRN(is)
if (!code.startsWith("200"))
throw new IOException("Invalid code $code")
// parse all headers
Map<String,String> headers = new HashMap<>()
String header
while((header = DataUtil.readTillRN(is)) != "" && headers.size() < Constants.MAX_HEADERS) {
int colon = header.indexOf(':')
if (colon == -1 || colon == header.length() - 1)
throw new IOException("invalid header $header")
String key = header.substring(0, colon)
String value = header.substring(colon + 1)
headers[key] = value.trim()
}
if (!headers.containsKey("Count"))
throw new IOException("No count header")
int results = Integer.parseInt(headers['Count'])
// at this stage, start pulling the results
eventBus.publish(new BrowseStatusEvent(status : BrowseStatus.FETCHING, totalResults : results))
JsonSlurper slurper = new JsonSlurper()
DataInputStream dis = new DataInputStream(new GZIPInputStream(is))
UUID uuid = UUID.randomUUID()
for (int i = 0; i < results; i++) {
int size = dis.readUnsignedShort()
byte [] tmp = new byte[size]
dis.readFully(tmp)
def json = slurper.parse(tmp)
UIResultEvent result = ResultsParser.parse(e.host, uuid, json)
eventBus.publish(result)
}
eventBus.publish(new BrowseStatusEvent(status : BrowseStatus.FINISHED))
} catch (Exception bad) {
log.log(Level.WARNING, "browse failed", bad)
eventBus.publish(new BrowseStatusEvent(status : BrowseStatus.FAILED))
} finally {
endpoint?.close()
}
} as Runnable)
}
}
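On the wire a browse is: the requester writes BROWSE plus a blank line, the responder answers with a 200 status line and headers including Count, then streams that many results through a single GZIP stream, each result a JSON object prefixed by its length as an unsigned short. A sketch of a responder that would satisfy the reader above; the serving side is not part of this changeset, so this is an assumption-driven illustration:

import groovy.json.JsonOutput
import java.nio.charset.StandardCharsets
import java.util.zip.GZIPOutputStream

// sketch of a responder matching what BrowseManager expects to read
void respond(OutputStream os, List<Map> results) {
    os.write("200 OK\r\n".getBytes(StandardCharsets.US_ASCII))
    os.write("Count: ${results.size()}\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
    DataOutputStream dos = new DataOutputStream(new GZIPOutputStream(os))
    results.each {
        byte[] json = JsonOutput.toJson(it).getBytes(StandardCharsets.US_ASCII)
        dos.writeShort((short) json.length)    // length prefix read back with readUnsignedShort()
        dos.write(json)
    }
    dos.close()
}

def baos = new ByteArrayOutputStream()
respond(baos, [[name: "example.txt", size: 1234]])
println "${baos.size()} bytes written"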


@@ -0,0 +1,5 @@
package com.muwire.core.search;
public enum BrowseStatus {
CONNECTING, FETCHING, FINISHED, FAILED
}


@@ -0,0 +1,8 @@
package com.muwire.core.search
import com.muwire.core.Event
class BrowseStatusEvent extends Event {
BrowseStatus status
int totalResults
}


@@ -6,11 +6,11 @@ import net.i2p.data.Base32
import net.i2p.data.Destination
class DeleteEvent extends Event {
byte [] infoHash
Destination leaf
byte [] infoHash
Destination leaf
@Override
public String toString() {
"DeleteEvent ${super.toString()} infoHash:${Base32.encode(infoHash)} leaf:${leaf.toBase32()}"
}
@Override
public String toString() {
"DeleteEvent ${super.toString()} infoHash:${Base32.encode(infoHash)} leaf:${leaf.toBase32()}"
}
}


@@ -7,32 +7,32 @@ import net.i2p.data.Destination
class LeafSearcher {
final UltrapeerConnectionManager connectionManager
final SearchIndex searchIndex = new SearchIndex()
final UltrapeerConnectionManager connectionManager
final SearchIndex searchIndex = new SearchIndex()
final Map<String, Set<byte[]>> fileNameToHashes = new HashMap<>()
final Map<byte[], Set<Destination>> hashToLeafs = new HashMap<>()
final Map<String, Set<byte[]>> fileNameToHashes = new HashMap<>()
final Map<byte[], Set<Destination>> hashToLeafs = new HashMap<>()
final Map<Destination, Map<byte[], Set<String>>> leafToFiles = new HashMap<>()
final Map<Destination, Map<byte[], Set<String>>> leafToFiles = new HashMap<>()
LeafSearcher(UltrapeerConnectionManager connectionManager) {
this.connectionManager = connectionManager
}
LeafSearcher(UltrapeerConnectionManager connectionManager) {
this.connectionManager = connectionManager
}
void onUpsertEvent(UpsertEvent e) {
// TODO: implement
}
void onUpsertEvent(UpsertEvent e) {
// TODO: implement
}
void onDeleteEvent(DeleteEvent e) {
// TODO: implement
}
void onDeleteEvent(DeleteEvent e) {
// TODO: implement
}
void onDisconnectionEvent(DisconnectionEvent e) {
// TODO: implement
}
void onDisconnectionEvent(DisconnectionEvent e) {
// TODO: implement
}
void onQueryEvent(QueryEvent e) {
// TODO: implement
}
void onQueryEvent(QueryEvent e) {
// TODO: implement
}
}


@@ -8,10 +8,10 @@ import net.i2p.data.Destination
class QueryEvent extends Event {
SearchEvent searchEvent
boolean firstHop
Destination replyTo
boolean firstHop
Destination replyTo
Persona originator
Destination receivedOn
Destination receivedOn
String toString() {
"searchEvent: $searchEvent firstHop:$firstHop, replyTo:${replyTo.toBase32()}" +


@@ -6,6 +6,6 @@ import com.muwire.core.SharedFile
class ResultsEvent extends Event {
SearchEvent searchEvent
SharedFile[] results
UUID uuid
SharedFile[] results
UUID uuid
}


@@ -91,12 +91,22 @@ class ResultsParser {
if (json.sources != null)
sources = json.sources.stream().map({new Destination(it)}).collect(Collectors.toSet())
String comment = null
if (json.comment != null)
comment = DataUtil.readi18nString(Base64.decode(json.comment))
boolean browse = false
if (json.browse != null)
browse = json.browse
return new UIResultEvent( sender : p,
name : name,
size : size,
infohash : new InfoHash(infoHash),
pieceSize : pieceSize,
sources : sources,
comment : comment,
browse : browse,
uuid: uuid)
} catch (Exception e) {
throw new InvalidSearchResultException("parsing search result failed",e)


@@ -4,6 +4,7 @@ import com.muwire.core.SharedFile
import com.muwire.core.connection.Endpoint
import com.muwire.core.connection.I2PConnector
import com.muwire.core.files.FileHasher
import com.muwire.core.util.DataUtil
import com.muwire.core.Persona
import java.nio.charset.StandardCharsets
@@ -13,10 +14,12 @@ import java.util.concurrent.ThreadFactory
import java.util.concurrent.atomic.AtomicInteger
import java.util.logging.Level
import java.util.stream.Collectors
import java.util.zip.GZIPOutputStream
import com.muwire.core.DownloadedFile
import com.muwire.core.EventBus
import com.muwire.core.InfoHash
import com.muwire.core.MuWireSettings
import groovy.json.JsonOutput
import groovy.util.logging.Log
@@ -42,16 +45,19 @@ class ResultsSender {
private final I2PConnector connector
private final Persona me
private final EventBus eventBus
private final MuWireSettings settings
ResultsSender(EventBus eventBus, I2PConnector connector, Persona me) {
ResultsSender(EventBus eventBus, I2PConnector connector, Persona me, MuWireSettings settings) {
this.connector = connector;
this.eventBus = eventBus
this.me = me
this.settings = settings
}
void sendResults(UUID uuid, SharedFile[] results, Destination target, boolean oobInfohash) {
void sendResults(UUID uuid, SharedFile[] results, Destination target, boolean oobInfohash, boolean compressedResults) {
log.info("Sending $results.length results for uuid $uuid to ${target.toBase32()} oobInfohash : $oobInfohash")
if (target.equals(me.destination)) {
def uiResultEvents = []
results.each {
long length = it.getFile().length()
int pieceSize = it.getPieceSize()
@@ -60,19 +66,25 @@ class ResultsSender {
Set<Destination> suggested = Collections.emptySet()
if (it instanceof DownloadedFile)
suggested = it.sources
def comment = null
if (it.getComment() != null) {
comment = DataUtil.readi18nString(Base64.decode(it.getComment()))
}
def uiResultEvent = new UIResultEvent( sender : me,
name : it.getFile().getName(),
size : length,
infohash : it.getInfoHash(),
pieceSize : pieceSize,
uuid : uuid,
sources : suggested
sources : suggested,
comment : comment
)
eventBus.publish(uiResultEvent)
uiResultEvents << uiResultEvent
}
eventBus.publish(new UIResultBatchEvent(uuid : uuid, results : uiResultEvents))
} else {
executor.execute(new ResultSendJob(uuid : uuid, results : results,
target: target, oobInfohash : oobInfohash))
target: target, oobInfohash : oobInfohash, compressedResults : compressedResults))
}
}
@@ -81,58 +93,79 @@ class ResultsSender {
SharedFile [] results
Destination target
boolean oobInfohash
boolean compressedResults
@Override
public void run() {
try {
byte [] tmp = new byte[InfoHash.SIZE]
JsonOutput jsonOutput = new JsonOutput()
Endpoint endpoint = null;
try {
endpoint = connector.connect(target)
DataOutputStream os = new DataOutputStream(endpoint.getOutputStream())
os.write("POST $uuid\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
me.write(os)
os.writeShort((short)results.length)
results.each {
byte [] name = it.getFile().getName().getBytes(StandardCharsets.UTF_8)
def baos = new ByteArrayOutputStream()
def daos = new DataOutputStream(baos)
daos.writeShort((short) name.length)
daos.write(name)
daos.flush()
String encodedName = Base64.encode(baos.toByteArray())
def obj = [:]
obj.type = "Result"
obj.version = oobInfohash ? 2 : 1
obj.name = encodedName
obj.infohash = Base64.encode(it.getInfoHash().getRoot())
obj.size = it.getFile().length()
obj.pieceSize = it.getPieceSize()
if (!oobInfohash) {
byte [] hashList = it.getInfoHash().getHashList()
def hashListB64 = []
for (int i = 0; i < hashList.length / InfoHash.SIZE; i++) {
System.arraycopy(hashList, InfoHash.SIZE * i, tmp, 0, InfoHash.SIZE)
hashListB64 << Base64.encode(tmp)
}
obj.hashList = hashListB64
if (!compressedResults) {
try {
endpoint = connector.connect(target)
DataOutputStream os = new DataOutputStream(endpoint.getOutputStream())
os.write("POST $uuid\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
me.write(os)
os.writeShort((short)results.length)
results.each {
def obj = sharedFileToObj(it, settings.browseFiles)
def json = jsonOutput.toJson(obj)
os.writeShort((short)json.length())
os.write(json.getBytes(StandardCharsets.US_ASCII))
}
if (it instanceof DownloadedFile)
obj.sources = it.sources.stream().map({dest -> dest.toBase64()}).collect(Collectors.toSet())
def json = jsonOutput.toJson(obj)
os.writeShort((short)json.length())
os.write(json.getBytes(StandardCharsets.US_ASCII))
os.flush()
} finally {
endpoint?.close()
}
} else {
try {
endpoint = connector.connect(target)
OutputStream os = endpoint.getOutputStream()
os.write("RESULTS $uuid\r\n".getBytes(StandardCharsets.US_ASCII))
os.write("Sender: ${me.toBase64()}\r\n".getBytes(StandardCharsets.US_ASCII))
os.write("Count: $results.length\r\n".getBytes(StandardCharsets.US_ASCII))
os.write("\r\n".getBytes(StandardCharsets.US_ASCII))
DataOutputStream dos = new DataOutputStream(new GZIPOutputStream(os))
results.each {
def obj = sharedFileToObj(it, settings.browseFiles)
def json = jsonOutput.toJson(obj)
dos.writeShort((short)json.length())
dos.write(json.getBytes(StandardCharsets.US_ASCII))
}
dos.close()
} finally {
endpoint?.close()
}
os.flush()
} finally {
endpoint?.close()
}
} catch (Exception e) {
log.log(Level.WARNING, "problem sending results",e)
}
}
}
public static def sharedFileToObj(SharedFile sf, boolean browseFiles) {
byte [] name = sf.getFile().getName().getBytes(StandardCharsets.UTF_8)
def baos = new ByteArrayOutputStream()
def daos = new DataOutputStream(baos)
daos.writeShort((short) name.length)
daos.write(name)
daos.flush()
String encodedName = Base64.encode(baos.toByteArray())
def obj = [:]
obj.type = "Result"
obj.version = 2
obj.name = encodedName
obj.infohash = Base64.encode(sf.getInfoHash().getRoot())
obj.size = sf.getCachedLength()
obj.pieceSize = sf.getPieceSize()
if (sf instanceof DownloadedFile)
obj.sources = sf.sources.stream().map({dest -> dest.toBase64()}).collect(Collectors.toSet())
if (sf.getComment() != null)
obj.comment = sf.getComment()
obj.browse = browseFiles
obj
}
}
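
The receiving side of the new compressed RESULTS stream is not part of this diff. A minimal sketch of what a receiver might do after reading the "RESULTS <uuid>", "Sender:" and "Count:" headers, assuming the framing shown above (an unsigned-short JSON length followed by US-ASCII JSON per result, gzipped as one stream):

    import java.nio.charset.StandardCharsets
    import java.util.zip.GZIPInputStream
    import groovy.json.JsonSlurper

    // hypothetical receiver mirroring ResultSendJob's compressed branch above
    List readCompressedResults(InputStream body, int count) {
        def dis = new DataInputStream(new GZIPInputStream(body))
        def slurper = new JsonSlurper()
        def results = []
        count.times {
            int len = dis.readUnsignedShort()
            byte[] raw = new byte[len]
            dis.readFully(raw)
            results << slurper.parseText(new String(raw, StandardCharsets.US_ASCII))
        }
        results
    }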

View File

@@ -5,15 +5,17 @@ import com.muwire.core.InfoHash
class SearchEvent extends Event {
List<String> searchTerms
byte [] searchHash
UUID uuid
List<String> searchTerms
byte [] searchHash
UUID uuid
boolean oobInfohash
boolean searchComments
boolean compressedResults
String toString() {
def infoHash = null
if (searchHash != null)
infoHash = new InfoHash(searchHash)
"searchTerms: $searchTerms searchHash:$infoHash, uuid:$uuid oobInfohash:$oobInfohash"
"searchTerms: $searchTerms searchHash:$infoHash, uuid:$uuid oobInfohash:$oobInfohash searchComments:$searchComments compressedResults:$compressedResults"
}
}
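
A quick sketch of the two new flags in use, assuming the usual Groovy map constructor; the terms and UUID are just illustrative:

    import com.muwire.core.search.SearchEvent

    def event = new SearchEvent(searchTerms: ["vacation", "photos"],
            uuid: UUID.randomUUID(),
            oobInfohash: true,
            searchComments: true,       // also match shared-file comments
            compressedResults: true)    // ask responders to gzip their result batch
    assert event.toString().contains("compressedResults:true")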

View File

@@ -1,59 +1,59 @@
package com.muwire.core.search
import com.muwire.core.Constants
import com.muwire.core.SplitPattern
class SearchIndex {
final Map<String, Set<String>> keywords = new HashMap<>()
final Map<String, Set<String>> keywords = new HashMap<>()
void add(String string) {
String [] split = split(string)
split.each {
Set<String> existing = keywords.get(it)
if (existing == null) {
existing = new HashSet<>()
keywords.put(it, existing)
}
existing.add(string)
}
}
void add(String string) {
String [] split = split(string)
split.each {
Set<String> existing = keywords.get(it)
if (existing == null) {
existing = new HashSet<>()
keywords.put(it, existing)
}
existing.add(string)
}
}
void remove(String string) {
String [] split = split(string)
split.each {
Set<String> existing = keywords.get it
if (existing != null) {
existing.remove(string)
if (existing.isEmpty()) {
keywords.remove(it)
}
}
}
}
void remove(String string) {
String [] split = split(string)
split.each {
Set<String> existing = keywords.get it
if (existing != null) {
existing.remove(string)
if (existing.isEmpty()) {
keywords.remove(it)
}
}
}
}
private static String[] split(String source) {
source = source.replaceAll(Constants.SPLIT_PATTERN, " ").toLowerCase()
String [] split = source.split(" ")
private static String[] split(String source) {
source = source.replaceAll(SplitPattern.SPLIT_PATTERN, " ").toLowerCase()
String [] split = source.split(" ")
def rv = []
split.each { if (it.length() > 0) rv << it }
rv.toArray(new String[0])
}
}
String[] search(List<String> terms) {
Set<String> rv = null;
String[] search(List<String> terms) {
Set<String> rv = null;
terms.each {
Set<String> forWord = keywords.getOrDefault(it,[])
if (rv == null) {
rv = new HashSet<>(forWord)
} else {
rv.retainAll(forWord)
}
terms.each {
Set<String> forWord = keywords.getOrDefault(it,[])
if (rv == null) {
rv = new HashSet<>(forWord)
} else {
rv.retainAll(forWord)
}
}
}
if (rv != null)
return rv.asList()
[]
}
if (rv != null)
return rv.asList()
[]
}
}
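
A usage sketch of the index above: added names are tokenized and lowercased, and search terms are intersected, so terms are given here in lower case.

    import com.muwire.core.search.SearchIndex

    def index = new SearchIndex()
    index.add("summer vacation video")
    index.add("vacation photos")

    assert index.search(["vacation"]).length == 2            // both names contain the token
    assert index.search(["vacation", "photos"]).length == 1  // terms are intersected
    assert index.search(["winter"]).length == 0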

View File

@@ -44,7 +44,7 @@ public class SearchManager {
log.info("No results for search uuid $event.uuid")
return
}
resultsSender.sendResults(event.uuid, event.results, target, event.searchEvent.oobInfohash)
resultsSender.sendResults(event.uuid, event.results, target, event.searchEvent.oobInfohash, event.searchEvent.compressedResults)
}
boolean hasLocalSearch(UUID uuid) {

View File

@@ -0,0 +1,8 @@
package com.muwire.core.search
import com.muwire.core.Event
import com.muwire.core.Persona
class UIBrowseEvent extends Event {
Persona host
}

View File

@@ -14,6 +14,8 @@ class UIResultEvent extends Event {
long size
InfoHash infohash
int pieceSize
String comment
boolean browse
@Override
public String toString() {

View File

@@ -7,12 +7,12 @@ import net.i2p.data.Destination
class UpsertEvent extends Event {
Set<String> names
byte [] infoHash
Destination leaf
Set<String> names
byte [] infoHash
Destination leaf
@Override
public String toString() {
"UpsertEvent ${super.toString()} names:$names infoHash:${Base32.encode(infoHash)} leaf:${leaf.toBase32()}"
}
@Override
public String toString() {
"UpsertEvent ${super.toString()} names:$names infoHash:${Base32.encode(infoHash)} leaf:${leaf.toBase32()}"
}
}

View File

@@ -0,0 +1,31 @@
package com.muwire.core.trust
import java.util.concurrent.ConcurrentHashMap
import com.muwire.core.Persona
import net.i2p.util.ConcurrentHashSet
class RemoteTrustList {
public enum Status { NEW, UPDATING, UPDATED, UPDATE_FAILED }
private final Persona persona
private final Set<Persona> good, bad
volatile long timestamp
volatile boolean forceUpdate
Status status = Status.NEW
RemoteTrustList(Persona persona) {
this.persona = persona
good = new ConcurrentHashSet<>()
bad = new ConcurrentHashSet<>()
}
@Override
public boolean equals(Object o) {
if (!(o instanceof RemoteTrustList))
return false
RemoteTrustList other = (RemoteTrustList)o
persona == other.persona
}
}

View File

@@ -5,6 +5,6 @@ import com.muwire.core.Persona
class TrustEvent extends Event {
Persona persona
TrustLevel level
Persona persona
TrustLevel level
}

View File

@@ -11,87 +11,87 @@ import net.i2p.util.ConcurrentHashSet
class TrustService extends Service {
final File persistGood, persistBad
final long persistInterval
final File persistGood, persistBad
final long persistInterval
final Map<Destination, Persona> good = new ConcurrentHashMap<>()
final Map<Destination, Persona> bad = new ConcurrentHashMap<>()
final Map<Destination, Persona> good = new ConcurrentHashMap<>()
final Map<Destination, Persona> bad = new ConcurrentHashMap<>()
final Timer timer
final Timer timer
TrustService() {}
TrustService() {}
TrustService(File persistGood, File persistBad, long persistInterval) {
this.persistBad = persistBad
this.persistGood = persistGood
this.persistInterval = persistInterval
this.timer = new Timer("trust-persister",true)
}
TrustService(File persistGood, File persistBad, long persistInterval) {
this.persistBad = persistBad
this.persistGood = persistGood
this.persistInterval = persistInterval
this.timer = new Timer("trust-persister",true)
}
void start() {
timer.schedule({load()} as TimerTask, 1)
}
void start() {
timer.schedule({load()} as TimerTask, 1)
}
void stop() {
timer.cancel()
}
void stop() {
timer.cancel()
}
void load() {
if (persistGood.exists()) {
persistGood.eachLine {
void load() {
if (persistGood.exists()) {
persistGood.eachLine {
byte [] decoded = Base64.decode(it)
Persona persona = new Persona(new ByteArrayInputStream(decoded))
good.put(persona.destination, persona)
}
}
if (persistBad.exists()) {
persistBad.eachLine {
good.put(persona.destination, persona)
}
}
if (persistBad.exists()) {
persistBad.eachLine {
byte [] decoded = Base64.decode(it)
Persona persona = new Persona(new ByteArrayInputStream(decoded))
bad.put(persona.destination, persona)
}
}
timer.schedule({persist()} as TimerTask, persistInterval, persistInterval)
loaded = true
}
}
}
timer.schedule({persist()} as TimerTask, persistInterval, persistInterval)
loaded = true
}
private void persist() {
persistGood.delete()
persistGood.withPrintWriter { writer ->
good.each {k,v ->
writer.println v.toBase64()
}
}
persistBad.delete()
persistBad.withPrintWriter { writer ->
bad.each { k,v ->
writer.println v.toBase64()
}
}
}
private void persist() {
persistGood.delete()
persistGood.withPrintWriter { writer ->
good.each {k,v ->
writer.println v.toBase64()
}
}
persistBad.delete()
persistBad.withPrintWriter { writer ->
bad.each { k,v ->
writer.println v.toBase64()
}
}
}
TrustLevel getLevel(Destination dest) {
if (good.containsKey(dest))
return TrustLevel.TRUSTED
else if (bad.containsKey(dest))
return TrustLevel.DISTRUSTED
TrustLevel.NEUTRAL
}
TrustLevel getLevel(Destination dest) {
if (good.containsKey(dest))
return TrustLevel.TRUSTED
else if (bad.containsKey(dest))
return TrustLevel.DISTRUSTED
TrustLevel.NEUTRAL
}
void onTrustEvent(TrustEvent e) {
switch(e.level) {
case TrustLevel.TRUSTED:
bad.remove(e.persona.destination)
good.put(e.persona.destination, e.persona)
break
case TrustLevel.DISTRUSTED:
good.remove(e.persona.destination)
bad.put(e.persona.destination, e.persona)
break
case TrustLevel.NEUTRAL:
good.remove(e.persona.destination)
bad.remove(e.persona.destination)
break
}
}
void onTrustEvent(TrustEvent e) {
switch(e.level) {
case TrustLevel.TRUSTED:
bad.remove(e.persona.destination)
good.put(e.persona.destination, e.persona)
break
case TrustLevel.DISTRUSTED:
good.remove(e.persona.destination)
bad.put(e.persona.destination, e.persona)
break
case TrustLevel.NEUTRAL:
good.remove(e.persona.destination)
bad.remove(e.persona.destination)
break
}
}
}
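
A rough wiring sketch, not part of this diff: the service persists one Base64-encoded Persona per line in each file and reacts to TrustEvents published on the bus. The method and file names here are placeholders; the persona is supplied by the caller, e.g. from a search result.

    import com.muwire.core.EventBus
    import com.muwire.core.Persona
    import com.muwire.core.trust.*

    // wires a TrustService to an EventBus and marks one persona as trusted
    void trustExample(Persona persona) {
        def service = new TrustService(new File("trust.good"), new File("trust.bad"), 60000L)
        def bus = new EventBus()
        bus.register(TrustEvent.class, service)   // dispatched to onTrustEvent(...)
        service.start()
        bus.publish(new TrustEvent(persona: persona, level: TrustLevel.TRUSTED))
    }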

View File

@@ -0,0 +1,161 @@
package com.muwire.core.trust
import java.nio.charset.StandardCharsets
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.logging.Level
import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.UILoadedEvent
import com.muwire.core.connection.Endpoint
import com.muwire.core.connection.I2PConnector
import com.muwire.core.util.DataUtil
import groovy.util.logging.Log
import net.i2p.data.Destination
@Log
class TrustSubscriber {
private final EventBus eventBus
private final I2PConnector i2pConnector
private final MuWireSettings settings
private final Map<Destination, RemoteTrustList> remoteTrustLists = new ConcurrentHashMap<>()
private final Object waitLock = new Object()
private volatile boolean shutdown
private volatile Thread thread
private final ExecutorService updateThreads = Executors.newCachedThreadPool()
TrustSubscriber(EventBus eventBus, I2PConnector i2pConnector, MuWireSettings settings) {
this.eventBus = eventBus
this.i2pConnector = i2pConnector
this.settings = settings
}
void onUILoadedEvent(UILoadedEvent e) {
thread = new Thread({checkLoop()} as Runnable, "trust-subscriber")
thread.setDaemon(true)
thread.start()
}
void stop() {
shutdown = true
thread?.interrupt()
updateThreads.shutdownNow()
}
void onTrustSubscriptionEvent(TrustSubscriptionEvent e) {
if (!e.subscribe) {
remoteTrustLists.remove(e.persona.destination)
} else {
RemoteTrustList trustList = remoteTrustLists.putIfAbsent(e.persona.destination, new RemoteTrustList(e.persona))
trustList?.forceUpdate = true
synchronized(waitLock) {
waitLock.notify()
}
}
}
private void checkLoop() {
try {
while(!shutdown) {
synchronized(waitLock) {
waitLock.wait(60 * 1000)
}
final long now = System.currentTimeMillis()
remoteTrustLists.values().each { trustList ->
if (trustList.status == RemoteTrustList.Status.UPDATING)
return
if (!trustList.forceUpdate &&
now - trustList.timestamp < settings.trustListInterval * 60 * 60 * 1000)
return
trustList.forceUpdate = false
updateThreads.submit(new UpdateJob(trustList))
}
}
} catch (InterruptedException e) {
if (!shutdown)
throw e
}
}
private class UpdateJob implements Runnable {
private final RemoteTrustList trustList
UpdateJob(RemoteTrustList trustList) {
this.trustList = trustList
}
public void run() {
trustList.status = RemoteTrustList.Status.UPDATING
eventBus.publish(new TrustSubscriptionUpdatedEvent(trustList : trustList))
if (check(trustList, System.currentTimeMillis()))
trustList.status = RemoteTrustList.Status.UPDATED
else
trustList.status = RemoteTrustList.Status.UPDATE_FAILED
eventBus.publish(new TrustSubscriptionUpdatedEvent(trustList : trustList))
}
}
private boolean check(RemoteTrustList trustList, long now) {
log.info("fetching trust list from ${trustList.persona.getHumanReadableName()}")
Endpoint endpoint = null
try {
endpoint = i2pConnector.connect(trustList.persona.destination)
OutputStream os = endpoint.getOutputStream()
InputStream is = endpoint.getInputStream()
os.write("TRUST\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
os.flush()
String codeString = DataUtil.readTillRN(is)
int space = codeString.indexOf(' ')
if (space > 0)
codeString = codeString.substring(0,space)
int code = Integer.parseInt(codeString.trim())
if (code != 200) {
log.info("couldn't fetch trust list, code $code")
return false
}
// swallow any headers
String header
while (( header = DataUtil.readTillRN(is)) != "");
DataInputStream dis = new DataInputStream(is)
Set<Persona> good = new HashSet<>()
int nGood = dis.readUnsignedShort()
for (int i = 0; i < nGood; i++) {
Persona p = new Persona(dis)
good.add(p)
}
Set<Persona> bad = new HashSet<>()
int nBad = dis.readUnsignedShort()
for (int i = 0; i < nBad; i++) {
Persona p = new Persona(dis)
bad.add(p)
}
trustList.timestamp = now
trustList.good.clear()
trustList.good.addAll(good)
trustList.bad.clear()
trustList.bad.addAll(bad)
return true
} catch (Exception e) {
log.log(Level.WARNING,"exception fetching trust list from ${trustList.persona.getHumanReadableName()}",e)
return false
} finally {
endpoint?.close()
}
}
}
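
The serving side of the TRUST exchange is outside this diff; a hypothetical responder that would satisfy check() above could look like the following: a status line, a blank line, then each list written as an unsigned-short count followed by serialized Personas. The method name and parameters are assumptions.

    import java.nio.charset.StandardCharsets

    // hypothetical responder; goodPersonas/badPersonas are the local trust lists
    void respondToTrustRequest(OutputStream os, Collection goodPersonas, Collection badPersonas) {
        os.write("200 OK\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
        def dos = new DataOutputStream(os)
        dos.writeShort((short) goodPersonas.size())
        goodPersonas.each { it.write(dos) }   // Persona.write(OutputStream), as used elsewhere in the core
        dos.writeShort((short) badPersonas.size())
        badPersonas.each { it.write(dos) }
        dos.flush()
    }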

View File

@@ -0,0 +1,9 @@
package com.muwire.core.trust
import com.muwire.core.Event
import com.muwire.core.Persona
class TrustSubscriptionEvent extends Event {
Persona persona
boolean subscribe
}

View File

@@ -0,0 +1,7 @@
package com.muwire.core.trust
import com.muwire.core.Event
class TrustSubscriptionUpdatedEvent extends Event {
RemoteTrustList trustList
}

View File

@@ -3,5 +3,5 @@ package com.muwire.core.update
import net.i2p.data.Destination
class UpdateServers {
static final Destination UPDATE_SERVER = new Destination("pSWieSRB3czCl3Zz4WpKp4Z8tjv-05zbogRDS7SEnKcSdWOupVwjzQ92GsgQh1VqgoSRk1F8dpZOnHxxz5HFy9D7ri0uFdkMyXdSKoB7IgkkvCfTAyEmeaPwSYnurF3Zk7u286E7YG2rZkQZgJ77tow7ZS0mxFB7Z0Ti-VkZ9~GeGePW~howwNm4iSQACZA0DyTpI8iv5j4I0itPCQRgaGziob~Vfvjk49nd8N4jtaDGo9cEcafikVzQ2OgBgYWL6LRbrrItwuGqsDvITUHWaElUYIDhRQYUq8gYiUA6rwAJputfhFU0J7lIxFR9vVY7YzRvcFckfr0DNI4VQVVlPnRPkUxQa--BlldMaCIppWugjgKLwqiSiHywKpSMlBWgY2z1ry4ueEBo1WEP-mEf88wRk4cFQBCKtctCQnIG2GsnATqTl-VGUAsuzeNWZiFSwXiTy~gQ094yWx-K06fFZUDt4CMiLZVhGlixiInD~34FCRC9LVMtFcqiFB2M-Ql2AAAA")
static final Destination UPDATE_SERVER = new Destination("VJYAiCPZHNLraWvLkeRLxRiT4PHAqNqRO1nH240r7u1noBw8Pa~-lJOhKR7CccPkEN8ejSi4H6XjqKYLC8BKLVLeOgnAbedUVx81MV7DETPDdPEGV4RVu6YDFri7-tJOeqauGHxtlXT44YWuR69xKrTG3u4~iTWgxKnlBDht9Q3aVpSPFD2KqEizfVxolqXI0zmAZ2xMi8jfl0oe4GbgHrD9hR2FYj6yKfdqcUgHVobY4kDdJt-u31QqwWdsQMEj8Y3tR2XcNaITEVPiAjoKgBrYwB4jddWPNaT4XdHz76d9p9Iqes7dhOKq3OKpk6kg-bfIKiEOiA1mY49fn5h8pNShTqV7QBhh4CE4EDT3Szl~WsLdrlHUKJufSi7erEMh3coF7HORpF1wah2Xw7q470t~b8dKGKi7N7xQsqhGruDm66PH9oE9Kt9WBVBq2zORdPRtRM61I7EnrwDlbOkL0y~XpvQ3JKUQKdBQ3QsOJt8CHlhHHXMMbvqhntR61RSDBQAEAAcAAA==")
}

View File

@@ -83,7 +83,7 @@ class ContentUploader extends Uploader {
String xHave = DataUtil.encodeXHave(mesh.pieces.getDownloaded(), mesh.pieces.nPieces)
endpoint.getOutputStream().write("X-Have: $xHave\r\n".getBytes(StandardCharsets.US_ASCII))
Set<Persona> sources = mesh.getRandom(3, toExclude)
Set<Persona> sources = mesh.getRandom(9, toExclude)
if (!sources.isEmpty()) {
String xAlts = sources.stream().map({ it.toBase64() }).collect(Collectors.joining(","))
endpoint.getOutputStream().write("X-Alt: $xAlts\r\n".getBytes(StandardCharsets.US_ASCII))
@@ -119,4 +119,8 @@ class ContentUploader extends Uploader {
return mesh.pieces.nPieces;
}
@Override
public long getTotalSize() {
return file.length();
}
}
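
The bump from 3 to 9 suggested sources only changes how many personas go into the X-Alt header; the consuming side is not shown here. A sketch of parsing that comma-separated Base64 list back into Personas, assuming net.i2p's Base64 and the Persona stream constructor used elsewhere in the core:

    import com.muwire.core.Persona
    import net.i2p.data.Base64

    // hypothetical parser for an "X-Alt:" header value
    List<Persona> parseXAlt(String xAlt) {
        xAlt.split(",").collect { new Persona(new ByteArrayInputStream(Base64.decode(it.trim()))) }
    }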

View File

@@ -61,5 +61,8 @@ class HashListUploader extends Uploader {
return 1;
}
@Override
public long getTotalSize() {
return -1;
}
}

View File

@@ -92,7 +92,7 @@ public class UploadManager {
pieceSize = downloader.pieceSizePow2
} else {
SharedFile sharedFile = sharedFiles.iterator().next();
mesh = meshManager.getOrCreate(request.infoHash, sharedFile.NPieces)
mesh = meshManager.getOrCreate(request.infoHash, sharedFile.NPieces, false)
file = sharedFile.file
pieceSize = sharedFile.pieceSize
}
@@ -217,7 +217,7 @@ public class UploadManager {
pieceSize = downloader.pieceSizePow2
} else {
SharedFile sharedFile = sharedFiles.iterator().next();
mesh = meshManager.getOrCreate(request.infoHash, sharedFile.NPieces)
mesh = meshManager.getOrCreate(request.infoHash, sharedFile.NPieces, false)
file = sharedFile.file
pieceSize = sharedFile.pieceSize
}

View File

@@ -35,5 +35,7 @@ abstract class Uploader {
abstract int getDonePieces();
abstract int getTotalPieces()
abstract int getTotalPieces();
abstract long getTotalSize();
}

View File

@@ -1,156 +0,0 @@
package com.muwire.core.util
import java.lang.reflect.Field
import java.lang.reflect.Method
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import com.muwire.core.Constants
import net.i2p.data.Base64
class DataUtil {
private final static int MAX_SHORT = (0x1 << 16) - 1
static void writeUnsignedShort(int value, OutputStream os) {
if (value > MAX_SHORT || value < 0)
throw new IllegalArgumentException("$value invalid")
byte lsb = (byte) (value & 0xFF)
byte msb = (byte) (value >> 8)
os.write(msb)
os.write(lsb)
}
private final static int MAX_HEADER = 0x7FFFFF
static void packHeader(int length, byte [] header) {
if (header.length != 3)
throw new IllegalArgumentException("header length $header.length")
if (length < 0 || length > MAX_HEADER)
throw new IllegalArgumentException("length $length")
header[2] = (byte) (length & 0xFF)
header[1] = (byte) ((length >> 8) & 0xFF)
header[0] = (byte) ((length >> 16) & 0x7F)
}
static int readLength(byte [] header) {
if (header.length != 3)
throw new IllegalArgumentException("header length $header.length")
return (((int)(header[0] & 0x7F)) << 16) |
(((int)(header[1] & 0xFF) << 8)) |
((int)header[2] & 0xFF)
}
static String readi18nString(byte [] encoded) {
if (encoded.length < 2)
throw new IllegalArgumentException("encoding too short $encoded.length")
int length = ((encoded[0] & 0xFF) << 8) | (encoded[1] & 0xFF)
if (encoded.length != length + 2)
throw new IllegalArgumentException("encoding doesn't match length, expected $length found $encoded.length")
byte [] string = new byte[length]
System.arraycopy(encoded, 2, string, 0, length)
new String(string, StandardCharsets.UTF_8)
}
static byte[] encodei18nString(String string) {
byte [] utf8 = string.getBytes(StandardCharsets.UTF_8)
if (utf8.length > Short.MAX_VALUE)
throw new IllegalArgumentException("String in utf8 too long $utf8.length")
def baos = new ByteArrayOutputStream()
def daos = new DataOutputStream(baos)
daos.writeShort((short) utf8.length)
daos.write(utf8)
daos.close()
baos.toByteArray()
}
public static String readTillRN(InputStream is) {
def baos = new ByteArrayOutputStream()
while(baos.size() < (Constants.MAX_HEADER_SIZE)) {
byte read = is.read()
if (read == -1)
throw new IOException()
if (read == '\r') {
if (is.read() != '\n')
throw new IOException("invalid header")
break
}
baos.write(read)
}
new String(baos.toByteArray(), StandardCharsets.US_ASCII)
}
public static String encodeXHave(List<Integer> pieces, int totalPieces) {
int bytes = totalPieces / 8
if (totalPieces % 8 != 0)
bytes++
byte[] raw = new byte[bytes]
pieces.each {
int byteIdx = it / 8
int offset = it % 8
int mask = 0x80 >>> offset
raw[byteIdx] |= mask
}
Base64.encode(raw)
}
public static List<Integer> decodeXHave(String xHave) {
byte [] availablePieces = Base64.decode(xHave)
List<Integer> available = new ArrayList<>()
availablePieces.eachWithIndex {b, i ->
for (int j = 0; j < 8 ; j++) {
byte mask = 0x80 >>> j
if ((b & mask) == mask) {
available.add(i * 8 + j)
}
}
}
available
}
public static Exception findRoot(Exception e) {
while(e.getCause() != null)
e = e.getCause()
e
}
public static void tryUnmap(ByteBuffer cb) {
if (cb==null || !cb.isDirect()) return;
// we could use this type cast and call functions without reflection code,
// but static import from sun.* package is risky for non-SUN virtual machine.
//try { ((sun.nio.ch.DirectBuffer)cb).cleaner().clean(); } catch (Exception ex) { }
// JavaSpecVer: 1.6, 1.7, 1.8, 9, 10
boolean isOldJDK = System.getProperty("java.specification.version","99").startsWith("1.");
try {
if (isOldJDK) {
Method cleaner = cb.getClass().getMethod("cleaner");
cleaner.setAccessible(true);
Method clean = Class.forName("sun.misc.Cleaner").getMethod("clean");
clean.setAccessible(true);
clean.invoke(cleaner.invoke(cb));
} else {
Class unsafeClass;
try {
unsafeClass = Class.forName("sun.misc.Unsafe");
} catch(Exception ex) {
// jdk.internal.misc.Unsafe doesn't yet have an invokeCleaner() method,
// but that method should be added if sun.misc.Unsafe is removed.
unsafeClass = Class.forName("jdk.internal.misc.Unsafe");
}
Method clean = unsafeClass.getMethod("invokeCleaner", ByteBuffer.class);
clean.setAccessible(true);
Field theUnsafeField = unsafeClass.getDeclaredField("theUnsafe");
theUnsafeField.setAccessible(true);
Object theUnsafe = theUnsafeField.get(null);
clean.invoke(theUnsafe, cb);
}
} catch(Exception ex) { }
cb = null;
}
}

View File

@@ -0,0 +1,13 @@
package com.muwire.core;
import net.i2p.crypto.SigType;
public class Constants {
public static final byte PERSONA_VERSION = (byte)1;
public static final SigType SIG_TYPE = SigType.EdDSA_SHA512_Ed25519;
public static final int MAX_HEADER_SIZE = 0x1 << 14;
public static final int MAX_HEADERS = 16;
public static final int MAX_RESULTS = 0x1 << 16;
}

View File

@@ -8,16 +8,16 @@ import net.i2p.data.Destination;
public class DownloadedFile extends SharedFile {
private final Set<Destination> sources;
private final Set<Destination> sources;
public DownloadedFile(File file, InfoHash infoHash, int pieceSize, Set<Destination> sources)
throws IOException {
super(file, infoHash, pieceSize);
this.sources = sources;
}
public DownloadedFile(File file, InfoHash infoHash, int pieceSize, Set<Destination> sources)
throws IOException {
super(file, infoHash, pieceSize);
this.sources = sources;
}
public Set<Destination> getSources() {
return sources;
}
public Set<Destination> getSources() {
return sources;
}
}

View File

@@ -11,83 +11,83 @@ import net.i2p.data.Base64;
public class InfoHash {
public static final int SIZE = 0x1 << 5;
public static final int SIZE = 0x1 << 5;
private final byte[] root;
private final byte[] hashList;
private final byte[] root;
private final byte[] hashList;
private final int hashCode;
private final int hashCode;
public InfoHash(byte[] root, byte[] hashList) {
if (root.length != SIZE)
throw new IllegalArgumentException("invalid root size "+root.length);
if (hashList != null && hashList.length % SIZE != 0)
throw new IllegalArgumentException("invalid hashList size " + hashList.length);
this.root = root;
this.hashList = hashList;
hashCode = root[0] << 24 |
root[1] << 16 |
root[2] << 8 |
root[3];
}
public InfoHash(byte[] root, byte[] hashList) {
if (root.length != SIZE)
throw new IllegalArgumentException("invalid root size "+root.length);
if (hashList != null && hashList.length % SIZE != 0)
throw new IllegalArgumentException("invalid hashList size " + hashList.length);
this.root = root;
this.hashList = hashList;
hashCode = root[0] << 24 |
root[1] << 16 |
root[2] << 8 |
root[3];
}
public InfoHash(byte[] root) {
this(root, null);
}
public InfoHash(byte[] root) {
this(root, null);
}
public InfoHash(String base32) {
this(Base32.decode(base32));
}
public InfoHash(String base32) {
this(Base32.decode(base32));
}
public static InfoHash fromHashList(byte []hashList) {
try {
MessageDigest sha256 = MessageDigest.getInstance("SHA-256");
byte[] root = sha256.digest(hashList);
return new InfoHash(root, hashList);
} catch (NoSuchAlgorithmException impossible) {
impossible.printStackTrace();
System.exit(1);
}
return null;
}
public static InfoHash fromHashList(byte []hashList) {
try {
MessageDigest sha256 = MessageDigest.getInstance("SHA-256");
byte[] root = sha256.digest(hashList);
return new InfoHash(root, hashList);
} catch (NoSuchAlgorithmException impossible) {
impossible.printStackTrace();
System.exit(1);
}
return null;
}
public byte[] getRoot() {
return root;
}
public byte[] getRoot() {
return root;
}
public byte[] getHashList() {
return hashList;
}
public byte[] getHashList() {
return hashList;
}
@Override
public int hashCode() {
return hashCode;
}
@Override
public int hashCode() {
return hashCode;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof InfoHash)) {
return false;
}
InfoHash other = (InfoHash) o;
return Arrays.equals(root, other.root);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof InfoHash)) {
return false;
}
InfoHash other = (InfoHash) o;
return Arrays.equals(root, other.root);
}
public String toString() {
String rv = "InfoHash[root:"+Base64.encode(root) + " hashList:";
List<String> b64HashList = new ArrayList<>();
if (hashList != null) {
byte [] tmp = new byte[SIZE];
for (int i = 0; i < hashList.length / SIZE; i++) {
System.arraycopy(hashList, SIZE * i, tmp, 0, SIZE);
b64HashList.add(Base64.encode(tmp));
}
}
rv += b64HashList.toString();
rv += "]";
return rv;
}
public String toString() {
String rv = "InfoHash[root:"+Base64.encode(root) + " hashList:";
List<String> b64HashList = new ArrayList<>();
if (hashList != null) {
byte [] tmp = new byte[SIZE];
for (int i = 0; i < hashList.length / SIZE; i++) {
System.arraycopy(hashList, SIZE * i, tmp, 0, SIZE);
b64HashList.add(Base64.encode(tmp));
}
}
rv += b64HashList.toString();
rv += "]";
return rv;
}
}
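
A usage sketch: the root is the SHA-256 of the concatenated 32-byte piece hashes, and equality compares roots only. The base32 value is the one asserted in InfoHashTest further down.

    import com.muwire.core.InfoHash

    def ih = InfoHash.fromHashList(new byte[64])   // two all-zero 32-byte piece hashes
    assert ih.getRoot().length == InfoHash.SIZE
    assert ih == new InfoHash("6ws72qwrniqdaj4y55xngcmxtnbqapjdedm7b2hktay2sj2z7nfq")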

View File

@@ -2,63 +2,105 @@ package com.muwire.core;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import com.muwire.core.util.DataUtil;
import net.i2p.data.Base64;
public class SharedFile {
private final File file;
private final InfoHash infoHash;
private final int pieceSize;
private final File file;
private final InfoHash infoHash;
private final int pieceSize;
private final String cachedPath;
private final long cachedLength;
private final String cachedPath;
private final long cachedLength;
public SharedFile(File file, InfoHash infoHash, int pieceSize) throws IOException {
this.file = file;
this.infoHash = infoHash;
this.pieceSize = pieceSize;
this.cachedPath = file.getAbsolutePath();
this.cachedLength = file.length();
}
private final String b64EncodedFileName;
private final String b64EncodedHashRoot;
private final List<String> b64EncodedHashList;
public File getFile() {
return file;
}
private volatile String comment;
public InfoHash getInfoHash() {
return infoHash;
}
public SharedFile(File file, InfoHash infoHash, int pieceSize) throws IOException {
this.file = file;
this.infoHash = infoHash;
this.pieceSize = pieceSize;
this.cachedPath = file.getAbsolutePath();
this.cachedLength = file.length();
this.b64EncodedFileName = Base64.encode(DataUtil.encodei18nString(file.toString()));
this.b64EncodedHashRoot = Base64.encode(infoHash.getRoot());
public int getPieceSize() {
return pieceSize;
}
List<String> b64List = new ArrayList<String>();
byte[] tmp = new byte[32];
for (int i = 0; i < infoHash.getHashList().length / 32; i++) {
System.arraycopy(infoHash.getHashList(), i * 32, tmp, 0, 32);
b64List.add(Base64.encode(tmp));
}
this.b64EncodedHashList = b64List;
}
public int getNPieces() {
long length = file.length();
int rawPieceSize = 0x1 << pieceSize;
int rv = (int) (length / rawPieceSize);
if (length % rawPieceSize != 0)
rv++;
return rv;
}
public File getFile() {
return file;
}
public String getCachedPath() {
return cachedPath;
}
public InfoHash getInfoHash() {
return infoHash;
}
public long getCachedLength() {
return cachedLength;
}
public int getPieceSize() {
return pieceSize;
}
@Override
public int hashCode() {
return file.hashCode() ^ infoHash.hashCode();
}
public int getNPieces() {
long length = file.length();
int rawPieceSize = 0x1 << pieceSize;
int rv = (int) (length / rawPieceSize);
if (length % rawPieceSize != 0)
rv++;
return rv;
}
@Override
public boolean equals(Object o) {
if (!(o instanceof SharedFile))
return false;
SharedFile other = (SharedFile)o;
return file.equals(other.file) && infoHash.equals(other.infoHash);
}
public String getB64EncodedFileName() {
return b64EncodedFileName;
}
public String getB64EncodedHashRoot() {
return b64EncodedHashRoot;
}
public List<String> getB64EncodedHashList() {
return b64EncodedHashList;
}
public String getCachedPath() {
return cachedPath;
}
public long getCachedLength() {
return cachedLength;
}
public void setComment(String comment) {
this.comment = comment;
}
public String getComment() {
return comment;
}
@Override
public int hashCode() {
return file.hashCode() ^ infoHash.hashCode();
}
@Override
public boolean equals(Object o) {
if (!(o instanceof SharedFile))
return false;
SharedFile other = (SharedFile)o;
return file.equals(other.file) && infoHash.equals(other.infoHash);
}
}
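
The piece size is stored as a power-of-two exponent, and getNPieces() rounds up. A worked example mirroring that arithmetic, with an assumed pieceSize of 17 (128 KiB pieces) and a 1,000,000-byte file:

    long length = 1000000L
    int pieceSize = 17
    int rawPieceSize = 0x1 << pieceSize          // 131072 bytes per piece
    int nPieces = length.intdiv(rawPieceSize)    // 7 full pieces
    if (length % rawPieceSize != 0) nPieces++    // plus one partial piece
    assert nPieces == 8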

View File

@@ -1,5 +1,5 @@
package com.muwire.core.connection;
public enum ConnectionAttemptStatus {
SUCCESSFUL, REJECTED, FAILED
SUCCESSFUL, REJECTED, FAILED
}

View File

@@ -6,5 +6,5 @@ package com.muwire.core.hostcache;
*
*/
public enum CrawlerResponse {
ALL, REGISTERED, NONE
ALL, REGISTERED, NONE
}

View File

@@ -1,5 +1,5 @@
package com.muwire.core.trust;
public enum TrustLevel {
TRUSTED, NEUTRAL, DISTRUSTED
TRUSTED, NEUTRAL, DISTRUSTED
}

View File

@@ -0,0 +1,168 @@
package com.muwire.core.util;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import com.muwire.core.Constants;
import net.i2p.data.Base64;
public class DataUtil {
private final static int MAX_SHORT = (0x1 << 16) - 1;
static void writeUnsignedShort(int value, OutputStream os) throws IOException {
if (value > MAX_SHORT || value < 0)
throw new IllegalArgumentException(value + " invalid");
byte lsb = (byte) (value & 0xFF);
byte msb = (byte) (value >> 8);
os.write(msb);
os.write(lsb);
}
private final static int MAX_HEADER = 0x7FFFFF;
static void packHeader(int length, byte [] header) {
if (header.length != 3)
throw new IllegalArgumentException("header length " + header.length);
if (length < 0 || length > MAX_HEADER)
throw new IllegalArgumentException("length " + length);
header[2] = (byte) (length & 0xFF);
header[1] = (byte) ((length >> 8) & 0xFF);
header[0] = (byte) ((length >> 16) & 0x7F);
}
static int readLength(byte [] header) {
if (header.length != 3)
throw new IllegalArgumentException("header length " + header.length);
return (((int)(header[0] & 0x7F)) << 16) |
(((int)(header[1] & 0xFF) << 8)) |
((int)header[2] & 0xFF);
}
static String readi18nString(byte [] encoded) {
if (encoded.length < 2)
throw new IllegalArgumentException("encoding too short " + encoded.length);
int length = ((encoded[0] & 0xFF) << 8) | (encoded[1] & 0xFF);
if (encoded.length != length + 2)
throw new IllegalArgumentException("encoding doesn't match length, expected " + length + " found " + encoded.length);
byte [] string = new byte[length];
System.arraycopy(encoded, 2, string, 0, length);
return new String(string, StandardCharsets.UTF_8);
}
public static byte[] encodei18nString(String string) {
byte [] utf8 = string.getBytes(StandardCharsets.UTF_8);
if (utf8.length > Short.MAX_VALUE)
throw new IllegalArgumentException("String in utf8 too long " + utf8.length);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream daos = new DataOutputStream(baos);
try {
daos.writeShort((short) utf8.length);
daos.write(utf8);
daos.close();
} catch (IOException impossible) {
throw new IllegalStateException(impossible);
}
return baos.toByteArray();
}
public static String readTillRN(InputStream is) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
while(baos.size() < (Constants.MAX_HEADER_SIZE)) {
int read = is.read();
if (read == -1)
throw new IOException();
if (read == '\r') {
if (is.read() != '\n')
throw new IOException("invalid header");
break;
}
baos.write(read);
}
return new String(baos.toByteArray(), StandardCharsets.US_ASCII);
}
public static String encodeXHave(List<Integer> pieces, int totalPieces) {
int bytes = totalPieces / 8;
if (totalPieces % 8 != 0)
bytes++;
byte[] raw = new byte[bytes];
for (int it : pieces) {
int byteIdx = it / 8;
int offset = it % 8;
int mask = 0x80 >>> offset;
raw[byteIdx] |= mask;
}
return Base64.encode(raw);
}
public static List<Integer> decodeXHave(String xHave) {
byte [] availablePieces = Base64.decode(xHave);
List<Integer> available = new ArrayList<>();
for (int i = 0; i < availablePieces.length; i ++) {
byte b = availablePieces[i];
for (int j = 0; j < 8 ; j++) {
byte mask = (byte) (0x80 >>> j);
if ((b & mask) == mask) {
available.add(i * 8 + j);
}
}
}
return available;
}
public static Throwable findRoot(Throwable e) {
while(e.getCause() != null)
e = e.getCause();
return e;
}
public static void tryUnmap(ByteBuffer cb) {
if (cb==null || !cb.isDirect()) return;
// we could use this type cast and call functions without reflection code,
// but static import from sun.* package is risky for non-SUN virtual machine.
//try { ((sun.nio.ch.DirectBuffer)cb).cleaner().clean(); } catch (Exception ex) { }
// JavaSpecVer: 1.6, 1.7, 1.8, 9, 10
boolean isOldJDK = System.getProperty("java.specification.version","99").startsWith("1.");
try {
if (isOldJDK) {
Method cleaner = cb.getClass().getMethod("cleaner");
cleaner.setAccessible(true);
Method clean = Class.forName("sun.misc.Cleaner").getMethod("clean");
clean.setAccessible(true);
clean.invoke(cleaner.invoke(cb));
} else {
Class unsafeClass;
try {
unsafeClass = Class.forName("sun.misc.Unsafe");
} catch(Exception ex) {
// jdk.internal.misc.Unsafe doesn't yet have an invokeCleaner() method,
// but that method should be added if sun.misc.Unsafe is removed.
unsafeClass = Class.forName("jdk.internal.misc.Unsafe");
}
Method clean = unsafeClass.getMethod("invokeCleaner", ByteBuffer.class);
clean.setAccessible(true);
Field theUnsafeField = unsafeClass.getDeclaredField("theUnsafe");
theUnsafeField.setAccessible(true);
Object theUnsafe = theUnsafeField.get(null);
clean.invoke(theUnsafe, cb);
}
} catch(Exception ex) { }
cb = null;
}
}
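
A round-trip sketch for the X-Have helpers above: piece indices are packed MSB-first into a bitfield and Base64-encoded, so decoding recovers the same indices in ascending order.

    import com.muwire.core.util.DataUtil

    def pieces = [0, 3, 9]
    String xHave = DataUtil.encodeXHave(pieces, 12)   // 12 pieces total -> 2-byte bitfield
    assert DataUtil.decodeXHave(xHave) == [0, 3, 9]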

View File

@@ -4,6 +4,6 @@ import net.i2p.data.Destination
class Destinations {
Destination dest1 = new Destination("KvwWPKMSAtzf7Yruj8TQaHi2jaQpSNsXJskbpmSBTxkcYlDB2GllH~QBu-cs4FSYdaRmKDUUx7793jjnYJgTMbrjqeIL5-BTORZ09n6PUfhSejDpJjdkUxaV1OHRatfYs70RNBv7rvdj1-nXUow5tMfOJtoWVocUoKefUGFQFbJLDDkBqjm1kFyKFZv6m6S6YqXxBgVB1qYicooy67cNQF5HLUFtP15pk5fMDNGz5eNCjPfC~2Gp8FF~OpSy92HT0XN7uAMJykPcbdnWfcvVwqD7eS0K4XEnsqnMPLEiMAhqsugEFiFqtB3Wmm7UHVc03lcAfRhr1e2uZBNFTtM2Uol4MD5sCCKRZVHGcH-WGPSEz0BM5YO~Xi~dQ~N3NVud32PVzhh8xoGcAlhTqMqAbRJndCv-H6NflX90pYmbirCTIDOaR9758mThrqX0d4CwCn4jFXer52l8Qe8CErGoLuB-4LL~Gwrn7R1k7ZQc2PthkqeW8MfigyiN7hZVkul9AAAA")
Destination dest2 = new Destination("KvwWPKMSAtzf7Yruj8TQaHi2jaQpSNsXJskbpmSBTxkcYlDB2GllH~QBu-cs4FSYdaRmKDUUx7793jjnYJgTMbrjqeIL5-BTORZ09n6PUfhSejDpJjdkUxaV1OHRatfYs70RNBv7rvdj1-nXUow5tMfOJtoWVocUoKefUGFQFbJLDDkBqjm1kFyKFZv6m6S6YqXxBgVB1qYicooy67cNQF5HLUFtP15pk5fMDNGz5eNCjPfC~2Gp8FF~OpSy92HT0XN7uAMJykPcbdnWfcvVwqD7eS0K4XEnsqnMPLEiMAhqsugEFiFqtB3Wmm7UHVc03lcAfRhr1e2uZBNFTtM2Uol4MD5sCCKRZVHGcH-WGPSEz0BM5YO~Xi~dQ~N3NVud32PVzhh8xoGcAlhTqMqAbRJndCv-H6NflX90pYmbirCTIDOaR9758mThrqX0d4CwCn4jFXer52l8Qe8CErGoLuB-4LL~Gwrn7R1k7ZQc2PthkqeW8MfigyiN7hZVkul8AAAA")
Destination dest1 = new Destination("KvwWPKMSAtzf7Yruj8TQaHi2jaQpSNsXJskbpmSBTxkcYlDB2GllH~QBu-cs4FSYdaRmKDUUx7793jjnYJgTMbrjqeIL5-BTORZ09n6PUfhSejDpJjdkUxaV1OHRatfYs70RNBv7rvdj1-nXUow5tMfOJtoWVocUoKefUGFQFbJLDDkBqjm1kFyKFZv6m6S6YqXxBgVB1qYicooy67cNQF5HLUFtP15pk5fMDNGz5eNCjPfC~2Gp8FF~OpSy92HT0XN7uAMJykPcbdnWfcvVwqD7eS0K4XEnsqnMPLEiMAhqsugEFiFqtB3Wmm7UHVc03lcAfRhr1e2uZBNFTtM2Uol4MD5sCCKRZVHGcH-WGPSEz0BM5YO~Xi~dQ~N3NVud32PVzhh8xoGcAlhTqMqAbRJndCv-H6NflX90pYmbirCTIDOaR9758mThrqX0d4CwCn4jFXer52l8Qe8CErGoLuB-4LL~Gwrn7R1k7ZQc2PthkqeW8MfigyiN7hZVkul9AAAA")
Destination dest2 = new Destination("KvwWPKMSAtzf7Yruj8TQaHi2jaQpSNsXJskbpmSBTxkcYlDB2GllH~QBu-cs4FSYdaRmKDUUx7793jjnYJgTMbrjqeIL5-BTORZ09n6PUfhSejDpJjdkUxaV1OHRatfYs70RNBv7rvdj1-nXUow5tMfOJtoWVocUoKefUGFQFbJLDDkBqjm1kFyKFZv6m6S6YqXxBgVB1qYicooy67cNQF5HLUFtP15pk5fMDNGz5eNCjPfC~2Gp8FF~OpSy92HT0XN7uAMJykPcbdnWfcvVwqD7eS0K4XEnsqnMPLEiMAhqsugEFiFqtB3Wmm7UHVc03lcAfRhr1e2uZBNFTtM2Uol4MD5sCCKRZVHGcH-WGPSEz0BM5YO~Xi~dQ~N3NVud32PVzhh8xoGcAlhTqMqAbRJndCv-H6NflX90pYmbirCTIDOaR9758mThrqX0d4CwCn4jFXer52l8Qe8CErGoLuB-4LL~Gwrn7R1k7ZQc2PthkqeW8MfigyiN7hZVkul8AAAA")
}

View File

@@ -4,23 +4,23 @@ import org.junit.Test
class EventBusTest {
class FakeEvent extends Event {}
class FakeEvent extends Event {}
class FakeEventHandler {
def onFakeEvent(FakeEvent e) {
assert e == fakeEvent
}
}
class FakeEventHandler {
def onFakeEvent(FakeEvent e) {
assert e == fakeEvent
}
}
FakeEvent fakeEvent = new FakeEvent()
FakeEvent fakeEvent = new FakeEvent()
EventBus bus = new EventBus()
def handler = new FakeEventHandler()
EventBus bus = new EventBus()
def handler = new FakeEventHandler()
@Test
void testDynamicEvent() {
bus.register(FakeEvent.class, handler)
bus.publish(fakeEvent)
}
@Test
void testDynamicEvent() {
bus.register(FakeEvent.class, handler)
bus.publish(fakeEvent)
}
}

View File

@@ -6,11 +6,11 @@ import org.junit.Test
class InfoHashTest {
@Test
void testEmpty() {
byte [] empty = new byte[0x1 << 6];
def ih = InfoHash.fromHashList(empty)
def ih2 = new InfoHash("6ws72qwrniqdaj4y55xngcmxtnbqapjdedm7b2hktay2sj2z7nfq");
assertEquals(ih, ih2);
}
@Test
void testEmpty() {
byte [] empty = new byte[0x1 << 6];
def ih = InfoHash.fromHashList(empty)
def ih2 = new InfoHash("6ws72qwrniqdaj4y55xngcmxtnbqapjdedm7b2hktay2sj2z7nfq");
assertEquals(ih, ih2);
}
}

View File

@@ -22,21 +22,21 @@ import groovy.mock.interceptor.MockFor
class ConnectionAcceptorTest {
EventBus eventBus
final Destinations destinations = new Destinations()
def settings
EventBus eventBus
final Destinations destinations = new Destinations()
def settings
def connectionManagerMock
UltrapeerConnectionManager connectionManager
def connectionManagerMock
UltrapeerConnectionManager connectionManager
def i2pAcceptorMock
I2PAcceptor i2pAcceptor
def i2pAcceptorMock
I2PAcceptor i2pAcceptor
def hostCacheMock
HostCache hostCache
def hostCacheMock
HostCache hostCache
def trustServiceMock
TrustService trustService
def trustServiceMock
TrustService trustService
def searchManagerMock
SearchManager searchManager
@@ -47,361 +47,361 @@ class ConnectionAcceptorTest {
def connectionEstablisherMock
ConnectionEstablisher connectionEstablisher
ConnectionAcceptor acceptor
List<ConnectionEvent> connectionEvents
InputStream inputStream
OutputStream outputStream
ConnectionAcceptor acceptor
List<ConnectionEvent> connectionEvents
InputStream inputStream
OutputStream outputStream
@Before
void before() {
connectionManagerMock = new MockFor(UltrapeerConnectionManager.class)
i2pAcceptorMock = new MockFor(I2PAcceptor.class)
hostCacheMock = new MockFor(HostCache.class)
trustServiceMock = new MockFor(TrustService.class)
@Before
void before() {
connectionManagerMock = new MockFor(UltrapeerConnectionManager.class)
i2pAcceptorMock = new MockFor(I2PAcceptor.class)
hostCacheMock = new MockFor(HostCache.class)
trustServiceMock = new MockFor(TrustService.class)
searchManagerMock = new MockFor(SearchManager.class)
uploadManagerMock = new MockFor(UploadManager.class)
connectionEstablisherMock = new MockFor(ConnectionEstablisher.class)
}
}
@After
void after() {
acceptor?.stop()
connectionManagerMock.verify connectionManager
i2pAcceptorMock.verify i2pAcceptor
hostCacheMock.verify hostCache
trustServiceMock.verify trustService
@After
void after() {
acceptor?.stop()
connectionManagerMock.verify connectionManager
i2pAcceptorMock.verify i2pAcceptor
hostCacheMock.verify hostCache
trustServiceMock.verify trustService
searchManagerMock.verify searchManager
uploadManagerMock.verify uploadManager
connectionEstablisherMock.verify connectionEstablisher
Thread.sleep(100)
}
Thread.sleep(100)
}
private void initMocks() {
connectionEvents = new CopyOnWriteArrayList()
eventBus = new EventBus()
def listener = new Object() {
void onConnectionEvent(ConnectionEvent e) {
connectionEvents.add e
}
}
eventBus.register(ConnectionEvent.class, listener)
private void initMocks() {
connectionEvents = new CopyOnWriteArrayList()
eventBus = new EventBus()
def listener = new Object() {
void onConnectionEvent(ConnectionEvent e) {
connectionEvents.add e
}
}
eventBus.register(ConnectionEvent.class, listener)
connectionManager = connectionManagerMock.proxyInstance()
i2pAcceptor = i2pAcceptorMock.proxyInstance()
hostCache = hostCacheMock.proxyInstance()
trustService = trustServiceMock.proxyInstance()
connectionManager = connectionManagerMock.proxyInstance()
i2pAcceptor = i2pAcceptorMock.proxyInstance()
hostCache = hostCacheMock.proxyInstance()
trustService = trustServiceMock.proxyInstance()
searchManager = searchManagerMock.proxyInstance()
uploadManager = uploadManagerMock.proxyInstance()
connectionEstablisher = connectionEstablisherMock.proxyInstance()
acceptor = new ConnectionAcceptor(eventBus, connectionManager, settings, i2pAcceptor,
hostCache, trustService, searchManager, uploadManager, connectionEstablisher)
acceptor.start()
Thread.sleep(100)
}
acceptor = new ConnectionAcceptor(eventBus, connectionManager, settings, i2pAcceptor,
hostCache, trustService, searchManager, uploadManager, null, connectionEstablisher)
acceptor.start()
Thread.sleep(100)
}
@Test
void testSuccessfulLeaf() {
settings = new MuWireSettings() {
boolean isLeaf() {
false
}
}
i2pAcceptorMock.demand.accept {
def is = new PipedInputStream()
outputStream = new PipedOutputStream(is)
def os = new PipedOutputStream()
inputStream = new PipedInputStream(os)
new Endpoint(destinations.dest1, is, os, null)
}
i2pAcceptorMock.demand.accept { Thread.sleep(Integer.MAX_VALUE) }
@Test
void testSuccessfulLeaf() {
settings = new MuWireSettings() {
boolean isLeaf() {
false
}
}
i2pAcceptorMock.demand.accept {
def is = new PipedInputStream()
outputStream = new PipedOutputStream(is)
def os = new PipedOutputStream()
inputStream = new PipedInputStream(os)
new Endpoint(destinations.dest1, is, os, null)
}
i2pAcceptorMock.demand.accept { Thread.sleep(Integer.MAX_VALUE) }
connectionEstablisherMock.demand.isInProgress(destinations.dest1) { false }
connectionManagerMock.demand.isConnected { dest ->
assert dest == destinations.dest1
false
}
connectionManagerMock.demand.hasLeafSlots() { true }
trustServiceMock.demand.getLevel { dest ->
assert dest == destinations.dest1
TrustLevel.TRUSTED
}
trustServiceMock.demand.getLevel { dest ->
assert dest == destinations.dest1
TrustLevel.TRUSTED
}
initMocks()
initMocks()
outputStream.write("MuWire leaf".bytes)
byte [] OK = new byte[2]
def dis = new DataInputStream(inputStream)
dis.readFully(OK)
assert OK == "OK".bytes
outputStream.write("MuWire leaf".bytes)
byte [] OK = new byte[2]
def dis = new DataInputStream(inputStream)
dis.readFully(OK)
assert OK == "OK".bytes
Thread.sleep(50)
assert connectionEvents.size() == 1
def event = connectionEvents[0]
assert event.endpoint.destination == destinations.dest1
assert event.status == ConnectionAttemptStatus.SUCCESSFUL
assert event.incoming == true
assert event.leaf == true
}
Thread.sleep(50)
assert connectionEvents.size() == 1
def event = connectionEvents[0]
assert event.endpoint.destination == destinations.dest1
assert event.status == ConnectionAttemptStatus.SUCCESSFUL
assert event.incoming == true
assert event.leaf == true
}
@Test
void testSuccessfulPeer() {
settings = new MuWireSettings() {
boolean isLeaf() {
false
}
}
i2pAcceptorMock.demand.accept {
def is = new PipedInputStream()
outputStream = new PipedOutputStream(is)
def os = new PipedOutputStream()
inputStream = new PipedInputStream(os)
new Endpoint(destinations.dest1, is, os, null)
}
i2pAcceptorMock.demand.accept { Thread.sleep(Integer.MAX_VALUE) }
@Test
void testSuccessfulPeer() {
settings = new MuWireSettings() {
boolean isLeaf() {
false
}
}
i2pAcceptorMock.demand.accept {
def is = new PipedInputStream()
outputStream = new PipedOutputStream(is)
def os = new PipedOutputStream()
inputStream = new PipedInputStream(os)
new Endpoint(destinations.dest1, is, os, null)
}
i2pAcceptorMock.demand.accept { Thread.sleep(Integer.MAX_VALUE) }
connectionEstablisherMock.demand.isInProgress(destinations.dest1) { false }
connectionManagerMock.demand.isConnected { dest ->
assert dest == destinations.dest1
false
}
connectionManagerMock.demand.hasPeerSlots() { true }
trustServiceMock.demand.getLevel { dest ->
assert dest == destinations.dest1
TrustLevel.TRUSTED
}
trustServiceMock.demand.getLevel { dest ->
assert dest == destinations.dest1
TrustLevel.TRUSTED
}
initMocks()
initMocks()
outputStream.write("MuWire peer".bytes)
byte [] OK = new byte[2]
def dis = new DataInputStream(inputStream)
dis.readFully(OK)
assert OK == "OK".bytes
outputStream.write("MuWire peer".bytes)
byte [] OK = new byte[2]
def dis = new DataInputStream(inputStream)
dis.readFully(OK)
assert OK == "OK".bytes
Thread.sleep(50)
assert connectionEvents.size() == 1
def event = connectionEvents[0]
assert event.endpoint.destination == destinations.dest1
assert event.status == ConnectionAttemptStatus.SUCCESSFUL
assert event.incoming == true
assert event.leaf == false
}
Thread.sleep(50)
assert connectionEvents.size() == 1
def event = connectionEvents[0]
assert event.endpoint.destination == destinations.dest1
assert event.status == ConnectionAttemptStatus.SUCCESSFUL
assert event.incoming == true
assert event.leaf == false
}
@Test
void testLeafRejectsLeaf() {
settings = new MuWireSettings() {
boolean isLeaf() {
true
}
}
i2pAcceptorMock.demand.accept {
def is = new PipedInputStream()
outputStream = new PipedOutputStream(is)
def os = new PipedOutputStream()
inputStream = new PipedInputStream(os)
new Endpoint(destinations.dest1, is, os, null)
}
i2pAcceptorMock.demand.accept { Thread.sleep(Integer.MAX_VALUE) }
trustServiceMock.demand.getLevel { dest ->
assert dest == destinations.dest1
TrustLevel.TRUSTED
}
@Test
void testLeafRejectsLeaf() {
settings = new MuWireSettings() {
boolean isLeaf() {
true
}
}
i2pAcceptorMock.demand.accept {
def is = new PipedInputStream()
outputStream = new PipedOutputStream(is)
def os = new PipedOutputStream()
inputStream = new PipedInputStream(os)
new Endpoint(destinations.dest1, is, os, null)
}
i2pAcceptorMock.demand.accept { Thread.sleep(Integer.MAX_VALUE) }
trustServiceMock.demand.getLevel { dest ->
assert dest == destinations.dest1
TrustLevel.TRUSTED
}
initMocks()
initMocks()
outputStream.write("MuWire leaf".bytes)
outputStream.flush()
Thread.sleep(50)
assert inputStream.read() == -1
outputStream.write("MuWire leaf".bytes)
outputStream.flush()
Thread.sleep(50)
assert inputStream.read() == -1
Thread.sleep(50)
assert connectionEvents.size() == 1
def event = connectionEvents[0]
assert event.endpoint.destination == destinations.dest1
assert event.status == ConnectionAttemptStatus.FAILED
assert event.incoming == true
assert event.leaf == null
}
Thread.sleep(50)
assert connectionEvents.size() == 1
def event = connectionEvents[0]
assert event.endpoint.destination == destinations.dest1
assert event.status == ConnectionAttemptStatus.FAILED
assert event.incoming == true
assert event.leaf == null
}
@Test
void testLeafRejectsPeer() {
settings = new MuWireSettings() {
boolean isLeaf() {
true
}
}
i2pAcceptorMock.demand.accept {
def is = new PipedInputStream()
outputStream = new PipedOutputStream(is)
def os = new PipedOutputStream()
inputStream = new PipedInputStream(os)
new Endpoint(destinations.dest1, is, os, null)
}
i2pAcceptorMock.demand.accept { Thread.sleep(Integer.MAX_VALUE) }
trustServiceMock.demand.getLevel { dest ->
assert dest == destinations.dest1
TrustLevel.TRUSTED
}
@Test
void testLeafRejectsPeer() {
settings = new MuWireSettings() {
boolean isLeaf() {
true
}
}
i2pAcceptorMock.demand.accept {
def is = new PipedInputStream()
outputStream = new PipedOutputStream(is)
def os = new PipedOutputStream()
inputStream = new PipedInputStream(os)
new Endpoint(destinations.dest1, is, os, null)
}
i2pAcceptorMock.demand.accept { Thread.sleep(Integer.MAX_VALUE) }
trustServiceMock.demand.getLevel { dest ->
assert dest == destinations.dest1
TrustLevel.TRUSTED
}
initMocks()
initMocks()
outputStream.write("MuWire peer".bytes)
outputStream.flush()
Thread.sleep(50)
assert inputStream.read() == -1
outputStream.write("MuWire peer".bytes)
outputStream.flush()
Thread.sleep(50)
assert inputStream.read() == -1
Thread.sleep(50)
assert connectionEvents.size() == 1
def event = connectionEvents[0]
assert event.endpoint.destination == destinations.dest1
assert event.status == ConnectionAttemptStatus.FAILED
assert event.incoming == true
assert event.leaf == null
}
Thread.sleep(50)
assert connectionEvents.size() == 1
def event = connectionEvents[0]
assert event.endpoint.destination == destinations.dest1
assert event.status == ConnectionAttemptStatus.FAILED
assert event.incoming == true
assert event.leaf == null
}
@Test
void testPeerRejectsPeerSlots() {
settings = new MuWireSettings() {
boolean isLeaf() {
false
}
}
i2pAcceptorMock.demand.accept {
def is = new PipedInputStream()
outputStream = new PipedOutputStream(is)
def os = new PipedOutputStream()
inputStream = new PipedInputStream(os)
new Endpoint(destinations.dest1, is, os, null)
}
i2pAcceptorMock.demand.accept { Thread.sleep(Integer.MAX_VALUE) }
@Test
void testPeerRejectsPeerSlots() {
settings = new MuWireSettings() {
boolean isLeaf() {
false
}
}
i2pAcceptorMock.demand.accept {
def is = new PipedInputStream()
outputStream = new PipedOutputStream(is)
def os = new PipedOutputStream()
inputStream = new PipedInputStream(os)
new Endpoint(destinations.dest1, is, os, null)
}
i2pAcceptorMock.demand.accept { Thread.sleep(Integer.MAX_VALUE) }
connectionEstablisherMock.demand.isInProgress(destinations.dest1) { false }
connectionManagerMock.demand.isConnected { dest ->
assert dest == destinations.dest1
false
}
connectionManagerMock.demand.hasPeerSlots() { false }
trustServiceMock.demand.getLevel { dest ->
assert dest == destinations.dest1
TrustLevel.TRUSTED
}
hostCacheMock.ignore.getGoodHosts { n -> [] }
trustServiceMock.demand.getLevel { dest ->
assert dest == destinations.dest1
TrustLevel.TRUSTED
}
hostCacheMock.ignore.getGoodHosts { n -> [] }
initMocks()
initMocks()
outputStream.write("MuWire peer".bytes)
byte [] OK = new byte[6]
def dis = new DataInputStream(inputStream)
dis.readFully(OK)
assert OK == "REJECT".bytes
outputStream.write("MuWire peer".bytes)
byte [] OK = new byte[6]
def dis = new DataInputStream(inputStream)
dis.readFully(OK)
assert OK == "REJECT".bytes
Thread.sleep(50)
assert dis.read() == -1
Thread.sleep(50)
assert dis.read() == -1
Thread.sleep(50)
assert connectionEvents.size() == 1
def event = connectionEvents[0]
assert event.endpoint.destination == destinations.dest1
assert event.status == ConnectionAttemptStatus.REJECTED
assert event.incoming == true
assert event.leaf == false
}
Thread.sleep(50)
assert connectionEvents.size() == 1
def event = connectionEvents[0]
assert event.endpoint.destination == destinations.dest1
assert event.status == ConnectionAttemptStatus.REJECTED
assert event.incoming == true
assert event.leaf == false
}
@Test
void testPeerRejectsLeafSlots() {
settings = new MuWireSettings() {
boolean isLeaf() {
false
}
}
i2pAcceptorMock.demand.accept {
def is = new PipedInputStream()
outputStream = new PipedOutputStream(is)
def os = new PipedOutputStream()
inputStream = new PipedInputStream(os)
new Endpoint(destinations.dest1, is, os, null)
}
i2pAcceptorMock.demand.accept { Thread.sleep(Integer.MAX_VALUE) }
@Test
void testPeerRejectsLeafSlots() {
settings = new MuWireSettings() {
boolean isLeaf() {
false
}
}
i2pAcceptorMock.demand.accept {
def is = new PipedInputStream()
outputStream = new PipedOutputStream(is)
def os = new PipedOutputStream()
inputStream = new PipedInputStream(os)
new Endpoint(destinations.dest1, is, os, null)
}
i2pAcceptorMock.demand.accept { Thread.sleep(Integer.MAX_VALUE) }
connectionEstablisherMock.demand.isInProgress(destinations.dest1) { false }
connectionManagerMock.demand.isConnected { dest ->
assert dest == destinations.dest1
false
}
connectionManagerMock.demand.hasLeafSlots() { false }
trustServiceMock.demand.getLevel { dest ->
assert dest == destinations.dest1
TrustLevel.TRUSTED
}
hostCacheMock.ignore.getGoodHosts { n -> [] }
trustServiceMock.demand.getLevel { dest ->
assert dest == destinations.dest1
TrustLevel.TRUSTED
}
hostCacheMock.ignore.getGoodHosts { n -> [] }
initMocks()
initMocks()
outputStream.write("MuWire leaf".bytes)
byte [] OK = new byte[6]
def dis = new DataInputStream(inputStream)
dis.readFully(OK)
assert OK == "REJECT".bytes
outputStream.write("MuWire leaf".bytes)
byte [] OK = new byte[6]
def dis = new DataInputStream(inputStream)
dis.readFully(OK)
assert OK == "REJECT".bytes
Thread.sleep(50)
assert dis.read() == -1
Thread.sleep(50)
assert dis.read() == -1
Thread.sleep(50)
assert connectionEvents.size() == 1
def event = connectionEvents[0]
assert event.endpoint.destination == destinations.dest1
assert event.status == ConnectionAttemptStatus.REJECTED
assert event.incoming == true
assert event.leaf == true
}
@Test
void testPeerRejectsPeerSuggests() {
settings = new MuWireSettings() {
boolean isLeaf() {
false
}
}
i2pAcceptorMock.demand.accept {
def is = new PipedInputStream()
outputStream = new PipedOutputStream(is)
def os = new PipedOutputStream()
inputStream = new PipedInputStream(os)
new Endpoint(destinations.dest1, is, os, null)
}
i2pAcceptorMock.demand.accept { Thread.sleep(Integer.MAX_VALUE) }
connectionEstablisherMock.demand.isInProgress(destinations.dest1) { false }
connectionManagerMock.demand.isConnected { dest ->
assert dest == destinations.dest1
false
}
connectionManagerMock.demand.hasPeerSlots() { false }
trustServiceMock.demand.getLevel { dest ->
assert dest == destinations.dest1
TrustLevel.TRUSTED
}
hostCacheMock.ignore.getGoodHosts { n -> [destinations.dest2] }
initMocks()
outputStream.write("MuWire peer".bytes)
byte [] OK = new byte[6]
def dis = new DataInputStream(inputStream)
dis.readFully(OK)
assert OK == "REJECT".bytes
short payloadSize = dis.readUnsignedShort()
byte[] payload = new byte[payloadSize]
dis.readFully(payload)
assert dis.read() == -1
def json = new JsonSlurper()
json = json.parse(payload)
assert json.tryHosts != null
assert json.tryHosts.size() == 1
assert json.tryHosts.contains(destinations.dest2.toBase64())
Thread.sleep(50)
assert connectionEvents.size() == 1
def event = connectionEvents[0]
assert event.endpoint.destination == destinations.dest1
assert event.status == ConnectionAttemptStatus.REJECTED
}
}
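The three rejection tests above exercise the same wire behaviour: the acceptor answers an unwanted "MuWire peer"/"MuWire leaf" greeting with the 6-byte token "REJECT", optionally followed by a 2-byte length and a JSON payload whose tryHosts array lists other destinations to try. A minimal sketch of consuming that reply, inferred from the assertions rather than taken from the production code (the helper name and the EOF handling are assumptions):

import groovy.json.JsonSlurper

// Hypothetical helper shaped after what the tests assert: "REJECT" may be followed
// by readUnsignedShort() bytes of JSON listing base64-encoded destinations to try.
def readRejectSuggestions(DataInputStream dis) {
    int payloadSize
    try {
        payloadSize = dis.readUnsignedShort()
    } catch (EOFException ignored) {
        return []                       // the non-suggesting tests simply close the stream here
    }
    byte[] payload = new byte[payloadSize]
    dis.readFully(payload)
    def json = new JsonSlurper().parse(payload)
    json.tryHosts ?: []
}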

@@ -17,271 +17,271 @@ import groovy.mock.interceptor.MockFor
class ConnectionEstablisherTest {
EventBus eventBus
final Destinations destinations = new Destinations()
List<ConnectionEvent> connectionEvents
List<HostDiscoveredEvent> discoveredEvents
DataInputStream inputStream
DataOutputStream outputStream
def i2pConnectorMock
I2PConnector i2pConnector
MuWireSettings settings
def connectionManagerMock
ConnectionManager connectionManager
def hostCacheMock
HostCache hostCache
ConnectionEstablisher establisher
@Before
void before() {
connectionEvents = new CopyOnWriteArrayList()
discoveredEvents = new CopyOnWriteArrayList()
def listener = new Object() {
void onConnectionEvent(ConnectionEvent e) {
connectionEvents.add(e)
}
void onHostDiscoveredEvent(HostDiscoveredEvent e) {
discoveredEvents.add e
}
}
eventBus = new EventBus()
eventBus.register(ConnectionEvent.class, listener)
eventBus.register(HostDiscoveredEvent.class, listener)
i2pConnectorMock = new MockFor(I2PConnector.class)
connectionManagerMock = new MockFor(ConnectionManager.class)
hostCacheMock = new MockFor(HostCache.class)
}
@After
void after() {
establisher?.stop()
i2pConnectorMock.verify i2pConnector
connectionManagerMock.verify connectionManager
hostCacheMock.verify hostCache
Thread.sleep(100)
}
private void initMocks() {
i2pConnector = i2pConnectorMock.proxyInstance()
connectionManager = connectionManagerMock.proxyInstance()
hostCache = hostCacheMock.proxyInstance()
establisher = new ConnectionEstablisher(eventBus, i2pConnector, settings, connectionManager, hostCache)
establisher.start()
Thread.sleep(250)
}
@Test
void testConnectFails() {
settings = new MuWireSettings()
connectionManagerMock.ignore.needsConnections {
true
}
hostCacheMock.ignore.getHosts { num ->
assert num == 1
[destinations.dest1]
}
connectionManagerMock.ignore.isConnected { dest ->
assert dest == destinations.dest1
false
}
i2pConnectorMock.demand.connect { dest ->
assert dest == destinations.dest1
throw new IOException()
}
initMocks()
assert connectionEvents.size() == 1
def event = connectionEvents[0]
assert event.endpoint.destination == destinations.dest1
assert event.incoming == false
assert event.status == ConnectionAttemptStatus.FAILED
}
@Test
void testConnectionSucceedsPeer() {
settings = new MuWireSettings() {
boolean isLeaf() {false}
}
connectionManagerMock.ignore.needsConnections {
true
}
hostCacheMock.ignore.getHosts { num ->
assert num == 1
[destinations.dest1]
}
connectionManagerMock.ignore.isConnected { dest ->
assert dest == destinations.dest1
false
}
i2pConnectorMock.demand.connect { dest ->
PipedOutputStream os = new PipedOutputStream()
inputStream = new DataInputStream(new PipedInputStream(os))
PipedInputStream is = new PipedInputStream()
outputStream = new DataOutputStream(new PipedOutputStream(is))
new Endpoint(dest, is, os, null)
}
initMocks()
byte [] header = new byte[11]
inputStream.readFully(header)
assert header == "MuWire peer".bytes
outputStream.write("OK".bytes)
outputStream.flush()
Thread.sleep(100)
assert connectionEvents.size() == 1
def event = connectionEvents[0]
assert event.endpoint.destination == destinations.dest1
assert event.incoming == false
assert event.status == ConnectionAttemptStatus.SUCCESSFUL
}
@Test
void testConnectionSucceedsLeaf() {
settings = new MuWireSettings() {
boolean isLeaf() {true}
}
connectionManagerMock.ignore.needsConnections {
true
}
hostCacheMock.ignore.getHosts { num ->
assert num == 1
[destinations.dest1]
}
connectionManagerMock.ignore.isConnected { dest ->
assert dest == destinations.dest1
false
}
i2pConnectorMock.demand.connect { dest ->
PipedOutputStream os = new PipedOutputStream()
inputStream = new DataInputStream(new PipedInputStream(os))
PipedInputStream is = new PipedInputStream()
outputStream = new DataOutputStream(new PipedOutputStream(is))
new Endpoint(dest, is, os, null)
}
initMocks()
byte [] header = new byte[11]
inputStream.readFully(header)
assert header == "MuWire leaf".bytes
outputStream.write("OK".bytes)
outputStream.flush()
Thread.sleep(100)
assert connectionEvents.size() == 1
def event = connectionEvents[0]
assert event.endpoint.destination == destinations.dest1
assert event.incoming == false
assert event.status == ConnectionAttemptStatus.SUCCESSFUL
}
@Test
void testConnectionRejected() {
settings = new MuWireSettings() {
boolean isLeaf() {false}
}
connectionManagerMock.ignore.needsConnections {
true
}
hostCacheMock.ignore.getHosts { num ->
assert num == 1
[destinations.dest1]
}
connectionManagerMock.ignore.isConnected { dest ->
assert dest == destinations.dest1
false
}
i2pConnectorMock.demand.connect { dest ->
PipedOutputStream os = new PipedOutputStream()
inputStream = new DataInputStream(new PipedInputStream(os))
PipedInputStream is = new PipedInputStream()
outputStream = new DataOutputStream(new PipedOutputStream(is))
new Endpoint(dest, is, os, null)
}
initMocks()
byte [] header = new byte[11]
inputStream.readFully(header)
assert header == "MuWire peer".bytes
outputStream.write("REJECT".bytes)
outputStream.flush()
Thread.sleep(100)
assert connectionEvents.size() == 1
def event = connectionEvents[0]
assert event.endpoint.destination == destinations.dest1
assert event.incoming == false
assert event.status == ConnectionAttemptStatus.REJECTED
assert discoveredEvents.isEmpty()
}
@Test
void testConnectionRejectedSuggestions() {
settings = new MuWireSettings() {
boolean isLeaf() {false}
}
connectionManagerMock.ignore.needsConnections {
true
}
hostCacheMock.ignore.getHosts { num ->
assert num == 1
[destinations.dest1]
}
connectionManagerMock.ignore.isConnected { dest ->
assert dest == destinations.dest1
false
}
i2pConnectorMock.demand.connect { dest ->
PipedOutputStream os = new PipedOutputStream()
inputStream = new DataInputStream(new PipedInputStream(os))
PipedInputStream is = new PipedInputStream()
outputStream = new DataOutputStream(new PipedOutputStream(is))
new Endpoint(dest, is, os, null)
}
initMocks()
byte [] header = new byte[11]
inputStream.readFully(header)
assert header == "MuWire peer".bytes
outputStream.write("REJECT".bytes)
outputStream.flush()
def json = [:]
json.tryHosts = [destinations.dest2.toBase64()]
json = JsonOutput.toJson(json)
outputStream.writeShort(json.bytes.length)
outputStream.write(json.bytes)
outputStream.flush()
Thread.sleep(100)
assert connectionEvents.size() == 1
def event = connectionEvents[0]
assert event.endpoint.destination == destinations.dest1
assert event.incoming == false
assert event.status == ConnectionAttemptStatus.REJECTED
assert discoveredEvents.size() == 1
event = discoveredEvents[0]
assert event.destination == destinations.dest2
}
}
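Throughout ConnectionEstablisherTest the I2P connection is stood in for by piped streams: whatever the establisher writes arrives on the test's inputStream, and whatever the test writes arrives on the Endpoint handed to the code under test. A sketch of that wiring using only java.io plus the project's Endpoint class (the helper name and the map return are illustrative, not part of the test code):

def fakeConnection(def destination) {
    // the side handed to the code under test
    def toCode   = new PipedInputStream()
    def fromCode = new PipedOutputStream()
    // the side the test keeps; each test stream is piped to the opposite code stream
    def testOut  = new PipedOutputStream(toCode)     // test writes -> code reads
    def testIn   = new PipedInputStream(fromCode)    // code writes -> test reads
    [endpoint: new Endpoint(destination, toCode, fromCode, null),
     testIn  : new DataInputStream(testIn),
     testOut : new DataOutputStream(testOut)]
}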

@@ -4,6 +4,7 @@ import static org.junit.Assert.fail
import org.junit.After
import org.junit.Before
import org.junit.Ignore
import org.junit.Test
import com.muwire.core.EventBus
@@ -180,10 +181,11 @@ class DownloadSessionTest {
}
@Test
@Ignore // this needs to be rewritten with stealing in mind
public void testSmallFileClaimed() {
initSession(20, [0])
long now = System.currentTimeMillis()
downloadThread.join(100)
downloadThread.join(150)
assert 100 >= (System.currentTimeMillis() - now)
assert !performed
assert available.isEmpty()

@@ -16,7 +16,7 @@ class PiecesTest {
public void testSinglePiece() {
pieces = new Pieces(1)
assert !pieces.isComplete()
assert pieces.claim() == 0
assert pieces.claim() == [0,0,0]
pieces.markDownloaded(0)
assert pieces.isComplete()
}
@@ -25,28 +25,28 @@ class PiecesTest {
public void testTwoPieces() {
pieces = new Pieces(2)
assert !pieces.isComplete()
int piece = pieces.claim()
assert piece == 0 || piece == 1
pieces.markDownloaded(piece)
int[] piece = pieces.claim()
assert piece[0] == 0 || piece[0] == 1
pieces.markDownloaded(piece[0])
assert !pieces.isComplete()
int piece2 = pieces.claim()
assert piece != piece2
pieces.markDownloaded(piece2)
int[] piece2 = pieces.claim()
assert piece[0] != piece2[0]
pieces.markDownloaded(piece2[0])
assert pieces.isComplete()
}
@Test
public void testClaimAvailable() {
pieces = new Pieces(2)
int claimed = pieces.claim([0].toSet())
assert claimed == 0
assert -1 == pieces.claim([0].toSet())
int[] claimed = pieces.claim([0].toSet())
assert claimed == [0,0,0]
assert [0,0,1] == pieces.claim([0].toSet())
}
@Test
public void testClaimNoneAvailable() {
pieces = new Pieces(20)
int claimed = pieces.claim()
assert -1 == pieces.claim([claimed].toSet())
int[] claimed = pieces.claim()
assert [0,0,0] == pieces.claim(claimed.toSet())
}
}
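The PiecesTest changes reflect a new claim() signature: instead of a single piece index (with -1 meaning nothing claimable), claim() now returns an int[] triple. Judging purely by the assertions, the first two elements look like a claimed [start, end] range and the third like a steal flag that is 1 when the returned range was already claimed; this reading is inferred from the test, not a documented contract:

def pieces = new Pieces(2)                   // as in testClaimAvailable
int[] claimed = pieces.claim([0].toSet())    // fresh claim of piece 0        -> [0, 0, 0]
int[] again   = pieces.claim([0].toSet())    // same piece, already claimed   -> [0, 0, 1]
boolean stolen = again[2] == 1               // assumed meaning of the third element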

@@ -8,76 +8,76 @@ import org.junit.Test
class FileHasherTest extends GroovyTestCase {
def hasher = new FileHasher()
File tmp
@Before
void setUp() {
tmp = File.createTempFile("testFile", "test")
tmp.deleteOnExit()
}
@After
void tearDown() {
tmp?.delete()
}
@Test
void testPieceSize() {
assert 17 == FileHasher.getPieceSize(1000000)
assert 17 == FileHasher.getPieceSize(100000000)
assert 24 == FileHasher.getPieceSize(FileHasher.MAX_SIZE)
shouldFail IllegalArgumentException, {
FileHasher.getPieceSize(Long.MAX_VALUE)
}
}
@Test
void testHash1Byte() {
def fos = new FileOutputStream(tmp)
fos.write(0)
fos.close()
def ih = hasher.hashFile(tmp)
assert ih.getHashList().length == 32
}
@Test
void testHash1PieceExact() {
def fos = new FileOutputStream(tmp)
byte [] b = new byte[ 0x1 << 18]
fos.write b
fos.close()
def ih = hasher.hashFile tmp
assert ih.getHashList().length == 64
}
@Test
void testHash1Piece1Byte() {
def fos = new FileOutputStream(tmp)
byte [] b = new byte[ (0x1 << 18) + 1]
fos.write b
fos.close()
def ih = hasher.hashFile tmp
assert ih.getHashList().length == 96
}
@Test
void testHash2Pieces() {
def fos = new FileOutputStream(tmp)
byte [] b = new byte[ (0x1 << 19)]
fos.write b
fos.close()
def ih = hasher.hashFile tmp
assert ih.getHashList().length == 128
}
@Test
void testHash2Pieces2Bytes() {
def fos = new FileOutputStream(tmp)
byte [] b = new byte[ (0x1 << 19) + 2]
fos.write b
fos.close()
def ih = hasher.hashFile tmp
assert ih.getHashList().length == 160
}
}
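The expected hash-list lengths above follow from one 32-byte hash per piece (testHash1Byte pins the 32). A worked check, assuming the piece-size exponent for files this small is 17, consistent with FileHasher.getPieceSize(1000000) == 17 asserted in testPieceSize:

long pieceSize = 0x1L << 17                                     // 131072 bytes per piece (assumed exponent)
long fileSize  = (0x1L << 18) + 1                               // the testHash1Piece1Byte input
long numPieces = (fileSize + pieceSize - 1).intdiv(pieceSize)   // = 3 pieces
assert numPieces * 32 == 96                                     // matches the asserted hash-list length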

Some files were not shown because too many files have changed in this diff.