Compare commits

81 Commits

Author SHA1 Message Date
jrandom
870e94e184 * 2006-05-09 0.6.1.18 released
2006-05-09  jrandom
    * Further tunnel creation timeout revamp
2006-05-09 21:17:17 +00:00
complication
6b0d507644 2006-05-07 Complication
* Fix problem whereby repeated calls to allowed() would make
      the 1-tunnel exception permit more than one concurrent build
2006-05-08 03:19:46 +00:00
jrandom
70cf9e4ca7 2006-05-06 jrandom
* Readjust the tunnel creation timeouts to reject less but fail earlier,
      while tracking the extended timeout events.
2006-05-06 20:27:34 +00:00
jrandom
2a3974c71d 2006-05-04 jrandom
* Short circuit a highly congested part of the stat logging unless it's
      required (may or may not help with a synchronization issue reported by
      andreas)
2006-05-04 23:08:48 +00:00
complication
46ac9292e8 2006-05-03 Complication
* Allow a single build attempt to proceed despite 1-minute overload
      only if the 1-second rate shows enough spare bandwidth
      (e.g. overload has already eased)
2006-05-03 11:13:26 +00:00
complication
4307097472 2006-05-02 Complication
* Correct a misnamed property in SummaryHelper.java
      to avoid confusion
    * Make the maximum allowance of our own concurrent
      tunnel builds slightly adaptive: one concurrent build per 6 KB/s
      within the fixed range 2..10 (sketched below)
    * While overloaded, try to avoid completely choking our own build attempts,
      instead prefer limiting them to 1
2006-05-03 04:30:26 +00:00
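The adaptive allowance above reduces to a simple clamp. A minimal sketch,
assuming the input is the router's share bandwidth in KB/s; the class and
method names are illustrative, not the actual router code:

    class BuildAllowance {
        static int maxConcurrentBuilds(int shareKBps, boolean overloaded) {
            if (overloaded)
                return 1;                  // limit to 1 rather than choking to 0
            int allowance = shareKBps / 6; // one concurrent build per 6 KB/s
            return Math.max(2, Math.min(10, allowance)); // fixed range 2..10
        }
    }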
complication
ed3fdaf4f1 2006-05-02 Complication
* Fixed URL in previous update, sorry
2006-05-03 02:11:06 +00:00
complication
378a9a8f5c 2006-05-02 Complication
* Weekly news.xml update
2006-05-03 02:03:01 +00:00
jrandom
4ef6180455 2006-05-01 jrandom
* Adjust the tunnel build timeouts to cut down on expirations, and
      increase the SSU connection establishment retransmission rate to
      something less glacial.
    * For the first 5 minutes of uptime, be less aggressive with tunnel
      exploration, opting for more reliable peers to start with.
2006-05-01 22:40:21 +00:00
jrandom
d4970e23c0 2006-05-01 jrandom
* Fix for a netDb lookup race (thanks cervantes!)
2006-05-01 19:09:02 +00:00
duck
0c9f165016 fix typos 2006-05-01 15:39:37 +00:00
jrandom
be3a899ecb 2006-04-27 jrandom
* Avoid a race in the message reply registry (thanks cervantes!)
2006-04-28 00:31:20 +00:00
jrandom
7a6a749004 2006-04-27 jrandom
* Fixed the tunnel expiration desync code (thanks Complication!)
2006-04-28 00:08:40 +00:00
complication
17271ee3f0 2006-04-25 Complication
* weekly news.xml update
2006-04-26 02:30:05 +00:00
complication
99bcfa90df 2006-04-24 Complication
* Update news.xml to reflect 0.6.1.17
2006-04-24 12:43:25 +00:00
jrandom
eb36e993c1 * 2006-04-23 0.6.1.17 released 2006-04-23 21:06:12 +00:00
zzz
e5eca5fa45 zzz update 2006-04-22 20:37:21 +00:00
jrandom
8cba2f4236 2006-04-19 jrandom
* Adjust how we pick high capacity peers to allow the inclusion of fast
      peers (the previous filter assumed an old usage pattern)
    * New set of stats to help track per-packet-type bandwidth usage better
    * Cut out the proactive tail drop from the SSU transport, for now
    * Reduce the frequency of tunnel build attempts while we're saturated
    * Don't drop tunnel requests as easily - prefer to explicitly reject them
2006-04-19 17:46:51 +00:00
complication
40d5ed31ac 2006-04-15 Complication
* Update news.xml to reflect 0.6.1.16
2006-04-15 17:25:50 +00:00
jrandom
181275fe35 * 2006-04-15 0.6.1.16 released 2006-04-15 07:58:12 +00:00
jrandom
23d8c01ce7 2006-04-15 jrandom
* Adjust the proactive tunnel request dropping so we will reject what we
      can instead of dropping so much (but still dropping if we get too far
      overloaded)
2006-04-15 07:15:19 +00:00
jrandom
de83944486 2006-04-14 jrandom
* 0 isn't very random
    * Adjust the tunnel drop to be more reasonable
2006-04-14 20:24:07 +00:00
jrandom
90cd7ff23a 2006-04-14 jrandom
* -28.00230115311259 is not between 0 and 1 in any universe I know.
    * Made the bw-related tunnel join throttle much simpler
2006-04-14 18:04:11 +00:00
jrandom
8d0a9b4ccd 2006-04-14 jrandom
* Make some more stats graphable, and allow some internal tweaking on the
      tunnel pairing for creation and testing.
2006-04-14 11:40:35 +00:00
jrandom
230d4cd23f * 2006-04-13 0.6.1.15 released 2006-04-13 12:40:21 +00:00
jrandom
e9b6fcc0a4 2006-04-12 jrandom
* Added a further failsafe against trying to queue up too many messages to
      a peer.
2006-04-13 04:22:06 +00:00
jrandom
8fcb871409 2006-04-12 jrandom
* Watch out for failed syndie index fetches (thanks bar!)
2006-04-12 06:49:01 +00:00
jrandom
83bef43fd5 2006-04-11 jrandom
* Throttling improvements on SSU - throttle all transmissions to a peer
      when we are retransmitting, not just retransmissions.  Also, if
      we're already retransmitting to a peer, probabilistically tail drop new
      messages targeting that peer, based on the estimated wait time before
      transmission (sketched below).
    * Fixed the rounding error in the inbound tunnel drop probability.
2006-04-11 13:39:06 +00:00
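The tail drop above can be read as a drop probability that grows with the
estimated wait, clamped to [0,1] (the 2006-04-14 entry above, "-28.00230115311259
is not between 0 and 1", shows what an unclamped probability looks like). A
sketch under that assumed shape; the real SSU code computes its estimate
differently:

    import java.util.Random;

    class TailDrop {
        private final Random rng = new Random();
        boolean shouldDrop(long estimatedWaitMs, long maxWaitMs) {
            double p = (double) estimatedWaitMs / maxWaitMs;
            p = Math.max(0.0, Math.min(1.0, p)); // a probability stays in [0,1]
            return rng.nextDouble() < p;         // more backlog, higher drop chance
        }
    }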
jrandom
b4fc6ca31b 2006-04-10 jrandom
* Include a combined send/receive graph (good idea cervantes!)
    * Proactively drop inbound tunnel requests probabilistically as the
      estimated queue time approaches our limit, rather than letting them all
      through up to that limit.
2006-04-10 05:37:28 +00:00
jrandom
ab3f1b708d 2006-04-08 jrandom
* Stat summarization fix (removing the occasional holes in the jrobin
      graphs)
2006-04-09 01:14:08 +00:00
jrandom
c76402a160 2006-04-08 jrandom
* Process inbound tunnel requests more efficiently
    * Proactively drop inbound tunnel requests if the queue before we'd
      process it in is too long (dynamically adjusted by cpu load; sketched below)
    * Adjust the tunnel rejection throttle to reject requests when we have to
      proactively drop too many requests.
    * Display the number of pending inbound tunnel join requests on the router
      console (as the "handle backlog")
    * Include a few more stats in the default set of graphs
2006-04-08 06:15:43 +00:00
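One way to read the proactive drop above: estimate how long a new request
would sit in the handle backlog, and refuse it up front when that exceeds a
CPU-load-adjusted budget. A sketch under that reading; names and the load
adjustment are assumptions, not the actual router code:

    class RequestGate {
        static boolean accept(int backlogSize, long avgHandleMs,
                              double cpuLoad, long baseBudgetMs) {
            long estQueueMs = backlogSize * avgHandleMs; // wait before we'd process it
            long budgetMs = (long) (baseBudgetMs / Math.max(1.0, cpuLoad));
            return estQueueMs < budgetMs;                // otherwise drop proactively
        }
    }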
jrandom
a50c73aa5e 2006-04-06 jrandom
* Fix for a bug in the new irc ping/pong filter (thanks Complication!)
2006-04-07 01:26:32 +00:00
jrandom
5aa66795d2 2006-04-06 jrandom
* Fixed a typo in the reply cleanup code
2006-04-06 10:33:44 +00:00
jrandom
ac3c2d2b15 * 2006-04-05 0.6.1.14 released 2006-04-05 17:08:04 +00:00
jrandom
072a45e5ce 2006-04-05 jrandom
* Cut down on the time that we allow a tunnel creation request to sit by
      without response, and reject tunnel creation requests that are lagged
      locally.  Also switch to a bounded FIFO instead of a LIFO (sketched below)
    * Threading tweaks for the message handling (thanks bar!)
    * Don't add addresses to syndie with blank names (thanks Complication!)
    * Further ban clearance
2006-04-05 04:40:00 +00:00
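On the FIFO switch above: a LIFO under load keeps serving the newest
requests while the oldest sit until they expire; a bounded FIFO serves the
oldest first and refuses new work once full. A sketch; the bound of 64 is
an assumption:

    import java.util.concurrent.ArrayBlockingQueue;

    class RequestQueue<T> {
        private final ArrayBlockingQueue<T> pending =
            new ArrayBlockingQueue<T>(64);
        boolean enqueue(T req) { return pending.offer(req); } // false: refuse now
        T next()               { return pending.poll(); }     // oldest first
    }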
complication
1ab14e52d2 2006-04-04 Complication
* weekly news.xml update
2006-04-05 03:06:00 +00:00
jrandom
9a820961a2 2006-04-05 jrandom
* Fix during the SSU handshake to avoid an unnecessary failure on
      packet retransmission (thanks ripple!)
    * Fix during the SSU handshake to use the negotiated session key asap,
      rather than using the intro key for more than we should (thanks ripple!)
    * Fixes to the message reply registry (thanks Complication!)
    * More comprehensive syndie banning (for repeated pushes)
    * Publish the router's ballpark bandwidth limit (w/in a power of 2), for
      testing purposes (sketched below)
    * Put a floor back on the capacity threshold, so too many failing peers
      won't cause us to pick very bad peers (unless we have very few good
      ones)
    * Bugfix to cut down on peers using introducers unnecessarily (thanks
      Complication!)
    * Reduced the default streaming lib message size to fit into a single
      tunnel message, rather than require 5 tunnel messages to be transferred
      without loss before recomposition.  This reduces throughput, but should
      increase reliability, at least for the time being.
    * Misc small bugfixes in the router (thanks all!)
    * More tweaking for Syndie's CSS (thanks Doubtful Salmon!)
2006-04-04 12:20:32 +00:00
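On the ballpark bandwidth item: publishing a limit only to within a power
of 2 reveals its magnitude without the exact setting. A sketch; whether the
router rounds down or to nearest is an assumption here:

    class Ballpark {
        static int bandwidth(int bytesPerSec) {
            int rounded = 1;
            while (rounded * 2 <= bytesPerSec)
                rounded *= 2;            // largest power of 2 <= the real limit
            return rounded;
        }
    }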
jrandom
764149aef3 2006-04-01 jrandom
* Take out the router watchdog's teeth (don't restart on leaseset failure)
    * Filter the IRC ping/pong messages, as some clients send unsafe
      information in them (thanks aardvax and dust!)
2006-04-03 10:07:22 +00:00
jrandom
1b3ad31bff 2006-04-01 jrandom
* Take out the router watchdog's teeth (don't restart on leaseset failure)
2006-04-01 19:05:35 +00:00
jrandom
15e6c27c04 2006-03-30 jrandom
* Substantially reduced the lock contention in the message registry (a
      major hotspot that can choke most threads).  Also reworked the locking
      so we don't need per-message timer events
    * No need to have additional per-peer message clearing, as they are
      either unregistered individually or expired.
    * Include some of the more transient tunnel throttling
2006-03-30 07:26:43 +00:00
complication
8b707e569f 2006-03-28 Complication
* weekly news.xml update
2006-03-29 02:09:23 +00:00
complication
e4c4b24c61 2006-03-26 Complication
* announce 0.6.1.13
2006-03-27 03:24:38 +00:00
jrandom
031636e607 * 2006-03-26 0.6.1.13 released 2006-03-26 23:23:49 +00:00
jrandom
b5c0d77c69 2006-03-25 jrandom
* Added a simple purge and ban of syndie authors, shown as the
      "Purge and ban" button on the addressbook for authors that are already
      on the ignore list.  All of their entries and metadata are deleted from
      the archive, and they are transparently filtered from any remote
      syndication (so no user on the syndie instance will pull any new posts
      from them)
    * More strict tunnel join throttling when congested
2006-03-25 23:50:48 +00:00
jrandom
d489caa88c 2006-03-24 jrandom
* Try to desync tunnel building near startup (thanks Complication!)
    * If we are highly congested, fall back on only querying the floodfill
      netDb peers, and only storing to those peers too
    * Cleaned up the floodfill-only queries
2006-03-24 20:53:28 +00:00
complication
2a24029acf 2006-03-21 Complication
* Weekly news.xml update
2006-03-22 02:15:13 +00:00
jrandom
c5aab8c750 2006-03-21 jrandom
* Avoid a very strange (unconfirmed) bug that people using the systray's
      browser picker dialog could cause by disabling the GUI-based browser
      picker.
    * Cut down on subsequent streaming lib reset packets transmitted
    * Use a larger MTU more often
    * Allow netDb searches to query shitlisted peers, as the queries are
      indirect.
    * Add an option to disable non-floodfill netDb searches (non-floodfill
      searches are used by default, but can be disabled by adding
      netDb.floodfillOnly=true to the advanced config)
2006-03-21 23:11:32 +00:00
jrandom
343748111a 2006-03-20 jrandom
* Fix to allow for some slack when coalescing stats
    * Workaround some oddball errors
2006-03-20 05:39:54 +00:00
jrandom
c5ddfabfe9 2006-03-20 jrandom
* Fix to allow for some slack when coalescing stats
    * Workaround some oddball errors
2006-03-20 05:31:09 +00:00
jrandom
1ef33906ed 2006-03-18 jrandom
* Added a new graphs.jsp page to show all of the stats being harvested
2006-03-19 00:23:23 +00:00
jrandom
f3849a22ad 2006-03-18 jrandom
* Made the netDb search load limitations a little less stringent
    * Add support for specifying the number of periods to be plotted on the
      graphs - e.g. to plot only the last hour of a stat that is averaged at
      the 60 second period, add &periodCount=60
2006-03-18 23:09:35 +00:00
jrandom
b03ff21d3b 2006-03-17 jrandom
* Add support for graphing the event count as well as the average stat
      value (done by adding &showEvents=true to the URL).  Also supports
      hiding the legend (&hideLegend=true), the grid (&hideGrid=true), and
      the title (&hideTitle=true).
    * Removed an unnecessary arbitrary filter on the profile organizer so we
      can pick high capacity and fast peers more appropriately
2006-03-17 23:46:00 +00:00
jrandom
52094b10c9 aych tee emm ell smells 2006-03-16 22:37:57 +00:00
jrandom
fc927efaa3 2006-03-16 jrandom
* Integrate basic hooks for jrobin (http://jrobin.org) into the router
      console.  Selected stats can be harvested automatically and fed into
      in-memory RRD databases, and those databases can be served up either as
      PNG images or as RRDtool compatible XML dumps (see oldstats.jsp for
      details).  A base set of stats is harvested by default, but an
      alternate list can be specified by setting the 'stat.summaries' list on
      the advanced config.  For instance:
      stat.summaries=bw.recvRate.60000,bw.sendRate.60000
    * HTML tweaking for the general config page (thanks void!)
    * Odd NPE fix (thanks Complication!)
2006-03-16 21:52:09 +00:00
jrandom
65dc803fb7 2006-03-16 jrandom
* Integrate basic hooks for jrobin (http://jrobin.org) into the router
      console.  Selected stats can be harvested automatically and fed into
      in-memory RRD databases, and those databases can be served up either as
      PNG images or as RRDtool compatible XML dumps (see oldstats.jsp for
      details).  A base set of stats is harvested by default, but an
      alternate list can be specified by setting the 'stat.summaries' list on
      the advanced config.  For instance:
      stat.summaries=bw.recvRate.60000,bw.sendRate.60000
    * HTML tweaking for the general config page (thanks void!)
    * Odd NPE fix (thanks Complication!)
2006-03-16 21:45:17 +00:00
complication
349adf6690 2006-03-15 Complication
* Trim out an old, inactive IP second-guessing method
      (thanks for spotting, Anonymous!)
2006-03-16 00:49:22 +00:00
jrandom
2c843fd818 2006-03-15 jrandom
* Further stat cleanup
    * Keep track of how many peers we are actively trying to communicate with,
      beyond those who are just trying to communicate with us.
    * Further router tunnel participation throttle revisions to avoid spurious
      rejections
    * Rate stat display cleanup (thanks ripple!)
    * Don't even try to send messages that have been queued too long
2006-03-15 22:36:10 +00:00
jrandom
863b511cde 2006-03-15 jrandom
* Further stat cleanup
    * Keep track of how many peers we are actively trying to communicate with,
      beyond those who are just trying to communicate with us.
    * Further router tunnel participation throttle revisions to avoid spurious
      rejections
    * Rate stat display cleanup (thanks ripple!)
    * Don't even try to send messages that have been queued too long
2006-03-15 22:26:42 +00:00
zzz
c417e7c237 2006-03-14 zzz update 2006-03-15 06:02:07 +00:00
zzz
1822c0d7d8 2006-03-07 zzz update 2006-03-09 02:19:42 +00:00
zzz
94c1c32b51 2006-03-05 zzz
* Remove the +++--- from the logs on i2psnark startup
2006-03-06 01:57:47 +00:00
jrandom
deb35f4af4 2006-03-05 jrandom
* HTML fixes in Syndie to work better with Opera (thanks shaklen!)
    * Give netDb lookups to floodfill peers more time, as they are much more
      likely to succeed (thereby cutting down on the unnecessary netDb
      searches outside the floodfill set)
    * Fix to the SSU IP detection code so we won't use introducers when we
      don't need them (thanks Complication!)
    * Add a brief shitlist to i2psnark so it doesn't keep on trying to reach
      peers given to it
    * Don't let netDb searches wander across too many peers
    * Don't use the 1s bandwidth usage in the tunnel participation throttle,
      as it's too volatile to have much meaning.
    * Don't bork if a Syndie post is missing an entry.sml
2006-03-05 17:07:07 +00:00
complication
883150f943 2006-03-05 Complication
* Reduce exposed statistical information,
      to make build and uptime tracking more expensive
2006-03-05 07:44:59 +00:00
complication
717d1b97b2 2006-03-04 Complication
* Fix the announce URL of orion's tracker in Snark sources
2006-03-04 23:50:01 +00:00
complication
e62135eacc 2006-03-03 Complication
* Explicit check for an index out of bounds exception while parsing
      an inbound IRC command (implicit check was there already)
2006-03-04 03:04:06 +00:00
jrandom
2c6d953359 2006-03-01 jrandom
* More aggressive tunnel throttling as we approach our bandwidth limit,
      and throttle based off periods wider than 1 second.
    * Included Doubtful Salmon's syndie stylings (thanks!)
2006-03-01 23:01:20 +00:00
zzz
2b79e2df3f 2006-02-28 zzz update 2006-03-01 04:11:16 +00:00
zzz
fab6e421b8 2006-02-27 zzz
* Update error page templates to add \r, Connection: close, and
      Proxy-connection: close (example below).
2006-02-28 03:55:18 +00:00
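For context on the template fix above: HTTP/1.x terminates header lines
with CRLF ("\r\n"), not a bare "\n", and the two close headers tell the
client and any intermediary not to reuse the connection. An example header
block; the 504 status line is illustrative, not necessarily what the
templates use:

    class ErrorHeader {
        static final String HEADER =
            "HTTP/1.1 504 Gateway Timeout\r\n" +       // CRLF, not bare \n
            "Content-Type: text/html; charset=UTF-8\r\n" +
            "Connection: close\r\n" +                  // client: do not reuse
            "Proxy-connection: close\r\n" +            // proxy: do not reuse
            "\r\n";                                    // blank line ends headers
    }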
jrandom
589cbd675a * 2006-02-27 0.6.1.12 released
2006-02-27  jrandom
    * Adjust the jbigi.jar to use the athlon-optimized jbigi on windows/amd64
      machines, rather than the generic jbigi (until we have an athlon64
      optimized version)
2006-02-27 19:05:40 +00:00
jrandom
c486f5980a * 2006-02-27 0.6.1.12 released
2006-02-27  jrandom
    * Adjust the jbigi.jar to use the athlon-optimized jbigi on windows/amd64
      machines, rather than the generic jbigi (until we have an athlon64
      optimized version)
2006-02-27 18:51:31 +00:00
jrandom
eee21aa301 2006-02-26 jrandom
* Switch from the bouncycastle to the gnu-crypto implementation for
      SHA256, as benchmarks show a 10-30% speedup.
    * Removed some unnecessary object caches
    * Don't close i2psnark streams prematurely
2006-02-26 21:30:56 +00:00
zzz
a2854cf6f6 2006-02-25 zzz spelling fix 2006-02-25 21:51:46 +00:00
jrandom
62b7cf64da 2006-02-25 jrandom
* Made the Syndie permalinks in the thread view point to the blog view
    * Disabled TCP again (since the live net seems to be doing well w/out it)
    * Fix the message time on inbound SSU establishment (thanks zzz!)
    * Don't be so aggressive with parallel tunnel creation when a tunnel pool
      just starts up
2006-02-25 20:41:51 +00:00
jrandom
7b2a435aad 2006-02-24 jrandom
* Rounding calculation cleanup in the stats, and avoid an uncontested
      mutex (thanks ripple!)
    * SSU handshake cleanup to help force incompatible peers to stop nagging
      us by both not giving them an updated reference to us and by dropping
      future handshake packets from them.
2006-02-24 09:35:52 +00:00
jrandom
3d8d21e543 2006-02-23 jrandom
* Increase the SSU retransmit ceiling (for slow links)
    * Estimate the sender's SSU MTU (to help see if we agree)
2006-02-23 14:38:39 +00:00
jrandom
8b7958cff2 2006-02-22 jrandom
* Fix to properly profile tunnel joins (thanks Ragnarok, frosk, et al!)
    * More aggressive poor-man's PMTU, allowing larger MTUs on less reliable
      links
    * Further class validator refactorings
2006-02-23 08:08:37 +00:00
jrandom
7bb792836d 2006-02-22 jrandom
* Fix to properly profile tunnel joins (thanks Ragnarok, frosk, et al!)
    * More aggressive poor-man's PMTU, allowing larger MTUs on less reliable
      links
    * Further class validator refactorings
2006-02-23 01:48:47 +00:00
jrandom
03f509ca54 2006-02-22 jrandom
* Handle a rare race under high bandwidth situations in the SSU transport
    * Minor refactoring so we don't confuse sun's 1.6.0-b2 validator
2006-02-22 14:54:22 +00:00
complication
5f05631936 2006-02-21 Complication
* Reactivate TCP transport by default, in addition to re-allowing
2006-02-22 06:19:19 +00:00
zzz
5cfedd4c8b 2006-02-21 zzz update 2006-02-22 03:34:02 +00:00
zzz
269fec64a5 2006-02-21 zzz
announce 0.6.1.11
2006-02-21 20:12:14 +00:00
125 changed files with 5004 additions and 1848 deletions

File: Makefile (GCJ native build)

@@ -21,11 +21,12 @@ NATIVE_DIR=native
# router.jar: full I2P router
# jbigi.jar: collection of native optimized GMP routines for crypto
JAR_BASE=i2p.jar mstreaming.jar streaming.jar
JAR_CLIENTS=i2ptunnel.jar sam.jar i2psnark.jar
JAR_CLIENTS=i2ptunnel.jar sam.jar
JAR_ROUTER=router.jar
JAR_JBIGI=jbigi.jar
JAR_XML=xml-apis.jar resolver.jar xercesImpl.jar
JAR_CONSOLE=\
i2psnark.jar \
javax.servlet.jar \
commons-el.jar \
commons-logging.jar \
@@ -79,15 +80,15 @@ native_clean:
native_shared: libi2p.so
@cd build ; ${GCJ} ${OPTIMIZE} -fjni -L../${NATIVE_DIR} -li2p ${SYSTEM_PROPS} -o ../${NATIVE_DIR}/i2p_dsa --main=net.i2p.crypto.DSAEngine
@echo "* i2p_dsa is a simple test app with the DSA engine and Fortuna PRNG to make sure crypto is working"
@cd build ; ${GCJ} ${OPTIMIZE} -fjni -L../${NATIVE_DIR} -li2p ${SYSTEM_PROPS} -o ../${NATIVE_DIR}/prng --main=gnu.crypto.prng.Fortuna
@cd build ; ${GCJ} ${OPTIMIZE} -fjni -L../${NATIVE_DIR} -li2p ${SYSTEM_PROPS} -o ../${NATIVE_DIR}/prng --main=gnu.crypto.prng.FortunaStandalone
@cd build ; ${GCJ} ${OPTIMIZE} -fjni -L../${NATIVE_DIR} -li2p ${SYSTEM_PROPS} -o ../${NATIVE_DIR}/i2ptunnel --main=net.i2p.i2ptunnel.I2PTunnel
@echo "* i2ptunnel is mihi's I2PTunnel CLI"
@echo " run it as ./i2ptunnel -cli to avoid awt complaints"
@cd build ; ${GCJ} ${OPTIMIZE} -fjni -L../${NATIVE_DIR} -li2p ${SYSTEM_PROPS} -o ../${NATIVE_DIR}/i2ptunnelctl --main=net.i2p.i2ptunnel.TunnelControllerGroup
@echo "* i2ptunnelctl is a controller for I2PTunnel, reading i2ptunnel.config"
@echo " and launching the appropriate proxies"
@cd build ; ${GCJ} ${OPTIMIZE} -fjni -L../${NATIVE_DIR} -li2p ${SYSTEM_PROPS} -o ../${NATIVE_DIR}/i2psnark --main=org.klomp.snark.Snark
@echo "* i2psnark is an anonymous bittorrent client"
#@cd build ; ${GCJ} ${OPTIMIZE} -fjni -L../${NATIVE_DIR} -li2p ${SYSTEM_PROPS} -o ../${NATIVE_DIR}/i2psnark --main=org.klomp.snark.Snark
#@echo "* i2psnark is an anonymous bittorrent client"
@cd build ; ${GCJ} ${OPTIMIZE} -fjni -L../${NATIVE_DIR} -li2p ${SYSTEM_PROPS} -o ../${NATIVE_DIR}/i2prouter --main=net.i2p.router.Router
@echo "* i2prouter is the main I2P router"
@echo " it can be used, and while the router console won't load,"
@@ -95,6 +96,6 @@ native_shared: libi2p.so
libi2p.so:
@echo "* Building libi2p.so"
@(cd build ; ${GCJ} ${OPTIMIZE} -fPIC -fjni -shared -o ../${NATIVE_DIR}/libi2p.so ${LIBI2P_JARS} ; cd .. )
@(cd build ; time ${GCJ} ${OPTIMIZE} -fPIC -fjni -shared -o ../${NATIVE_DIR}/libi2p.so ${LIBI2P_JARS} ; cd .. )
@ls -l ${NATIVE_DIR}/libi2p.so
@echo "* libi2p.so built"

File: I2PSnarkUtil.java

@@ -10,6 +10,7 @@ import net.i2p.client.streaming.I2PSocket;
import net.i2p.client.streaming.I2PSocketManager;
import net.i2p.client.streaming.I2PSocketManagerFactory;
import net.i2p.util.Log;
import net.i2p.util.SimpleTimer;
import java.io.*;
import java.util.*;
@@ -31,6 +32,7 @@ public class I2PSnarkUtil {
private Map _opts;
private I2PSocketManager _manager;
private boolean _configured;
private Set _shitlist;
private I2PSnarkUtil() {
_context = I2PAppContext.getGlobalContext();
@@ -38,6 +40,7 @@ public class I2PSnarkUtil {
_opts = new HashMap();
setProxy("127.0.0.1", 4444);
setI2CPConfig("127.0.0.1", 7654, null);
_shitlist = new HashSet(64);
_configured = false;
}
@@ -93,9 +96,9 @@ public class I2PSnarkUtil {
if (opts.getProperty("i2p.streaming.inactivityTimeout") == null)
opts.setProperty("i2p.streaming.inactivityTimeout", "90000");
if (opts.getProperty("i2p.streaming.inactivityAction") == null)
opts.setProperty("i2p.streaming.inactivityAction", "1");
if (opts.getProperty("i2p.streaming.writeTimeout") == null)
opts.setProperty("i2p.streaming.writeTimeout", "90000");
opts.setProperty("i2p.streaming.inactivityAction", "2"); // 1 == disconnect, 2 == ping
//if (opts.getProperty("i2p.streaming.writeTimeout") == null)
// opts.setProperty("i2p.streaming.writeTimeout", "90000");
//if (opts.getProperty("i2p.streaming.readTimeout") == null)
// opts.setProperty("i2p.streaming.readTimeout", "120000");
_manager = I2PSocketManagerFactory.createManager(_i2cpHost, _i2cpPort, opts);
@@ -110,18 +113,36 @@ public class I2PSnarkUtil {
public void disconnect() {
I2PSocketManager mgr = _manager;
_manager = null;
_shitlist.clear();
mgr.destroySocketManager();
}
/** connect to the given destination */
I2PSocket connect(PeerID peer) throws IOException {
Hash dest = peer.getAddress().calculateHash();
synchronized (_shitlist) {
if (_shitlist.contains(dest))
throw new IOException("Not trying to contact " + dest.toBase64() + ", as they are shitlisted");
}
try {
return _manager.connect(peer.getAddress());
I2PSocket rv = _manager.connect(peer.getAddress());
if (rv != null) synchronized (_shitlist) { _shitlist.remove(dest); }
return rv;
} catch (I2PException ie) {
synchronized (_shitlist) {
_shitlist.add(dest);
}
SimpleTimer.getInstance().addEvent(new Unshitlist(dest), 10*60*1000);
throw new IOException("Unable to reach the peer " + peer + ": " + ie.getMessage());
}
}
private class Unshitlist implements SimpleTimer.TimedEvent {
private Hash _dest;
public Unshitlist(Hash dest) { _dest = dest; }
public void timeReached() { synchronized (_shitlist) { _shitlist.remove(_dest); } }
}
/**
* fetch the given URL, returning the file it is stored in, or null on error
*/

File: Snark.java

@@ -633,11 +633,11 @@ public class Snark
boolean allocating = false;
public void storageCreateFile(Storage storage, String name, long length)
{
if (allocating)
System.out.println(); // Done with last file.
//if (allocating)
// System.out.println(); // Done with last file.
System.out.print("Creating file '" + name
+ "' of length " + length + ": ");
//System.out.print("Creating file '" + name
// + "' of length " + length + ": ");
allocating = true;
}
@@ -647,10 +647,10 @@ public class Snark
public void storageAllocated(Storage storage, long length)
{
allocating = true;
System.out.print(".");
//System.out.print(".");
allocated += length;
if (allocated == meta.getTotalLength())
System.out.println(); // We have all the disk space we need.
//if (allocated == meta.getTotalLength())
// System.out.println(); // We have all the disk space we need.
}
boolean allChecked = false;
@@ -664,26 +664,21 @@ public class Snark
// Use the MetaInfo from the storage since our own might not
// yet be setup correctly.
MetaInfo meta = storage.getMetaInfo();
if (meta != null)
System.out.print("Checking existing "
+ meta.getPieces()
+ " pieces: ");
//if (meta != null)
// System.out.print("Checking existing "
// + meta.getPieces()
// + " pieces: ");
checking = true;
}
if (checking)
if (checked)
System.out.print("+");
else
System.out.print("-");
else
if (!checking)
Snark.debug("Got " + (checked ? "" : "BAD ") + "piece: " + num,
Snark.INFO);
}
public void storageAllChecked(Storage storage)
{
if (checking)
System.out.println();
//if (checking)
// System.out.println();
allChecked = true;
checking = false;
@@ -693,7 +688,7 @@ public class Snark
{
Snark.debug("Completely received " + torrent, Snark.INFO);
//storage.close();
System.out.println("Completely received: " + torrent);
//System.out.println("Completely received: " + torrent);
if (completeListener != null)
completeListener.torrentComplete(this);
}

File: SnarkManager.java

@@ -464,7 +464,7 @@ public class SnarkManager implements Snark.CompleteListener {
private static final String DEFAULT_TRACKERS[] = {
"Postman's tracker", "http://YRgrgTLGnbTq2aZOZDJQ~o6Uk5k6TK-OZtx0St9pb0G-5EGYURZioxqYG8AQt~LgyyI~NCj6aYWpPO-150RcEvsfgXLR~CxkkZcVpgt6pns8SRc3Bi-QSAkXpJtloapRGcQfzTtwllokbdC-aMGpeDOjYLd8b5V9Im8wdCHYy7LRFxhEtGb~RL55DA8aYOgEXcTpr6RPPywbV~Qf3q5UK55el6Kex-6VCxreUnPEe4hmTAbqZNR7Fm0hpCiHKGoToRcygafpFqDw5frLXToYiqs9d4liyVB-BcOb0ihORbo0nS3CLmAwZGvdAP8BZ7cIYE3Z9IU9D1G8JCMxWarfKX1pix~6pIA-sp1gKlL1HhYhPMxwyxvuSqx34o3BqU7vdTYwWiLpGM~zU1~j9rHL7x60pVuYaXcFQDR4-QVy26b6Pt6BlAZoFmHhPcAuWfu-SFhjyZYsqzmEmHeYdAwa~HojSbofg0TMUgESRXMw6YThK1KXWeeJVeztGTz25sL8AAAA.i2p/announce.php"
, "Orion's tracker", "http://gKik1lMlRmuroXVGTZ~7v4Vez3L3ZSpddrGZBrxVriosCQf7iHu6CIk8t15BKsj~P0JJpxrofeuxtm7SCUAJEr0AIYSYw8XOmp35UfcRPQWyb1LsxUkMT4WqxAT3s1ClIICWlBu5An~q-Mm0VFlrYLIPBWlUFnfPR7jZ9uP5ZMSzTKSMYUWao3ejiykr~mtEmyls6g-ZbgKZawa9II4zjOy-hdxHgP-eXMDseFsrym4Gpxvy~3Fv9TuiSqhpgm~UeTo5YBfxn6~TahKtE~~sdCiSydqmKBhxAQ7uT9lda7xt96SS09OYMsIWxLeQUWhns-C~FjJPp1D~IuTrUpAFcVEGVL-BRMmdWbfOJEcWPZ~CBCQSO~VkuN1ebvIOr9JBerFMZSxZtFl8JwcrjCIBxeKPBmfh~xYh16BJm1BBBmN1fp2DKmZ2jBNkAmnUbjQOqWvUcehrykWk5lZbE7bjJMDFH48v3SXwRuDBiHZmSbsTY6zhGY~GkMQHNGxPMMSIAAAA.i2p/bt"
, "Orion's tracker", "http://gKik1lMlRmuroXVGTZ~7v4Vez3L3ZSpddrGZBrxVriosCQf7iHu6CIk8t15BKsj~P0JJpxrofeuxtm7SCUAJEr0AIYSYw8XOmp35UfcRPQWyb1LsxUkMT4WqxAT3s1ClIICWlBu5An~q-Mm0VFlrYLIPBWlUFnfPR7jZ9uP5ZMSzTKSMYUWao3ejiykr~mtEmyls6g-ZbgKZawa9II4zjOy-hdxHgP-eXMDseFsrym4Gpxvy~3Fv9TuiSqhpgm~UeTo5YBfxn6~TahKtE~~sdCiSydqmKBhxAQ7uT9lda7xt96SS09OYMsIWxLeQUWhns-C~FjJPp1D~IuTrUpAFcVEGVL-BRMmdWbfOJEcWPZ~CBCQSO~VkuN1ebvIOr9JBerFMZSxZtFl8JwcrjCIBxeKPBmfh~xYh16BJm1BBBmN1fp2DKmZ2jBNkAmnUbjQOqWvUcehrykWk5lZbE7bjJMDFH48v3SXwRuDBiHZmSbsTY6zhGY~GkMQHNGxPMMSIAAAA.i2p/bt/announce.php"
// , "The freak's tracker", "http://mHKva9x24E5Ygfey2llR1KyQHv5f8hhMpDMwJDg1U-hABpJ2NrQJd6azirdfaR0OKt4jDlmP2o4Qx0H598~AteyD~RJU~xcWYdcOE0dmJ2e9Y8-HY51ie0B1yD9FtIV72ZI-V3TzFDcs6nkdX9b81DwrAwwFzx0EfNvK1GLVWl59Ow85muoRTBA1q8SsZImxdyZ-TApTVlMYIQbdI4iQRwU9OmmtefrCe~ZOf4UBS9-KvNIqUL0XeBSqm0OU1jq-D10Ykg6KfqvuPnBYT1BYHFDQJXW5DdPKwcaQE4MtAdSGmj1epDoaEBUa9btQlFsM2l9Cyn1hzxqNWXELmx8dRlomQLlV4b586dRzW~fLlOPIGC13ntPXogvYvHVyEyptXkv890jC7DZNHyxZd5cyrKC36r9huKvhQAmNABT2Y~pOGwVrb~RpPwT0tBuPZ3lHYhBFYmD8y~AOhhNHKMLzea1rfwTvovBMByDdFps54gMN1mX4MbCGT4w70vIopS9yAAAA.i2p/bytemonsoon/announce.php"
};

File: I2PSnarkServlet.java

@@ -127,7 +127,7 @@ public class I2PSnarkServlet extends HttpServlet {
}
} else if ( (newURL != null) && (newURL.trim().length() > "http://.i2p/".length()) ) {
_manager.addMessage("Fetching " + newURL);
I2PThread fetch = new I2PThread(new FetchAndAdd(newURL), "Fetch and add");
I2PThread fetch = new I2PThread(new FetchAndAdd(_manager, newURL), "Fetch and add");
fetch.start();
} else {
// no file or URL specified
@@ -267,56 +267,6 @@ public class I2PSnarkServlet extends HttpServlet {
}
}
private class FetchAndAdd implements Runnable {
private String _url;
public FetchAndAdd(String url) {
_url = url;
}
public void run() {
_url = _url.trim();
File file = I2PSnarkUtil.instance().get(_url, false);
try {
if ( (file != null) && (file.exists()) && (file.length() > 0) ) {
_manager.addMessage("Torrent fetched from " + _url);
FileInputStream in = null;
try {
in = new FileInputStream(file);
MetaInfo info = new MetaInfo(in);
String name = info.getName();
name = name.replace('/', '_');
name = name.replace('\\', '_');
name = name.replace('&', '+');
name = name.replace('\'', '_');
name = name.replace('"', '_');
name = name.replace('`', '_');
name = name + ".torrent";
File torrentFile = new File(_manager.getDataDir(), name);
String canonical = torrentFile.getCanonicalPath();
if (torrentFile.exists()) {
if (_manager.getTorrent(canonical) != null)
_manager.addMessage("Torrent already running: " + name);
else
_manager.addMessage("Torrent already in the queue: " + name);
} else {
FileUtil.copy(file.getAbsolutePath(), canonical, true);
_manager.addTorrent(canonical);
}
} catch (IOException ioe) {
_manager.addMessage("Torrent at " + _url + " was not valid: " + ioe.getMessage());
} finally {
try { in.close(); } catch (IOException ioe) {}
}
} else {
_manager.addMessage("Torrent was not retrieved from " + _url);
}
} finally {
if (file != null) file.delete();
}
}
}
private List getSortedSnarks(HttpServletRequest req) {
Set files = _manager.listTorrentFiles();
TreeSet fileNames = new TreeSet(files); // sorts it alphabetically
@@ -635,4 +585,57 @@ public class I2PSnarkServlet extends HttpServlet {
private static final String TABLE_FOOTER = "</table>\n";
private static final String FOOTER = "</body></html>";
}
}
class FetchAndAdd implements Runnable {
private SnarkManager _manager;
private String _url;
public FetchAndAdd(SnarkManager mgr, String url) {
_manager = mgr;
_url = url;
}
public void run() {
_url = _url.trim();
File file = I2PSnarkUtil.instance().get(_url, false);
try {
if ( (file != null) && (file.exists()) && (file.length() > 0) ) {
_manager.addMessage("Torrent fetched from " + _url);
FileInputStream in = null;
try {
in = new FileInputStream(file);
MetaInfo info = new MetaInfo(in);
String name = info.getName();
name = name.replace('/', '_');
name = name.replace('\\', '_');
name = name.replace('&', '+');
name = name.replace('\'', '_');
name = name.replace('"', '_');
name = name.replace('`', '_');
name = name + ".torrent";
File torrentFile = new File(_manager.getDataDir(), name);
String canonical = torrentFile.getCanonicalPath();
if (torrentFile.exists()) {
if (_manager.getTorrent(canonical) != null)
_manager.addMessage("Torrent already running: " + name);
else
_manager.addMessage("Torrent already in the queue: " + name);
} else {
FileUtil.copy(file.getAbsolutePath(), canonical, true);
_manager.addTorrent(canonical);
}
} catch (IOException ioe) {
_manager.addMessage("Torrent at " + _url + " was not valid: " + ioe.getMessage());
} finally {
try { in.close(); } catch (IOException ioe) {}
}
} else {
_manager.addMessage("Torrent was not retrieved from " + _url);
}
} finally {
if (file != null) file.delete();
}
}
}

File: I2PTunnelIRCClient.java

@@ -5,6 +5,7 @@ import java.net.Socket;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
import java.lang.IndexOutOfBoundsException;
import net.i2p.I2PAppContext;
import net.i2p.client.streaming.I2PSocket;
@@ -26,6 +27,8 @@ public class I2PTunnelIRCClient extends I2PTunnelClientBase implements Runnable
protected List dests;
private static final long DEFAULT_READ_TIMEOUT = 5*60*1000; // -1
protected long readTimeout = DEFAULT_READ_TIMEOUT;
/** this is the pong response the client expects for their last ping. at least, i hope so... */
private String _expectedPong;
/**
* @throws IllegalArgumentException if the I2PTunnel does not contain
@@ -43,6 +46,8 @@ public class I2PTunnelIRCClient extends I2PTunnelClientBase implements Runnable
l,
notifyThis,
"IRCHandler " + (++__clientId), tunnel);
_expectedPong = null;
StringTokenizer tok = new StringTokenizer(destinations, ",");
dests = new ArrayList(1);
@@ -146,6 +151,8 @@ public class I2PTunnelIRCClient extends I2PTunnelClientBase implements Runnable
break;
if(inmsg.endsWith("\r"))
inmsg=inmsg.substring(0,inmsg.length()-1);
if (_log.shouldLog(Log.DEBUG))
_log.debug("in: [" + inmsg + "]");
String outmsg = inboundFilter(inmsg);
if(outmsg!=null)
{
@@ -216,6 +223,8 @@ public class I2PTunnelIRCClient extends I2PTunnelClientBase implements Runnable
break;
if(inmsg.endsWith("\r"))
inmsg=inmsg.substring(0,inmsg.length()-1);
if (_log.shouldLog(Log.DEBUG))
_log.debug("out: [" + inmsg + "]");
String outmsg = outboundFilter(inmsg);
if(outmsg!=null)
{
@@ -255,7 +264,7 @@ public class I2PTunnelIRCClient extends I2PTunnelClientBase implements Runnable
*
*/
public static String inboundFilter(String s) {
public String inboundFilter(String s) {
String field[]=s.split(" ",4);
String command;
@@ -263,8 +272,8 @@ public class I2PTunnelIRCClient extends I2PTunnelClientBase implements Runnable
final String[] allowedCommands =
{
"NOTICE",
"PING",
"PONG",
//"PING",
//"PONG",
"MODE",
"JOIN",
"NICK",
@@ -277,9 +286,14 @@ public class I2PTunnelIRCClient extends I2PTunnelClientBase implements Runnable
if(field[0].charAt(0)==':')
idx++;
command = field[idx++];
try { command = field[idx++]; }
catch (IndexOutOfBoundsException ioobe) // wtf, server sent borked command?
{
_log.warn("Dropping defective message: index out of bounds while extracting command.");
return null;
}
idx++; //skip victim
// Allow numerical responses
@@ -287,6 +301,21 @@ public class I2PTunnelIRCClient extends I2PTunnelClientBase implements Runnable
new Integer(command);
return s;
} catch(NumberFormatException nfe){}
if ("PING".equals(command))
return "PING 127.0.0.1"; // no way to know what the ircd to i2ptunnel server con is, so localhost works
if ("PONG".equals(command)) {
// Turn the received ":irc.freshcoffee.i2p PONG irc.freshcoffee.i2p :127.0.0.1"
// into ":127.0.0.1 PONG 127.0.0.1 " so that the caller can append the client's extra parameter
// though, does 127.0.0.1 work for irc clients connecting remotely? and for all of them? sure would
// be great if irc clients actually followed the RFCs here, but i guess thats too much to ask.
// If we haven't PINGed them, or the PING we sent isn't something we know how to filter, this
// is null.
String pong = _expectedPong;
_expectedPong = null;
return pong;
}
// Allow all allowedCommands
for(int i=0;i<allowedCommands.length;i++) {
@@ -318,14 +347,13 @@ public class I2PTunnelIRCClient extends I2PTunnelClientBase implements Runnable
return null;
}
public static String outboundFilter(String s) {
public String outboundFilter(String s) {
String field[]=s.split(" ",3);
String command;
final String[] allowedCommands =
{
"NOTICE",
"PONG",
"MODE",
"JOIN",
"NICK",
@@ -339,7 +367,8 @@ public class I2PTunnelIRCClient extends I2PTunnelClientBase implements Runnable
"MAP", // seems safe enough, the ircd should protect themselves though
"PART",
"OPER",
"PING",
// "PONG", // replaced with a filtered PING/PONG since some clients send the server IP (thanks aardvax!)
// "PING",
"KICK",
"HELPME",
"RULES",
@@ -355,6 +384,43 @@ public class I2PTunnelIRCClient extends I2PTunnelClientBase implements Runnable
command = field[0].toUpperCase();
if ("PING".equals(command)) {
// Most clients just send a PING and are happy with any old PONG. Others,
// like BitchX, actually expect certain behavior. It sends two different pings:
// "PING :irc.freshcoffee.i2p" and "PING 1234567890 127.0.0.1" (where the IP is the proxy)
// the PONG to the former seems to be "PONG 127.0.0.1", while the PONG to the later is
// ":irc.freshcoffee.i2p PONG irc.freshcoffe.i2p :1234567890".
// We don't want to send them our proxy's IP address, so we need to rewrite the PING
// sent to the server, but when we get a PONG back, use what we expected, rather than
// what they sent.
//
// Yuck.
String rv = null;
if (field.length == 1) { // PING
rv = "PING";
_expectedPong = "PONG 127.0.0.1";
} else if (field.length == 2) { // PING nonce
rv = "PING " + field[1];
_expectedPong = "PONG " + field[1];
} else if (field.length == 3) { // PING nonce serverLocation
rv = "PING " + field[1];
_expectedPong = "PONG " + field[1];
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("IRC client sent a PING we don't understand, filtering it (\"" + s + "\")");
rv = null;
_expectedPong = null;
}
if (_log.shouldLog(Log.WARN))
_log.warn("sending ping " + rv + ", waiting for " + _expectedPong + " orig was [" + s + "]");
return rv;
}
if ("PONG".equals(command))
return "PONG 127.0.0.1"; // no way to know what the ircd to i2ptunnel server con is, so localhost works
// Allow all allowedCommands
for(int i=0;i<allowedCommands.length;i++)
{

Binary file not shown (likely the bundled jrobin-1.4.0.jar referenced in the build.xml changes below).

File: build.xml (routerconsole)

@@ -25,6 +25,7 @@
<pathelement location="../../systray/java/build/systray.jar" />
<pathelement location="../../systray/java/lib/systray4j.jar" />
<pathelement location="../../../installer/lib/wrapper/win32/wrapper.jar" /> <!-- we dont care if we're not on win32 -->
<pathelement location="../../jrobin/jrobin-1.4.0.jar" />
</classpath>
</javac>
</target>
@@ -34,6 +35,12 @@
<attribute name="Class-Path" value="i2p.jar router.jar" />
</manifest>
</jar>
<delete dir="./tmpextract" />
<unjar src="../../jrobin/jrobin-1.4.0.jar" dest="./tmpextract" />
<jar destfile="./build/routerconsole.jar" basedir="./tmpextract" update="true" />
<delete dir="./tmpextract" />
<ant target="war" />
</target>
<target name="war" depends="precompilejsp">
@@ -60,6 +67,7 @@
<pathelement location="../../systray/java/lib/systray4j.jar" />
<pathelement location="../../../installer/lib/wrapper/win32/wrapper.jar" />
<pathelement location="build/routerconsole.jar" />
<pathelement location="build/" />
<pathelement location="../../../router/java/build/router.jar" />
<pathelement location="../../../core/java/build/i2p.jar" />
</classpath>
@@ -86,6 +94,7 @@
<pathelement location="../../systray/java/lib/systray4j.jar" />
<pathelement location="../../../installer/lib/wrapper/win32/wrapper.jar" />
<pathelement location="build/routerconsole.jar" />
<pathelement location="build" />
<pathelement location="../../../router/java/build/router.jar" />
<pathelement location="../../../core/java/build/i2p.jar" />
</classpath>

File: ConfigNetHandler.java

@@ -30,7 +30,6 @@ import net.i2p.router.web.ConfigServiceHandler.UpdateWrapperManagerAndRekeyTask;
*/
public class ConfigNetHandler extends FormHandler {
private String _hostname;
private boolean _guessRequested;
private boolean _reseedRequested;
private boolean _saveRequested;
private boolean _recheckReachabilityRequested;
@@ -52,9 +51,7 @@ public class ConfigNetHandler extends FormHandler {
private boolean _ratesOnly;
protected void processForm() {
if (_guessRequested) {
guessHostname();
} else if (_reseedRequested) {
if (_reseedRequested) {
reseed();
} else if (_saveRequested || ( (_action != null) && ("Save changes".equals(_action)) )) {
saveChanges();
@@ -65,7 +62,6 @@ public class ConfigNetHandler extends FormHandler {
}
}
public void setGuesshost(String moo) { _guessRequested = true; }
public void setReseed(String moo) { _reseedRequested = true; }
public void setSave(String moo) { _saveRequested = true; }
public void setEnabletimesync(String moo) { _timeSyncEnabled = true; }
@@ -110,37 +106,7 @@ public class ConfigNetHandler extends FormHandler {
_sharePct = (pct != null ? pct.trim() : null);
}
private static final String IP_PREFIX = "<h1>Your IP is ";
private static final String IP_SUFFIX = " <br></h1>";
private void guessHostname() {
BufferedReader reader = null;
try {
URL url = new URL("http://www.whatismyip.com/");
URLConnection con = url.openConnection();
con.connect();
reader = new BufferedReader(new InputStreamReader(con.getInputStream()));
String line = null;
while ( (line = reader.readLine()) != null) {
if (line.startsWith(IP_PREFIX)) {
int end = line.indexOf(IP_SUFFIX);
if (end == -1) {
addFormError("Unable to guess the host (BAD_SUFFIX)");
return;
}
String ip = line.substring(IP_PREFIX.length(), end);
addFormNotice("Host guess: " + ip);
return;
}
}
addFormError("Unable to guess the host (NO_PREFIX)");
} catch (IOException ioe) {
addFormError("Unable to guess the host (IO_ERROR)");
_context.logManager().getLog(ConfigNetHandler.class).error("Unable to guess the host", ioe);
} finally {
if (reader != null) try { reader.close(); } catch (IOException ioe) {}
}
}
private static final String DEFAULT_SEED_URL = ReseedHandler.DEFAULT_SEED_URL;
/**
* Reseed has been requested, so lets go ahead and do it. Fetch all of
@@ -284,7 +250,7 @@ public class ConfigNetHandler extends FormHandler {
// If hidden mode value changes, restart is required
if (_hiddenMode && "false".equalsIgnoreCase(_context.getProperty(Router.PROP_HIDDEN, "false"))) {
_context.router().setConfigSetting(Router.PROP_HIDDEN, "true");
_context.router().getRouterInfo().addCapability(RouterInfo.CAPABILITY_HIDDEN);
_context.router().addCapabilities(_context.router().getRouterInfo());
addFormNotice("Gracefully restarting into Hidden Router Mode. Make sure you have no 0-1 length "
+ "<a href=\"configtunnels.jsp\">tunnels!</a>");
hiddenSwitch();

File: GraphHelper.java (new file)

@@ -0,0 +1,122 @@
package net.i2p.router.web;
import java.io.IOException;
import java.io.Writer;
import java.util.*;
import net.i2p.data.DataHelper;
import net.i2p.stat.Rate;
import net.i2p.router.RouterContext;
public class GraphHelper {
private RouterContext _context;
private Writer _out;
private int _periodCount;
private boolean _showEvents;
private int _width;
private int _height;
private int _refreshDelaySeconds;
/**
* Configure this bean to query a particular router context
*
* @param contextId begging few characters of the routerHash, or null to pick
* the first one we come across.
*/
public void setContextId(String contextId) {
try {
_context = ContextHelper.getContext(contextId);
} catch (Throwable t) {
t.printStackTrace();
}
}
public GraphHelper() {
_periodCount = 60; // SummaryListener.PERIODS;
_showEvents = false;
_width = 250;
_height = 100;
_refreshDelaySeconds = 60;
}
public void setOut(Writer out) { _out = out; }
public void setPeriodCount(String str) {
try { _periodCount = Integer.parseInt(str); } catch (NumberFormatException nfe) {}
}
public void setShowEvents(boolean b) { _showEvents = b; }
public void setHeight(String str) {
try { _height = Integer.parseInt(str); } catch (NumberFormatException nfe) {}
}
public void setWidth(String str) {
try { _width = Integer.parseInt(str); } catch (NumberFormatException nfe) {}
}
public void setRefreshDelay(String str) {
try { _refreshDelaySeconds = Integer.parseInt(str); } catch (NumberFormatException nfe) {}
}
public String getImages() {
try {
_out.write("<img src=\"viewstat.jsp?stat=bw.combined"
+ "&amp;periodCount=" + _periodCount
+ "&amp;width=" + _width
+ "&amp;height=" + _height
+ "\" title=\"Combined bandwidth graph\" />\n");
List listeners = StatSummarizer.instance().getListeners();
TreeSet ordered = new TreeSet(new AlphaComparator());
ordered.addAll(listeners);
for (Iterator iter = ordered.iterator(); iter.hasNext(); ) {
SummaryListener lsnr = (SummaryListener)iter.next();
Rate r = lsnr.getRate();
String title = r.getRateStat().getName() + " for " + DataHelper.formatDuration(_periodCount * r.getPeriod());
_out.write("<img src=\"viewstat.jsp?stat=" + r.getRateStat().getName()
+ "&amp;showEvents=" + _showEvents
+ "&amp;period=" + r.getPeriod()
+ "&amp;periodCount=" + _periodCount
+ "&amp;width=" + _width
+ "&amp;height=" + _height
+ "\" title=\"" + title + "\" />\n");
}
if (_refreshDelaySeconds > 0)
_out.write("<meta http-equiv=\"refresh\" content=\"" + _refreshDelaySeconds + "\" />\n");
} catch (IOException ioe) {
ioe.printStackTrace();
}
return "";
}
public String getForm() {
try {
_out.write("<form action=\"graphs.jsp\" method=\"GET\">");
_out.write("Periods: <input size=\"3\" type=\"text\" name=\"periodCount\" value=\"" + _periodCount + "\" /><br />\n");
_out.write("Plot averages: <input type=\"radio\" name=\"showEvents\" value=\"false\" " + (_showEvents ? "" : "checked=\"true\" ") + " /> ");
_out.write("or plot events: <input type=\"radio\" name=\"showEvents\" value=\"true\" "+ (_showEvents ? "checked=\"true\" " : "") + " /><br />\n");
_out.write("Image sizes: width: <input size=\"4\" type=\"text\" name=\"width\" value=\"" + _width
+ "\" /> pixels, height: <input size=\"4\" type=\"text\" name=\"height\" value=\"" + _height
+ "\" /><br />\n");
_out.write("Refresh delay: <select name=\"refreshDelay\"><option value=\"60\">1 minute</option><option value=\"120\">2 minutes</option><option value=\"300\">5 minutes</option><option value=\"600\">10 minutes</option><option value=\"-1\">Never</option></select><br />\n");
_out.write("<input type=\"submit\" value=\"Redraw\" />");
} catch (IOException ioe) {
ioe.printStackTrace();
}
return "";
}
public String getPeerSummary() {
try {
_context.commSystem().renderStatusHTML(_out);
_context.bandwidthLimiter().renderStatusHTML(_out);
} catch (IOException ioe) {
ioe.printStackTrace();
}
return "";
}
}
class AlphaComparator implements Comparator {
public int compare(Object lhs, Object rhs) {
SummaryListener l = (SummaryListener)lhs;
SummaryListener r = (SummaryListener)rhs;
String lName = l.getRate().getRateStat().getName() + "." + l.getRate().getPeriod();
String rName = r.getRate().getRateStat().getName() + "." + r.getRate().getPeriod();
return lName.compareTo(rName);
}
}

File: RouterConsoleRunner.java

@@ -25,6 +25,7 @@ public class RouterConsoleRunner {
static {
System.setProperty("org.mortbay.http.Version.paranoid", "true");
System.setProperty("java.awt.headless", "true");
}
public RouterConsoleRunner(String args[]) {
@@ -95,6 +96,10 @@ public class RouterConsoleRunner {
I2PThread t = new I2PThread(fetcher, "NewsFetcher");
t.setDaemon(true);
t.start();
I2PThread st = new I2PThread(new StatSummarizer(), "StatSummarizer");
st.setDaemon(true);
st.start();
}
private void initialize(WebApplicationContext context) {

File: StatSummarizer.java (new file)

@@ -0,0 +1,230 @@
package net.i2p.router.web;
import java.io.*;
import java.util.*;
import net.i2p.stat.*;
import net.i2p.router.*;
import net.i2p.util.Log;
import java.awt.Color;
import org.jrobin.graph.RrdGraph;
import org.jrobin.graph.RrdGraphDef;
import org.jrobin.graph.RrdGraphDefTemplate;
import org.jrobin.core.RrdException;
/**
*
*/
public class StatSummarizer implements Runnable {
private RouterContext _context;
private Log _log;
/** list of SummaryListener instances */
private List _listeners;
private static StatSummarizer _instance;
public StatSummarizer() {
_context = (RouterContext)RouterContext.listContexts().get(0); // fuck it, only summarize one per jvm
_log = _context.logManager().getLog(getClass());
_listeners = new ArrayList(16);
_instance = this;
}
public static StatSummarizer instance() { return _instance; }
public void run() {
String specs = "";
while (_context.router().isAlive()) {
specs = adjustDatabases(specs);
try { Thread.sleep(60*1000); } catch (InterruptedException ie) {}
}
}
/** list of SummaryListener instances */
List getListeners() { return _listeners; }
private static final String DEFAULT_DATABASES = "bw.sendRate.60000" +
",bw.recvRate.60000" +
",tunnel.testSuccessTime.60000" +
",udp.outboundActiveCount.60000" +
",udp.receivePacketSize.60000" +
",udp.receivePacketSkew.60000" +
",udp.sendConfirmTime.60000" +
",udp.sendPacketSize.60000" +
",router.activePeers.60000" +
",router.activeSendPeers.60000" +
",tunnel.acceptLoad.60000" +
",tunnel.dropLoadProactive.60000" +
",tunnel.buildExploratorySuccess.60000" +
",tunnel.buildExploratoryReject.60000" +
",tunnel.buildExploratoryExpire.60000" +
",client.sendAckTime.60000" +
",client.dispatchNoACK.60000" +
",transport.sendMessageFailureLifetime.60000" +
",transport.sendProcessingTime.60000";
private String adjustDatabases(String oldSpecs) {
String spec = _context.getProperty("stat.summaries", DEFAULT_DATABASES);
if ( ( (spec == null) && (oldSpecs == null) ) ||
( (spec != null) && (oldSpecs != null) && (oldSpecs.equals(spec))) )
return oldSpecs;
List old = parseSpecs(oldSpecs);
List newSpecs = parseSpecs(spec);
// remove old ones
for (int i = 0; i < old.size(); i++) {
Rate r = (Rate)old.get(i);
if (!newSpecs.contains(r))
removeDb(r);
}
// add new ones
StringBuffer buf = new StringBuffer();
for (int i = 0; i < newSpecs.size(); i++) {
Rate r = (Rate)newSpecs.get(i);
if (!old.contains(r))
addDb(r);
buf.append(r.getRateStat().getName()).append(".").append(r.getPeriod());
if (i + 1 < newSpecs.size())
buf.append(',');
}
return buf.toString();
}
private void removeDb(Rate r) {
for (int i = 0; i < _listeners.size(); i++) {
SummaryListener lsnr = (SummaryListener)_listeners.get(i);
if (lsnr.getRate().equals(r)) {
_listeners.remove(i);
lsnr.stopListening();
return;
}
}
}
private void addDb(Rate r) {
SummaryListener lsnr = new SummaryListener(r);
_listeners.add(lsnr);
lsnr.startListening();
//System.out.println("Start listening for " + r.getRateStat().getName() + ": " + r.getPeriod());
}
public boolean renderPng(Rate rate, OutputStream out) throws IOException {
return renderPng(rate, out, -1, -1, false, false, false, false, -1, true);
}
public boolean renderPng(Rate rate, OutputStream out, int width, int height, boolean hideLegend, boolean hideGrid, boolean hideTitle, boolean showEvents, int periodCount, boolean showCredit) throws IOException {
for (int i = 0; i < _listeners.size(); i++) {
SummaryListener lsnr = (SummaryListener)_listeners.get(i);
if (lsnr.getRate().equals(rate)) {
lsnr.renderPng(out, width, height, hideLegend, hideGrid, hideTitle, showEvents, periodCount, showCredit);
return true;
}
}
return false;
}
public boolean renderPng(OutputStream out, String templateFilename) throws IOException {
SummaryRenderer.render(_context, out, templateFilename);
return true;
}
public boolean getXML(Rate rate, OutputStream out) throws IOException {
for (int i = 0; i < _listeners.size(); i++) {
SummaryListener lsnr = (SummaryListener)_listeners.get(i);
if (lsnr.getRate().equals(rate)) {
lsnr.getData().exportXml(out);
out.write(("<!-- Rate: " + lsnr.getRate().getRateStat().getName() + " for period " + lsnr.getRate().getPeriod() + " -->\n").getBytes());
out.write(("<!-- Average data soure name: " + lsnr.getName() + " event count data source name: " + lsnr.getEventName() + " -->\n").getBytes());
return true;
}
}
return false;
}
public boolean renderRatePng(OutputStream out, int width, int height, boolean hideLegend, boolean hideGrid, boolean hideTitle, boolean showEvents, int periodCount, boolean showCredit) throws IOException {
long end = _context.clock().now();
if (periodCount <= 0) periodCount = SummaryListener.PERIODS;
if (periodCount > SummaryListener.PERIODS)
periodCount = SummaryListener.PERIODS;
long period = 60*1000;
long start = end - period*periodCount;
long begin = System.currentTimeMillis();
try {
RrdGraphDef def = new RrdGraphDef();
def.setTimePeriod(start/1000, end/1000);
String title = "Bandwidth usage";
if (!hideTitle)
def.setTitle(title);
String sendName = SummaryListener.createName(_context, "bw.sendRate.60000");
String recvName = SummaryListener.createName(_context, "bw.recvRate.60000");
def.datasource(sendName, sendName, sendName, "AVERAGE", "MEMORY");
def.datasource(recvName, recvName, recvName, "AVERAGE", "MEMORY");
def.area(sendName, Color.BLUE, "Outbound bytes/second");
//def.line(sendName, Color.BLUE, "Outbound bytes/second", 3);
//def.line(recvName, Color.RED, "Inbound bytes/second@r", 3);
def.area(recvName, Color.RED, "Inbound bytes/second@r");
if (!hideLegend) {
def.gprint(sendName, "AVERAGE", "outbound average: @2@sbytes/second");
def.gprint(sendName, "MAX", " max: @2@sbytes/second@r");
def.gprint(recvName, "AVERAGE", "inbound average: @2bytes/second@s");
def.gprint(recvName, "MAX", " max: @2@sbytes/second@r");
}
if (!showCredit)
def.setShowSignature(false);
if (hideLegend)
def.setShowLegend(false);
if (hideGrid) {
def.setGridX(false);
def.setGridY(false);
}
//System.out.println("rendering: path=" + path + " dsNames[0]=" + dsNames[0] + " dsNames[1]=" + dsNames[1] + " lsnr.getName=" + _listener.getName());
def.setAntiAliasing(false);
//System.out.println("Rendering: \n" + def.exportXmlTemplate());
//System.out.println("*****************\nData: \n" + _listener.getData().dump());
RrdGraph graph = new RrdGraph(def);
//System.out.println("Graph created");
byte data[] = null;
if ( (width <= 0) || (height <= 0) )
data = graph.getPNGBytes();
else
data = graph.getPNGBytes(width, height);
long timeToPlot = System.currentTimeMillis() - begin;
out.write(data);
//File t = File.createTempFile("jrobinData", ".xml");
//_listener.getData().dumpXml(new FileOutputStream(t));
//System.out.println("plotted: " + (data != null ? data.length : 0) + " bytes in " + timeToPlot
// ); // + ", data written to " + t.getAbsolutePath());
return true;
} catch (RrdException re) {
_log.error("Error rendering", re);
throw new IOException("Error plotting: " + re.getMessage());
} catch (IOException ioe) {
_log.error("Error rendering", ioe);
throw ioe;
}
}
/**
* @param specs statName.period,statName.period,statName.period
* @return list of Rate objects
*/
private List parseSpecs(String specs) {
StringTokenizer tok = new StringTokenizer(specs, ",");
List rv = new ArrayList();
while (tok.hasMoreTokens()) {
String spec = tok.nextToken();
int split = spec.lastIndexOf('.');
if ( (split <= 0) || (split + 1 >= spec.length()) )
continue;
String name = spec.substring(0, split);
String per = spec.substring(split+1);
long period = -1;
try {
period = Long.parseLong(per);
RateStat rs = _context.statManager().getRate(name);
if (rs != null) {
Rate r = rs.getRate(period);
if (r != null)
rv.add(r);
}
} catch (NumberFormatException nfe) {}
}
return rv;
}
}

File: SummaryHelper.java

@@ -213,11 +213,11 @@ public class SummaryHelper {
}
/**
* How fast we have been receiving data over the last minute (pretty printed
* How fast we have been receiving data over the last second (pretty printed
* string with 2 decimal places representing the KBps)
*
*/
public String getInboundMinuteKBps() {
public String getInboundSecondKBps() {
if (_context == null)
return "0.0";
double kbps = _context.bandwidthLimiter().getReceiveBps()/1024d;
@@ -225,11 +225,11 @@ public class SummaryHelper {
return fmt.format(kbps);
}
/**
* How fast we have been sending data over the last minute (pretty printed
* How fast we have been sending data over the last second (pretty printed
* string with 2 decimal places representing the KBps)
*
*/
public String getOutboundMinuteKBps() {
public String getOutboundSecondKBps() {
if (_context == null)
return "0.0";
double kbps = _context.bandwidthLimiter().getSendBps()/1024d;
@@ -493,6 +493,13 @@ public class SummaryHelper {
return _context.throttle().getTunnelLag() + "ms";
}
public String getInboundBacklog() {
if (_context == null)
return "0";
return String.valueOf(_context.tunnelManager().getInboundBuildQueueSize());
}
public boolean updateAvailable() {
return NewsFetcher.getInstance(_context).updateAvailable();
}

File: SummaryListener.java (new file)

@@ -0,0 +1,250 @@
package net.i2p.router.web;
import java.io.*;
import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.stat.Rate;
import net.i2p.stat.RateStat;
import net.i2p.stat.RateSummaryListener;
import net.i2p.util.Log;
import org.jrobin.core.RrdDb;
import org.jrobin.core.RrdDef;
import org.jrobin.core.RrdBackendFactory;
import org.jrobin.core.RrdMemoryBackendFactory;
import org.jrobin.core.Sample;
import java.awt.Color;
import org.jrobin.graph.RrdGraph;
import org.jrobin.graph.RrdGraphDef;
import org.jrobin.graph.RrdGraphDefTemplate;
import org.jrobin.core.RrdException;
class SummaryListener implements RateSummaryListener {
private I2PAppContext _context;
private Log _log;
private Rate _rate;
private String _name;
private String _eventName;
private RrdDb _db;
private Sample _sample;
private RrdMemoryBackendFactory _factory;
private SummaryRenderer _renderer;
static final int PERIODS = 1440;
static {
try {
RrdBackendFactory.setDefaultFactory("MEMORY");
} catch (RrdException re) {
re.printStackTrace();
}
}
public SummaryListener(Rate r) {
_context = I2PAppContext.getGlobalContext();
_rate = r;
_log = _context.logManager().getLog(SummaryListener.class);
}
public void add(double totalValue, long eventCount, double totalEventTime, long period) {
long now = now();
long when = now / 1000;
//System.out.println("add to " + getRate().getRateStat().getName() + " on " + System.currentTimeMillis() + " / " + now + " / " + when);
if (_db != null) {
// add one value to the db (the average value for the period)
try {
_sample.setTime(when);
double val = eventCount > 0 ? (totalValue / (double)eventCount) : 0d;
_sample.setValue(_name, val);
_sample.setValue(_eventName, eventCount);
//_sample.setValue(0, val);
//_sample.setValue(1, eventCount);
_sample.update();
//String names[] = _sample.getDsNames();
//System.out.println("Add " + val + " over " + eventCount + " for " + _name
// + " [" + names[0] + ", " + names[1] + "]");
} catch (IOException ioe) {
_log.error("Error adding", ioe);
} catch (RrdException re) {
_log.error("Error adding", re);
}
}
}
/**
* JRobin can only deal with 20 character data source names, so we need to create a unique,
* munged version from the user/developer-visible name.
*
*/
static String createName(I2PAppContext ctx, String wanted) {
return ctx.sha().calculateHash(DataHelper.getUTF8(wanted)).toBase64().substring(0,20);
}
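    // (Illustration only, not in the original file: for example,
    //  createName(ctx, "bw.recvRate.60000") SHA-256 hashes the string and keeps
    //  the first 20 Base64 characters, yielding a stable datasource name that
    //  fits JRobin's 20-character limit.)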
public Rate getRate() { return _rate; }
public void startListening() {
RateStat rs = _rate.getRateStat();
long period = _rate.getPeriod();
String baseName = rs.getName() + "." + period;
_name = createName(_context, baseName);
_eventName = createName(_context, baseName + ".events");
try {
RrdDef def = new RrdDef(_name, now()/1000, period/1000);
// for info on the heartbeat, xff, steps, etc., see the rrdcreate man page, aka
// http://www.jrobin.org/support/man/rrdcreate.html
long heartbeat = period*10/1000;
def.addDatasource(_name, "GAUGE", heartbeat, Double.NaN, Double.NaN);
def.addDatasource(_eventName, "GAUGE", heartbeat, 0, Double.NaN);
double xff = 0.9;
int steps = 1;
int rows = PERIODS;
def.addArchive("AVERAGE", xff, steps, rows);
_factory = (RrdMemoryBackendFactory)RrdBackendFactory.getDefaultFactory();
_db = new RrdDb(def, _factory);
_sample = _db.createSample();
_renderer = new SummaryRenderer(_context, this);
_rate.setSummaryListener(this);
} catch (RrdException re) {
_log.error("Error starting", re);
} catch (IOException ioe) {
_log.error("Error starting", ioe);
}
}
public void stopListening() {
if (_db == null) return;
try {
_db.close();
} catch (IOException ioe) {
_log.error("Error closing", ioe);
}
_rate.setSummaryListener(null);
_factory.delete(_db.getPath());
_db = null;
}
public void renderPng(OutputStream out, int width, int height, boolean hideLegend, boolean hideGrid, boolean hideTitle, boolean showEvents, int periodCount, boolean showCredit) throws IOException {
_renderer.render(out, width, height, hideLegend, hideGrid, hideTitle, showEvents, periodCount, showCredit);
}
public void renderPng(OutputStream out) throws IOException { _renderer.render(out); }
String getName() { return _name; }
String getEventName() { return _eventName; }
RrdDb getData() { return _db; }
long now() { return _context.clock().now(); }
public boolean equals(Object obj) {
return ((obj instanceof SummaryListener) && ((SummaryListener)obj)._rate.equals(_rate));
}
public int hashCode() { return _rate.hashCode(); }
}
class SummaryRenderer {
private Log _log;
private SummaryListener _listener;
public SummaryRenderer(I2PAppContext ctx, SummaryListener lsnr) {
_log = ctx.logManager().getLog(SummaryRenderer.class);
_listener = lsnr;
}
/**
* Render the stats as determined by the specified JRobin XML config,
* but note that this doesn't work on stock JVMs, as it requires
* DOM level 3 load and store support. Perhaps we can bundle that, or
* specify who can get it from where, etc.
*
*/
public static synchronized void render(I2PAppContext ctx, OutputStream out, String filename) throws IOException {
long end = ctx.clock().now();
long start = end - 60*1000*SummaryListener.PERIODS;
long begin = System.currentTimeMillis();
try {
RrdGraphDefTemplate template = new RrdGraphDefTemplate(filename);
RrdGraphDef def = template.getRrdGraphDef();
def.setTimePeriod(start/1000, end/1000); // ignore the periods in the template
RrdGraph graph = new RrdGraph(def);
byte img[] = graph.getPNGBytes();
out.write(img);
} catch (RrdException re) {
//_log.error("Error rendering " + filename, re);
throw new IOException("Error plotting: " + re.getMessage());
} catch (IOException ioe) {
//_log.error("Error rendering " + filename, ioe);
throw ioe;
}
}
public void render(OutputStream out) throws IOException { render(out, -1, -1, false, false, false, false, -1, true); }
public void render(OutputStream out, int width, int height, boolean hideLegend, boolean hideGrid, boolean hideTitle, boolean showEvents, int periodCount, boolean showCredit) throws IOException {
long end = _listener.now();
if (periodCount <= 0) periodCount = SummaryListener.PERIODS;
if (periodCount > SummaryListener.PERIODS)
periodCount = SummaryListener.PERIODS;
long start = end - _listener.getRate().getPeriod()*periodCount;
long begin = System.currentTimeMillis();
try {
RrdGraphDef def = new RrdGraphDef();
def.setTimePeriod(start/1000, end/1000);
String title = _listener.getRate().getRateStat().getName() + " averaged for "
+ DataHelper.formatDuration(_listener.getRate().getPeriod());
if (!hideTitle)
def.setTitle(title);
String path = _listener.getData().getPath();
String dsNames[] = _listener.getData().getDsNames();
String plotName = null;
String descr = null;
if (showEvents) {
// include the average event count on the plot
plotName = dsNames[1];
descr = "Events per period";
} else {
// include the average value
plotName = dsNames[0];
descr = _listener.getRate().getRateStat().getDescription();
}
def.datasource(plotName, path, plotName, "AVERAGE", "MEMORY");
def.area(plotName, Color.BLUE, descr + "@r");
if (!hideLegend) {
def.gprint(plotName, "AVERAGE", "average: @2@s");
def.gprint(plotName, "MAX", " max: @2@s@r");
}
if (!showCredit)
def.setShowSignature(false);
/*
// these four lines set up a graph plotting both values and events on the same chart
// (but with the same coordinates, so the values may look pretty skewed)
def.datasource(dsNames[0], path, dsNames[0], "AVERAGE", "MEMORY");
def.datasource(dsNames[1], path, dsNames[1], "AVERAGE", "MEMORY");
def.area(dsNames[0], Color.BLUE, _listener.getRate().getRateStat().getDescription());
def.line(dsNames[1], Color.RED, "Events per period");
*/
if (hideLegend)
def.setShowLegend(false);
if (hideGrid) {
def.setGridX(false);
def.setGridY(false);
}
//System.out.println("rendering: path=" + path + " dsNames[0]=" + dsNames[0] + " dsNames[1]=" + dsNames[1] + " lsnr.getName=" + _listener.getName());
def.setAntiAliasing(false);
//System.out.println("Rendering: \n" + def.exportXmlTemplate());
//System.out.println("*****************\nData: \n" + _listener.getData().dump());
RrdGraph graph = new RrdGraph(def);
//System.out.println("Graph created");
byte data[] = null;
if ( (width <= 0) || (height <= 0) )
data = graph.getPNGBytes();
else
data = graph.getPNGBytes(width, height);
long timeToPlot = System.currentTimeMillis() - begin;
out.write(data);
//File t = File.createTempFile("jrobinData", ".xml");
//_listener.getData().dumpXml(new FileOutputStream(t));
//System.out.println("plotted: " + (data != null ? data.length : 0) + " bytes in " + timeToPlot
// ); // + ", data written to " + t.getAbsolutePath());
} catch (RrdException re) {
_log.error("Error rendering", re);
throw new IOException("Error plotting: " + re.getMessage());
} catch (IOException ioe) {
_log.error("Error rendering", ioe);
throw ioe;
}
}
}
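Taken together, the two classes above feed a stat's Rate into an in-memory RRD and render it on demand. A minimal usage sketch under the APIs shown above (the stat name and output file are hypothetical, and the class would have to live in the same package, since SummaryListener is package-private):

    import java.io.FileOutputStream;
    import java.io.OutputStream;
    import net.i2p.I2PAppContext;
    import net.i2p.stat.Rate;
    import net.i2p.stat.RateStat;

    public class GraphOneStat {
        public static void main(String args[]) throws Exception {
            I2PAppContext ctx = I2PAppContext.getGlobalContext();
            RateStat rs = ctx.statManager().getRate("bw.recvRate"); // hypothetical stat
            Rate rate = rs.getRate(60*1000);
            SummaryListener lsnr = new SummaryListener(rate);
            lsnr.startListening();            // creates the in-memory RRD, registers itself
            // ... let samples accumulate, then:
            OutputStream out = new FileOutputStream("bw.recvRate.png");
            lsnr.renderPng(out, 400, 100, false, false, false, false, -1, true);
            out.close();
            lsnr.stopListening();             // closes and deletes the backing RRD
        }
    }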

View File

@@ -43,13 +43,14 @@
A negative rate means a default limit of 16KBytes per second.</i><br />
Bandwidth share percentage:
<jsp:getProperty name="nethelper" property="sharePercentageBox" /><br />
Sharing a higher percentage will improve your anonymity and help the network
Sharing a higher percentage will improve your anonymity and help the network<br />
<input type="submit" name="save" value="Save changes" /> <input type="reset" value="Cancel" /><br />
<hr />
<b>Enable load testing: </b>
<input type="checkbox" name="enableloadtesting" value="true" <jsp:getProperty name="nethelper" property="enableLoadTesting" /> />
<p>If enabled, your router will periodically anonymously probe some of your peers
to see what sort of throughput they can handle. This improves your router's ability
to pick faster peers, but can cost substantial bandwidth. Relevent data from the
to pick faster peers, but can cost substantial bandwidth. Relevant data from the
load testing is fed into the profiles as well as the
<a href="oldstats.jsp#test.rtt">test.rtt</a> and related stats.</p>
<hr />

View File

@@ -0,0 +1,23 @@
<%@page contentType="text/html"%>
<%@page pageEncoding="UTF-8"%>
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html><head>
<title>I2P Router Console - graphs</title>
<link rel="stylesheet" href="default.css" type="text/css" />
</head><body>
<%@include file="nav.jsp" %>
<%@include file="summary.jsp" %>
<div class="main" id="main">
<jsp:useBean class="net.i2p.router.web.GraphHelper" id="graphHelper" scope="request" />
<jsp:setProperty name="graphHelper" property="*" />
<jsp:setProperty name="graphHelper" property="contextId" value="<%=(String)session.getAttribute("i2p.contextId")%>" />
<jsp:setProperty name="graphHelper" property="out" value="<%=out%>" />
<jsp:getProperty name="graphHelper" property="images" />
<jsp:getProperty name="graphHelper" property="form" />
</div>
</body>
</html>

View File

@@ -33,6 +33,7 @@
<a href="netdb.jsp">NetDB</a> |
<a href="logs.jsp">Logs</a> |
<a href="jobs.jsp">Jobs</a> |
<a href="graphs.jsp">Graphs</a> |
<a href="oldstats.jsp">Stats</a> |
<a href="oldconsole.jsp">Internals</a>
<% } %>

View File

@@ -65,7 +65,7 @@
%><hr />
<u><b><a href="config.jsp" title="Configure the bandwidth limits">Bandwidth in/out</a></b></u><br />
<b>1s:</b> <jsp:getProperty name="helper" property="inboundMinuteKBps" />/<jsp:getProperty name="helper" property="outboundMinuteKBps" />KBps<br />
<b>1s:</b> <jsp:getProperty name="helper" property="inboundSecondKBps" />/<jsp:getProperty name="helper" property="outboundSecondKBps" />KBps<br />
<b>5m:</b> <jsp:getProperty name="helper" property="inboundFiveMinuteKBps" />/<jsp:getProperty name="helper" property="outboundFiveMinuteKBps" />KBps<br />
<b>Total:</b> <jsp:getProperty name="helper" property="inboundLifetimeKBps" />/<jsp:getProperty name="helper" property="outboundLifetimeKBps" />KBps<br />
<b>Used:</b> <jsp:getProperty name="helper" property="inboundTransferred" />/<jsp:getProperty name="helper" property="outboundTransferred" /><br />
@@ -83,6 +83,7 @@
<b>Job lag:</b> <jsp:getProperty name="helper" property="jobLag" /><br />
<b>Message delay:</b> <jsp:getProperty name="helper" property="messageDelay" /><br />
<b>Tunnel lag:</b> <jsp:getProperty name="helper" property="tunnelLag" /><br />
<b>Handle backlog:</b> <jsp:getProperty name="helper" property="inboundBacklog" /><br />
<hr />
</div>

View File

@@ -0,0 +1,63 @@
<%
boolean rendered = false;
String templateFile = request.getParameter("template");
if (templateFile != null) {
java.io.OutputStream cout = response.getOutputStream();
response.setContentType("image/png");
rendered = net.i2p.router.web.StatSummarizer.instance().renderPng(cout, templateFile);
}
net.i2p.stat.Rate rate = null;
String stat = request.getParameter("stat");
String period = request.getParameter("period");
boolean fakeBw = (stat != null && ("bw.combined".equals(stat)));
net.i2p.stat.RateStat rs = net.i2p.I2PAppContext.getGlobalContext().statManager().getRate(stat);
if ( !rendered && ((rs != null) || fakeBw) ) {
long per = -1;
try {
if (fakeBw)
per = 60*1000;
else
per = Long.parseLong(period);
if (!fakeBw)
rate = rs.getRate(per);
if ( (rate != null) || (fakeBw) ) {
java.io.OutputStream cout = response.getOutputStream();
String format = request.getParameter("format");
if ("xml".equals(format)) {
if (!fakeBw) {
response.setContentType("text/xml");
rendered = net.i2p.router.web.StatSummarizer.instance().getXML(rate, cout);
}
} else {
response.setContentType("image/png");
int width = -1;
int height = -1;
int periodCount = -1;
String str = request.getParameter("width");
if (str != null) try { width = Integer.parseInt(str); } catch (NumberFormatException nfe) {}
str = request.getParameter("height");
if (str != null) try { height = Integer.parseInt(str); } catch (NumberFormatException nfe) {}
str = request.getParameter("periodCount");
if (str != null) try { periodCount = Integer.parseInt(str); } catch (NumberFormatException nfe) {}
boolean hideLegend = Boolean.valueOf(""+request.getParameter("hideLegend")).booleanValue();
boolean hideGrid = Boolean.valueOf(""+request.getParameter("hideGrid")).booleanValue();
boolean hideTitle = Boolean.valueOf(""+request.getParameter("hideTitle")).booleanValue();
boolean showEvents = Boolean.valueOf(""+request.getParameter("showEvents")).booleanValue();
boolean showCredit = true;
if (request.getParameter("showCredit") != null)
showCredit = Boolean.valueOf(""+request.getParameter("showCredit")).booleanValue();
if (fakeBw)
rendered = net.i2p.router.web.StatSummarizer.instance().renderRatePng(cout, width, height, hideLegend, hideGrid, hideTitle, showEvents, periodCount, showCredit);
else
rendered = net.i2p.router.web.StatSummarizer.instance().renderPng(rate, cout, width, height, hideLegend, hideGrid, hideTitle, showEvents, periodCount, showCredit);
}
if (rendered)
cout.close();
//System.out.println("Rendered period " + per + " for the stat " + stat + "? " + rendered);
}
} catch (NumberFormatException nfe) {}
}
if (!rendered) {
response.sendError(404, "That stat is not available");
}
%>
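Putting the parameters above together, the page can be driven with query strings like the following (stat names are illustrative; any registered RateStat works, and bw.combined takes a fixed 60s period):

    viewstat.jsp?stat=bw.recvRate&period=60000&width=400&height=100
    viewstat.jsp?stat=bw.combined&periodCount=240&hideLegend=true&hideGrid=true
    viewstat.jsp?stat=bw.recvRate&period=60000&format=xml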

View File

@@ -210,6 +210,11 @@ public class Connection {
}
}
if (packet != null) {
if (packet.isFlagSet(Packet.FLAG_RESET)) {
// sendReset takes care to prevent too-frequent RESET transmissions
sendReset();
return;
}
ResendPacketEvent evt = (ResendPacketEvent)packet.getResendEvent();
if (evt != null) {
boolean sent = evt.retransmit(false);
@@ -240,9 +245,11 @@ public class Connection {
_disconnectScheduledOn = _context.clock().now();
SimpleTimer.getInstance().addEvent(new DisconnectEvent(), DISCONNECT_TIMEOUT);
}
long now = _context.clock().now();
if (_resetSentOn + 10*1000 > now) return; // don't send resets too fast
_resetSent = true;
if (_resetSentOn <= 0)
_resetSentOn = _context.clock().now();
_resetSentOn = now;
if ( (_remotePeer == null) || (_sendStreamId <= 0) ) return;
PacketLocal reply = new PacketLocal(_context, _remotePeer);
reply.setFlag(Packet.FLAG_RESET);
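The hunk above rate-limits RESET packets to at most one per ten seconds per connection. A self-contained sketch of that pattern (simplified; the real code reads the router clock rather than System.currentTimeMillis()):

    class ResetThrottle {
        private static final long MIN_RESET_INTERVAL = 10*1000; // ms, as in the diff
        private long _resetSentOn;

        /** @return true if a RESET may go out now; records the send time if so */
        synchronized boolean maySendReset() {
            long now = System.currentTimeMillis();
            if (_resetSentOn > 0 && _resetSentOn + MIN_RESET_INTERVAL > now)
                return false;    // one went out recently; don't flood the peer
            _resetSentOn = now;
            return true;
        }
    }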

View File

@@ -101,7 +101,7 @@ public class ConnectionOptions extends I2PSocketOptionsImpl {
setMaxWindowSize(getInt(opts, PROP_MAX_WINDOW_SIZE, Connection.MAX_WINDOW_SIZE));
setConnectDelay(getInt(opts, PROP_CONNECT_DELAY, -1));
setProfile(getInt(opts, PROP_PROFILE, PROFILE_BULK));
setMaxMessageSize(getInt(opts, PROP_MAX_MESSAGE_SIZE, 4*1024));
setMaxMessageSize(getInt(opts, PROP_MAX_MESSAGE_SIZE, 960)); // 960 fits inside a single tunnel message
setRTT(getInt(opts, PROP_INITIAL_RTT, 10*1000));
setReceiveWindow(getInt(opts, PROP_INITIAL_RECEIVE_WINDOW, 1));
setResendDelay(getInt(opts, PROP_INITIAL_RESEND_DELAY, 1000));

View File

@@ -6,6 +6,7 @@ import java.text.*;
import net.i2p.I2PAppContext;
import net.i2p.data.*;
import net.i2p.syndie.data.*;
import net.i2p.util.FileUtil;
import net.i2p.util.Log;
/**
@@ -27,6 +28,7 @@ import net.i2p.util.Log;
public class Archive {
private I2PAppContext _context;
private Log _log;
private BlogManager _mgr;
private File _rootDir;
private File _cacheDir;
private Map _blogInfo;
@@ -42,9 +44,10 @@ public class Archive {
public boolean accept(File dir, String name) { return name.endsWith(".snd"); }
};
public Archive(I2PAppContext ctx, String rootDir, String cacheDir) {
public Archive(I2PAppContext ctx, String rootDir, String cacheDir, BlogManager mgr) {
_context = ctx;
_log = ctx.logManager().getLog(Archive.class);
_mgr = mgr;
_rootDir = new File(rootDir);
if (!_rootDir.exists())
_rootDir.mkdirs();
@@ -71,6 +74,13 @@ public class Archive {
try {
fi = new FileInputStream(meta);
bi.load(fi);
if (_mgr.isBanned(bi.getKey().calculateHash())) {
fi.close();
fi = null;
_log.error("Deleting banned blog " + bi.getKey().calculateHash().toBase64());
delete(bi.getKey().calculateHash());
continue;
}
if (bi.verify(_context)) {
info.add(bi);
} else {
@@ -119,6 +129,12 @@ public class Archive {
_log.warn("Not storing invalid blog " + info);
return false;
}
if (_mgr.isBanned(info.getKey().calculateHash())) {
_log.error("Not storing banned blog " + info.getKey().calculateHash().toBase64(), new Exception("Stored by"));
return false;
}
boolean isNew = true;
synchronized (_blogInfo) {
BlogInfo old = (BlogInfo)_blogInfo.get(info.getKey().calculateHash());
@@ -211,7 +227,13 @@ public class Archive {
if (!entryDir.exists())
entryDir.mkdirs();
boolean ok = _extractor.extract(entryFile, entryDir, null, info);
boolean ok = true;
try {
ok = _extractor.extract(entryFile, entryDir, null, info);
} catch (IOException ioe) {
ok = false;
_log.error("Error extracting " + entryFile.getPath() + ", deleting it", ioe);
}
if (!ok) {
File files[] = entryDir.listFiles();
for (int i = 0; i < files.length; i++)
@@ -267,8 +289,9 @@ public class Archive {
if (blogKey == null) {
// no key, cache.
File entryDir = getEntryDir(entries[i]);
if (entryDir.exists())
if (entryDir.exists()) {
entry = getCachedEntry(entryDir);
}
if ((entry == null) || !entryDir.exists()) {
if (!extractEntry(entries[i], entryDir, info)) {
_log.error("Entry " + entries[i].getPath() + " is not valid");
@@ -326,6 +349,15 @@ public class Archive {
return rv;
}
public synchronized void delete(Hash blog) {
if (blog == null) return;
File blogDir = new File(_rootDir, blog.toBase64());
boolean deleted = FileUtil.rmdir(blogDir, false);
File cacheDir = new File(_cacheDir, blog.toBase64());
deleted = FileUtil.rmdir(cacheDir, false) && deleted;
_log.info("Deleted blog " + blog.toBase64() + " completely? " + deleted);
}
public boolean storeEntry(EntryContainer container) {
if (container == null) return false;
BlogURI uri = container.getURI();

View File

@@ -74,7 +74,7 @@ public class BlogManager {
_cacheDir.mkdirs();
_userDir.mkdirs();
_tempDir.mkdirs();
_archive = new Archive(ctx, _archiveDir.getAbsolutePath(), _cacheDir.getAbsolutePath());
_archive = new Archive(ctx, _archiveDir.getAbsolutePath(), _cacheDir.getAbsolutePath(), this);
if (regenIndex)
_archive.regenerateIndex();
}
@@ -890,6 +890,8 @@ public class BlogManager {
try {
BlogInfo info = new BlogInfo();
info.load(metadataStream);
if (isBanned(info.getKey().calculateHash()))
return false;
return _archive.storeBlogInfo(info);
} catch (IOException ioe) {
_log.error("Error importing meta", ioe);
@@ -906,6 +908,8 @@ public class BlogManager {
try {
EntryContainer c = new EntryContainer();
c.load(entryStream);
if (isBanned(c.getURI().getKeyHash()))
return false;
return _archive.storeEntry(c);
} catch (IOException ioe) {
_log.error("Error importing entry", ioe);
@@ -1060,4 +1064,49 @@ public class BlogManager {
return true;
return false;
}
public boolean isBanned(Hash blog) {
if ( (blog == null) || (blog.getData() == null) || (blog.getData().length <= 0) ) return false;
String str = blog.toBase64();
String banned = System.getProperty("syndie.bannedBlogs", "");
return (banned.indexOf(str) >= 0);
}
public String[] getBannedBlogs() {
List blogs = new ArrayList();
String str = System.getProperty("syndie.bannedBlogs", "");
StringTokenizer tok = new StringTokenizer(str, ",");
while (tok.hasMoreTokens()) {
String blog = tok.nextToken();
try {
Hash h = new Hash();
h.fromBase64(blog);
blogs.add(blog); // the base64 string, but verified
} catch (DataFormatException dfe) {
// ignored
}
}
String rv[] = new String[blogs.size()];
for (int i = 0; i < blogs.size(); i++)
rv[i] = (String)blogs.get(i);
return rv;
}
/**
* Delete the blog from the archive completely, and ban them from ever being added again
*/
public void purgeAndBan(Hash blog) {
String banned[] = getBannedBlogs();
StringBuffer buf = new StringBuffer();
String str = blog.toBase64();
buf.append(str);
for (int i = 0; banned != null && i < banned.length; i++) {
if (!banned[i].equals(str))
buf.append(",").append(banned[i]);
}
System.setProperty("syndie.bannedBlogs", buf.toString());
writeConfig();
_archive.delete(blog);
_archive.regenerateIndex();
}
}
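The ban list above is a comma-separated list of Base64 blog-key hashes kept in the syndie.bannedBlogs system property; a minimal sketch with hypothetical values:

    // ban two blogs by the Base64 of their key hashes (values hypothetical)
    System.setProperty("syndie.bannedBlogs", "base64HashOne,base64HashTwo");
    // isBanned() then reduces to a substring test against that property:
    String hashB64 = "base64HashOne";              // blog.toBase64() in the real code
    boolean banned = System.getProperty("syndie.bannedBlogs", "")
                           .indexOf(hashB64) >= 0; // true here

Because every Hash encodes to the same fixed-length Base64 string and entries are comma-separated, one entry cannot be a proper substring of another, so the simple indexOf() test is adequate here.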

View File

@@ -59,9 +59,9 @@ public class EntryExtractor {
}
public void extract(EntryContainer entry, File entryDir) throws IOException {
extractEntry(entry, entryDir);
extractHeaders(entry, entryDir);
extractMeta(entry, entryDir);
extractEntry(entry, entryDir);
Attachment attachments[] = entry.getAttachments();
if (attachments != null) {
for (int i = 0; i < attachments.length; i++) {
@@ -97,10 +97,14 @@ public class EntryExtractor {
}
}
private void extractEntry(EntryContainer entry, File entryDir) throws IOException {
Entry e = entry.getEntry();
if (e == null) throw new IOException("Entry is null");
String text = e.getText();
if (text == null) throw new IOException("Entry text is null");
FileOutputStream out = null;
try {
out = new FileOutputStream(new File(entryDir, ENTRY));
out.write(DataHelper.getUTF8(entry.getEntry().getText()));
out.write(DataHelper.getUTF8(text));
} finally {
out.close();
}

View File

@@ -163,8 +163,9 @@ public class ArchiveIndex {
/** list of unique blogs locally known (set of Hash) */
public Set getUniqueBlogs() {
Set rv = new HashSet();
for (int i = 0; i < _blogs.size(); i++)
for (int i = 0; i < _blogs.size(); i++) {
rv.add(getBlog(i));
}
return rv;
}
public List getReplies(BlogURI uri) {
@@ -367,7 +368,10 @@ public class ArchiveIndex {
return;
tok.nextToken();
String keyStr = tok.nextToken();
Hash keyHash = new Hash(Base64.decode(keyStr));
byte k[] = Base64.decode(keyStr);
if ( (k == null) || (k.length != Hash.HASH_LENGTH) )
return; // ignore bad hashes
Hash keyHash = new Hash(k);
String whenStr = tok.nextToken();
long when = getIndexDate(whenStr);
String tag = tok.nextToken();

View File

@@ -60,7 +60,7 @@ public class EntryContainer {
this();
_entryURI = uri;
if ( (smlData == null) || (smlData.length <= 0) )
_entryData = new Entry(null);
_entryData = new Entry(""); //null);
else
_entryData = new Entry(DataHelper.getUTF8(smlData));
setHeader(HEADER_BLOGKEY, Base64.encode(uri.getKeyHash().getData()));
@@ -277,7 +277,7 @@ public class EntryContainer {
}
if (_entryData == null)
_entryData = new Entry(null);
_entryData = new Entry(""); //null);
_attachments = new Attachment[attachments.size()];

View File

@@ -34,7 +34,9 @@ public class BlogRenderer extends HTMLRenderer {
}
public void receiveHeaderEnd() {
_preBodyBuffer.append("<div class=\"syndieBlogPost\"><hr style=\"display: none\" />\n");
_preBodyBuffer.append("<div class=\"syndieBlogPost\" id=\"");
_preBodyBuffer.append(_entry.getURI().getKeyHash().toBase64()).append('/').append(_entry.getURI().getEntryId());
_preBodyBuffer.append("\"><hr style=\"display: none\" />\n");
_preBodyBuffer.append("<div class=\"syndieBlogPostHeader\">\n");
_preBodyBuffer.append("<div class=\"syndieBlogPostSubject\">");
String subject = (String)_headers.get(HEADER_SUBJECT);
@@ -160,12 +162,24 @@ public class BlogRenderer extends HTMLRenderer {
protected String getEntryURL(boolean showImages) {
return getEntryURL(_entry, _blog, showImages);
}
static String getEntryURL(EntryContainer entry, BlogInfo blog, boolean showImages) {
static String getEntryURL(EntryContainer entry, BlogInfo blog, boolean showImages) {
if (entry == null) return "unknown";
return "blog.jsp?"
+ ViewBlogServlet.PARAM_BLOG + "=" + (blog != null ? blog.getKey().calculateHash().toBase64() : "") + "&amp;"
+ ViewBlogServlet.PARAM_ENTRY + "="
+ Base64.encode(entry.getURI().getKeyHash().getData()) + '/' + entry.getURI().getEntryId();
return getEntryURL(entry.getURI(), blog, null, showImages);
}
static String getEntryURL(BlogURI entry, BlogInfo blog, BlogURI comment, boolean showImages) {
if (entry == null) return "unknown";
if (comment == null) {
return "blog.jsp?"
+ ViewBlogServlet.PARAM_BLOG + "=" + (blog != null ? blog.getKey().calculateHash().toBase64() : "") + "&amp;"
+ ViewBlogServlet.PARAM_ENTRY + "="
+ Base64.encode(entry.getKeyHash().getData()) + '/' + entry.getEntryId();
} else {
return "blog.jsp?"
+ ViewBlogServlet.PARAM_BLOG + "=" + (blog != null ? blog.getKey().calculateHash().toBase64() : "") + "&amp;"
+ ViewBlogServlet.PARAM_ENTRY + "="
+ Base64.encode(entry.getKeyHash().getData()) + '/' + entry.getEntryId()
+ '#' + Base64.encode(comment.getKeyHash().getData()) + '/' + comment.getEntryId();
}
}
protected String getAttachmentURLBase() {
@@ -218,4 +232,4 @@ public class BlogRenderer extends HTMLRenderer {
buf.append(ViewBlogServlet.PARAM_OFFSET).append('=').append(pageNum*numPerPage).append("&amp;");
return buf.toString();
}
}
}
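For reference, the two branches above produce URLs of these shapes (angle-bracket placeholders stand for Base64 hashes and numeric entry ids):

    blog.jsp?blog=<blogKeyHash>&amp;entry=<entryKeyHash>/<entryId>
    blog.jsp?blog=<blogKeyHash>&amp;entry=<entryKeyHash>/<entryId>#<commentKeyHash>/<commentEntryId>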

View File

@@ -122,36 +122,52 @@ public class ThreadedHTMLRenderer extends HTMLRenderer {
public static String getViewPostLink(String uri, ThreadNode node, User user, boolean isPermalink,
String offset, String tags, String author, boolean authorOnly) {
StringBuffer buf = new StringBuffer(64);
buf.append(uri);
if (node.getChildCount() > 0) {
buf.append('?').append(PARAM_VISIBLE).append('=');
ThreadNode child = node.getChild(0);
buf.append(child.getEntry().getKeyHash().toBase64()).append('/');
buf.append(child.getEntry().getEntryId()).append('&');
if (isPermalink) {
// link to the blog view of the original poster
BlogURI rootBlog = null;
ThreadNode parent = node;
while (parent != null) {
if (parent.getParent() != null) {
parent = parent.getParent();
} else {
rootBlog = parent.getEntry();
break;
}
}
BlogInfo root = BlogManager.instance().getArchive().getBlogInfo(rootBlog.getKeyHash());
return BlogRenderer.getEntryURL(parent.getEntry(), root, node.getEntry(), true);
} else {
buf.append('?').append(PARAM_VISIBLE).append('=');
StringBuffer buf = new StringBuffer(64);
buf.append(uri);
if (node.getChildCount() > 0) {
buf.append('?').append(PARAM_VISIBLE).append('=');
ThreadNode child = node.getChild(0);
buf.append(child.getEntry().getKeyHash().toBase64()).append('/');
buf.append(child.getEntry().getEntryId()).append('&');
} else {
buf.append('?').append(PARAM_VISIBLE).append('=');
buf.append(node.getEntry().getKeyHash().toBase64()).append('/');
buf.append(node.getEntry().getEntryId()).append('&');
}
buf.append(PARAM_VIEW_POST).append('=');
buf.append(node.getEntry().getKeyHash().toBase64()).append('/');
buf.append(node.getEntry().getEntryId()).append('&');
if (!isPermalink) {
if (!empty(offset))
buf.append(PARAM_OFFSET).append('=').append(offset).append('&');
if (!empty(tags))
buf.append(PARAM_TAGS).append('=').append(tags).append('&');
}
if (authorOnly && !empty(author)) {
buf.append(PARAM_AUTHOR).append('=').append(author).append('&');
buf.append(PARAM_THREAD_AUTHOR).append("=true&");
} else if (!isPermalink && !empty(author))
buf.append(PARAM_AUTHOR).append('=').append(author).append('&');
return buf.toString();
}
buf.append(PARAM_VIEW_POST).append('=');
buf.append(node.getEntry().getKeyHash().toBase64()).append('/');
buf.append(node.getEntry().getEntryId()).append('&');
if (!isPermalink) {
if (!empty(offset))
buf.append(PARAM_OFFSET).append('=').append(offset).append('&');
if (!empty(tags))
buf.append(PARAM_TAGS).append('=').append(tags).append('&');
}
if (authorOnly && !empty(author)) {
buf.append(PARAM_AUTHOR).append('=').append(author).append('&');
buf.append(PARAM_THREAD_AUTHOR).append("=true&");
} else if (!isPermalink && !empty(author))
buf.append(PARAM_AUTHOR).append('=').append(author).append('&');
return buf.toString();
}
public static String getViewPostLink(String uri, BlogURI post, User user, boolean isPermalink,
@@ -272,8 +288,7 @@ public class ThreadedHTMLRenderer extends HTMLRenderer {
out.write("\n<a href=\"");
out.write(getViewPostLink(baseURI, node, user, true, offset, requestTags, filteredAuthor, authorOnly));
out.write("\" title=\"Select a shareable link directly to this post\">permalink</a>\n");
out.write("\" title=\"Select a link directly to this post within the blog\">permalink</a>\n");
if (true || (!inlineReply) ) {
String refuseReply = (String)_headers.get(HEADER_REFUSE_REPLIES);

View File

@@ -46,6 +46,7 @@ public class AddressesServlet extends BaseServlet {
public static final String ACTION_DELETE_BLOG = "Delete author";
public static final String ACTION_UPDATE_BLOG = "Update author";
public static final String ACTION_ADD_BLOG = "Add author";
public static final String ACTION_PURGE_AND_BAN_BLOG = "Purge and ban author";
public static final String ACTION_DELETE_ARCHIVE = "Delete archive";
public static final String ACTION_UPDATE_ARCHIVE = "Update archive";
@@ -128,6 +129,8 @@ public class AddressesServlet extends BaseServlet {
if (pn.isMember(FilteredThreadIndex.GROUP_IGNORE)) {
out.write("Ignored? <input type=\"checkbox\" name=\"" + PARAM_IGNORE
+ "\" checked=\"true\" value=\"true\" title=\"If true, their threads are hidden\" /> ");
if (BlogManager.instance().authorizeRemote(user))
out.write("<input type=\"submit\" name=\"" + PARAM_ACTION + "\" value=\"" + ACTION_PURGE_AND_BAN_BLOG + "\" /> ");
} else {
out.write("Ignored? <input type=\"checkbox\" name=\"" + PARAM_IGNORE
+ "\" value=\"true\" title=\"If true, their threads are hidden\" /> ");

View File

@@ -64,13 +64,13 @@ public abstract class BaseServlet extends HttpServlet {
* key=value& of params that need to be tacked onto an http request that updates data, to
* prevent spoofing
*/
protected static String getAuthActionParams() { return PARAM_AUTH_ACTION + '=' + _authNonce + '&'; }
protected static String getAuthActionParams() { return PARAM_AUTH_ACTION + '=' + _authNonce + "&amp;"; }
/**
* key=value& of params that need to be tacked onto an http request that updates data, to
* prevent spoofing
*/
public static void addAuthActionParams(StringBuffer buf) {
buf.append(PARAM_AUTH_ACTION).append('=').append(_authNonce).append('&');
buf.append(PARAM_AUTH_ACTION).append('=').append(_authNonce).append("&amp;");
}
public void service(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
@@ -295,7 +295,7 @@ public abstract class BaseServlet extends HttpServlet {
if (AddressesServlet.ACTION_ADD_TAG.equals(action)) {
String name = req.getParameter(AddressesServlet.PARAM_NAME);
if (!user.getPetNameDB().containsName(name)) {
if ((name != null) && (name.trim().length() > 0) && (!user.getPetNameDB().containsName(name)) ) {
PetName pn = new PetName(name, AddressesServlet.NET_SYNDIE, AddressesServlet.PROTO_TAG, name);
user.getPetNameDB().add(pn);
BlogManager.instance().saveUser(user);
@@ -307,7 +307,7 @@ public abstract class BaseServlet extends HttpServlet {
(AddressesServlet.ACTION_ADD_OTHER.equals(action)) ||
(AddressesServlet.ACTION_ADD_PEER.equals(action)) ) {
PetName pn = buildNewAddress(req);
if ( (pn != null) && (pn.getName() != null) && (pn.getLocation() != null) &&
if ( (pn != null) && (pn.getName() != null) && (pn.getName().trim().length() > 0) && (pn.getLocation() != null) &&
(!user.getPetNameDB().containsName(pn.getName())) ) {
user.getPetNameDB().add(pn);
BlogManager.instance().saveUser(user);
@@ -329,6 +329,34 @@ public abstract class BaseServlet extends HttpServlet {
(AddressesServlet.ACTION_UPDATE_OTHER.equals(action)) ||
(AddressesServlet.ACTION_UPDATE_PEER.equals(action)) ) {
return updateAddress(user, req);
} else if (AddressesServlet.ACTION_PURGE_AND_BAN_BLOG.equals(action)) {
String name = req.getParameter(AddressesServlet.PARAM_NAME);
PetName pn = user.getPetNameDB().getByName(name);
if (pn != null) {
boolean purged = false;
if (BlogManager.instance().authorizeRemote(user)) {
Hash h = null;
BlogURI uri = new BlogURI(pn.getLocation());
if (uri.getKeyHash() != null) {
h = uri.getKeyHash();
}
if (h == null) {
byte b[] = Base64.decode(pn.getLocation());
if ( (b != null) && (b.length == Hash.HASH_LENGTH) )
h = new Hash(b);
}
if (h != null) {
BlogManager.instance().purgeAndBan(h);
purged = true;
}
}
if (purged) // force a new thread index
return true;
else
return false;
} else {
return false;
}
} else if ( (AddressesServlet.ACTION_DELETE_ARCHIVE.equals(action)) ||
(AddressesServlet.ACTION_DELETE_BLOG.equals(action)) ||
(AddressesServlet.ACTION_DELETE_EEPSITE.equals(action)) ||
@@ -716,6 +744,8 @@ public abstract class BaseServlet extends HttpServlet {
for (Iterator iter = names.iterator(); iter.hasNext(); ) {
String name = (String) iter.next();
PetName pn = db.getByName(name);
if (pn == null)
continue;
String proto = pn.getProtocol();
String loc = pn.getLocation();
if (proto != null && loc != null && "syndieblog".equals(proto) && pn.isMember(FilteredThreadIndex.GROUP_FAVORITE)) {
@@ -866,22 +896,22 @@ public abstract class BaseServlet extends HttpServlet {
ThreadNode child = node.getChild(0);
buf.append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('=');
buf.append(child.getEntry().getKeyHash().toBase64()).append('/');
buf.append(child.getEntry().getEntryId()).append('&');
buf.append(child.getEntry().getEntryId()).append("&amp;");
}
if (!empty(viewPost))
buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append("&amp;");
else if (!empty(viewThread))
buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append("&amp;");
if (!empty(offset))
buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append("&amp;");
if (!empty(tags))
buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append("&amp;");
if (!empty(author))
buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(author).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(author).append("&amp;");
return buf.toString();
}
@@ -901,21 +931,21 @@ public abstract class BaseServlet extends HttpServlet {
// collapse node == let the node be visible
buf.append('?').append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('=');
buf.append(node.getEntry().getKeyHash().toBase64()).append('/');
buf.append(node.getEntry().getEntryId()).append('&');
buf.append(node.getEntry().getEntryId()).append("&amp;");
if (!empty(viewPost))
buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append("&amp;");
else if (!empty(viewThread))
buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append("&amp;");
if (!empty(offset))
buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append("&amp;");
if (!empty(tags))
buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append("&amp;");
if (!empty(author))
buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(author).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(author).append("&amp;");
return buf.toString();
}
@@ -939,23 +969,23 @@ public abstract class BaseServlet extends HttpServlet {
buf.append(uri);
buf.append('?');
if (!empty(visible))
buf.append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('=').append(visible).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_ADD_TO_GROUP_LOCATION).append('=').append(author.toBase64()).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_ADD_TO_GROUP_NAME).append('=').append(group).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('=').append(visible).append("&amp;");
buf.append(ThreadedHTMLRenderer.PARAM_ADD_TO_GROUP_LOCATION).append('=').append(author.toBase64()).append("&amp;");
buf.append(ThreadedHTMLRenderer.PARAM_ADD_TO_GROUP_NAME).append('=').append(group).append("&amp;");
if (!empty(viewPost))
buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append("&amp;");
else if (!empty(viewThread))
buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append("&amp;");
if (!empty(offset))
buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append("&amp;");
if (!empty(tags))
buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append("&amp;");
if (!empty(filteredAuthor))
buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(filteredAuthor).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(filteredAuthor).append("&amp;");
addAuthActionParams(buf);
return buf.toString();
@@ -966,23 +996,23 @@ public abstract class BaseServlet extends HttpServlet {
buf.append(uri);
buf.append('?');
if (!empty(visible))
buf.append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('=').append(visible).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_REMOVE_FROM_GROUP_NAME).append('=').append(name).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_REMOVE_FROM_GROUP).append('=').append(group).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('=').append(visible).append("&amp;");
buf.append(ThreadedHTMLRenderer.PARAM_REMOVE_FROM_GROUP_NAME).append('=').append(name).append("&amp;");
buf.append(ThreadedHTMLRenderer.PARAM_REMOVE_FROM_GROUP).append('=').append(group).append("&amp;");
if (!empty(viewPost))
buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append("&amp;");
else if (!empty(viewThread))
buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append("&amp;");
if (!empty(offset))
buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append("&amp;");
if (!empty(tags))
buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append("&amp;");
if (!empty(filteredAuthor))
buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(filteredAuthor).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(filteredAuthor).append("&amp;");
addAuthActionParams(buf);
return buf.toString();
@@ -1024,24 +1054,23 @@ public abstract class BaseServlet extends HttpServlet {
}
buf.append('?').append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('=');
buf.append(expandTo.getKeyHash().toBase64()).append('/');
buf.append(expandTo.getEntryId()).append('&');
buf.append(expandTo.getEntryId()).append("&amp;");
buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=');
buf.append(node.getEntry().getKeyHash().toBase64()).append('/');
buf.append(node.getEntry().getEntryId()).append('&');
buf.append(node.getEntry().getEntryId()).append("&amp;");
if (!empty(offset))
buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append("&amp;");
if (!empty(tags))
buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append("&amp;");
if (!empty(author)) {
buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(author).append('&');
buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(author).append("&amp;");
if (authorOnly)
buf.append(ThreadedHTMLRenderer.PARAM_THREAD_AUTHOR).append("=true&");
buf.append(ThreadedHTMLRenderer.PARAM_THREAD_AUTHOR).append("=true&amp;");
}
buf.append("#").append(node.getEntry().toString());
return buf.toString();
}
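The systematic '&' to "&amp;" change above matters because these strings are written into HTML attributes, where a bare ampersand is invalid and can be misread as the start of an entity. A minimal sketch of the effect (parameter names hypothetical):

    StringBuffer buf = new StringBuffer("threads.jsp?");
    buf.append("offset=").append(10).append("&amp;"); // escaped for HTML output
    buf.append("tags=").append("i2p");
    // renders as <a href="threads.jsp?offset=10&amp;tags=i2p">...</a>;
    // the browser decodes &amp; and requests threads.jsp?offset=10&tags=i2p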

View File

@@ -62,6 +62,8 @@ public class RemoteArchiveBean {
}
private boolean ignoreBlog(User user, Hash blog) {
if (BlogManager.instance().isBanned(blog))
return true;
PetNameDB db = user.getPetNameDB();
PetName pn = db.getByLocation(blog.toBase64());
return ( (pn!= null) && (pn.isMember("Ignore")) );
@@ -639,6 +641,8 @@ public class RemoteArchiveBean {
int newBlogs = 0;
for (Iterator iter = remoteBlogs.iterator(); iter.hasNext(); ) {
Hash blog = (Hash)iter.next();
if ( (blog == null) || (blog.getData() == null) || (blog.getData().length <= 0) )
continue;
if (ignoreBlog(user, blog))
continue;
if (!localBlogs.contains(blog)) {

View File

@@ -85,4 +85,360 @@ td.s_detail_summDetail {
td.s_summary_summ {
font-size: 0.8em;
background-color: #DDDDFF;
}
/* following are doubtful salmon's contributions */
body {
margin : 0px;
padding : 0px;
width: 99%;
font-family : Arial, sans-serif, Helvetica;
background-color : #FFF;
color : black;
font-size : 100%;
/* we've avoided Tantek Hacks so far,
** but we can't avoid using the non-w3c method of
** box rendering (and therefore one of mozilla's
** proprietary -moz properties, which hopefully they'll
** drop soon).
*/
-moz-box-sizing : border-box;
box-sizing : border-box;
}
a:link{color:#007}
a:visited{color:#606}
a:hover{color:#720}
a:active{color:#900}
select {
min-width: 1.5em;
}
.overallTable {
border-spacing: 0px;
border-collapse: collapse;
float: left;
}
.topNav {
background-color: #BBB;
}
.topNav_user {
text-align: left;
float: left;
display: inline;
}
.topNav_admin {
text-align: right;
float: right;
margin: 0 5px 0 0;
display: inline;
}
.controlBar {
border-bottom: thick double #CCF;
border-left: medium solid #CCF;
border-right: medium solid #CCF;
background-color: #EEF;
color: inherit;
font-size: small;
clear: left; /* fixes a bug in Opera */
}
.controlBarRight {
text-align: right;
}
.threadEven {
background-color: #FFF;
white-space: nowrap;
}
.threadOdd {
background-color: #FFC;
white-space: nowrap;
}
.threadLeft {
text-align: left;
}
.threadNav {
background-color: #EEF;
border: medium solid #CCF;
}
.threadNavRight {
text-align: right;
float: right;
background-color: #EEF;
}
.rightOffset {
float: right;
margin: 0 5px 0 0;
display: inline;
}
.threadInfoLeft {
float: left;
margin: 5px 0px 0 0;
display: inline;
}
.threadInfoRight {
float: right;
margin: 0 5px 0 0;
display: inline;
}
.postMeta {
border-top: 1px solid black;
background-color: #FFB;
}
.postMetaSubject {
text-align: left;
font-size: large;
}
.postMetaLink {
text-align: right;
}
.postDetails {
background-color: #FFC;
}
.postReply {
background-color: #CCF;
}
.postReplyText {
background-color: #CCF;
}
.postReplyOptions {
background-color: #CCF;
}
.syndieBlogTopNav {
padding: 0.5em;
width: 98%;
border: medium solid #CCF;
background-color: #EEF;
font-size: small;
}
.syndieBlogTopNavUser {
text-align: left;
}
.syndieBlogTopNavAdmin {
text-align: right;
}
.syndieBlogHeader {
width: 100%;
font-size: 1.4em;
background-color: #000;
text-align: Left;
float: Left;
}
.syndieBlogHeader a {
color: #FFF;
padding: 4px;
}
.syndieBlogHeader a:hover {
color:#88F;
padding: 4px;
}
.syndieBlogLogo {
float: left;
display: inline;
}
.syndieBlogLinks {
width: 20%;
float: left;
}
.syndieBlogLinkGroup {
font-size: 0.8em;
background-color: #DDD;
border: 1px solid black;
margin: 5px;
padding: 2px;
}
.syndieBlogLinkGroup ul {
list-style: none;
}
.syndieBlogLinkGroup li {
}
.syndieBlogLinkGroupName {
font-weight: bold;
width: 100%;
border-bottom: 1px dashed black;
display: block;
}
.syndieBlogPostInfoGroup {
font-size: 0.8em;
background-color: #FFEA9F;
border: 1px solid black;
margin: 5px;
padding: 2px;
}
.syndieBlogPostInfoGroup ol {
list-style: none;
}
.syndieBlogPostInfoGroup li {
}
.syndieBlogPostInfoGroup li a {
display: block;
}
.syndieBlogPostInfoGroupName {
font-weight: bold;
width: 100%;
border-bottom: 1px dashed black;
display: block;
}
.syndieBlogMeta {
text-align: left;
font-size: 0.8em;
background-color: #DDD;
border: 1px solid black;
margin: 5px;
padding: 2px;
}
.syndieBlogBody {
width: 80%;
float: left;
}
.syndieBlogPost {
border: 1px solid black;
margin-top: 5px;
margin-right: 5px;
}
.syndieBlogPostHeader {
background-color: #FFB;
padding: 2px;
}
.syndieBlogPostSubject {
font-weight: bold;
}
.syndieBlogPostFrom {
text-align: right;
}
.syndieBlogPostSummary {
background-color: #FFF;
padding: 2px;
}
.syndieBlogPostDetails {
background-color: #FFC;
padding: 2px;
}
.syndieBlogNav {
text-align: center;
}
.syndieBlogComments {
border: none;
margin-top: 5px;
margin-left: 0px;
float: left;
}
.syndieBlogComments ul {
list-style: none;
margin-left: 10px;
}
.syndieBlogCommentInfoGroup {
font-size: 0.8em;
margin-right: 5px;
}
.syndieBlogCommentInfoGroup ol {
list-style: none;
}
.syndieBlogCommentInfoGroup li {
}
.syndieBlogCommentInfoGroup li a {
display: block;
}
.syndieBlogCommentInfoGroupName {
font-size: 0.8em;
font-weight: bold;
}
.syndieBlogFavorites {
float: left;
margin: 5px 0px 0 0;
display: inline;
}
.syndieBlogList {
float: right;
margin: 5px 0px 0 0;
display: inline;
}
.b_topnavUser {
text-align: right;
background-color: #CCD;
}
.b_topnavHome {
background-color: #CCD;
color: #000;
width: 50px;
text-align: left;
}
.b_topnav {
background-color: #CCD;
}
.b_content {
}
.s_summary_overall {
}
.s_detail_overall {
}
.s_detail_subject {
font-size: 0.8em;
text-align: left;
background-color: #CCF;
}
.s_detail_quote {
margin-left: 1em;
border: 1px solid #DBDBDB;
background-color: #E0E0E0;
}
.s_detail_italic {
font-style: italic;
}
.s_detail_bold {
font-style: normal;
font-weight: bold;
}
.s_detail_underline {
font-style: normal;
text-decoration: underline;
}
.s_detail_meta {
font-size: 0.8em;
text-align: right;
background-color: #CCF;
}
.s_summary_subject {
font-size: 0.8em;
text-align: left;
background-color: #CCF;
}
.s_summary_meta {
font-size: 0.8em;
text-align: right;
background-color: #CCF;
}
.s_summary_quote {
margin-left: 1em;
border: 1px solid #DBDBDB;
background-color: #E0E0E0;
}
.s_summary_italic {
font-style: italic;
}
.s_summary_bold {
font-style: normal;
font-weight: bold;
}
.s_summary_underline {
font-style: normal;
text-decoration: underline;
}
.s_summary_summDetail {
font-size: 0.8em;
}
.s_detail_summDetail {
}
.s_detail_summDetailBlog {
}
.s_detail_summDetailBlogLink {
}
td.s_detail_summDetail {
background-color: #CCF;
}
td.s_summary_summ {
width: 80%;
font-size: 0.8em;
background-color: #CCF;
}

View File

@@ -169,7 +169,14 @@ public class SysTray implements SysTrayMenuListener {
_itemOpenConsole.addSysTrayMenuListener(this);
// _sysTrayMenu.addItem(_itemShutdown);
// _sysTrayMenu.addSeparator();
_sysTrayMenu.addItem(_itemSelectBrowser);
// hide it, as there have been reports of b0rked behavior on some JVMs.
// specifically, that on XP & sun1.5.0.1, a user launching i2p w/out the
// service wrapper would create netDb/, peerProfiles/, and other files
// underneath each directory browsed to - as if the router's "." directory
// is changing whenever the itemSelectBrowser's JFileChooser changed
// directories. This has not been reproduced or confirmed yet, but is
// pretty scary, and this function isn't too necessary.
//_sysTrayMenu.addItem(_itemSelectBrowser);
_sysTrayMenu.addItem(_itemOpenConsole);
refreshDisplay();
}

View File

@@ -0,0 +1,198 @@
package gnu.crypto.hash;
// ----------------------------------------------------------------------------
// $Id: BaseHash.java,v 1.10 2005/10/06 04:24:14 rsdio Exp $
//
// Copyright (C) 2001, 2002, Free Software Foundation, Inc.
//
// This file is part of GNU Crypto.
//
// GNU Crypto is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2, or (at your option)
// any later version.
//
// GNU Crypto is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; see the file COPYING. If not, write to the
//
// Free Software Foundation Inc.,
// 51 Franklin Street, Fifth Floor,
// Boston, MA 02110-1301
// USA
//
// Linking this library statically or dynamically with other modules is
// making a combined work based on this library. Thus, the terms and
// conditions of the GNU General Public License cover the whole
// combination.
//
// As a special exception, the copyright holders of this library give
// you permission to link this library with independent modules to
// produce an executable, regardless of the license terms of these
// independent modules, and to copy and distribute the resulting
// executable under terms of your choice, provided that you also meet,
// for each linked independent module, the terms and conditions of the
// license of that module. An independent module is a module which is
// not derived from or based on this library. If you modify this
// library, you may extend this exception to your version of the
// library, but you are not obligated to do so. If you do not wish to
// do so, delete this exception statement from your version.
// ----------------------------------------------------------------------------
/**
* <p>A base abstract class to facilitate hash implementations.</p>
*
* @version $Revision: 1.10 $
*/
public abstract class BaseHash implements IMessageDigest {
// Constants and variables
// -------------------------------------------------------------------------
/** The canonical name prefix of the hash. */
protected String name;
/** The hash (output) size in bytes. */
protected int hashSize;
/** The hash (inner) block size in bytes. */
protected int blockSize;
/** Number of bytes processed so far. */
protected long count;
/** Temporary input buffer. */
protected byte[] buffer;
// Constructor(s)
// -------------------------------------------------------------------------
/**
* <p>Trivial constructor for use by concrete subclasses.</p>
*
* @param name the canonical name prefix of this instance.
* @param hashSize the block size of the output in bytes.
* @param blockSize the block size of the internal transform.
*/
protected BaseHash(String name, int hashSize, int blockSize) {
super();
this.name = name;
this.hashSize = hashSize;
this.blockSize = blockSize;
this.buffer = new byte[blockSize];
resetContext();
}
// Class methods
// -------------------------------------------------------------------------
// Instance methods
// -------------------------------------------------------------------------
// IMessageDigest interface implementation ---------------------------------
public String name() {
return name;
}
public int hashSize() {
return hashSize;
}
public int blockSize() {
return blockSize;
}
public void update(byte b) {
// compute number of bytes still unhashed, i.e. present in the buffer
int i = (int)(count % blockSize);
count++;
buffer[i] = b;
if (i == (blockSize - 1)) {
transform(buffer, 0);
}
}
public void update(byte[] b) {
update(b, 0, b.length);
}
public void update(byte[] b, int offset, int len) {
int n = (int)(count % blockSize);
count += len;
int partLen = blockSize - n;
int i = 0;
if (len >= partLen) {
System.arraycopy(b, offset, buffer, n, partLen);
transform(buffer, 0);
for (i = partLen; i + blockSize - 1 < len; i+= blockSize) {
transform(b, offset + i);
}
n = 0;
}
if (i < len) {
System.arraycopy(b, offset + i, buffer, n, len - i);
}
}
public byte[] digest() {
byte[] tail = padBuffer(); // pad remaining bytes in buffer
update(tail, 0, tail.length); // last transform of a message
byte[] result = getResult(); // make a result out of context
reset(); // reset this instance for future re-use
return result;
}
public void reset() { // reset this instance for future re-use
count = 0L;
for (int i = 0; i < blockSize; ) {
buffer[i++] = 0;
}
resetContext();
}
// methods to be implemented by concrete subclasses ------------------------
public abstract Object clone();
public abstract boolean selfTest();
/**
* <p>Returns the byte array to use as padding before completing a hash
* operation.</p>
*
* @return the bytes to pad the remaining bytes in the buffer before
* completing a hash operation.
*/
protected abstract byte[] padBuffer();
/**
* <p>Constructs the result from the contents of the current context.</p>
*
* @return the output of the completed hash operation.
*/
protected abstract byte[] getResult();
/** Resets the instance for future re-use. */
protected abstract void resetContext();
/**
* <p>The block digest transformation per se.</p>
*
* @param in the <i>blockSize</i> long block, as an array of bytes to digest.
* @param offset the index where the data to digest is located within the
* input buffer.
*/
protected abstract void transform(byte[] in, int offset);
}
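To make the buffering in update(byte[], int, int) concrete, here is a worked trace assuming blockSize = 64 (as in SHA-256):

    // with count = 0, a call update(b, 0, 150) proceeds as:
    //   n = 0, partLen = 64    -> copy b[0..63] into buffer, transform(buffer, 0)
    //   i = 64                 -> transform(b, 64) hashes b[64..127] in place
    //   i = 128: 128+63 >= 150 -> loop ends; copy b[128..149] into buffer
    // leaving 22 bytes buffered for the next update() or the final digest()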

View File

@@ -0,0 +1,141 @@
package gnu.crypto.hash;
// ----------------------------------------------------------------------------
// $Id: IMessageDigest.java,v 1.11 2005/10/06 04:24:14 rsdio Exp $
//
// Copyright (C) 2001, 2002, Free Software Foundation, Inc.
//
// This file is part of GNU Crypto.
//
// GNU Crypto is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2, or (at your option)
// any later version.
//
// GNU Crypto is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; see the file COPYING. If not, write to the
//
// Free Software Foundation Inc.,
// 51 Franklin Street, Fifth Floor,
// Boston, MA 02110-1301
// USA
//
// Linking this library statically or dynamically with other modules is
// making a combined work based on this library. Thus, the terms and
// conditions of the GNU General Public License cover the whole
// combination.
//
// As a special exception, the copyright holders of this library give
// you permission to link this library with independent modules to
// produce an executable, regardless of the license terms of these
// independent modules, and to copy and distribute the resulting
// executable under terms of your choice, provided that you also meet,
// for each linked independent module, the terms and conditions of the
// license of that module. An independent module is a module which is
// not derived from or based on this library. If you modify this
// library, you may extend this exception to your version of the
// library, but you are not obligated to do so. If you do not wish to
// do so, delete this exception statement from your version.
// ----------------------------------------------------------------------------
/**
* <p>The basic visible methods of any hash algorithm.</p>
*
* <p>A hash (or message digest) algorithm produces its output by iterating a
* basic compression function on blocks of data.</p>
*
* @version $Revision: 1.11 $
*/
public interface IMessageDigest extends Cloneable {
// Constants
// -------------------------------------------------------------------------
// Methods
// -------------------------------------------------------------------------
/**
* <p>Returns the canonical name of this algorithm.</p>
*
* @return the canonical name of this instance.
*/
String name();
/**
* <p>Returns the output length in bytes of this message digest algorithm.</p>
*
* @return the output length in bytes of this message digest algorithm.
*/
int hashSize();
/**
* <p>Returns the algorithm's (inner) block size in bytes.</p>
*
* @return the algorithm's inner block size in bytes.
*/
int blockSize();
/**
* <p>Continues a message digest operation using the input byte.</p>
*
* @param b the input byte to digest.
*/
void update(byte b);
/**
 * <p>Continues a message digest operation by filling the buffer, processing
 * data in blocks of the algorithm's block size, updating the context and
 * count, and buffering the remaining bytes for the next
 * operation.</p>
*
* @param in the input block.
*/
void update(byte[] in);
/**
 * <p>Continues a message digest operation by filling the buffer, processing
 * data in blocks of the algorithm's block size, updating the context and
 * count, and buffering the remaining bytes for the next
 * operation.</p>
*
* @param in the input block.
* @param offset start of meaningful bytes in input block.
* @param length number of bytes, in input block, to consider.
*/
void update(byte[] in, int offset, int length);
/**
* <p>Completes the message digest by performing final operations such as
* padding and resetting the instance.</p>
*
* @return the array of bytes representing the hash value.
*/
byte[] digest();
/**
 * <p>Resets the current context of this instance, clearing any previously
 * cached intermediate values.</p>
*/
void reset();
/**
* <p>A basic test. Ensures that the digest of a pre-determined message is equal
* to a known pre-computed value.</p>
*
* @return <tt>true</tt> if the implementation passes a basic self-test.
* Returns <tt>false</tt> otherwise.
*/
boolean selfTest();
/**
* <p>Returns a clone copy of this instance.</p>
*
* @return a clone copy of this instance.
*/
Object clone();
}
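
Per the update() contract above, feeding data in arbitrary chunks must be equivalent to feeding it all at once. A small sketch demonstrating that equivalence (class name illustrative):

import gnu.crypto.hash.IMessageDigest;
import gnu.crypto.hash.Sha256Standalone;

public class ChunkedUpdateSketch {
    public static void main(String[] args) {
        IMessageDigest a = new Sha256Standalone();
        IMessageDigest b = new Sha256Standalone();
        byte[] data = "abcdef".getBytes();
        a.update(data);          // all at once
        b.update(data, 0, 3);    // ...or split across two calls
        b.update(data, 3, 3);
        // both digests are identical hashSize() = 32 byte arrays
        System.out.println(java.util.Arrays.equals(a.digest(), b.digest()));
    }
}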

View File

@@ -0,0 +1,276 @@
package gnu.crypto.hash;
// ----------------------------------------------------------------------------
// $Id: Sha256Standalone.java,v 1.1 2006/02/26 16:30:59 jrandom Exp $
//
// Copyright (C) 2003 Free Software Foundation, Inc.
//
// This file is part of GNU Crypto.
//
// GNU Crypto is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2, or (at your option)
// any later version.
//
// GNU Crypto is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; see the file COPYING. If not, write to the
//
// Free Software Foundation Inc.,
// 51 Franklin Street, Fifth Floor,
// Boston, MA 02110-1301
// USA
//
// Linking this library statically or dynamically with other modules is
// making a combined work based on this library. Thus, the terms and
// conditions of the GNU General Public License cover the whole
// combination.
//
// As a special exception, the copyright holders of this library give
// you permission to link this library with independent modules to
// produce an executable, regardless of the license terms of these
// independent modules, and to copy and distribute the resulting
// executable under terms of your choice, provided that you also meet,
// for each linked independent module, the terms and conditions of the
// license of that module. An independent module is a module which is
// not derived from or based on this library. If you modify this
// library, you may extend this exception to your version of the
// library, but you are not obligated to do so. If you do not wish to
// do so, delete this exception statement from your version.
// ----------------------------------------------------------------------------
//import gnu.crypto.util.Util;
/**
* <p>Implementation of SHA2-1 [SHA-256] per the IETF Draft Specification.</p>
*
* <p>References:</p>
* <ol>
* <li><a href="http://ftp.ipv4.heanet.ie/pub/ietf/internet-drafts/draft-ietf-ipsec-ciph-aes-cbc-03.txt">
* Descriptions of SHA-256, SHA-384, and SHA-512</a>,</li>
* <li>http://csrc.nist.gov/cryptval/shs/sha256-384-512.pdf</li>
* </ol>
*
* Modified by jrandom@i2p.net to remove unnecessary gnu-crypto dependencies, and
* renamed from Sha256 to avoid conflicts with JVMs using gnu-crypto as their JCE
* provider.
*
* @version $Revision: 1.1 $
*/
public class Sha256Standalone extends BaseHash {
// Constants and variables
// -------------------------------------------------------------------------
private static final int[] k = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
private static final int BLOCK_SIZE = 64; // inner block size in bytes
private static final String DIGEST0 =
"BA7816BF8F01CFEA414140DE5DAE2223B00361A396177A9CB410FF61F20015AD";
/** static work buffer shared by all instances; the reason sha() below is synchronized. */
private static final int[] w = new int[64];
/** caches the result of the correctness test, once executed. */
private static Boolean valid;
/** 256-bit interim result. */
private int h0, h1, h2, h3, h4, h5, h6, h7;
// Constructor(s)
// -------------------------------------------------------------------------
/** Trivial 0-arguments constructor. */
public Sha256Standalone() {
super("sha256/standalone", 32, BLOCK_SIZE);
}
/**
* <p>Private constructor for cloning purposes.</p>
*
* @param md the instance to clone.
*/
private Sha256Standalone(Sha256Standalone md) {
this();
this.h0 = md.h0;
this.h1 = md.h1;
this.h2 = md.h2;
this.h3 = md.h3;
this.h4 = md.h4;
this.h5 = md.h5;
this.h6 = md.h6;
this.h7 = md.h7;
this.count = md.count;
this.buffer = (byte[]) md.buffer.clone();
}
// Class methods
// -------------------------------------------------------------------------
/*
public static final int[] G(int hh0, int hh1, int hh2, int hh3, int hh4,
int hh5, int hh6, int hh7, byte[] in, int offset) {
return sha(hh0, hh1, hh2, hh3, hh4, hh5, hh6, hh7, in, offset);
}
*/
// Instance methods
// -------------------------------------------------------------------------
// java.lang.Cloneable interface implementation ----------------------------
public Object clone() {
return new Sha256Standalone(this);
}
// Implementation of concrete methods in BaseHash --------------------------
/** scratch space reused across transform() calls to avoid a per-block allocation */
private int transformResult[] = new int[8];
protected void transform(byte[] in, int offset) {
//int[] result = sha(h0, h1, h2, h3, h4, h5, h6, h7, in, offset);
sha(h0, h1, h2, h3, h4, h5, h6, h7, in, offset, transformResult);
h0 = transformResult[0];
h1 = transformResult[1];
h2 = transformResult[2];
h3 = transformResult[3];
h4 = transformResult[4];
h5 = transformResult[5];
h6 = transformResult[6];
h7 = transformResult[7];
}
protected byte[] padBuffer() {
int n = (int) (count % BLOCK_SIZE);
int padding = (n < 56) ? (56 - n) : (120 - n);
byte[] result = new byte[padding + 8];
// padding is always binary 1 followed by binary 0s
result[0] = (byte) 0x80;
// save number of bits, casting the long to an array of 8 bytes
long bits = count << 3;
result[padding++] = (byte)(bits >>> 56);
result[padding++] = (byte)(bits >>> 48);
result[padding++] = (byte)(bits >>> 40);
result[padding++] = (byte)(bits >>> 32);
result[padding++] = (byte)(bits >>> 24);
result[padding++] = (byte)(bits >>> 16);
result[padding++] = (byte)(bits >>> 8);
result[padding ] = (byte) bits;
return result;
}
protected byte[] getResult() {
return new byte[] {
(byte)(h0 >>> 24), (byte)(h0 >>> 16), (byte)(h0 >>> 8), (byte) h0,
(byte)(h1 >>> 24), (byte)(h1 >>> 16), (byte)(h1 >>> 8), (byte) h1,
(byte)(h2 >>> 24), (byte)(h2 >>> 16), (byte)(h2 >>> 8), (byte) h2,
(byte)(h3 >>> 24), (byte)(h3 >>> 16), (byte)(h3 >>> 8), (byte) h3,
(byte)(h4 >>> 24), (byte)(h4 >>> 16), (byte)(h4 >>> 8), (byte) h4,
(byte)(h5 >>> 24), (byte)(h5 >>> 16), (byte)(h5 >>> 8), (byte) h5,
(byte)(h6 >>> 24), (byte)(h6 >>> 16), (byte)(h6 >>> 8), (byte) h6,
(byte)(h7 >>> 24), (byte)(h7 >>> 16), (byte)(h7 >>> 8), (byte) h7
};
}
protected void resetContext() {
// magic SHA-256 initialisation constants
h0 = 0x6a09e667;
h1 = 0xbb67ae85;
h2 = 0x3c6ef372;
h3 = 0xa54ff53a;
h4 = 0x510e527f;
h5 = 0x9b05688c;
h6 = 0x1f83d9ab;
h7 = 0x5be0cd19;
}
public boolean selfTest() {
if (valid == null) {
Sha256Standalone md = new Sha256Standalone();
md.update((byte) 0x61); // a
md.update((byte) 0x62); // b
md.update((byte) 0x63); // c
// gnu.crypto.util.Util was stripped out with the other gnu-crypto
// dependencies, so hex-encode the digest inline rather than comparing
// DIGEST0 against a "broken" placeholder (which made this test always fail)
byte[] hash = md.digest();
StringBuffer hex = new StringBuffer(hash.length * 2);
for (int i = 0; i < hash.length; i++) {
hex.append(Character.toUpperCase(Character.forDigit((hash[i] >>> 4) & 0xF, 16)));
hex.append(Character.toUpperCase(Character.forDigit(hash[i] & 0xF, 16)));
}
valid = Boolean.valueOf(DIGEST0.equals(hex.toString()));
}
return valid.booleanValue();
}
// SHA specific methods ----------------------------------------------------
private static final synchronized void
sha(int hh0, int hh1, int hh2, int hh3, int hh4, int hh5, int hh6, int hh7, byte[] in, int offset, int out[]) {
int A = hh0;
int B = hh1;
int C = hh2;
int D = hh3;
int E = hh4;
int F = hh5;
int G = hh6;
int H = hh7;
int r, T, T2;
for (r = 0; r < 16; r++) {
w[r] = in[offset++] << 24 |
(in[offset++] & 0xFF) << 16 |
(in[offset++] & 0xFF) << 8 |
(in[offset++] & 0xFF);
}
for (r = 16; r < 64; r++) {
T = w[r - 2];
T2 = w[r - 15];
w[r] = (((T >>> 17) | (T << 15)) ^ ((T >>> 19) | (T << 13)) ^ (T >>> 10)) + w[r - 7] + (((T2 >>> 7) | (T2 << 25)) ^ ((T2 >>> 18) | (T2 << 14)) ^ (T2 >>> 3)) + w[r - 16];
}
for (r = 0; r < 64; r++) {
T = H + (((E >>> 6) | (E << 26)) ^ ((E >>> 11) | (E << 21)) ^ ((E >>> 25) | (E << 7))) + ((E & F) ^ (~E & G)) + k[r] + w[r];
T2 = (((A >>> 2) | (A << 30)) ^ ((A >>> 13) | (A << 19)) ^ ((A >>> 22) | (A << 10))) + ((A & B) ^ (A & C) ^ (B & C));
H = G;
G = F;
F = E;
E = D + T;
D = C;
C = B;
B = A;
A = T + T2;
}
/*
return new int[] {
hh0 + A, hh1 + B, hh2 + C, hh3 + D, hh4 + E, hh5 + F, hh6 + G, hh7 + H
};
*/
out[0] = hh0 + A;
out[1] = hh1 + B;
out[2] = hh2 + C;
out[3] = hh3 + D;
out[4] = hh4 + E;
out[5] = hh5 + F;
out[6] = hh6 + G;
out[7] = hh7 + H;
}
}
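
One property worth noting: the private copy constructor above makes clone() cheap, so a caller can hash a common prefix once and branch the state for several suffixes. A hedged sketch (helper name illustrative):

import gnu.crypto.hash.Sha256Standalone;

public class CloneSketch {
    // hash(prefix || suffix) for many suffixes without rehashing the prefix
    public static byte[] hashWithPrefix(Sha256Standalone prefixState, byte[] suffix) {
        Sha256Standalone md = (Sha256Standalone) prefixState.clone();
        md.update(suffix, 0, suffix.length);
        return md.digest(); // digest() resets only the clone, not prefixState
    }
}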

View File

@@ -54,7 +54,7 @@ import java.util.Iterator;
import java.util.Map;
import java.util.HashMap;
import org.bouncycastle.crypto.digests.SHA256Digest;
import gnu.crypto.hash.Sha256Standalone;
import net.i2p.crypto.CryptixRijndael_Algorithm;
import net.i2p.crypto.CryptixAESKeyCache;
@@ -91,7 +91,7 @@ import net.i2p.crypto.CryptixAESKeyCache;
* Bruce Schneier). ISBN 0-471-22357-3.</li>
* </ul>
*
* Modified by jrandom for I2P to use Bouncycastle's SHA256, Cryptix's AES,
* Modified by jrandom for I2P to use a standalone gnu-crypto SHA256, Cryptix's AES,
* to strip out some unnecessary dependencies and increase the buffer size.
* Renamed from Fortuna to FortunaStandalone so it doesn't conflict with the
* gnu-crypto implementation, which has been imported into GNU/classpath
@@ -106,7 +106,7 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
private static final int NUM_POOLS = 32;
private static final int MIN_POOL_SIZE = 64;
private final Generator generator;
private final SHA256Digest[] pools;
private final Sha256Standalone[] pools;
private long lastReseed;
private int pool;
private int pool0Count;
@@ -118,9 +118,9 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
{
super("Fortuna i2p");
generator = new Generator();
pools = new SHA256Digest[NUM_POOLS];
pools = new Sha256Standalone[NUM_POOLS];
for (int i = 0; i < NUM_POOLS; i++)
pools[i] = new SHA256Digest();
pools[i] = new Sha256Standalone();
lastReseed = 0;
pool = 0;
pool0Count = 0;
@@ -143,8 +143,6 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
generator.init(attributes);
}
/** fillBlock is not thread safe, so will be locked anyway */
private byte fillBlockBuf[] = new byte[32];
public void fillBlock()
{
if (pool0Count >= MIN_POOL_SIZE
@@ -155,9 +153,7 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
for (int i = 0; i < NUM_POOLS; i++)
{
if (reseedCount % (1 << i) == 0) {
byte buf[] = fillBlockBuf;//new byte[32];
pools[i].doFinal(buf, 0);
generator.addRandomBytes(buf);//pools[i].digest());
generator.addRandomBytes(pools[i].digest());
}
}
lastReseed = System.currentTimeMillis();
@@ -221,7 +217,7 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
private static final int LIMIT = 1 << 20;
private final SHA256Digest hash;
private final Sha256Standalone hash;
private final byte[] counter;
private final byte[] key;
/** current encryption key built from the keying material */
@@ -232,7 +228,7 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
public Generator ()
{
super("Fortuna.generator.i2p");
this.hash = new SHA256Digest();
this.hash = new Sha256Standalone();
counter = new byte[16]; //cipher.defaultBlockSize()];
buffer = new byte[16]; //cipher.defaultBlockSize()];
int keysize = 32;
@@ -285,9 +281,9 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
{
hash.update(key, 0, key.length);
hash.update(seed, offset, length);
//byte[] newkey = hash.digest();
//System.arraycopy(newkey, 0, key, 0, Math.min(key.length, newkey.length));
hash.doFinal(key, 0);
byte[] newkey = hash.digest();
System.arraycopy(newkey, 0, key, 0, Math.min(key.length, newkey.length));
//hash.doFinal(key, 0);
resetKey();
incrementCounter();
seeded = true;
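
The hunks above swap Bouncycastle's SHA256Digest for Sha256Standalone in Fortuna's 32 entropy pools; the reseed schedule they feed is unchanged. A restatement of that schedule (sketch, names illustrative):

public class ReseedScheduleSketch {
    // mirrors "if (reseedCount % (1 << i) == 0)" in fillBlock() above
    static boolean poolContributes(int poolIndex, long reseedCount) {
        return reseedCount % (1L << poolIndex) == 0;
    }
    public static void main(String[] args) {
        // at reseed 6: pool 0 fires (every time), pool 1 fires (every 2nd),
        // pools 2 and 3 stay untouched (every 4th and 8th)
        for (int i = 0; i < 4; i++)
            System.out.println("pool " + i + ": " + poolContributes(i, 6));
    }
}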

View File

@@ -14,8 +14,8 @@ package net.i2p;
*
*/
public class CoreVersion {
public final static String ID = "$Revision: 1.53 $ $Date: 2006/02/16 15:44:09 $";
public final static String VERSION = "0.6.1.11";
public final static String ID = "$Revision: 1.60 $ $Date: 2006/04/23 16:06:13 $";
public final static String VERSION = "0.6.1.18";
public static void main(String args[]) {
System.out.println("I2P Core version: " + VERSION);

View File

@@ -11,11 +11,10 @@ import net.i2p.crypto.CryptixAESEngine;
import net.i2p.crypto.DSAEngine;
import net.i2p.crypto.DummyDSAEngine;
import net.i2p.crypto.DummyElGamalEngine;
import net.i2p.crypto.DummyHMACSHA256Generator;
import net.i2p.crypto.DummyPooledRandomSource;
import net.i2p.crypto.ElGamalAESEngine;
import net.i2p.crypto.ElGamalEngine;
import net.i2p.crypto.HMACSHA256Generator;
import net.i2p.crypto.HMACGenerator;
import net.i2p.crypto.KeyGenerator;
import net.i2p.crypto.PersistentSessionKeyManager;
import net.i2p.crypto.SHA256Generator;
@@ -67,7 +66,7 @@ public class I2PAppContext {
private ElGamalAESEngine _elGamalAESEngine;
private AESEngine _AESEngine;
private LogManager _logManager;
private HMACSHA256Generator _hmac;
private HMACGenerator _hmac;
private SHA256Generator _sha;
private Clock _clock;
private DSAEngine _dsa;
@@ -342,17 +341,14 @@ public class I2PAppContext {
* other than for consistency, and perhaps later we'll want to
* include some stats.
*/
public HMACSHA256Generator hmac() {
public HMACGenerator hmac() {
if (!_hmacInitialized) initializeHMAC();
return _hmac;
}
private void initializeHMAC() {
synchronized (this) {
if (_hmac == null) {
if ("true".equals(getProperty("i2p.fakeHMAC", "false")))
_hmac = new DummyHMACSHA256Generator(this);
else
_hmac= new HMACSHA256Generator(this);
_hmac= new HMACGenerator(this);
}
_hmacInitialized = true;
}

View File

@@ -1,52 +0,0 @@
package net.i2p.crypto;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.List;
import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.SessionKey;
/**
* Calculate the HMAC-SHA256 of a key+message. All the good stuff occurs
* in {@link org.bouncycastle.crypto.macs.HMac} and
* {@link org.bouncycastle.crypto.digests.SHA256Digest}.
*
*/
public class DummyHMACSHA256Generator extends HMACSHA256Generator {
private I2PAppContext _context;
public DummyHMACSHA256Generator(I2PAppContext context) {
super(context);
_context = context;
}
public static HMACSHA256Generator getInstance() {
return I2PAppContext.getGlobalContext().hmac();
}
/**
* Calculate the HMAC of the data with the given key
*/
public Hash calculate(SessionKey key, byte data[]) {
if ((key == null) || (key.getData() == null) || (data == null))
throw new NullPointerException("Null arguments for HMAC");
return calculate(key, data, 0, data.length);
}
/**
* Calculate the HMAC of the data with the given key
*/
public Hash calculate(SessionKey key, byte data[], int offset, int length) {
if ((key == null) || (key.getData() == null) || (data == null))
throw new NullPointerException("Null arguments for HMAC");
byte rv[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(key.getData(), 0, rv, 0, Hash.HASH_LENGTH);
if (Hash.HASH_LENGTH >= length)
DataHelper.xor(data, offset, rv, 0, rv, 0, length);
else
DataHelper.xor(data, offset, rv, 0, rv, 0, Hash.HASH_LENGTH);
return new Hash(rv);
}
}

View File

@@ -8,46 +8,26 @@ import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.SessionKey;
import org.bouncycastle.crypto.digests.SHA256Digest;
import org.bouncycastle.crypto.digests.MD5Digest;
import org.bouncycastle.crypto.macs.HMac;
/**
* Calculate the HMAC-SHA256 of a key+message. All the good stuff occurs
* Calculate the HMAC-MD5 of a key+message. All the good stuff occurs
* in {@link org.bouncycastle.crypto.macs.HMac} and
* {@link org.bouncycastle.crypto.digests.SHA256Digest}. Alternately, if
* the context property "i2p.HMACMD5" is set to true, then this whole HMAC
* generator will be transformed into HMACMD5, maintaining the same size and
* using {@link org.bouncycastle.crypto.digests.MD5Digest}.
* {@link org.bouncycastle.crypto.digests.MD5Digest}.
*
*/
public class HMACSHA256Generator {
public class HMACGenerator {
private I2PAppContext _context;
/** set of available HMAC instances for calculate */
private List _available;
/** set of available byte[] buffers for verify */
private List _availableTmp;
private boolean _useMD5;
private int _macSize;
public static final boolean DEFAULT_USE_MD5 = true;
public HMACSHA256Generator(I2PAppContext context) {
public HMACGenerator(I2PAppContext context) {
_context = context;
_available = new ArrayList(32);
_availableTmp = new ArrayList(32);
if ("true".equals(context.getProperty("i2p.HMACMD5", Boolean.toString(DEFAULT_USE_MD5).toLowerCase())))
_useMD5 = true;
else
_useMD5 = false;
if ("true".equals(context.getProperty("i2p.HMACBrokenSize", "false")))
_macSize = 32;
else
_macSize = (_useMD5 ? 16 : 32);
}
public static HMACSHA256Generator getInstance() {
return I2PAppContext.getGlobalContext().hmac();
}
/**
@@ -61,24 +41,6 @@ public class HMACSHA256Generator {
return new Hash(rv);
}
/**
* Calculate the HMAC of the data with the given key
*/
/*
public Hash calculate(SessionKey key, byte data[], int offset, int length) {
if ((key == null) || (key.getData() == null) || (data == null))
throw new NullPointerException("Null arguments for HMAC");
HMac mac = acquire();
mac.init(key.getData());
mac.update(data, offset, length);
byte rv[] = new byte[Hash.HASH_LENGTH];
mac.doFinal(rv, 0);
release(mac);
return new Hash(rv);
}
*/
/**
* Calculate the HMAC of the data with the given key
*/
@@ -131,10 +93,7 @@ public class HMACSHA256Generator {
// the HMAC is hardcoded to use SHA256 digest size
// for backwards compatibility. next time we have a backwards
// incompatible change, we should update this by removing ", 32"
if (_useMD5)
return new HMac(new MD5Digest(), 32);
else
return new HMac(new SHA256Digest(), 32);
return new HMac(new MD5Digest(), 32);
}
private void release(HMac mac) {
synchronized (_available) {
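
After this change the generator always produces HMAC-MD5, while keeping a forced 32-byte MAC size for wire compatibility (the hardcoded ", 32" discussed in the comment above). For contrast, a plain HMAC-MD5 via the standard JDK API, which yields the usual 16 bytes:

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;

public class HmacMd5Sketch {
    public static byte[] hmacMd5(byte[] key, byte[] data) throws Exception {
        Mac mac = Mac.getInstance("HmacMD5");
        mac.init(new SecretKeySpec(key, "HmacMD5"));
        return mac.doFinal(data); // 16 bytes; no 32-byte size quirk here
    }
}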

View File

@@ -7,17 +7,19 @@ import net.i2p.I2PAppContext;
import net.i2p.data.Base64;
import net.i2p.data.Hash;
import org.bouncycastle.crypto.digests.SHA256Digest;
import gnu.crypto.hash.Sha256Standalone;
/**
* Defines a wrapper for SHA-256 operation. All the good stuff occurs
* in the Bouncycastle {@link org.bouncycastle.crypto.digests.SHA256Digest}
* in the GNU-Crypto {@link gnu.crypto.hash.Sha256Standalone}
*
*/
public final class SHA256Generator {
private List _digests;
private List _digestsGnu;
public SHA256Generator(I2PAppContext context) {
_digests = new ArrayList(32);
_digestsGnu = new ArrayList(32);
}
public static final SHA256Generator getInstance() {
@@ -32,47 +34,44 @@ public final class SHA256Generator {
return calculateHash(source, 0, source.length);
}
public final Hash calculateHash(byte[] source, int start, int len) {
byte rv[] = new byte[Hash.HASH_LENGTH];
calculateHash(source, start, len, rv, 0);
Sha256Standalone digest = acquireGnu();
digest.update(source, start, len);
byte rv[] = digest.digest();
releaseGnu(digest);
return new Hash(rv);
}
public final void calculateHash(byte[] source, int start, int len, byte out[], int outOffset) {
SHA256Digest digest = acquire();
Sha256Standalone digest = acquireGnu();
digest.update(source, start, len);
digest.doFinal(out, outOffset);
release(digest);
byte rv[] = digest.digest();
releaseGnu(digest);
System.arraycopy(rv, 0, out, outOffset, rv.length);
}
private SHA256Digest acquire() {
SHA256Digest rv = null;
synchronized (_digests) {
if (_digests.size() > 0)
rv = (SHA256Digest)_digests.remove(0);
private Sha256Standalone acquireGnu() {
Sha256Standalone rv = null;
synchronized (_digestsGnu) {
if (_digestsGnu.size() > 0)
rv = (Sha256Standalone)_digestsGnu.remove(0);
}
if (rv != null)
rv.reset();
else
rv = new SHA256Digest();
rv = new Sha256Standalone();
return rv;
}
private void release(SHA256Digest digest) {
synchronized (_digests) {
if (_digests.size() < 32) {
_digests.add(digest);
private void releaseGnu(Sha256Standalone digest) {
synchronized (_digestsGnu) {
if (_digestsGnu.size() < 32) {
_digestsGnu.add(digest);
}
}
}
public static void main(String args[]) {
I2PAppContext ctx = I2PAppContext.getGlobalContext();
byte orig[] = new byte[4096];
ctx.random().nextBytes(orig);
Hash old = ctx.sha().calculateHash(orig);
SHA256Digest d = new SHA256Digest();
d.update(orig, 0, orig.length);
byte out[] = new byte[Hash.HASH_LENGTH];
d.doFinal(out, 0);
System.out.println("eq? " + net.i2p.data.DataHelper.eq(out, old.getData()));
for (int i = 0; i < args.length; i++)
System.out.println("SHA256 [" + args[i] + "] = [" + Base64.encode(ctx.sha().calculateHash(args[i].getBytes()).getData()) + "]");
}

View File

@@ -29,6 +29,8 @@ public class BufferedStatLog implements StatLog {
private String _lastFilters;
private BufferedWriter _out;
private String _outFile;
/** short circuit for adding data, set to true if some filters are set, false if it's empty (so we can skip the sync) */
private volatile boolean _filtersSpecified;
private static final int BUFFER_SIZE = 1024;
private static final boolean DISABLE_LOGGING = false;
@@ -44,6 +46,7 @@ public class BufferedStatLog implements StatLog {
_lastWrite = _events.length-1;
_statFilters = new ArrayList(10);
_flushFrequency = 500;
_filtersSpecified = false;
I2PThread writer = new I2PThread(new StatLogWriter(), "StatLogWriter");
writer.setDaemon(true);
writer.start();
@@ -51,6 +54,7 @@ public class BufferedStatLog implements StatLog {
public void addData(String scope, String stat, long value, long duration) {
if (DISABLE_LOGGING) return;
if (!shouldLog(stat)) return;
synchronized (_events) {
_events[_eventNext].init(scope, stat, value, duration);
_eventNext = (_eventNext + 1) % _events.length;
@@ -72,6 +76,7 @@ public class BufferedStatLog implements StatLog {
}
private boolean shouldLog(String stat) {
if (!_filtersSpecified) return false;
synchronized (_statFilters) {
return _statFilters.contains(stat) || _statFilters.contains("*");
}
@@ -88,11 +93,18 @@ public class BufferedStatLog implements StatLog {
_statFilters.clear();
while (tok.hasMoreTokens())
_statFilters.add(tok.nextToken().trim());
if (_statFilters.size() > 0)
_filtersSpecified = true;
else
_filtersSpecified = false;
}
}
_lastFilters = val;
} else {
synchronized (_statFilters) { _statFilters.clear(); }
synchronized (_statFilters) {
_statFilters.clear();
_filtersSpecified = false;
}
}
String filename = _context.getProperty(StatManager.PROP_STAT_FILE);
@@ -146,7 +158,7 @@ public class BufferedStatLog implements StatLog {
updateFilters();
int cur = start;
while (cur != end) {
if (shouldLog(_events[cur].getStat())) {
//if (shouldLog(_events[cur].getStat())) {
String when = null;
synchronized (_fmt) {
when = _fmt.format(new Date(_events[cur].getTime()));
@@ -164,7 +176,7 @@ public class BufferedStatLog implements StatLog {
_out.write(" ");
_out.write(Long.toString(_events[cur].getDuration()));
_out.write("\n");
}
//}
cur = (cur + 1) % _events.length;
}
_out.flush();
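
The change above adds a volatile boolean consulted before taking the _statFilters lock, so the hot addData() path skips synchronization entirely when no filters are configured. The pattern in isolation (sketch, names illustrative): readers check the volatile flag first; writers update it while holding the lock that guards the real state.

import java.util.HashSet;
import java.util.Set;

class FilterGuard {
    private final Set _filters = new HashSet();
    private volatile boolean _filtersSpecified = false;

    boolean shouldLog(String stat) {
        if (!_filtersSpecified) return false; // common case: no lock taken
        synchronized (_filters) {
            return _filters.contains(stat) || _filters.contains("*");
        }
    }
    void setFilters(Set newFilters) {
        synchronized (_filters) {
            _filters.clear();
            _filters.addAll(newFilters);
            _filtersSpecified = !_filters.isEmpty();
        }
    }
}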

View File

@@ -26,6 +26,8 @@ public class Rate {
private volatile double _lifetimeTotalValue;
private volatile long _lifetimeEventCount;
private volatile long _lifetimeTotalEventTime;
private RateSummaryListener _summaryListener;
private RateStat _stat;
private volatile long _lastCoalesceDate;
private long _creationDate;
@@ -108,6 +110,9 @@ public class Rate {
public long getPeriod() {
return _period;
}
public RateStat getRateStat() { return _stat; }
public void setRateStat(RateStat rs) { _stat = rs; }
/**
*
@@ -175,22 +180,26 @@ public class Rate {
}
}
/** 2s is plenty of slack to deal with slow coalescing (across many stats) */
private static final int SLACK = 2000;
public void coalesce() {
long now = now();
synchronized (_lock) {
long measuredPeriod = now - _lastCoalesceDate;
if (measuredPeriod < _period) {
// no need to coalesce
if (measuredPeriod < _period - SLACK) {
// no need to coalesce (assuming we only try to do so once per minute)
if (_log.shouldLog(Log.WARN))
_log.warn("not coalescing, measuredPeriod = " + measuredPeriod + " period = " + _period);
return;
}
// ok ok, lets coalesce
// how much were we off by? (so that we can sample down the measured values)
double periodFactor = measuredPeriod / _period;
_lastTotalValue = (_currentTotalValue == 0 ? 0.0D : _currentTotalValue / periodFactor);
_lastEventCount = (_currentEventCount == 0 ? 0L : (long) (_currentEventCount / periodFactor));
_lastTotalEventTime = (_currentTotalEventTime == 0 ? 0L : (long) (_currentTotalEventTime / periodFactor));
double periodFactor = measuredPeriod / (double)_period;
_lastTotalValue = _currentTotalValue / periodFactor;
_lastEventCount = (long) ( (_currentEventCount + periodFactor - 1) / periodFactor);
_lastTotalEventTime = (long) (_currentTotalEventTime / periodFactor);
_lastCoalesceDate = now;
if (_lastTotalValue > _extremeTotalValue) {
@@ -203,8 +212,13 @@ public class Rate {
_currentEventCount = 0;
_currentTotalEventTime = 0;
}
if (_summaryListener != null)
_summaryListener.add(_lastTotalValue, _lastEventCount, _lastTotalEventTime, _period);
}
public void setSummaryListener(RateSummaryListener listener) { _summaryListener = listener; }
public RateSummaryListener getSummaryListener() { return _summaryListener; }
/** what was the average value across the events in the last period? */
public double getAverageValue() {
if ((_lastTotalValue != 0) && (_lastEventCount > 0))
@@ -237,10 +251,12 @@ public class Rate {
*/
public double getLastEventSaturation() {
if ((_lastEventCount > 0) && (_lastTotalEventTime > 0)) {
double eventTime = (double) _lastTotalEventTime / (double) _lastEventCount;
/*double eventTime = (double) _lastTotalEventTime / (double) _lastEventCount;
double maxEvents = _period / eventTime;
double saturation = _lastEventCount / maxEvents;
return saturation;
*/
return ((double)_lastTotalEventTime) / (double)_period;
}
return 0.0D;
@@ -417,6 +433,7 @@ public class Rate {
public boolean equals(Object obj) {
if ((obj == null) || (obj.getClass() != Rate.class)) return false;
if (obj == this) return true;
Rate r = (Rate) obj;
return _period == r.getPeriod() && _creationDate == r.getCreationDate() &&
//_lastCoalesceDate == r.getLastCoalesceDate() &&
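
The key fix above is measuredPeriod / (double)_period: with the old integer division, any coalesce arriving less than twice the nominal period late got a periodFactor of 1 and was never scaled down. A worked example:

public class CoalesceSketch {
    public static void main(String[] args) {
        long measuredPeriod = 90000, period = 60000; // 60s rate coalesced after 90s
        double broken = measuredPeriod / period;          // long division: 1.0, no scaling
        double fixed = measuredPeriod / (double) period;  // 1.5
        long events = 3;
        // round up so a nonzero event count never scales to zero
        long scaled = (long) ((events + fixed - 1) / fixed); // 2
        System.out.println(broken + " vs " + fixed + "; 3 events -> " + scaled);
    }
}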

View File

@@ -27,8 +27,10 @@ public class RateStat {
_description = description;
_groupName = group;
_rates = new Rate[periods.length];
for (int i = 0; i < periods.length; i++)
for (int i = 0; i < periods.length; i++) {
_rates[i] = new Rate(periods[i]);
_rates[i].setRateStat(this);
}
}
public void setStatLog(StatLog sl) { _statLog = sl; }
@@ -159,6 +161,7 @@ public class RateStat {
_rates[i].load(props, curPrefix, treatAsCurrent);
} catch (IllegalArgumentException iae) {
_rates[i] = new Rate(period);
_rates[i].setRateStat(this);
if (_log.shouldLog(Log.WARN))
_log.warn("Rate for " + prefix + " is corrupt, reinitializing that period");
}

View File

@@ -0,0 +1,14 @@
package net.i2p.stat;
/**
* Receive the state of the rate when it is coalesced
*/
public interface RateSummaryListener {
/**
* @param totalValue sum of all event values in the most recent period
* @param eventCount how many events occurred
* @param totalEventTime how long the events were running for
* @param period how long this period is
*/
void add(double totalValue, long eventCount, double totalEventTime, long period);
}
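
Rate.setSummaryListener() (added above) invokes this callback once per coalesce with the just-closed period's totals. A trivial implementation (sketch):

import net.i2p.stat.RateSummaryListener;

class LoggingSummaryListener implements RateSummaryListener {
    public void add(double totalValue, long eventCount, double totalEventTime, long period) {
        System.out.println(eventCount + " events totaling " + totalValue
                           + " over " + period + "ms");
    }
}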

View File

@@ -146,15 +146,15 @@ public class DecayingBloomFilter {
for (int i = 0; i < _extenders.length; i++)
DataHelper.xor(entry, offset, _extenders[i], 0, _extended, _entryBytes * (i+1), _entryBytes);
boolean seen = _current.member(_extended);
seen = seen || _previous.member(_extended);
boolean seen = _current.locked_member(_extended);
seen = seen || _previous.locked_member(_extended);
if (seen) {
_currentDuplicates++;
return true;
} else {
if (addIfNew) {
_current.insert(_extended);
_previous.insert(_extended);
_current.locked_insert(_extended);
_previous.locked_insert(_extended);
}
return false;
}
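
The rename to locked_member()/locked_insert() above reflects a naming convention: this path already runs under the filter's lock, so it calls variants that assume the monitor is held rather than re-synchronizing per call. The convention in miniature (sketch, names illustrative):

import java.util.HashSet;
import java.util.Set;

class LockedConvention {
    private final Set _entries = new HashSet();
    public boolean add(Object key) {
        synchronized (_entries) {
            if (locked_member(key)) return true; // seen before
            locked_insert(key);
            return false;                        // newly added
        }
    }
    // locked_* methods assume the caller already holds the monitor
    private boolean locked_member(Object key) { return _entries.contains(key); }
    private void locked_insert(Object key) { _entries.add(key); }
}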

View File

@@ -157,11 +157,16 @@ public class FortunaRandomSource extends RandomSource implements EntropyHarveste
* through 2^numBits-1
*/
protected synchronized int nextBits(int numBits) {
int rv = 0;
long rv = 0;
int bytes = (numBits + 7) / 8;
for (int i = 0; i < bytes; i++)
rv += ((_fortuna.nextByte() & 0xFF) << i*8);
return rv;
//rv >>>= (64-numBits);
if (rv < 0)
rv = 0 - rv;
int off = 8*bytes - numBits;
rv >>>= off;
return (int)rv;
}
public EntropyHarvester harvester() { return this; }
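
The fix above accumulates whole bytes into a long, then shifts off the excess low bits so exactly numBits remain; the old version could return more bits than requested (and a negative int). A worked example for numBits = 12:

public class NextBitsSketch {
    public static void main(String[] args) {
        int numBits = 12;
        int bytes = (numBits + 7) / 8;   // 2 bytes read
        long rv = 0xBEEFL;               // stand-in for those two random bytes
        int off = 8 * bytes - numBits;   // 4 excess bits
        rv >>>= off;                     // keep the top 12 bits
        System.out.println(Long.toHexString(rv)); // prints "bee"
    }
}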

View File

@@ -1,292 +0,0 @@
package org.bouncycastle.crypto.digests;
/*
* Copyright (c) 2000 - 2004 The Legion Of The Bouncy Castle
* (http://www.bouncycastle.org)
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software
* without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
/**
* FIPS 180-2 implementation of SHA-256.
*
* <pre>
* block word digest
* SHA-1 512 32 160
* SHA-256 512 32 256
* SHA-384 1024 64 384
* SHA-512 1024 64 512
* </pre>
*/
public class SHA256Digest
extends GeneralDigest
{
private static final int DIGEST_LENGTH = 32;
private int H1, H2, H3, H4, H5, H6, H7, H8;
private int[] X = new int[64];
private int xOff;
/**
* Standard constructor
*/
public SHA256Digest()
{
reset();
}
/**
* Copy constructor. This will copy the state of the provided
* message digest.
*/
public SHA256Digest(SHA256Digest t)
{
super(t);
H1 = t.H1;
H2 = t.H2;
H3 = t.H3;
H4 = t.H4;
H5 = t.H5;
H6 = t.H6;
H7 = t.H7;
H8 = t.H8;
System.arraycopy(t.X, 0, X, 0, t.X.length);
xOff = t.xOff;
}
public String getAlgorithmName()
{
return "SHA-256";
}
public int getDigestSize()
{
return DIGEST_LENGTH;
}
protected void processWord(
byte[] in,
int inOff)
{
X[xOff++] = ((in[inOff] & 0xff) << 24) | ((in[inOff + 1] & 0xff) << 16)
| ((in[inOff + 2] & 0xff) << 8) | ((in[inOff + 3] & 0xff));
if (xOff == 16)
{
processBlock();
}
}
private void unpackWord(
int word,
byte[] out,
int outOff)
{
out[outOff] = (byte)(word >>> 24);
out[outOff + 1] = (byte)(word >>> 16);
out[outOff + 2] = (byte)(word >>> 8);
out[outOff + 3] = (byte)word;
}
protected void processLength(
long bitLength)
{
if (xOff > 14)
{
processBlock();
}
X[14] = (int)(bitLength >>> 32);
X[15] = (int)(bitLength & 0xffffffff);
}
public int doFinal(
byte[] out,
int outOff)
{
finish();
unpackWord(H1, out, outOff);
unpackWord(H2, out, outOff + 4);
unpackWord(H3, out, outOff + 8);
unpackWord(H4, out, outOff + 12);
unpackWord(H5, out, outOff + 16);
unpackWord(H6, out, outOff + 20);
unpackWord(H7, out, outOff + 24);
unpackWord(H8, out, outOff + 28);
reset();
return DIGEST_LENGTH;
}
/**
* reset the chaining variables
*/
public void reset()
{
super.reset();
/* SHA-256 initial hash value
* The first 32 bits of the fractional parts of the square roots
* of the first eight prime numbers
*/
H1 = 0x6a09e667;
H2 = 0xbb67ae85;
H3 = 0x3c6ef372;
H4 = 0xa54ff53a;
H5 = 0x510e527f;
H6 = 0x9b05688c;
H7 = 0x1f83d9ab;
H8 = 0x5be0cd19;
xOff = 0;
for (int i = 0; i != X.length; i++)
{
X[i] = 0;
}
}
protected void processBlock()
{
//
// expand 16 word block into 64 word blocks.
//
for (int t = 16; t <= 63; t++)
{
X[t] = Theta1(X[t - 2]) + X[t - 7] + Theta0(X[t - 15]) + X[t - 16];
}
//
// set up working variables.
//
int a = H1;
int b = H2;
int c = H3;
int d = H4;
int e = H5;
int f = H6;
int g = H7;
int h = H8;
for (int t = 0; t <= 63; t++)
{
int T1, T2;
T1 = h + Sum1(e) + Ch(e, f, g) + K[t] + X[t];
T2 = Sum0(a) + Maj(a, b, c);
h = g;
g = f;
f = e;
e = d + T1;
d = c;
c = b;
b = a;
a = T1 + T2;
}
H1 += a;
H2 += b;
H3 += c;
H4 += d;
H5 += e;
H6 += f;
H7 += g;
H8 += h;
//
// reset the offset and clean out the word buffer.
//
xOff = 0;
for (int i = 0; i != X.length; i++)
{
X[i] = 0;
}
}
private int rotateRight(
int x,
int n)
{
return (x >>> n) | (x << (32 - n));
}
/* SHA-256 functions */
private int Ch(
int x,
int y,
int z)
{
return ((x & y) ^ ((~x) & z));
}
private int Maj(
int x,
int y,
int z)
{
return ((x & y) ^ (x & z) ^ (y & z));
}
private int Sum0(
int x)
{
return rotateRight(x, 2) ^ rotateRight(x, 13) ^ rotateRight(x, 22);
}
private int Sum1(
int x)
{
return rotateRight(x, 6) ^ rotateRight(x, 11) ^ rotateRight(x, 25);
}
private int Theta0(
int x)
{
return rotateRight(x, 7) ^ rotateRight(x, 18) ^ (x >>> 3);
}
private int Theta1(
int x)
{
return rotateRight(x, 17) ^ rotateRight(x, 19) ^ (x >>> 10);
}
/* SHA-256 Constants
* (represent the first 32 bits of the fractional parts of the
* cube roots of the first sixty-four prime numbers)
*/
static final int K[] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
}

View File

@@ -1,4 +1,324 @@
$Id: history.txt,v 1.413 2006/02/21 08:31:25 jrandom Exp $
$Id: history.txt,v 1.471 2006/05/07 22:19:46 complication Exp $
* 2006-05-09 0.6.1.18 released
2006-05-09 jrandom
* Further tunnel creation timeout revamp
2006-05-07 Complication
* Fix problem whereby repeated calls to allowed() would make
the 1-tunnel exception permit more than one concurrent build
2006-05-06 jrandom
* Readjust the tunnel creation timeouts to reject less but fail earlier,
while tracking the extended timeout events.
2006-05-04 jrandom
* Short circuit a highly congested part of the stat logging unless its
required (may or may not help with a synchronization issue reported by
andreas)
2006-05-03 Complication
* Allow a single build attempt to proceed despite 1-minute overload
only if the 1-second rate shows enough spare bandwidth
(e.g. overload has already eased)
2006-05-02 Complication
* Correct a misnamed property in SummaryHelper.java
to avoid confusion
* Make the maximum allowance of our own concurrent
tunnel builds slightly adaptive: one concurrent build per 6 KB/s
within the fixed range 2..10
* While overloaded, try to avoid completely choking our own build attempts,
instead prefer limiting them to 1
2006-05-01 jrandom
* Adjust the tunnel build timeouts to cut down on expirations, and
increased the SSU connection establishment retransmission rate to
something less glacial.
* For the first 5 minutes of uptime, be less aggressive with tunnel
exploration, opting for more reliable peers to start with.
2006-05-01 jrandom
* Fix for a netDb lookup race (thanks cervantes!)
2006-04-27 jrandom
* Avoid a race in the message reply registry (thanks cervantes!)
2006-04-27 jrandom
* Fixed the tunnel expiration desync code (thanks Complication!)
* 2006-04-23 0.6.1.17 released
2006-04-19 jrandom
* Adjust how we pick high capacity peers to allow the inclusion of fast
peers (the previous filter assumed an old usage pattern)
* New set of stats to help track per-packet-type bandwidth usage better
* Cut out the proactive tail drop from the SSU transport, for now
* Reduce the frequency of tunnel build attempts while we're saturated
* Don't drop tunnel requests as easily - prefer to explicitly reject them
* 2006-04-15 0.6.1.16 released
2006-04-15 jrandom
* Adjust the proactive tunnel request dropping so we will reject what we
can instead of dropping so much (but still dropping if we get too far
overloaded)
2006-04-14 jrandom
* 0 isn't very random
* Adjust the tunnel drop to be more reasonable
2006-04-14 jrandom
* -28.00230115311259 is not between 0 and 1 in any universe I know.
* Made the bw-related tunnel join throttle much simpler
2006-04-14 jrandom
* Make some more stats graphable, and allow some internal tweaking on the
tunnel pairing for creation and testing.
* 2006-04-13 0.6.1.15 released
2006-04-12 jrandom
* Added a further failsafe against trying to queue up too many messages to
a peer.
2006-04-12 jrandom
* Watch out for failed syndie index fetches (thanks bar!)
2006-04-11 jrandom
* Throttling improvements on SSU - throttle all transmissions to a peer
when we are retransmitting, not just retransmissions. Also, if
we're already retransmitting to a peer, probabilistically tail drop new
messages targeting that peer, based on the estimated wait time before
transmission.
* Fixed the rounding error in the inbound tunnel drop probability.
2006-04-10 jrandom
* Include a combined send/receive graph (good idea cervantes!)
* Proactively drop inbound tunnel requests probabilistically as the
estimated queue time approaches our limit, rather than letting them all
through up to that limit.
2006-04-08 jrandom
* Stat summarization fix (removing the occasional holes in the jrobin
graphs)
2006-04-08 jrandom
* Process inbound tunnel requests more efficiently
* Proactively drop inbound tunnel requests if the queue before we'd
process it in is too long (dynamically adjusted by cpu load)
* Adjust the tunnel rejection throttle to reject requests when we have to
proactively drop too many requests.
* Display the number of pending inbound tunnel join requests on the router
console (as the "handle backlog")
* Include a few more stats in the default set of graphs
2006-04-06 jrandom
* Fix for a bug in the new irc ping/pong filter (thanks Complication!)
2006-04-06 jrandom
* Fixed a typo in the reply cleanup code
* 2006-04-05 0.6.1.14 released
2006-04-05 jrandom
* Cut down on the time that we allow a tunnel creation request to sit by
without response, and reject tunnel creation requests that are lagged
locally. Also switch to a bounded FIFO instead of a LIFO
* Threading tweaks for the message handling (thanks bar!)
* Don't add addresses to syndie with blank names (thanks Complication!)
* Further ban clearance
2006-04-05 jrandom
* Fix during the ssu handshake to avoid an unnecessary failure on
packet retransmission (thanks ripple!)
* Fix during the SSU handshake to use the negotiated session key asap,
rather than using the intro key for more than we should (thanks ripple!)
* Fixes to the message reply registry (thanks Complication!)
* More comprehensive syndie banning (for repeated pushes)
* Publish the router's ballpark bandwidth limit (w/in a power of 2), for
testing purposes
* Put a floor back on the capacity threshold, so too many failing peers
won't cause us to pick very bad peers (unless we have very few good
ones)
* Bugfix to cut down on peers using introducers unnecessarily (thanks
Complication!)
* Reduced the default streaming lib message size to fit into a single
tunnel message, rather than require 5 tunnel messages to be transferred
without loss before recomposition. This reduces throughput, but should
increase reliability, at least for the time being.
* Misc small bugfixes in the router (thanks all!)
* More tweaking for Syndie's CSS (thanks Doubtful Salmon!)
2006-04-01 jrandom
* Take out the router watchdog's teeth (don't restart on leaseset failure)
* Filter the IRC ping/pong messages, as some clients send unsafe
information in them (thanks aardvax and dust!)
2006-03-30 jrandom
* Substantially reduced the lock contention in the message registry (a
major hotspot that can choke most threads). Also reworked the locking
so we don't need per-message timer events
* No need to have additional per-peer message clearing, as they are
either unregistered individually or expired.
* Include some of the more transient tunnel throttling
* 2006-03-26 0.6.1.13 released
2006-03-25 jrandom
* Added a simple purge and ban of syndie authors, shown as the
"Purge and ban" button on the addressbook for authors that are already
on the ignore list. All of their entries and metadata are deleted from
the archive, and they are transparently filtered from any remote
syndication (so no user on the syndie instance will pull any new posts
from them)
* More strict tunnel join throttling when congested
2006-03-24 jrandom
* Try to desync tunnel building near startup (thanks Complication!)
* If we are highly congested, fall back on only querying the floodfill
netDb peers, and only storing to those peers too
* Cleaned up the floodfill-only queries
2006-03-21 jrandom
* Avoid a very strange (unconfirmed) bug that people using the systray's
browser picker dialog could cause by disabling the GUI-based browser
picker.
* Cut down on subsequent streaming lib reset packets transmitted
* Use a larger MTU more often
* Allow netDb searches to query shitlisted peers, as the queries are
indirect.
* Add an option to disable non-floodfill netDb searches (non-floodfill
searches are used by default, but can be disabled by adding
netDb.floodfillOnly=true to the advanced config)
2006-03-20 jrandom
* Fix to allow for some slack when coalescing stats
* Workaround some oddball errors
2006-03-18 jrandom
* Added a new graphs.jsp page to show all of the stats being harvested
2006-03-18 jrandom
* Made the netDb search load limitations a little less stringent
* Add support for specifying the number of periods to be plotted on the
graphs - e.g. to plot only the last hour of a stat that is averaged at
the 60 second period, add &periodCount=60
2006-03-17 jrandom
* Add support for graphing the event count as well as the average stat
value (done by adding &showEvents=true to the URL). Also supports
hiding the legend (&hideLegend=true), the grid (&hideGrid=true), and
the title (&hideTitle=true).
* Removed an unnecessary arbitrary filter on the profile organizer so we
can pick high capacity and fast peers more appropriately
2006-03-16 jrandom
* Integrate basic hooks for jrobin (http://jrobin.org) into the router
console. Selected stats can be harvested automatically and fed into
in-memory RRD databases, and those databases can be served up either as
PNG images or as RRDtool compatible XML dumps (see oldstats.jsp for
details). A base set of stats are harvested by default, but an
alternate list can be specified by setting the 'stat.summaries' list on
the advanced config. For instance:
stat.summaries=bw.recvRate.60000,bw.sendRate.60000
* HTML tweaking for the general config page (thanks void!)
* Odd NPE fix (thanks Complication!)
2006-03-15 Complication
* Trim out an old, inactive IP second-guessing method
(thanks for spotting, Anonymous!)
2006-03-15 jrandom
* Further stat cleanup
* Keep track of how many peers we are actively trying to communicate with,
beyond those who are just trying to communicate with us.
* Further router tunnel participation throttle revisions to avoid spurious
rejections
* Rate stat display cleanup (thanks ripple!)
* Don't even try to send messages that have been queued too long
2006-03-05 zzz
* Remove the +++--- from the logs on i2psnark startup
2006-03-05 jrandom
* HTML fixes in Syndie to work better with opera (thanks shaklen!)
* Give netDb lookups to floodfill peers more time, as they are much more
likely to succeed (thereby cutting down on the unnecessary netDb
searches outside the floodfill set)
* Fix to the SSU IP detection code so we won't use introducers when we
don't need them (thanks Complication!)
* Add a brief shitlist to i2psnark so it doesn't keep on trying to reach
peers given to it
* Don't let netDb searches wander across too many peers
* Don't use the 1s bandwidth usage in the tunnel participation throttle,
as it's too volatile to have much meaning.
* Don't bork if a Syndie post is missing an entry.sml
2006-03-05 Complication
* Reduce exposed statistical information,
to make build and uptime tracking more expensive
2006-03-04 Complication
* Fix the announce URL of orion's tracker in Snark sources
2006-03-03 Complication
* Explicit check for an index out of bounds exception while parsing
an inbound IRC command (implicit check was there already)
2006-03-01 jrandom
* More aggressive tunnel throttling as we approach our bandwidth limit,
and throttle based off periods wider than 1 second.
* Included Doubtful Salmon's syndie stylings (thanks!)
2006-02-27 zzz
* Update error page templates to add \r, Connection: close, and
Proxy-connection: close to headers.
* 2006-02-27 0.6.1.12 released
2006-02-27 jrandom
* Adjust the jbigi.jar to use the athlon-optimized jbigi on windows/amd64
machines, rather than the generic jbigi (until we have an athlon64
optimized version)
2006-02-26 jrandom
* Switch from the bouncycastle to the gnu-crypto implementation for
SHA256, as benchmarks show a 10-30% speedup.
* Removed some unnecessary object caches
* Don't close i2psnark streams prematurely
2006-02-25 jrandom
* Made the Syndie permalinks in the thread view point to the blog view
* Disabled TCP again (since the live net seems to be doing well w/out it)
* Fix the message time on inbound SSU establishment (thanks zzz!)
* Don't be so aggressive with parallel tunnel creation when a tunnel pool
just starts up
2006-02-24 jrandom
* Rounding calculation cleanup in the stats, and avoid an uncontested
mutex (thanks ripple!)
* SSU handshake cleanup to help force incompatible peers to stop nagging
us by both not giving them an updated reference to us and by dropping
future handshake packets from them.
2006-02-23 jrandom
* Increase the SSU retransmit ceiling (for slow links)
* Estimate the sender's SSU MTU (to help see if we agree)
2006-02-22 jrandom
* Fix to properly profile tunnel joins (thanks Ragnarok, frosk, et al!)
* More aggressive poor-man's PMTU, allowing larger MTUs on less reliable
links
* Further class validator refactorings
2006-02-22 jrandom
* Handle a rare race under high bandwidth situations in the SSU transport
* Minor refactoring so we don't confuse sun's 1.6.0-b2 validator
2006-02-21 Complication
* Reactivate TCP tranport by default, in addition to re-allowing
* 2006-02-21 0.6.1.11 released

View File

@@ -1,5 +1,5 @@
<i2p.news date="$Date: 2006/02/16 15:44:07 $">
<i2p.release version="0.6.1.11" date="2006/02/21" minVersion="0.6"
<i2p.news date="$Date: 2006/04/15 02:58:12 $">
<i2p.release version="0.6.1.18" date="2006/05/09" minVersion="0.6"
anonurl="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/i2p/i2pupdate.sud"
publicurl="http://dev.i2p.net/i2p/i2pupdate.sud"
anonannouncement="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/pipermail/i2p/2005-September/000878.html"

View File

@@ -4,7 +4,7 @@
<info>
<appname>i2p</appname>
<appversion>0.6.1.11</appversion>
<appversion>0.6.1.18</appversion>
<authors>
<author name="I2P" email="support@i2p.net"/>
</authors>

View File

@@ -19,3 +19,7 @@ the libg++.so.5 dependency that has been a problem for a few linux distros.
On Feb 8, 2006, the libjbigi-linux-viac3.so was added to jbigi.jar after
being compiled by jrandom on linux/p4 (cross compiled to --host=viac3)
On Feb 27, 2006, jbigi-win-athlon.dll was copied to jbigi-win-athlon64.dll,
as it should offer amd64 users better performance than jbigi-win-none.dll
until we get a full amd64 build.

Binary file not shown.

Binary file not shown.

View File

@@ -1,7 +1,9 @@
HTTP/1.1 409 Conflict
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache
HTTP/1.1 409 Conflict
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache
Connection: close
Proxy-Connection: close
<html><head>
<title>Destination key conflict</title>
<style type='text/css'>

View File

@@ -1,7 +1,9 @@
HTTP/1.1 504 Gateway Timeout
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache
HTTP/1.1 504 Gateway Timeout
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache
Connection: close
Proxy-Connection: close
<html><head>
<title>Eepsite not reachable</title>
<style type='text/css'>

View File

@@ -1,7 +1,9 @@
HTTP/1.1 400 Destination Not Found
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache
HTTP/1.1 400 Destination Not Found
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache
Connection: close
Proxy-Connection: close
<html><head>
<title>Invalid eepsite destination</title>
<style type='text/css'>

View File

@@ -1,7 +1,9 @@
HTTP/1.1 404 Domain Not Found
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache
HTTP/1.1 404 Domain Not Found
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache
Connection: close
Proxy-Connection: close
<html><head>
<title>Eepsite unknown</title>
<style type='text/css'>

View File

@@ -1,7 +1,9 @@
HTTP/1.1 504 Gateway Timeout
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache
HTTP/1.1 504 Gateway Timeout
Content-Type: text/html; charset=iso-8859-1
Cache-control: no-cache
Connection: close
Proxy-Connection: close
<html><head>
<title>Outproxy Not Found</title>
<style type='text/css'>

View File

@@ -1,5 +1,5 @@
<i2p.news date="$Date: 2006/02/16 15:44:07 $">
<i2p.release version="0.6.1.11" date="2006/02/21" minVersion="0.6"
<i2p.news date="$Date: 2006/05/02 21:11:06 $">
<i2p.release version="0.6.1.18" date="2006/05/09" minVersion="0.6"
anonurl="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/i2p/i2pupdate.sud"
publicurl="http://dev.i2p.net/i2p/i2pupdate.sud"
anonannouncement="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/pipermail/i2p/2005-September/000878.html"
@@ -10,13 +10,13 @@
anonlogs="http://i2p/Nf3ab-ZFkmI-LyMt7GjgT-jfvZ3zKDl0L96pmGQXF1B82W2Bfjf0n7~288vafocjFLnQnVcmZd~-p0-Oolfo9aW2Rm-AhyqxnxyLlPBqGxsJBXjPhm1JBT4Ia8FB-VXt0BuY0fMKdAfWwN61-tj4zIcQWRxv3DFquwEf035K~Ra4SWOqiuJgTRJu7~o~DzHVljVgWIzwf8Z84cz0X33pv-mdG~~y0Bsc2qJVnYwjjR178YMcRSmNE0FVMcs6f17c6zqhMw-11qjKpY~EJfHYCx4lBWF37CD0obbWqTNUIbL~78vxqZRT3dgAgnLixog9nqTO-0Rh~NpVUZnoUi7fNR~awW5U3Cf7rU7nNEKKobLue78hjvRcWn7upHUF45QqTDuaM3yZa7OsjbcH-I909DOub2Q0Dno6vIwuA7yrysccN1sbnkwZbKlf4T6~iDdhaSLJd97QCyPOlbyUfYy9QLNExlRqKgNVJcMJRrIual~Lb1CLbnzt0uvobM57UpqSAAAA/meeting141"
publiclogs="http://www.i2p.net/meeting141" />
&#149;
2006-02-16:
0.6.1.10 released with some major updates - it is <b>not</b> backwards compatible, so upgrading is essential.
2006-04-23: 0.6.1.17 <a href="http://dev.i2p/pipermail/i2p/2006-April/001282.html">released</a>
with multiple improvements. Upgrading should alleviate congestion and peer selection issues.
<br>
&#149;
2006-02-14:
<a href="http://dev.i2p/pipermail/i2p/2006-February/001260.html">status notes</a>
2006-05-02:
<a href="http://dev.i2p/pipermail/i2p/2006-May/001285.html">status notes</a>
and
<a href="http://www.i2p/meeting168">meeting log</a>
<a href="http://www.i2p/meeting178">meeting log</a>
<br>
</i2p.news>

View File

@@ -44,9 +44,9 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
}
public DatabaseLookupMessage(I2PAppContext context, boolean locallyCreated) {
super(context);
setSearchKey(null);
setFrom(null);
setDontIncludePeers(null);
//setSearchKey(null);
//setFrom(null);
//setDontIncludePeers(null);
context.statManager().createRateStat("router.throttleNetDbDoSSend", "How many netDb lookup messages we are sending during a period with a DoS detected", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });

View File

@@ -290,7 +290,11 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
public void readMessage(byte data[], int offset, int dataSize, int type, I2NPMessageHandler handler) throws I2NPMessageException, IOException {
// ignore the handler (overridden in subclasses if necessary
readMessage(data, offset, dataSize, type);
try {
readMessage(data, offset, dataSize, type);
} catch (IllegalArgumentException iae) {
throw new I2NPMessageException("Error reading the message", iae);
}
}
@@ -313,20 +317,24 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
return msg;
}
long expiration = DataHelper.fromLong(buffer, offset, 4) * 1000; // seconds
offset += 4;
int dataSize = len - 1 - 4;
try {
long expiration = DataHelper.fromLong(buffer, offset, 4) * 1000; // seconds
offset += 4;
int dataSize = len - 1 - 4;
msg.readMessage(buffer, offset, dataSize, type, handler);
msg.setMessageExpiration(expiration);
msg.read();
return msg;
} catch (IOException ioe) {
throw new I2NPMessageException("IO error reading raw message", ioe);
} catch (IllegalArgumentException iae) {
throw new I2NPMessageException("Corrupt message (negative expiration)", iae);
}
}
protected void verifyUnwritten() { if (_written) throw new RuntimeException("Already written"); }
protected void verifyUnwritten() {
if (_written) throw new IllegalStateException("Already written");
}
protected void written() { _written = true; }
protected void read() { _read = true; }

View File

@@ -29,6 +29,7 @@ public abstract class CommSystemFacade implements Service {
public Set createAddresses() { return new HashSet(); }
public int countActivePeers() { return 0; }
public int countActiveSendPeers() { return 0; }
public List getMostRecentErrorMessages() { return Collections.EMPTY_LIST; }
/**

View File

@@ -26,7 +26,7 @@ class JobQueueRunner implements Runnable {
_context.statManager().createRateStat("jobQueue.jobRun", "How long jobs take", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobRunSlow", "How long jobs that take over a second take", "JobQueue", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobLag", "How long jobs have to wait before running", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobWait", "How long does a job sat on the job queue?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobWait", "How long does a job sit on the job queue?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobRunnerInactive", "How long are runners inactive?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_state = 1;
}

View File

@@ -483,10 +483,6 @@ public class LoadTestManager {
// length == #hops+1 (as it includes the creator)
if (cfg.getLength() < 2)
return false;
// only load test the client tunnels
// XXX why?
////if (cfg.getTunnel().getDestination() == null)
//// return false;
_active.add(cfg);
return true;
} else {
@@ -496,18 +492,26 @@ public class LoadTestManager {
private boolean bandwidthOverloaded() {
int msgLoadBps = CONCURRENT_MESSAGES
* 5 // message size
* 5 * 1024 // message size
/ 10; // 10 seconds before timeout & retransmission
msgLoadBps *= 2; // buffer
if (_context.bandwidthLimiter().getSendBps()/1024d + (double)msgLoadBps >= _context.bandwidthLimiter().getOutboundKBytesPerSecond())
int curBps = getBps();
if ((curBps + msgLoadBps)/1024 >= _context.bandwidthLimiter().getOutboundKBytesPerSecond())
return true;
if (_context.bandwidthLimiter().getReceiveBps()/1024d + (double)msgLoadBps >= _context.bandwidthLimiter().getInboundKBytesPerSecond())
if ((curBps + msgLoadBps)/1024 >= _context.bandwidthLimiter().getInboundKBytesPerSecond())
return true;
if (_context.throttle().getMessageDelay() > 1000)
return true;
return false;
}
private int getBps() {
int used1s = _context.router().get1sRate();
int used1m = _context.router().get1mRate();
int used5m = _context.router().get5mRate();
return Math.max(used1s, Math.max(used1m, used5m));
}
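
To make the overload arithmetic concrete: each concurrent load-test message is costed at 5 KB spread over the 10-second retransmission window, then doubled as a buffer. A worked sketch, assuming a hypothetical CONCURRENT_MESSAGES of 10 (the constant is defined outside this hunk):

    int msgLoadBps = 10        // hypothetical CONCURRENT_MESSAGES
                   * 5 * 1024  // message size in bytes
                   / 10;       // 10 seconds before timeout & retransmission
    msgLoadBps *= 2;           // buffer -> 10240 Bps, i.e. 10 KBps
    // the load test stands down if getBps() plus this estimate would
    // exceed either the outbound or inbound limit, or if messages are lagging
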
private class CreatedJob extends JobImpl {
private LoadTestTunnelConfig _cfg;
public CreatedJob(RouterContext ctx, LoadTestTunnelConfig cfg) {

View File

@@ -35,8 +35,10 @@ import net.i2p.router.message.GarlicMessageHandler;
//import net.i2p.router.message.TunnelMessageHandler;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.router.startup.StartupJob;
import net.i2p.router.transport.FIFOBandwidthLimiter;
import net.i2p.stat.Rate;
import net.i2p.stat.RateStat;
import net.i2p.stat.StatManager;
import net.i2p.util.FileUtil;
import net.i2p.util.I2PThread;
import net.i2p.util.SimpleTimer;
@@ -153,7 +155,7 @@ public class Router {
shutdown(EXIT_OOM);
}
};
_shutdownHook = new ShutdownHook();
_shutdownHook = new ShutdownHook(_context);
_gracefulShutdownDetector = new I2PThread(new GracefulShutdown());
_gracefulShutdownDetector.setDaemon(true);
_gracefulShutdownDetector.setName("Graceful shutdown hook");
@@ -210,7 +212,7 @@ public class Router {
public void setRouterInfo(RouterInfo info) {
_routerInfo = info;
if (info != null)
_context.jobQueue().addJob(new PersistRouterInfoJob());
_context.jobQueue().addJob(new PersistRouterInfoJob(_context));
}
/**
@@ -245,8 +247,9 @@ public class Router {
_context.tunnelDispatcher().startup();
_context.inNetMessagePool().startup();
startupQueue();
_context.jobQueue().addJob(new CoalesceStatsJob());
_context.jobQueue().addJob(new UpdateRoutingKeyModifierJob());
//_context.jobQueue().addJob(new CoalesceStatsJob(_context));
SimpleTimer.getInstance().addEvent(new CoalesceStatsEvent(_context), 0);
_context.jobQueue().addJob(new UpdateRoutingKeyModifierJob(_context));
warmupCrypto();
_sessionKeyPersistenceHelper.startup();
//_context.adminManager().startup();
@@ -322,13 +325,8 @@ public class Router {
stats.setProperty(RouterInfo.PROP_NETWORK_ID, NETWORK_ID+"");
ri.setOptions(stats);
ri.setAddresses(_context.commSystem().createAddresses());
if (FloodfillNetworkDatabaseFacade.floodfillEnabled(_context))
ri.addCapability(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL);
if("true".equalsIgnoreCase(_context.getProperty(Router.PROP_HIDDEN, "false"))) {
ri.addCapability(RouterInfo.CAPABILITY_HIDDEN);
}
addReachabilityCapability(ri);
addCapabilities(ri);
SigningPrivateKey key = _context.keyManager().getSigningPrivateKey();
if (key == null) {
_log.log(Log.CRIT, "Internal error - signing private key not known? wtf");
@@ -357,15 +355,43 @@ public class Router {
}
}
// publicize our ballpark capacity - this does not affect anything at
// the moment
public static final char CAPABILITY_BW16 = 'K';
public static final char CAPABILITY_BW32 = 'L';
public static final char CAPABILITY_BW64 = 'M';
public static final char CAPABILITY_BW128 = 'N';
public static final char CAPABILITY_BW256 = 'O';
public static final char CAPABILITY_REACHABLE = 'R';
public static final char CAPABILITY_UNREACHABLE = 'U';
public static final String PROP_FORCE_UNREACHABLE = "router.forceUnreachable";
public static final char CAPABILITY_NEW_TUNNEL = 'T';
public void addReachabilityCapability(RouterInfo ri) {
// routers who can understand TunnelBuildMessages
////ri.addCapability(CAPABILITY_NEW_TUNNEL);
public void addCapabilities(RouterInfo ri) {
int bwLim = Math.min(_context.bandwidthLimiter().getInboundKBytesPerSecond(),
_context.bandwidthLimiter().getOutboundKBytesPerSecond());
if (_log.shouldLog(Log.WARN))
_log.warn("Adding capabilities w/ bw limit @ " + bwLim, new Exception("caps"));
if (bwLim <= 16) {
ri.addCapability(CAPABILITY_BW16);
} else if (bwLim <= 32) {
ri.addCapability(CAPABILITY_BW32);
} else if (bwLim <= 64) {
ri.addCapability(CAPABILITY_BW64);
} else if (bwLim <= 128) {
ri.addCapability(CAPABILITY_BW128);
} else { // ok, more than 128KBps... aka "lots"
ri.addCapability(CAPABILITY_BW256);
}
if (FloodfillNetworkDatabaseFacade.floodfillEnabled(_context))
ri.addCapability(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL);
if("true".equalsIgnoreCase(_context.getProperty(Router.PROP_HIDDEN, "false")))
ri.addCapability(RouterInfo.CAPABILITY_HIDDEN);
String forceUnreachable = _context.getProperty(PROP_FORCE_UNREACHABLE);
if ( (forceUnreachable != null) && ("true".equalsIgnoreCase(forceUnreachable)) ) {
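
In short, addCapabilities advertises a bandwidth class derived from the lesser of the router's inbound and outbound limits: 'K' up to 16 KBps, 'L' up to 32, 'M' up to 64, 'N' up to 128, and 'O' beyond. A compact restatement of the thresholds (illustrative, not part of the commit):

    char cap = bwLim <= 16  ? Router.CAPABILITY_BW16
             : bwLim <= 32  ? Router.CAPABILITY_BW32
             : bwLim <= 64  ? Router.CAPABILITY_BW64
             : bwLim <= 128 ? Router.CAPABILITY_BW128
             : Router.CAPABILITY_BW256; // e.g. a 96 KBps router earns 'N'
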
@@ -449,89 +475,6 @@ public class Router {
finalShutdown(EXIT_HARD_RESTART);
}
/**
* coalesce the stats framework every minute
*
*/
private final class CoalesceStatsJob extends JobImpl {
public CoalesceStatsJob() {
super(Router.this._context);
Router.this._context.statManager().createRateStat("bw.receiveBps", "How fast we receive data", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
Router.this._context.statManager().createRateStat("bw.sendBps", "How fast we send data", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
Router.this._context.statManager().createRateStat("router.activePeers", "How many peers we are actively talking with", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
Router.this._context.statManager().createRateStat("router.highCapacityPeers", "How many high capacity peers we know", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
Router.this._context.statManager().createRateStat("router.fastPeers", "How many fast peers we know", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
}
public String getName() { return "Coalesce stats"; }
public void runJob() {
Router.this._context.statManager().coalesceStats();
RateStat receiveRate = _context.statManager().getRate("transport.receiveMessageSize");
if (receiveRate != null) {
Rate rate = receiveRate.getRate(60*1000);
if (rate != null) {
double bytes = rate.getLastTotalValue();
double bps = (bytes*1000.0d)/(rate.getPeriod()*1024.0d);
Router.this._context.statManager().addRateData("bw.receiveBps", (long)bps, 60*1000);
}
}
RateStat sendRate = _context.statManager().getRate("transport.sendMessageSize");
if (sendRate != null) {
Rate rate = sendRate.getRate(60*1000);
if (rate != null) {
double bytes = rate.getLastTotalValue();
double bps = (bytes*1000.0d)/(rate.getPeriod()*1024.0d);
Router.this._context.statManager().addRateData("bw.sendBps", (long)bps, 60*1000);
}
}
int active = Router.this._context.commSystem().countActivePeers();
Router.this._context.statManager().addRateData("router.activePeers", active, 60*1000);
int fast = Router.this._context.profileOrganizer().countFastPeers();
Router.this._context.statManager().addRateData("router.fastPeers", fast, 60*1000);
int highCap = Router.this._context.profileOrganizer().countHighCapacityPeers();
Router.this._context.statManager().addRateData("router.highCapacityPeers", highCap, 60*1000);
requeue(60*1000);
}
}
/**
* Update the routing Key modifier every day at midnight (plus on startup).
* This is done here because we want to make sure the key is updated before anyone
* uses it.
*/
private final class UpdateRoutingKeyModifierJob extends JobImpl {
private Calendar _cal = new GregorianCalendar(TimeZone.getTimeZone("GMT"));
public UpdateRoutingKeyModifierJob() { super(Router.this._context); }
public String getName() { return "Update Routing Key Modifier"; }
public void runJob() {
Router.this._context.routingKeyGenerator().generateDateBasedModData();
requeue(getTimeTillMidnight());
}
private long getTimeTillMidnight() {
long now = Router.this._context.clock().now();
_cal.setTime(new Date(now));
_cal.set(Calendar.YEAR, _cal.get(Calendar.YEAR)); // gcj <= 4.0 workaround
_cal.set(Calendar.DAY_OF_YEAR, _cal.get(Calendar.DAY_OF_YEAR)); // gcj <= 4.0 workaround
_cal.add(Calendar.DATE, 1);
_cal.set(Calendar.HOUR_OF_DAY, 0);
_cal.set(Calendar.MINUTE, 0);
_cal.set(Calendar.SECOND, 0);
_cal.set(Calendar.MILLISECOND, 0);
long then = _cal.getTime().getTime();
long howLong = then - now;
if (howLong < 0) // hi kaffe
howLong = 24*60*60*1000l + howLong;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Time till midnight: " + howLong + "ms");
return howLong;
}
}
private void warmupCrypto() {
_context.random().nextBoolean();
new DHSessionKeyBuilder(); // load the class so it starts the precalc process
@@ -1060,7 +1003,7 @@ public class Router {
return _context.getProperty("router.pingFile", "router.ping");
}
private static final long LIVELINESS_DELAY = 60*1000;
static final long LIVELINESS_DELAY = 60*1000;
/**
* Start a thread that will periodically update the file "router.ping", but if
@@ -1082,84 +1025,263 @@ public class Router {
}
}
// not an I2PThread for context creation issues
Thread t = new Thread(new MarkLiveliness(f));
Thread t = new Thread(new MarkLiveliness(_context, this, f));
t.setName("Mark router liveliness");
t.setDaemon(true);
t.start();
return true;
}
private class MarkLiveliness implements Runnable {
private File _pingFile;
public MarkLiveliness(File f) {
_pingFile = f;
private static final String PROP_BANDWIDTH_SHARE_PERCENTAGE = "router.sharePercentage";
/**
* What fraction of the bandwidth specified in our bandwidth limits should
* we allow to be consumed by participating tunnels?
*
*/
public double getSharePercentage() {
RouterContext ctx = _context;
if (ctx == null) return 0;
String pct = ctx.getProperty(PROP_BANDWIDTH_SHARE_PERCENTAGE);
if (pct != null) {
try {
double d = Double.parseDouble(pct);
if (d > 1)
return d/100d; // *cough* sometimes it's 80 instead of .8 (!stab jrandom)
else
return d;
} catch (NumberFormatException nfe) {
if (_log.shouldLog(Log.INFO))
_log.info("Unable to get the share percentage");
}
}
public void run() {
_pingFile.deleteOnExit();
do {
ping();
try { Thread.sleep(LIVELINESS_DELAY); } catch (InterruptedException ie) {}
} while (_isAlive);
_pingFile.delete();
return 0.8;
}
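
The share property accepts either a fraction or a whole percentage, so "0.8" and "80" both come out as 0.8, and unparseable values fall back to the 0.8 default. A usage sketch (the config line is hypothetical):

    // router.sharePercentage=80   <-- in the router configuration
    double share = ctx.router().getSharePercentage(); // -> 0.8
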
public int get1sRate() { return get1sRate(false); }
public int get1sRate(boolean outboundOnly) {
RouterContext ctx = _context;
if (ctx != null) {
FIFOBandwidthLimiter bw = ctx.bandwidthLimiter();
if (bw != null) {
int out = (int)bw.getSendBps();
if (outboundOnly)
return out;
return (int)Math.max(out, bw.getReceiveBps());
}
}
return 0;
}
public int get1mRate() { return get1mRate(false); }
public int get1mRate(boolean outboundOnly) {
int send = 0;
RouterContext ctx = _context;
if (ctx == null)
return 0;
StatManager mgr = ctx.statManager();
if (mgr == null)
return 0;
RateStat rs = mgr.getRate("bw.sendRate");
if (rs != null)
send = (int)rs.getRate(1*60*1000).getAverageValue();
if (outboundOnly)
return send;
int recv = 0;
rs = mgr.getRate("bw.recvRate");
if (rs != null)
recv = (int)rs.getRate(1*60*1000).getAverageValue();
return Math.max(send, recv);
}
public int get5mRate() { return get5mRate(false); }
public int get5mRate(boolean outboundOnly) {
int send = 0;
RateStat rs = _context.statManager().getRate("bw.sendRate");
if (rs != null)
send = (int)rs.getRate(5*60*1000).getAverageValue();
if (outboundOnly)
return send;
int recv = 0;
rs = _context.statManager().getRate("bw.recvRate");
if (rs != null)
recv = (int)rs.getRate(5*60*1000).getAverageValue();
return Math.max(send, recv);
}
}
/**
* coalesce the stats framework every minute
*
*/
class CoalesceStatsEvent implements SimpleTimer.TimedEvent {
private RouterContext _ctx;
public CoalesceStatsEvent(RouterContext ctx) {
_ctx = ctx;
ctx.statManager().createRateStat("bw.receiveBps", "How fast we receive data (in KBps)", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("bw.sendBps", "How fast we send data (in KBps)", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("bw.sendRate", "Low level bandwidth send rate", "Bandwidth", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
ctx.statManager().createRateStat("bw.recvRate", "Low level bandwidth receive rate", "Bandwidth", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
ctx.statManager().createRateStat("router.activePeers", "How many peers we are actively talking with", "Throttle", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("router.activeSendPeers", "How many peers we've sent to this minute", "Throttle", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("router.highCapacityPeers", "How many high capacity peers we know", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("router.fastPeers", "How many fast peers we know", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
}
private RouterContext getContext() { return _ctx; }
public void timeReached() {
int active = getContext().commSystem().countActivePeers();
getContext().statManager().addRateData("router.activePeers", active, 60*1000);
int activeSend = getContext().commSystem().countActiveSendPeers();
getContext().statManager().addRateData("router.activeSendPeers", activeSend, 60*1000);
int fast = getContext().profileOrganizer().countFastPeers();
getContext().statManager().addRateData("router.fastPeers", fast, 60*1000);
int highCap = getContext().profileOrganizer().countHighCapacityPeers();
getContext().statManager().addRateData("router.highCapacityPeers", highCap, 60*1000);
getContext().statManager().addRateData("bw.sendRate", (long)getContext().bandwidthLimiter().getSendBps(), 0);
getContext().statManager().addRateData("bw.recvRate", (long)getContext().bandwidthLimiter().getReceiveBps(), 0);
getContext().statManager().coalesceStats();
RateStat receiveRate = getContext().statManager().getRate("transport.receiveMessageSize");
if (receiveRate != null) {
Rate rate = receiveRate.getRate(60*1000);
if (rate != null) {
double bytes = rate.getLastTotalValue();
double KBps = (bytes*1000.0d)/(rate.getPeriod()*1024.0d);
getContext().statManager().addRateData("bw.receiveBps", (long)KBps, 60*1000);
}
}
RateStat sendRate = getContext().statManager().getRate("transport.sendMessageSize");
if (sendRate != null) {
Rate rate = sendRate.getRate(60*1000);
if (rate != null) {
double bytes = rate.getLastTotalValue();
double KBps = (bytes*1000.0d)/(rate.getPeriod()*1024.0d);
getContext().statManager().addRateData("bw.sendBps", (long)KBps, 60*1000);
}
}
private void ping() {
FileOutputStream fos = null;
try {
fos = new FileOutputStream(_pingFile);
fos.write(("" + System.currentTimeMillis()).getBytes());
} catch (IOException ioe) {
if (_log != null) {
_log.log(Log.CRIT, "Error writing to ping file", ioe);
} else {
System.err.println("Error writing to ping file");
ioe.printStackTrace();
}
} finally {
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
}
}
SimpleTimer.getInstance().addEvent(this, 20*1000);
}
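
Note the scheduling pattern: the event re-arms itself at the end of timeReached instead of living on the job queue, which is what lets CoalesceStatsEvent replace the old CoalesceStatsJob. A minimal self-rescheduling event of the same shape (class name hypothetical):

    class HeartbeatEvent implements SimpleTimer.TimedEvent {
        public void timeReached() {
            // ... periodic work ...
            SimpleTimer.getInstance().addEvent(this, 20*1000); // re-arm in 20s
        }
    }
    // fire once to start; the event keeps itself alive afterwards
    SimpleTimer.getInstance().addEvent(new HeartbeatEvent(), 0);
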
private static int __id = 0;
private class ShutdownHook extends Thread {
private int _id;
public ShutdownHook() {
_id = ++__id;
}
public void run() {
setName("Router " + _id + " shutdown");
_log.log(Log.CRIT, "Shutting down the router...");
shutdown(EXIT_HARD);
}
}
/**
* Update the routing Key modifier every day at midnight (plus on startup).
* This is done here because we want to make sure the key is updated before anyone
* uses it.
*/
class UpdateRoutingKeyModifierJob extends JobImpl {
private Log _log;
private Calendar _cal = new GregorianCalendar(TimeZone.getTimeZone("GMT"));
public UpdateRoutingKeyModifierJob(RouterContext ctx) {
super(ctx);
}
/** update the router.info file whenever it's, er, updated */
private class PersistRouterInfoJob extends JobImpl {
public PersistRouterInfoJob() { super(Router.this._context); }
public String getName() { return "Persist Updated Router Information"; }
public void runJob() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Persisting updated router info");
public String getName() { return "Update Routing Key Modifier"; }
public void runJob() {
_log = getContext().logManager().getLog(getClass());
getContext().routingKeyGenerator().generateDateBasedModData();
requeue(getTimeTillMidnight());
}
private long getTimeTillMidnight() {
long now = getContext().clock().now();
_cal.setTime(new Date(now));
_cal.set(Calendar.YEAR, _cal.get(Calendar.YEAR)); // gcj <= 4.0 workaround
_cal.set(Calendar.DAY_OF_YEAR, _cal.get(Calendar.DAY_OF_YEAR)); // gcj <= 4.0 workaround
_cal.add(Calendar.DATE, 1);
_cal.set(Calendar.HOUR_OF_DAY, 0);
_cal.set(Calendar.MINUTE, 0);
_cal.set(Calendar.SECOND, 0);
_cal.set(Calendar.MILLISECOND, 0);
long then = _cal.getTime().getTime();
long howLong = then - now;
if (howLong < 0) // hi kaffe
howLong = 24*60*60*1000l + howLong;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Time till midnight: " + howLong + "ms");
return howLong;
}
}
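
To make the schedule concrete: the calendar is advanced to the next midnight GMT, so a run at 18:00 GMT requeues in six hours, and the negative-result guard (the kaffe workaround) pushes the requeue a day out if a broken clock yields a past timestamp:

    // now  = 2006-05-09 18:00:00 GMT
    // then = 2006-05-10 00:00:00 GMT
    // howLong = then - now = 6*60*60*1000 ms, i.e. six hours
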
String infoFilename = getConfigSetting(PROP_INFO_FILENAME);
if (infoFilename == null)
infoFilename = PROP_INFO_FILENAME_DEFAULT;
class MarkLiveliness implements Runnable {
private RouterContext _context;
private Router _router;
private File _pingFile;
public MarkLiveliness(RouterContext ctx, Router router, File pingFile) {
_context = ctx;
_router = router;
_pingFile = pingFile;
}
public void run() {
_pingFile.deleteOnExit();
do {
ping();
try { Thread.sleep(Router.LIVELINESS_DELAY); } catch (InterruptedException ie) {}
} while (_router.isAlive());
_pingFile.delete();
}
RouterInfo info = getRouterInfo();
FileOutputStream fos = null;
try {
fos = new FileOutputStream(infoFilename);
info.writeBytes(fos);
} catch (DataFormatException dfe) {
_log.error("Error rebuilding the router information", dfe);
} catch (IOException ioe) {
_log.error("Error writing out the rebuilt router information", ioe);
} finally {
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
}
private void ping() {
FileOutputStream fos = null;
try {
fos = new FileOutputStream(_pingFile);
fos.write(("" + System.currentTimeMillis()).getBytes());
} catch (IOException ioe) {
System.err.println("Error writing to ping file");
ioe.printStackTrace();
} finally {
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
}
}
}
class ShutdownHook extends Thread {
private RouterContext _context;
private static int __id = 0;
private int _id;
public ShutdownHook(RouterContext ctx) {
_context = ctx;
_id = ++__id;
}
public void run() {
setName("Router " + _id + " shutdown");
Log l = _context.logManager().getLog(Router.class);
l.log(Log.CRIT, "Shutting down the router...");
_context.router().shutdown(Router.EXIT_HARD);
}
}
/** update the router.info file whenever it's, er, updated */
class PersistRouterInfoJob extends JobImpl {
private Log _log;
public PersistRouterInfoJob(RouterContext ctx) {
super(ctx);
}
public String getName() { return "Persist Updated Router Information"; }
public void runJob() {
_log = getContext().logManager().getLog(PersistRouterInfoJob.class);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Persisting updated router info");
String infoFilename = getContext().getProperty(Router.PROP_INFO_FILENAME);
if (infoFilename == null)
infoFilename = Router.PROP_INFO_FILENAME_DEFAULT;
RouterInfo info = getContext().router().getRouterInfo();
FileOutputStream fos = null;
try {
fos = new FileOutputStream(infoFilename);
info.writeBytes(fos);
} catch (DataFormatException dfe) {
_log.error("Error rebuilding the router information", dfe);
} catch (IOException ioe) {
_log.error("Error writing out the rebuilt router information", ioe);
} finally {
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
}
}
}

View File

@@ -32,8 +32,7 @@ class RouterThrottleImpl implements RouterThrottle {
private static final String PROP_MAX_TUNNELS = "router.maxParticipatingTunnels";
private static final String PROP_DEFAULT_KBPS_THROTTLE = "router.defaultKBpsThrottle";
private static final String PROP_BANDWIDTH_SHARE_PERCENTAGE = "router.sharePercentage";
/** tunnel acceptance */
public static final int TUNNEL_ACCEPT = 0;
@@ -88,36 +87,8 @@ class RouterThrottleImpl implements RouterThrottle {
}
long lag = _context.jobQueue().getMaxLag();
/*
RateStat rs = _context.statManager().getRate("router.throttleNetworkCause");
Rate r = null;
if (rs != null)
r = rs.getRate(60*1000);
long throttleEvents = (r != null ? r.getCurrentEventCount() + r.getLastEventCount() : 0);
if (throttleEvents > THROTTLE_EVENT_LIMIT) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Refusing tunnel request with the job lag of " + lag
+ " since there have been " + throttleEvents
+ " throttle events in the last 15 minutes or so");
_context.statManager().addRateData("router.throttleTunnelCause", lag, lag);
return TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
}
*/
RateStat rs = _context.statManager().getRate("transport.sendProcessingTime");
Rate r = null;
/*
if (rs != null)
r = rs.getRate(1*60*1000);
double processTime = (r != null ? r.getAverageValue() : 0);
if (processTime > 2000) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Refusing tunnel request with the job lag of " + lag
+ "since the 10 minute message processing time is too slow (" + processTime + ")");
_context.statManager().addRateData("router.throttleTunnelProcessingTime10m", (long)processTime, (long)processTime);
return TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
}
*/
if (rs != null)
r = rs.getRate(60*1000);
double processTime = (r != null ? r.getAverageValue() : 0);
@@ -129,27 +100,6 @@ class RouterThrottleImpl implements RouterThrottle {
return TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
}
/*
rs = _context.statManager().getRate("transport.sendMessageFailureLifetime");
r = null;
if (rs != null)
r = rs.getRate(60*1000);
double failCount = (r != null ? r.getCurrentEventCount() + r.getLastEventCount() : 0);
if (failCount > 100) {
long periods = r.getLifetimePeriods();
long maxFailCount = r.getExtremeEventCount();
if ( (periods > 0) && (maxFailCount > 100) ) {
if (_context.random().nextInt((int)maxFailCount) <= failCount) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Refusing tunnel request with the job lag of " + lag
+ "since the 1 minute message failure count is too high (" + failCount + "/" + maxFailCount + ")");
_context.statManager().addRateData("router.throttleTunnelFailCount1m", (long)failCount, (long)maxFailCount);
return TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
}
}
}
*/
int numTunnels = _context.tunnelManager().getParticipatingCount();
if (numTunnels > getMinThrottleTunnels()) {
@@ -239,14 +189,19 @@ class RouterThrottleImpl implements RouterThrottle {
// of another tunnel?
rs = _context.statManager().getRate("tunnel.participatingMessageCount");
r = null;
if (rs != null)
double messagesPerTunnel = DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE;
if (rs != null) {
r = rs.getRate(10*60*1000);
double messagesPerTunnel = (r != null ? r.getAverageValue() : 0d);
if (r != null) {
if (r.getLastEventCount() > 0)
messagesPerTunnel = r.getAverageValue();
else
messagesPerTunnel = r.getLifetimeAverageValue();
}
}
if (messagesPerTunnel < DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE)
messagesPerTunnel = DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE;
int participatingTunnels = (r != null ? (int) (r.getLastEventCount() + r.getCurrentEventCount()) : 0);
if (participatingTunnels <= 0)
participatingTunnels = _context.tunnelManager().getParticipatingCount();
int participatingTunnels = _context.tunnelManager().getParticipatingCount();
double bytesAllocated = messagesPerTunnel * participatingTunnels * 1024;
if (!allowTunnel(bytesAllocated, numTunnels)) {
@@ -261,8 +216,9 @@ class RouterThrottleImpl implements RouterThrottle {
+ " tunnels with lag of " + lag + ")");
return TUNNEL_ACCEPT;
}
private static final int DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE = 600; // 1KBps
private static final int DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE = 60; // .1KBps
private static final int MIN_AVAILABLE_BPS = 4*1024; // always leave at least 4KBps free when allowing
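
The estimate change is a tenfold reduction: at roughly 1 KB per message over the 10-minute rate period, 600 messages per tunnel was about 1 KBps and 60 is about 0.1 KBps, so 100 participating tunnels are now budgeted near 10 KBps rather than 100 KBps. As arithmetic:

    double perTunnelKBps = 60 * 1024.0 / (10*60) / 1024; // = 0.1 KBps per tunnel
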
/**
* with bytesAllocated already accounted for across the numTunnels existing
@@ -272,59 +228,77 @@ class RouterThrottleImpl implements RouterThrottle {
*/
private boolean allowTunnel(double bytesAllocated, int numTunnels) {
int maxKBps = Math.min(_context.bandwidthLimiter().getOutboundKBytesPerSecond(), _context.bandwidthLimiter().getInboundKBytesPerSecond());
int used = (int)Math.max(_context.bandwidthLimiter().getSendBps(), _context.bandwidthLimiter().getReceiveBps());
int availBps = (int)(((maxKBps*1024) - used) * getSharePercentage());
int used1s = 0; //get1sRate(_context); // don't throttle on the 1s rate, it's too volatile
int used1m = _context.router().get1mRate();
int used5m = 0; //get5mRate(_context); // don't throttle on the 5m rate, as that'd hide available bandwidth
int used = Math.max(Math.max(used1s, used1m), used5m);
double share = _context.router().getSharePercentage();
int availBps = (int)(((maxKBps*1024)*share) - used); //(int)(((maxKBps*1024) - used) * getSharePercentage());
_context.statManager().addRateData("router.throttleTunnelBytesUsed", used, maxKBps);
_context.statManager().addRateData("router.throttleTunnelBytesAllowed", availBps, (long)bytesAllocated);
if (maxKBps <= 8) {
// lets be more conservative for dialup users and assume 1KBps per tunnel
return ( (numTunnels + 1)*1024 < availBps);
if (true) {
// ok, ignore any predictions of 'bytesAllocated', since that makes poorly
// grounded conclusions about future use (or even the bursty use). Instead,
// simply say "do we have the bw to handle a new request"?
float maxBps = maxKBps * 1024f;
float pctFull = (maxBps - availBps) / (maxBps);
double probReject = Math.pow(pctFull, 16); // steep curve
double rand = _context.random().nextFloat();
boolean reject = (availBps < MIN_AVAILABLE_BPS) || (rand <= probReject);
if (_log.shouldLog(Log.WARN))
_log.warn("reject = " + reject + " avail/maxK/used " + availBps + "/" + maxKBps + "/"
+ used + " pReject = " + probReject + " pFull = " + pctFull + " numTunnels = " + numTunnels
+ "rand = " + rand + " est = " + bytesAllocated + " share = " + (float)share);
if (reject) {
return false;
} else {
return true;
}
}
/*
if (availBps <= 8*1024) {
// lets be more conservative for people near their limit and assume 1KBps per tunnel
boolean rv = ( (numTunnels + 1)*1024 < availBps);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Nearly full router (" + availBps + ") with " + numTunnels + " tunnels, allow a new request? " + rv);
return rv;
}
*/
double growthFactor = ((double)(numTunnels+1))/(double)numTunnels;
double toAllocate = (numTunnels > 0 ? bytesAllocated * growthFactor : 0);
double allocatedKBps = toAllocate / (10 * 60 * 1024);
double pctFull = allocatedKBps / availBps;
double allocatedBps = toAllocate / (10 * 60);
double pctFull = allocatedBps / availBps;
if ( (pctFull < 1.0) && (pctFull >= 0.0) ) { // (_context.random().nextInt(100) > 100 * pctFull) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Probabalistically allowing the tunnel w/ " + pctFull + " of our " + availBps
+ "Bps/" + allocatedKBps + "KBps allocated through " + numTunnels + " tunnels");
_log.debug("Allowing the tunnel w/ " + pctFull + " of our " + availBps
+ "Bps/" + allocatedBps + "KBps allocated through " + numTunnels + " tunnels");
return true;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Rejecting the tunnel w/ " + pctFull + " of our " + availBps
+ "Bps allowed (" + toAllocate + "bytes / " + allocatedKBps
+ "KBps) through " + numTunnels + " tunnels");
return false;
double probAllow = availBps / (allocatedBps + availBps);
boolean allow = (availBps > MIN_AVAILABLE_BPS) && (_context.random().nextFloat() <= probAllow);
if (allow) {
if (_log.shouldLog(Log.INFO))
_log.info("Probabalistically allowing the tunnel w/ " + (pctFull*100d) + "% of our " + availBps
+ "Bps allowed (" + toAllocate + "bytes / " + allocatedBps
+ "Bps) through " + numTunnels + " tunnels");
return true;
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("Rejecting the tunnel w/ " + (pctFull*100d) + "% of our " + availBps
+ "Bps allowed (" + toAllocate + "bytes / " + allocatedBps
+ "Bps) through " + numTunnels + " tunnels");
return false;
}
}
}
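
The pow-16 curve keeps rejection negligible until bandwidth is nearly exhausted: at 50% full the reject probability is about 0.0015%, at 80% about 2.8%, and at 95% about 44%. A quick check of the steepness:

    double p50 = Math.pow(0.50, 16); // ~1.5e-5
    double p80 = Math.pow(0.80, 16); // ~0.028
    double p95 = Math.pow(0.95, 16); // ~0.44
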
/**
* What fraction of the bandwidth specified in our bandwidth limits should
* we allow to be consumed by participating tunnels?
*
*/
private double getSharePercentage() {
String pct = _context.getProperty(PROP_BANDWIDTH_SHARE_PERCENTAGE, "0.8");
if (pct != null) {
try {
double d = Double.parseDouble(pct);
if (d > 1)
return d/100d; // *cough* sometimes its 80 instead of .8 (!stab jrandom)
else
return d;
} catch (NumberFormatException nfe) {
if (_log.shouldLog(Log.INFO))
_log.info("Unable to get the share percentage");
}
}
return 0.8;
}
/** don't ever probabilistically throttle tunnels if we have less than this many */
private int getMinThrottleTunnels() {
try {

View File

@@ -15,8 +15,8 @@ import net.i2p.CoreVersion;
*
*/
public class RouterVersion {
public final static String ID = "$Revision: 1.355 $ $Date: 2006/02/21 08:31:24 $";
public final static String VERSION = "0.6.1.11";
public final static String ID = "$Revision: 1.411 $ $Date: 2006/05/07 22:19:47 $";
public final static String VERSION = "0.6.1.18";
public final static long BUILD = 0;
public static void main(String args[]) {
System.out.println("I2P Router version: " + VERSION + "-" + BUILD);

View File

@@ -47,12 +47,13 @@ class RouterWatchdog implements Runnable {
}
private boolean shutdownOnHang() {
return Boolean.valueOf(_context.getProperty("watchdog.haltOnHang", "true")).booleanValue();
return Boolean.valueOf(_context.getProperty("watchdog.haltOnHang", "false")).booleanValue();
}
private void dumpStatus() {
if (_log.shouldLog(Log.ERROR)) {
Job cur = _context.jobQueue().getLastJob();
/*
if (cur != null)
_log.error("Most recent job: " + cur);
_log.error("Last job began: "
@@ -61,6 +62,7 @@ class RouterWatchdog implements Runnable {
_log.error("Last job ended: "
+ DataHelper.formatDuration(_context.clock().now()-_context.jobQueue().getLastJobEnd())
+ " ago");
*/
_log.error("Ready and waiting jobs: " + _context.jobQueue().getReadyCount());
_log.error("Job lag: " + _context.jobQueue().getMaxLag());
_log.error("Participating tunnel count: " + _context.tunnelManager().getParticipatingCount());

View File

@@ -84,7 +84,7 @@ public class Shitlist {
_context.netDb().fail(peer);
//_context.tunnelManager().peerFailed(peer);
_context.messageRegistry().peerFailed(peer);
//_context.messageRegistry().peerFailed(peer);
if (!wasAlready)
_context.messageHistory().shitlist(peer, reason);
return wasAlready;

View File

@@ -95,9 +95,11 @@ public class StatisticsManager implements Service {
public Properties publishStatistics() {
Properties stats = new Properties();
stats.setProperty("router.version", RouterVersion.VERSION);
stats.setProperty("router.id", RouterVersion.ID);
stats.setProperty("coreVersion", CoreVersion.VERSION);
stats.setProperty("core.id", CoreVersion.ID);
// No longer expose, to make build tracking more expensive
// stats.setProperty("router.id", RouterVersion.ID);
// stats.setProperty("core.id", CoreVersion.ID);
if (_includePeerRankings) {
if (false)
@@ -147,7 +149,12 @@ public class StatisticsManager implements Service {
includeRate("udp.congestionOccurred", stats, new long[] { 10*60*1000 });
//includeRate("stream.con.sendDuplicateSize", stats, new long[] { 60*60*1000 });
//includeRate("stream.con.receiveDuplicateSize", stats, new long[] { 60*60*1000 });
stats.setProperty("stat_uptime", DataHelper.formatDuration(_context.router().getUptime()));
// Round smaller uptimes to 1 hour, to frustrate uptime tracking
long publishedUptime = _context.router().getUptime();
if (publishedUptime < 60*60*1000) publishedUptime = 60*60*1000;
stats.setProperty("stat_uptime", DataHelper.formatDuration(publishedUptime));
stats.setProperty("stat__rateKey", "avg;maxAvg;pctLifetime;[sat;satLim;maxSat;maxSatLim;][num;lifetimeFreq;maxFreq]");
includeRate("tunnel.buildRequestTime", stats, new long[] { 60*1000, 10*60*1000 });
@@ -280,8 +287,14 @@ public class StatisticsManager implements Service {
private String getPeriod(Rate rate) { return DataHelper.formatDuration(rate.getPeriod()); }
private final String num(double num) { synchronized (_fmt) { return _fmt.format(num); } }
private final String pct(double num) { synchronized (_pct) { return _pct.format(num); } }
private final String num(double num) {
if (num < 0) num = 0;
synchronized (_fmt) { return _fmt.format(num); }
}
private final String pct(double num) {
if (num < 0) num = 0;
synchronized (_pct) { return _pct.format(num); }
}
public void renderStatusHTML(Writer out) { }
}

View File

@@ -56,6 +56,9 @@ public interface TunnelManagerFacade extends Service {
/** When does the last tunnel we are participating in expire? */
public long getLastParticipatingExpiration();
/** count how many inbound tunnel requests we have received but not yet processed */
public int getInboundBuildQueueSize();
/**
* the client connected (or updated their settings), so make sure we have
* the tunnels for them, and whenever necessary, ask them to authorize
@@ -97,6 +100,7 @@ class DummyTunnelManagerFacade implements TunnelManagerFacade {
public void setOutboundSettings(TunnelPoolSettings settings) {}
public void setInboundSettings(Hash client, TunnelPoolSettings settings) {}
public void setOutboundSettings(Hash client, TunnelPoolSettings settings) {}
public int getInboundBuildQueueSize() { return 0; }
public void renderStatusHTML(Writer out) throws IOException {}
public void restart() {}

View File

@@ -180,6 +180,17 @@ public class StatsGenerator {
buf.append(num(curRate.getExtremeEventCount()));
buf.append(")");
}
if (curRate.getSummaryListener() != null) {
buf.append(" <a href=\"viewstat.jsp?stat=").append(name);
buf.append("&amp;period=").append(periods[i]);
buf.append("\" title=\"Render summarized data\">render</a>");
buf.append(" <a href=\"viewstat.jsp?stat=").append(name);
buf.append("&amp;period=").append(periods[i]).append("&amp;showEvents=true\" title=\"Render summarized event counts\">events</a>");
buf.append(" (as <a href=\"viewstat.jsp?stat=").append(name);
buf.append("&amp;period=").append(periods[i]);
buf.append("&amp;format=xml\" title=\"Dump stat history as XML\">XML</a>");
buf.append(" in a format <a href=\"http://people.ee.ethz.ch/~oetiker/webtools/rrdtool\">RRDTool</a> understands)");
}
buf.append("</li>");
if (i + 1 == periods.length) {
// last one, so let's display the strict average
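
The generated links expose each stat through viewstat.jsp. For example (stat name and period illustrative, parameter names as in the code above), a rendered graph and an RRDTool-readable XML dump of the 1-minute bw.sendBps series:

    /viewstat.jsp?stat=bw.sendBps&period=60000
    /viewstat.jsp?stat=bw.sendBps&period=60000&format=xml
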

View File

@@ -122,8 +122,11 @@ public class GarlicMessageBuilder {
msg.setMessageExpiration(config.getExpiration());
long timeFromNow = config.getExpiration() - ctx.clock().now();
if (timeFromNow < 1*1000)
log.error("Building a message expiring in " + timeFromNow + "ms: " + config, new Exception("created by"));
if (timeFromNow < 1*1000) {
if (log.shouldLog(Log.WARN))
log.warn("Building a message expiring in " + timeFromNow + "ms: " + config, new Exception("created by"));
return null;
}
if (log.shouldLog(Log.WARN))
log.warn("CloveSet size for message " + msg.getUniqueId() + " is " + cloveSet.length

View File

@@ -131,9 +131,9 @@ class OutboundClientMessageJobHelper {
TunnelId replyToTunnelId = null; // tunnel id on that gateway
if (replyToTunnel == null) {
if (log.shouldLog(Log.ERROR))
log.error("Unable to send client message from " + from.toBase64()
+ ", as there are no inbound tunnels available");
if (log.shouldLog(Log.WARN))
log.warn("Unable to send client message from " + from.toBase64()
+ ", as there are no inbound tunnels available");
return null;
}
replyToTunnelId = replyToTunnel.getReceiveTunnelId(0);

View File

@@ -16,6 +16,7 @@ import net.i2p.data.Lease;
import net.i2p.data.LeaseSet;
import net.i2p.data.PublicKey;
import net.i2p.data.SessionKey;
import net.i2p.data.Payload;
import net.i2p.data.i2cp.MessageId;
import net.i2p.data.i2np.DataMessage;
@@ -107,18 +108,18 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendAckTime", "How long does it take to get an ACK back from a message?", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendAckTime", "Message round trip time", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.leaseSetFoundLocally", "How often we tried to look for a leaseSet and found it locally?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.leaseSetFoundRemoteTime", "How long we tried to look fora remote leaseSet (when we succeeded)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.leaseSetFoundRemoteTime", "How long we tried to look for a remote leaseSet (when we succeeded)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.leaseSetFailedRemoteTime", "How long we tried to look for a remote leaseSet (when we failed)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchPrepareTime", "How long until we've queued up the dispatch job (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchTime", "How long until we've dispatched the message (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchSendTime", "How long the actual dispatching takes?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchNoTunnels", "How long after start do we run out of tunnels to send/receive with?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchNoACK", "How often we send a client message without asking for an ACK?", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l });
ctx.statManager().createRateStat("client.dispatchNoACK", "Repeated message sends to a peer (no ack required)", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l });
long timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT;
_clientMessage = msg;
_clientMessageId = msg.getMessageId();
@@ -212,8 +213,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
if (ok) {
send();
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("Unable to send on a random lease, as getNext returned null (to=" + _toString + ")");
if (_log.shouldLog(Log.WARN))
_log.warn("Unable to send on a random lease, as getNext returned null (to=" + _toString + ")");
dieFatal();
}
}
@@ -313,6 +314,10 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
*/
private void send() {
if (_finished) return;
if (getContext().clock().now() >= _overallExpiration) {
dieFatal();
return;
}
boolean wantACK = true;
int existingTags = GarlicMessageBuilder.estimateAvailableTags(getContext(), _leaseSet.getEncryptionKey());
if (existingTags > 30)
@@ -330,11 +335,16 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
if (wantACK)
_inTunnel = selectInboundTunnel();
buildClove();
boolean ok = (_clientMessage != null) && buildClove();
if (!ok) {
dieFatal();
return;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Clove built to " + _toString);
long msgExpiration = _overallExpiration; // getContext().clock().now() + OVERALL_TIMEOUT_MS_DEFAULT;
GarlicMessage msg = OutboundClientMessageJobHelper.createGarlicMessage(getContext(), token,
_overallExpiration, key,
msgExpiration, key,
_clove, _from.calculateHash(),
_to, _inTunnel,
sessKey, tags,
@@ -344,7 +354,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
// (should we always fail for this? or should we send it anyway, even if
// we dont receive the reply? hmm...)
if (_log.shouldLog(Log.WARN))
_log.warn(getJobId() + ": Unable to create the garlic message (no tunnels left) to " + _toString);
_log.warn(getJobId() + ": Unable to create the garlic message (no tunnels left or too lagged) to " + _toString);
getContext().statManager().addRateData("client.dispatchNoTunnels", getContext().clock().now() - _start, 0);
dieFatal();
return;
@@ -470,7 +480,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
}
/** build the payload clove that will be used for all of the messages, placing the clove in the status structure */
private void buildClove() {
private boolean buildClove() {
PayloadGarlicConfig clove = new PayloadGarlicConfig();
DeliveryInstructions instructions = new DeliveryInstructions();
@@ -487,7 +497,13 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
clove.setId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE));
DataMessage msg = new DataMessage(getContext());
msg.setData(_clientMessage.getPayload().getEncryptedData());
Payload p = _clientMessage.getPayload();
if (p == null)
return false;
byte d[] = p.getEncryptedData();
if (d == null)
return false;
msg.setData(d);
msg.setMessageExpiration(clove.getExpiration());
clove.setPayload(msg);
@@ -499,6 +515,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Built payload clove with id " + clove.getId());
return true;
}
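
Taken together, the new guards make the send path fail fast instead of burning tunnels: a message already past _overallExpiration dies immediately, and a clove is only built when the client payload actually carries encrypted data. Condensed (an equivalent restatement, not the literal diff):

    if (getContext().clock().now() >= _overallExpiration) { dieFatal(); return; }
    if (_clientMessage == null || !buildClove()) { dieFatal(); return; }
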
/**

View File

@@ -45,13 +45,8 @@ public class PublishLocalRouterInfoJob extends JobImpl {
ri.setPublished(getContext().clock().now());
ri.setOptions(stats);
ri.setAddresses(getContext().commSystem().createAddresses());
if (FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext()))
ri.addCapability(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL);
if ("true".equalsIgnoreCase(getContext().getProperty(Router.PROP_HIDDEN, "false")))
ri.addCapability(RouterInfo.CAPABILITY_HIDDEN);
getContext().router().addReachabilityCapability(ri);
getContext().router().addCapabilities(ri);
SigningPrivateKey key = getContext().keyManager().getSigningPrivateKey();
if (key == null) {
_log.log(Log.CRIT, "Internal error - signing private key not known? rescheduling publish for 30s");

View File

@@ -0,0 +1,244 @@
package net.i2p.router.networkdb.kademlia;
import java.util.*;
import net.i2p.router.*;
import net.i2p.data.Hash;
import net.i2p.data.i2np.*;
import net.i2p.util.Log;
/**
* Try sending a search to some floodfill peers, failing completely if we don't get
* a match from one of those peers, with no fallback to the kademlia search
*
*/
class FloodOnlySearchJob extends FloodSearchJob {
private Log _log;
private FloodfillNetworkDatabaseFacade _facade;
private Hash _key;
private List _onFind;
private List _onFailed;
private long _expiration;
private int _timeoutMs;
private long _origExpiration;
private boolean _isLease;
private volatile int _lookupsRemaining;
private volatile boolean _dead;
private long _created;
private List _out;
private MessageSelector _replySelector;
private ReplyJob _onReply;
private Job _onTimeout;
public FloodOnlySearchJob(RouterContext ctx, FloodfillNetworkDatabaseFacade facade, Hash key, Job onFind, Job onFailed, int timeoutMs, boolean isLease) {
super(ctx, facade, key, onFind, onFailed, timeoutMs, isLease);
_log = ctx.logManager().getLog(FloodOnlySearchJob.class);
_facade = facade;
_key = key;
_onFind = new ArrayList();
_onFind.add(onFind);
_onFailed = new ArrayList();
_onFailed.add(onFailed);
_timeoutMs = Math.min(timeoutMs, SearchJob.PER_FLOODFILL_PEER_TIMEOUT);
_expiration = _timeoutMs + ctx.clock().now();
_origExpiration = _timeoutMs + ctx.clock().now();
_isLease = isLease;
_lookupsRemaining = 0;
_dead = false;
_out = new ArrayList(2);
_replySelector = new FloodOnlyLookupSelector(getContext(), this);
_onReply = new FloodOnlyLookupMatchJob(getContext(), this);
_onTimeout = new FloodOnlyLookupTimeoutJob(getContext(), this);
_created = System.currentTimeMillis();
}
void addDeferred(Job onFind, Job onFailed, long timeoutMs, boolean isLease) {
if (_dead) {
getContext().jobQueue().addJob(onFailed);
} else {
if (onFind != null) synchronized (_onFind) { _onFind.add(onFind); }
if (onFailed != null) synchronized (_onFailed) { _onFailed.add(onFailed); }
}
}
public long getExpiration() { return _expiration; }
private static final int CONCURRENT_SEARCHES = 2;
public void runJob() {
// pick some floodfill peers and send out the searches
List floodfillPeers = _facade.getFloodfillPeers();
if (floodfillPeers == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("Running netDb searches against the floodfill peers, but we don't know any");
failed();
return;
}
OutNetMessage out = getContext().messageRegistry().registerPending(_replySelector, _onReply, _onTimeout, _timeoutMs);
_out.add(out);
for (int i = 0; _lookupsRemaining < CONCURRENT_SEARCHES && i < floodfillPeers.size(); i++) {
Hash peer = (Hash)floodfillPeers.get(i);
if (peer.equals(getContext().routerHash()))
continue;
DatabaseLookupMessage dlm = new DatabaseLookupMessage(getContext(), true);
TunnelInfo replyTunnel = getContext().tunnelManager().selectInboundTunnel();
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel();
if ( (replyTunnel == null) || (outTunnel == null) ) {
failed();
return;
}
dlm.setFrom(replyTunnel.getPeer(0));
dlm.setMessageExpiration(getContext().clock().now()+10*1000);
dlm.setReplyTunnel(replyTunnel.getReceiveTunnelId(0));
dlm.setSearchKey(_key);
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " to " + peer.toBase64());
getContext().tunnelDispatcher().dispatchOutbound(dlm, outTunnel.getSendTunnelId(0), peer);
_lookupsRemaining++;
}
if (_lookupsRemaining <= 0) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " had no peers to send to");
// no floodfill peers, fail
failed();
}
}
public String getName() { return "NetDb flood search (phase 1)"; }
Hash getKey() { return _key; }
void decrementRemaining() { _lookupsRemaining--; }
int getLookupsRemaining() { return _lookupsRemaining; }
void failed() {
synchronized (this) {
if (_dead) return;
_dead = true;
}
for (int i = 0; i < _out.size(); i++) {
OutNetMessage out = (OutNetMessage)_out.get(i);
getContext().messageRegistry().unregisterPending(out);
}
int timeRemaining = (int)(_origExpiration - getContext().clock().now());
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " failed with " + timeRemaining + " remaining after " + (System.currentTimeMillis()-_created));
_facade.complete(_key);
getContext().statManager().addRateData("netDb.failedTime", System.currentTimeMillis()-_created, System.currentTimeMillis()-_created);
synchronized (_onFailed) {
for (int i = 0; i < _onFailed.size(); i++) {
Job j = (Job)_onFailed.remove(0);
getContext().jobQueue().addJob(j);
}
}
}
void success() {
synchronized (this) {
if (_dead) return;
_dead = true;
}
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " successful");
_facade.complete(_key);
getContext().statManager().addRateData("netDb.successTime", System.currentTimeMillis()-_created, System.currentTimeMillis()-_created);
synchronized (_onFind) {
while (_onFind.size() > 0)
getContext().jobQueue().addJob((Job)_onFind.remove(0));
}
}
}
class FloodOnlyLookupTimeoutJob extends JobImpl {
private FloodSearchJob _search;
private Log _log;
public FloodOnlyLookupTimeoutJob(RouterContext ctx, FloodOnlySearchJob job) {
super(ctx);
_search = job;
_log = ctx.logManager().getLog(getClass());
}
public void runJob() {
if (_log.shouldLog(Log.INFO))
_log.info(_search.getJobId() + ": search timed out");
_search.failed();
}
public String getName() { return "NetDb flood search (phase 1) timeout"; }
}
class FloodOnlyLookupMatchJob extends JobImpl implements ReplyJob {
private Log _log;
private FloodOnlySearchJob _search;
public FloodOnlyLookupMatchJob(RouterContext ctx, FloodOnlySearchJob job) {
super(ctx);
_log = ctx.logManager().getLog(getClass());
_search = job;
}
public void runJob() {
if ( (getContext().netDb().lookupLeaseSetLocally(_search.getKey()) != null) ||
(getContext().netDb().lookupRouterInfoLocally(_search.getKey()) != null) ) {
if (_log.shouldLog(Log.INFO))
_log.info(_search.getJobId() + ": search match and found locally");
_search.success();
} else {
int remaining = _search.getLookupsRemaining();
if (_log.shouldLog(Log.INFO))
_log.info(_search.getJobId() + ": got a DatabasSearchReply when we were looking for "
+ _search.getKey().toBase64() + ", with " + remaining + " outstanding searches");
// netDb reply pointing us at other people
_search.failed();
}
}
public String getName() { return "NetDb flood search (phase 1) match"; }
public void setMessage(I2NPMessage message) {
if (message instanceof DatabaseSearchReplyMessage) {
// a dsrm is only passed in when there are no more lookups remaining
_search.failed();
return;
}
try {
DatabaseStoreMessage dsm = (DatabaseStoreMessage)message;
if (dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET)
getContext().netDb().store(dsm.getKey(), dsm.getLeaseSet());
else
getContext().netDb().store(dsm.getKey(), dsm.getRouterInfo());
} catch (IllegalArgumentException iae) {
if (_log.shouldLog(Log.WARN))
_log.warn(_search.getJobId() + ": Received an invalid store reply", iae);
}
}
}
class FloodOnlyLookupSelector implements MessageSelector {
private RouterContext _context;
private FloodOnlySearchJob _search;
private boolean _matchFound;
private Log _log;
public FloodOnlyLookupSelector(RouterContext ctx, FloodOnlySearchJob search) {
_context = ctx;
_search = search;
_log = ctx.logManager().getLog(getClass());
_matchFound = false;
}
public boolean continueMatching() {
return _search.getLookupsRemaining() > 0 && !_matchFound && _context.clock().now() < getExpiration();
}
public long getExpiration() { return (_matchFound ? -1 : _search.getExpiration()); }
public boolean isMatch(I2NPMessage message) {
if (message == null) return false;
if (message instanceof DatabaseStoreMessage) {
DatabaseStoreMessage dsm = (DatabaseStoreMessage)message;
// is it worth making sure the reply came in on the right tunnel?
if (_search.getKey().equals(dsm.getKey())) {
_search.decrementRemaining();
_matchFound = true;
return true;
}
} else if (message instanceof DatabaseSearchReplyMessage) {
DatabaseSearchReplyMessage dsrm = (DatabaseSearchReplyMessage)message;
if (_search.getKey().equals(dsrm.getSearchKey())) {
_search.decrementRemaining();
if (_search.getLookupsRemaining() <= 0)
return true; // ok, no more left, so time to fail
else
return false;
}
}
return false;
}
}
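
Taken together, these classes form a small state machine: runJob fires up to CONCURRENT_SEARCHES lookups through client tunnels, the selector counts down one reply per floodfill peer, a matching DatabaseStoreMessage triggers success(), and a DatabaseSearchReplyMessage from the last outstanding peer (or the timeout job) triggers failed() with no kademlia fallback. A hypothetical invocation, mirroring how the facade constructs it below:

    Job search = new FloodOnlySearchJob(ctx, facade, key, onFind, onFailed,
                                        10*1000, true); // 10s timeout, leaseSet lookup
    ctx.jobQueue().addJob(search);
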

View File

@@ -14,9 +14,23 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
public static final char CAPACITY_FLOODFILL = 'f';
private static final String PROP_FLOODFILL_PARTICIPANT = "router.floodfillParticipant";
private static final String DEFAULT_FLOODFILL_PARTICIPANT = "false";
private Map _activeFloodQueries;
public FloodfillNetworkDatabaseFacade(RouterContext context) {
super(context);
_activeFloodQueries = new HashMap();
_context.statManager().createRateStat("netDb.successTime", "How long a successful search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.failedAttemptedPeers", "How many peers we sent a search to when the search fails", "NetworkDatabase", new long[] { 60*1000l, 10*60*1000l });
_context.statManager().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.failedPeers", "How many peers fail to respond to a lookup?", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.searchCount", "Overall number of searches sent", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.searchMessageCount", "Overall number of mesages for all searches sent", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.searchReplyValidated", "How many search replies we get that we are able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies we get from unreliable peers that we skip?", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.republishQuantity", "How many peers do we need to send a found leaseSet to?", "NetworkDatabase", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
}
protected void createHandlers() {
@@ -106,4 +120,306 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
else
return false;
}
/**
* Begin a kademlia style search for the key specified, which can take up to timeoutMs and
* will fire the appropriate jobs on success or timeout (or if the kademlia search completes
* without any match)
*
*/
SearchJob search(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease) {
//if (true) return super.search(key, onFindJob, onFailedLookupJob, timeoutMs, isLease);
if (key == null) throw new IllegalArgumentException("searchin for nothin, eh?");
boolean isNew = false;
FloodSearchJob searchJob = null;
synchronized (_activeFloodQueries) {
searchJob = (FloodSearchJob)_activeFloodQueries.get(key);
if (searchJob == null) {
if (SearchJob.onlyQueryFloodfillPeers(_context)) {
searchJob = new FloodOnlySearchJob(_context, this, key, onFindJob, onFailedLookupJob, (int)timeoutMs, isLease);
} else {
searchJob = new FloodSearchJob(_context, this, key, onFindJob, onFailedLookupJob, (int)timeoutMs, isLease);
}
_activeFloodQueries.put(key, searchJob);
isNew = true;
}
}
if (isNew) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("this is the first search for that key, fire off the FloodSearchJob");
_context.jobQueue().addJob(searchJob);
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Deferring flood search for " + key.toBase64() + " with " + onFindJob);
searchJob.addDeferred(onFindJob, onFailedLookupJob, timeoutMs, isLease);
_context.statManager().addRateData("netDb.lookupLeaseSetDeferred", 1, searchJob.getExpiration()-_context.clock().now());
}
return null;
}
/**
* Ok, the initial set of searches to the floodfill peers timed out, lets fall back on the
* wider kademlia-style searches
*/
void searchFull(Hash key, List onFind, List onFailed, long timeoutMs, boolean isLease) {
synchronized (_activeFloodQueries) { _activeFloodQueries.remove(key); }
Job find = null;
Job fail = null;
if (onFind != null) {
synchronized (onFind) {
if (onFind.size() > 0)
find = (Job)onFind.remove(0);
}
}
if (onFailed != null) {
synchronized (onFailed) {
if (onFailed.size() > 0)
fail = (Job)onFailed.remove(0);
}
}
SearchJob job = super.search(key, find, fail, timeoutMs, isLease);
if (job != null) {
if (_log.shouldLog(Log.INFO))
_log.info("Floodfill search timed out for " + key.toBase64() + ", falling back on normal search (#"
+ job.getJobId() + ") with " + timeoutMs + " remaining");
long expiration = timeoutMs + _context.clock().now();
List removed = null;
if (onFind != null) {
synchronized (onFind) {
removed = new ArrayList(onFind);
onFind.clear();
}
for (int i = 0; i < removed.size(); i++)
job.addDeferred((Job)removed.get(i), null, expiration, isLease);
removed = null;
}
if (onFailed != null) {
synchronized (onFailed) {
removed = new ArrayList(onFailed);
onFailed.clear();
}
for (int i = 0; i < removed.size(); i++)
job.addDeferred(null, (Job)removed.get(i), expiration, isLease);
removed = null;
}
}
}
void complete(Hash key) {
synchronized (_activeFloodQueries) { _activeFloodQueries.remove(key); }
}
/** list of the Hashes of currently known floodfill peers */
List getFloodfillPeers() {
FloodfillPeerSelector sel = (FloodfillPeerSelector)getPeerSelector();
return sel.selectFloodfillParticipants(getKBuckets());
}
}
/**
* Try sending a search to some floodfill peers, but if we don't get a successful
* match within half the allowed lookup time, give up and start querying through
* the normal (kademlia) channels. This should cut down on spurious lookups caused
* by simple delays in responses from floodfill peers
*
*/
class FloodSearchJob extends JobImpl {
private Log _log;
private FloodfillNetworkDatabaseFacade _facade;
private Hash _key;
private List _onFind;
private List _onFailed;
private long _expiration;
private int _timeoutMs;
private long _origExpiration;
private boolean _isLease;
private volatile int _lookupsRemaining;
private volatile boolean _dead;
public FloodSearchJob(RouterContext ctx, FloodfillNetworkDatabaseFacade facade, Hash key, Job onFind, Job onFailed, int timeoutMs, boolean isLease) {
super(ctx);
_log = ctx.logManager().getLog(FloodSearchJob.class);
_facade = facade;
_key = key;
_onFind = new ArrayList();
_onFind.add(onFind);
_onFailed = new ArrayList();
_onFailed.add(onFailed);
int timeout = timeoutMs / FLOOD_SEARCH_TIME_FACTOR;
if (timeout < FLOOD_SEARCH_TIME_MIN)
timeout = FLOOD_SEARCH_TIME_MIN;
_timeoutMs = timeout;
_expiration = timeout + ctx.clock().now();
_origExpiration = timeoutMs + ctx.clock().now();
_isLease = isLease;
_lookupsRemaining = 0;
_dead = false;
}
void addDeferred(Job onFind, Job onFailed, long timeoutMs, boolean isLease) {
if (_dead) {
getContext().jobQueue().addJob(onFailed);
} else {
if (onFind != null) synchronized (_onFind) { _onFind.add(onFind); }
if (onFailed != null) synchronized (_onFailed) { _onFailed.add(onFailed); }
}
}
public long getExpiration() { return _expiration; }
private static final int CONCURRENT_SEARCHES = 2;
private static final int FLOOD_SEARCH_TIME_FACTOR = 2;
private static final int FLOOD_SEARCH_TIME_MIN = 30*1000;
public void runJob() {
// pick some floodfill peers and send out the searches
List floodfillPeers = _facade.getFloodfillPeers();
FloodLookupSelector replySelector = new FloodLookupSelector(getContext(), this);
ReplyJob onReply = new FloodLookupMatchJob(getContext(), this);
Job onTimeout = new FloodLookupTimeoutJob(getContext(), this);
OutNetMessage out = getContext().messageRegistry().registerPending(replySelector, onReply, onTimeout, _timeoutMs);
for (int i = 0; _lookupsRemaining < CONCURRENT_SEARCHES && i < floodfillPeers.size(); i++) {
Hash peer = (Hash)floodfillPeers.get(i);
if (peer.equals(getContext().routerHash()))
continue;
DatabaseLookupMessage dlm = new DatabaseLookupMessage(getContext(), true);
TunnelInfo replyTunnel = getContext().tunnelManager().selectInboundTunnel();
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel();
if ( (replyTunnel == null) || (outTunnel == null) ) {
_dead = true;
List removed = null;
synchronized (_onFailed) {
removed = new ArrayList(_onFailed);
_onFailed.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
getContext().messageRegistry().unregisterPending(out);
return;
}
dlm.setFrom(replyTunnel.getPeer(0));
dlm.setMessageExpiration(getContext().clock().now()+10*1000);
dlm.setReplyTunnel(replyTunnel.getReceiveTunnelId(0));
dlm.setSearchKey(_key);
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " to " + peer.toBase64());
getContext().tunnelDispatcher().dispatchOutbound(dlm, outTunnel.getSendTunnelId(0), peer);
_lookupsRemaining++;
}
if (_lookupsRemaining <= 0) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " had no peers to send to");
// no floodfill peers, go to the normal ones
getContext().messageRegistry().unregisterPending(out);
_facade.searchFull(_key, _onFind, _onFailed, _timeoutMs*FLOOD_SEARCH_TIME_FACTOR, _isLease);
}
}
public String getName() { return "NetDb search (phase 1)"; }
Hash getKey() { return _key; }
void decrementRemaining() { _lookupsRemaining--; }
int getLookupsRemaining() { return _lookupsRemaining; }
void failed() {
if (_dead) return;
_dead = true;
int timeRemaining = (int)(_origExpiration - getContext().clock().now());
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " failed with " + timeRemaining);
if (timeRemaining > 0) {
_facade.searchFull(_key, _onFind, _onFailed, timeRemaining, _isLease);
} else {
List removed = null;
synchronized (_onFailed) {
removed = new ArrayList(_onFailed);
_onFailed.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
}
}
void success() {
if (_dead) return;
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " successful");
_dead = true;
_facade.complete(_key);
List removed = null;
synchronized (_onFind) {
removed = new ArrayList(_onFind);
_onFind.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
}
}
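// Editor's worked example (illustrative, assuming the FLOOD_SEARCH_TIME_MIN
// clamp above): with timeoutMs = 60*1000 and FLOOD_SEARCH_TIME_FACTOR = 2,
// the floodfill phase gets timeout = 30*1000 (right at the minimum, so no
// clamp), _expiration = now + 30s, and _origExpiration = now + 60s. If that
// phase fails at +30s, failed() computes timeRemaining of roughly 30*1000
// and hands the unused half of the budget to _facade.searchFull() for the
// wider kademlia pass.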
class FloodLookupTimeoutJob extends JobImpl {
private FloodSearchJob _search;
public FloodLookupTimeoutJob(RouterContext ctx, FloodSearchJob job) {
super(ctx);
_search = job;
}
public void runJob() {
_search.decrementRemaining();
if (_search.getLookupsRemaining() <= 0)
_search.failed();
}
public String getName() { return "NetDb search (phase 1) timeout"; }
}
class FloodLookupMatchJob extends JobImpl implements ReplyJob {
private Log _log;
private FloodSearchJob _search;
public FloodLookupMatchJob(RouterContext ctx, FloodSearchJob job) {
super(ctx);
_log = ctx.logManager().getLog(FloodLookupMatchJob.class);
_search = job;
}
public void runJob() {
if ( (getContext().netDb().lookupLeaseSetLocally(_search.getKey()) != null) ||
(getContext().netDb().lookupRouterInfoLocally(_search.getKey()) != null) ) {
_search.success();
} else {
int remaining = _search.getLookupsRemaining();
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + "/" + _search.getJobId() + ": got a reply looking for "
+ _search.getKey().toBase64() + ", with " + remaining + " outstanding searches");
// netDb reply pointing us at other people
if (remaining <= 0)
_search.failed();
}
}
public String getName() { return "NetDb search (phase 1) match"; }
public void setMessage(I2NPMessage message) {}
}
class FloodLookupSelector implements MessageSelector {
private RouterContext _context;
private FloodSearchJob _search;
public FloodLookupSelector(RouterContext ctx, FloodSearchJob search) {
_context = ctx;
_search = search;
}
public boolean continueMatching() { return _search.getLookupsRemaining() > 0; }
public long getExpiration() { return _search.getExpiration(); }
public boolean isMatch(I2NPMessage message) {
if (message == null) return false;
if (message instanceof DatabaseStoreMessage) {
DatabaseStoreMessage dsm = (DatabaseStoreMessage)message;
// is it worth making sure the reply came in on the right tunnel?
if (_search.getKey().equals(dsm.getKey())) {
_search.decrementRemaining();
return true;
}
} else if (message instanceof DatabaseSearchReplyMessage) {
DatabaseSearchReplyMessage dsrm = (DatabaseSearchReplyMessage)message;
if (_search.getKey().equals(dsrm.getSearchKey())) {
_search.decrementRemaining();
return true;
}
}
return false;
}
}
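The search() method in the facade above guarantees one in-flight query per key: the first caller creates and fires the job, later callers attach their callbacks via addDeferred(), and complete() clears the table. A compact sketch of that dedup pattern, with hypothetical names (QueryTable and RunningQuery are illustrations, not I2P classes):

import java.util.HashMap;
import java.util.Map;

class QueryTable {
    private final Map active = new HashMap(); // key -> RunningQuery

    void lookup(Object key, Runnable onDone) {
        RunningQuery q;
        boolean isNew = false;
        synchronized (active) {
            q = (RunningQuery) active.get(key);
            if (q == null) {
                q = new RunningQuery();
                active.put(key, q);
                isNew = true;
            }
        }
        if (isNew)
            q.start(onDone);       // fire the real search exactly once
        else
            q.addDeferred(onDone); // piggyback on the in-flight search
    }

    void complete(Object key) {
        synchronized (active) { active.remove(key); }
    }
}

class RunningQuery {
    private final java.util.List deferred = new java.util.ArrayList();
    void start(Runnable onDone) { addDeferred(onDone); /* kick off the lookup */ }
    void addDeferred(Runnable onDone) {
        synchronized (deferred) { deferred.add(onDone); }
    }
}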

View File

@@ -74,16 +74,17 @@ class FloodfillPeerSelector extends PeerSelector {
return;
if (entry.equals(_context.routerHash()))
return;
if (_context.shitlist().isShitlisted(entry))
return;
// it isn't direct, so who cares if they're shitlisted
//if (_context.shitlist().isShitlisted(entry))
// return;
RouterInfo info = _context.netDb().lookupRouterInfoLocally(entry);
if (info == null)
return;
//if (info == null)
// return;
if (FloodfillNetworkDatabaseFacade.isFloodfill(info)) {
if (info != null && FloodfillNetworkDatabaseFacade.isFloodfill(info)) {
_floodfillMatches.add(entry);
} else {
if ( (_wanted > _matches) && (_key != null) ) {
if ( (!SearchJob.onlyQueryFloodfillPeers(_context)) && (_wanted > _matches) && (_key != null) ) {
BigInteger diff = getDistance(_key, entry);
_sorted.put(diff, entry);
}
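In the non-floodfill branch above, getDistance(_key, entry) is presumably the usual Kademlia XOR metric, and _sorted (a TreeMap keyed by that distance) hands back candidates closest-first. A self-contained sketch of that ordering, assuming the XOR metric:

import java.math.BigInteger;
import java.util.TreeMap;

class XorDistance {
    /** XOR distance between two equal-length hashes, as an unsigned BigInteger. */
    static BigInteger distance(byte[] key, byte[] peer) {
        byte[] diff = new byte[key.length];
        for (int i = 0; i < key.length; i++)
            diff[i] = (byte) (key[i] ^ peer[i]);
        return new BigInteger(1, diff); // signum 1: treat as positive magnitude
    }

    public static void main(String[] args) {
        byte[] key = { 0x00, 0x0F };
        byte[][] peers = { { 0x00, 0x0E }, { 0x7F, 0x00 }, { 0x00, 0x1F } };
        TreeMap sorted = new TreeMap(); // distance -> peer, closest first
        for (int i = 0; i < peers.length; i++)
            sorted.put(distance(key, peers[i]), peers[i]);
        System.out.println("closest distance: " + sorted.firstKey()); // prints 1
    }
}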

View File

@@ -694,10 +694,10 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
if (err != null)
throw new IllegalArgumentException("Invalid store attempt - " + err);
if (_log.shouldLog(Log.INFO))
_log.info("RouterInfo " + key.toBase64() + " is stored with "
+ routerInfo.getOptions().size() + " options on "
+ new Date(routerInfo.getPublished()));
if (_log.shouldLog(Log.DEBUG))
_log.debug("RouterInfo " + key.toBase64() + " is stored with "
+ routerInfo.getOptions().size() + " options on "
+ new Date(routerInfo.getPublished()));
_context.peerManager().setCapabilities(key, routerInfo.getCapabilities());
_ds.put(key, routerInfo);
@@ -799,8 +799,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
* without any match)
*
*/
void search(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease) {
if (!_initialized) return;
SearchJob search(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease) {
if (!_initialized) return null;
boolean isNew = true;
SearchJob searchJob = null;
synchronized (_activeRequests) {
@@ -823,6 +823,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
int deferred = searchJob.addDeferred(onFindJob, onFailedLookupJob, timeoutMs, isLease);
_context.statManager().addRateData("netDb.lookupLeaseSetDeferred", deferred, searchJob.getExpiration()-_context.clock().now());
}
return searchJob;
}
private Set getLeases() {
@@ -851,8 +852,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
}
/** smallest allowed period */
private static final int MIN_PER_PEER_TIMEOUT = 3*1000;
private static final int MAX_PER_PEER_TIMEOUT = 5*1000;
private static final int MIN_PER_PEER_TIMEOUT = 5*1000;
private static final int MAX_PER_PEER_TIMEOUT = 10*1000;
public int getPeerTimeout(Hash peer) {
PeerProfile prof = _context.profileOrganizer().getProfile(peer);

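getPeerTimeout() itself falls outside this hunk, but the paired constants suggest a profile-derived estimate clamped into the widened 5..10 second window. A guess at that shape (the class name and logic here are assumptions, not the source):

class PeerTimeoutClamp {
    static final int MIN_PER_PEER_TIMEOUT = 5*1000;
    static final int MAX_PER_PEER_TIMEOUT = 10*1000;

    /** Clamp a profile-derived round-trip estimate into the allowed window. */
    static int clamp(int estimatedMs) {
        if (estimatedMs < MIN_PER_PEER_TIMEOUT) return MIN_PER_PEER_TIMEOUT;
        if (estimatedMs > MAX_PER_PEER_TIMEOUT) return MAX_PER_PEER_TIMEOUT;
        return estimatedMs;
    }
}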
View File

@@ -51,7 +51,7 @@ public class RepublishLeaseSetJob extends JobImpl {
_log.warn("Not publishing a LOCAL lease that isn't current - " + _dest, new Exception("Publish expired LOCAL lease?"));
} else {
getContext().statManager().addRateData("netDb.republishLeaseSetCount", 1, 0);
_facade.sendStore(_dest, ls, new OnSuccess(getContext()), new OnFailure(getContext()), REPUBLISH_LEASESET_TIMEOUT, null);
_facade.sendStore(_dest, ls, new OnRepublishSuccess(getContext()), new OnRepublishFailure(getContext(), this), REPUBLISH_LEASESET_TIMEOUT, null);
//getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _dest, ls, new OnSuccess(getContext()), new OnFailure(getContext()), REPUBLISH_LEASESET_TIMEOUT));
}
} else {
@@ -76,21 +76,28 @@ public class RepublishLeaseSetJob extends JobImpl {
}
}
private class OnSuccess extends JobImpl {
public OnSuccess(RouterContext ctx) { super(ctx); }
public String getName() { return "Publish leaseSet successful"; }
public void runJob() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("successful publishing of the leaseSet for " + _dest.toBase64());
}
}
private class OnFailure extends JobImpl {
public OnFailure(RouterContext ctx) { super(ctx); }
public String getName() { return "Publish leaseSet failed"; }
public void runJob() {
if (_log.shouldLog(Log.WARN))
_log.warn("FAILED publishing of the leaseSet for " + _dest.toBase64());
RepublishLeaseSetJob.this.requeue(getContext().random().nextInt(60*1000));
}
void requeueRepublish() {
if (_log.shouldLog(Log.WARN))
_log.warn("FAILED publishing of the leaseSet for " + _dest.toBase64());
requeue(getContext().random().nextInt(60*1000));
}
}
class OnRepublishSuccess extends JobImpl {
public OnRepublishSuccess(RouterContext ctx) { super(ctx); }
public String getName() { return "Publish leaseSet successful"; }
public void runJob() {
//if (_log.shouldLog(Log.DEBUG))
// _log.debug("successful publishing of the leaseSet for " + _dest.toBase64());
}
}
class OnRepublishFailure extends JobImpl {
private RepublishLeaseSetJob _job;
public OnRepublishFailure(RouterContext ctx, RepublishLeaseSetJob job) {
super(ctx);
_job = job;
}
public String getName() { return "Publish leaseSet failed"; }
public void runJob() { _job.requeueRepublish(); }
}
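requeueRepublish() retries after a random 0..59s delay rather than a fixed one; the likely intent (an inference, the diff does not say) is that routers which all failed a publish at the same moment should not retry in lockstep. The jitter in isolation:

import java.util.Random;

class JitteredRetry {
    private static final Random RANDOM = new Random();

    /** Spread retries over a minute so simultaneous failures desynchronize. */
    static long nextRetryDelayMs() {
        return RANDOM.nextInt(60*1000);
    }
}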

View File

@@ -51,6 +51,8 @@ class SearchJob extends JobImpl {
private List _deferredSearches;
private boolean _deferredCleared;
private long _startedOn;
private boolean _floodfillPeersExhausted;
private int _floodfillSearchesOutstanding;
private static final int SEARCH_BREDTH = 3; // 10 peers at a time
private static final int SEARCH_PRIORITY = 400; // large because the search is probably for a real search
@@ -98,18 +100,9 @@ class SearchJob extends JobImpl {
_deferredCleared = false;
_peerSelector = facade.getPeerSelector();
_startedOn = -1;
_floodfillPeersExhausted = false;
_floodfillSearchesOutstanding = 0;
_expiration = getContext().clock().now() + timeoutMs;
getContext().statManager().createRateStat("netDb.successTime", "How long a successful search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.failedPeers", "How many peers fail to respond to a lookup?", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchCount", "Overall number of searches sent", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchMessageCount", "Overall number of mesages for all searches sent", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchReplyValidated", "How many search replies we get that we are able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies we get from unreliable peers that we skip?", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.republishQuantity", "How many peers do we need to send a found leaseSet to?", "NetworkDatabase", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().addRateData("netDb.searchCount", 1, 0);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Search (" + getClass().getName() + " for " + key.toBase64(), new Exception("Search enqueued by"));
@@ -128,11 +121,48 @@ class SearchJob extends JobImpl {
public long getExpiration() { return _expiration; }
public long getTimeoutMs() { return _timeoutMs; }
private static final boolean DEFAULT_FLOODFILL_ONLY = false;
static boolean onlyQueryFloodfillPeers(RouterContext ctx) {
if (isCongested(ctx))
return true;
return Boolean.valueOf(ctx.getProperty("netDb.floodfillOnly", DEFAULT_FLOODFILL_ONLY + "")).booleanValue();
}
static boolean isCongested(RouterContext ctx) {
float availableSend = ctx.bandwidthLimiter().getOutboundKBytesPerSecond()*1024 - ctx.bandwidthLimiter().getSendBps();
float availableRecv = ctx.bandwidthLimiter().getInboundKBytesPerSecond()*1024 - ctx.bandwidthLimiter().getReceiveBps();
// 6KBps is an arbitrary limit, but a wider search should be able to operate
// in that range without a problem
return ( (availableSend < 6*1024) || (availableRecv < 6*1024) );
}
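// Editor's worked example with illustrative numbers: given a 32 KBps outbound
// limit and a measured send rate of 28 KBps, availableSend = 32*1024 - 28*1024
// = 4096 bytes/sec, which is under the 6*1024 floor, so isCongested() returns
// true and onlyQueryFloodfillPeers() keeps searches narrow until load eases.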
static final int PER_FLOODFILL_PEER_TIMEOUT = 10*1000;
protected int getPerPeerTimeoutMs(Hash peer) {
int timeout = 0;
if (_floodfillPeersExhausted && _floodfillSearchesOutstanding <= 0)
timeout = _facade.getPeerTimeout(peer);
else
timeout = PER_FLOODFILL_PEER_TIMEOUT;
long now = getContext().clock().now();
if (now + timeout > _expiration)
return (int)(_expiration - now);
else
return timeout;
}
/**
* Let each peer take up to the average successful search RTT
*
*/
protected int getPerPeerTimeoutMs() {
if (_floodfillPeersExhausted && _floodfillSearchesOutstanding <= 0)
return PER_PEER_TIMEOUT;
else
return PER_FLOODFILL_PEER_TIMEOUT;
/*
if (true)
return PER_PEER_TIMEOUT;
int rv = -1;
@@ -145,8 +175,11 @@ class SearchJob extends JobImpl {
return PER_PEER_TIMEOUT;
else
return rv + 1025; // tunnel delay
*/
}
private static int MAX_PEERS_QUERIED = 40;
/**
* Send the next search, or stop if its completed
*/
@@ -168,6 +201,11 @@ class SearchJob extends JobImpl {
_log.info(getJobId() + ": Key search expired");
_state.complete(true);
fail();
} else if (_state.getAttempted().size() > MAX_PEERS_QUERIED) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Too many peers quried");
_state.complete(true);
fail();
} else {
//_log.debug("Continuing search");
continueSearch();
@@ -211,6 +249,14 @@ class SearchJob extends JobImpl {
int sent = 0;
Set attempted = _state.getAttempted();
while (sent <= 0) {
boolean onlyFloodfill = onlyQueryFloodfillPeers(getContext());
if (_floodfillPeersExhausted && onlyFloodfill && _state.getPending().size() <= 0) {
if (_log.shouldLog(Log.WARN))
_log.warn(getJobId() + ": no non-floodfill peers left, and no more pending. Searched: "
+ _state.getAttempted().size() + " failed: " + _state.getFailed().size());
fail();
return;
}
List closestHashes = getClosestRouters(_state.getTarget(), toCheck, attempted);
if ( (closestHashes == null) || (closestHashes.size() <= 0) ) {
if (_state.getPending().size() <= 0) {
@@ -243,8 +289,14 @@ class SearchJob extends JobImpl {
+ peer + " : " + (ds == null ? "null" : ds.getClass().getName()));
_state.replyTimeout(peer);
} else {
if (((RouterInfo)ds).isHidden() ||
getContext().shitlist().isShitlisted(peer)) {
RouterInfo ri = (RouterInfo)ds;
if (!FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
_floodfillPeersExhausted = true;
if (onlyFloodfill)
continue;
}
if (ri.isHidden()) {// || // allow querying shitlisted, since its indirect
//getContext().shitlist().isShitlisted(peer)) {
// dont bother
} else {
_state.addPending(peer);
@@ -319,12 +371,13 @@ class SearchJob extends JobImpl {
} else {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Send search to " + router.getIdentity().getHash().toBase64()
+ " for " + _state.getTarget().toBase64());
+ " for " + _state.getTarget().toBase64()
+ " w/ timeout " + getPerPeerTimeoutMs(router.getIdentity().calculateHash()));
}
getContext().statManager().addRateData("netDb.searchMessageCount", 1, 0);
if (_isLease || false) // moo
if (_isLease || true) // always send searches out tunnels
sendLeaseSearch(router);
else
sendRouterSearch(router);
@@ -355,7 +408,7 @@ class SearchJob extends JobImpl {
// return;
//}
int timeout = _facade.getPeerTimeout(router.getIdentity().getHash());
int timeout = getPerPeerTimeoutMs(router.getIdentity().getHash());
long expiration = getContext().clock().now() + timeout;
DatabaseLookupMessage msg = buildMessage(inTunnelId, inTunnel.getPeer(0), expiration);
@@ -379,6 +432,8 @@ class SearchJob extends JobImpl {
SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(getContext(), router, _state, _facade,
this, outTunnel, inTunnel);
if (FloodfillNetworkDatabaseFacade.isFloodfill(router))
_floodfillSearchesOutstanding++;
getContext().messageRegistry().registerPending(sel, reply, new FailedJob(getContext(), router), timeout);
getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnelId, router.getIdentity().getHash());
}
@@ -398,6 +453,8 @@ class SearchJob extends JobImpl {
SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(getContext(), router, _state, _facade, this);
SendMessageDirectJob j = new SendMessageDirectJob(getContext(), msg, router.getIdentity().getHash(),
reply, new FailedJob(getContext(), router), sel, timeout, SEARCH_PRIORITY);
if (FloodfillNetworkDatabaseFacade.isFloodfill(router))
_floodfillSearchesOutstanding++;
j.runJob();
//getContext().jobQueue().addJob(j);
}
@@ -456,7 +513,7 @@ class SearchJob extends JobImpl {
void replyFound(DatabaseSearchReplyMessage message, Hash peer) {
long duration = _state.replyFound(peer);
// this processing can take a while, so split 'er up
getContext().jobQueue().addJob(new SearchReplyJob(getContext(), (DatabaseSearchReplyMessage)message, peer, duration));
getContext().jobQueue().addJob(new SearchReplyJob(getContext(), this, (DatabaseSearchReplyMessage)message, peer, duration));
}
/**
@@ -468,132 +525,6 @@ class SearchJob extends JobImpl {
// noop
}
private final class SearchReplyJob extends JobImpl {
private DatabaseSearchReplyMessage _msg;
/**
* Peer who we think sent us the reply. Note: could be spoofed! If the
* attacker knew we were searching for a particular key from a
* particular peer, they could send us some searchReply messages with
* shitty values, trying to get us to consider that peer unreliable.
* Potential fixes include either authenticated 'from' address or use a
* nonce in the search + searchReply (and check for it in the selector).
*
*/
private Hash _peer;
private int _curIndex;
private int _invalidPeers;
private int _seenPeers;
private int _newPeers;
private int _duplicatePeers;
private int _repliesPendingVerification;
private long _duration;
public SearchReplyJob(RouterContext enclosingContext, DatabaseSearchReplyMessage message, Hash peer, long duration) {
super(enclosingContext);
_msg = message;
_peer = peer;
_curIndex = 0;
_invalidPeers = 0;
_seenPeers = 0;
_newPeers = 0;
_duplicatePeers = 0;
_repliesPendingVerification = 0;
}
public String getName() { return "Process Reply for Kademlia Search"; }
public void runJob() {
if (_curIndex >= _msg.getNumReplies()) {
if (_repliesPendingVerification > 0) {
// we received new references from the peer, but still
// haven't verified all of them, so lets give it more time
SearchReplyJob.this.requeue(_timeoutMs);
} else {
// either they didn't tell us anything new or we have verified
// (or failed to verify) all of them. we're done
getContext().profileManager().dbLookupReply(_peer, _newPeers, _seenPeers,
_invalidPeers, _duplicatePeers, _duration);
if (_newPeers > 0)
newPeersFound(_newPeers);
}
} else {
Hash peer = _msg.getReply(_curIndex);
boolean shouldAdd = false;
RouterInfo info = getContext().netDb().lookupRouterInfoLocally(peer);
if (info == null) {
// if the peer is giving us lots of bad peer references,
// dont try to fetch them.
boolean sendsBadInfo = getContext().profileOrganizer().peerSendsBadReplies(_peer);
if (!sendsBadInfo) {
// we don't need to search for everything we're given here - only ones that
// are next in our search path...
if (getContext().shitlist().isShitlisted(peer)) {
if (_log.shouldLog(Log.INFO))
_log.info("Not looking for a shitlisted peer...");
getContext().statManager().addRateData("netDb.searchReplyValidationSkipped", 1, 0);
} else {
//getContext().netDb().lookupRouterInfo(peer, new ReplyVerifiedJob(getContext(), peer), new ReplyNotVerifiedJob(getContext(), peer), _timeoutMs);
//_repliesPendingVerification++;
shouldAdd = true;
}
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Peer " + _peer.toBase64() + " sends us bad replies, so not verifying " + peer.toBase64());
getContext().statManager().addRateData("netDb.searchReplyValidationSkipped", 1, 0);
}
}
if (_state.wasAttempted(peer)) {
_duplicatePeers++;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": dbSearchReply received on search referencing router "
+ peer);
if (shouldAdd) {
if (_facade.getKBuckets().add(peer))
_newPeers++;
else
_seenPeers++;
}
_curIndex++;
requeue(0);
}
}
/** the peer gave us a reference to a new router, and we were able to fetch it */
private final class ReplyVerifiedJob extends JobImpl {
private Hash _key;
public ReplyVerifiedJob(RouterContext enclosingContext, Hash key) {
super(enclosingContext);
_key = key;
}
public String getName() { return "Search reply value verified"; }
public void runJob() {
if (_log.shouldLog(Log.INFO))
_log.info("Peer reply from " + _peer.toBase64() + " verified: " + _key.toBase64());
_repliesPendingVerification--;
getContext().statManager().addRateData("netDb.searchReplyValidated", 1, 0);
}
}
/** the peer gave us a reference to a new router, and we were NOT able to fetch it */
private final class ReplyNotVerifiedJob extends JobImpl {
private Hash _key;
public ReplyNotVerifiedJob(RouterContext enclosingContext, Hash key) {
super(enclosingContext);
_key = key;
}
public String getName() { return "Search reply value NOT verified"; }
public void runJob() {
if (_log.shouldLog(Log.INFO))
_log.info("Peer reply from " + _peer.toBase64() + " failed verification: " + _key.toBase64());
_repliesPendingVerification--;
_invalidPeers++;
getContext().statManager().addRateData("netDb.searchReplyNotValidated", 1, 0);
}
}
}
/**
* Called when a particular peer failed to respond before the timeout was
* reached, or if the peer could not be contacted at all.
@@ -601,6 +532,7 @@ class SearchJob extends JobImpl {
*/
protected class FailedJob extends JobImpl {
private Hash _peer;
private boolean _isFloodfill;
private boolean _penalizePeer;
private long _sentOn;
public FailedJob(RouterContext enclosingContext, RouterInfo peer) {
@@ -616,8 +548,11 @@ class SearchJob extends JobImpl {
_penalizePeer = penalizePeer;
_peer = peer.getIdentity().getHash();
_sentOn = enclosingContext.clock().now();
_isFloodfill = FloodfillNetworkDatabaseFacade.isFloodfill(peer);
}
public void runJob() {
if (_isFloodfill)
_floodfillSearchesOutstanding--;
if (_state.completed()) return;
_state.replyTimeout(_peer);
if (_penalizePeer) {
@@ -748,8 +683,11 @@ class SearchJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": State of failed search: " + _state);
long time = getContext().clock().now() - _state.getWhenStarted();
int attempted = _state.getAttempted().size();
getContext().statManager().addRateData("netDb.failedAttemptedPeers", attempted, time);
if (_keepStats) {
long time = getContext().clock().now() - _state.getWhenStarted();
getContext().statManager().addRateData("netDb.failedTime", time, 0);
//_facade.fail(_state.getTarget());
}
@@ -833,4 +771,145 @@ class SearchJob extends JobImpl {
return super.toString() + " started "
+ DataHelper.formatDuration((getContext().clock().now() - _startedOn)) + " ago";
}
boolean wasAttempted(Hash peer) { return _state.wasAttempted(peer); }
long timeoutMs() { return _timeoutMs; }
boolean add(Hash peer) { return _facade.getKBuckets().add(peer); }
void decrementOutstandingFloodfillSearches() { _floodfillSearchesOutstanding--; }
}
class SearchReplyJob extends JobImpl {
private DatabaseSearchReplyMessage _msg;
private Log _log;
/**
* Peer who we think sent us the reply. Note: could be spoofed! If the
* attacker knew we were searching for a particular key from a
* particular peer, they could send us some searchReply messages with
* shitty values, trying to get us to consider that peer unreliable.
* Potential fixes include either authenticated 'from' address or use a
* nonce in the search + searchReply (and check for it in the selector).
*
*/
private Hash _peer;
private int _curIndex;
private int _invalidPeers;
private int _seenPeers;
private int _newPeers;
private int _duplicatePeers;
private int _repliesPendingVerification;
private long _duration;
private SearchJob _searchJob;
public SearchReplyJob(RouterContext enclosingContext, SearchJob job, DatabaseSearchReplyMessage message, Hash peer, long duration) {
super(enclosingContext);
_log = enclosingContext.logManager().getLog(getClass());
_searchJob = job;
_msg = message;
_peer = peer;
_curIndex = 0;
_invalidPeers = 0;
_seenPeers = 0;
_newPeers = 0;
_duplicatePeers = 0;
_repliesPendingVerification = 0;
}
public String getName() { return "Process Reply for Kademlia Search"; }
public void runJob() {
if (_curIndex >= _msg.getNumReplies()) {
if (_repliesPendingVerification > 0) {
// we received new references from the peer, but still
// haven't verified all of them, so lets give it more time
requeue(_searchJob.timeoutMs());
} else {
// either they didn't tell us anything new or we have verified
// (or failed to verify) all of them. we're done
getContext().profileManager().dbLookupReply(_peer, _newPeers, _seenPeers,
_invalidPeers, _duplicatePeers, _duration);
if (_newPeers > 0)
_searchJob.newPeersFound(_newPeers);
}
} else {
Hash peer = _msg.getReply(_curIndex);
boolean shouldAdd = false;
RouterInfo info = getContext().netDb().lookupRouterInfoLocally(peer);
if (info == null) {
// if the peer is giving us lots of bad peer references,
// dont try to fetch them.
boolean sendsBadInfo = getContext().profileOrganizer().peerSendsBadReplies(_peer);
if (!sendsBadInfo) {
// we don't need to search for everything we're given here - only ones that
// are next in our search path...
if (getContext().shitlist().isShitlisted(peer)) {
if (_log.shouldLog(Log.INFO))
_log.info("Not looking for a shitlisted peer...");
getContext().statManager().addRateData("netDb.searchReplyValidationSkipped", 1, 0);
} else {
//getContext().netDb().lookupRouterInfo(peer, new ReplyVerifiedJob(getContext(), peer), new ReplyNotVerifiedJob(getContext(), peer), _timeoutMs);
//_repliesPendingVerification++;
shouldAdd = true;
}
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Peer " + _peer.toBase64() + " sends us bad replies, so not verifying " + peer.toBase64());
getContext().statManager().addRateData("netDb.searchReplyValidationSkipped", 1, 0);
}
}
if (_searchJob.wasAttempted(peer)) {
_duplicatePeers++;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": dbSearchReply received on search referencing router " + peer);
if (shouldAdd) {
if (_searchJob.add(peer))
_newPeers++;
else
_seenPeers++;
}
_curIndex++;
requeue(0);
}
}
void replyVerified() {
if (_log.shouldLog(Log.INFO))
_log.info("Peer reply from " + _peer.toBase64());
_repliesPendingVerification--;
getContext().statManager().addRateData("netDb.searchReplyValidated", 1, 0);
}
void replyNotVerified() {
if (_log.shouldLog(Log.INFO))
_log.info("Peer reply from " + _peer.toBase64());
_repliesPendingVerification--;
_invalidPeers++;
getContext().statManager().addRateData("netDb.searchReplyNotValidated", 1, 0);
}
}
/** the peer gave us a reference to a new router, and we were able to fetch it */
class ReplyVerifiedJob extends JobImpl {
private Hash _key;
private SearchReplyJob _replyJob;
public ReplyVerifiedJob(RouterContext enclosingContext, SearchReplyJob srj, Hash key) {
super(enclosingContext);
_replyJob = srj;
_key = key;
}
public String getName() { return "Search reply value verified"; }
public void runJob() { _replyJob.replyVerified(); }
}
/** the peer gave us a reference to a new router, and we were NOT able to fetch it */
class ReplyNotVerifiedJob extends JobImpl {
private Hash _key;
private SearchReplyJob _replyJob;
public ReplyNotVerifiedJob(RouterContext enclosingContext, SearchReplyJob srj, Hash key) {
super(enclosingContext);
_key = key;
_replyJob = srj;
}
public String getName() { return "Search reply value NOT verified"; }
public void runJob() { _replyJob.replyNotVerified(); }
}
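SearchReplyJob above processes one reply entry per invocation and then calls requeue(0), yielding the shared job queue between slices; PersistProfileJob further down in this changeset uses the same trick. A stand-alone sketch of the pattern (SliceJob is an illustration, not I2P code):

import java.util.Arrays;
import java.util.Iterator;

class SliceJob {
    private final Iterator items;
    SliceJob(java.util.List items) { this.items = items.iterator(); }

    void runJob() {
        if (items.hasNext()) {
            System.out.println("processing " + items.next()); // one slice per run
            requeue(0); // in I2P this re-enqueues the job; here we just recurse
        }
    }

    void requeue(long delayMs) { runJob(); }

    public static void main(String[] args) {
        new SliceJob(Arrays.asList(new String[] { "a", "b", "c" })).runJob();
    }
}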

View File

@@ -26,6 +26,7 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
private SearchJob _job;
private TunnelInfo _outTunnel;
private TunnelInfo _replyTunnel;
private boolean _isFloodfillPeer;
private long _sentOn;
public SearchUpdateReplyFoundJob(RouterContext context, RouterInfo peer,
@@ -39,6 +40,7 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
super(context);
_log = context.logManager().getLog(SearchUpdateReplyFoundJob.class);
_peer = peer.getIdentity().getHash();
_isFloodfillPeer = FloodfillNetworkDatabaseFacade.isFloodfill(peer);
_state = state;
_facade = facade;
_job = job;
@@ -49,6 +51,9 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
public String getName() { return "Update Reply Found for Kademlia Search"; }
public void runJob() {
if (_isFloodfillPeer)
_job.decrementOutstandingFloodfillSearches();
I2NPMessage message = _message;
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Reply from " + _peer.toBase64()

View File

@@ -29,7 +29,7 @@ class StartExplorersJob extends JobImpl {
/** don't explore more than 1 bucket at a time */
private static final int MAX_PER_RUN = 1;
/** dont explore the network more often than once every minute */
private static final int MIN_RERUN_DELAY_MS = 60*1000;
private static final int MIN_RERUN_DELAY_MS = 5*60*1000;
/** explore the network at least once every thirty minutes */
private static final int MAX_RERUN_DELAY_MS = 30*60*1000;

View File

@@ -92,6 +92,8 @@ class StoreJob extends JobImpl {
private boolean isExpired() {
return getContext().clock().now() >= _expiration;
}
private static final int MAX_PEERS_SENT = 10;
/**
* send the key to the next batch of peers
@@ -105,6 +107,9 @@ class StoreJob extends JobImpl {
if (isExpired()) {
_state.complete(true);
fail();
} else if (_state.getAttempted().size() > MAX_PEERS_SENT) {
_state.complete(true);
fail();
} else {
//if (_log.shouldLog(Log.INFO))
// _log.info(getJobId() + ": Sending: " + _state);

View File

@@ -20,35 +20,29 @@ class PersistProfilesJob extends JobImpl {
public String getName() { return "Persist profiles"; }
public void runJob() {
Set peers = _mgr.selectPeers();
Hash hashes[] = new Hash[peers.size()];
int i = 0;
for (Iterator iter = peers.iterator(); iter.hasNext(); )
hashes[i++] = (Hash)iter.next();
getContext().jobQueue().addJob(new PersistProfileJob(getContext(), hashes));
}
private class PersistProfileJob extends JobImpl {
private Hash _peers[];
private int _cur;
public PersistProfileJob(RouterContext enclosingContext, Hash peers[]) {
super(enclosingContext);
_peers = peers;
_cur = 0;
}
public void runJob() {
if (_cur < _peers.length) {
_mgr.storeProfile(_peers[_cur]);
_cur++;
}
if (_cur >= _peers.length) {
// no more left, requeue up the main persist-em-all job
PersistProfilesJob.this.getTiming().setStartAfter(getContext().clock().now() + PERSIST_DELAY);
PersistProfilesJob.this.getContext().jobQueue().addJob(PersistProfilesJob.this);
} else {
// we've got peers left to persist, so requeue the persist profile job
PersistProfilesJob.PersistProfileJob.this.requeue(1000);
}
}
public String getName() { return "Persist profile"; }
getContext().jobQueue().addJob(new PersistProfileJob(getContext(), this, peers));
}
void persist(Hash peer) { _mgr.storeProfile(peer); }
void requeue() { requeue(PERSIST_DELAY); }
}
class PersistProfileJob extends JobImpl {
private PersistProfilesJob _job;
private Iterator _peers;
public PersistProfileJob(RouterContext enclosingContext, PersistProfilesJob job, Set peers) {
super(enclosingContext);
_peers = peers.iterator();
_job = job;
}
public void runJob() {
if (_peers.hasNext())
_job.persist((Hash)_peers.next());
if (_peers.hasNext()) {
requeue(1000);
} else {
// no more left, requeue up the main persist-em-all job
_job.requeue();
}
}
public String getName() { return "Persist profile"; }
}

View File

@@ -251,10 +251,12 @@ public class ProfileOrganizer {
// we only use selectHighCapacityPeers when we are selecting for PURPOSE_TEST
// or we are falling back due to _fastPeers being too small, so we can always
// exclude the fast peers
/*
if (exclude == null)
exclude = new HashSet(_fastPeers.keySet());
else
exclude.addAll(_fastPeers.keySet());
*/
locked_selectPeers(_highCapacityPeers, howMany, exclude, matches);
}
if (matches.size() < howMany) {
@@ -482,9 +484,10 @@ public class ProfileOrganizer {
placeTime = System.currentTimeMillis()-placeStart;
if (_log.shouldLog(Log.DEBUG)) {
_log.debug("Profiles reorganized. averages: [integration: " + _thresholdIntegrationValue
if (_log.shouldLog(Log.INFO))
_log.info("Profiles reorganized. averages: [integration: " + _thresholdIntegrationValue
+ ", capacity: " + _thresholdCapacityValue + ", speed: " + _thresholdSpeedValue + "]");
if (_log.shouldLog(Log.DEBUG)) {
StringBuffer buf = new StringBuffer(512);
for (Iterator iter = _strictCapacityOrder.iterator(); iter.hasNext(); ) {
PeerProfile prof = (PeerProfile)iter.next();
@@ -522,7 +525,7 @@ public class ProfileOrganizer {
if ( (!_fastPeers.containsKey(cur.getPeer())) && (!cur.getIsFailing()) ) {
if (!isSelectable(cur.getPeer())) {
// skip peers we dont have in the netDb
if (_log.shouldLog(Log.INFO))
if (_log.shouldLog(Log.INFO))
_log.info("skip unknown peer from fast promotion: " + cur.getPeer().toBase64());
continue;
}
@@ -611,8 +614,9 @@ public class ProfileOrganizer {
continue;
// dont bother trying to make sense of things below the baseline
if (profile.getCapacityValue() <= CapacityCalculator.GROWTH_FACTOR)
continue;
// otoh, keep them in the threshold calculation, so we can adapt
////if (profile.getCapacityValue() <= CapacityCalculator.GROWTH_FACTOR)
//// continue;
totalCapacity += profile.getCapacityValue();
totalIntegration += profile.getIntegrationValue();
@@ -680,6 +684,12 @@ public class ProfileOrganizer {
+ "], but there aren't enough of them " + numExceedingMean);
_thresholdCapacityValue = Math.max(thresholdAtMinHighCap, thresholdAtLowest);
}
// the base growth factor is the value we give to new routers that we don't
// know anything about. dont go under that limit unless you want to expose
// the selection to simple ident flooding attacks
if (_thresholdCapacityValue <= CapacityCalculator.GROWTH_FACTOR)
_thresholdCapacityValue = CapacityCalculator.GROWTH_FACTOR + 0.0001;
}
/**
@@ -801,6 +811,8 @@ public class ProfileOrganizer {
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Peer " + peer.toBase64() + " is locally known, allowing its use");
// perhaps check to see if they are active?
return true;
}
} else {

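The floor added above keeps the high-capacity threshold strictly over the baseline handed to brand-new routers, so flooding the netDb with fresh identities cannot make them all qualify as high capacity immediately. The guard in isolation (GROWTH_FACTOR's real value lives in CapacityCalculator; the number below is a placeholder):

class CapacityThresholdFloor {
    static final double GROWTH_FACTOR = 5.0; // placeholder, see CapacityCalculator

    /** Keep the cutoff strictly above what an unknown newcomer starts with. */
    static double floorThreshold(double threshold) {
        if (threshold <= GROWTH_FACTOR)
            return GROWTH_FACTOR + 0.0001;
        return threshold;
    }
}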
View File

@@ -54,12 +54,8 @@ public class CreateRouterInfoJob extends JobImpl {
info.setAddresses(getContext().commSystem().createAddresses());
Properties stats = getContext().statPublisher().publishStatistics();
stats.setProperty(RouterInfo.PROP_NETWORK_ID, Router.NETWORK_ID+"");
getContext().router().addReachabilityCapability(info);
getContext().router().addCapabilities(info);
info.setOptions(stats);
if (FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext()))
info.addCapability(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL);
if ("true".equalsIgnoreCase(getContext().getProperty(Router.PROP_HIDDEN, "false")))
info.addCapability(RouterInfo.CAPABILITY_HIDDEN);
info.setPeers(new HashSet());
info.setPublished(getCurrentPublishDate(getContext()));
RouterIdentity ident = new RouterIdentity();

View File

@@ -128,14 +128,7 @@ public class RebuildRouterInfoJob extends JobImpl {
Properties stats = getContext().statPublisher().publishStatistics();
stats.setProperty(RouterInfo.PROP_NETWORK_ID, ""+Router.NETWORK_ID);
info.setOptions(stats);
if (FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext()))
info.addCapability(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL);
// Set caps=H for hidden mode routers
if ("true".equalsIgnoreCase(getContext().getProperty(Router.PROP_HIDDEN, "false")))
info.addCapability(RouterInfo.CAPABILITY_HIDDEN);
getContext().router().addReachabilityCapability(info);
getContext().router().addCapabilities(info);
// info.setPeers(new HashSet()); // this would have the trusted peers
info.setPublished(CreateRouterInfoJob.getCurrentPublishDate(getContext()));

View File

@@ -54,6 +54,7 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
}
public int countActivePeers() { return (_manager == null ? 0 : _manager.countActivePeers()); }
public int countActiveSendPeers() { return (_manager == null ? 0 : _manager.countActiveSendPeers()); }
public List getBids(OutNetMessage msg) {
return _manager.getBids(msg);

View File

@@ -47,7 +47,6 @@ public class FIFOBandwidthLimiter {
private long _lastTotalSent;
private long _lastTotalReceived;
private long _lastStatsUpdated;
private long _lastRateUpdated;
private float _sendBps;
private float _recvBps;
@@ -65,8 +64,8 @@ public class FIFOBandwidthLimiter {
_context.statManager().createRateStat("bwLimiter.pendingInboundRequests", "How many inbound requests are ahead of the current one (ignoring ones with 0)?", "BandwidthLimiter", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
_context.statManager().createRateStat("bwLimiter.outboundDelayedTime", "How long it takes to honor an outbound request (ignoring ones with that go instantly)?", "BandwidthLimiter", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
_context.statManager().createRateStat("bwLimiter.inboundDelayedTime", "How long it takes to honor an inbound request (ignoring ones with that go instantly)?", "BandwidthLimiter", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
_context.statManager().createRateStat("bw.sendRate", "Low level bandwidth send rate, averaged every minute", "BandwidthLimiter", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
_context.statManager().createRateStat("bw.recvRate", "Low level bandwidth receive rate, averaged every minute", "BandwidthLimiter", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
_context.statManager().createRateStat("bw.sendBps1s", "How fast we are transmitting for the 1s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
_context.statManager().createRateStat("bw.recvBps1s", "How fast we are receiving for the 1s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
_pendingInboundRequests = new ArrayList(16);
_pendingOutboundRequests = new ArrayList(16);
_lastTotalSent = _totalAllocatedOutboundBytes;
@@ -74,7 +73,6 @@ public class FIFOBandwidthLimiter {
_sendBps = 0;
_recvBps = 0;
_lastStatsUpdated = now();
_lastRateUpdated = _lastStatsUpdated;
_refiller = new FIFOBandwidthRefiller(_context, this);
I2PThread t = new I2PThread(_refiller);
t.setName("BWRefiller" + (++__id));
@@ -295,11 +293,6 @@ public class FIFOBandwidthLimiter {
_context.statManager().getStatLog().addData("bw", "bw.recvBps1s", (long)_recvBps, recv);
}
}
if (60*1000 + _lastRateUpdated <= now) {
_lastRateUpdated = now;
_context.statManager().addRateData("bw.sendRate", (long)_sendBps, 0);
_context.statManager().addRateData("bw.recvRate", (long)_recvBps, 0);
}
}
/**

View File

@@ -47,7 +47,7 @@ public class GetBidsJob extends JobImpl {
if (context.shitlist().isShitlisted(to)) {
if (log.shouldLog(Log.WARN))
log.warn("Attempt to send a message to a shitlisted peer - " + to);
context.messageRegistry().peerFailed(to);
//context.messageRegistry().peerFailed(to);
fail(context, msg);
return;
}
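Commenting out peerFailed() means a send to a shitlisted peer now fails only the one message, instead of tearing down every pending message registered for that peer (peerFailed() itself appears to go away in the OutboundMessageRegistry rewrite later in this changeset). The single-message fail path, sketched from the calls visible in this diff (FailHelper and failOne are hypothetical names):

import net.i2p.router.Job;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;

class FailHelper {
    /** Mirrors what fail(context, msg) above must do for a single message. */
    static void failOne(RouterContext context, OutNetMessage msg) {
        msg.discardData();                     // drop the payload buffers
        Job onFail = msg.getOnFailedSendJob();
        if (onFail != null)
            context.jobQueue().addJob(onFail); // notify only this message's owner
    }
}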

View File

@@ -10,14 +10,8 @@ package net.i2p.router.transport;
import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.*;
import net.i2p.data.Hash;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.Job;
import net.i2p.router.MessageSelector;
@@ -29,29 +23,25 @@ import net.i2p.util.SimpleTimer;
public class OutboundMessageRegistry {
private Log _log;
/** Expiration date (Long) to OutNetMessage */
private TreeMap _pendingMessages;
/** list of currently active MessageSelector instances */
private List _selectors;
/** map of active MessageSelector to either an OutNetMessage or a List of OutNetMessages causing it (for quick removal) */
private Map _selectorToMessage;
/** set of active OutNetMessage (for quick removal and selector fetching) */
private Set _activeMessages;
private CleanupTask _cleanupTask;
private RouterContext _context;
private final static long CLEANUP_DELAY = 1000*5; // how often to expire pending unreplied messages
public OutboundMessageRegistry(RouterContext context) {
_context = context;
_log = _context.logManager().getLog(OutboundMessageRegistry.class);
_pendingMessages = new TreeMap();
//_context.jobQueue().addJob(new CleanupPendingMessagesJob());
_selectors = new ArrayList(64);
_selectorToMessage = new HashMap(64);
_activeMessages = new HashSet(64);
_cleanupTask = new CleanupTask();
}
public void shutdown() {
if (_log.shouldLog(Log.WARN)) {
StringBuffer buf = new StringBuffer(1024);
buf.append("Pending messages: ").append(_pendingMessages.size()).append("\n");
for (Iterator iter = _pendingMessages.values().iterator(); iter.hasNext(); ) {
buf.append(iter.next().toString()).append("\n\t");
}
_log.log(Log.WARN, buf.toString());
}
}
public void shutdown() {}
/**
* Retrieve all messages that are waiting for the specified message. In
@@ -65,103 +55,72 @@ public class OutboundMessageRegistry {
* the payload
*/
public List getOriginalMessages(I2NPMessage message) {
ArrayList matches = new ArrayList(2);
ArrayList matchedSelectors = null;
ArrayList removedSelectors = null;
long beforeSync = _context.clock().now();
Map messages = null;
long matchTime = 0;
long continueTime = 0;
int numMessages = 0;
long afterSync1 = 0;
long afterSearch = 0;
int matchedRemoveCount = 0;
StringBuffer slow = null; // new StringBuffer(256);
synchronized (_pendingMessages) {
messages = _pendingMessages; //(Map)_pendingMessages.clone();
numMessages = messages.size();
afterSync1 = _context.clock().now();
for (Iterator iter = messages.keySet().iterator(); iter.hasNext(); ) {
Long exp = (Long)iter.next();
OutNetMessage msg = (OutNetMessage)messages.get(exp);
MessageSelector selector = msg.getReplySelector();
if (selector != null) {
long before = _context.clock().now();
boolean isMatch = selector.isMatch(message);
long after = _context.clock().now();
long diff = after-before;
if (diff > 100) {
if (_log.shouldLog(Log.WARN))
_log.warn("Matching with selector took too long (" + diff + "ms) : "
+ selector.getClass().getName());
if (slow == null) slow = new StringBuffer(256);
slow.append(selector.getClass().getName()).append(": ");
slow.append(diff).append(" ");
synchronized (_selectors) {
for (int i = 0; i < _selectors.size(); i++) {
MessageSelector sel = (MessageSelector)_selectors.get(i);
if (sel == null)
continue;
boolean isMatch = sel.isMatch(message);
if (isMatch) {
if (matchedSelectors == null) matchedSelectors = new ArrayList(1);
matchedSelectors.add(sel);
if (!sel.continueMatching()) {
if (removedSelectors == null) removedSelectors = new ArrayList(1);
removedSelectors.add(sel);
_selectors.remove(i);
i--;
}
matchTime += diff;
}
}
}
if (isMatch) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Selector matches [" + selector);
if (!matches.contains(msg))
matches.add(msg);
long beforeCon = _context.clock().now();
boolean continueMatching = selector.continueMatching();
long afterCon = _context.clock().now();
long diffCon = afterCon - beforeCon;
if (diffCon > 100) {
if (_log.shouldLog(Log.WARN))
_log.warn("Error continueMatching on a match took too long ("
+ diffCon + "ms) : " + selector.getClass().getName());
}
continueTime += diffCon;
if (continueMatching) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Continue matching");
// noop
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Stop matching selector " + selector + " for message "
+ msg.getMessageType());
// i give in mihi, i'll use iter.remove just this once ;)
// (TreeMap supports it, and this synchronized block is a hotspot)
iter.remove();
matchedRemoveCount++;
}
List rv = null;
if (matchedSelectors != null) {
rv = new ArrayList(matchedSelectors.size());
for (int i = 0; i < matchedSelectors.size(); i++) {
MessageSelector sel = (MessageSelector)matchedSelectors.get(i);
boolean removed = false;
OutNetMessage msg = null;
List msgs = null;
synchronized (_selectorToMessage) {
Object o = null;
if ( (removedSelectors != null) && (removedSelectors.contains(sel)) ) {
o = _selectorToMessage.remove(sel);
removed = true;
} else {
//_log.debug("Selector does not match [" + selector + "]");
o = _selectorToMessage.get(sel);
}
if (o instanceof OutNetMessage) {
msg = (OutNetMessage)o;
if (msg != null)
rv.add(msg);
} else if (o instanceof List) {
msgs = (List)o;
if (msgs != null)
rv.addAll(msgs);
}
}
if (removed) {
if (msg != null) {
synchronized (_activeMessages) {
_activeMessages.remove(msg);
}
} else if (msgs != null) {
synchronized (_activeMessages) {
_activeMessages.removeAll(msgs);
}
}
}
}
afterSearch = _context.clock().now();
}
long delay = _context.clock().now() - beforeSync;
long search = afterSearch - afterSync1;
long sync = afterSync1 - beforeSync;
int level = Log.DEBUG;
if (delay > 1000)
level = Log.ERROR;
if (_log.shouldLog(level)) {
StringBuffer buf = new StringBuffer(1024);
buf.append("getMessages took ").append(delay).append("ms with search time of");
buf.append(search).append("ms (match: ").append(matchTime).append("ms, continue: ");
buf.append(continueTime).append("ms, #: ").append(numMessages).append(") and sync time of ");
buf.append(sync).append("ms for ");
buf.append(matchedRemoveCount);
buf.append(" removed, ").append(matches.size()).append(" matches: slow = ");
if (slow != null)
buf.append(slow.toString());
_log.log(level, buf.toString());
} else {
rv = Collections.EMPTY_LIST;
}
return matches;
return rv;
}
public OutNetMessage registerPending(MessageSelector replySelector, ReplyJob onReply, Job onTimeout, int timeoutMs) {
@@ -174,271 +133,133 @@ public class OutboundMessageRegistry {
registerPending(msg, true);
return msg;
}
public void registerPending(OutNetMessage msg) {
registerPending(msg, false);
}
public void registerPending(OutNetMessage msg) { registerPending(msg, false); }
public void registerPending(OutNetMessage msg, boolean allowEmpty) {
if (msg == null)
throw new IllegalArgumentException("Null OutNetMessage specified? wtf");
if (!allowEmpty) {
if (msg.getMessage() == null)
if ( (!allowEmpty) && (msg.getMessage() == null) )
throw new IllegalArgumentException("OutNetMessage doesn't contain an I2NPMessage? wtf");
MessageSelector sel = msg.getReplySelector();
if (sel == null) throw new IllegalArgumentException("No reply selector? wtf");
boolean alreadyPending = false;
synchronized (_activeMessages) {
if (!_activeMessages.add(msg))
return; // dont add dups
}
long beforeSync = _context.clock().now();
long afterSync1 = 0;
long afterDone = 0;
try {
OutNetMessage oldMsg = null;
long l = msg.getExpiration();
synchronized (_pendingMessages) {
if (_pendingMessages.containsValue(msg)) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Not adding an already pending message: " + msg,
new Exception("Duplicate message registration"));
return;
}
while (_pendingMessages.containsKey(new Long(l)))
l++;
_pendingMessages.put(new Long(l), msg);
}
afterSync1 = _context.clock().now();
// this may get orphaned if the message is matched or explicitly
// removed, but its cheap enough to do an extra remove on the map
// that to poll the list periodically
SimpleTimer.getInstance().addEvent(new CleanupExpiredTask(l), l - _context.clock().now());
if (_log.shouldLog(Log.DEBUG))
_log.debug("Register pending: " + msg.getReplySelector().getClass().getName()
+ " for " + msg.getMessage() + ": "
+ msg.getReplySelector().toString(), new Exception("Register pending"));
afterDone = _context.clock().now();
} finally {
long delay = _context.clock().now() - beforeSync;
long sync1 = afterSync1 - beforeSync;
long done = afterDone - afterSync1;
String warn = delay + "ms (sync = " + sync1 + "ms, done = " + done + "ms)";
if ( (delay > 1000) && (_log.shouldLog(Log.WARN)) ) {
_log.error("Synchronizing in the registry.register took too long! " + warn);
//_context.messageHistory().messageProcessingError(msg.getMessage().getUniqueId(),
// msg.getMessage().getClass().getName(),
// "RegisterPending took too long: " + warn);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Synchronizing in the registry.register was quick: " + warn);
synchronized (_selectorToMessage) {
Object oldMsg = _selectorToMessage.put(sel, msg);
if (oldMsg != null) {
List multi = null;
if (oldMsg instanceof OutNetMessage) {
//multi = Collections.synchronizedList(new ArrayList(4));
multi = new ArrayList(4);
multi.add(oldMsg);
multi.add(msg);
_selectorToMessage.put(sel, multi);
} else if (oldMsg instanceof List) {
multi = (List)oldMsg;
multi.add(msg);
_selectorToMessage.put(sel, multi);
}
if (_log.shouldLog(Log.WARN))
_log.warn("a single message selector [" + sel + "] with multiple messages ("+ multi + ")");
}
}
//_log.debug("* Register called of " + msg + "\n\nNow pending are: " + renderStatusHTML(), new Exception("who registered a new one?"));
synchronized (_selectors) { _selectors.add(sel); }
_cleanupTask.scheduleExpiration(sel);
}
public void unregisterPending(OutNetMessage msg) {
long beforeSync = _context.clock().now();
try {
synchronized (_pendingMessages) {
if (_pendingMessages.containsValue(msg)) {
Long found = null;
for (Iterator iter = _pendingMessages.keySet().iterator(); iter.hasNext();) {
Long exp = (Long)iter.next();
Object val = _pendingMessages.get(exp);
if (val.equals(msg)) {
found = exp;
break;
}
}
if (found != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Unregistered message " + msg.getReplySelector()
+ ": " + msg, new Exception("Who unregistered?"));
_pendingMessages.remove(found);
} else {
_log.error("Arg, couldn't find the message that we... thought we could find?",
new Exception("WTF"));
MessageSelector sel = msg.getReplySelector();
boolean stillActive = false;
synchronized (_selectorToMessage) {
Object old = _selectorToMessage.remove(sel);
if (old != null) {
if (old instanceof List) {
List l = (List)old;
l.remove(msg);
if (l.size() > 0) {
_selectorToMessage.put(sel, l);
stillActive = true;
}
}
}
}
} finally {
long delay = _context.clock().now() - beforeSync;
String warn = delay + "ms";
if ( (delay > 1000) && (_log.shouldLog(Log.WARN)) ) {
_log.warn("Synchronizing in the registry.unRegister took too long! " + warn);
_context.messageHistory().messageProcessingError(msg.getMessageId(), msg.getMessageType(), "Unregister took too long: " + warn);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Synchronizing in the registry.unRegister was quick: " + warn);
}
}
if (!stillActive)
synchronized (_selectors) { _selectors.remove(sel); }
synchronized (_activeMessages) { _activeMessages.remove(msg); }
}
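When two messages end up sharing one reply selector, the map above stores either a bare OutNetMessage or a List of them under the same key, promoting to a List only on collision. A hedged standalone sketch of that scalar-or-list idiom (names invented for illustration, not the router's API):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Sketch of the scalar-or-list multimap idiom: a lone value is stored
    // directly and only promoted to a List on collision, avoiding a per-key
    // List allocation in the common single-message case.
    public class ScalarOrListMap {
        private final Map _map = new HashMap();

        public synchronized void put(Object key, Object value) {
            Object old = _map.get(key);
            if (old == null) {
                _map.put(key, value);            // common case: no wrapper
            } else if (old instanceof List) {
                ((List)old).add(value);          // already promoted
            } else {
                List multi = new ArrayList(4);   // promote scalar to list
                multi.add(old);
                multi.add(value);
                _map.put(key, multi);
            }
        }

        public synchronized Object remove(Object key) {
            return _map.remove(key);             // caller checks scalar vs List
        }
    }

The promotion trick costs an instanceof check on every lookup, which is the same trade the registry code makes in its expiration sweep.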
public void peerFailed(Hash peer) {
List failed = null;
int numFailed = 0;
synchronized (_pendingMessages) {
for (Iterator iter = _pendingMessages.values().iterator(); iter.hasNext(); ) {
OutNetMessage msg = (OutNetMessage)iter.next();
if (msg.getTarget() != null) {
Hash to = msg.getTarget().getIdentity().calculateHash();
if (to.equals(peer)) {
if (failed == null)
failed = new ArrayList(4);
failed.add(msg);
iter.remove();
numFailed++;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Peer failed: " + peer.toBase64().substring(0,6)
+ " but not killing a message to "
+ to.toBase64().substring(0,6));
}
}
}
}
if (failed != null) {
for (int i = 0; i < failed.size(); i++) {
OutNetMessage msg = (OutNetMessage)failed.get(i);
msg.discardData();
if (msg.getOnFailedSendJob() != null)
_context.jobQueue().addJob(msg.getOnFailedSendJob());
}
}
if (_log.shouldLog(Log.WARN))
_log.warn("Peer failed: " + peer.toBase64().substring(0,6) + " killing " + numFailed);
}
+ public void renderStatusHTML(Writer out) throws IOException {}
- public void renderStatusHTML(Writer out) throws IOException {
StringBuffer buf = new StringBuffer(8192);
buf.append("<h2>Pending messages</h2>\n");
Map msgs = null;
synchronized (_pendingMessages) {
msgs = (Map)_pendingMessages.clone();
}
buf.append("<ul>");
for (Iterator iter = msgs.keySet().iterator(); iter.hasNext();) {
Long exp = (Long)iter.next();
OutNetMessage msg = (OutNetMessage)msgs.get(exp);
buf.append("<li>").append(msg.getMessageType());
buf.append(": expiring on ").append(new Date(exp.longValue()));
if (msg.getTarget() != null)
buf.append(" targetting ").append(msg.getTarget().getIdentity().getHash());
if (msg.getReplySelector() != null)
buf.append(" with reply selector ").append(msg.getReplySelector().toString());
else
buf.append(" with NO reply selector? WTF!");
buf.append("</li>\n");
}
buf.append("</ul>");
out.write(buf.toString());
out.flush();
}
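peerFailed and the status renderer above share one locking discipline: mutate or snapshot the shared structure inside the synchronized block, then fire jobs or write HTML only after the lock is released. A small sketch of that collect-then-act pattern, with invented types:

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    // Collect-then-act: gather matching entries under the lock, release it,
    // and only then run the expensive callbacks, so the lock is never held
    // across I/O or job-queue work.
    public class PendingSet {
        private final List _pending = new ArrayList();

        public void failMatching(Filter filter, Callback onFailed) {
            List failed = null;
            synchronized (_pending) {
                for (Iterator iter = _pending.iterator(); iter.hasNext(); ) {
                    Object msg = iter.next();
                    if (filter.matches(msg)) {
                        if (failed == null) failed = new ArrayList(4);
                        failed.add(msg);
                        iter.remove();
                    }
                }
            }
            if (failed != null) {                // lock released: safe to be slow
                for (int i = 0; i < failed.size(); i++)
                    onFailed.run(failed.get(i));
            }
        }

        public interface Filter { boolean matches(Object msg); }
        public interface Callback { void run(Object msg); }
    }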
private class CleanupExpiredTask implements SimpleTimer.TimedEvent {
private long _expiration;
public CleanupExpiredTask(long expiration) {
_expiration = expiration;
}
public void timeReached() {
OutNetMessage msg = null;
synchronized (_pendingMessages) {
msg = (OutNetMessage)_pendingMessages.remove(new Long(_expiration));
}
if (msg != null) {
_context.messageHistory().replyTimedOut(msg);
Job fail = msg.getOnFailedReplyJob();
if (fail != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Removing message with selector " + msg.getReplySelector()
+ ": " + msg.getMessageType()
+ " and firing fail job: " + fail.getClass().getName());
_context.jobQueue().addJob(fail);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Removing message with selector " + msg.getReplySelector()
+ " and not firing any job");
long now = _context.clock().now();
List removing = new ArrayList(1);
synchronized (_selectors) {
for (int i = 0; i < _selectors.size(); i++) {
MessageSelector sel = (MessageSelector)_selectors.get(i);
if (sel == null) continue;
long expiration = sel.getExpiration();
if (expiration <= now) {
removing.add(sel);
_selectors.remove(i);
i--;
} else if (expiration < _nextExpire || _nextExpire < now) {
_nextExpire = expiration;
}
}
}
if (removing.size() > 0) {
for (int i = 0; i < removing.size(); i++) {
MessageSelector sel = (MessageSelector)removing.get(i);
OutNetMessage msg = null;
List msgs = null;
synchronized (_selectorToMessage) {
Object o = _selectorToMessage.remove(sel);
if (o instanceof OutNetMessage) {
msg = (OutNetMessage)o;
} else if (o instanceof List) {
//msgs = new ArrayList((List)o);
msgs = (List)o;
}
}
if (msg != null) {
synchronized (_activeMessages) {
_activeMessages.remove(msg);
}
Job fail = msg.getOnFailedReplyJob();
if (fail != null)
_context.jobQueue().addJob(fail);
} else if (msgs != null) {
synchronized (_activeMessages) {
_activeMessages.removeAll(msgs);
}
for (int j = 0; j < msgs.size(); j++) {
msg = (OutNetMessage)msgs.get(j);
Job fail = msg.getOnFailedReplyJob();
if (fail != null)
_context.jobQueue().addJob(fail);
}
}
}
}
if (_nextExpire <= now)
_nextExpire = now + 10*1000;
SimpleTimer.getInstance().addEvent(CleanupTask.this, _nextExpire - now);
}
public void scheduleExpiration(MessageSelector sel) {
long now = _context.clock().now();
if ( (_nextExpire <= now) || (sel.getExpiration() < _nextExpire) ) {
_nextExpire = sel.getExpiration();
SimpleTimer.getInstance().addEvent(CleanupTask.this, _nextExpire - now);
}
}
}
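Rather than one timer event per message, CleanupTask keeps a single event armed for the earliest known selector expiration and re-arms itself on every sweep. A standalone sketch of that pattern, using java.util.Timer in place of the router's SimpleTimer (all names here are illustrative):

    import java.util.Timer;
    import java.util.TimerTask;

    // One re-arming sweep instead of one event per entry: remember the earliest
    // expiration seen and keep a single task scheduled for that instant.
    // (SimpleTimer can re-key an existing event; plain java.util.Timer cannot,
    // so this sketch may occasionally run a redundant sweep, which is harmless
    // because sweeps are idempotent.)
    public class RearmingCleanup {
        private final Timer _timer = new Timer(true);
        private long _nextExpire = -1;

        /** call when an item with this expiration is registered */
        public synchronized void scheduleExpiration(long expiration) {
            long now = System.currentTimeMillis();
            if ((_nextExpire <= now) || (expiration < _nextExpire)) {
                _nextExpire = expiration;
                _timer.schedule(new Sweep(), Math.max(0, _nextExpire - now));
            }
        }

        private class Sweep extends TimerTask {
            public void run() {
                long now = System.currentTimeMillis();
                long earliest = sweepAndFindEarliest(now); // drop expired entries
                synchronized (RearmingCleanup.this) {
                    // if nothing is pending, poll again in 10s like the real task
                    _nextExpire = (earliest > now) ? earliest : now + 10*1000;
                    _timer.schedule(new Sweep(), _nextExpire - now);
                }
            }
        }

        /** placeholder for the real scan over registered selectors */
        protected long sweepAndFindEarliest(long now) { return -1; }
    }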
/**
* Cleanup any messages that were pending replies but have expired
*
*/
/*
private class CleanupPendingMessagesJob extends JobImpl {
public CleanupPendingMessagesJob() {
super(OutboundMessageRegistry.this._context);
}
public String getName() { return "Cleanup any messages that timed out"; }
public void runJob() {
List removed = removeMessages();
RouterContext ctx = OutboundMessageRegistry.this._context;
for (int i = 0; i < removed.size(); i++) {
OutNetMessage msg = (OutNetMessage)removed.get(i);
if (msg != null) {
_context.messageHistory().replyTimedOut(msg);
Job fail = msg.getOnFailedReplyJob();
if (fail != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Removing message with selector " + msg.getReplySelector()
+ ": " + msg.getMessageType()
+ " and firing fail job: " + fail.getClass().getName());
_context.jobQueue().addJob(fail);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Removing message with selector " + msg.getReplySelector()
+ " and not firing any job");
}
}
}
requeue(CLEANUP_DELAY);
}
/**
* Remove any messages whose expirations are in the past
*
* @return list of OutNetMessage objects that have expired
*/ /*
private List removeMessages() {
long now = OutboundMessageRegistry.this._context.clock().now();
List removedMessages = new ArrayList(2);
List expirationsToRemove = null;
synchronized (_pendingMessages) {
for (Iterator iter = _pendingMessages.keySet().iterator(); iter.hasNext();) {
Long expiration = (Long)iter.next();
if (expiration.longValue() < now) {
if (expirationsToRemove == null)
expirationsToRemove = new ArrayList(8);
expirationsToRemove.add(expiration);
} else {
// it's sorted
break;
}
}
if (expirationsToRemove != null) {
for (int i = 0; i < expirationsToRemove.size(); i++) {
Long expiration = (Long)expirationsToRemove.get(i);
OutNetMessage msg = (OutNetMessage)_pendingMessages.remove(expiration);
if (msg != null)
removedMessages.add(msg);
}
}
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Removed " + removedMessages.size() + " messages");
return removedMessages;
}
}
*/
}

View File

@@ -38,6 +38,7 @@ public interface Transport {
public String getStyle();
public int countActivePeers();
public int countActiveSendPeers();
public List getMostRecentErrorMessages();
public void renderStatusHTML(Writer out) throws IOException;

View File

@@ -64,6 +64,10 @@ public abstract class TransportImpl implements Transport {
*
*/
public int countActivePeers() { return 0; }
/**
* How many peers are we actively sending messages to (this minute)
*/
public int countActiveSendPeers() { return 0; }
public List getMostRecentErrorMessages() { return Collections.EMPTY_LIST; }
/**
@@ -199,6 +203,8 @@ public abstract class TransportImpl implements Transport {
+ msg.getMessageType() + " message with selector " + selector, new Exception("fail cause"));
if (msg.getOnFailedSendJob() != null)
_context.jobQueue().addJob(msg.getOnFailedSendJob());
if (msg.getOnFailedReplyJob() != null)
_context.jobQueue().addJob(msg.getOnFailedReplyJob());
if (selector != null)
_context.messageRegistry().unregisterPending(msg);
log = true;

View File

@@ -56,11 +56,12 @@ public class TransportManager implements TransportEventListener {
transport.setListener(null);
}
- static final boolean ALLOW_TCP = true;
+ static final boolean ALLOW_TCP = false;
private void configTransports() {
String disableTCP = _context.router().getConfigSetting(PROP_DISABLE_TCP);
- if ( !ALLOW_TCP || (disableTCP == null) || (Boolean.TRUE.toString().equalsIgnoreCase(disableTCP)) ) {
+ // Unless overridden by constant or explicit config property, start TCP transport
+ if ( !ALLOW_TCP || ((disableTCP != null) && (Boolean.TRUE.toString().equalsIgnoreCase(disableTCP))) ) {
_log.info("Explicitly disabling the TCP transport!");
} else {
Transport t = new TCPTransport(_context);
@@ -119,6 +120,14 @@ public class TransportManager implements TransportEventListener {
return peers;
}
public int countActiveSendPeers() {
int peers = 0;
for (int i = 0; i < _transports.size(); i++) {
peers += ((Transport)_transports.get(i)).countActiveSendPeers();
}
return peers;
}
public short getReachabilityStatus() {
if (_transports.size() <= 0) return CommSystemFacade.STATUS_UNKNOWN;
short status[] = new short[_transports.size()];
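The corrected condition in the TransportManager hunk above flips the default: an absent config property now means "start the transport", and only an explicit true disables it, while the compile-time constant still wins over everything. A minimal sketch of that gating logic (class and method names are invented; property handling simplified):

    // Gate a feature on a hard compile-time switch plus an explicit opt-out
    // property; an absent property means "use the default", not "disable".
    public class FeatureGate {
        static final boolean ALLOW_TCP = false;   // hard kill-switch, wins over config

        /** @param disableTCP raw config value, may be null when unset */
        public static boolean shouldStartTcp(String disableTCP) {
            if (!ALLOW_TCP)
                return false;                      // constant overrides everything
            // the buggy old form, (disableTCP == null) || "true".equals(...),
            // treated a missing property as "disabled"; the fix only disables
            // on an explicit "true"
            return !((disableTCP != null) && Boolean.TRUE.toString().equalsIgnoreCase(disableTCP));
        }

        public static void main(String[] args) {
            System.out.println(shouldStartTcp(null));    // false here only because ALLOW_TCP=false
            System.out.println(shouldStartTcp("true"));  // false: explicit opt-out
        }
    }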

View File

@@ -16,6 +16,8 @@ import net.i2p.data.RouterIdentity;
import net.i2p.data.SessionKey;
import net.i2p.data.Signature;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.DeliveryStatusMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.CommSystemFacade;
import net.i2p.router.OutNetMessage;
import net.i2p.router.Router;
@@ -60,10 +62,10 @@ public class EstablishmentManager {
_queuedOutbound = new HashMap(32);
_liveIntroductions = new HashMap(32);
_activityLock = new Object();
_context.statManager().createRateStat("udp.inboundEstablishTime", "How long it takes for a new inbound session to be established", "udp", new long[] { 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.outboundEstablishTime", "How long it takes for a new outbound session to be established", "udp", new long[] { 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.inboundEstablishFailedState", "What state a failed inbound establishment request fails in", "udp", new long[] { 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.outboundEstablishFailedState", "What state a failed outbound establishment request fails in", "udp", new long[] { 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.inboundEstablishTime", "How long it takes for a new inbound session to be established", "udp", new long[] { 60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.outboundEstablishTime", "How long it takes for a new outbound session to be established", "udp", new long[] { 60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.inboundEstablishFailedState", "What state a failed inbound establishment request fails in", "udp", new long[] { 60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.outboundEstablishFailedState", "What state a failed outbound establishment request fails in", "udp", new long[] { 60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.sendIntroRelayRequest", "How often we send a relay request to reach a peer", "udp", new long[] { 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.sendIntroRelayTimeout", "How often a relay request times out before getting a response (due to the target or intro peer being offline)", "udp", new long[] { 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.receiveIntroRelayResponse", "How long it took to receive a relay response", "udp", new long[] { 60*60*1000, 24*60*60*1000 });
@@ -440,7 +442,41 @@ public class EstablishmentManager {
_transport.inboundConnectionReceived();
_context.statManager().addRateData("udp.inboundEstablishTime", state.getLifetime(), 0);
- sendOurInfo(peer, true);
+ sendInboundComplete(peer);
}
/**
* don't send our info immediately, just send a small data packet, and 5-10s later,
* if the peer isn't shitlisted, *then* send them our info. this will help kick off
* the oldnet
*/
private void sendInboundComplete(PeerState peer) {
SimpleTimer.getInstance().addEvent(new PublishToNewInbound(peer), 10*1000);
if (_log.shouldLog(Log.INFO))
_log.info("Completing to the peer after confirm: " + peer);
DeliveryStatusMessage dsm = new DeliveryStatusMessage(_context);
dsm.setArrival(Router.NETWORK_ID); // overloaded, sure, but future versions can check this
dsm.setMessageExpiration(_context.clock().now()+10*1000);
dsm.setMessageId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
_transport.send(dsm, peer);
}
private class PublishToNewInbound implements SimpleTimer.TimedEvent {
private PeerState _peer;
public PublishToNewInbound(PeerState peer) { _peer = peer; }
public void timeReached() {
Hash peer = _peer.getRemotePeer();
if ((peer != null) && (!_context.shitlist().isShitlisted(peer))) {
// ok, we are fine with them, send them our latest info
if (_log.shouldLog(Log.INFO))
_log.info("Publishing to the peer after confirm plus delay (without shitlist): " + peer.toBase64());
sendOurInfo(_peer, true);
} else {
// nuh uh. fuck 'em.
if (_log.shouldLog(Log.WARN))
_log.warn("NOT publishing to the peer after confirm plus delay (WITH shitlist): " + (peer != null ? peer.toBase64() : "unknown"));
}
_peer = null;
}
}
/**
@@ -516,7 +552,11 @@ public class EstablishmentManager {
// offer to relay
// (perhaps we should check our bw usage and/or how many peers we are
// already offering introducing?)
- state.setSentRelayTag(_context.random().nextLong(MAX_TAG_VALUE));
+ if (state.getSentRelayTag() < 0) {
+ state.setSentRelayTag(_context.random().nextLong(MAX_TAG_VALUE));
+ } else {
+ // don't change it, since we've already prepared our sig
+ }
} else {
// don't offer to relay
state.setSentRelayTag(0);
@@ -536,9 +576,9 @@ public class EstablishmentManager {
return;
}
_transport.send(_builder.buildSessionCreatedPacket(state, _transport.getExternalPort(), _transport.getIntroKey()));
- // if they haven't advanced to sending us confirmed packets in 5s,
+ // if they haven't advanced to sending us confirmed packets in 1s,
// repeat
- state.setNextSendTime(now + 5*1000);
+ state.setNextSendTime(now + 1000);
}
private void sendRequest(OutboundEstablishState state) {
@@ -948,15 +988,15 @@ public class EstablishmentManager {
long delay = nextSendTime - now;
if ( (nextSendTime == -1) || (delay > 0) ) {
- if (delay > 5000)
- delay = 5000;
+ if (delay > 1000)
+ delay = 1000;
boolean interrupted = false;
try {
synchronized (_activityLock) {
if (_activity > 0)
return;
if (nextSendTime == -1)
- _activityLock.wait(5000);
+ _activityLock.wait(1000);
else
_activityLock.wait(delay);
}

View File

@@ -81,7 +81,7 @@ public class InboundMessageFragments /*implements UDPTransport.PartialACKSource
int acksIncluded = receiveACKs(from, data);
long afterACKs = _context.clock().now();
- from.packetReceived();
+ from.packetReceived(data.getPacketSize());
_context.statManager().addRateData("udp.receiveMessagePeriod", afterMsgs-beforeMsgs, afterACKs-beforeMsgs);
_context.statManager().addRateData("udp.receiveACKPeriod", afterACKs-afterMsgs, afterACKs-beforeMsgs);
if ( (fragmentsIncluded > 0) && (acksIncluded > 0) )

Some files were not shown because too many files have changed in this diff.