From 393b1d767447d01c55dab7800e842233454d52b7 Mon Sep 17 00:00:00 2001 From: jrandom Date: Sat, 24 Apr 2004 11:54:35 +0000 Subject: [PATCH] big ol' update to strip out the singletons, replacing them with a rooted app context. The core itself has its own I2PAppContext (see its javadoc for, uh, docs), and the router extends that to expose the router's singletons. The main point of this is to make it so that we can run multiple routers in the same JVM, even to allow different apps in the same JVM to switch singleton implementations (e.g. run some routers with one set of profile calculators, and other routers with a different one). There is still some work to be done regarding the actual boot up of multiple routers in a JVM, as well as their configuration, though the plan is to have the RouterContext override the I2PAppContext's getProperty/getPropertyNames methods to read from a config file (separate ones per context) instead of using the System.getProperty that the base I2PAppContext uses. Once the multi-router is working, I'll shim in a VMCommSystem that doesn't depend upon sockets or threads to read/write (and that uses configurable message send delays / disconnects / etc, perhaps using data from the routerContext.getProperty to drive it). I could hold off until the sim is all working, but there's a truckload of changes in here and I hate dealing with conflicts ;) Everything works - I've been running 'er for a while and kicked the tires a bit, but if you see something amiss, please let me know. 
--- .../src/net/i2p/httptunnel/HTTPListener.java | 2 +- .../i2p/httptunnel/handler/EepHandler.java | 8 +- .../i2p/httptunnel/handler/ProxyHandler.java | 6 +- .../i2p/httptunnel/handler/RootHandler.java | 2 +- .../java/src/net/i2p/i2ptunnel/I2PTunnel.java | 4 +- apps/sam/java/src/net/i2p/sam/SAMUtils.java | 4 +- core/java/src/net/i2p/I2PAppContext.java | 425 +++++ core/java/src/net/i2p/client/ATalk.java | 6 +- .../i2p/client/DisconnectMessageHandler.java | 5 +- core/java/src/net/i2p/client/HandlerImpl.java | 5 +- .../net/i2p/client/I2CPMessageProducer.java | 8 +- .../src/net/i2p/client/I2PClientImpl.java | 11 +- .../client/I2PClientMessageHandlerMap.java | 19 +- .../src/net/i2p/client/I2PSessionImpl.java | 27 +- .../src/net/i2p/client/I2PSessionImpl2.java | 51 +- .../client/MessagePayloadMessageHandler.java | 7 +- .../client/MessageStatusMessageHandler.java | 5 +- .../client/RequestLeaseSetMessageHandler.java | 5 +- .../client/SessionStatusMessageHandler.java | 5 +- .../net/i2p/client/SetDateMessageHandler.java | 5 +- .../i2p/client/naming/DummyNamingService.java | 10 + .../client/naming/HostsTxtNamingService.java | 12 +- .../net/i2p/client/naming/NamingService.java | 37 +- core/java/src/net/i2p/crypto/AESEngine.java | 43 +- .../src/net/i2p/crypto/AESInputStream.java | 83 +- .../src/net/i2p/crypto/AESOutputStream.java | 18 +- .../src/net/i2p/crypto/CryptixAESEngine.java | 12 +- .../net/i2p/crypto/DHSessionKeyBuilder.java | 13 +- core/java/src/net/i2p/crypto/DSAEngine.java | 43 +- .../net/i2p/crypto/DummyElGamalEngine.java | 13 +- .../src/net/i2p/crypto/ElGamalAESEngine.java | 122 +- .../src/net/i2p/crypto/ElGamalEngine.java | 78 +- .../net/i2p/crypto/HMACSHA256Generator.java | 23 +- .../java/src/net/i2p/crypto/KeyGenerator.java | 27 +- .../crypto/PersistentSessionKeyManager.java | 17 +- .../src/net/i2p/crypto/SHA256Generator.java | 6 +- .../src/net/i2p/crypto/SessionKeyManager.java | 12 +- .../crypto/TransientSessionKeyManager.java | 12 +- 
core/java/src/net/i2p/crypto/YKGenerator.java | 8 +- core/java/src/net/i2p/data/DataHelper.java | 72 +- .../src/net/i2p/data/RoutingKeyGenerator.java | 16 +- .../src/net/i2p/stat/SimpleStatDumper.java | 19 +- core/java/src/net/i2p/stat/StatManager.java | 20 +- core/java/src/net/i2p/util/Clock.java | 34 +- core/java/src/net/i2p/util/Log.java | 70 +- .../src/net/i2p/util/LogConsoleBuffer.java | 18 +- core/java/src/net/i2p/util/LogManager.java | 125 +- .../src/net/i2p/util/LogRecordFormatter.java | 10 +- core/java/src/net/i2p/util/LogWriter.java | 24 +- core/java/src/net/i2p/util/RandomSource.java | 9 +- .../java/test/net/i2p/crypto/AES256Bench.java | 240 +-- .../net/i2p/crypto/ElGamalAESEngineTest.java | 384 ++--- .../test/net/i2p/crypto/ElGamalBench.java | 158 +- .../net/i2p/crypto/SessionEncryptionTest.java | 708 ++++----- .../java/test/net/i2p/data/StructureTest.java | 2 + .../src/net/i2p/data/i2np/DataMessage.java | 34 +- .../data/i2np/DatabaseFindNearestMessage.java | 99 -- .../i2p/data/i2np/DatabaseLookupMessage.java | 122 +- .../data/i2np/DatabaseSearchReplyMessage.java | 124 +- .../i2p/data/i2np/DatabaseStoreMessage.java | 132 +- .../i2p/data/i2np/DeliveryStatusMessage.java | 36 +- .../src/net/i2p/data/i2np/GarlicClove.java | 184 +-- .../src/net/i2p/data/i2np/GarlicMessage.java | 36 +- .../net/i2p/data/i2np/I2NPMessageHandler.java | 67 +- .../net/i2p/data/i2np/I2NPMessageImpl.java | 72 +- .../net/i2p/data/i2np/I2NPMessageReader.java | 54 +- .../net/i2p/data/i2np/SourceRouteBlock.java | 172 ++- .../data/i2np/SourceRouteReplyMessage.java | 98 +- .../i2p/data/i2np/TunnelCreateMessage.java | 289 ++-- .../data/i2np/TunnelCreateStatusMessage.java | 52 +- .../src/net/i2p/data/i2np/TunnelMessage.java | 118 +- .../i2np/TunnelVerificationStructure.java | 60 +- .../net/i2p/router/ClientManagerFacade.java | 15 +- .../src/net/i2p/router/ClientMessagePool.java | 100 +- .../src/net/i2p/router/CommSystemFacade.java | 8 - .../i2p/router/GenerateStatusConsoleJob.java | 62 - 
.../src/net/i2p/router/InNetMessagePool.java | 60 +- router/java/src/net/i2p/router/JobImpl.java | 28 +- router/java/src/net/i2p/router/JobQueue.java | 1041 ++++++------- .../src/net/i2p/router/JobQueueRunner.java | 168 +- router/java/src/net/i2p/router/JobTiming.java | 39 +- .../java/src/net/i2p/router/KeyManager.java | 228 +-- .../src/net/i2p/router/MessageHistory.java | 540 ++++--- .../src/net/i2p/router/MessageValidator.java | 147 +- .../net/i2p/router/NetworkDatabaseFacade.java | 27 +- .../src/net/i2p/router/OutNetMessage.java | 261 ++-- .../src/net/i2p/router/OutNetMessagePool.java | 166 +- .../src/net/i2p/router/PeerManagerFacade.java | 12 +- .../src/net/i2p/router/ProfileManager.java | 43 +- router/java/src/net/i2p/router/Router.java | 117 +- .../src/net/i2p/router/RouterContext.java | 465 ++++++ .../router/SessionKeyPersistenceHelper.java | 122 +- router/java/src/net/i2p/router/Shitlist.java | 122 +- .../src/net/i2p/router/StatisticsManager.java | 19 +- .../i2p/router/SubmitMessageHistoryJob.java | 132 +- .../java/src/net/i2p/router/TunnelInfo.java | 450 +++--- .../net/i2p/router/TunnelManagerFacade.java | 20 +- .../src/net/i2p/router/TunnelSettings.java | 146 +- .../net/i2p/router/admin/AdminListener.java | 118 +- .../net/i2p/router/admin/AdminManager.java | 57 +- .../src/net/i2p/router/admin/AdminRunner.java | 138 +- .../net/i2p/router/admin/StatsGenerator.java | 346 +++-- .../router/client/ClientConnectionRunner.java | 404 ++--- .../router/client/ClientListenerRunner.java | 138 +- .../net/i2p/router/client/ClientManager.java | 388 ++--- .../client/ClientManagerFacadeImpl.java | 119 +- .../client/ClientMessageEventListener.java | 226 +-- .../i2p/router/client/CreateSessionJob.java | 61 +- .../i2p/router/client/MessageReceivedJob.java | 63 +- .../net/i2p/router/client/ReportAbuseJob.java | 55 +- .../i2p/router/client/RequestLeaseSetJob.java | 172 ++- .../message/BuildCreateTunnelMessageJob.java | 67 - .../router/message/BuildTestMessageJob.java | 236 +-- 
.../router/message/GarlicMessageBuilder.java | 310 ++-- .../router/message/GarlicMessageHandler.java | 18 +- .../router/message/GarlicMessageParser.java | 122 +- .../message/HandleGarlicMessageJob.java | 57 +- .../HandleSourceRouteReplyMessageJob.java | 190 +-- .../message/HandleTunnelMessageJob.java | 125 +- .../i2p/router/message/MessageHandler.java | 263 ++-- .../message/OutboundClientMessageJob.java | 879 +++++------ .../OutboundClientMessageJobHelper.java | 225 +-- .../net/i2p/router/message/SendGarlicJob.java | 117 +- .../i2p/router/message/SendMessageAckJob.java | 29 +- .../router/message/SendMessageDirectJob.java | 222 +-- .../router/message/SendReplyMessageJob.java | 44 +- .../router/message/SendTunnelMessageJob.java | 646 ++++---- .../SourceRouteReplyMessageHandler.java | 17 +- .../router/message/TunnelMessageHandler.java | 17 +- .../DatabaseLookupMessageHandler.java | 12 +- .../DatabaseSearchReplyMessageHandler.java | 15 +- .../DatabaseStoreMessageHandler.java | 15 +- .../HandleDatabaseLookupMessageJob.java | 62 +- .../HandleDatabaseSearchReplyMessageJob.java | 69 +- .../HandleDatabaseStoreMessageJob.java | 24 +- .../networkdb/PublishLocalRouterInfoJob.java | 45 +- .../networkdb/kademlia/DataPublisherJob.java | 108 +- .../kademlia/DataRepublishingSelectorJob.java | 198 +-- .../networkdb/kademlia/ExpireLeasesJob.java | 78 +- .../networkdb/kademlia/ExpireRoutersJob.java | 114 +- .../router/networkdb/kademlia/ExploreJob.java | 78 +- .../kademlia/ExploreKeySelectorJob.java | 88 +- .../networkdb/kademlia/KBucketImpl.java | 402 ++--- .../router/networkdb/kademlia/KBucketSet.java | 166 +- .../KademliaNetworkDatabaseFacade.java | 889 ++++++----- .../networkdb/kademlia/PeerSelector.java | 96 +- .../kademlia/PersistentDataStore.java | 475 +++--- .../kademlia/RepublishLeaseSetJob.java | 74 +- .../router/networkdb/kademlia/SearchJob.java | 122 +- .../kademlia/SearchMessageSelector.java | 119 +- .../networkdb/kademlia/SearchState.java | 223 +-- 
.../kademlia/SearchUpdateReplyFoundJob.java | 80 +- .../networkdb/kademlia/StartExplorersJob.java | 62 +- .../router/networkdb/kademlia/StoreJob.java | 66 +- .../kademlia/TransientDataStore.java | 202 +-- .../i2p/router/peermanager/Calculator.java | 14 +- .../net/i2p/router/peermanager/DBHistory.java | 201 +-- .../peermanager/EvaluateProfilesJob.java | 56 +- .../peermanager/IntegrationCalculator.java | 19 +- .../peermanager/IsFailingCalculator.java | 61 +- .../i2p/router/peermanager/PeerManager.java | 155 +- .../peermanager/PeerManagerFacadeImpl.java | 35 +- .../i2p/router/peermanager/PeerProfile.java | 297 ++-- .../peermanager/PersistProfilesJob.java | 63 +- .../peermanager/ProfileManagerImpl.java | 263 ++-- .../router/peermanager/ProfileOrganizer.java | 806 +++++----- .../peermanager/ProfilePersistenceHelper.java | 416 ++--- .../peermanager/ReliabilityCalculator.java | 121 +- .../router/peermanager/SpeedCalculator.java | 75 +- .../i2p/router/peermanager/TunnelHistory.java | 102 +- .../i2p/router/startup/BootCommSystemJob.java | 54 +- .../i2p/router/startup/BootNetworkDbJob.java | 21 +- .../router/startup/BuildTrustedLinksJob.java | 25 +- .../router/startup/CreateRouterInfoJob.java | 148 +- .../i2p/router/startup/LoadRouterInfoJob.java | 165 +- .../startup/ProcessInboundNetMessageJob.java | 47 - .../net/i2p/router/startup/ReadConfigJob.java | 69 +- .../router/startup/RebuildRouterInfoJob.java | 255 ++- .../startup/StartAcceptingClientsJob.java | 37 +- .../net/i2p/router/startup/StartupJob.java | 13 +- .../BandwidthLimitedInputStream.java | 43 +- .../BandwidthLimitedOutputStream.java | 79 +- .../router/transport/BandwidthLimiter.java | 53 +- .../transport/CommSystemFacadeImpl.java | 117 +- .../transport/FetchOutNetMessageJob.java | 49 - .../net/i2p/router/transport/GetBidsJob.java | 90 +- .../transport/OutboundMessageRegistry.java | 540 ++++--- .../i2p/router/transport/TransportImpl.java | 308 ++-- .../router/transport/TransportManager.java | 371 ++--- 
.../transport/TrivialBandwidthLimiter.java | 262 ++-- .../router/transport/phttp/PHTTPPoller.java | 375 ++--- .../router/transport/phttp/PHTTPSender.java | 400 ++--- .../transport/phttp/PHTTPTransport.java | 388 ++--- .../tcp/RestrictiveTCPConnection.java | 479 +++--- .../router/transport/tcp/TCPConnection.java | 69 +- .../i2p/router/transport/tcp/TCPListener.java | 316 ++-- .../router/transport/tcp/TCPTransport.java | 1368 ++++++++--------- .../ClientLeaseSetManagerJob.java | 268 ++-- .../tunnelmanager/ClientTunnelPool.java | 205 +-- .../ClientTunnelPoolExpirationJob.java | 123 +- .../ClientTunnelPoolManagerJob.java | 27 +- .../HandleTunnelCreateMessageJob.java | 62 +- .../PoolingTunnelManagerFacade.java | 53 +- .../tunnelmanager/PoolingTunnelSelector.java | 169 +- .../RequestInboundTunnelJob.java | 26 +- .../RequestOutboundTunnelJob.java | 17 +- .../tunnelmanager/RequestTunnelJob.java | 1333 ++++++++-------- .../router/tunnelmanager/TestTunnelJob.java | 350 ++--- .../router/tunnelmanager/TunnelBuilder.java | 543 +++---- .../TunnelCreateMessageHandler.java | 14 +- .../i2p/router/tunnelmanager/TunnelPool.java | 70 +- .../TunnelPoolExpirationJob.java | 152 +- .../tunnelmanager/TunnelPoolManagerJob.java | 218 +-- .../TunnelPoolPersistenceHelper.java | 289 ++-- .../tunnelmanager/TunnelTestManager.java | 140 +- .../data/i2np/DatabaseStoreMessageTest.java | 22 +- .../i2p/data/i2np/I2NPMessageReaderTest.java | 62 +- 217 files changed, 16662 insertions(+), 15452 deletions(-) create mode 100644 core/java/src/net/i2p/I2PAppContext.java delete mode 100644 router/java/src/net/i2p/data/i2np/DatabaseFindNearestMessage.java delete mode 100644 router/java/src/net/i2p/router/GenerateStatusConsoleJob.java create mode 100644 router/java/src/net/i2p/router/RouterContext.java delete mode 100644 router/java/src/net/i2p/router/message/BuildCreateTunnelMessageJob.java delete mode 100644 router/java/src/net/i2p/router/startup/ProcessInboundNetMessageJob.java delete mode 100644 
router/java/src/net/i2p/router/transport/FetchOutNetMessageJob.java diff --git a/apps/httptunnel/java/src/net/i2p/httptunnel/HTTPListener.java b/apps/httptunnel/java/src/net/i2p/httptunnel/HTTPListener.java index d613f6033..60f6752c8 100644 --- a/apps/httptunnel/java/src/net/i2p/httptunnel/HTTPListener.java +++ b/apps/httptunnel/java/src/net/i2p/httptunnel/HTTPListener.java @@ -53,7 +53,7 @@ public class HTTPListener extends Thread { private boolean proxyUsed = false; /** - * Query whether this is the first use of the proxy or not . . . + * Query whether this is the first use of the proxy or not * @return Whether this is the first proxy use, no doubt. */ public boolean firstProxyUse() { diff --git a/apps/httptunnel/java/src/net/i2p/httptunnel/handler/EepHandler.java b/apps/httptunnel/java/src/net/i2p/httptunnel/handler/EepHandler.java index 1535d2915..9e5d105f9 100644 --- a/apps/httptunnel/java/src/net/i2p/httptunnel/handler/EepHandler.java +++ b/apps/httptunnel/java/src/net/i2p/httptunnel/handler/EepHandler.java @@ -19,6 +19,7 @@ import net.i2p.httptunnel.SocketManagerProducer; import net.i2p.httptunnel.filter.Filter; import net.i2p.httptunnel.filter.NullFilter; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Handler for browsing Eepsites. 
@@ -26,6 +27,7 @@ import net.i2p.util.Log; public class EepHandler { private static final Log _log = new Log(EepHandler.class); + private static I2PAppContext _context = new I2PAppContext(); protected ErrorHandler errorHandler; @@ -44,7 +46,7 @@ public class EepHandler { public void handle(Request req, HTTPListener httpl, OutputStream out, /* boolean fromProxy, */String destination) throws IOException { SocketManagerProducer smp = httpl.getSMP(); - Destination dest = NamingService.getInstance().lookup(destination); + Destination dest = _context.namingService().lookup(destination); if (dest == null) { errorHandler.handle(req, httpl, out, "Could not lookup host: " + destination); return; @@ -66,8 +68,8 @@ public class EepHandler { * @return boolean, true if something was written, false otherwise. * @throws IOException */ - public boolean handle(Request req, Filter f, OutputStream out, Destination dest, I2PSocketManager sm) - throws IOException { + public boolean handle(Request req, Filter f, OutputStream out, Destination dest, + I2PSocketManager sm) throws IOException { I2PSocket s = null; boolean written = false; try { diff --git a/apps/httptunnel/java/src/net/i2p/httptunnel/handler/ProxyHandler.java b/apps/httptunnel/java/src/net/i2p/httptunnel/handler/ProxyHandler.java index 02f454d49..da0b856b7 100644 --- a/apps/httptunnel/java/src/net/i2p/httptunnel/handler/ProxyHandler.java +++ b/apps/httptunnel/java/src/net/i2p/httptunnel/handler/ProxyHandler.java @@ -12,6 +12,7 @@ import net.i2p.httptunnel.SocketManagerProducer; import net.i2p.httptunnel.filter.Filter; import net.i2p.httptunnel.filter.NullFilter; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Handler for proxying "normal" HTTP requests. 
@@ -19,6 +20,7 @@ import net.i2p.util.Log; public class ProxyHandler extends EepHandler { private static final Log _log = new Log(ErrorHandler.class); + private static I2PAppContext _context = new I2PAppContext(); /* package private */ProxyHandler(ErrorHandler eh) { super(eh); @@ -31,7 +33,7 @@ public class ProxyHandler extends EepHandler { * @throws IOException */ public void handle(Request req, HTTPListener httpl, OutputStream out - /*, boolean fromProxy */) throws IOException { + /*, boolean fromProxy */) throws IOException { SocketManagerProducer smp = httpl.getSMP(); Destination dest = findProxy(); if (dest == null) { @@ -48,6 +50,6 @@ public class ProxyHandler extends EepHandler { private Destination findProxy() { //FIXME! - return NamingService.getInstance().lookup("squid.i2p"); + return _context.namingService().lookup("squid.i2p"); } } \ No newline at end of file diff --git a/apps/httptunnel/java/src/net/i2p/httptunnel/handler/RootHandler.java b/apps/httptunnel/java/src/net/i2p/httptunnel/handler/RootHandler.java index d40f14c0f..4f94d4744 100644 --- a/apps/httptunnel/java/src/net/i2p/httptunnel/handler/RootHandler.java +++ b/apps/httptunnel/java/src/net/i2p/httptunnel/handler/RootHandler.java @@ -29,7 +29,7 @@ public class RootHandler { private static RootHandler instance; /** - * Singleton stuff . . . + * Singleton stuff * @return the one and only instance, yay! 
*/ public static synchronized RootHandler getInstance() { diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnel.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnel.java index 93fb64121..dcf28d1e1 100644 --- a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnel.java +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnel.java @@ -49,6 +49,7 @@ import java.util.List; import java.util.Set; import java.util.StringTokenizer; +import net.i2p.I2PAppContext; import net.i2p.I2PException; import net.i2p.client.I2PClient; import net.i2p.client.I2PClientFactory; @@ -65,6 +66,7 @@ import net.i2p.util.Log; public class I2PTunnel implements Logging, EventDispatcher { private final static Log _log = new Log(I2PTunnel.class); private final EventDispatcherImpl _event = new EventDispatcherImpl(); + private static I2PAppContext _context = new I2PAppContext(); public static final int PACKET_DELAY = 100; @@ -954,7 +956,7 @@ public class I2PTunnel implements Logging, EventDispatcher { } } else { // ask naming service - NamingService inst = NamingService.getInstance(); + NamingService inst = _context.namingService(); return inst.lookup(name); } } diff --git a/apps/sam/java/src/net/i2p/sam/SAMUtils.java b/apps/sam/java/src/net/i2p/sam/SAMUtils.java index f4ef4ff84..09dce27f3 100644 --- a/apps/sam/java/src/net/i2p/sam/SAMUtils.java +++ b/apps/sam/java/src/net/i2p/sam/SAMUtils.java @@ -23,6 +23,7 @@ import net.i2p.data.Base64; import net.i2p.data.DataFormatException; import net.i2p.data.Destination; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Miscellaneous utility methods used by SAM protocol handlers. 
@@ -32,6 +33,7 @@ import net.i2p.util.Log; public class SAMUtils { private final static Log _log = new Log(SAMUtils.class); + private static I2PAppContext _context = new I2PAppContext(); /** * Generate a random destination key @@ -84,7 +86,7 @@ public class SAMUtils { * @return the Destination for the specified hostname, or null if not found */ public static Destination lookupHost(String name, OutputStream pubKey) { - NamingService ns = NamingService.getInstance(); + NamingService ns = _context.namingService(); Destination dest = ns.lookup(name); if ((pubKey != null) && (dest != null)) { diff --git a/core/java/src/net/i2p/I2PAppContext.java b/core/java/src/net/i2p/I2PAppContext.java new file mode 100644 index 000000000..7f5bc203e --- /dev/null +++ b/core/java/src/net/i2p/I2PAppContext.java @@ -0,0 +1,425 @@ +package net.i2p; + +import net.i2p.stat.StatManager; +import net.i2p.crypto.SessionKeyManager; +import net.i2p.crypto.PersistentSessionKeyManager; +import net.i2p.crypto.ElGamalAESEngine; +import net.i2p.crypto.ElGamalEngine; +import net.i2p.crypto.DummyElGamalEngine; +import net.i2p.crypto.SHA256Generator; +import net.i2p.crypto.HMACSHA256Generator; +import net.i2p.crypto.AESEngine; +import net.i2p.crypto.CryptixAESEngine; +import net.i2p.crypto.DSAEngine; +import net.i2p.client.naming.NamingService; +import net.i2p.util.LogManager; +import net.i2p.util.Clock; +import net.i2p.util.RandomSource; +import net.i2p.data.RoutingKeyGenerator; +import net.i2p.crypto.KeyGenerator; + +import java.util.Properties; +import java.util.HashSet; +import java.util.Set; + +/** + *

Provide a base scope for accessing singletons that I2P exposes. Rather than + * using the traditional singleton, where any component can access the component + * in question directly, all of those I2P related singletons are exposed through + * a particular I2PAppContext. This helps not only with understanding their use + * and the components I2P exposes, but it also allows multiple isolated + * environments to operate concurrently within the same JVM - particularly useful + * for stubbing out implementations of the rooted components and simulating the + * software's interaction between multiple instances.

+ * + * As a simplification, there is also a global context - if some component needs + * access to one of the singletons but doesn't have its own context from which + * to root itself, it binds to the I2PAppContext's globalAppContext(), which is + * the first context that was created within the JVM, or a new one if no context + * existed already. This functionality is often used within the I2P core for + * logging - e.g.
+ *     private static final Log _log = new Log(someClass.class);
+ * 
+ * It is for this reason that applications that care about working with multiple + * contexts should build their own context as soon as possible (within the main(..)) + * so that any referenced components will latch on to that context instead of + * instantiating a new one. However, there are situations in which both can be + * relevent. + * + */ +public class I2PAppContext { + /** the context that components without explicit root are bound */ + protected static I2PAppContext _globalAppContext; + /** + * Determine if the app context been initialized. If this is false + * and something asks for the globalAppContext, a new one is created, + * otherwise the existing one is used. + * + */ + protected static volatile boolean _globalAppContextInitialized; + + private StatManager _statManager; + private SessionKeyManager _sessionKeyManager; + private NamingService _namingService; + private ElGamalEngine _elGamalEngine; + private ElGamalAESEngine _elGamalAESEngine; + private AESEngine _AESEngine; + private LogManager _logManager; + private HMACSHA256Generator _hmac; + private SHA256Generator _sha; + private Clock _clock; + private DSAEngine _dsa; + private RoutingKeyGenerator _routingKeyGenerator; + private RandomSource _random; + private KeyGenerator _keyGenerator; + private volatile boolean _statManagerInitialized; + private volatile boolean _sessionKeyManagerInitialized; + private volatile boolean _namingServiceInitialized; + private volatile boolean _elGamalEngineInitialized; + private volatile boolean _elGamalAESEngineInitialized; + private volatile boolean _AESEngineInitialized; + private volatile boolean _logManagerInitialized; + private volatile boolean _hmacInitialized; + private volatile boolean _shaInitialized; + private volatile boolean _clockInitialized; + private volatile boolean _dsaInitialized; + private volatile boolean _routingKeyGeneratorInitialized; + private volatile boolean _randomInitialized; + private volatile boolean _keyGeneratorInitialized; + + 
/** + * Pull the default context, creating a new one if necessary, else using + * the first one created. + * + */ + public static I2PAppContext getGlobalContext() { + if (!_globalAppContextInitialized) { + synchronized (I2PAppContext.class) { + System.err.println("*** Building seperate global context!"); + if (_globalAppContext == null) + _globalAppContext = new I2PAppContext(false); + _globalAppContextInitialized = true; + } + } + return _globalAppContext; + } + + /** + * Lets root a brand new context + * + */ + public I2PAppContext() { + this(true); + } + /** + * @param doInit should this context be used as the global one (if necessary)? + */ + private I2PAppContext(boolean doInit) { + //System.out.println("App context created: " + this); + if (doInit) { + if (!_globalAppContextInitialized) { + synchronized (I2PAppContext.class) { + if (_globalAppContext == null) { + _globalAppContext = this; + _globalAppContextInitialized = true; + } + } + } + } + _statManager = null; + _sessionKeyManager = null; + _namingService = null; + _elGamalEngine = null; + _elGamalAESEngine = null; + _logManager = null; + _statManagerInitialized = false; + _sessionKeyManagerInitialized = false; + _namingServiceInitialized = false; + _elGamalEngineInitialized = false; + _elGamalAESEngineInitialized = false; + _logManagerInitialized = false; + } + + /** + * Access the configuration attributes of this context (aka System.getProperty) + * This can be overloaded by subclasses to allow different system + * properties for different app contexts. + * + */ + public String getProperty(String propName) { + return System.getProperty(propName); + } + /** + * Access the configuration attributes of this context (aka System.getProperty) + * This can be overloaded by subclasses to allow different system + * properties for different app contexts. 
+ * + */ + public String getProperty(String propName, String defaultValue) { + return System.getProperty(propName, defaultValue); + } + /** + * Access the configuration attributes of this context (aka System.getProperties) + * This can be overloaded by subclasses to allow different system + * properties for different app contexts. + * + * @return set of Strings containing the names of defined system properties + */ + public Set getPropertyNames() { + return new HashSet(System.getProperties().keySet()); + } + + /** + * The statistics component with which we can track various events + * over time. + */ + public StatManager statManager() { + if (!_statManagerInitialized) initializeStatManager(); + return _statManager; + } + private void initializeStatManager() { + synchronized (this) { + if (_statManager == null) + _statManager = new StatManager(this); + _statManagerInitialized = true; + } + } + + /** + * The session key manager which coordinates the sessionKey / sessionTag + * data. This component allows transparent operation of the + * ElGamal/AES+SessionTag algorithm, and contains all of the session tags + * for one particular application. If you want to seperate multiple apps + * to have their own sessionTags and sessionKeys, they should use different + * I2PAppContexts, and hence, different sessionKeyManagers. + * + */ + public SessionKeyManager sessionKeyManager() { + if (!_sessionKeyManagerInitialized) initializeSessionKeyManager(); + return _sessionKeyManager; + } + private void initializeSessionKeyManager() { + synchronized (this) { + if (_sessionKeyManager == null) + _sessionKeyManager = new PersistentSessionKeyManager(this); + _sessionKeyManagerInitialized = true; + } + } + + /** + * Pull up the naming service used in this context. The naming service itself + * works by querying the context's properties, so those props should be + * specified to customize the naming service exposed. 
+ */ + public NamingService namingService() { + if (!_namingServiceInitialized) initializeNamingService(); + return _namingService; + } + private void initializeNamingService() { + synchronized (this) { + if (_namingService == null) { + _namingService = NamingService.createInstance(this); + } + _namingServiceInitialized = true; + } + } + + /** + * This is the ElGamal engine used within this context. While it doesn't + * really have anything substantial that is context specific (the algorithm + * just does the algorithm), it does transparently use the context for logging + * its performance and activity. In addition, the engine can be swapped with + * the context's properties (though only someone really crazy should mess with + * it ;) + */ + public ElGamalEngine elGamalEngine() { + if (!_elGamalEngineInitialized) initializeElGamalEngine(); + return _elGamalEngine; + } + private void initializeElGamalEngine() { + synchronized (this) { + if (_elGamalEngine == null) { + if ("off".equals(getProperty("i2p.encryption", "on"))) + _elGamalEngine = new DummyElGamalEngine(this); + else + _elGamalEngine = new ElGamalEngine(this); + } + _elGamalEngineInitialized = true; + } + } + + /** + * Access the ElGamal/AES+SessionTag engine for this context. The algorithm + * makes use of the context's sessionKeyManager to coordinate transparent + * access to the sessionKeys and sessionTags, as well as the context's elGamal + * engine (which in turn keeps stats, etc). + * + */ + public ElGamalAESEngine elGamalAESEngine() { + if (!_elGamalAESEngineInitialized) initializeElGamalAESEngine(); + return _elGamalAESEngine; + } + private void initializeElGamalAESEngine() { + synchronized (this) { + if (_elGamalAESEngine == null) + _elGamalAESEngine = new ElGamalAESEngine(this); + _elGamalAESEngineInitialized = true; + } + } + + /** + * Ok, I'll admit it. there is no good reason for having a context specific + * AES engine. We dont really keep stats on it, since its just too fast to + * matter. 
Though for the crazy people out there, we do expose a way to + * disable it. + */ + public AESEngine AESEngine() { + if (!_AESEngineInitialized) initializeAESEngine(); + return _AESEngine; + } + private void initializeAESEngine() { + synchronized (this) { + if (_AESEngine == null) { + if ("off".equals(getProperty("i2p.encryption", "on"))) + _AESEngine = new AESEngine(this); + else + _AESEngine = new CryptixAESEngine(this); + } + _AESEngineInitialized = true; + } + } + + /** + * Query the log manager for this context, which may in turn have its own + * set of configuration settings (loaded from the context's properties). + * Each context's logManager keeps its own isolated set of Log instances with + * their own log levels, output locations, and rotation configuration. + */ + public LogManager logManager() { + if (!_logManagerInitialized) initializeLogManager(); + return _logManager; + } + private void initializeLogManager() { + synchronized (this) { + if (_logManager == null) + _logManager = new LogManager(this); + _logManagerInitialized = true; + } + } + /** + * There is absolutely no good reason to make this context specific, + * other than for consistency, and perhaps later we'll want to + * include some stats. 
+ */ + public HMACSHA256Generator hmac() { + if (!_hmacInitialized) initializeHMAC(); + return _hmac; + } + private void initializeHMAC() { + synchronized (this) { + if (_hmac == null) + _hmac= new HMACSHA256Generator(this); + _hmacInitialized = true; + } + } + + /** + * Our SHA256 instance (see the hmac discussion for why its context specific) + * + */ + public SHA256Generator sha() { + if (!_shaInitialized) initializeSHA(); + return _sha; + } + private void initializeSHA() { + synchronized (this) { + if (_sha == null) + _sha= new SHA256Generator(this); + _shaInitialized = true; + } + } + + /** + * Our DSA engine (see HMAC and SHA above) + * + */ + public DSAEngine dsa() { + if (!_dsaInitialized) initializeDSA(); + return _dsa; + } + private void initializeDSA() { + synchronized (this) { + if (_dsa == null) + _dsa = new DSAEngine(this); + _dsaInitialized = true; + } + } + + /** + * Component to generate ElGamal, DSA, and Session keys. For why it is in + * the appContext, see the DSA, HMAC, and SHA comments above. + */ + public KeyGenerator keyGenerator() { + if (!_keyGeneratorInitialized) initializeKeyGenerator(); + return _keyGenerator; + } + private void initializeKeyGenerator() { + synchronized (this) { + if (_keyGenerator == null) + _keyGenerator = new KeyGenerator(this); + _keyGeneratorInitialized = true; + } + } + + /** + * The context's synchronized clock, which is kept context specific only to + * enable simulators to play with clock skew among different instances. + * + */ + public Clock clock() { + if (!_clockInitialized) initializeClock(); + return _clock; + } + private void initializeClock() { + synchronized (this) { + if (_clock == null) + _clock = new Clock(this); + _clockInitialized = true; + } + } + + /** + * Determine how much do we want to mess with the keys to turn them + * into something we can route. This is context specific because we + * may want to test out how things react when peers don't agree on + * how to skew. 
+ * + */ + public RoutingKeyGenerator routingKeyGenerator() { + if (!_routingKeyGeneratorInitialized) initializeRoutingKeyGenerator(); + return _routingKeyGenerator; + } + private void initializeRoutingKeyGenerator() { + synchronized (this) { + if (_routingKeyGenerator == null) + _routingKeyGenerator = new RoutingKeyGenerator(this); + _routingKeyGeneratorInitialized = true; + } + } + + /** + * [insert snarky comment here] + * + */ + public RandomSource random() { + if (!_randomInitialized) initializeRandom(); + return _random; + } + private void initializeRandom() { + synchronized (this) { + if (_random == null) + _random = new RandomSource(this); + _randomInitialized = true; + } + } +} \ No newline at end of file diff --git a/core/java/src/net/i2p/client/ATalk.java b/core/java/src/net/i2p/client/ATalk.java index 53fc937e4..a922c2b9f 100644 --- a/core/java/src/net/i2p/client/ATalk.java +++ b/core/java/src/net/i2p/client/ATalk.java @@ -31,6 +31,7 @@ import net.i2p.util.Clock; import net.i2p.util.I2PThread; import net.i2p.util.Log; import net.i2p.util.LogManager; +import net.i2p.I2PAppContext; /** * ATalk - anonymous talk, demonstrating a trivial I2P usage scenario. 
@@ -290,6 +291,7 @@ public class ATalk implements I2PSessionListener, Runnable { /** driver */ public static void main(String args[]) { + I2PAppContext context = new I2PAppContext(); if (args.length == 2) { String myKeyFile = args[0]; String myDestinationFile = args[1]; @@ -309,9 +311,9 @@ public class ATalk implements I2PSessionListener, Runnable { String peerDestFile = args[1]; String shouldLog = args[2]; if (Boolean.TRUE.toString().equalsIgnoreCase(shouldLog)) - LogManager.getInstance().setDisplayOnScreen(true); + context.logManager().setDisplayOnScreen(true); else - LogManager.getInstance().setDisplayOnScreen(false); + context.logManager().setDisplayOnScreen(false); String logFile = args[2]; Thread talkThread = new I2PThread(new ATalk(myKeyfile, peerDestFile)); talkThread.start(); diff --git a/core/java/src/net/i2p/client/DisconnectMessageHandler.java b/core/java/src/net/i2p/client/DisconnectMessageHandler.java index 004128bb0..0c1e69d0f 100644 --- a/core/java/src/net/i2p/client/DisconnectMessageHandler.java +++ b/core/java/src/net/i2p/client/DisconnectMessageHandler.java @@ -11,6 +11,7 @@ package net.i2p.client; import net.i2p.data.i2cp.DisconnectMessage; import net.i2p.data.i2cp.I2CPMessage; +import net.i2p.I2PAppContext; /** * Handle I2CP disconnect messages from the router @@ -18,8 +19,8 @@ import net.i2p.data.i2cp.I2CPMessage; * @author jrandom */ class DisconnectMessageHandler extends HandlerImpl { - public DisconnectMessageHandler() { - super(DisconnectMessage.MESSAGE_TYPE); + public DisconnectMessageHandler(I2PAppContext context) { + super(context, DisconnectMessage.MESSAGE_TYPE); } public void handleMessage(I2CPMessage message, I2PSessionImpl session) { diff --git a/core/java/src/net/i2p/client/HandlerImpl.java b/core/java/src/net/i2p/client/HandlerImpl.java index 0337f0dad..bf9171284 100644 --- a/core/java/src/net/i2p/client/HandlerImpl.java +++ b/core/java/src/net/i2p/client/HandlerImpl.java @@ -10,6 +10,7 @@ package net.i2p.client; */ import 
net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Base class for handling I2CP messages @@ -19,8 +20,10 @@ import net.i2p.util.Log; abstract class HandlerImpl implements I2CPMessageHandler { protected Log _log; private int _type; + protected I2PAppContext _context; - public HandlerImpl(int type) { + public HandlerImpl(I2PAppContext context, int type) { + _context = context; _type = type; _log = new Log(getClass()); } diff --git a/core/java/src/net/i2p/client/I2CPMessageProducer.java b/core/java/src/net/i2p/client/I2CPMessageProducer.java index a6432f601..a664d3062 100644 --- a/core/java/src/net/i2p/client/I2CPMessageProducer.java +++ b/core/java/src/net/i2p/client/I2CPMessageProducer.java @@ -32,6 +32,7 @@ import net.i2p.data.i2cp.SendMessageMessage; import net.i2p.data.i2cp.SessionConfig; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.I2PAppContext; /** * Produce the various messages the session needs to send to the router. @@ -41,7 +42,12 @@ import net.i2p.util.RandomSource; class I2CPMessageProducer { private final static Log _log = new Log(I2CPMessageProducer.class); private final static RandomSource _rand = RandomSource.getInstance(); + private I2PAppContext _context; + public I2CPMessageProducer(I2PAppContext context) { + _context = context; + } + /** * Send all the messages that a client needs to send to a router to establish * a new session. 
@@ -102,7 +108,7 @@ class I2CPMessageProducer { Payload data = new Payload(); // randomize padding int size = payload.length + RandomSource.getInstance().nextInt(1024); - byte encr[] = ElGamalAESEngine.encrypt(payload, dest.getPublicKey(), key, tags, tag, newKey, size); + byte encr[] = _context.elGamalAESEngine().encrypt(payload, dest.getPublicKey(), key, tags, tag, newKey, size); // yes, in an intelligent component, newTags would be queued for confirmation along with key, and // generateNewTags would only generate tags if necessary diff --git a/core/java/src/net/i2p/client/I2PClientImpl.java b/core/java/src/net/i2p/client/I2PClientImpl.java index 1771f0cfe..76dab5a7b 100644 --- a/core/java/src/net/i2p/client/I2PClientImpl.java +++ b/core/java/src/net/i2p/client/I2PClientImpl.java @@ -22,6 +22,7 @@ import net.i2p.data.PrivateKey; import net.i2p.data.PublicKey; import net.i2p.data.SigningPrivateKey; import net.i2p.data.SigningPublicKey; +import net.i2p.I2PAppContext; /** * Base client implementation @@ -70,7 +71,13 @@ class I2PClientImpl implements I2PClient { * */ public I2PSession createSession(InputStream destKeyStream, Properties options) throws I2PSessionException { - //return new I2PSessionImpl(destKeyStream, options); // not thread safe - return new I2PSessionImpl2(destKeyStream, options); // thread safe + return createSession(I2PAppContext.getGlobalContext(), destKeyStream, options); + } + /** + * Create a new session (though do not connect it yet) + * + */ + public I2PSession createSession(I2PAppContext context, InputStream destKeyStream, Properties options) throws I2PSessionException { + return new I2PSessionImpl2(context, destKeyStream, options); // thread safe } } \ No newline at end of file diff --git a/core/java/src/net/i2p/client/I2PClientMessageHandlerMap.java b/core/java/src/net/i2p/client/I2PClientMessageHandlerMap.java index 61d0db159..0945999d8 100644 --- a/core/java/src/net/i2p/client/I2PClientMessageHandlerMap.java +++ 
b/core/java/src/net/i2p/client/I2PClientMessageHandlerMap.java @@ -19,6 +19,7 @@ import net.i2p.data.i2cp.RequestLeaseSetMessage; import net.i2p.data.i2cp.SessionStatusMessage; import net.i2p.data.i2cp.SetDateMessage; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Contains a map of message handlers that a session will want to use @@ -28,19 +29,19 @@ import net.i2p.util.Log; class I2PClientMessageHandlerMap { private final static Log _log = new Log(I2PClientMessageHandlerMap.class); /** map of message type id --> I2CPMessageHandler */ - private static Map _handlers; + private Map _handlers; - static { + public I2PClientMessageHandlerMap(I2PAppContext context) { _handlers = new HashMap(); - _handlers.put(new Integer(DisconnectMessage.MESSAGE_TYPE), new DisconnectMessageHandler()); - _handlers.put(new Integer(SessionStatusMessage.MESSAGE_TYPE), new SessionStatusMessageHandler()); - _handlers.put(new Integer(RequestLeaseSetMessage.MESSAGE_TYPE), new RequestLeaseSetMessageHandler()); - _handlers.put(new Integer(MessagePayloadMessage.MESSAGE_TYPE), new MessagePayloadMessageHandler()); - _handlers.put(new Integer(MessageStatusMessage.MESSAGE_TYPE), new MessageStatusMessageHandler()); - _handlers.put(new Integer(SetDateMessage.MESSAGE_TYPE), new SetDateMessageHandler()); + _handlers.put(new Integer(DisconnectMessage.MESSAGE_TYPE), new DisconnectMessageHandler(context)); + _handlers.put(new Integer(SessionStatusMessage.MESSAGE_TYPE), new SessionStatusMessageHandler(context)); + _handlers.put(new Integer(RequestLeaseSetMessage.MESSAGE_TYPE), new RequestLeaseSetMessageHandler(context)); + _handlers.put(new Integer(MessagePayloadMessage.MESSAGE_TYPE), new MessagePayloadMessageHandler(context)); + _handlers.put(new Integer(MessageStatusMessage.MESSAGE_TYPE), new MessageStatusMessageHandler(context)); + _handlers.put(new Integer(SetDateMessage.MESSAGE_TYPE), new SetDateMessageHandler(context)); } - public static I2CPMessageHandler getHandler(int messageTypeId) { + 
public I2CPMessageHandler getHandler(int messageTypeId) { I2CPMessageHandler handler = (I2CPMessageHandler) _handlers.get(new Integer(messageTypeId)); return handler; } diff --git a/core/java/src/net/i2p/client/I2PSessionImpl.java b/core/java/src/net/i2p/client/I2PSessionImpl.java index cc840560a..33c8adfe4 100644 --- a/core/java/src/net/i2p/client/I2PSessionImpl.java +++ b/core/java/src/net/i2p/client/I2PSessionImpl.java @@ -39,6 +39,7 @@ import net.i2p.data.i2cp.SessionId; import net.i2p.util.Clock; import net.i2p.util.I2PThread; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Implementation of an I2P session running over TCP. This class is NOT thread safe - @@ -47,7 +48,7 @@ import net.i2p.util.Log; * @author jrandom */ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessageEventListener { - private final static Log _log = new Log(I2PSessionImpl.class); + private Log _log; /** who we are */ private Destination _myDestination; /** private key for decryption */ @@ -79,6 +80,11 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa protected I2CPMessageProducer _producer; /** map of integer --> MessagePayloadMessage */ Map _availableMessages; + + protected I2PClientMessageHandlerMap _handlerMap; + + /** used to seperate things out so we can get rid of singletons */ + protected I2PAppContext _context; /** MessageStatusMessage status from the most recent send that hasn't been consumed */ private List _receivedStatus; @@ -108,9 +114,12 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa * * @throws I2PSessionException if there is a problem loading the private keys or */ - public I2PSessionImpl(InputStream destKeyStream, Properties options) throws I2PSessionException { + public I2PSessionImpl(I2PAppContext context, InputStream destKeyStream, Properties options) throws I2PSessionException { + _context = context; + _log = 
context.logManager().getLog(I2PSessionImpl.class); + _handlerMap = new I2PClientMessageHandlerMap(context); _closed = true; - _producer = new I2CPMessageProducer(); + _producer = new I2CPMessageProducer(context); _availableMessages = new HashMap(); try { readDestination(destKeyStream); @@ -139,13 +148,13 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa _portNum = Integer.parseInt(portNum); } catch (NumberFormatException nfe) { if (_log.shouldLog(Log.WARN)) - _log.warn("Invalid port number specified, defaulting to " - + TestServer.LISTEN_PORT, nfe); + _log.warn("Invalid port number specified, defaulting to " + + TestServer.LISTEN_PORT, nfe); _portNum = TestServer.LISTEN_PORT; } } - private static Properties filter(Properties options) { + private Properties filter(Properties options) { Properties rv = new Properties(); for (Iterator iter = options.keySet().iterator(); iter.hasNext();) { String key = (String) iter.next(); @@ -212,7 +221,7 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa */ public void connect() throws I2PSessionException { _closed = false; - long startConnect = Clock.getInstance().now(); + long startConnect = _context.clock().now(); try { if (_log.shouldLog(Log.DEBUG)) _log.debug("connect begin to " + _hostname + ":" + _portNum); _socket = new Socket(_hostname, _portNum); @@ -251,7 +260,7 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa } } } - long connected = Clock.getInstance().now(); + long connected = _context.clock().now(); if (_log.shouldLog(Log.INFO)) _log.info("Lease set created with inbound tunnels after " + (connected - startConnect) @@ -339,7 +348,7 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa * */ public void messageReceived(I2CPMessageReader reader, I2CPMessage message) { - I2CPMessageHandler handler = I2PClientMessageHandlerMap.getHandler(message.getType()); + I2CPMessageHandler handler = 
_handlerMap.getHandler(message.getType()); if (handler == null) { if (_log.shouldLog(Log.WARN)) _log.warn("Unknown message or unhandleable message received: type = " diff --git a/core/java/src/net/i2p/client/I2PSessionImpl2.java b/core/java/src/net/i2p/client/I2PSessionImpl2.java index 966362008..03a19c927 100644 --- a/core/java/src/net/i2p/client/I2PSessionImpl2.java +++ b/core/java/src/net/i2p/client/I2PSessionImpl2.java @@ -26,6 +26,7 @@ import net.i2p.data.i2cp.MessageStatusMessage; import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.I2PAppContext; /** * Thread safe implementation of an I2P session running over TCP. @@ -33,7 +34,7 @@ import net.i2p.util.RandomSource; * @author jrandom */ class I2PSessionImpl2 extends I2PSessionImpl { - private final static Log _log = new Log(I2PSessionImpl2.class); + private Log _log; /** set of MessageState objects, representing all of the messages in the process of being sent */ private Set _sendingStates; @@ -48,8 +49,9 @@ class I2PSessionImpl2 extends I2PSessionImpl { * * @throws I2PSessionException if there is a problem loading the private keys or */ - public I2PSessionImpl2(InputStream destKeyStream, Properties options) throws I2PSessionException { - super(destKeyStream, options); + public I2PSessionImpl2(I2PAppContext ctx, InputStream destKeyStream, Properties options) throws I2PSessionException { + super(ctx, destKeyStream, options); + _log = ctx.logManager().getLog(I2PSessionImpl2.class); _sendingStates = new HashSet(32); } @@ -95,22 +97,22 @@ class I2PSessionImpl2 extends I2PSessionImpl { private boolean sendBestEffort(Destination dest, byte payload[], SessionKey keyUsed, Set tagsSent) throws I2PSessionException { - SessionKey key = SessionKeyManager.getInstance().getCurrentKey(dest.getPublicKey()); - if (key == null) key = SessionKeyManager.getInstance().createSession(dest.getPublicKey()); - SessionTag tag = 
SessionKeyManager.getInstance().consumeNextAvailableTag(dest.getPublicKey(), key); + SessionKey key = _context.sessionKeyManager().getCurrentKey(dest.getPublicKey()); + if (key == null) key = _context.sessionKeyManager().createSession(dest.getPublicKey()); + SessionTag tag = _context.sessionKeyManager().consumeNextAvailableTag(dest.getPublicKey(), key); Set sentTags = null; - if (SessionKeyManager.getInstance().getAvailableTags(dest.getPublicKey(), key) < 10) { + if (_context.sessionKeyManager().getAvailableTags(dest.getPublicKey(), key) < 10) { sentTags = createNewTags(50); - } else if (SessionKeyManager.getInstance().getAvailableTimeLeft(dest.getPublicKey(), key) < 30 * 1000) { + } else if (_context.sessionKeyManager().getAvailableTimeLeft(dest.getPublicKey(), key) < 30 * 1000) { // if we have > 10 tags, but they expire in under 30 seconds, we want more sentTags = createNewTags(50); if (_log.shouldLog(Log.DEBUG)) _log.debug("Tags are almost expired, adding 50 new ones"); } SessionKey newKey = null; if (false) // rekey - newKey = KeyGenerator.getInstance().generateSessionKey(); + newKey = _context.keyGenerator().generateSessionKey(); - long nonce = (long) RandomSource.getInstance().nextInt(Integer.MAX_VALUE); + long nonce = (long)_context.random().nextInt(Integer.MAX_VALUE); MessageState state = new MessageState(nonce); state.setKey(key); state.setTags(sentTags); @@ -137,7 +139,8 @@ class I2PSessionImpl2 extends I2PSessionImpl { _log.debug("Adding sending state " + state.getMessageId() + " / " + state.getNonce()); _producer.sendMessage(this, dest, nonce, payload, tag, key, sentTags, newKey); - state.waitFor(MessageStatusMessage.STATUS_SEND_ACCEPTED, Clock.getInstance().now() + getTimeout()); + state.waitFor(MessageStatusMessage.STATUS_SEND_ACCEPTED, + _context.clock().now() + getTimeout()); synchronized (_sendingStates) { _sendingStates.remove(state); } @@ -163,22 +166,22 @@ class I2PSessionImpl2 extends I2PSessionImpl { private boolean sendGuaranteed(Destination 
dest, byte payload[], SessionKey keyUsed, Set tagsSent) throws I2PSessionException { - SessionKey key = SessionKeyManager.getInstance().getCurrentKey(dest.getPublicKey()); - if (key == null) key = SessionKeyManager.getInstance().createSession(dest.getPublicKey()); - SessionTag tag = SessionKeyManager.getInstance().consumeNextAvailableTag(dest.getPublicKey(), key); + SessionKey key = _context.sessionKeyManager().getCurrentKey(dest.getPublicKey()); + if (key == null) key = _context.sessionKeyManager().createSession(dest.getPublicKey()); + SessionTag tag = _context.sessionKeyManager().consumeNextAvailableTag(dest.getPublicKey(), key); Set sentTags = null; - if (SessionKeyManager.getInstance().getAvailableTags(dest.getPublicKey(), key) < 10) { + if (_context.sessionKeyManager().getAvailableTags(dest.getPublicKey(), key) < 10) { sentTags = createNewTags(50); - } else if (SessionKeyManager.getInstance().getAvailableTimeLeft(dest.getPublicKey(), key) < 30 * 1000) { + } else if (_context.sessionKeyManager().getAvailableTimeLeft(dest.getPublicKey(), key) < 30 * 1000) { // if we have > 10 tags, but they expire in under 30 seconds, we want more sentTags = createNewTags(50); if (_log.shouldLog(Log.DEBUG)) _log.debug("Tags are almost expired, adding 50 new ones"); } SessionKey newKey = null; if (false) // rekey - newKey = KeyGenerator.getInstance().generateSessionKey(); + newKey = _context.keyGenerator().generateSessionKey(); - long nonce = (long) RandomSource.getInstance().nextInt(Integer.MAX_VALUE); + long nonce = (long)_context.random().nextInt(Integer.MAX_VALUE); MessageState state = new MessageState(nonce); state.setKey(key); state.setTags(sentTags); @@ -206,9 +209,11 @@ class I2PSessionImpl2 extends I2PSessionImpl { + state.getNonce()); _producer.sendMessage(this, dest, nonce, payload, tag, key, sentTags, newKey); if (isGuaranteed()) - state.waitFor(MessageStatusMessage.STATUS_SEND_GUARANTEED_SUCCESS, Clock.getInstance().now() + SEND_TIMEOUT); + 
state.waitFor(MessageStatusMessage.STATUS_SEND_GUARANTEED_SUCCESS, + _context.clock().now() + SEND_TIMEOUT); else - state.waitFor(MessageStatusMessage.STATUS_SEND_ACCEPTED, Clock.getInstance().now() + SEND_TIMEOUT); + state.waitFor(MessageStatusMessage.STATUS_SEND_ACCEPTED, + _context.clock().now() + SEND_TIMEOUT); synchronized (_sendingStates) { _sendingStates.remove(state); } @@ -250,9 +255,9 @@ class I2PSessionImpl2 extends I2PSessionImpl { + state.getTags()); if ((state.getTags() != null) && (state.getTags().size() > 0)) { if (state.getNewKey() == null) - SessionKeyManager.getInstance().tagsDelivered(state.getTo().getPublicKey(), state.getKey(), state.getTags()); + _context.sessionKeyManager().tagsDelivered(state.getTo().getPublicKey(), state.getKey(), state.getTags()); else - SessionKeyManager.getInstance().tagsDelivered(state.getTo().getPublicKey(), state.getNewKey(), state.getTags()); + _context.sessionKeyManager().tagsDelivered(state.getTo().getPublicKey(), state.getNewKey(), state.getTags()); } } @@ -260,7 +265,7 @@ class I2PSessionImpl2 extends I2PSessionImpl { if (_log.shouldLog(Log.INFO)) _log.info("nack tags for msgId " + state.getMessageId() + " / " + state.getNonce() + " key = " + state.getKey()); - SessionKeyManager.getInstance().failTags(state.getTo().getPublicKey()); + _context.sessionKeyManager().failTags(state.getTo().getPublicKey()); } public void receiveStatus(int msgId, long nonce, int status) { diff --git a/core/java/src/net/i2p/client/MessagePayloadMessageHandler.java b/core/java/src/net/i2p/client/MessagePayloadMessageHandler.java index 7d546de94..ac8cef572 100644 --- a/core/java/src/net/i2p/client/MessagePayloadMessageHandler.java +++ b/core/java/src/net/i2p/client/MessagePayloadMessageHandler.java @@ -16,6 +16,7 @@ import net.i2p.data.i2cp.I2CPMessage; import net.i2p.data.i2cp.MessageId; import net.i2p.data.i2cp.MessagePayloadMessage; import net.i2p.data.i2cp.ReceiveMessageEndMessage; +import net.i2p.I2PAppContext; /** * Handle I2CP 
MessagePayloadMessages from the router delivering the contents @@ -25,8 +26,8 @@ import net.i2p.data.i2cp.ReceiveMessageEndMessage; * @author jrandom */ class MessagePayloadMessageHandler extends HandlerImpl { - public MessagePayloadMessageHandler() { - super(MessagePayloadMessage.MESSAGE_TYPE); + public MessagePayloadMessageHandler(I2PAppContext context) { + super(context, MessagePayloadMessage.MESSAGE_TYPE); } public void handleMessage(I2CPMessage message, I2PSessionImpl session) { @@ -53,7 +54,7 @@ class MessagePayloadMessageHandler extends HandlerImpl { */ private Payload decryptPayload(MessagePayloadMessage msg, I2PSessionImpl session) throws DataFormatException { Payload payload = msg.getPayload(); - byte[] data = ElGamalAESEngine.decrypt(payload.getEncryptedData(), session.getDecryptionKey()); + byte[] data = _context.elGamalAESEngine().decrypt(payload.getEncryptedData(), session.getDecryptionKey()); if (data == null) { _log .error("Error decrypting the payload to public key " diff --git a/core/java/src/net/i2p/client/MessageStatusMessageHandler.java b/core/java/src/net/i2p/client/MessageStatusMessageHandler.java index a5d1c00ad..3413aff3a 100644 --- a/core/java/src/net/i2p/client/MessageStatusMessageHandler.java +++ b/core/java/src/net/i2p/client/MessageStatusMessageHandler.java @@ -12,6 +12,7 @@ package net.i2p.client; import net.i2p.data.i2cp.I2CPMessage; import net.i2p.data.i2cp.MessageStatusMessage; import net.i2p.data.i2cp.ReceiveMessageBeginMessage; +import net.i2p.I2PAppContext; /** * Handle I2CP MessageStatusMessages from the router. 
This currently only takes @@ -21,8 +22,8 @@ import net.i2p.data.i2cp.ReceiveMessageBeginMessage; * @author jrandom */ class MessageStatusMessageHandler extends HandlerImpl { - public MessageStatusMessageHandler() { - super(MessageStatusMessage.MESSAGE_TYPE); + public MessageStatusMessageHandler(I2PAppContext context) { + super(context, MessageStatusMessage.MESSAGE_TYPE); } public void handleMessage(I2CPMessage message, I2PSessionImpl session) { diff --git a/core/java/src/net/i2p/client/RequestLeaseSetMessageHandler.java b/core/java/src/net/i2p/client/RequestLeaseSetMessageHandler.java index 7cc0c8690..06bb3e808 100644 --- a/core/java/src/net/i2p/client/RequestLeaseSetMessageHandler.java +++ b/core/java/src/net/i2p/client/RequestLeaseSetMessageHandler.java @@ -25,6 +25,7 @@ import net.i2p.data.SigningPublicKey; import net.i2p.data.i2cp.I2CPMessage; import net.i2p.data.i2cp.RequestLeaseSetMessage; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Handle I2CP RequestLeaseSetMessage from the router by granting all leases @@ -35,8 +36,8 @@ class RequestLeaseSetMessageHandler extends HandlerImpl { private final static Log _log = new Log(RequestLeaseSetMessageHandler.class); private Map _existingLeaseSets; - public RequestLeaseSetMessageHandler() { - super(RequestLeaseSetMessage.MESSAGE_TYPE); + public RequestLeaseSetMessageHandler(I2PAppContext context) { + super(context, RequestLeaseSetMessage.MESSAGE_TYPE); _existingLeaseSets = new HashMap(32); } diff --git a/core/java/src/net/i2p/client/SessionStatusMessageHandler.java b/core/java/src/net/i2p/client/SessionStatusMessageHandler.java index 1ba816469..6f0ad741f 100644 --- a/core/java/src/net/i2p/client/SessionStatusMessageHandler.java +++ b/core/java/src/net/i2p/client/SessionStatusMessageHandler.java @@ -11,6 +11,7 @@ package net.i2p.client; import net.i2p.data.i2cp.I2CPMessage; import net.i2p.data.i2cp.SessionStatusMessage; +import net.i2p.I2PAppContext; /** * Handle I2CP SessionStatusMessagese from the 
router, updating the session as @@ -19,8 +20,8 @@ import net.i2p.data.i2cp.SessionStatusMessage; * @author jrandom */ class SessionStatusMessageHandler extends HandlerImpl { - public SessionStatusMessageHandler() { - super(SessionStatusMessage.MESSAGE_TYPE); + public SessionStatusMessageHandler(I2PAppContext context) { + super(context, SessionStatusMessage.MESSAGE_TYPE); } public void handleMessage(I2CPMessage message, I2PSessionImpl session) { diff --git a/core/java/src/net/i2p/client/SetDateMessageHandler.java b/core/java/src/net/i2p/client/SetDateMessageHandler.java index 63ff1ae51..2cc52086e 100644 --- a/core/java/src/net/i2p/client/SetDateMessageHandler.java +++ b/core/java/src/net/i2p/client/SetDateMessageHandler.java @@ -12,6 +12,7 @@ package net.i2p.client; import net.i2p.data.i2cp.I2CPMessage; import net.i2p.data.i2cp.SetDateMessage; import net.i2p.util.Clock; +import net.i2p.I2PAppContext; /** * Handle I2CP time messages from the router @@ -19,8 +20,8 @@ import net.i2p.util.Clock; * @author jrandom */ class SetDateMessageHandler extends HandlerImpl { - public SetDateMessageHandler() { - super(SetDateMessage.MESSAGE_TYPE); + public SetDateMessageHandler(I2PAppContext ctx) { + super(ctx, SetDateMessage.MESSAGE_TYPE); } public void handleMessage(I2CPMessage message, I2PSessionImpl session) { diff --git a/core/java/src/net/i2p/client/naming/DummyNamingService.java b/core/java/src/net/i2p/client/naming/DummyNamingService.java index 1bd18cd78..d826b0b06 100644 --- a/core/java/src/net/i2p/client/naming/DummyNamingService.java +++ b/core/java/src/net/i2p/client/naming/DummyNamingService.java @@ -8,11 +8,21 @@ package net.i2p.client.naming; import net.i2p.data.Destination; +import net.i2p.I2PAppContext; /** * A Dummy naming service that can only handle base64 destinations. */ class DummyNamingService extends NamingService { + /** + * The naming service should only be constructed and accessed through the + * application context. 
This constructor should only be used by the + * appropriate application context itself. + * + */ + protected DummyNamingService(I2PAppContext context) { super(context); } + private DummyNamingService() { super(null); } + public Destination lookup(String hostname) { return lookupBase64(hostname); } diff --git a/core/java/src/net/i2p/client/naming/HostsTxtNamingService.java b/core/java/src/net/i2p/client/naming/HostsTxtNamingService.java index 636db1f0d..e997989c0 100644 --- a/core/java/src/net/i2p/client/naming/HostsTxtNamingService.java +++ b/core/java/src/net/i2p/client/naming/HostsTxtNamingService.java @@ -14,12 +14,22 @@ import java.util.Properties; import net.i2p.data.Destination; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * A naming service based on the "hosts.txt" file. */ public class HostsTxtNamingService extends NamingService { + /** + * The naming service should only be constructed and accessed through the + * application context. This constructor should only be used by the + * appropriate application context itself. + * + */ + protected HostsTxtNamingService(I2PAppContext context) { super(context); } + private HostsTxtNamingService() { super(null); } + /** * If this system property is specified, the tunnel will read the * given file for hostname=destKey values when resolving names @@ -35,7 +45,7 @@ public class HostsTxtNamingService extends NamingService { // Try to look it up in hosts.txt // Reload file each time to catch changes. 
// (and it's easier :P - String hostsfile = System.getProperty(PROP_HOSTS_FILE, DEFAULT_HOSTS_FILE); + String hostsfile = _context.getProperty(PROP_HOSTS_FILE, DEFAULT_HOSTS_FILE); Properties hosts = new Properties(); FileInputStream fis = null; try { diff --git a/core/java/src/net/i2p/client/naming/NamingService.java b/core/java/src/net/i2p/client/naming/NamingService.java index 904d1e709..b6b6d64a5 100644 --- a/core/java/src/net/i2p/client/naming/NamingService.java +++ b/core/java/src/net/i2p/client/naming/NamingService.java @@ -10,6 +10,9 @@ package net.i2p.client.naming; import net.i2p.data.DataFormatException; import net.i2p.data.Destination; import net.i2p.util.Log; +import net.i2p.I2PAppContext; + +import java.lang.reflect.Constructor; /** * Naming services create a subclass of this class. @@ -17,10 +20,23 @@ import net.i2p.util.Log; public abstract class NamingService { private final static Log _log = new Log(NamingService.class); + protected I2PAppContext _context; private static final String PROP_IMPL = "i2p.naming.impl"; private static final String DEFAULT_IMPL = "net.i2p.client.naming.HostsTxtNamingService"; + + /** + * The naming service should only be constructed and accessed through the + * application context. This constructor should only be used by the + * appropriate application context itself. + * + */ + protected NamingService(I2PAppContext context) { + _context = context; + } + private NamingService() {} + /** * Look up a host name. * @return the Destination for this host name, or @@ -52,23 +68,22 @@ public abstract class NamingService { } } - private static NamingService instance = null; - /** * Get a naming service instance. This method ensures that there * will be only one naming service instance (singleton) as well as * choose the implementation from the "i2p.naming.impl" system * property. 
*/ - public static synchronized NamingService getInstance() { - if (instance == null) { - String impl = System.getProperty(PROP_IMPL, DEFAULT_IMPL); - try { - instance = (NamingService) Class.forName(impl).newInstance(); - } catch (Exception ex) { - _log.error("Cannot loadNaming service implementation", ex); - instance = new DummyNamingService(); // fallback - } + public static final synchronized NamingService createInstance(I2PAppContext context) { + NamingService instance = null; + String impl = context.getProperty(PROP_IMPL, DEFAULT_IMPL); + try { + Class cls = Class.forName(impl); + Constructor con = cls.getConstructor(new Class[] { I2PAppContext.class }); + instance = (NamingService)con.newInstance(new Object[] { context }); + } catch (Exception ex) { + _log.error("Cannot loadNaming service implementation", ex); + instance = new DummyNamingService(context); // fallback } return instance; } diff --git a/core/java/src/net/i2p/crypto/AESEngine.java b/core/java/src/net/i2p/crypto/AESEngine.java index a96870d15..1cdfcff16 100644 --- a/core/java/src/net/i2p/crypto/AESEngine.java +++ b/core/java/src/net/i2p/crypto/AESEngine.java @@ -19,26 +19,20 @@ import net.i2p.data.Hash; import net.i2p.data.SessionKey; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.I2PAppContext; /** - * Wrapper singleton for AES cypher operation. + * Dummy wrapper for AES cipher operation. 
* - * @author jrandom */ public class AESEngine { - private final static Log _log = new Log(AESEngine.class); - private static AESEngine _engine; - static { - if ("off".equals(System.getProperty("i2p.encryption", "on"))) - _engine = new AESEngine(); - else - _engine = new CryptixAESEngine(); + private Log _log; + private I2PAppContext _context; + public AESEngine(I2PAppContext ctx) { + _context = ctx; + _log = _context.logManager().getLog(AESEngine.class); } - - public static AESEngine getInstance() { - return _engine; - } - + /** Encrypt the payload with the session key * @param payload data to be encrypted * @param sessionKey private esession key to encrypt to @@ -59,13 +53,13 @@ public class AESEngine { if ((iv == null) || (payload == null) || (sessionKey == null) || (iv.length != 16)) return null; ByteArrayOutputStream baos = new ByteArrayOutputStream(paddedSize + 64); - Hash h = SHA256Generator.getInstance().calculateHash(sessionKey.getData()); + Hash h = _context.sha().calculateHash(sessionKey.getData()); try { h.writeBytes(baos); DataHelper.writeLong(baos, 4, payload.length); baos.write(payload); byte tv[] = baos.toByteArray(); - baos.write(ElGamalAESEngine.getPadding(tv.length, paddedSize)); + baos.write(ElGamalAESEngine.getPadding(_context, tv.length, paddedSize)); } catch (IOException ioe) { _log.error("Error writing data", ioe); return null; @@ -85,7 +79,7 @@ public class AESEngine { return null; } ByteArrayInputStream bais = new ByteArrayInputStream(decr); - Hash h = SHA256Generator.getInstance().calculateHash(sessionKey.getData()); + Hash h = _context.sha().calculateHash(sessionKey.getData()); try { Hash rh = new Hash(); rh.readBytes(bais); @@ -127,20 +121,21 @@ public class AESEngine { } public static void main(String args[]) { - SessionKey key = KeyGenerator.getInstance().generateSessionKey(); + I2PAppContext ctx = new I2PAppContext(); + SessionKey key = ctx.keyGenerator().generateSessionKey(); byte iv[] = new byte[16]; 
RandomSource.getInstance().nextBytes(iv); byte sbuf[] = new byte[16]; RandomSource.getInstance().nextBytes(sbuf); - byte se[] = AESEngine.getInstance().encrypt(sbuf, key, iv); - byte sd[] = AESEngine.getInstance().decrypt(se, key, iv); - _log.debug("Short test: " + DataHelper.eq(sd, sbuf)); + byte se[] = ctx.AESEngine().encrypt(sbuf, key, iv); + byte sd[] = ctx.AESEngine().decrypt(se, key, iv); + ctx.logManager().getLog(AESEngine.class).debug("Short test: " + DataHelper.eq(sd, sbuf)); byte lbuf[] = new byte[1024]; RandomSource.getInstance().nextBytes(sbuf); - byte le[] = AESEngine.getInstance().safeEncrypt(lbuf, key, iv, 2048); - byte ld[] = AESEngine.getInstance().safeDecrypt(le, key, iv); - _log.debug("Long test: " + DataHelper.eq(ld, lbuf)); + byte le[] = ctx.AESEngine().safeEncrypt(lbuf, key, iv, 2048); + byte ld[] = ctx.AESEngine().safeDecrypt(le, key, iv); + ctx.logManager().getLog(AESEngine.class).debug("Long test: " + DataHelper.eq(ld, lbuf)); } } \ No newline at end of file diff --git a/core/java/src/net/i2p/crypto/AESInputStream.java b/core/java/src/net/i2p/crypto/AESInputStream.java index 008b4576d..ad2cb4c01 100644 --- a/core/java/src/net/i2p/crypto/AESInputStream.java +++ b/core/java/src/net/i2p/crypto/AESInputStream.java @@ -24,6 +24,7 @@ import net.i2p.data.SessionKey; import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.I2PAppContext; /** * This reads an underlying stream as written by AESOutputStream - AES256 encrypted @@ -36,8 +37,8 @@ import net.i2p.util.RandomSource; * */ public class AESInputStream extends FilterInputStream { - private final static Log _log = new Log(AESInputStream.class); - private final static CryptixAESEngine _engine = new CryptixAESEngine(); + private Log _log; + private I2PAppContext _context; private SessionKey _key; private byte[] _lastBlock; private boolean _eofFound; @@ -52,8 +53,10 @@ public class AESInputStream extends FilterInputStream { private final static int 
READ_SIZE = BLOCK_SIZE; private final static int DECRYPT_SIZE = BLOCK_SIZE - 1; - public AESInputStream(InputStream source, SessionKey key, byte iv[]) { + public AESInputStream(I2PAppContext context, InputStream source, SessionKey key, byte iv[]) { super(source); + _context = context; + _log = context.logManager().getLog(AESInputStream.class); _key = key; _lastBlock = new byte[BLOCK_SIZE]; System.arraycopy(iv, 0, _lastBlock, 0, BLOCK_SIZE); @@ -223,8 +226,8 @@ public class AESInputStream extends FilterInputStream { byte block[] = new byte[BLOCK_SIZE]; for (int i = 0; i < numBlocks; i++) { System.arraycopy(encrypted, i * BLOCK_SIZE, block, 0, BLOCK_SIZE); - byte decrypted[] = _engine.decrypt(block, _key, _lastBlock); - byte data[] = CryptixAESEngine.xor(decrypted, _lastBlock); + byte decrypted[] = _context.AESEngine().decrypt(block, _key, _lastBlock); + byte data[] = DataHelper.xor(decrypted, _lastBlock); int cleaned[] = stripPadding(data); for (int j = 0; j < cleaned.length; j++) { if (((int) cleaned[j]) <= 0) { @@ -297,6 +300,8 @@ public class AESInputStream extends FilterInputStream { * Test AESOutputStream/AESInputStream */ public static void main(String args[]) { + I2PAppContext ctx = new I2PAppContext(); + Log log = ctx.logManager().getLog(AESInputStream.class); byte orig[] = new byte[1024 * 32]; RandomSource.getInstance().nextBytes(orig); //byte orig[] = "you are my sunshine, my only sunshine".getBytes(); @@ -304,40 +309,40 @@ public class AESInputStream extends FilterInputStream { byte iv[] = "there once was a".getBytes(); for (int i = 0; i < 20; i++) { - runTest(orig, key, iv); + runTest(ctx, orig, key, iv); } - _log.info("Done testing 32KB data"); + log.info("Done testing 32KB data"); orig = new byte[20]; RandomSource.getInstance().nextBytes(orig); for (int i = 0; i < 20; i++) { - runTest(orig, key, iv); + runTest(ctx, orig, key, iv); } - _log.info("Done testing 20 byte data"); + log.info("Done testing 20 byte data"); orig = new byte[3]; 
RandomSource.getInstance().nextBytes(orig); for (int i = 0; i < 20; i++) { - runTest(orig, key, iv); + runTest(ctx, orig, key, iv); } - _log.info("Done testing 3 byte data"); + log.info("Done testing 3 byte data"); orig = new byte[0]; RandomSource.getInstance().nextBytes(orig); for (int i = 0; i < 20; i++) { - runTest(orig, key, iv); + runTest(ctx, orig, key, iv); } - _log.info("Done testing 0 byte data"); + log.info("Done testing 0 byte data"); orig = new byte[32]; RandomSource.getInstance().nextBytes(orig); - runOffsetTest(orig, key, iv); + runOffsetTest(ctx, orig, key, iv); - _log.info("Done testing offset test (it should have come back with a statement NOT EQUAL!)"); + log.info("Done testing offset test (it should have come back with a statement NOT EQUAL!)"); try { Thread.sleep(30 * 1000); @@ -345,11 +350,12 @@ public class AESInputStream extends FilterInputStream { } } - private static void runTest(byte orig[], SessionKey key, byte[] iv) { + private static void runTest(I2PAppContext ctx, byte orig[], SessionKey key, byte[] iv) { + Log log = ctx.logManager().getLog(AESInputStream.class); try { long start = Clock.getInstance().now(); ByteArrayOutputStream origStream = new ByteArrayOutputStream(512); - AESOutputStream out = new AESOutputStream(origStream, key, iv); + AESOutputStream out = new AESOutputStream(ctx, origStream, key, iv); out.write(orig); out.close(); @@ -357,7 +363,7 @@ public class AESInputStream extends FilterInputStream { long endE = Clock.getInstance().now(); ByteArrayInputStream encryptedStream = new ByteArrayInputStream(encrypted); - AESInputStream in = new AESInputStream(encryptedStream, key, iv); + AESInputStream in = new AESInputStream(ctx, encryptedStream, key, iv); ByteArrayOutputStream baos = new ByteArrayOutputStream(512); byte buf[] = new byte[1024 * 32]; int read = DataHelper.read(in, buf); @@ -370,65 +376,66 @@ public class AESInputStream extends FilterInputStream { Hash newHash = SHA256Generator.getInstance().calculateHash(fin); 
boolean eq = origHash.equals(newHash); if (eq) - _log.info("Equal hashes. hash: " + origHash); + log.info("Equal hashes. hash: " + origHash); else - _log.error("NOT EQUAL! \norig: \t" + Base64.encode(orig) + "\nnew : \t" + Base64.encode(fin)); + log.error("NOT EQUAL! \norig: \t" + Base64.encode(orig) + "\nnew : \t" + Base64.encode(fin)); boolean ok = DataHelper.eq(orig, fin); - _log.debug("EQ data? " + ok + " origLen: " + orig.length + " fin.length: " + fin.length); - _log.debug("Time to D(E(" + orig.length + ")): " + (end - start) + "ms"); - _log.debug("Time to E(" + orig.length + "): " + (endE - start) + "ms"); - _log.debug("Time to D(" + orig.length + "): " + (end - endE) + "ms"); + log.debug("EQ data? " + ok + " origLen: " + orig.length + " fin.length: " + fin.length); + log.debug("Time to D(E(" + orig.length + ")): " + (end - start) + "ms"); + log.debug("Time to E(" + orig.length + "): " + (endE - start) + "ms"); + log.debug("Time to D(" + orig.length + "): " + (end - endE) + "ms"); } catch (Throwable t) { - _log.error("ERROR transferring", t); + log.error("ERROR transferring", t); } //try { Thread.sleep(5000); } catch (Throwable t) {} } - private static void runOffsetTest(byte orig[], SessionKey key, byte[] iv) { + private static void runOffsetTest(I2PAppContext ctx, byte orig[], SessionKey key, byte[] iv) { + Log log = ctx.logManager().getLog(AESInputStream.class); try { long start = Clock.getInstance().now(); ByteArrayOutputStream origStream = new ByteArrayOutputStream(512); - AESOutputStream out = new AESOutputStream(origStream, key, iv); + AESOutputStream out = new AESOutputStream(ctx, origStream, key, iv); out.write(orig); out.close(); byte encrypted[] = origStream.toByteArray(); long endE = Clock.getInstance().now(); - _log.info("Encrypted segment length: " + encrypted.length); + log.info("Encrypted segment length: " + encrypted.length); byte encryptedSegment[] = new byte[40]; System.arraycopy(encrypted, 0, encryptedSegment, 0, 40); ByteArrayInputStream 
encryptedStream = new ByteArrayInputStream(encryptedSegment); - AESInputStream in = new AESInputStream(encryptedStream, key, iv); + AESInputStream in = new AESInputStream(ctx, encryptedStream, key, iv); ByteArrayOutputStream baos = new ByteArrayOutputStream(512); byte buf[] = new byte[1024 * 32]; int read = DataHelper.read(in, buf); int remaining = in.remainingBytes(); int readyBytes = in.readyBytes(); - _log.info("Read: " + read); + log.info("Read: " + read); if (read > 0) baos.write(buf, 0, read); in.close(); byte fin[] = baos.toByteArray(); - _log.info("fin.length: " + fin.length + " remaining: " + remaining + " ready: " + readyBytes); + log.info("fin.length: " + fin.length + " remaining: " + remaining + " ready: " + readyBytes); long end = Clock.getInstance().now(); Hash origHash = SHA256Generator.getInstance().calculateHash(orig); Hash newHash = SHA256Generator.getInstance().calculateHash(fin); boolean eq = origHash.equals(newHash); if (eq) - _log.info("Equal hashes. hash: " + origHash); + log.info("Equal hashes. hash: " + origHash); else - _log.error("NOT EQUAL! \norig: \t" + Base64.encode(orig) + "\nnew : \t" + Base64.encode(fin)); + log.error("NOT EQUAL! \norig: \t" + Base64.encode(orig) + "\nnew : \t" + Base64.encode(fin)); boolean ok = DataHelper.eq(orig, fin); - _log.debug("EQ data? " + ok + " origLen: " + orig.length + " fin.length: " + fin.length); - _log.debug("Time to D(E(" + orig.length + ")): " + (end - start) + "ms"); - _log.debug("Time to E(" + orig.length + "): " + (endE - start) + "ms"); - _log.debug("Time to D(" + orig.length + "): " + (end - endE) + "ms"); + log.debug("EQ data? 
" + ok + " origLen: " + orig.length + " fin.length: " + fin.length); + log.debug("Time to D(E(" + orig.length + ")): " + (end - start) + "ms"); + log.debug("Time to E(" + orig.length + "): " + (endE - start) + "ms"); + log.debug("Time to D(" + orig.length + "): " + (end - endE) + "ms"); } catch (Throwable t) { - _log.error("ERROR transferring", t); + log.error("ERROR transferring", t); } //try { Thread.sleep(5000); } catch (Throwable t) {} } diff --git a/core/java/src/net/i2p/crypto/AESOutputStream.java b/core/java/src/net/i2p/crypto/AESOutputStream.java index c8d3c9d92..c1413c7e2 100644 --- a/core/java/src/net/i2p/crypto/AESOutputStream.java +++ b/core/java/src/net/i2p/crypto/AESOutputStream.java @@ -16,7 +16,9 @@ import java.io.OutputStream; import java.util.Arrays; import net.i2p.data.SessionKey; +import net.i2p.data.DataHelper; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * This writes everything as CBC with PKCS#5 padding, but each block is padded @@ -28,8 +30,8 @@ import net.i2p.util.Log; * */ public class AESOutputStream extends FilterOutputStream { - private final static CryptixAESEngine _engine = new CryptixAESEngine(); - private final static Log _log = new Log(AESOutputStream.class); + private Log _log; + private I2PAppContext _context; private SessionKey _key; private byte[] _lastBlock; private ByteArrayOutputStream _inBuf; @@ -42,8 +44,10 @@ public class AESOutputStream extends FilterOutputStream { private final static int BLOCK_SIZE = CryptixRijndael_Algorithm._BLOCK_SIZE; private final static int MAX_BUF = 256; - public AESOutputStream(OutputStream source, SessionKey key, byte[] iv) { + public AESOutputStream(I2PAppContext context, OutputStream source, SessionKey key, byte[] iv) { super(source); + _context = context; + _log = context.logManager().getLog(AESOutputStream.class); _key = key; _lastBlock = new byte[BLOCK_SIZE]; System.arraycopy(iv, 0, _lastBlock, 0, BLOCK_SIZE); @@ -104,8 +108,8 @@ public class AESOutputStream extends 
FilterOutputStream { block[BLOCK_SIZE - 1] = 0x01; // the padding byte for "full" blocks for (int i = 0; i < numBlocks; i++) { System.arraycopy(src, i * 15, block, 0, 15); - byte data[] = _engine.xor(block, _lastBlock); - byte encrypted[] = _engine.encrypt(data, _key, _lastBlock); + byte data[] = DataHelper.xor(block, _lastBlock); + byte encrypted[] = _context.AESEngine().encrypt(data, _key, _lastBlock); _cumulativeWritten += encrypted.length; out.write(encrypted); System.arraycopy(encrypted, encrypted.length - BLOCK_SIZE, _lastBlock, 0, BLOCK_SIZE); @@ -118,8 +122,8 @@ public class AESOutputStream extends FilterOutputStream { int paddingBytes = BLOCK_SIZE - remainingBytes; System.arraycopy(src, numBlocks * 15, block, 0, remainingBytes); Arrays.fill(block, remainingBytes, BLOCK_SIZE, (byte) paddingBytes); - byte data[] = _engine.xor(block, _lastBlock); - byte encrypted[] = _engine.encrypt(data, _key, _lastBlock); + byte data[] = DataHelper.xor(block, _lastBlock); + byte encrypted[] = _context.AESEngine().encrypt(data, _key, _lastBlock); out.write(encrypted); System.arraycopy(encrypted, encrypted.length - BLOCK_SIZE, _lastBlock, 0, BLOCK_SIZE); _cumulativePadding += paddingBytes; diff --git a/core/java/src/net/i2p/crypto/CryptixAESEngine.java b/core/java/src/net/i2p/crypto/CryptixAESEngine.java index cd0337ed5..397f67c32 100644 --- a/core/java/src/net/i2p/crypto/CryptixAESEngine.java +++ b/core/java/src/net/i2p/crypto/CryptixAESEngine.java @@ -13,6 +13,7 @@ import java.security.InvalidKeyException; import net.i2p.data.SessionKey; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Wrapper for AES cypher operation using Cryptix's Rijndael implementation. 
Implements @@ -23,10 +24,15 @@ import net.i2p.util.Log; * @author jrandom, thecrypto */ public class CryptixAESEngine extends AESEngine { - private final static Log _log = new Log(CryptixAESEngine.class); + private Log _log; private final static CryptixRijndael_Algorithm _algo = new CryptixRijndael_Algorithm(); private final static boolean USE_FAKE_CRYPTO = false; private final static byte FAKE_KEY = 0x2A; + + public CryptixAESEngine(I2PAppContext context) { + super(context); + _log = context.logManager().getLog(CryptixAESEngine.class); + } public byte[] encrypt(byte payload[], SessionKey sessionKey, byte initializationVector[]) { if ((initializationVector == null) || (payload == null) || (payload.length <= 0) || (sessionKey == null) @@ -116,7 +122,7 @@ public class CryptixAESEngine extends AESEngine { * @param sessionKey private esession key to encrypt to * @return encrypted data */ - final static byte[] encrypt(byte payload[], SessionKey sessionKey) { + final byte[] encrypt(byte payload[], SessionKey sessionKey) { try { Object key = CryptixRijndael_Algorithm.makeKey(sessionKey.getData(), 16); byte rv[] = new byte[payload.length]; @@ -133,7 +139,7 @@ public class CryptixAESEngine extends AESEngine { * @param sessionKey private session key * @return unencrypted data */ - final static byte[] decrypt(byte payload[], SessionKey sessionKey) { + final byte[] decrypt(byte payload[], SessionKey sessionKey) { try { Object key = CryptixRijndael_Algorithm.makeKey(sessionKey.getData(), 16); byte rv[] = new byte[payload.length]; diff --git a/core/java/src/net/i2p/crypto/DHSessionKeyBuilder.java b/core/java/src/net/i2p/crypto/DHSessionKeyBuilder.java index 52d95285a..4187348aa 100644 --- a/core/java/src/net/i2p/crypto/DHSessionKeyBuilder.java +++ b/core/java/src/net/i2p/crypto/DHSessionKeyBuilder.java @@ -20,6 +20,7 @@ import net.i2p.util.I2PThread; import net.i2p.util.Log; import net.i2p.util.NativeBigInteger; import net.i2p.util.RandomSource; +import net.i2p.I2PAppContext; 
/** * Generate a new session key through a diffie hellman exchange. This uses the @@ -62,22 +63,23 @@ public class DHSessionKeyBuilder { public final static String DEFAULT_DH_PRECALC_DELAY = "1000"; static { + I2PAppContext ctx = I2PAppContext.getGlobalContext(); try { - int val = Integer.parseInt(System.getProperty(PROP_DH_PRECALC_MIN, DEFAULT_DH_PRECALC_MIN)); + int val = Integer.parseInt(ctx.getProperty(PROP_DH_PRECALC_MIN, DEFAULT_DH_PRECALC_MIN)); MIN_NUM_BUILDERS = val; } catch (Throwable t) { int val = Integer.parseInt(DEFAULT_DH_PRECALC_MIN); MIN_NUM_BUILDERS = val; } try { - int val = Integer.parseInt(System.getProperty(PROP_DH_PRECALC_MAX, DEFAULT_DH_PRECALC_MAX)); + int val = Integer.parseInt(ctx.getProperty(PROP_DH_PRECALC_MAX, DEFAULT_DH_PRECALC_MAX)); MAX_NUM_BUILDERS = val; } catch (Throwable t) { int val = Integer.parseInt(DEFAULT_DH_PRECALC_MAX); MAX_NUM_BUILDERS = val; } try { - int val = Integer.parseInt(System.getProperty(PROP_DH_PRECALC_DELAY, DEFAULT_DH_PRECALC_DELAY)); + int val = Integer.parseInt(ctx.getProperty(PROP_DH_PRECALC_DELAY, DEFAULT_DH_PRECALC_DELAY)); CALC_DELAY = val; } catch (Throwable t) { int val = Integer.parseInt(DEFAULT_DH_PRECALC_DELAY); @@ -266,6 +268,7 @@ public class DHSessionKeyBuilder { Thread.sleep(20 * 1000); } catch (InterruptedException ie) { } + I2PAppContext ctx = new I2PAppContext(); _log.debug("\n\n\n\nBegin test\n"); long negTime = 0; for (int i = 0; i < 5; i++) { @@ -289,8 +292,8 @@ public class DHSessionKeyBuilder { byte iv[] = new byte[16]; RandomSource.getInstance().nextBytes(iv); String origVal = "1234567890123456"; // 16 bytes max using AESEngine - byte enc[] = AESEngine.getInstance().encrypt(origVal.getBytes(), key1, iv); - byte dec[] = AESEngine.getInstance().decrypt(enc, key2, iv); + byte enc[] = ctx.AESEngine().encrypt(origVal.getBytes(), key1, iv); + byte dec[] = ctx.AESEngine().decrypt(enc, key2, iv); String tranVal = new String(dec); if (origVal.equals(tranVal)) _log.debug("**Success: D(E(val)) 
== val"); diff --git a/core/java/src/net/i2p/crypto/DSAEngine.java b/core/java/src/net/i2p/crypto/DSAEngine.java index c19a28cc7..331edefb4 100644 --- a/core/java/src/net/i2p/crypto/DSAEngine.java +++ b/core/java/src/net/i2p/crypto/DSAEngine.java @@ -39,17 +39,22 @@ import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.NativeBigInteger; import net.i2p.util.RandomSource; +import net.i2p.I2PAppContext; public class DSAEngine { - private final static Log _log = new Log(DSAEngine.class); - private static DSAEngine _instance = new DSAEngine(); + private Log _log; + private I2PAppContext _context; - public static DSAEngine getInstance() { - return _instance; + public DSAEngine(I2PAppContext context) { + _log = context.logManager().getLog(DSAEngine.class); + _context = context; } - + public static DSAEngine getInstance() { + return I2PAppContext.getGlobalContext().dsa(); + } + public boolean verifySignature(Signature signature, byte signedData[], SigningPublicKey verifyingKey) { - long start = Clock.getInstance().now(); + long start = _context.clock().now(); byte[] sigbytes = signature.getData(); byte rbytes[] = new byte[20]; @@ -65,22 +70,20 @@ public class DSAEngine { BigInteger r = new NativeBigInteger(1, rbytes); BigInteger y = new NativeBigInteger(1, verifyingKey.getData()); BigInteger w = s.modInverse(CryptoConstants.dsaq); - BigInteger u1 = ((new NativeBigInteger(1, calculateHash(signedData).getData())).multiply(w)) - .mod(CryptoConstants.dsaq); + byte data[] = calculateHash(signedData).getData(); + NativeBigInteger bi = new NativeBigInteger(1, data); + BigInteger u1 = bi.multiply(w).mod(CryptoConstants.dsaq); BigInteger u2 = r.multiply(w).mod(CryptoConstants.dsaq); - BigInteger v = ((CryptoConstants.dsag.modPow(u1, CryptoConstants.dsap)) - .multiply(y.modPow(u2, - CryptoConstants.dsap))) - .mod( - CryptoConstants.dsap) - .mod( - CryptoConstants.dsaq); + BigInteger modval = CryptoConstants.dsag.modPow(u1, CryptoConstants.dsap); + BigInteger 
modmulval = modval.multiply(y.modPow(u2,CryptoConstants.dsap)); + BigInteger v = (modmulval).mod(CryptoConstants.dsap).mod(CryptoConstants.dsaq); boolean ok = v.compareTo(r) == 0; - long diff = Clock.getInstance().now() - start; + long diff = _context.clock().now() - start; if (diff > 1000) { - if (_log.shouldLog(Log.WARN)) _log.warn("Took too long to verify the signature (" + diff + "ms)"); + if (_log.shouldLog(Log.WARN)) + _log.warn("Took too long to verify the signature (" + diff + "ms)"); } return ok; @@ -88,13 +91,13 @@ public class DSAEngine { public Signature sign(byte data[], SigningPrivateKey signingKey) { if ((signingKey == null) || (data == null) || (data.length <= 0)) return null; - long start = Clock.getInstance().now(); + long start = _context.clock().now(); Signature sig = new Signature(); BigInteger k; do { - k = new BigInteger(160, RandomSource.getInstance()); + k = new BigInteger(160, _context.random()); } while (k.compareTo(CryptoConstants.dsaq) != 1); BigInteger r = CryptoConstants.dsag.modPow(k, CryptoConstants.dsap).mod(CryptoConstants.dsaq); @@ -139,7 +142,7 @@ public class DSAEngine { } sig.setData(out); - long diff = Clock.getInstance().now() - start; + long diff = _context.clock().now() - start; if (diff > 1000) { if (_log.shouldLog(Log.WARN)) _log.warn("Took too long to sign (" + diff + "ms)"); } diff --git a/core/java/src/net/i2p/crypto/DummyElGamalEngine.java b/core/java/src/net/i2p/crypto/DummyElGamalEngine.java index d86428ded..8a6376a50 100644 --- a/core/java/src/net/i2p/crypto/DummyElGamalEngine.java +++ b/core/java/src/net/i2p/crypto/DummyElGamalEngine.java @@ -17,6 +17,7 @@ import net.i2p.data.Hash; import net.i2p.data.PrivateKey; import net.i2p.data.PublicKey; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Fake ElG E and D, useful for when performance isn't being tested @@ -26,11 +27,19 @@ import net.i2p.util.Log; public class DummyElGamalEngine extends ElGamalEngine { private final static Log _log = new 
Log(DummyElGamalEngine.class); - public DummyElGamalEngine() { + /** + * The ElGamal engine should only be constructed and accessed through the + * application context. This constructor should only be used by the + * appropriate application context itself. + * + */ + public DummyElGamalEngine(I2PAppContext context) { + super(context); _log.log(Log.CRIT, "Dummy ElGamal engine in use! NO DATA SECURITY. Danger Will Robinson, Danger!", new Exception("I really hope you know what you're doing")); } - + private DummyElGamalEngine() { super(null); } + /** encrypt the data to the public key * @return encrypted data * @param publicKey public key encrypt to diff --git a/core/java/src/net/i2p/crypto/ElGamalAESEngine.java b/core/java/src/net/i2p/crypto/ElGamalAESEngine.java index 1e9e54fa0..08d455cc5 100644 --- a/core/java/src/net/i2p/crypto/ElGamalAESEngine.java +++ b/core/java/src/net/i2p/crypto/ElGamalAESEngine.java @@ -29,6 +29,7 @@ import net.i2p.stat.StatManager; import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.I2PAppContext; /** * Handles the actual ElGamal+AES encryption and decryption scenarios using the @@ -37,28 +38,27 @@ import net.i2p.util.RandomSource; public class ElGamalAESEngine { private final static Log _log = new Log(ElGamalAESEngine.class); private final static int MIN_ENCRYPTED_SIZE = 80; // smallest possible resulting size + private I2PAppContext _context; - static { - StatManager.getInstance() - .createFrequencyStat("crypto.elGamalAES.encryptNewSession", - "how frequently we encrypt to a new ElGamal/AES+SessionTag session?", - "Encryption", new long[] { 60 * 1000l, 60 * 60 * 1000l, 24 * 60 * 60 * 1000l}); - StatManager.getInstance() - .createFrequencyStat("crypto.elGamalAES.encryptExistingSession", - "how frequently we encrypt to an existing ElGamal/AES+SessionTag session?", - "Encryption", new long[] { 60 * 1000l, 60 * 60 * 1000l, 24 * 60 * 60 * 1000l}); - StatManager.getInstance() - 
.createFrequencyStat("crypto.elGamalAES.decryptNewSession", - "how frequently we decrypt with a new ElGamal/AES+SessionTag session?", - "Encryption", new long[] { 60 * 1000l, 60 * 60 * 1000l, 24 * 60 * 60 * 1000l}); - StatManager.getInstance() - .createFrequencyStat("crypto.elGamalAES.decryptExistingSession", - "how frequently we decrypt with an existing ElGamal/AES+SessionTag session?", - "Encryption", new long[] { 60 * 1000l, 60 * 60 * 1000l, 24 * 60 * 60 * 1000l}); - StatManager.getInstance() - .createFrequencyStat("crypto.elGamalAES.decryptFail", - "how frequently we fail to decrypt with ElGamal/AES+SessionTag?", "Encryption", - new long[] { 60 * 60 * 1000l, 24 * 60 * 60 * 1000l}); + private ElGamalAESEngine() {} + public ElGamalAESEngine(I2PAppContext ctx) { + _context = ctx; + + _context.statManager().createFrequencyStat("crypto.elGamalAES.encryptNewSession", + "how frequently we encrypt to a new ElGamal/AES+SessionTag session?", + "Encryption", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l}); + _context.statManager().createFrequencyStat("crypto.elGamalAES.encryptExistingSession", + "how frequently we encrypt to an existing ElGamal/AES+SessionTag session?", + "Encryption", new long[] { 60 * 1000l, 60 * 60 * 1000l, 24 * 60 * 60 * 1000l}); + _context.statManager().createFrequencyStat("crypto.elGamalAES.decryptNewSession", + "how frequently we decrypt with a new ElGamal/AES+SessionTag session?", + "Encryption", new long[] { 60 * 1000l, 60 * 60 * 1000l, 24 * 60 * 60 * 1000l}); + _context.statManager().createFrequencyStat("crypto.elGamalAES.decryptExistingSession", + "how frequently we decrypt with an existing ElGamal/AES+SessionTag session?", + "Encryption", new long[] { 60 * 1000l, 60 * 60 * 1000l, 24 * 60 * 60 * 1000l}); + _context.statManager().createFrequencyStat("crypto.elGamalAES.decryptFail", + "how frequently we fail to decrypt with ElGamal/AES+SessionTag?", "Encryption", + new long[] { 60 * 60 * 1000l, 24 * 60 * 60 * 1000l}); } /** @@ -66,7 +66,7 @@ 
public class ElGamalAESEngine { * ElGamal+AES algorithm in the data structure spec. * */ - public static byte[] decrypt(byte data[], PrivateKey targetPrivateKey) throws DataFormatException { + public byte[] decrypt(byte data[], PrivateKey targetPrivateKey) throws DataFormatException { if (data == null) { if (_log.shouldLog(Log.WARN)) _log.warn("Null data being decrypted?"); return null; @@ -79,7 +79,7 @@ public class ElGamalAESEngine { byte tag[] = new byte[32]; System.arraycopy(data, 0, tag, 0, tag.length); SessionTag st = new SessionTag(tag); - SessionKey key = SessionKeyManager.getInstance().consumeTag(st); + SessionKey key = _context.sessionKeyManager().consumeTag(st); SessionKey foundKey = new SessionKey(); foundKey.setData(null); SessionKey usedKey = new SessionKey(); @@ -90,16 +90,16 @@ public class ElGamalAESEngine { usedKey.setData(key.getData()); decrypted = decryptExistingSession(data, key, targetPrivateKey, foundTags, usedKey, foundKey); if (decrypted != null) - StatManager.getInstance().updateFrequency("crypto.elGamalAES.decryptExistingSession"); + _context.statManager().updateFrequency("crypto.elGamalAES.decryptExistingSession"); else - StatManager.getInstance().updateFrequency("crypto.elGamalAES.decryptFailed"); + _context.statManager().updateFrequency("crypto.elGamalAES.decryptFailed"); } else { if (_log.shouldLog(Log.DEBUG)) _log.debug("Key is NOT known for tag " + st); decrypted = decryptNewSession(data, targetPrivateKey, foundTags, usedKey, foundKey); if (decrypted != null) - StatManager.getInstance().updateFrequency("crypto.elGamalAES.decryptNewSession"); + _context.statManager().updateFrequency("crypto.elGamalAES.decryptNewSession"); else - StatManager.getInstance().updateFrequency("crypto.elGamalAES.decryptFailed"); + _context.statManager().updateFrequency("crypto.elGamalAES.decryptFailed"); } if ((key == null) && (decrypted == null)) { @@ -109,10 +109,10 @@ public class ElGamalAESEngine { if (foundTags.size() > 0) { if (foundKey.getData() != 
null) { if (_log.shouldLog(Log.DEBUG)) _log.debug("Found key: " + foundKey); - SessionKeyManager.getInstance().tagsReceived(foundKey, foundTags); + _context.sessionKeyManager().tagsReceived(foundKey, foundTags); } else { if (_log.shouldLog(Log.DEBUG)) _log.debug("Used key: " + usedKey); - SessionKeyManager.getInstance().tagsReceived(usedKey, foundTags); + _context.sessionKeyManager().tagsReceived(usedKey, foundTags); } } return decrypted; @@ -132,7 +132,7 @@ public class ElGamalAESEngine { * * @return null if decryption fails */ - static byte[] decryptNewSession(byte data[], PrivateKey targetPrivateKey, Set foundTags, SessionKey usedKey, + byte[] decryptNewSession(byte data[], PrivateKey targetPrivateKey, Set foundTags, SessionKey usedKey, SessionKey foundKey) throws DataFormatException { if (data == null) { if (_log.shouldLog(Log.WARN)) _log.warn("Data is null, unable to decrypt new session"); @@ -147,7 +147,7 @@ public class ElGamalAESEngine { } else { System.arraycopy(data, 0, elgEncr, 514 - data.length, data.length); } - byte elgDecr[] = ElGamalEngine.getInstance().decrypt(elgEncr, targetPrivateKey); + byte elgDecr[] = _context.elGamalEngine().decrypt(elgEncr, targetPrivateKey); if (elgDecr == null) return null; ByteArrayInputStream bais = new ByteArrayInputStream(elgDecr); @@ -170,7 +170,7 @@ public class ElGamalAESEngine { //_log.debug("Pre IV for decryptNewSession: " + DataHelper.toString(preIV, 32)); //_log.debug("SessionKey for decryptNewSession: " + DataHelper.toString(key.getData(), 32)); - Hash ivHash = SHA256Generator.getInstance().calculateHash(preIV); + Hash ivHash = _context.sha().calculateHash(preIV); byte iv[] = new byte[16]; System.arraycopy(ivHash.getData(), 0, iv, 0, 16); @@ -200,13 +200,13 @@ public class ElGamalAESEngine { * @param foundKey session key which may be filled with a new sessionKey found during decryption * */ - static byte[] decryptExistingSession(byte data[], SessionKey key, PrivateKey targetPrivateKey, Set foundTags, + byte[] 
decryptExistingSession(byte data[], SessionKey key, PrivateKey targetPrivateKey, Set foundTags, SessionKey usedKey, SessionKey foundKey) throws DataFormatException { byte preIV[] = new byte[32]; System.arraycopy(data, 0, preIV, 0, preIV.length); byte encr[] = new byte[data.length - 32]; System.arraycopy(data, 32, encr, 0, encr.length); - Hash ivHash = SHA256Generator.getInstance().calculateHash(preIV); + Hash ivHash = _context.sha().calculateHash(preIV); byte iv[] = new byte[16]; System.arraycopy(ivHash.getData(), 0, iv, 0, 16); @@ -246,12 +246,12 @@ public class ElGamalAESEngine { * @param foundTags set which is filled with any sessionTags found during decryption * @param foundKey session key which may be filled with a new sessionKey found during decryption */ - static byte[] decryptAESBlock(byte encrypted[], SessionKey key, byte iv[], byte sentTag[], Set foundTags, + byte[] decryptAESBlock(byte encrypted[], SessionKey key, byte iv[], byte sentTag[], Set foundTags, SessionKey foundKey) throws DataFormatException { //_log.debug("iv for decryption: " + DataHelper.toString(iv, 16)); //_log.debug("decrypting AES block. encr.length = " + (encrypted == null? 
-1 : encrypted.length) + " sentTag: " + DataHelper.toString(sentTag, 32)); - byte decrypted[] = AESEngine.getInstance().decrypt(encrypted, key, iv); - Hash h = SHA256Generator.getInstance().calculateHash(decrypted); + byte decrypted[] = _context.AESEngine().decrypt(encrypted, key, iv); + Hash h = _context.sha().calculateHash(decrypted); //_log.debug("Hash of entire aes block after decryption: \n" + DataHelper.toString(h.getData(), 32)); try { SessionKey newKey = null; @@ -289,7 +289,7 @@ public class ElGamalAESEngine { byte unencrData[] = new byte[(int) len]; read = bais.read(unencrData); if (read != unencrData.length) throw new Exception("Invalid size of the data read"); - Hash calcHash = SHA256Generator.getInstance().calculateHash(unencrData); + Hash calcHash = _context.sha().calculateHash(unencrData); if (calcHash.equals(readHash)) { // everything matches. w00t. foundTags.addAll(tags); @@ -317,17 +317,17 @@ public class ElGamalAESEngine { * @param paddedSize minimum size in bytes of the body after padding it (if less than the * body's real size, no bytes are appended but the body is not truncated) */ - public static byte[] encrypt(byte data[], PublicKey target, SessionKey key, Set tagsForDelivery, + public byte[] encrypt(byte data[], PublicKey target, SessionKey key, Set tagsForDelivery, SessionTag currentTag, SessionKey newKey, long paddedSize) { if (currentTag == null) { if (_log.shouldLog(Log.INFO)) _log.info("Current tag is null, encrypting as new session", new Exception("encrypt new")); - StatManager.getInstance().updateFrequency("crypto.elGamalAES.encryptNewSession"); + _context.statManager().updateFrequency("crypto.elGamalAES.encryptNewSession"); return encryptNewSession(data, target, key, tagsForDelivery, newKey, paddedSize); } else { if (_log.shouldLog(Log.INFO)) _log.info("Current tag is NOT null, encrypting as existing session", new Exception("encrypt existing")); - StatManager.getInstance().updateFrequency("crypto.elGamalAES.encryptExistingSession"); 
+ _context.statManager().updateFrequency("crypto.elGamalAES.encryptExistingSession"); return encryptExistingSession(data, target, key, tagsForDelivery, currentTag, newKey, paddedSize); } } @@ -335,7 +335,7 @@ public class ElGamalAESEngine { /** * Encrypt the data to the target using the given key and deliver the specified tags */ - public static byte[] encrypt(byte data[], PublicKey target, SessionKey key, Set tagsForDelivery, + public byte[] encrypt(byte data[], PublicKey target, SessionKey key, Set tagsForDelivery, SessionTag currentTag, long paddedSize) { return encrypt(data, target, key, tagsForDelivery, currentTag, null, paddedSize); } @@ -343,14 +343,14 @@ public class ElGamalAESEngine { /** * Encrypt the data to the target using the given key and deliver the specified tags */ - public static byte[] encrypt(byte data[], PublicKey target, SessionKey key, Set tagsForDelivery, long paddedSize) { + public byte[] encrypt(byte data[], PublicKey target, SessionKey key, Set tagsForDelivery, long paddedSize) { return encrypt(data, target, key, tagsForDelivery, null, null, paddedSize); } /** * Encrypt the data to the target using the given key delivering no tags */ - public static byte[] encrypt(byte data[], PublicKey target, SessionKey key, long paddedSize) { + public byte[] encrypt(byte data[], PublicKey target, SessionKey key, long paddedSize) { return encrypt(data, target, key, null, null, null, paddedSize); } @@ -370,25 +370,25 @@ public class ElGamalAESEngine { * - random bytes, padding the total size to greater than paddedSize with a mod 16 = 0 * */ - static byte[] encryptNewSession(byte data[], PublicKey target, SessionKey key, Set tagsForDelivery, + byte[] encryptNewSession(byte data[], PublicKey target, SessionKey key, Set tagsForDelivery, SessionKey newKey, long paddedSize) { //_log.debug("Encrypting to a NEW session"); try { ByteArrayOutputStream elgSrc = new ByteArrayOutputStream(64); key.writeBytes(elgSrc); byte preIV[] = new byte[32]; - 
RandomSource.getInstance().nextBytes(preIV); + _context.random().nextBytes(preIV); elgSrc.write(preIV); byte rnd[] = new byte[158]; - RandomSource.getInstance().nextBytes(rnd); + _context.random().nextBytes(rnd); elgSrc.write(rnd); elgSrc.flush(); //_log.debug("Pre IV for encryptNewSession: " + DataHelper.toString(preIV, 32)); //_log.debug("SessionKey for encryptNewSession: " + DataHelper.toString(key.getData(), 32)); - long before = Clock.getInstance().now(); - byte elgEncr[] = ElGamalEngine.getInstance().encrypt(elgSrc.toByteArray(), target); - long after = Clock.getInstance().now(); + long before = _context.clock().now(); + byte elgEncr[] = _context.elGamalEngine().encrypt(elgSrc.toByteArray(), target); + long after = _context.clock().now(); if (_log.shouldLog(Log.INFO)) _log.info("elgEngine.encrypt of the session key took " + (after - before) + "ms"); if (elgEncr.length < 514) { @@ -400,7 +400,7 @@ public class ElGamalAESEngine { } //_log.debug("ElGamal encrypted length: " + elgEncr.length + " elGamal source length: " + elgSrc.toByteArray().length); - Hash ivHash = SHA256Generator.getInstance().calculateHash(preIV); + Hash ivHash = _context.sha().calculateHash(preIV); byte iv[] = new byte[16]; System.arraycopy(ivHash.getData(), 0, iv, 0, 16); byte aesEncr[] = encryptAESBlock(data, key, iv, tagsForDelivery, newKey, paddedSize); @@ -410,7 +410,7 @@ public class ElGamalAESEngine { System.arraycopy(elgEncr, 0, rv, 0, elgEncr.length); System.arraycopy(aesEncr, 0, rv, elgEncr.length, aesEncr.length); //_log.debug("Return length: " + rv.length); - long finish = Clock.getInstance().now(); + long finish = _context.clock().now(); if (_log.shouldLog(Log.DEBUG)) _log.debug("after the elgEngine.encrypt took a total of " + (finish - after) + "ms"); return rv; @@ -436,14 +436,14 @@ public class ElGamalAESEngine { * - random bytes, padding the total size to greater than paddedSize with a mod 16 = 0 * */ - static byte[] encryptExistingSession(byte data[], PublicKey target, 
SessionKey key, Set tagsForDelivery, + byte[] encryptExistingSession(byte data[], PublicKey target, SessionKey key, Set tagsForDelivery, SessionTag currentTag, SessionKey newKey, long paddedSize) { //_log.debug("Encrypting to an EXISTING session"); byte rawTag[] = currentTag.getData(); //_log.debug("Pre IV for encryptExistingSession (aka tag): " + currentTag.toString()); //_log.debug("SessionKey for encryptNewSession: " + DataHelper.toString(key.getData(), 32)); - Hash ivHash = SHA256Generator.getInstance().calculateHash(rawTag); + Hash ivHash = _context.sha().calculateHash(rawTag); byte iv[] = new byte[16]; System.arraycopy(ivHash.getData(), 0, iv, 0, 16); @@ -469,7 +469,7 @@ public class ElGamalAESEngine { * - random bytes, padding the total size to greater than paddedSize with a mod 16 = 0 * */ - final static byte[] encryptAESBlock(byte data[], SessionKey key, byte[] iv, Set tagsForDelivery, SessionKey newKey, + final byte[] encryptAESBlock(byte data[], SessionKey key, byte[] iv, Set tagsForDelivery, SessionKey newKey, long paddedSize) { //_log.debug("iv for encryption: " + DataHelper.toString(iv, 16)); //_log.debug("Encrypting AES"); @@ -484,7 +484,7 @@ public class ElGamalAESEngine { //_log.debug("# tags created, registered, and written: " + tags.size()); DataHelper.writeLong(aesSrc, 4, data.length); //_log.debug("data length: " + data.length); - Hash hash = SHA256Generator.getInstance().calculateHash(data); + Hash hash = _context.sha().calculateHash(data); hash.writeBytes(aesSrc); //_log.debug("hash of data: " + DataHelper.toString(hash.getData(), 32)); if (newKey == null) { @@ -499,14 +499,14 @@ public class ElGamalAESEngine { aesSrc.write(data); int len = aesSrc.toByteArray().length; //_log.debug("raw data written: " + len); - byte padding[] = getPadding(len, paddedSize); + byte padding[] = getPadding(_context, len, paddedSize); //_log.debug("padding length: " + padding.length); aesSrc.write(padding); byte aesUnencr[] = aesSrc.toByteArray(); - Hash h = 
SHA256Generator.getInstance().calculateHash(aesUnencr); + Hash h = _context.sha().calculateHash(aesUnencr); //_log.debug("Hash of entire aes block before encryption: (len=" + aesUnencr.length + ")\n" + DataHelper.toString(h.getData(), 32)); - byte aesEncr[] = AESEngine.getInstance().encrypt(aesUnencr, key, iv); + byte aesEncr[] = _context.AESEngine().encrypt(aesUnencr, key, iv); //_log.debug("Encrypted length: " + aesEncr.length); return aesEncr; } catch (IOException ioe) { @@ -523,7 +523,7 @@ public class ElGamalAESEngine { * at least minPaddedSize * */ - final static byte[] getPadding(int curSize, long minPaddedSize) { + final static byte[] getPadding(I2PAppContext context, int curSize, long minPaddedSize) { int diff = 0; if (curSize < minPaddedSize) { diff = (int) minPaddedSize - curSize; @@ -532,7 +532,7 @@ public class ElGamalAESEngine { int numPadding = diff; if (((curSize + diff) % 16) != 0) numPadding += (16 - ((curSize + diff) % 16)); byte rv[] = new byte[numPadding]; - RandomSource.getInstance().nextBytes(rv); + context.random().nextBytes(rv); return rv; } diff --git a/core/java/src/net/i2p/crypto/ElGamalEngine.java b/core/java/src/net/i2p/crypto/ElGamalEngine.java index 03923fa80..f90f9fedc 100644 --- a/core/java/src/net/i2p/crypto/ElGamalEngine.java +++ b/core/java/src/net/i2p/crypto/ElGamalEngine.java @@ -43,6 +43,7 @@ import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.NativeBigInteger; import net.i2p.util.RandomSource; +import net.i2p.I2PAppContext; /** * Wrapper for ElGamal encryption/signature schemes. 
@@ -56,25 +57,28 @@ import net.i2p.util.RandomSource; */ public class ElGamalEngine { - private final static Log _log = new Log(ElGamalEngine.class); - private static ElGamalEngine _engine; - static { - if ("off".equals(System.getProperty("i2p.encryption", "on"))) - _engine = new DummyElGamalEngine(); - else - _engine = new ElGamalEngine(); - - StatManager.getInstance().createRateStat("crypto.elGamal.encrypt", - "how long does it take to do a full ElGamal encryption", "Encryption", - new long[] { 60 * 1000, 60 * 60 * 1000, 24 * 60 * 60 * 1000}); - StatManager.getInstance().createRateStat("crypto.elGamal.decrypt", - "how long does it take to do a full ElGamal decryption", "Encryption", - new long[] { 60 * 1000, 60 * 60 * 1000, 24 * 60 * 60 * 1000}); + private Log _log; + private I2PAppContext _context; + + /** + * The ElGamal engine should only be constructed and accessed through the + * application context. This constructor should only be used by the + * appropriate application context itself. 
+ * + */ + public ElGamalEngine(I2PAppContext context) { + context.statManager().createRateStat("crypto.elGamal.encrypt", + "how long does it take to do a full ElGamal encryption", "Encryption", + new long[] { 60 * 1000, 60 * 60 * 1000, 24 * 60 * 60 * 1000}); + context.statManager().createRateStat("crypto.elGamal.decrypt", + "how long does it take to do a full ElGamal decryption", "Encryption", + new long[] { 60 * 1000, 60 * 60 * 1000, 24 * 60 * 60 * 1000}); + _context = context; + _log = context.logManager().getLog(ElGamalEngine.class); } + private ElGamalEngine() {} - public static ElGamalEngine getInstance() { - return _engine; - } + private final static BigInteger _two = new NativeBigInteger(1, new byte[] { 0x02}); private BigInteger[] getNextYK() { @@ -91,12 +95,12 @@ public class ElGamalEngine { throw new IllegalArgumentException("Data to encrypt must be < 223 bytes at the moment"); if (publicKey == null) throw new IllegalArgumentException("Null public key specified"); - long start = Clock.getInstance().now(); + long start = _context.clock().now(); ByteArrayOutputStream baos = new ByteArrayOutputStream(256); try { baos.write(0xFF); - Hash hash = SHA256Generator.getInstance().calculateHash(data); + Hash hash = _context.sha().calculateHash(data); hash.writeBytes(baos); baos.write(data); baos.flush(); @@ -106,25 +110,25 @@ public class ElGamalEngine { } byte d2[] = baos.toByteArray(); - long t0 = Clock.getInstance().now(); + long t0 = _context.clock().now(); BigInteger m = new NativeBigInteger(1, d2); - long t1 = Clock.getInstance().now(); + long t1 = _context.clock().now(); if (m.compareTo(CryptoConstants.elgp) >= 0) throw new IllegalArgumentException("ARGH. Data cannot be larger than the ElGamal prime. 
FIXME"); - long t2 = Clock.getInstance().now(); + long t2 = _context.clock().now(); BigInteger aalpha = new NativeBigInteger(1, publicKey.getData()); - long t3 = Clock.getInstance().now(); + long t3 = _context.clock().now(); BigInteger yk[] = getNextYK(); BigInteger k = yk[1]; BigInteger y = yk[0]; - long t7 = Clock.getInstance().now(); + long t7 = _context.clock().now(); BigInteger d = aalpha.modPow(k, CryptoConstants.elgp); - long t8 = Clock.getInstance().now(); + long t8 = _context.clock().now(); d = d.multiply(m); - long t9 = Clock.getInstance().now(); + long t9 = _context.clock().now(); d = d.mod(CryptoConstants.elgp); - long t10 = Clock.getInstance().now(); + long t10 = _context.clock().now(); byte[] ybytes = y.toByteArray(); byte[] dbytes = d.toByteArray(); @@ -146,14 +150,14 @@ public class ElGamalEngine { buf.append("8-9: ").append(t9 - t8).append('\n'); buf.append("9-10: ").append(t10 - t9).append('\n'); //_log.debug(buf.toString()); - long end = Clock.getInstance().now(); + long end = _context.clock().now(); long diff = end - start; if (diff > 1000) { if (_log.shouldLog(Log.WARN)) _log.warn("Took too long to encrypt ElGamal block (" + diff + "ms)"); } - StatManager.getInstance().addRateData("crypto.elGamal.encrypt", diff, diff); + _context.statManager().addRateData("crypto.elGamal.encrypt", diff, diff); return out; } @@ -165,7 +169,7 @@ public class ElGamalEngine { public byte[] decrypt(byte encrypted[], PrivateKey privateKey) { if ((encrypted == null) || (encrypted.length > 514)) throw new IllegalArgumentException("Data to decrypt must be <= 514 bytes at the moment"); - long start = Clock.getInstance().now(); + long start = _context.clock().now(); byte[] ybytes = new byte[257]; byte[] dbytes = new byte[257]; @@ -196,10 +200,10 @@ public class ElGamalEngine { return null; } - Hash calcHash = SHA256Generator.getInstance().calculateHash(rv); + Hash calcHash = _context.sha().calculateHash(rv); boolean ok = calcHash.equals(hash); - long end = 
Clock.getInstance().now(); + long end = _context.clock().now(); long diff = end - start; if (diff > 1000) { @@ -207,7 +211,7 @@ public class ElGamalEngine { _log.warn("Took too long to decrypt and verify ElGamal block (" + diff + "ms)"); } - StatManager.getInstance().addRateData("crypto.elGamal.decrypt", diff, diff); + _context.statManager().addRateData("crypto.elGamal.decrypt", diff, diff); if (ok) { //_log.debug("Hash matches: " + DataHelper.toString(hash.getData(), hash.getData().length)); @@ -236,6 +240,7 @@ public class ElGamalEngine { } RandomSource.getInstance().nextBoolean(); + I2PAppContext context = new I2PAppContext(); System.out.println("Running " + numRuns + " times"); @@ -249,9 +254,9 @@ public class ElGamalEngine { byte buf[] = new byte[128]; RandomSource.getInstance().nextBytes(buf); long startE = Clock.getInstance().now(); - byte encr[] = ElGamalEngine.getInstance().encrypt(buf, pubkey); + byte encr[] = context.elGamalEngine().encrypt(buf, pubkey); long endE = Clock.getInstance().now(); - byte decr[] = ElGamalEngine.getInstance().decrypt(encr, privkey); + byte decr[] = context.elGamalEngine().decrypt(encr, privkey); long endD = Clock.getInstance().now(); eTime += endE - startE; dTime += endD - endE; @@ -259,8 +264,7 @@ public class ElGamalEngine { if (!DataHelper.eq(decr, buf)) { System.out.println("PublicKey : " + DataHelper.toString(pubkey.getData(), pubkey.getData().length)); - System.out.println("PrivateKey : " - + DataHelper.toString(privkey.getData(), privkey.getData().length)); + System.out.println("PrivateKey : " + DataHelper.toString(privkey.getData(), privkey.getData().length)); System.out.println("orig : " + DataHelper.toString(buf, buf.length)); System.out.println("d(e(orig) : " + DataHelper.toString(decr, decr.length)); System.out.println("orig.len : " + buf.length); diff --git a/core/java/src/net/i2p/crypto/HMACSHA256Generator.java b/core/java/src/net/i2p/crypto/HMACSHA256Generator.java index 62bbc1060..c6ff13cf5 100644 --- 
a/core/java/src/net/i2p/crypto/HMACSHA256Generator.java +++ b/core/java/src/net/i2p/crypto/HMACSHA256Generator.java @@ -3,28 +3,23 @@ package net.i2p.crypto; import net.i2p.data.DataHelper; import net.i2p.data.Hash; import net.i2p.data.SessionKey; +import net.i2p.I2PAppContext; /** * Calculate the HMAC-SHA256 of a key+message. Currently FAKE - returns a stupid * kludgy hash: H(H(key) XOR H(data)). Fix me! * */ -public abstract class HMACSHA256Generator { - private static HMACSHA256Generator _generator = new DummyHMACSHA256Generator(); - +public class HMACSHA256Generator { + public HMACSHA256Generator(I2PAppContext context) {}; public static HMACSHA256Generator getInstance() { - return _generator; + return I2PAppContext.getGlobalContext().hmac(); } - - public abstract Hash calculate(SessionKey key, byte data[]); -} - -/** - * jrandom smells. - * - */ - -class DummyHMACSHA256Generator extends HMACSHA256Generator { + + /** + * This should calculate the HMAC/SHA256, but it DOESNT. Its just a kludge. + * Fix me. 
+ */ public Hash calculate(SessionKey key, byte data[]) { if ((key == null) || (key.getData() == null) || (data == null)) throw new NullPointerException("Null arguments for HMAC"); diff --git a/core/java/src/net/i2p/crypto/KeyGenerator.java b/core/java/src/net/i2p/crypto/KeyGenerator.java index fecb44048..9327242fe 100644 --- a/core/java/src/net/i2p/crypto/KeyGenerator.java +++ b/core/java/src/net/i2p/crypto/KeyGenerator.java @@ -22,18 +22,24 @@ import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.NativeBigInteger; import net.i2p.util.RandomSource; +import net.i2p.I2PAppContext; /** Define a way of generating asymetrical key pairs as well as symetrical keys * @author jrandom */ public class KeyGenerator { - private final static Log _log = new Log(KeyGenerator.class); - private static final RandomSource _random = RandomSource.getInstance(); - private static KeyGenerator _generator = new KeyGenerator(); + private Log _log; + private I2PAppContext _context; - public static KeyGenerator getInstance() { - return _generator; + public KeyGenerator(I2PAppContext context) { + _log = context.logManager().getLog(KeyGenerator.class); + _context = context; } + public static KeyGenerator getInstance() { + return I2PAppContext.getGlobalContext().keyGenerator(); + } + + /** Generate a private 256 bit session key * @return session key @@ -42,7 +48,7 @@ public class KeyGenerator { // 256bit random # as a session key SessionKey key = new SessionKey(); byte data[] = new byte[SessionKey.KEYSIZE_BYTES]; - _random.nextBytes(data); + _context.random().nextBytes(data); key.setData(data); return key; } @@ -52,7 +58,7 @@ public class KeyGenerator { * @return pair of keys */ public Object[] generatePKIKeypair() { - BigInteger a = new NativeBigInteger(2048, _random); + BigInteger a = new NativeBigInteger(2048, _context.random()); BigInteger aalpha = CryptoConstants.elgg.modPow(a, CryptoConstants.elgp); Object[] keys = new Object[2]; @@ -80,7 +86,7 @@ public class 
KeyGenerator { // make sure the random key is less than the DSA q do { - x = new NativeBigInteger(160, _random); + x = new NativeBigInteger(160, _context.random()); } while (x.compareTo(CryptoConstants.dsaq) >= 0); BigInteger y = CryptoConstants.dsag.modPow(x, CryptoConstants.dsap); @@ -118,13 +124,14 @@ public class KeyGenerator { byte src[] = new byte[200]; RandomSource.getInstance().nextBytes(src); + I2PAppContext ctx = new I2PAppContext(); long time = 0; for (int i = 0; i < 10; i++) { long start = Clock.getInstance().now(); Object keys[] = KeyGenerator.getInstance().generatePKIKeypair(); long end = Clock.getInstance().now(); - byte ctext[] = ElGamalEngine.getInstance().encrypt(src, (PublicKey) keys[0]); - byte ptext[] = ElGamalEngine.getInstance().decrypt(ctext, (PrivateKey) keys[1]); + byte ctext[] = ctx.elGamalEngine().encrypt(src, (PublicKey) keys[0]); + byte ptext[] = ctx.elGamalEngine().decrypt(ctext, (PrivateKey) keys[1]); time += end - start; if (DataHelper.eq(ptext, src)) log.debug("D(E(data)) == data"); diff --git a/core/java/src/net/i2p/crypto/PersistentSessionKeyManager.java b/core/java/src/net/i2p/crypto/PersistentSessionKeyManager.java index 5c88e9edd..291002683 100644 --- a/core/java/src/net/i2p/crypto/PersistentSessionKeyManager.java +++ b/core/java/src/net/i2p/crypto/PersistentSessionKeyManager.java @@ -27,6 +27,7 @@ import net.i2p.data.PublicKey; import net.i2p.data.SessionKey; import net.i2p.data.SessionTag; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Expose the functionality to allow people to write out and read in the @@ -39,6 +40,19 @@ public class PersistentSessionKeyManager extends TransientSessionKeyManager { private Object _yk = YKGenerator.class; + + /** + * The session key manager should only be constructed and accessed through the + * application context. This constructor should only be used by the + * appropriate application context itself. 
+ * + */ + public PersistentSessionKeyManager(I2PAppContext context) { + super(context); + } + private PersistentSessionKeyManager() { + super(null); + } /** * Write the session key data to the given stream * @@ -146,7 +160,8 @@ public class PersistentSessionKeyManager extends TransientSessionKeyManager { } public static void main(String args[]) { - PersistentSessionKeyManager mgr = new PersistentSessionKeyManager(); + I2PAppContext ctx = new I2PAppContext(); + PersistentSessionKeyManager mgr = (PersistentSessionKeyManager)ctx.sessionKeyManager(); try { mgr.loadState(new FileInputStream("sessionKeys.dat")); String state = mgr.renderStatusHTML(); diff --git a/core/java/src/net/i2p/crypto/SHA256Generator.java b/core/java/src/net/i2p/crypto/SHA256Generator.java index 2ed42a12d..683c5d166 100644 --- a/core/java/src/net/i2p/crypto/SHA256Generator.java +++ b/core/java/src/net/i2p/crypto/SHA256Generator.java @@ -30,6 +30,7 @@ package net.i2p.crypto; */ import net.i2p.data.Hash; +import net.i2p.I2PAppContext; /** Defines a wrapper for SHA-256 operation * @@ -38,10 +39,9 @@ import net.i2p.data.Hash; * @author thecrypto,jrandom */ public class SHA256Generator { - private static SHA256Generator _generator = new SHA256Generator(); - + public SHA256Generator(I2PAppContext context) {}; public static SHA256Generator getInstance() { - return _generator; + return I2PAppContext.getGlobalContext().sha(); } static int[] K = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, diff --git a/core/java/src/net/i2p/crypto/SessionKeyManager.java b/core/java/src/net/i2p/crypto/SessionKeyManager.java index cdbd91152..ee3073041 100644 --- a/core/java/src/net/i2p/crypto/SessionKeyManager.java +++ b/core/java/src/net/i2p/crypto/SessionKeyManager.java @@ -14,6 +14,7 @@ import java.util.Set; import net.i2p.data.PublicKey; import net.i2p.data.SessionKey; import net.i2p.data.SessionTag; +import net.i2p.I2PAppContext; /** * Manage the session keys and 
session tags used for encryption and decryption. @@ -23,12 +24,11 @@ import net.i2p.data.SessionTag; * */ public class SessionKeyManager { - private final static SessionKeyManager _instance = new PersistentSessionKeyManager(); // new TransientSessionKeyManager(); // SessionKeyManager(); - - public final static SessionKeyManager getInstance() { - return _instance; - } - + /** session key managers must be created through an app context */ + protected SessionKeyManager(I2PAppContext context) {} + /** see above */ + private SessionKeyManager() {} + /** * Retrieve the session key currently associated with encryption to the target, * or null if a new session key should be generated. diff --git a/core/java/src/net/i2p/crypto/TransientSessionKeyManager.java b/core/java/src/net/i2p/crypto/TransientSessionKeyManager.java index 33109e6d0..72e06fa8a 100644 --- a/core/java/src/net/i2p/crypto/TransientSessionKeyManager.java +++ b/core/java/src/net/i2p/crypto/TransientSessionKeyManager.java @@ -18,6 +18,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import net.i2p.I2PAppContext; import net.i2p.data.DataHelper; import net.i2p.data.PublicKey; import net.i2p.data.SessionKey; @@ -52,11 +53,18 @@ class TransientSessionKeyManager extends SessionKeyManager { public final static long SESSION_LIFETIME_MAX_MS = SESSION_TAG_DURATION_MS + 5 * 60 * 1000; public final static int MAX_INBOUND_SESSION_TAGS = 100 * 1000; // this will consume at most 3.2M - public TransientSessionKeyManager() { - super(); + /** + * The session key manager should only be constructed and accessed through the + * application context. This constructor should only be used by the + * appropriate application context itself. 
+ * + */ + public TransientSessionKeyManager(I2PAppContext context) { + super(context); _outboundSessions = new HashMap(64); _inboundTagSets = new HashMap(1024); } + private TransientSessionKeyManager() { super(null); } /** TagSet */ protected Set getInboundTagSets() { diff --git a/core/java/src/net/i2p/crypto/YKGenerator.java b/core/java/src/net/i2p/crypto/YKGenerator.java index c97a76e48..d5be31834 100644 --- a/core/java/src/net/i2p/crypto/YKGenerator.java +++ b/core/java/src/net/i2p/crypto/YKGenerator.java @@ -18,6 +18,7 @@ import net.i2p.util.I2PThread; import net.i2p.util.Log; import net.i2p.util.NativeBigInteger; import net.i2p.util.RandomSource; +import net.i2p.I2PAppContext; /** * Precalculate the Y and K for ElGamal encryption operations. @@ -56,22 +57,23 @@ class YKGenerator { private final static long CHECK_DELAY = 30 * 1000; static { + I2PAppContext ctx = I2PAppContext.getGlobalContext(); try { - int val = Integer.parseInt(System.getProperty(PROP_YK_PRECALC_MIN, DEFAULT_YK_PRECALC_MIN)); + int val = Integer.parseInt(ctx.getProperty(PROP_YK_PRECALC_MIN, DEFAULT_YK_PRECALC_MIN)); MIN_NUM_BUILDERS = val; } catch (Throwable t) { int val = Integer.parseInt(DEFAULT_YK_PRECALC_MIN); MIN_NUM_BUILDERS = val; } try { - int val = Integer.parseInt(System.getProperty(PROP_YK_PRECALC_MAX, DEFAULT_YK_PRECALC_MAX)); + int val = Integer.parseInt(ctx.getProperty(PROP_YK_PRECALC_MAX, DEFAULT_YK_PRECALC_MAX)); MAX_NUM_BUILDERS = val; } catch (Throwable t) { int val = Integer.parseInt(DEFAULT_YK_PRECALC_MAX); MAX_NUM_BUILDERS = val; } try { - int val = Integer.parseInt(System.getProperty(PROP_YK_PRECALC_DELAY, DEFAULT_YK_PRECALC_DELAY)); + int val = Integer.parseInt(ctx.getProperty(PROP_YK_PRECALC_DELAY, DEFAULT_YK_PRECALC_DELAY)); CALC_DELAY = val; } catch (Throwable t) { int val = Integer.parseInt(DEFAULT_YK_PRECALC_DELAY); diff --git a/core/java/src/net/i2p/data/DataHelper.java b/core/java/src/net/i2p/data/DataHelper.java index 78dfe0485..8a13ebb09 100644 --- 
a/core/java/src/net/i2p/data/DataHelper.java +++ b/core/java/src/net/i2p/data/DataHelper.java @@ -35,7 +35,6 @@ import net.i2p.util.OrderedProperties; * @author jrandom */ public class DataHelper { - private final static Log _log = new Log(DataHelper.class); private final static String _equal = "="; // in UTF-8 private final static String _semicolon = ";"; // in UTF-8 @@ -56,7 +55,8 @@ public class DataHelper { * @throws IOException if there is a problem reading the data * @return mapping */ - public static Properties readProperties(InputStream rawStream) throws DataFormatException, IOException { + public static Properties readProperties(InputStream rawStream) + throws DataFormatException, IOException { Properties props = new OrderedProperties(); long size = readLong(rawStream, 2); byte data[] = new byte[(int) size]; @@ -65,24 +65,18 @@ public class DataHelper { ByteArrayInputStream in = new ByteArrayInputStream(data); byte eqBuf[] = _equal.getBytes(); byte semiBuf[] = _semicolon.getBytes(); - try { - while (in.available() > 0) { - String key = readString(in); - read = read(in, eqBuf); - if ((read != eqBuf.length) || (!eq(new String(eqBuf), _equal))) { - _log.debug("Failed eqtest [" + new String(eqBuf) + "]"); - break; - } - String val = readString(in); - read = read(in, semiBuf); - if ((read != semiBuf.length) || (!eq(new String(semiBuf), _semicolon))) { - _log.debug("Failed semitest [" + new String(semiBuf) + "]"); - break; - } - props.put(key, val); + while (in.available() > 0) { + String key = readString(in); + read = read(in, eqBuf); + if ((read != eqBuf.length) || (!eq(new String(eqBuf), _equal))) { + break; } - } catch (IOException ioe) { - _log.warn("Error reading properties", ioe); + String val = readString(in); + read = read(in, semiBuf); + if ((read != semiBuf.length) || (!eq(new String(semiBuf), _semicolon))) { + break; + } + props.put(key, val); } return props; } @@ -96,8 +90,8 @@ public class DataHelper { * @throws DataFormatException if there is not 
enough valid data to write out * @throws IOException if there is an IO error writing out the data */ - public static void writeProperties(OutputStream rawStream, Properties props) throws DataFormatException, - IOException { + public static void writeProperties(OutputStream rawStream, Properties props) + throws DataFormatException, IOException { OrderedProperties p = new OrderedProperties(); if (props != null) p.putAll(props); ByteArrayOutputStream baos = new ByteArrayOutputStream(32); @@ -204,10 +198,10 @@ public class DataHelper { * @throws IOException if there is an IO error reading the number * @return number */ - public static long readLong(InputStream rawStream, int numBytes) throws DataFormatException, IOException { + public static long readLong(InputStream rawStream, int numBytes) + throws DataFormatException, IOException { if (numBytes > 8) - throw new DataFormatException( - "readLong doesn't currently support reading numbers > 8 bytes [as thats bigger than java's long]"); + throw new DataFormatException("readLong doesn't currently support reading numbers > 8 bytes [as thats bigger than java's long]"); byte data[] = new byte[numBytes]; int num = read(rawStream, data); if (num != numBytes) @@ -225,8 +219,8 @@ public class DataHelper { * @throws DataFormatException if the stream doesn't contain a validly formatted number of that many bytes * @throws IOException if there is an IO error writing to the stream */ - public static void writeLong(OutputStream rawStream, int numBytes, long value) throws DataFormatException, - IOException { + public static void writeLong(OutputStream rawStream, int numBytes, long value) + throws DataFormatException, IOException { UnsignedInteger i = new UnsignedInteger(value); rawStream.write(i.getBytes(numBytes)); } @@ -254,7 +248,8 @@ public class DataHelper { * @throws DataFormatException if the date is not valid * @throws IOException if there is an IO error writing the date */ - public static void writeDate(OutputStream out, Date 
date) throws DataFormatException, IOException { + public static void writeDate(OutputStream out, Date date) + throws DataFormatException, IOException { if (date == null) writeLong(out, 8, 0L); else @@ -286,7 +281,8 @@ public class DataHelper { * @throws DataFormatException if the string is not valid * @throws IOException if there is an IO error writing the string */ - public static void writeString(OutputStream out, String string) throws DataFormatException, IOException { + public static void writeString(OutputStream out, String string) + throws DataFormatException, IOException { if (string == null) { writeLong(out, 1, 0); } else { @@ -328,7 +324,8 @@ public class DataHelper { * @throws DataFormatException if the boolean is not valid * @throws IOException if there is an IO error writing the boolean */ - public static void writeBoolean(OutputStream out, Boolean bool) throws DataFormatException, IOException { + public static void writeBoolean(OutputStream out, Boolean bool) + throws DataFormatException, IOException { if (bool == null) writeLong(out, 1, 2); else if (Boolean.TRUE.equals(bool)) @@ -353,7 +350,6 @@ public class DataHelper { boolean eq = (((lhs == null) && (rhs == null)) || ((lhs != null) && (lhs.equals(rhs)))); return eq; } catch (ClassCastException cce) { - _log.warn("Error comparing [" + lhs + "] with [" + rhs + "]", cce); return false; } } @@ -542,12 +538,12 @@ public class DataHelper { out.finish(); out.flush(); byte rv[] = baos.toByteArray(); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Compression of " + orig.length + " into " + rv.length + " (or " + 100.0d - * (((double) orig.length) / ((double) rv.length)) + "% savings)"); + //if (_log.shouldLog(Log.DEBUG)) + // _log.debug("Compression of " + orig.length + " into " + rv.length + " (or " + 100.0d + // * (((double) orig.length) / ((double) rv.length)) + "% savings)"); return rv; } catch (IOException ioe) { - _log.error("Error compressing?!", ioe); + //_log.error("Error compressing?!", ioe); 
return null; } } @@ -565,12 +561,12 @@ public class DataHelper { baos.write(buf, 0, read); } byte rv[] = baos.toByteArray(); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Decompression of " + orig.length + " into " + rv.length + " (or " + 100.0d - * (((double) rv.length) / ((double) orig.length)) + "% savings)"); + //if (_log.shouldLog(Log.DEBUG)) + // _log.debug("Decompression of " + orig.length + " into " + rv.length + " (or " + 100.0d + // * (((double) rv.length) / ((double) orig.length)) + "% savings)"); return rv; } catch (IOException ioe) { - _log.error("Error decompressing?", ioe); + //_log.error("Error decompressing?", ioe); return null; } } diff --git a/core/java/src/net/i2p/data/RoutingKeyGenerator.java b/core/java/src/net/i2p/data/RoutingKeyGenerator.java index 5b549e2b8..b0f270c89 100644 --- a/core/java/src/net/i2p/data/RoutingKeyGenerator.java +++ b/core/java/src/net/i2p/data/RoutingKeyGenerator.java @@ -19,6 +19,7 @@ import net.i2p.crypto.SHA256Generator; import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.I2PAppContext; /** * Component to manage the munging of hashes into routing keys - given a hash, @@ -40,12 +41,17 @@ import net.i2p.util.RandomSource; * */ public class RoutingKeyGenerator { - private final static RoutingKeyGenerator _instance = new RoutingKeyGenerator(); + private Log _log; + private I2PAppContext _context; - public static RoutingKeyGenerator getInstance() { - return _instance; + public RoutingKeyGenerator(I2PAppContext context) { + _log = context.logManager().getLog(RoutingKeyGenerator.class); + _context = context; } - private final static Log _log = new Log(RoutingKeyGenerator.class); + public static RoutingKeyGenerator getInstance() { + return I2PAppContext.getGlobalContext().routingKeyGenerator(); + } + private byte _currentModData[]; private final static Calendar _cal = GregorianCalendar.getInstance(TimeZone.getTimeZone("GMT")); @@ -67,7 +73,7 @@ public class 
RoutingKeyGenerator { public void generateDateBasedModData() { Date today = null; synchronized (_cal) { - _cal.setTime(new Date(Clock.getInstance().now())); + _cal.setTime(new Date(_context.clock().now())); _cal.set(Calendar.HOUR_OF_DAY, 0); _cal.set(Calendar.MINUTE, 0); _cal.set(Calendar.SECOND, 0); diff --git a/core/java/src/net/i2p/stat/SimpleStatDumper.java b/core/java/src/net/i2p/stat/SimpleStatDumper.java index f4cecd395..e53edcaec 100644 --- a/core/java/src/net/i2p/stat/SimpleStatDumper.java +++ b/core/java/src/net/i2p/stat/SimpleStatDumper.java @@ -6,24 +6,25 @@ import java.util.Set; import java.util.TreeSet; import net.i2p.util.Log; +import net.i2p.I2PAppContext; public class SimpleStatDumper { private final static Log _log = new Log(SimpleStatDumper.class); - public static void dumpStats(int logLevel) { + public static void dumpStats(I2PAppContext context, int logLevel) { if (!_log.shouldLog(logLevel)) return; StringBuffer buf = new StringBuffer(4 * 1024); - dumpFrequencies(buf); - dumpRates(buf); + dumpFrequencies(context, buf); + dumpRates(context, buf); _log.log(logLevel, buf.toString()); } - private static void dumpFrequencies(StringBuffer buf) { - Set frequencies = new TreeSet(StatManager.getInstance().getFrequencyNames()); + private static void dumpFrequencies(I2PAppContext ctx, StringBuffer buf) { + Set frequencies = new TreeSet(ctx.statManager().getFrequencyNames()); for (Iterator iter = frequencies.iterator(); iter.hasNext();) { String name = (String) iter.next(); - FrequencyStat freq = StatManager.getInstance().getFrequency(name); + FrequencyStat freq = ctx.statManager().getFrequency(name); buf.append('\n'); buf.append(freq.getGroupName()).append('.').append(freq.getName()).append(": ") .append(freq.getDescription()).append('\n'); @@ -39,11 +40,11 @@ public class SimpleStatDumper { } } - private static void dumpRates(StringBuffer buf) { - Set rates = new TreeSet(StatManager.getInstance().getRateNames()); + private static void 
dumpRates(I2PAppContext ctx, StringBuffer buf) { + Set rates = new TreeSet(ctx.statManager().getRateNames()); for (Iterator iter = rates.iterator(); iter.hasNext();) { String name = (String) iter.next(); - RateStat rate = StatManager.getInstance().getRate(name); + RateStat rate = ctx.statManager().getRate(name); buf.append('\n'); buf.append(rate.getGroupName()).append('.').append(rate.getName()).append(": ") .append(rate.getDescription()).append('\n'); diff --git a/core/java/src/net/i2p/stat/StatManager.java b/core/java/src/net/i2p/stat/StatManager.java index 1ef3f9c61..72ea06823 100644 --- a/core/java/src/net/i2p/stat/StatManager.java +++ b/core/java/src/net/i2p/stat/StatManager.java @@ -10,6 +10,7 @@ import java.util.TreeMap; import java.util.TreeSet; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Coordinate the management of various frequencies and rates within I2P components, @@ -19,18 +20,23 @@ import net.i2p.util.Log; * */ public class StatManager { - private final static Log _log = new Log(StatManager.class); - private final static StatManager _instance = new StatManager(); + private Log _log; + private I2PAppContext _context; - public final static StatManager getInstance() { - return _instance; - } /** stat name to FrequencyStat */ private Map _frequencyStats; /** stat name to RateStat */ private Map _rateStats; - private StatManager() { + /** + * The stat manager should only be constructed and accessed through the + * application context. This constructor should only be used by the + * appropriate application context itself. 
+ * + */ + public StatManager(I2PAppContext context) { + _log = context.logManager().getLog(StatManager.class); + _context = context; _frequencyStats = Collections.synchronizedMap(new HashMap(128)); _rateStats = Collections.synchronizedMap(new HashMap(128)); } @@ -44,6 +50,7 @@ public class StatManager { * @param periods array of period lengths (in milliseconds) */ public void createFrequencyStat(String name, String description, String group, long periods[]) { + if (_frequencyStats.containsKey(name)) return; _frequencyStats.put(name, new FrequencyStat(name, description, group, periods)); } @@ -56,6 +63,7 @@ public class StatManager { * @param periods array of period lengths (in milliseconds) */ public void createRateStat(String name, String description, String group, long periods[]) { + if (_rateStats.containsKey(name)) return; _rateStats.put(name, new RateStat(name, description, group, periods)); } diff --git a/core/java/src/net/i2p/util/Clock.java b/core/java/src/net/i2p/util/Clock.java index 9cd2ca368..834834fc1 100644 --- a/core/java/src/net/i2p/util/Clock.java +++ b/core/java/src/net/i2p/util/Clock.java @@ -4,6 +4,8 @@ import java.util.HashSet; import java.util.Iterator; import java.util.Set; +import net.i2p.I2PAppContext; + /** * Alternate location for determining the time which takes into account an offset. 
* This offset will ideally be periodically updated so as to serve as the difference @@ -12,12 +14,20 @@ import java.util.Set; * */ public class Clock { - private final static Log _log = new Log(Clock.class); - private final static Clock _instance = new Clock(); - - public final static Clock getInstance() { - return _instance; + private I2PAppContext _context; + public Clock(I2PAppContext context) { + _context = context; + _offset = 0; + _alreadyChanged = false; + _listeners = new HashSet(64); } + public static Clock getInstance() { + return I2PAppContext.getGlobalContext().clock(); + } + + /** we fetch it on demand to avoid circular dependencies (logging uses the clock) */ + private Log getLog() { return _context.logManager().getLog(Clock.class); } + private volatile long _offset; private boolean _alreadyChanged; private Set _listeners; @@ -27,12 +37,6 @@ public class Clock { /** if the clock skewed changes by less than 1s, ignore the update (so we don't slide all over the place) */ public final static long MIN_OFFSET_CHANGE = 30 * 1000; - private Clock() { - _offset = 0; - _alreadyChanged = false; - _listeners = new HashSet(64); - } - /** * Specify how far away from the "correct" time the computer is - a positive * value means that we are slow, while a negative value means we are fast. 
@@ -40,18 +44,18 @@ public class Clock { */ public void setOffset(long offsetMs) { if ((offsetMs > MAX_OFFSET) || (offsetMs < 0 - MAX_OFFSET)) { - _log.error("Maximum offset shift exceeded [" + offsetMs + "], NOT HONORING IT"); + getLog().error("Maximum offset shift exceeded [" + offsetMs + "], NOT HONORING IT"); return; } long delta = offsetMs - _offset; if ((delta < MIN_OFFSET_CHANGE) && (delta > 0 - MIN_OFFSET_CHANGE)) { - _log.debug("Not changing offset since it is only " + delta + "ms"); + getLog().debug("Not changing offset since it is only " + delta + "ms"); return; } if (_alreadyChanged) - _log.log(Log.CRIT, "Updating clock offset to " + offsetMs + "ms from " + _offset + "ms"); + getLog().log(Log.CRIT, "Updating clock offset to " + offsetMs + "ms from " + _offset + "ms"); else - _log.log(Log.INFO, "Initializing clock offset to " + offsetMs + "ms from " + _offset + "ms"); + getLog().log(Log.INFO, "Initializing clock offset to " + offsetMs + "ms from " + _offset + "ms"); _alreadyChanged = true; _offset = offsetMs; fireOffsetChanged(delta); diff --git a/core/java/src/net/i2p/util/Log.java b/core/java/src/net/i2p/util/Log.java index ab2dfe94a..151ccd478 100644 --- a/core/java/src/net/i2p/util/Log.java +++ b/core/java/src/net/i2p/util/Log.java @@ -9,6 +9,9 @@ package net.i2p.util; * */ +import net.i2p.data.DataHelper; +import net.i2p.I2PAppContext; + /** * Wrapper class for whatever logging system I2P uses. 
This class should be * instantiated and kept as a variable for each class it is used by, ala: @@ -24,6 +27,8 @@ public class Log { private Class _class; private String _name; private int _minPriority; + private LogScope _scope; + private LogManager _manager; public final static int DEBUG = 10; public final static int INFO = 20; @@ -65,33 +70,46 @@ public class Log { } public Log(Class cls) { - this(cls, null); + this(I2PAppContext.getGlobalContext().logManager(), cls, null); + _manager.addLog(this); } public Log(String name) { - this(null, name); + this(I2PAppContext.getGlobalContext().logManager(), null, name); + _manager.addLog(this); } - public Log(Class cls, String name) { + Log(LogManager manager, Class cls) { + this(manager, cls, null); + } + + Log(LogManager manager, String name) { + this(manager, null, name); + } + + Log(LogManager manager, Class cls, String name) { + _manager = manager; _class = cls; _name = name; _minPriority = DEBUG; - LogManager.getInstance().registerLog(this); + _scope = new LogScope(name, cls); + //_manager.addRecord(new LogRecord(Log.class, null, Thread.currentThread().getName(), Log.DEBUG, + // "Log created with manager " + manager + " for class " + cls, null)); } public void log(int priority, String msg) { if (priority >= _minPriority) { - LogManager.getInstance().addRecord( - new LogRecord(_class, _name, Thread.currentThread().getName(), priority, - msg, null)); + _manager.addRecord(new LogRecord(_class, _name, + Thread.currentThread().getName(), priority, + msg, null)); } } public void log(int priority, String msg, Throwable t) { if (priority >= _minPriority) { - LogManager.getInstance().addRecord( - new LogRecord(_class, _name, Thread.currentThread().getName(), priority, - msg, t)); + _manager.addRecord(new LogRecord(_class, _name, + Thread.currentThread().getName(), priority, + msg, t)); } } @@ -133,6 +151,9 @@ public class Log { public void setMinimumPriority(int priority) { _minPriority = priority; + //_manager.addRecord(new 
LogRecord(Log.class, null, Thread.currentThread().getName(), Log.DEBUG, + // "Log with manager " + _manager + " for class " + _class + // + " new priority " + toLevelString(priority), null)); } public boolean shouldLog(int priority) { @@ -145,5 +166,32 @@ public class Log { else return _name; } - + + public Object getScope() { return _scope; } + private static final class LogScope { + private String _scopeName; + private Class _scopeClass; + public LogScope(String name, Class cls) { + _scopeName = name; + _scopeClass = cls; + } + public int hashCode() { + if (_scopeClass != null) + return _scopeClass.hashCode(); + else if (_scopeName != null) + return _scopeName.hashCode(); + else + return 42; + } + public boolean equals(Object obj) { + if (obj == null) throw new NullPointerException("Null object scope?"); + if (obj instanceof LogScope) { + LogScope s = (LogScope)obj; + return DataHelper.eq(s._scopeName, _scopeName) && + DataHelper.eq(s._scopeClass, _scopeClass); + } else { + return false; + } + } + } } \ No newline at end of file diff --git a/core/java/src/net/i2p/util/LogConsoleBuffer.java b/core/java/src/net/i2p/util/LogConsoleBuffer.java index c5b4e88c1..296034823 100644 --- a/core/java/src/net/i2p/util/LogConsoleBuffer.java +++ b/core/java/src/net/i2p/util/LogConsoleBuffer.java @@ -1,26 +1,24 @@ package net.i2p.util; -import java.util.LinkedList; +import java.util.ArrayList; import java.util.List; +import net.i2p.I2PAppContext; /** * Offer a glimpse into the last few console messages generated * */ public class LogConsoleBuffer { - private final static LogConsoleBuffer _instance = new LogConsoleBuffer(); - - public final static LogConsoleBuffer getInstance() { - return _instance; - } + private I2PAppContext _context; private List _buffer; - private LogConsoleBuffer() { - _buffer = new LinkedList(); + public LogConsoleBuffer(I2PAppContext context) { + _context = context; + _buffer = new ArrayList(); } void add(String msg) { - int lim = 
LogManager.getInstance().getConsoleBufferSize(); + int lim = _context.logManager().getConsoleBufferSize(); synchronized (_buffer) { while (_buffer.size() >= lim) _buffer.remove(0); @@ -36,7 +34,7 @@ public class LogConsoleBuffer { */ public List getMostRecentMessages() { synchronized (_buffer) { - return new LinkedList(_buffer); + return new ArrayList(_buffer); } } } \ No newline at end of file diff --git a/core/java/src/net/i2p/util/LogManager.java b/core/java/src/net/i2p/util/LogManager.java index 55f1104e2..01aee4614 100644 --- a/core/java/src/net/i2p/util/LogManager.java +++ b/core/java/src/net/i2p/util/LogManager.java @@ -19,6 +19,10 @@ import java.util.Iterator; import java.util.List; import java.util.Properties; import java.util.Set; +import java.util.Map; +import java.util.HashMap; + +import net.i2p.I2PAppContext; /** * Manages the logging system, loading (and reloading) the configuration file, @@ -31,14 +35,6 @@ public class LogManager { public final static String CONFIG_LOCATION_PROP = "loggerConfigLocation"; public final static String FILENAME_OVERRIDE_PROP = "loggerFilenameOverride"; public final static String CONFIG_LOCATION_DEFAULT = "logger.config"; - - public static final LogManager getInstance() { - return _instance; - } - private static final LogManager _instance = new LogManager(System.getProperty(CONFIG_LOCATION_PROP, - CONFIG_LOCATION_DEFAULT)); - private static final Log _log = new Log(LogManager.class); - /** * These define the characters in the format line of the config file */ @@ -65,12 +61,15 @@ public class LogManager { public final static String DEFAULT_DEFALTLEVEL = Log.STR_DEBUG; public final static String DEFAULT_ONSCREENLEVEL = Log.STR_DEBUG; + private I2PAppContext _context; + private Log _log; + private long _configLastRead; private String _location; private List _records; private Set _limits; - private Set _logs; + private Map _logs; private LogWriter _writer; private int _defaultLimit; @@ -83,7 +82,59 @@ public class LogManager { 
private boolean _displayOnScreen; private int _consoleBufferSize; + + private LogConsoleBuffer _consoleBuffer; + public LogManager(I2PAppContext context) { + _displayOnScreen = true; + _records = new ArrayList(); + _limits = new HashSet(); + _logs = new HashMap(128); + _defaultLimit = Log.DEBUG; + _configLastRead = 0; + _location = context.getProperty(CONFIG_LOCATION_PROP, CONFIG_LOCATION_DEFAULT); + _context = context; + _log = getLog(LogManager.class); + _consoleBuffer = new LogConsoleBuffer(context); + loadConfig(); + _writer = new LogWriter(this); + Thread t = new I2PThread(_writer); + t.setName("LogWriter"); + t.setDaemon(true); + t.start(); + Runtime.getRuntime().addShutdownHook(new ShutdownHook()); + System.out.println("Created logManager " + this + " with context: " + context); + } + private LogManager() {} + + public Log getLog(Class cls) { return getLog(cls, null); } + public Log getLog(String name) { return getLog(null, name); } + public Log getLog(Class cls, String name) { + Log rv = null; + synchronized (_logs) { + Log newLog = new Log(this, cls, name); + if (_logs.containsKey(newLog.getScope())) { + Log oldLog = (Log)_logs.get(newLog.getScope()); + rv = oldLog; + //_log.error("Duplicate log creation for " + cls); + } else { + _logs.put(newLog.getScope(), newLog); + rv = newLog; + } + } + updateLimit(rv); + return rv; + } + void addLog(Log log) { + synchronized (_logs) { + if (!_logs.containsKey(log.getScope())) + _logs.put(log.getScope(), log); + } + updateLimit(log); + } + + public LogConsoleBuffer getBuffer() { return _consoleBuffer; } + public void setDisplayOnScreen(boolean yes) { _displayOnScreen = yes; } @@ -123,18 +174,7 @@ public class LogManager { _records.add(record); } } - - /** - * Called during Log construction - * - */ - void registerLog(Log log) { - synchronized (_logs) { - _logs.add(log); - } - updateLimit(log); - } - + /** * Called periodically by the log writer's thread * @@ -148,23 +188,6 @@ public class LogManager { /// /// - 
private LogManager(String location) { - _displayOnScreen = true; - _location = location; - _records = new ArrayList(); - _limits = new HashSet(); - _logs = new HashSet(); - _defaultLimit = Log.DEBUG; - _configLastRead = 0; - loadConfig(); - _writer = new LogWriter(); - Thread t = new I2PThread(_writer); - t.setName("LogWriter"); - t.setDaemon(true); - t.start(); - Runtime.getRuntime().addShutdownHook(new ShutdownHook()); - } - // // // @@ -175,6 +198,8 @@ public class LogManager { if ((_configLastRead > 0) && (_configLastRead > cfgFile.lastModified())) { _log.debug("Short circuiting config read"); return; + } else { + _log.debug("Loading config from " + _location); } FileInputStream fis = null; try { @@ -212,7 +237,7 @@ public class LogManager { _displayOnScreen = false; } - String filenameOverride = System.getProperty(FILENAME_OVERRIDE_PROP); + String filenameOverride = _context.getProperty(FILENAME_OVERRIDE_PROP); if (filenameOverride != null) _baseLogfilename = filenameOverride; else @@ -297,11 +322,11 @@ public class LogManager { } private void updateLimits() { - Set logs = new HashSet(); + Map logs = null; synchronized (_logs) { - logs.addAll(_logs); + logs = new HashMap(_logs); } - for (Iterator iter = logs.iterator(); iter.hasNext();) { + for (Iterator iter = logs.values().iterator(); iter.hasNext();) { Log log = (Log) iter.next(); updateLimit(log); } @@ -322,10 +347,13 @@ public class LogManager { } } } - if (max != null) + if (max != null) { log.setMinimumPriority(max.getLimit()); - else + } else { + //if (_log != null) + // _log.debug("The log for " + log.getClass() + " has no matching limits"); log.setMinimumPriority(_defaultLimit); + } } private List getLimits(Log log) { @@ -373,10 +401,11 @@ public class LogManager { } public static void main(String args[]) { - Log l1 = new Log("test.1"); - Log l2 = new Log("test.2"); - Log l21 = new Log("test.2.1"); - Log l = new Log("test"); + I2PAppContext ctx = new I2PAppContext(); + Log l1 = 
ctx.logManager().getLog("test.1"); + Log l2 = ctx.logManager().getLog("test.2"); + Log l21 = ctx.logManager().getLog("test.2.1"); + Log l = ctx.logManager().getLog("test"); l.debug("this should fail"); l.info("this should pass"); l1.warn("this should pass"); diff --git a/core/java/src/net/i2p/util/LogRecordFormatter.java b/core/java/src/net/i2p/util/LogRecordFormatter.java index b186e7e10..52ce0426e 100644 --- a/core/java/src/net/i2p/util/LogRecordFormatter.java +++ b/core/java/src/net/i2p/util/LogRecordFormatter.java @@ -26,13 +26,13 @@ class LogRecordFormatter { private final static int MAX_THREAD_LENGTH = 12; private final static int MAX_PRIORITY_LENGTH = 5; - public static String formatRecord(LogRecord rec) { + public static String formatRecord(LogManager manager, LogRecord rec) { StringBuffer buf = new StringBuffer(); - char format[] = LogManager.getInstance()._getFormat(); + char format[] = manager._getFormat(); for (int i = 0; i < format.length; ++i) { switch ((int) format[i]) { case (int) LogManager.DATE: - buf.append(getWhen(rec)); + buf.append(getWhen(manager, rec)); break; case (int) LogManager.CLASS: buf.append(getWhere(rec)); @@ -71,8 +71,8 @@ class LogRecordFormatter { return toString(logRecord.getThreadName(), MAX_THREAD_LENGTH); } - private static String getWhen(LogRecord logRecord) { - return LogManager.getInstance()._getDateFormat().format(new Date(logRecord.getDate())); + private static String getWhen(LogManager manager, LogRecord logRecord) { + return manager._getDateFormat().format(new Date(logRecord.getDate())); } private static String getPriority(LogRecord rec) { diff --git a/core/java/src/net/i2p/util/LogWriter.java b/core/java/src/net/i2p/util/LogWriter.java index 19eca261a..1dd472aac 100644 --- a/core/java/src/net/i2p/util/LogWriter.java +++ b/core/java/src/net/i2p/util/LogWriter.java @@ -29,8 +29,14 @@ class LogWriter implements Runnable { private int _rotationNum = -1; private String _logFilenamePattern; private File _currentFile; + 
private LogManager _manager; private boolean _write; + + private LogWriter() {} + public LogWriter(LogManager manager) { + _manager = manager; + } public void stopWriting() { _write = false; @@ -46,7 +52,7 @@ class LogWriter implements Runnable { public void flushRecords() { try { - List records = LogManager.getInstance()._removeAll(); + List records = _manager._removeAll(); for (int i = 0; i < records.size(); i++) { LogRecord rec = (LogRecord) records.get(i); writeRecord(rec); @@ -68,19 +74,19 @@ class LogWriter implements Runnable { } long now = Clock.getInstance().now(); if (now - _lastReadConfig > CONFIG_READ_ITERVAL) { - LogManager.getInstance().rereadConfig(); + _manager.rereadConfig(); _lastReadConfig = now; } } private void writeRecord(LogRecord rec) { - String val = LogRecordFormatter.formatRecord(rec); + String val = LogRecordFormatter.formatRecord(_manager, rec); writeRecord(val); - if (LogManager.getInstance().getDisplayOnScreenLevel() <= rec.getPriority()) { + if (_manager.getDisplayOnScreenLevel() <= rec.getPriority()) { // we always add to the console buffer, but only sometimes write to stdout - LogConsoleBuffer.getInstance().add(val); - if (LogManager.getInstance().displayOnScreen()) { + _manager.getBuffer().add(val); + if (_manager.displayOnScreen()) { System.out.print(val); } } @@ -98,7 +104,7 @@ class LogWriter implements Runnable { System.err.println("Error writing record, disk full?"); t.printStackTrace(); } - if (_numBytesInCurrentFile >= LogManager.getInstance()._getFileSize()) { + if (_numBytesInCurrentFile >= _manager._getFileSize()) { rotateFile(); } } @@ -108,7 +114,7 @@ class LogWriter implements Runnable { * */ private void rotateFile() { - String pattern = LogManager.getInstance()._getBaseLogfilename(); + String pattern = _manager._getBaseLogfilename(); File f = getNextFile(pattern); _currentFile = f; _numBytesInCurrentFile = 0; @@ -129,7 +135,7 @@ class LogWriter implements Runnable { if (pattern.indexOf('#') < 0) { return new 
File(pattern); } else { - int max = LogManager.getInstance()._getRotationLimit(); + int max = _manager._getRotationLimit(); if (_rotationNum == -1) { return getFirstFile(pattern, max); } else { diff --git a/core/java/src/net/i2p/util/RandomSource.java b/core/java/src/net/i2p/util/RandomSource.java index 3929bac1b..648d0a69f 100644 --- a/core/java/src/net/i2p/util/RandomSource.java +++ b/core/java/src/net/i2p/util/RandomSource.java @@ -10,6 +10,7 @@ package net.i2p.util; */ import java.security.SecureRandom; +import net.i2p.I2PAppContext; /** * Singleton for whatever PRNG i2p uses. @@ -17,14 +18,14 @@ import java.security.SecureRandom; * @author jrandom */ public class RandomSource extends SecureRandom { - private final static RandomSource _random = new RandomSource(); + private Log _log; - private RandomSource() { + public RandomSource(I2PAppContext context) { super(); + _log = context.logManager().getLog(RandomSource.class); } - public static RandomSource getInstance() { - return _random; + return I2PAppContext.getGlobalContext().random(); } /** diff --git a/core/java/test/net/i2p/crypto/AES256Bench.java b/core/java/test/net/i2p/crypto/AES256Bench.java index 05718e4f2..60cce7cdf 100644 --- a/core/java/test/net/i2p/crypto/AES256Bench.java +++ b/core/java/test/net/i2p/crypto/AES256Bench.java @@ -1,138 +1,140 @@ package net.i2p.crypto; -/* +/* * Copyright (c) 2003, TheCrypto * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without + * + * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: - * - * - Redistributions of source code must retain the above copyright notice, this + * + * - Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. 
- * - Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation + * - Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. - * - Neither the name of the TheCrypto may be used to endorse or promote - * products derived from this software without specific prior written + * - Neither the name of the TheCrypto may be used to endorse or promote + * products derived from this software without specific prior written * permission. * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ import net.i2p.data.SessionKey; import net.i2p.data.DataHelper; +import net.i2p.I2PAppContext; public class AES256Bench { - + private static I2PAppContext _context = new I2PAppContext(); + public static void main(String args[]) { - char[] cplain = { - 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, - 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, - 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, - 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff - }; - - byte[] plain = new byte[cplain.length]; - for (int x = 0; x < cplain.length; x++) { - plain[x] = (byte)cplain[x]; - } - char[] ckey = { - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f - }; - byte[] bkey = new byte[ckey.length]; - for (int x = 0; x < ckey.length; x++) { - bkey[x] = (byte)ckey[x]; - } - - SessionKey key = new SessionKey(); - key.setData(bkey); - - char[] civ = { - 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, - 0xfe, 0xdc, 0xba, 0x98, 0x67, 0x54, 0x32, 0x10 - }; - - byte[] iv = new byte[civ.length]; - for (int x = 0; x < iv.length; x++) { - iv[x] = (byte)civ[x]; - } - - byte[] e = AESEngine.getInstance().encrypt(plain, key, iv); - byte[] d = AESEngine.getInstance().decrypt(e, key, iv); - boolean same = true; - for (int x = 0; x < d.length; x++) { - if (plain[x] != d[x]) { - same = false; - } - } - - 
System.out.println("Standard test D(E(value)) == value? " + same); - - plain = "1234567890123456".getBytes(); - e = AESEngine.getInstance().encrypt(plain, key, iv); - d = AESEngine.getInstance().decrypt(e, key, iv); - same = DataHelper.eq(plain, d); - System.out.println("Different value test D(E(value)) == value? " + same); - - System.out.println(); - System.out.println(); - - long times = 100; - long encrypttime = 0; - long decrypttime = 0; - long maxE = 0; - long minE = 0; - long maxD = 0; - long minD = 0; - byte[] message = new byte[2*1024]; - for (int i = 0; i < message.length; i++) - message[i] = (byte)((i%26)+'a'); - for (int x = 0; x < times; x++) { - long startencrypt = System.currentTimeMillis(); - e = AESEngine.getInstance().encrypt(message, key, iv); - long endencryptstartdecrypt = System.currentTimeMillis(); - d = AESEngine.getInstance().decrypt(e, key, iv); - long enddecrypt = System.currentTimeMillis(); - System.out.print("."); - encrypttime += endencryptstartdecrypt - startencrypt; - decrypttime += enddecrypt - endencryptstartdecrypt; - if (!DataHelper.eq(d, message)) { - System.out.println("Lengths: source [" + message.length + "] dest [" + d.length + "]"); - System.out.println("Data: dest [" + DataHelper.toString(d, d.length) + "]"); - throw new RuntimeException("Holy crap, decrypted != source message"); - } - - if ( (minE == 0) && (minD == 0) ) { - minE = endencryptstartdecrypt - startencrypt; - maxE = endencryptstartdecrypt - startencrypt; - minD = enddecrypt - endencryptstartdecrypt; - maxD = enddecrypt - endencryptstartdecrypt; - } else { - if (minE > endencryptstartdecrypt - startencrypt) minE = endencryptstartdecrypt - startencrypt; - if (maxE < endencryptstartdecrypt - startencrypt) maxE = endencryptstartdecrypt - startencrypt; - if (minD > enddecrypt - endencryptstartdecrypt) minD = enddecrypt - endencryptstartdecrypt; - if (maxD < enddecrypt - endencryptstartdecrypt) maxD = enddecrypt - endencryptstartdecrypt; - } - - } - - 
System.out.println(); - System.out.println("Data size : " + message.length); - System.out.println("Encryption Time Average : " + (encrypttime/times) + "ms\ttotal: " + encrypttime + "ms\tmin: " + minE + "ms\tmax: " + maxE + "ms\tEncryption Bps: " + (times*message.length*1000)/encrypttime); - System.out.println("Decryption Time Average : " + (decrypttime/times) + "ms\ttotal: " + decrypttime + "ms\tmin: " + minD + "ms\tmax: " + maxD + "ms\tDecryption Bps: " + (times*message.length*1000)/decrypttime); + char[] cplain = { + 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, + 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, + 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, + 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff + }; + + byte[] plain = new byte[cplain.length]; + for (int x = 0; x < cplain.length; x++) { + plain[x] = (byte)cplain[x]; + } + char[] ckey = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f + }; + byte[] bkey = new byte[ckey.length]; + for (int x = 0; x < ckey.length; x++) { + bkey[x] = (byte)ckey[x]; + } + + SessionKey key = new SessionKey(); + key.setData(bkey); + + char[] civ = { + 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, + 0xfe, 0xdc, 0xba, 0x98, 0x67, 0x54, 0x32, 0x10 + }; + + byte[] iv = new byte[civ.length]; + for (int x = 0; x < iv.length; x++) { + iv[x] = (byte)civ[x]; + } + + byte[] e = _context.AESEngine().encrypt(plain, key, iv); + byte[] d = _context.AESEngine().decrypt(e, key, iv); + boolean same = true; + for (int x = 0; x < d.length; x++) { + if (plain[x] != d[x]) { + same = false; + } + } + + System.out.println("Standard test D(E(value)) == value? 
" + same); + + plain = "1234567890123456".getBytes(); + e = _context.AESEngine().encrypt(plain, key, iv); + d = _context.AESEngine().decrypt(e, key, iv); + same = DataHelper.eq(plain, d); + System.out.println("Different value test D(E(value)) == value? " + same); + + System.out.println(); + System.out.println(); + + long times = 100; + long encrypttime = 0; + long decrypttime = 0; + long maxE = 0; + long minE = 0; + long maxD = 0; + long minD = 0; + byte[] message = new byte[2*1024]; + for (int i = 0; i < message.length; i++) + message[i] = (byte)((i%26)+'a'); + for (int x = 0; x < times; x++) { + long startencrypt = System.currentTimeMillis(); + e = _context.AESEngine().encrypt(message, key, iv); + long endencryptstartdecrypt = System.currentTimeMillis(); + d = _context.AESEngine().decrypt(e, key, iv); + long enddecrypt = System.currentTimeMillis(); + System.out.print("."); + encrypttime += endencryptstartdecrypt - startencrypt; + decrypttime += enddecrypt - endencryptstartdecrypt; + if (!DataHelper.eq(d, message)) { + System.out.println("Lengths: source [" + message.length + "] dest [" + d.length + "]"); + System.out.println("Data: dest [" + DataHelper.toString(d, d.length) + "]"); + throw new RuntimeException("Holy crap, decrypted != source message"); + } + + if ( (minE == 0) && (minD == 0) ) { + minE = endencryptstartdecrypt - startencrypt; + maxE = endencryptstartdecrypt - startencrypt; + minD = enddecrypt - endencryptstartdecrypt; + maxD = enddecrypt - endencryptstartdecrypt; + } else { + if (minE > endencryptstartdecrypt - startencrypt) minE = endencryptstartdecrypt - startencrypt; + if (maxE < endencryptstartdecrypt - startencrypt) maxE = endencryptstartdecrypt - startencrypt; + if (minD > enddecrypt - endencryptstartdecrypt) minD = enddecrypt - endencryptstartdecrypt; + if (maxD < enddecrypt - endencryptstartdecrypt) maxD = enddecrypt - endencryptstartdecrypt; + } + + } + + System.out.println(); + System.out.println("Data size : " + message.length); + 
System.out.println("Encryption Time Average : " + (encrypttime/times) + "ms\ttotal: " + encrypttime + "ms\tmin: " + minE + "ms\tmax: " + maxE + "ms\tEncryption Bps: " + (times*message.length*1000)/encrypttime); + System.out.println("Decryption Time Average : " + (decrypttime/times) + "ms\ttotal: " + decrypttime + "ms\tmin: " + minD + "ms\tmax: " + maxD + "ms\tDecryption Bps: " + (times*message.length*1000)/decrypttime); } } - + diff --git a/core/java/test/net/i2p/crypto/ElGamalAESEngineTest.java b/core/java/test/net/i2p/crypto/ElGamalAESEngineTest.java index afe2548aa..9fc0a0a05 100644 --- a/core/java/test/net/i2p/crypto/ElGamalAESEngineTest.java +++ b/core/java/test/net/i2p/crypto/ElGamalAESEngineTest.java @@ -1,13 +1,14 @@ package net.i2p.crypto; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ +import net.i2p.I2PAppContext; import net.i2p.data.Hash; import net.i2p.data.SessionKey; import net.i2p.data.PublicKey; @@ -25,211 +26,216 @@ import java.util.HashSet; class ElGamalAESEngineTest { private final static Log _log = new Log(ElGamalAESEngineTest.class); - public void runRoundtripTest() { - try { - Object keys[] = KeyGenerator.getInstance().generatePKIKeypair(); - PublicKey pubKey = (PublicKey)keys[0]; - PrivateKey privKey = (PrivateKey)keys[1]; - - String msg = "Hello world"; - Set toBeDelivered = new HashSet(); - SessionKey key = SessionKeyManager.getInstance().getCurrentKey(pubKey); - if (key == null) - key = SessionKeyManager.getInstance().createSession(pubKey); - byte[] encrypted = ElGamalAESEngine.encrypt(msg.getBytes(), pubKey, key, 64); - byte[] decrypted = ElGamalAESEngine.decrypt(encrypted, privKey); - if (decrypted == null) - throw new Exception("Failed to decrypt"); - String read = new String(decrypted); - _log.debug("read: " + read); - _log.debug("Match? " + msg.equals(read)); - } catch (Exception e) { - _log.error("Error", e); - try { Thread.sleep(5000); } catch (InterruptedException ie) {} - System.exit(0); - } + private I2PAppContext _context; + public ElGamalAESEngineTest(I2PAppContext ctx) { + _context = ctx; } - + public void runRoundtripTest() { + try { + Object keys[] = KeyGenerator.getInstance().generatePKIKeypair(); + PublicKey pubKey = (PublicKey)keys[0]; + PrivateKey privKey = (PrivateKey)keys[1]; + + String msg = "Hello world"; + Set toBeDelivered = new HashSet(); + SessionKey key = _context.sessionKeyManager().getCurrentKey(pubKey); + if (key == null) + key = _context.sessionKeyManager().createSession(pubKey); + byte[] encrypted = _context.elGamalAESEngine().encrypt(msg.getBytes(), pubKey, key, 64); + byte[] decrypted = _context.elGamalAESEngine().decrypt(encrypted, privKey); + if (decrypted == null) + throw new Exception("Failed to decrypt"); + String read = new String(decrypted); + _log.debug("read: " + read); + 
_log.debug("Match? " + msg.equals(read)); + } catch (Exception e) { + _log.error("Error", e); + try { Thread.sleep(5000); } catch (InterruptedException ie) {} + System.exit(0); + } + } + public void runLoopTest(int runs) { - try { - Object keys[] = KeyGenerator.getInstance().generatePKIKeypair(); - PublicKey pubKey = (PublicKey)keys[0]; - PrivateKey privKey = (PrivateKey)keys[1]; - - long e0 = 0; - long d0 = 0; - long eTot = 0; - long dTot = 0; - for (int i = 0; i < runs; i++) { - long times[] = runMessage(pubKey, privKey); - _log.debug("E[" + i + "] time: " + times[0] + "ms"); - _log.debug("D["+i+"] time: " + times[1] + "ms"); - if (i == 0) { - e0 = times[0]; - d0 = times[1]; - } - eTot += times[0]; - dTot += times[1]; - } - _log.debug("E average time: " + eTot/runs + "ms"); - _log.debug("D average time: " + dTot/runs + "ms"); - _log.debug("Total time to send and receive " + (runs) + "Kb: " + (eTot+dTot)+"ms"); - - } catch (Exception e) { - _log.error("Error", e); - try { Thread.sleep(5000); } catch (InterruptedException ie) {} - System.exit(0); - } + try { + Object keys[] = KeyGenerator.getInstance().generatePKIKeypair(); + PublicKey pubKey = (PublicKey)keys[0]; + PrivateKey privKey = (PrivateKey)keys[1]; + + long e0 = 0; + long d0 = 0; + long eTot = 0; + long dTot = 0; + for (int i = 0; i < runs; i++) { + long times[] = runMessage(pubKey, privKey); + _log.debug("E[" + i + "] time: " + times[0] + "ms"); + _log.debug("D["+i+"] time: " + times[1] + "ms"); + if (i == 0) { + e0 = times[0]; + d0 = times[1]; + } + eTot += times[0]; + dTot += times[1]; + } + _log.debug("E average time: " + eTot/runs + "ms"); + _log.debug("D average time: " + dTot/runs + "ms"); + _log.debug("Total time to send and receive " + (runs) + "Kb: " + (eTot+dTot)+"ms"); + + } catch (Exception e) { + _log.error("Error", e); + try { Thread.sleep(5000); } catch (InterruptedException ie) {} + System.exit(0); + } } private long[] runMessage(PublicKey pubKey, PrivateKey privKey) throws Exception { - 
byte[] msg = new byte[400]; - RandomSource.getInstance().nextBytes(msg); - SessionKey key = SessionKeyManager.getInstance().getCurrentKey(pubKey); - if (key == null) - key = SessionKeyManager.getInstance().createSession(pubKey); - - long beforeE = Clock.getInstance().now(); - byte[] encrypted = ElGamalAESEngine.encrypt(msg, pubKey, key, 1024); - long afterE = Clock.getInstance().now(); - byte[] decrypted = ElGamalAESEngine.decrypt(encrypted, privKey); - long afterD = Clock.getInstance().now(); - if (!DataHelper.eq(msg, decrypted)) { - _log.error("WTF, D(E(val)) != val"); - return null; - } - - long rv[] = new long[2]; - rv[0] = afterE - beforeE; - rv[1] = afterD - afterE; - return rv; + byte[] msg = new byte[400]; + RandomSource.getInstance().nextBytes(msg); + SessionKey key = _context.sessionKeyManager().getCurrentKey(pubKey); + if (key == null) + key = _context.sessionKeyManager().createSession(pubKey); + + long beforeE = Clock.getInstance().now(); + byte[] encrypted = _context.elGamalAESEngine().encrypt(msg, pubKey, key, 1024); + long afterE = Clock.getInstance().now(); + byte[] decrypted = _context.elGamalAESEngine().decrypt(encrypted, privKey); + long afterD = Clock.getInstance().now(); + if (!DataHelper.eq(msg, decrypted)) { + _log.error("WTF, D(E(val)) != val"); + return null; + } + + long rv[] = new long[2]; + rv[0] = afterE - beforeE; + rv[1] = afterD - afterE; + return rv; } public void runAESTest() { - try { - SessionKey sessionKey = KeyGenerator.getInstance().generateSessionKey(); - Hash h = SHA256Generator.getInstance().calculateHash(sessionKey.getData()); - byte iv[] = new byte[16]; - System.arraycopy(h.getData(), 0, iv, 0, 16); - - String msg = "Hello world"; - - byte encrypted[] = ElGamalAESEngine.encryptAESBlock(msg.getBytes(), sessionKey, iv, null, null, 64); - _log.debug("** Encryption complete. 
Beginning decryption"); - Set foundTags = new HashSet(); - SessionKey foundKey = new SessionKey(); - byte decrypted[] = ElGamalAESEngine.decryptAESBlock(encrypted, sessionKey, iv, null, foundTags, foundKey); - if (decrypted == null) throw new Exception("Decryption failed"); - String read = new String(decrypted); - _log.debug("read: " + read); - _log.debug("Match? " + msg.equals(read)); - } catch (Exception e) { - _log.error("Error", e); - try { Thread.sleep(5000); } catch (InterruptedException ie) {} - System.exit(0); - } + try { + SessionKey sessionKey = KeyGenerator.getInstance().generateSessionKey(); + Hash h = SHA256Generator.getInstance().calculateHash(sessionKey.getData()); + byte iv[] = new byte[16]; + System.arraycopy(h.getData(), 0, iv, 0, 16); + + String msg = "Hello world"; + + byte encrypted[] = _context.elGamalAESEngine().encryptAESBlock(msg.getBytes(), sessionKey, iv, null, null, 64); + _log.debug("** Encryption complete. Beginning decryption"); + Set foundTags = new HashSet(); + SessionKey foundKey = new SessionKey(); + byte decrypted[] = _context.elGamalAESEngine().decryptAESBlock(encrypted, sessionKey, iv, null, foundTags, foundKey); + if (decrypted == null) throw new Exception("Decryption failed"); + String read = new String(decrypted); + _log.debug("read: " + read); + _log.debug("Match? 
" + msg.equals(read)); + } catch (Exception e) { + _log.error("Error", e); + try { Thread.sleep(5000); } catch (InterruptedException ie) {} + System.exit(0); + } } public void runBasicAESTest() { - try { - SessionKey sessionKey = KeyGenerator.getInstance().generateSessionKey(); - Hash h = SHA256Generator.getInstance().calculateHash(sessionKey.getData()); - byte iv[] = new byte[16]; - System.arraycopy(h.getData(), 0, iv, 0, 16); - - String msg = "Hello world01234012345678901234501234567890123450123456789012345"; - h = SHA256Generator.getInstance().calculateHash(msg.getBytes()); - _log.debug("Hash of entire aes block before encryption: \n" + DataHelper.toString(h.getData(), 32)); - byte aesEncr[] = AESEngine.getInstance().encrypt(msg.getBytes(), sessionKey, iv); - byte aesDecr[] = AESEngine.getInstance().decrypt(aesEncr, sessionKey, iv); - h = SHA256Generator.getInstance().calculateHash(aesDecr); - _log.debug("Hash of entire aes block after decryption: \n" + DataHelper.toString(h.getData(), 32)); - if (msg.equals(new String(aesDecr))) { - _log.debug("**AES Basic test passed!\n\n"); - } - } catch (Exception e) { - _log.error("Error", e); - try { Thread.sleep(5000); } catch (InterruptedException ie) {} - System.exit(0); - } + try { + SessionKey sessionKey = KeyGenerator.getInstance().generateSessionKey(); + Hash h = SHA256Generator.getInstance().calculateHash(sessionKey.getData()); + byte iv[] = new byte[16]; + System.arraycopy(h.getData(), 0, iv, 0, 16); + + String msg = "Hello world01234012345678901234501234567890123450123456789012345"; + h = SHA256Generator.getInstance().calculateHash(msg.getBytes()); + _log.debug("Hash of entire aes block before encryption: \n" + DataHelper.toString(h.getData(), 32)); + byte aesEncr[] = _context.AESEngine().encrypt(msg.getBytes(), sessionKey, iv); + byte aesDecr[] = _context.AESEngine().decrypt(aesEncr, sessionKey, iv); + h = SHA256Generator.getInstance().calculateHash(aesDecr); + _log.debug("Hash of entire aes block after 
decryption: \n" + DataHelper.toString(h.getData(), 32)); + if (msg.equals(new String(aesDecr))) { + _log.debug("**AES Basic test passed!\n\n"); + } + } catch (Exception e) { + _log.error("Error", e); + try { Thread.sleep(5000); } catch (InterruptedException ie) {} + System.exit(0); + } } public void runElGamalTest(int numLoops) { - - for (int i = 0; i < numLoops; i++) { - Object keys[] = KeyGenerator.getInstance().generatePKIKeypair(); - PublicKey pubKey = (PublicKey)keys[0]; - PrivateKey privKey = (PrivateKey)keys[1]; - SessionKey key = KeyGenerator.getInstance().generateSessionKey(); - - runBasicElGamalTest(key, pubKey, privKey); - } + + for (int i = 0; i < numLoops; i++) { + Object keys[] = KeyGenerator.getInstance().generatePKIKeypair(); + PublicKey pubKey = (PublicKey)keys[0]; + PrivateKey privKey = (PrivateKey)keys[1]; + SessionKey key = KeyGenerator.getInstance().generateSessionKey(); + + runBasicElGamalTest(key, pubKey, privKey); + } } public void runBasicElGamalTest(SessionKey key, PublicKey pubKey, PrivateKey privKey) { - try { - ByteArrayOutputStream elgSrc = new ByteArrayOutputStream(256); - key.writeBytes(elgSrc); - byte preIV[] = new byte[32]; - RandomSource.getInstance().nextBytes(preIV); - elgSrc.write(preIV); -// byte rnd[] = new byte[191]; -// RandomSource.getInstance().nextBytes(rnd); -// elgSrc.write(rnd); - elgSrc.flush(); - - byte elgEncr[] = ElGamalEngine.getInstance().encrypt(elgSrc.toByteArray(), pubKey); - byte elgDecr[] = ElGamalEngine.getInstance().decrypt(elgEncr, privKey); - - ByteArrayInputStream bais = new ByteArrayInputStream(elgDecr); - SessionKey nk = new SessionKey(); - - nk.readBytes(bais); - byte postpreIV[] = new byte[32]; - int read = bais.read(postpreIV); - if (read != postpreIV.length) { - // hmm, this can't really happen... 
- throw new Exception("Somehow ElGamal broke and 256 bytes is less than 32 bytes..."); - } - // ignore the next 192 bytes - boolean eq = (DataHelper.eq(preIV, postpreIV) && DataHelper.eq(key, nk)); - if (!eq) { - _log.error("elgEncr.length: " + elgEncr.length + " elgDecr.length: " + elgDecr.length); - _log.error("Pre IV.................: " + DataHelper.toString(preIV, 32)); - _log.error("Pre IV after decryption: " + DataHelper.toString(postpreIV, 32)); - _log.error("SessionKey.................: " + DataHelper.toString(key.getData(), 32)); - _log.error("SessionKey after decryption: " + DataHelper.toString(nk.getData(), 32)); - _log.error("PublicKey: " + DataHelper.toDecimalString(pubKey.getData(), pubKey.getData().length)); - _log.error("PrivateKey: " + DataHelper.toDecimalString(privKey.getData(), privKey.getData().length)); - - throw new Exception("Not equal!"); - } else { - _log.debug("Basic ElG D(E(val)) == val"); - } - - } catch (Exception e) { - _log.error("Error", e); - try { Thread.sleep(5000); } catch (InterruptedException ie) {} - System.exit(0); - } + try { + ByteArrayOutputStream elgSrc = new ByteArrayOutputStream(256); + key.writeBytes(elgSrc); + byte preIV[] = new byte[32]; + RandomSource.getInstance().nextBytes(preIV); + elgSrc.write(preIV); + // byte rnd[] = new byte[191]; + // RandomSource.getInstance().nextBytes(rnd); + // elgSrc.write(rnd); + elgSrc.flush(); + + byte elgEncr[] = _context.elGamalEngine().encrypt(elgSrc.toByteArray(), pubKey); + byte elgDecr[] = _context.elGamalEngine().decrypt(elgEncr, privKey); + + ByteArrayInputStream bais = new ByteArrayInputStream(elgDecr); + SessionKey nk = new SessionKey(); + + nk.readBytes(bais); + byte postpreIV[] = new byte[32]; + int read = bais.read(postpreIV); + if (read != postpreIV.length) { + // hmm, this can't really happen... 
+ throw new Exception("Somehow ElGamal broke and 256 bytes is less than 32 bytes..."); + } + // ignore the next 192 bytes + boolean eq = (DataHelper.eq(preIV, postpreIV) && DataHelper.eq(key, nk)); + if (!eq) { + _log.error("elgEncr.length: " + elgEncr.length + " elgDecr.length: " + elgDecr.length); + _log.error("Pre IV.................: " + DataHelper.toString(preIV, 32)); + _log.error("Pre IV after decryption: " + DataHelper.toString(postpreIV, 32)); + _log.error("SessionKey.................: " + DataHelper.toString(key.getData(), 32)); + _log.error("SessionKey after decryption: " + DataHelper.toString(nk.getData(), 32)); + _log.error("PublicKey: " + DataHelper.toDecimalString(pubKey.getData(), pubKey.getData().length)); + _log.error("PrivateKey: " + DataHelper.toDecimalString(privKey.getData(), privKey.getData().length)); + + throw new Exception("Not equal!"); + } else { + _log.debug("Basic ElG D(E(val)) == val"); + } + + } catch (Exception e) { + _log.error("Error", e); + try { Thread.sleep(5000); } catch (InterruptedException ie) {} + System.exit(0); + } } public static void main(String args[]) { - ElGamalAESEngineTest tst = new ElGamalAESEngineTest(); - Object o = YKGenerator.class; - try { Thread.sleep(120*1000); } catch (InterruptedException ie) {} - - tst.runBasicAESTest(); - tst.runAESTest(); - tst.runRoundtripTest(); - tst.runElGamalTest(2); - // test bug - for (int i = 0; i < 3; i++) - tst.runLoopTest(1); - // test throughput - tst.runLoopTest(5); - - net.i2p.stat.SimpleStatDumper.dumpStats(Log.CRIT); - try { Thread.sleep(5*1000); } catch (InterruptedException ie) {} + I2PAppContext context = new I2PAppContext(); + ElGamalAESEngineTest tst = new ElGamalAESEngineTest(context); + Object o = YKGenerator.class; + try { Thread.sleep(120*1000); } catch (InterruptedException ie) {} + + tst.runBasicAESTest(); + tst.runAESTest(); + tst.runRoundtripTest(); + tst.runElGamalTest(2); + // test bug + for (int i = 0; i < 3; i++) + tst.runLoopTest(1); + // test 
throughput + tst.runLoopTest(5); + + net.i2p.stat.SimpleStatDumper.dumpStats(context, Log.CRIT); + try { Thread.sleep(5*1000); } catch (InterruptedException ie) {} } } diff --git a/core/java/test/net/i2p/crypto/ElGamalBench.java b/core/java/test/net/i2p/crypto/ElGamalBench.java index 64ca0aa84..92728674e 100644 --- a/core/java/test/net/i2p/crypto/ElGamalBench.java +++ b/core/java/test/net/i2p/crypto/ElGamalBench.java @@ -1,96 +1,98 @@ package net.i2p.crypto; -/* +/* * Copyright (c) 2003, TheCrypto * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without + * + * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: - * - * - Redistributions of source code must retain the above copyright notice, this + * + * - Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation + * - Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. - * - Neither the name of the TheCrypto may be used to endorse or promote - * products derived from this software without specific prior written + * - Neither the name of the TheCrypto may be used to endorse or promote + * products derived from this software without specific prior written * permission. * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ import net.i2p.data.DataHelper; import net.i2p.data.PrivateKey; import net.i2p.data.PublicKey; +import net.i2p.I2PAppContext; public class ElGamalBench { - public static void main(String args[]) { - int times = 100; - long keygentime = 0; - long encrypttime = 0; - long decrypttime = 0; - long maxKey = 0; - long minKey = 0; - long maxE = 0; - long minE = 0; - long maxD = 0; - long minD = 0; - Object[] keys = KeyGenerator.getInstance().generatePKIKeypair(); - byte[] message = new byte[222]; - for (int i = 0; i < message.length; i++) - message[i] = (byte)((i%26)+'a'); - for (int x = 0; x < times; x++) { - long startkeys = System.currentTimeMillis(); - keys = KeyGenerator.getInstance().generatePKIKeypair(); - PublicKey pubkey = (PublicKey)keys[0]; - PrivateKey privkey = (PrivateKey)keys[1]; - long endkeys = System.currentTimeMillis(); - long startencrypt = System.currentTimeMillis(); - byte[] e = ElGamalEngine.getInstance().encrypt(message, pubkey); - long endencryptstartdecrypt = System.currentTimeMillis(); - byte[] d = ElGamalEngine.getInstance().decrypt(e, privkey); - long enddecrypt = System.currentTimeMillis(); - System.out.print("."); - keygentime += endkeys - startkeys; - encrypttime += endencryptstartdecrypt - startencrypt; - decrypttime += enddecrypt - endencryptstartdecrypt; - if (!DataHelper.eq(d, message)) { - System.out.println("Lengths: source [" + message.length + "] dest [" + d.length + "]"); - byte hash1[] = SHA256Generator.getInstance().calculateHash(message).getData(); - byte hash2[] = SHA256Generator.getInstance().calculateHash(d).getData(); - System.out.println("Hashes: source [" + DataHelper.toString(hash1, hash1.length) + "] dest [" + DataHelper.toString(hash2, hash2.length) + "]"); - throw new RuntimeException("Holy crap, decrypted != source message"); - } - if ( (minKey == 0) && (minE == 0) && (minD == 0) ) { - minKey = endkeys - startkeys; - maxKey = endkeys - startkeys; - minE = endencryptstartdecrypt - startencrypt; - maxE = 
endencryptstartdecrypt - startencrypt; - minD = enddecrypt - endencryptstartdecrypt; - maxD = enddecrypt - endencryptstartdecrypt; - } else { - if (minKey > endkeys - startkeys) minKey = endkeys - startkeys; - if (maxKey < endkeys - startkeys) maxKey = endkeys - startkeys; - if (minE > endencryptstartdecrypt - startencrypt) minE = endencryptstartdecrypt - startencrypt; - if (maxE < endencryptstartdecrypt - startencrypt) maxE = endencryptstartdecrypt - startencrypt; - if (minD > enddecrypt - endencryptstartdecrypt) minD = enddecrypt - endencryptstartdecrypt; - if (maxD < enddecrypt - endencryptstartdecrypt) maxD = enddecrypt - endencryptstartdecrypt; - } - } - System.out.println(); - System.out.println("Key Generation Time Average: " + (keygentime/times) + "\ttotal: " + keygentime + "\tmin: " + minKey + "\tmax: " + maxKey + "\tKeygen/second: " + (keygentime == 0 ? "NaN" : ""+(times*1000)/keygentime)); - System.out.println("Encryption Time Average : " + (encrypttime/times) + "\ttotal: " + encrypttime + "\tmin: " + minE + "\tmax: " + maxE + "\tEncryption Bps: " + (times*message.length*1000)/encrypttime); - System.out.println("Decryption Time Average : " + (decrypttime/times) + "\ttotal: " + decrypttime + "\tmin: " + minD + "\tmax: " + maxD + "\tDecryption Bps: " + (times*message.length*1000)/decrypttime); - } + private static I2PAppContext _context = new I2PAppContext(); + public static void main(String args[]) { + int times = 100; + long keygentime = 0; + long encrypttime = 0; + long decrypttime = 0; + long maxKey = 0; + long minKey = 0; + long maxE = 0; + long minE = 0; + long maxD = 0; + long minD = 0; + Object[] keys = KeyGenerator.getInstance().generatePKIKeypair(); + byte[] message = new byte[222]; + for (int i = 0; i < message.length; i++) + message[i] = (byte)((i%26)+'a'); + for (int x = 0; x < times; x++) { + long startkeys = System.currentTimeMillis(); + keys = KeyGenerator.getInstance().generatePKIKeypair(); + PublicKey pubkey = (PublicKey)keys[0]; + 
PrivateKey privkey = (PrivateKey)keys[1]; + long endkeys = System.currentTimeMillis(); + long startencrypt = System.currentTimeMillis(); + byte[] e = _context.elGamalEngine().encrypt(message, pubkey); + long endencryptstartdecrypt = System.currentTimeMillis(); + byte[] d = _context.elGamalEngine().decrypt(e, privkey); + long enddecrypt = System.currentTimeMillis(); + System.out.print("."); + keygentime += endkeys - startkeys; + encrypttime += endencryptstartdecrypt - startencrypt; + decrypttime += enddecrypt - endencryptstartdecrypt; + if (!DataHelper.eq(d, message)) { + System.out.println("Lengths: source [" + message.length + "] dest [" + d.length + "]"); + byte hash1[] = SHA256Generator.getInstance().calculateHash(message).getData(); + byte hash2[] = SHA256Generator.getInstance().calculateHash(d).getData(); + System.out.println("Hashes: source [" + DataHelper.toString(hash1, hash1.length) + "] dest [" + DataHelper.toString(hash2, hash2.length) + "]"); + throw new RuntimeException("Holy crap, decrypted != source message"); + } + if ( (minKey == 0) && (minE == 0) && (minD == 0) ) { + minKey = endkeys - startkeys; + maxKey = endkeys - startkeys; + minE = endencryptstartdecrypt - startencrypt; + maxE = endencryptstartdecrypt - startencrypt; + minD = enddecrypt - endencryptstartdecrypt; + maxD = enddecrypt - endencryptstartdecrypt; + } else { + if (minKey > endkeys - startkeys) minKey = endkeys - startkeys; + if (maxKey < endkeys - startkeys) maxKey = endkeys - startkeys; + if (minE > endencryptstartdecrypt - startencrypt) minE = endencryptstartdecrypt - startencrypt; + if (maxE < endencryptstartdecrypt - startencrypt) maxE = endencryptstartdecrypt - startencrypt; + if (minD > enddecrypt - endencryptstartdecrypt) minD = enddecrypt - endencryptstartdecrypt; + if (maxD < enddecrypt - endencryptstartdecrypt) maxD = enddecrypt - endencryptstartdecrypt; + } + } + System.out.println(); + System.out.println("Key Generation Time Average: " + (keygentime/times) + "\ttotal: " 
+ keygentime + "\tmin: " + minKey + "\tmax: " + maxKey + "\tKeygen/second: " + (keygentime == 0 ? "NaN" : ""+(times*1000)/keygentime)); + System.out.println("Encryption Time Average : " + (encrypttime/times) + "\ttotal: " + encrypttime + "\tmin: " + minE + "\tmax: " + maxE + "\tEncryption Bps: " + (times*message.length*1000)/encrypttime); + System.out.println("Decryption Time Average : " + (decrypttime/times) + "\ttotal: " + decrypttime + "\tmin: " + minD + "\tmax: " + maxD + "\tDecryption Bps: " + (times*message.length*1000)/decrypttime); + } } - + diff --git a/core/java/test/net/i2p/crypto/SessionEncryptionTest.java b/core/java/test/net/i2p/crypto/SessionEncryptionTest.java index 5a1364e55..a6ed0fdaf 100644 --- a/core/java/test/net/i2p/crypto/SessionEncryptionTest.java +++ b/core/java/test/net/i2p/crypto/SessionEncryptionTest.java @@ -1,9 +1,9 @@ package net.i2p.crypto; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -16,6 +16,7 @@ import net.i2p.data.DataHelper; import net.i2p.util.Log; import net.i2p.util.Clock; +import net.i2p.I2PAppContext; import java.util.HashSet; import java.util.Set; @@ -24,383 +25,384 @@ import java.util.Set; * * session key management unit tests: * - * Run tagsIncluded useTag rekey + * Run tagsIncluded useTag rekey * // no sessions - * 1 no no no - * 2 no no no + * 1 no no no + * 2 no no no * // session - * 3 yes (2) no no - * 4 no yes no - * 5 yes (2) yes no - * 6 no yes no - * 7 no yes no + * 3 yes (2) no no + * 4 no yes no + * 5 yes (2) yes no + * 6 no yes no + * 7 no yes no * // rekeying - * 8 yes (2) no no - * 9 no yes no - * 10 yes (2) yes yes - * 11 no yes no - * 12 no yes no + * 8 yes (2) no no + * 9 no yes no + * 10 yes (2) yes yes + * 11 no yes no + * 12 no yes no * // long session - * 13-1000 20 tags every 10 messages, rekey every 50 + * 13-1000 20 tags every 10 messages, rekey every 50 */ public class SessionEncryptionTest { private final static Log _log = new Log(SessionEncryptionTest.class); + private static I2PAppContext _context = new I2PAppContext(); public static void main(String args[]) { - SessionEncryptionTest test = new SessionEncryptionTest(); - try { - //test.testNoSessions(); - //test.testSessions(); - //test.testRekeying(); - test.testLongSession(); - } catch (Throwable t) { - _log.error("Error running tests", t); - } - try { Thread.sleep(60*1000); } catch (InterruptedException ie) {} + SessionEncryptionTest test = new SessionEncryptionTest(); + try { + //test.testNoSessions(); + //test.testSessions(); + //test.testRekeying(); + test.testLongSession(); + } catch (Throwable t) { + _log.error("Error running tests", t); + } + try { Thread.sleep(60*1000); } catch (InterruptedException ie) {} } - + /** - * Run tagsIncluded useTag rekey - * 1 no no no - * 2 no no no + * Run tagsIncluded useTag rekey + * 1 no no no + * 2 no no no */ public void testNoSessions() throws Exception { - Object keys[] = 
KeyGenerator.getInstance().generatePKIKeypair(); - PublicKey pubKey = (PublicKey)keys[0]; - PrivateKey privKey = (PrivateKey)keys[1]; - SessionKey curKey = SessionKeyManager.getInstance().createSession(pubKey); - - byte[] msg1 = "msg 1".getBytes(); - byte[] msg2 = "msg 2".getBytes(); - - byte emsg1[] = ElGamalAESEngine.encrypt(msg1, pubKey, curKey, 64); - byte dmsg1[] = ElGamalAESEngine.decrypt(emsg1, privKey); - if (DataHelper.eq(dmsg1, msg1)) - _log.info("PASSED: No sessions msg 1"); - else - _log.error("FAILED: No sessions msg 1"); - - byte emsg2[] = ElGamalAESEngine.encrypt(msg2, pubKey, curKey, 64); - byte dmsg2[] = ElGamalAESEngine.decrypt(emsg2, privKey); - if (DataHelper.eq(dmsg2, msg2)) - _log.info("PASSED: No sessions msg 2"); - else - _log.error("FAILED: No sessions msg 2"); - } - + Object keys[] = KeyGenerator.getInstance().generatePKIKeypair(); + PublicKey pubKey = (PublicKey)keys[0]; + PrivateKey privKey = (PrivateKey)keys[1]; + SessionKey curKey = _context.sessionKeyManager().createSession(pubKey); + + byte[] msg1 = "msg 1".getBytes(); + byte[] msg2 = "msg 2".getBytes(); + + byte emsg1[] = _context.elGamalAESEngine().encrypt(msg1, pubKey, curKey, 64); + byte dmsg1[] = _context.elGamalAESEngine().decrypt(emsg1, privKey); + if (DataHelper.eq(dmsg1, msg1)) + _log.info("PASSED: No sessions msg 1"); + else + _log.error("FAILED: No sessions msg 1"); + + byte emsg2[] = _context.elGamalAESEngine().encrypt(msg2, pubKey, curKey, 64); + byte dmsg2[] = _context.elGamalAESEngine().decrypt(emsg2, privKey); + if (DataHelper.eq(dmsg2, msg2)) + _log.info("PASSED: No sessions msg 2"); + else + _log.error("FAILED: No sessions msg 2"); + } + /** - * Run tagsIncluded useTag rekey - * 1 yes (2) no no - * 2 no yes no - * 3 yes (2) yes no - * 4 no yes no - * 5 no yes no + * Run tagsIncluded useTag rekey + * 1 yes (2) no no + * 2 no yes no + * 3 yes (2) yes no + * 4 no yes no + * 5 no yes no */ public void testSessions() throws Exception { - Object keys[] = 
KeyGenerator.getInstance().generatePKIKeypair(); - PublicKey pubKey = (PublicKey)keys[0]; - PrivateKey privKey = (PrivateKey)keys[1]; - SessionKey curKey = SessionKeyManager.getInstance().createSession(pubKey); - - SessionTag tag1 = new SessionTag(true); - SessionTag tag2 = new SessionTag(true); - SessionTag tag3 = new SessionTag(true); - SessionTag tag4 = new SessionTag(true); - - HashSet firstTags = new HashSet(); - firstTags.add(tag1); - firstTags.add(tag2); - - HashSet secondTags = new HashSet(); - secondTags.add(tag3); - secondTags.add(tag4); - - byte[] msg1 = "msg 1".getBytes(); - byte[] msg2 = "msg 2".getBytes(); - byte[] msg3 = "msg 3".getBytes(); - byte[] msg4 = "msg 4".getBytes(); - byte[] msg5 = "msg 5".getBytes(); - - byte emsg1[] = ElGamalAESEngine.encrypt(msg1, pubKey, curKey, firstTags, 64); - byte dmsg1[] = ElGamalAESEngine.decrypt(emsg1, privKey); - if (DataHelper.eq(dmsg1, msg1)) - _log.info("PASSED: Sessions msg 1"); - else { - _log.error("FAILED: Sessions msg 1"); - return; - } - - SessionKeyManager.getInstance().tagsDelivered(pubKey, curKey, firstTags); - - curKey = SessionKeyManager.getInstance().getCurrentKey(pubKey); - SessionTag curTag = SessionKeyManager.getInstance().consumeNextAvailableTag(pubKey, curKey); - if (curTag == null) { - _log.error("Not able to consume next tag for message 2"); - return; - } - - byte emsg2[] = ElGamalAESEngine.encrypt(msg2, pubKey, curKey, null, curTag, 64); - byte dmsg2[] = ElGamalAESEngine.decrypt(emsg2, privKey); - if (DataHelper.eq(dmsg2, msg2)) - _log.info("PASSED: Sessions msg 2"); - else { - _log.error("FAILED: Sessions msg 2"); - return; - } - - curKey = SessionKeyManager.getInstance().getCurrentKey(pubKey); - curTag = SessionKeyManager.getInstance().consumeNextAvailableTag(pubKey, curKey); - - if (curTag == null) { - _log.error("Not able to consume next tag for message 3"); - return; - } - if (curKey == null) { - _log.error("Not able to consume next KEY for message 3"); - return; - } - - byte emsg3[] 
= ElGamalAESEngine.encrypt(msg3, pubKey, curKey, secondTags, curTag, 64); - byte dmsg3[] = ElGamalAESEngine.decrypt(emsg3, privKey); - if (DataHelper.eq(dmsg3, msg3)) - _log.info("PASSED: Sessions msg 3"); - else { - _log.error("FAILED: Sessions msg 3"); - return; - } - - SessionKeyManager.getInstance().tagsDelivered(pubKey, curKey, secondTags); - - curKey = SessionKeyManager.getInstance().getCurrentKey(pubKey); - curTag = SessionKeyManager.getInstance().consumeNextAvailableTag(pubKey, curKey); - - if (curTag == null) { - _log.error("Not able to consume next tag for message 4"); - return; - } - if (curKey == null) { - _log.error("Not able to consume next KEY for message 4"); - return; - } - - byte emsg4[] = ElGamalAESEngine.encrypt(msg4, pubKey, curKey, null, curTag, 64); - byte dmsg4[] = ElGamalAESEngine.decrypt(emsg4, privKey); - if (DataHelper.eq(dmsg4, msg4)) - _log.info("PASSED: Sessions msg 4"); - else { - _log.error("FAILED: Sessions msg 4"); - return; - } - - curKey = SessionKeyManager.getInstance().getCurrentKey(pubKey); - curTag = SessionKeyManager.getInstance().consumeNextAvailableTag(pubKey, curKey); - - if (curTag == null) { - _log.error("Not able to consume next tag for message 5"); - return; - } - if (curKey == null) { - _log.error("Not able to consume next KEY for message 5"); - return; - } - - byte emsg5[] = ElGamalAESEngine.encrypt(msg5, pubKey, curKey, null, curTag, 64); - byte dmsg5[] = ElGamalAESEngine.decrypt(emsg5, privKey); - if (DataHelper.eq(dmsg5, msg5)) - _log.info("PASSED: Sessions msg 5"); - else { - _log.error("FAILED: Sessions msg 5"); - return; - } - } + Object keys[] = KeyGenerator.getInstance().generatePKIKeypair(); + PublicKey pubKey = (PublicKey)keys[0]; + PrivateKey privKey = (PrivateKey)keys[1]; + SessionKey curKey = _context.sessionKeyManager().createSession(pubKey); + + SessionTag tag1 = new SessionTag(true); + SessionTag tag2 = new SessionTag(true); + SessionTag tag3 = new SessionTag(true); + SessionTag tag4 = new 
SessionTag(true); + + HashSet firstTags = new HashSet(); + firstTags.add(tag1); + firstTags.add(tag2); + + HashSet secondTags = new HashSet(); + secondTags.add(tag3); + secondTags.add(tag4); + + byte[] msg1 = "msg 1".getBytes(); + byte[] msg2 = "msg 2".getBytes(); + byte[] msg3 = "msg 3".getBytes(); + byte[] msg4 = "msg 4".getBytes(); + byte[] msg5 = "msg 5".getBytes(); + + byte emsg1[] = _context.elGamalAESEngine().encrypt(msg1, pubKey, curKey, firstTags, 64); + byte dmsg1[] = _context.elGamalAESEngine().decrypt(emsg1, privKey); + if (DataHelper.eq(dmsg1, msg1)) + _log.info("PASSED: Sessions msg 1"); + else { + _log.error("FAILED: Sessions msg 1"); + return; + } + + _context.sessionKeyManager().tagsDelivered(pubKey, curKey, firstTags); + + curKey = _context.sessionKeyManager().getCurrentKey(pubKey); + SessionTag curTag = _context.sessionKeyManager().consumeNextAvailableTag(pubKey, curKey); + if (curTag == null) { + _log.error("Not able to consume next tag for message 2"); + return; + } + + byte emsg2[] = _context.elGamalAESEngine().encrypt(msg2, pubKey, curKey, null, curTag, 64); + byte dmsg2[] = _context.elGamalAESEngine().decrypt(emsg2, privKey); + if (DataHelper.eq(dmsg2, msg2)) + _log.info("PASSED: Sessions msg 2"); + else { + _log.error("FAILED: Sessions msg 2"); + return; + } + + curKey = _context.sessionKeyManager().getCurrentKey(pubKey); + curTag = _context.sessionKeyManager().consumeNextAvailableTag(pubKey, curKey); + + if (curTag == null) { + _log.error("Not able to consume next tag for message 3"); + return; + } + if (curKey == null) { + _log.error("Not able to consume next KEY for message 3"); + return; + } + + byte emsg3[] = _context.elGamalAESEngine().encrypt(msg3, pubKey, curKey, secondTags, curTag, 64); + byte dmsg3[] = _context.elGamalAESEngine().decrypt(emsg3, privKey); + if (DataHelper.eq(dmsg3, msg3)) + _log.info("PASSED: Sessions msg 3"); + else { + _log.error("FAILED: Sessions msg 3"); + return; + } + + 
_context.sessionKeyManager().tagsDelivered(pubKey, curKey, secondTags); + + curKey = _context.sessionKeyManager().getCurrentKey(pubKey); + curTag = _context.sessionKeyManager().consumeNextAvailableTag(pubKey, curKey); + + if (curTag == null) { + _log.error("Not able to consume next tag for message 4"); + return; + } + if (curKey == null) { + _log.error("Not able to consume next KEY for message 4"); + return; + } + + byte emsg4[] = _context.elGamalAESEngine().encrypt(msg4, pubKey, curKey, null, curTag, 64); + byte dmsg4[] = _context.elGamalAESEngine().decrypt(emsg4, privKey); + if (DataHelper.eq(dmsg4, msg4)) + _log.info("PASSED: Sessions msg 4"); + else { + _log.error("FAILED: Sessions msg 4"); + return; + } + + curKey = _context.sessionKeyManager().getCurrentKey(pubKey); + curTag = _context.sessionKeyManager().consumeNextAvailableTag(pubKey, curKey); + + if (curTag == null) { + _log.error("Not able to consume next tag for message 5"); + return; + } + if (curKey == null) { + _log.error("Not able to consume next KEY for message 5"); + return; + } + + byte emsg5[] = _context.elGamalAESEngine().encrypt(msg5, pubKey, curKey, null, curTag, 64); + byte dmsg5[] = _context.elGamalAESEngine().decrypt(emsg5, privKey); + if (DataHelper.eq(dmsg5, msg5)) + _log.info("PASSED: Sessions msg 5"); + else { + _log.error("FAILED: Sessions msg 5"); + return; + } + } /** - * Run tagsIncluded useTag rekey - * 1 yes (2) no no - * 2 no yes no - * 3 yes (2) yes yes - * 4 no yes no - * 5 no yes no + * Run tagsIncluded useTag rekey + * 1 yes (2) no no + * 2 no yes no + * 3 yes (2) yes yes + * 4 no yes no + * 5 no yes no */ public void testRekeying() throws Exception { - Object keys[] = KeyGenerator.getInstance().generatePKIKeypair(); - PublicKey pubKey = (PublicKey)keys[0]; - PrivateKey privKey = (PrivateKey)keys[1]; - SessionKey curKey = SessionKeyManager.getInstance().createSession(pubKey); - SessionKey nextKey = KeyGenerator.getInstance().generateSessionKey(); - - SessionTag tag1 = new 
SessionTag(true); - SessionTag tag2 = new SessionTag(true); - SessionTag tag3 = new SessionTag(true); - SessionTag tag4 = new SessionTag(true); - - HashSet firstTags = new HashSet(); - firstTags.add(tag1); - firstTags.add(tag2); - - HashSet secondTags = new HashSet(); - secondTags.add(tag3); - secondTags.add(tag4); - - byte[] msg1 = "msg 1".getBytes(); - byte[] msg2 = "msg 2".getBytes(); - byte[] msg3 = "msg 3".getBytes(); - byte[] msg4 = "msg 4".getBytes(); - byte[] msg5 = "msg 5".getBytes(); - - byte emsg1[] = ElGamalAESEngine.encrypt(msg1, pubKey, curKey, firstTags, 64); - byte dmsg1[] = ElGamalAESEngine.decrypt(emsg1, privKey); - if (DataHelper.eq(dmsg1, msg1)) - _log.info("PASSED: Sessions msg 1"); - else { - _log.error("FAILED: Sessions msg 1"); - return; - } - - SessionKeyManager.getInstance().tagsDelivered(pubKey, curKey, firstTags); - - curKey = SessionKeyManager.getInstance().getCurrentKey(pubKey); - SessionTag curTag = SessionKeyManager.getInstance().consumeNextAvailableTag(pubKey, curKey); - if (curTag == null) { - _log.error("Not able to consume next tag for message 2"); - return; - } - - byte emsg2[] = ElGamalAESEngine.encrypt(msg2, pubKey, curKey, null, curTag, 64); - byte dmsg2[] = ElGamalAESEngine.decrypt(emsg2, privKey); - if (DataHelper.eq(dmsg2, msg2)) - _log.info("PASSED: Sessions msg 2"); - else { - _log.error("FAILED: Sessions msg 2"); - return; - } - - curKey = SessionKeyManager.getInstance().getCurrentKey(pubKey); - curTag = SessionKeyManager.getInstance().consumeNextAvailableTag(pubKey, curKey); - - if (curTag == null) { - _log.error("Not able to consume next tag for message 3"); - return; - } - if (curKey == null) { - _log.error("Not able to consume next KEY for message 3"); - return; - } - - byte emsg3[] = ElGamalAESEngine.encrypt(msg3, pubKey, curKey, secondTags, curTag, nextKey, 64); - byte dmsg3[] = ElGamalAESEngine.decrypt(emsg3, privKey); - if (DataHelper.eq(dmsg3, msg3)) - _log.info("PASSED: Sessions msg 3"); - else { - 
_log.error("FAILED: Sessions msg 3"); - return; - } - - SessionKeyManager.getInstance().tagsDelivered(pubKey, nextKey, secondTags); // note nextKey not curKey - - curKey = SessionKeyManager.getInstance().getCurrentKey(pubKey); - curTag = SessionKeyManager.getInstance().consumeNextAvailableTag(pubKey, curKey); - - if (curTag == null) { - _log.error("Not able to consume next tag for message 4"); - return; - } - if (curKey == null) { - _log.error("Not able to consume next KEY for message 4"); - return; - } - - byte emsg4[] = ElGamalAESEngine.encrypt(msg4, pubKey, curKey, null, curTag, 64); - byte dmsg4[] = ElGamalAESEngine.decrypt(emsg4, privKey); - if (DataHelper.eq(dmsg4, msg4)) - _log.info("PASSED: Sessions msg 4"); - else { - _log.error("FAILED: Sessions msg 4"); - return; - } - - curKey = SessionKeyManager.getInstance().getCurrentKey(pubKey); - curTag = SessionKeyManager.getInstance().consumeNextAvailableTag(pubKey, curKey); - - if (curTag == null) { - _log.error("Not able to consume next tag for message 5"); - return; - } - if (curKey == null) { - _log.error("Not able to consume next KEY for message 5"); - return; - } - - byte emsg5[] = ElGamalAESEngine.encrypt(msg5, pubKey, curKey, null, curTag, 64); - byte dmsg5[] = ElGamalAESEngine.decrypt(emsg5, privKey); - if (DataHelper.eq(dmsg5, msg5)) - _log.info("PASSED: Sessions msg 5"); - else { - _log.error("FAILED: Sessions msg 5"); - return; - } - } - + Object keys[] = KeyGenerator.getInstance().generatePKIKeypair(); + PublicKey pubKey = (PublicKey)keys[0]; + PrivateKey privKey = (PrivateKey)keys[1]; + SessionKey curKey = _context.sessionKeyManager().createSession(pubKey); + SessionKey nextKey = KeyGenerator.getInstance().generateSessionKey(); + + SessionTag tag1 = new SessionTag(true); + SessionTag tag2 = new SessionTag(true); + SessionTag tag3 = new SessionTag(true); + SessionTag tag4 = new SessionTag(true); + + HashSet firstTags = new HashSet(); + firstTags.add(tag1); + firstTags.add(tag2); + + HashSet 
secondTags = new HashSet(); + secondTags.add(tag3); + secondTags.add(tag4); + + byte[] msg1 = "msg 1".getBytes(); + byte[] msg2 = "msg 2".getBytes(); + byte[] msg3 = "msg 3".getBytes(); + byte[] msg4 = "msg 4".getBytes(); + byte[] msg5 = "msg 5".getBytes(); + + byte emsg1[] = _context.elGamalAESEngine().encrypt(msg1, pubKey, curKey, firstTags, 64); + byte dmsg1[] = _context.elGamalAESEngine().decrypt(emsg1, privKey); + if (DataHelper.eq(dmsg1, msg1)) + _log.info("PASSED: Sessions msg 1"); + else { + _log.error("FAILED: Sessions msg 1"); + return; + } + + _context.sessionKeyManager().tagsDelivered(pubKey, curKey, firstTags); + + curKey = _context.sessionKeyManager().getCurrentKey(pubKey); + SessionTag curTag = _context.sessionKeyManager().consumeNextAvailableTag(pubKey, curKey); + if (curTag == null) { + _log.error("Not able to consume next tag for message 2"); + return; + } + + byte emsg2[] = _context.elGamalAESEngine().encrypt(msg2, pubKey, curKey, null, curTag, 64); + byte dmsg2[] = _context.elGamalAESEngine().decrypt(emsg2, privKey); + if (DataHelper.eq(dmsg2, msg2)) + _log.info("PASSED: Sessions msg 2"); + else { + _log.error("FAILED: Sessions msg 2"); + return; + } + + curKey = _context.sessionKeyManager().getCurrentKey(pubKey); + curTag = _context.sessionKeyManager().consumeNextAvailableTag(pubKey, curKey); + + if (curTag == null) { + _log.error("Not able to consume next tag for message 3"); + return; + } + if (curKey == null) { + _log.error("Not able to consume next KEY for message 3"); + return; + } + + byte emsg3[] = _context.elGamalAESEngine().encrypt(msg3, pubKey, curKey, secondTags, curTag, nextKey, 64); + byte dmsg3[] = _context.elGamalAESEngine().decrypt(emsg3, privKey); + if (DataHelper.eq(dmsg3, msg3)) + _log.info("PASSED: Sessions msg 3"); + else { + _log.error("FAILED: Sessions msg 3"); + return; + } + + _context.sessionKeyManager().tagsDelivered(pubKey, nextKey, secondTags); // note nextKey not curKey + + curKey = 
_context.sessionKeyManager().getCurrentKey(pubKey); + curTag = _context.sessionKeyManager().consumeNextAvailableTag(pubKey, curKey); + + if (curTag == null) { + _log.error("Not able to consume next tag for message 4"); + return; + } + if (curKey == null) { + _log.error("Not able to consume next KEY for message 4"); + return; + } + + byte emsg4[] = _context.elGamalAESEngine().encrypt(msg4, pubKey, curKey, null, curTag, 64); + byte dmsg4[] = _context.elGamalAESEngine().decrypt(emsg4, privKey); + if (DataHelper.eq(dmsg4, msg4)) + _log.info("PASSED: Sessions msg 4"); + else { + _log.error("FAILED: Sessions msg 4"); + return; + } + + curKey = _context.sessionKeyManager().getCurrentKey(pubKey); + curTag = _context.sessionKeyManager().consumeNextAvailableTag(pubKey, curKey); + + if (curTag == null) { + _log.error("Not able to consume next tag for message 5"); + return; + } + if (curKey == null) { + _log.error("Not able to consume next KEY for message 5"); + return; + } + + byte emsg5[] = _context.elGamalAESEngine().encrypt(msg5, pubKey, curKey, null, curTag, 64); + byte dmsg5[] = _context.elGamalAESEngine().decrypt(emsg5, privKey); + if (DataHelper.eq(dmsg5, msg5)) + _log.info("PASSED: Sessions msg 5"); + else { + _log.error("FAILED: Sessions msg 5"); + return; + } + } + /** - * 20 tags every 10 messages, rekey every 50 + * 20 tags every 10 messages, rekey every 50 */ public void testLongSession() throws Exception { - int num = 1000; - long start = Clock.getInstance().now(); - testLongSession(num); - long end = Clock.getInstance().now(); - long time = end - start; - float msEach = (float)num / time; - _log.error("Test long session duration: " + num + " messages in " + time + "ms (or " + msEach + "ms each)"); + int num = 1000; + long start = Clock.getInstance().now(); + testLongSession(num); + long end = Clock.getInstance().now(); + long time = end - start; + float msEach = (float)num / time; + _log.error("Test long session duration: " + num + " messages in " + time + "ms 
(or " + msEach + "ms each)"); } public void testLongSession(int numMsgs) throws Exception { - Object keys[] = KeyGenerator.getInstance().generatePKIKeypair(); - PublicKey pubKey = (PublicKey)keys[0]; - PrivateKey privKey = (PrivateKey)keys[1]; - SessionKey curKey = SessionKeyManager.getInstance().createSession(pubKey); - - for (int i = 0; i < numMsgs; i++) { - Set tags = null; - SessionKey nextKey = null; - curKey = SessionKeyManager.getInstance().getCurrentKey(pubKey); - SessionTag curTag = SessionKeyManager.getInstance().consumeNextAvailableTag(pubKey, curKey); - - int availTags = SessionKeyManager.getInstance().getAvailableTags(pubKey, curKey); - if ((availTags < 1)) { - tags = generateNewTags(50); - _log.info("Generating new tags"); - } else { - _log.info("Tags already available: " + availTags + " curTag: " + curTag); - } - if (i % 50 == 0) - nextKey = KeyGenerator.getInstance().generateSessionKey(); - - byte[] msg = ("msg " + i).getBytes(); - - byte emsg[] = ElGamalAESEngine.encrypt(msg, pubKey, curKey, tags, curTag, nextKey, 64); - byte dmsg[] = ElGamalAESEngine.decrypt(emsg, privKey); - if (DataHelper.eq(dmsg, msg)) - _log.info("PASSED: Long session msg " + i); - else { - _log.error("FAILED: Long session msg " + i); - return; - } - - if ( (tags != null) && (tags.size() > 0) ) { - if (nextKey == null) { - SessionKeyManager.getInstance().tagsDelivered(pubKey, curKey, tags); - } else { - SessionKeyManager.getInstance().tagsDelivered(pubKey, nextKey, tags); - } - } - } + Object keys[] = KeyGenerator.getInstance().generatePKIKeypair(); + PublicKey pubKey = (PublicKey)keys[0]; + PrivateKey privKey = (PrivateKey)keys[1]; + SessionKey curKey = _context.sessionKeyManager().createSession(pubKey); + + for (int i = 0; i < numMsgs; i++) { + Set tags = null; + SessionKey nextKey = null; + curKey = _context.sessionKeyManager().getCurrentKey(pubKey); + SessionTag curTag = _context.sessionKeyManager().consumeNextAvailableTag(pubKey, curKey); + + int availTags = 
_context.sessionKeyManager().getAvailableTags(pubKey, curKey); + if ((availTags < 1)) { + tags = generateNewTags(50); + _log.info("Generating new tags"); + } else { + _log.info("Tags already available: " + availTags + " curTag: " + curTag); + } + if (i % 50 == 0) + nextKey = KeyGenerator.getInstance().generateSessionKey(); + + byte[] msg = ("msg " + i).getBytes(); + + byte emsg[] = _context.elGamalAESEngine().encrypt(msg, pubKey, curKey, tags, curTag, nextKey, 64); + byte dmsg[] = _context.elGamalAESEngine().decrypt(emsg, privKey); + if (DataHelper.eq(dmsg, msg)) + _log.info("PASSED: Long session msg " + i); + else { + _log.error("FAILED: Long session msg " + i); + return; + } + + if ( (tags != null) && (tags.size() > 0) ) { + if (nextKey == null) { + _context.sessionKeyManager().tagsDelivered(pubKey, curKey, tags); + } else { + _context.sessionKeyManager().tagsDelivered(pubKey, nextKey, tags); + } + } + } } - + private Set generateNewTags(int numTags) { - Set tags = new HashSet(numTags); - for (int i = 0; i < numTags; i++) - tags.add(new SessionTag(true)); - return tags; + Set tags = new HashSet(numTags); + for (int i = 0; i < numTags; i++) + tags.add(new SessionTag(true)); + return tags; } } diff --git a/core/java/test/net/i2p/data/StructureTest.java b/core/java/test/net/i2p/data/StructureTest.java index 35b9c737e..2a511d5ae 100644 --- a/core/java/test/net/i2p/data/StructureTest.java +++ b/core/java/test/net/i2p/data/StructureTest.java @@ -15,6 +15,7 @@ import java.io.InputStream; import net.i2p.data.DataFormatException; import net.i2p.data.DataStructure; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Utility class for wrapping data structure tests @@ -23,6 +24,7 @@ import net.i2p.util.Log; */ public abstract class StructureTest implements TestDataGenerator, TestDataPrinter { private static final Log _log = new Log(StructureTest.class); + protected static I2PAppContext _context = I2PAppContext.getGlobalContext(); public abstract DataStructure 
createDataStructure() throws DataFormatException; public abstract DataStructure createStructureToRead(); diff --git a/router/java/src/net/i2p/data/i2np/DataMessage.java b/router/java/src/net/i2p/data/i2np/DataMessage.java index c7d19c869..be18d3c7a 100644 --- a/router/java/src/net/i2p/data/i2np/DataMessage.java +++ b/router/java/src/net/i2p/data/i2np/DataMessage.java @@ -1,9 +1,9 @@ package net.i2p.data.i2np; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -15,6 +15,7 @@ import java.io.InputStream; import net.i2p.data.DataFormatException; import net.i2p.data.DataHelper; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Defines a message containing arbitrary bytes of data @@ -26,8 +27,9 @@ public class DataMessage extends I2NPMessageImpl { public final static int MESSAGE_TYPE = 20; private byte _data[]; - public DataMessage() { - _data = null; + public DataMessage(I2PAppContext context) { + super(context); + _data = null; } public byte[] getData() { return _data; } @@ -36,23 +38,23 @@ public class DataMessage extends I2NPMessageImpl { public int getSize() { return _data.length; } public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException { - if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); + if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); try { - int size = (int)DataHelper.readLong(in, 4); - _data = new byte[size]; - int read = read(in, _data); - if (read != 
size) - throw new DataFormatException("Not enough bytes to read (read = " + read + ", expected = " + size + ")"); + int size = (int)DataHelper.readLong(in, 4); + _data = new byte[size]; + int read = read(in, _data); + if (read != size) + throw new DataFormatException("Not enough bytes to read (read = " + read + ", expected = " + size + ")"); } catch (DataFormatException dfe) { throw new I2NPMessageException("Unable to load the message data", dfe); } } protected byte[] writeMessage() throws I2NPMessageException, IOException { - ByteArrayOutputStream os = new ByteArrayOutputStream((_data != null ? _data.length + 4 : 4)); + ByteArrayOutputStream os = new ByteArrayOutputStream((_data != null ? _data.length + 4 : 4)); try { - DataHelper.writeLong(os, 4, (_data != null ? _data.length : 0)); - os.write(_data); + DataHelper.writeLong(os, 4, (_data != null ? _data.length : 0)); + os.write(_data); } catch (DataFormatException dfe) { throw new I2NPMessageException("Error writing out the message data", dfe); } @@ -62,7 +64,7 @@ public class DataMessage extends I2NPMessageImpl { public int getType() { return MESSAGE_TYPE; } public int hashCode() { - return DataHelper.hashCode(getData()); + return DataHelper.hashCode(getData()); } public boolean equals(Object object) { @@ -74,7 +76,7 @@ public class DataMessage extends I2NPMessageImpl { } } - public String toString() { + public String toString() { StringBuffer buf = new StringBuffer(); buf.append("[DataMessage: "); buf.append("\n\tData: ").append(DataHelper.toString(getData(), 64)); diff --git a/router/java/src/net/i2p/data/i2np/DatabaseFindNearestMessage.java b/router/java/src/net/i2p/data/i2np/DatabaseFindNearestMessage.java deleted file mode 100644 index f3078734b..000000000 --- a/router/java/src/net/i2p/data/i2np/DatabaseFindNearestMessage.java +++ /dev/null @@ -1,99 +0,0 @@ -package net.i2p.data.i2np; -/* - * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the 
public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat - * your children, but it might. Use at your own risk. - * - */ - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; - -import net.i2p.data.DataFormatException; -import net.i2p.data.DataHelper; -import net.i2p.data.Hash; -import net.i2p.util.Log; - -/** - * Defines the message a router sends to another router to help integrate into - * the network by searching for routers in a particular keyspace. - * - * @author jrandom - */ -public class DatabaseFindNearestMessage extends I2NPMessageImpl { - private final static Log _log = new Log(DatabaseFindNearestMessage.class); - public final static int MESSAGE_TYPE = 4; - private Hash _key; - private Hash _from; - - public DatabaseFindNearestMessage() { - setSearchKey(null); - setFromHash(null); - } - - /** - * Defines the key being searched for - */ - public Hash getSearchKey() { return _key; } - public void setSearchKey(Hash key) { _key = key; } - - /** - * Contains the SHA256 Hash of the RouterIdentity sending the message - */ - public Hash getFromHash() { return _from; } - public void setFromHash(Hash from) { _from = from; } - - public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException { - if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); - try { - _key = new Hash(); - _key.readBytes(in); - _from = new Hash(); - _from.readBytes(in); - } catch (DataFormatException dfe) { - throw new I2NPMessageException("Unable to load the message data", dfe); - } - } - - protected byte[] writeMessage() throws I2NPMessageException, IOException { - if ( (_key == null) || (_from == null) ) throw new I2NPMessageException("Not enough data to write out"); - - ByteArrayOutputStream os = new ByteArrayOutputStream(32); - try { - _key.writeBytes(os); - _from.writeBytes(os); - } 
catch (DataFormatException dfe) { - throw new I2NPMessageException("Error writing out the message data", dfe); - } - return os.toByteArray(); - } - - public int getType() { return MESSAGE_TYPE; } - - public int hashCode() { - return DataHelper.hashCode(getSearchKey()) + - DataHelper.hashCode(getFromHash()); - } - - public boolean equals(Object object) { - if ( (object != null) && (object instanceof DatabaseFindNearestMessage) ) { - DatabaseFindNearestMessage msg = (DatabaseFindNearestMessage)object; - return DataHelper.eq(getSearchKey(),msg.getSearchKey()) && - DataHelper.eq(getFromHash(),msg.getFromHash()); - } else { - return false; - } - } - - public String toString() { - StringBuffer buf = new StringBuffer(); - buf.append("[DatabaseFindNearestMessage: "); - buf.append("\n\tSearch Key: ").append(getSearchKey()); - buf.append("\n\tFrom: ").append(getFromHash()); - buf.append("]"); - return buf.toString(); - } -} diff --git a/router/java/src/net/i2p/data/i2np/DatabaseLookupMessage.java b/router/java/src/net/i2p/data/i2np/DatabaseLookupMessage.java index 8ee73bbba..7d7533fc1 100644 --- a/router/java/src/net/i2p/data/i2np/DatabaseLookupMessage.java +++ b/router/java/src/net/i2p/data/i2np/DatabaseLookupMessage.java @@ -1,9 +1,9 @@ package net.i2p.data.i2np; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -21,6 +21,7 @@ import net.i2p.data.Hash; import net.i2p.data.RouterInfo; import net.i2p.data.TunnelId; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Defines the message a router sends to another router to search for a @@ -36,10 +37,11 @@ public class DatabaseLookupMessage extends I2NPMessageImpl { private TunnelId _replyTunnel; private Set _dontIncludePeers; - public DatabaseLookupMessage() { - setSearchKey(null); - setFrom(null); - setDontIncludePeers(null); + public DatabaseLookupMessage(I2PAppContext context) { + super(context); + setSearchKey(null); + setFrom(null); + setDontIncludePeers(null); } /** @@ -68,63 +70,63 @@ public class DatabaseLookupMessage extends I2NPMessageImpl { * @return Set of Hash objects, each of which is the H(routerIdentity) to skip */ public Set getDontIncludePeers() { return _dontIncludePeers; } - public void setDontIncludePeers(Set peers) { - if (peers != null) - _dontIncludePeers = new HashSet(peers); - else - _dontIncludePeers = null; + public void setDontIncludePeers(Set peers) { + if (peers != null) + _dontIncludePeers = new HashSet(peers); + else + _dontIncludePeers = null; } public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException { - if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); + if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); try { - _key = new Hash(); - _key.readBytes(in); - _from = new RouterInfo(); - _from.readBytes(in); - boolean tunnelSpecified = DataHelper.readBoolean(in).booleanValue(); - if (tunnelSpecified) { - _replyTunnel = new TunnelId(); - _replyTunnel.readBytes(in); - } - int numPeers = (int)DataHelper.readLong(in, 2); - if ( (numPeers < 0) || (numPeers >= (1<<16) ) ) - throw new DataFormatException("Invalid number of peers - " + numPeers); - Set peers = new HashSet(numPeers); - for (int i = 0; i < numPeers; i++) { - Hash peer = new Hash(); - 
peer.readBytes(in); - peers.add(peer); - } - _dontIncludePeers = peers; + _key = new Hash(); + _key.readBytes(in); + _from = new RouterInfo(); + _from.readBytes(in); + boolean tunnelSpecified = DataHelper.readBoolean(in).booleanValue(); + if (tunnelSpecified) { + _replyTunnel = new TunnelId(); + _replyTunnel.readBytes(in); + } + int numPeers = (int)DataHelper.readLong(in, 2); + if ( (numPeers < 0) || (numPeers >= (1<<16) ) ) + throw new DataFormatException("Invalid number of peers - " + numPeers); + Set peers = new HashSet(numPeers); + for (int i = 0; i < numPeers; i++) { + Hash peer = new Hash(); + peer.readBytes(in); + peers.add(peer); + } + _dontIncludePeers = peers; } catch (DataFormatException dfe) { throw new I2NPMessageException("Unable to load the message data", dfe); } } protected byte[] writeMessage() throws I2NPMessageException, IOException { - if (_key == null) throw new I2NPMessageException("Key being searched for not specified"); - if (_from == null) throw new I2NPMessageException("From address not specified"); - + if (_key == null) throw new I2NPMessageException("Key being searched for not specified"); + if (_from == null) throw new I2NPMessageException("From address not specified"); + ByteArrayOutputStream os = new ByteArrayOutputStream(32); try { - _key.writeBytes(os); - _from.writeBytes(os); - if (_replyTunnel != null) { - DataHelper.writeBoolean(os, Boolean.TRUE); - _replyTunnel.writeBytes(os); - } else { - DataHelper.writeBoolean(os, Boolean.FALSE); - } - if ( (_dontIncludePeers == null) || (_dontIncludePeers.size() <= 0) ) { - DataHelper.writeLong(os, 2, 0); - } else { - DataHelper.writeLong(os, 2, _dontIncludePeers.size()); - for (Iterator iter = _dontIncludePeers.iterator(); iter.hasNext(); ) { - Hash peer = (Hash)iter.next(); - peer.writeBytes(os); - } - } + _key.writeBytes(os); + _from.writeBytes(os); + if (_replyTunnel != null) { + DataHelper.writeBoolean(os, Boolean.TRUE); + _replyTunnel.writeBytes(os); + } else { + 
DataHelper.writeBoolean(os, Boolean.FALSE); + } + if ( (_dontIncludePeers == null) || (_dontIncludePeers.size() <= 0) ) { + DataHelper.writeLong(os, 2, 0); + } else { + DataHelper.writeLong(os, 2, _dontIncludePeers.size()); + for (Iterator iter = _dontIncludePeers.iterator(); iter.hasNext(); ) { + Hash peer = (Hash)iter.next(); + peer.writeBytes(os); + } + } } catch (DataFormatException dfe) { throw new I2NPMessageException("Error writing out the message data", dfe); } @@ -134,25 +136,25 @@ public class DatabaseLookupMessage extends I2NPMessageImpl { public int getType() { return MESSAGE_TYPE; } public int hashCode() { - return DataHelper.hashCode(getSearchKey()) + - DataHelper.hashCode(getFrom()) + - DataHelper.hashCode(getReplyTunnel()) + - DataHelper.hashCode(_dontIncludePeers); + return DataHelper.hashCode(getSearchKey()) + + DataHelper.hashCode(getFrom()) + + DataHelper.hashCode(getReplyTunnel()) + + DataHelper.hashCode(_dontIncludePeers); } public boolean equals(Object object) { if ( (object != null) && (object instanceof DatabaseLookupMessage) ) { DatabaseLookupMessage msg = (DatabaseLookupMessage)object; return DataHelper.eq(getSearchKey(),msg.getSearchKey()) && - DataHelper.eq(getFrom(),msg.getFrom()) && - DataHelper.eq(getReplyTunnel(),msg.getReplyTunnel()) && - DataHelper.eq(_dontIncludePeers,msg.getDontIncludePeers()); + DataHelper.eq(getFrom(),msg.getFrom()) && + DataHelper.eq(getReplyTunnel(),msg.getReplyTunnel()) && + DataHelper.eq(_dontIncludePeers,msg.getDontIncludePeers()); } else { return false; } } - public String toString() { + public String toString() { StringBuffer buf = new StringBuffer(); buf.append("[DatabaseLookupMessage: "); buf.append("\n\tSearch Key: ").append(getSearchKey()); diff --git a/router/java/src/net/i2p/data/i2np/DatabaseSearchReplyMessage.java b/router/java/src/net/i2p/data/i2np/DatabaseSearchReplyMessage.java index 0bd5abbd5..ea996c7a8 100644 --- a/router/java/src/net/i2p/data/i2np/DatabaseSearchReplyMessage.java +++ 
b/router/java/src/net/i2p/data/i2np/DatabaseSearchReplyMessage.java @@ -1,9 +1,9 @@ package net.i2p.data.i2np; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -21,10 +21,11 @@ import net.i2p.data.DataHelper; import net.i2p.data.Hash; import net.i2p.data.RouterInfo; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** - * Defines the message a router sends to another router in response to a - * search (DatabaseFindNearest or DatabaseLookup) when it doesn't have the value, + * Defines the message a router sends to another router in response to a + * search (DatabaseFindNearest or DatabaseLookup) when it doesn't have the value, * specifying what routers it would search. 
* * @author jrandom @@ -36,10 +37,11 @@ public class DatabaseSearchReplyMessage extends I2NPMessageImpl { private List _routerInfoStructures; private Hash _from; - public DatabaseSearchReplyMessage() { - setSearchKey(null); - _routerInfoStructures = new ArrayList(); - setFromHash(null); + public DatabaseSearchReplyMessage(I2PAppContext context) { + super(context); + setSearchKey(null); + _routerInfoStructures = new ArrayList(); + setFromHash(null); } /** @@ -57,58 +59,58 @@ public class DatabaseSearchReplyMessage extends I2NPMessageImpl { public void setFromHash(Hash from) { _from = from; } public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException { - if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); + if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); try { - _key = new Hash(); - _key.readBytes(in); - - int compressedLength = (int)DataHelper.readLong(in, 2); - byte compressedData[] = new byte[compressedLength]; - int read = DataHelper.read(in, compressedData); - if (read != compressedLength) - throw new IOException("Not enough data to decompress"); - byte decompressedData[] = DataHelper.decompress(compressedData); - ByteArrayInputStream bais = new ByteArrayInputStream(decompressedData); - int num = (int)DataHelper.readLong(bais, 1); - _routerInfoStructures.clear(); - for (int i = 0; i < num; i++) { - RouterInfo info = new RouterInfo(); - info.readBytes(bais); - addReply(info); - } - - _from = new Hash(); - _from.readBytes(in); + _key = new Hash(); + _key.readBytes(in); + + int compressedLength = (int)DataHelper.readLong(in, 2); + byte compressedData[] = new byte[compressedLength]; + int read = DataHelper.read(in, compressedData); + if (read != compressedLength) + throw new IOException("Not enough data to decompress"); + byte decompressedData[] = DataHelper.decompress(compressedData); + ByteArrayInputStream bais = new 
ByteArrayInputStream(decompressedData); + int num = (int)DataHelper.readLong(bais, 1); + _routerInfoStructures.clear(); + for (int i = 0; i < num; i++) { + RouterInfo info = new RouterInfo(); + info.readBytes(bais); + addReply(info); + } + + _from = new Hash(); + _from.readBytes(in); } catch (DataFormatException dfe) { throw new I2NPMessageException("Unable to load the message data", dfe); } } protected byte[] writeMessage() throws I2NPMessageException, IOException { - if (_key == null) - throw new I2NPMessageException("Key in reply to not specified"); - if (_routerInfoStructures == null) - throw new I2NPMessageException("RouterInfo replies are null"); - if (_routerInfoStructures.size() <= 0) - throw new I2NPMessageException("No replies specified in SearchReply! Always include oneself!"); - if (_from == null) - throw new I2NPMessageException("No 'from' address specified!"); - + if (_key == null) + throw new I2NPMessageException("Key in reply to not specified"); + if (_routerInfoStructures == null) + throw new I2NPMessageException("RouterInfo replies are null"); + if (_routerInfoStructures.size() <= 0) + throw new I2NPMessageException("No replies specified in SearchReply! 
Always include oneself!"); + if (_from == null) + throw new I2NPMessageException("No 'from' address specified!"); + ByteArrayOutputStream os = new ByteArrayOutputStream(32); try { - _key.writeBytes(os); - - ByteArrayOutputStream baos = new ByteArrayOutputStream(512); - DataHelper.writeLong(baos, 1, _routerInfoStructures.size()); - for (int i = 0; i < getNumReplies(); i++) { - RouterInfo info = getReply(i); - info.writeBytes(baos); - } - - byte compressed[] = DataHelper.compress(baos.toByteArray()); - DataHelper.writeLong(os, 2, compressed.length); - os.write(compressed); - _from.writeBytes(os); + _key.writeBytes(os); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(512); + DataHelper.writeLong(baos, 1, _routerInfoStructures.size()); + for (int i = 0; i < getNumReplies(); i++) { + RouterInfo info = getReply(i); + info.writeBytes(baos); + } + + byte compressed[] = DataHelper.compress(baos.toByteArray()); + DataHelper.writeLong(os, 2, compressed.length); + os.write(compressed); + _from.writeBytes(os); } catch (DataFormatException dfe) { throw new I2NPMessageException("Error writing out the message data", dfe); } @@ -121,27 +123,27 @@ public class DatabaseSearchReplyMessage extends I2NPMessageImpl { if ( (object != null) && (object instanceof DatabaseSearchReplyMessage) ) { DatabaseSearchReplyMessage msg = (DatabaseSearchReplyMessage)object; return DataHelper.eq(getSearchKey(),msg.getSearchKey()) && - DataHelper.eq(getFromHash(),msg.getFromHash()) && - DataHelper.eq(_routerInfoStructures,msg._routerInfoStructures); + DataHelper.eq(getFromHash(),msg.getFromHash()) && + DataHelper.eq(_routerInfoStructures,msg._routerInfoStructures); } else { return false; } } public int hashCode() { - return DataHelper.hashCode(getSearchKey()) + - DataHelper.hashCode(getFromHash()) + - DataHelper.hashCode(_routerInfoStructures); + return DataHelper.hashCode(getSearchKey()) + + DataHelper.hashCode(getFromHash()) + + DataHelper.hashCode(_routerInfoStructures); } - public String 
toString() { + public String toString() { StringBuffer buf = new StringBuffer(); buf.append("[DatabaseSearchReplyMessage: "); buf.append("\n\tSearch Key: ").append(getSearchKey()); buf.append("\n\tReplies: # = ").append(getNumReplies()); - for (int i = 0; i < getNumReplies(); i++) { - buf.append("\n\t\tReply [").append(i).append("]: ").append(getReply(i)); - } + for (int i = 0; i < getNumReplies(); i++) { + buf.append("\n\t\tReply [").append(i).append("]: ").append(getReply(i)); + } buf.append("\n\tFrom: ").append(getFromHash()); buf.append("]"); return buf.toString(); diff --git a/router/java/src/net/i2p/data/i2np/DatabaseStoreMessage.java b/router/java/src/net/i2p/data/i2np/DatabaseStoreMessage.java index 0221e2f4a..223c33d20 100644 --- a/router/java/src/net/i2p/data/i2np/DatabaseStoreMessage.java +++ b/router/java/src/net/i2p/data/i2np/DatabaseStoreMessage.java @@ -1,9 +1,9 @@ package net.i2p.data.i2np; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -19,9 +19,10 @@ import net.i2p.data.Hash; import net.i2p.data.LeaseSet; import net.i2p.data.RouterInfo; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** - * Defines the message a router sends to another router to test the network + * Defines the message a router sends to another router to test the network * database reachability, as well as the reply message sent back. 
* * @author jrandom @@ -37,11 +38,12 @@ public class DatabaseStoreMessage extends I2NPMessageImpl { public final static int KEY_TYPE_ROUTERINFO = 0; public final static int KEY_TYPE_LEASESET = 1; - public DatabaseStoreMessage() { - setValueType(-1); - setKey(null); - setLeaseSet(null); - setRouterInfo(null); + public DatabaseStoreMessage(I2PAppContext context) { + super(context); + setValueType(-1); + setKey(null); + setLeaseSet(null); + setRouterInfo(null); } /** @@ -56,10 +58,10 @@ public class DatabaseStoreMessage extends I2NPMessageImpl { * */ public RouterInfo getRouterInfo() { return _info; } - public void setRouterInfo(RouterInfo routerInfo) { - _info = routerInfo; - if (_info != null) - setValueType(KEY_TYPE_ROUTERINFO); + public void setRouterInfo(RouterInfo routerInfo) { + _info = routerInfo; + if (_info != null) + setValueType(KEY_TYPE_ROUTERINFO); } /** @@ -67,14 +69,14 @@ public class DatabaseStoreMessage extends I2NPMessageImpl { * */ public LeaseSet getLeaseSet() { return _leaseSet; } - public void setLeaseSet(LeaseSet leaseSet) { - _leaseSet = leaseSet; - if (_leaseSet != null) - setValueType(KEY_TYPE_LEASESET); + public void setLeaseSet(LeaseSet leaseSet) { + _leaseSet = leaseSet; + if (_leaseSet != null) + setValueType(KEY_TYPE_LEASESET); } /** - * Defines type of key being stored in the network database - + * Defines type of key being stored in the network database - * either KEY_TYPE_ROUTERINFO or KEY_TYPE_LEASESET * */ @@ -82,52 +84,52 @@ public class DatabaseStoreMessage extends I2NPMessageImpl { public void setValueType(int type) { _type = type; } public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException { - if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); + if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); try { - _key = new Hash(); - _key.readBytes(in); - _log.debug("Hash read: " + _key.toBase64()); - 
_type = (int)DataHelper.readLong(in, 1); - if (_type == KEY_TYPE_LEASESET) { - _leaseSet = new LeaseSet(); - _leaseSet.readBytes(in); - } else if (_type == KEY_TYPE_ROUTERINFO) { - _info = new RouterInfo(); - int compressedSize = (int)DataHelper.readLong(in, 2); - byte compressed[] = new byte[compressedSize]; - int read = DataHelper.read(in, compressed); - if (read != compressedSize) - throw new I2NPMessageException("Invalid compressed data size"); - ByteArrayInputStream bais = new ByteArrayInputStream(DataHelper.decompress(compressed)); - _info.readBytes(bais); - } else { - throw new I2NPMessageException("Invalid type of key read from the structure - " + _type); - } + _key = new Hash(); + _key.readBytes(in); + _log.debug("Hash read: " + _key.toBase64()); + _type = (int)DataHelper.readLong(in, 1); + if (_type == KEY_TYPE_LEASESET) { + _leaseSet = new LeaseSet(); + _leaseSet.readBytes(in); + } else if (_type == KEY_TYPE_ROUTERINFO) { + _info = new RouterInfo(); + int compressedSize = (int)DataHelper.readLong(in, 2); + byte compressed[] = new byte[compressedSize]; + int read = DataHelper.read(in, compressed); + if (read != compressedSize) + throw new I2NPMessageException("Invalid compressed data size"); + ByteArrayInputStream bais = new ByteArrayInputStream(DataHelper.decompress(compressed)); + _info.readBytes(bais); + } else { + throw new I2NPMessageException("Invalid type of key read from the structure - " + _type); + } } catch (DataFormatException dfe) { throw new I2NPMessageException("Unable to load the message data", dfe); } } protected byte[] writeMessage() throws I2NPMessageException, IOException { - if (_key == null) throw new I2NPMessageException("Invalid key"); - if ( (_type != KEY_TYPE_LEASESET) && (_type != KEY_TYPE_ROUTERINFO) ) throw new I2NPMessageException("Invalid key type"); - if ( (_type == KEY_TYPE_LEASESET) && (_leaseSet == null) ) throw new I2NPMessageException("Missing lease set"); - if ( (_type == KEY_TYPE_ROUTERINFO) && (_info == null) ) 
throw new I2NPMessageException("Missing router info"); - + if (_key == null) throw new I2NPMessageException("Invalid key"); + if ( (_type != KEY_TYPE_LEASESET) && (_type != KEY_TYPE_ROUTERINFO) ) throw new I2NPMessageException("Invalid key type"); + if ( (_type == KEY_TYPE_LEASESET) && (_leaseSet == null) ) throw new I2NPMessageException("Missing lease set"); + if ( (_type == KEY_TYPE_ROUTERINFO) && (_info == null) ) throw new I2NPMessageException("Missing router info"); + ByteArrayOutputStream os = new ByteArrayOutputStream(256); try { - _key.writeBytes(os); - DataHelper.writeLong(os, 1, _type); - if (_type == KEY_TYPE_LEASESET) { - _leaseSet.writeBytes(os); - } else if (_type == KEY_TYPE_ROUTERINFO) { - ByteArrayOutputStream baos = new ByteArrayOutputStream(4*1024); - _info.writeBytes(baos); - byte uncompressed[] = baos.toByteArray(); - byte compressed[] = DataHelper.compress(uncompressed); - DataHelper.writeLong(os, 2, compressed.length); - os.write(compressed); - } + _key.writeBytes(os); + DataHelper.writeLong(os, 1, _type); + if (_type == KEY_TYPE_LEASESET) { + _leaseSet.writeBytes(os); + } else if (_type == KEY_TYPE_ROUTERINFO) { + ByteArrayOutputStream baos = new ByteArrayOutputStream(4*1024); + _info.writeBytes(baos); + byte uncompressed[] = baos.toByteArray(); + byte compressed[] = DataHelper.compress(uncompressed); + DataHelper.writeLong(os, 2, compressed.length); + os.write(compressed); + } } catch (DataFormatException dfe) { throw new I2NPMessageException("Error writing out the message data", dfe); } @@ -137,29 +139,29 @@ public class DatabaseStoreMessage extends I2NPMessageImpl { public int getType() { return MESSAGE_TYPE; } public int hashCode() { - return DataHelper.hashCode(getKey()) + - DataHelper.hashCode(getLeaseSet()) + - DataHelper.hashCode(getRouterInfo()) + - getValueType(); + return DataHelper.hashCode(getKey()) + + DataHelper.hashCode(getLeaseSet()) + + DataHelper.hashCode(getRouterInfo()) + + getValueType(); } public boolean equals(Object 
object) { if ( (object != null) && (object instanceof DatabaseStoreMessage) ) { DatabaseStoreMessage msg = (DatabaseStoreMessage)object; return DataHelper.eq(getKey(),msg.getKey()) && - DataHelper.eq(getLeaseSet(),msg.getLeaseSet()) && - DataHelper.eq(getRouterInfo(),msg.getRouterInfo()) && - DataHelper.eq(getValueType(),msg.getValueType()); + DataHelper.eq(getLeaseSet(),msg.getLeaseSet()) && + DataHelper.eq(getRouterInfo(),msg.getRouterInfo()) && + DataHelper.eq(getValueType(),msg.getValueType()); } else { return false; } } - public String toString() { + public String toString() { StringBuffer buf = new StringBuffer(); buf.append("[DatabaseStoreMessage: "); - buf.append("\n\tExpiration: ").append(getMessageExpiration()); - buf.append("\n\tUnique ID: ").append(getUniqueId()); + buf.append("\n\tExpiration: ").append(getMessageExpiration()); + buf.append("\n\tUnique ID: ").append(getUniqueId()); buf.append("\n\tKey: ").append(getKey()); buf.append("\n\tValue Type: ").append(getValueType()); buf.append("\n\tRouter Info: ").append(getRouterInfo()); diff --git a/router/java/src/net/i2p/data/i2np/DeliveryStatusMessage.java b/router/java/src/net/i2p/data/i2np/DeliveryStatusMessage.java index 18c7d564e..0d4569917 100644 --- a/router/java/src/net/i2p/data/i2np/DeliveryStatusMessage.java +++ b/router/java/src/net/i2p/data/i2np/DeliveryStatusMessage.java @@ -1,9 +1,9 @@ package net.i2p.data.i2np; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -16,6 +16,7 @@ import java.util.Date; import net.i2p.data.DataFormatException; import net.i2p.data.DataHelper; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Defines the message sent back in reply to a message when requested, containing @@ -29,9 +30,10 @@ public class DeliveryStatusMessage extends I2NPMessageImpl { private long _id; private Date _arrival; - public DeliveryStatusMessage() { - setMessageId(-1); - setArrival(null); + public DeliveryStatusMessage(I2PAppContext context) { + super(context); + setMessageId(-1); + setArrival(null); } public long getMessageId() { return _id; } @@ -41,22 +43,22 @@ public class DeliveryStatusMessage extends I2NPMessageImpl { public void setArrival(Date arrival) { _arrival = arrival; } public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException { - if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); + if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); try { - _id = DataHelper.readLong(in, 4); - _arrival = DataHelper.readDate(in); + _id = DataHelper.readLong(in, 4); + _arrival = DataHelper.readDate(in); } catch (DataFormatException dfe) { throw new I2NPMessageException("Unable to load the message data", dfe); } } protected byte[] writeMessage() throws I2NPMessageException, IOException { - if ( (_id < 0) || (_arrival == null) ) throw new I2NPMessageException("Not enough data to write out"); - + if ( (_id < 0) || (_arrival == null) ) throw new I2NPMessageException("Not enough data to write out"); + ByteArrayOutputStream os = new ByteArrayOutputStream(32); try { - DataHelper.writeLong(os, 4, _id); - DataHelper.writeDate(os, _arrival); + DataHelper.writeLong(os, 4, _id); + DataHelper.writeDate(os, _arrival); } catch (DataFormatException dfe) { throw new I2NPMessageException("Error writing out the message data", dfe); } @@ -66,21 +68,21 @@ public class DeliveryStatusMessage 
extends I2NPMessageImpl { public int getType() { return MESSAGE_TYPE; } public int hashCode() { - return (int)getMessageId() + - DataHelper.hashCode(getArrival()); + return (int)getMessageId() + + DataHelper.hashCode(getArrival()); } public boolean equals(Object object) { if ( (object != null) && (object instanceof DeliveryStatusMessage) ) { DeliveryStatusMessage msg = (DeliveryStatusMessage)object; return DataHelper.eq(getMessageId(),msg.getMessageId()) && - DataHelper.eq(getArrival(),msg.getArrival()); + DataHelper.eq(getArrival(),msg.getArrival()); } else { return false; } } - public String toString() { + public String toString() { StringBuffer buf = new StringBuffer(); buf.append("[DeliveryStatusMessage: "); buf.append("\n\tMessage ID: ").append(getMessageId()); diff --git a/router/java/src/net/i2p/data/i2np/GarlicClove.java b/router/java/src/net/i2p/data/i2np/GarlicClove.java index aec216751..983f09442 100644 --- a/router/java/src/net/i2p/data/i2np/GarlicClove.java +++ b/router/java/src/net/i2p/data/i2np/GarlicClove.java @@ -1,9 +1,9 @@ package net.i2p.data.i2np; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -18,6 +18,7 @@ import net.i2p.data.DataFormatException; import net.i2p.data.DataHelper; import net.i2p.data.DataStructureImpl; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Contains one deliverable message encrypted to a router along with instructions @@ -26,7 +27,8 @@ import net.i2p.util.Log; * @author jrandom */ public class GarlicClove extends DataStructureImpl { - private final static Log _log = new Log(GarlicClove.class); + private Log _log; + private RouterContext _context; private DeliveryInstructions _instructions; private I2NPMessage _msg; private long _cloveId; @@ -34,30 +36,34 @@ public class GarlicClove extends DataStructureImpl { private Certificate _certificate; private int _replyAction; private SourceRouteBlock _sourceRouteBlock; + private I2NPMessageHandler _handler; /** No action requested with the source route block */ public final static int ACTION_NONE = 0; - /** - * A DeliveryStatusMessage is requested with the source route block using + /** + * A DeliveryStatusMessage is requested with the source route block using * the cloveId as the id received * */ public final static int ACTION_STATUS = 1; - /** + /** * No DeliveryStatusMessage is requested, but the source route block is * included for message specific replies * */ public final static int ACTION_MESSAGE_SPECIFIC = 2; - public GarlicClove() { - setInstructions(null); - setData(null); - setCloveId(-1); - setExpiration(null); - setCertificate(null); - setSourceRouteBlockAction(ACTION_NONE); - setSourceRouteBlock(null); + public GarlicClove(RouterContext context) { + _context = context; + _log = context.logManager().getLog(GarlicClove.class); + _handler = new I2NPMessageHandler(context); + setInstructions(null); + setData(null); + setCloveId(-1); + setExpiration(null); + setCertificate(null); + setSourceRouteBlockAction(ACTION_NONE); + setSourceRouteBlock(null); } public DeliveryInstructions getInstructions() { return _instructions; } @@ -76,94 +82,94 @@ public 
class GarlicClove extends DataStructureImpl { public void setSourceRouteBlock(SourceRouteBlock block) { _sourceRouteBlock = block; } public void readBytes(InputStream in) throws DataFormatException, IOException { - _instructions = new DeliveryInstructions(); - _instructions.readBytes(in); - _log.debug("Read instructions: " + _instructions); - try { - _msg = new I2NPMessageHandler().readMessage(in); - } catch (I2NPMessageException ime) { - throw new DataFormatException("Unable to read the message from a garlic clove", ime); - } - _cloveId = DataHelper.readLong(in, 4); - _expiration = DataHelper.readDate(in); - _log.debug("CloveID read: " + _cloveId + " expiration read: " + _expiration); - _certificate = new Certificate(); - _certificate.readBytes(in); - _log.debug("Read cert: " + _certificate); - int replyStyle = (int)DataHelper.readLong(in, 1); - setSourceRouteBlockAction(replyStyle); - if (replyStyle != ACTION_NONE) { - _sourceRouteBlock = new SourceRouteBlock(); - _sourceRouteBlock.readBytes(in); - } + _instructions = new DeliveryInstructions(); + _instructions.readBytes(in); + _log.debug("Read instructions: " + _instructions); + try { + _msg = _handler.readMessage(in); + } catch (I2NPMessageException ime) { + throw new DataFormatException("Unable to read the message from a garlic clove", ime); + } + _cloveId = DataHelper.readLong(in, 4); + _expiration = DataHelper.readDate(in); + _log.debug("CloveID read: " + _cloveId + " expiration read: " + _expiration); + _certificate = new Certificate(); + _certificate.readBytes(in); + _log.debug("Read cert: " + _certificate); + int replyStyle = (int)DataHelper.readLong(in, 1); + setSourceRouteBlockAction(replyStyle); + if (replyStyle != ACTION_NONE) { + _sourceRouteBlock = new SourceRouteBlock(); + _sourceRouteBlock.readBytes(in); + } } public void writeBytes(OutputStream out) throws DataFormatException, IOException { - StringBuffer error = new StringBuffer(); - if (_instructions == null) - error.append("No instructions "); 
- if (_msg == null) - error.append("No message "); - if (_cloveId < 0) - error.append("CloveID < 0 [").append(_cloveId).append("] "); - if (_expiration == null) - error.append("Expiration is null "); - if (_certificate == null) - error.append("Certificate is null "); - if (_replyAction < 0) - error.append("Reply action is < 0 [").append(_replyAction).append("] ");; - if (error.length() > 0) - throw new DataFormatException(error.toString()); - if ( (_replyAction != 0) && (_sourceRouteBlock == null) ) - throw new DataFormatException("Source route block must be specified for non-null action"); - _instructions.writeBytes(out); - - _log.debug("Wrote instructions: " + _instructions); - _msg.writeBytes(out); - DataHelper.writeLong(out, 4, _cloveId); - DataHelper.writeDate(out, _expiration); - _log.debug("CloveID written: " + _cloveId + " expiration written: " + _expiration); - _certificate.writeBytes(out); - _log.debug("Written cert: " + _certificate); - DataHelper.writeLong(out, 1, _replyAction); - if ( (_replyAction != 0) && (_sourceRouteBlock != null) ) - _sourceRouteBlock.writeBytes(out); + StringBuffer error = new StringBuffer(); + if (_instructions == null) + error.append("No instructions "); + if (_msg == null) + error.append("No message "); + if (_cloveId < 0) + error.append("CloveID < 0 [").append(_cloveId).append("] "); + if (_expiration == null) + error.append("Expiration is null "); + if (_certificate == null) + error.append("Certificate is null "); + if (_replyAction < 0) + error.append("Reply action is < 0 [").append(_replyAction).append("] ");; + if (error.length() > 0) + throw new DataFormatException(error.toString()); + if ( (_replyAction != 0) && (_sourceRouteBlock == null) ) + throw new DataFormatException("Source route block must be specified for non-null action"); + _instructions.writeBytes(out); + + _log.debug("Wrote instructions: " + _instructions); + _msg.writeBytes(out); + DataHelper.writeLong(out, 4, _cloveId); + DataHelper.writeDate(out, 
_expiration); + _log.debug("CloveID written: " + _cloveId + " expiration written: " + _expiration); + _certificate.writeBytes(out); + _log.debug("Written cert: " + _certificate); + DataHelper.writeLong(out, 1, _replyAction); + if ( (_replyAction != 0) && (_sourceRouteBlock != null) ) + _sourceRouteBlock.writeBytes(out); } public boolean equals(Object obj) { if ( (obj == null) || !(obj instanceof GarlicClove)) return false; - GarlicClove clove = (GarlicClove)obj; - return DataHelper.eq(getCertificate(), clove.getCertificate()) && - DataHelper.eq(getCloveId(), clove.getCloveId()) && - DataHelper.eq(getData(), clove.getData()) && - DataHelper.eq(getExpiration(), clove.getExpiration()) && - DataHelper.eq(getInstructions(), clove.getInstructions()) && - DataHelper.eq(getSourceRouteBlock(), clove.getSourceRouteBlock()) && - (getSourceRouteBlockAction() == clove.getSourceRouteBlockAction()); + GarlicClove clove = (GarlicClove)obj; + return DataHelper.eq(getCertificate(), clove.getCertificate()) && + DataHelper.eq(getCloveId(), clove.getCloveId()) && + DataHelper.eq(getData(), clove.getData()) && + DataHelper.eq(getExpiration(), clove.getExpiration()) && + DataHelper.eq(getInstructions(), clove.getInstructions()) && + DataHelper.eq(getSourceRouteBlock(), clove.getSourceRouteBlock()) && + (getSourceRouteBlockAction() == clove.getSourceRouteBlockAction()); } public int hashCode() { - return DataHelper.hashCode(getCertificate()) + - (int)getCloveId() + - DataHelper.hashCode(getData()) + - DataHelper.hashCode(getExpiration()) + - DataHelper.hashCode(getInstructions()) + - DataHelper.hashCode(getSourceRouteBlock()) + - getSourceRouteBlockAction(); + return DataHelper.hashCode(getCertificate()) + + (int)getCloveId() + + DataHelper.hashCode(getData()) + + DataHelper.hashCode(getExpiration()) + + DataHelper.hashCode(getInstructions()) + + DataHelper.hashCode(getSourceRouteBlock()) + + getSourceRouteBlockAction(); } public String toString() { - StringBuffer buf = new 
StringBuffer(128); + StringBuffer buf = new StringBuffer(128); buf.append("[GarlicClove: "); - buf.append("\n\tInstructions: ").append(getInstructions()); - buf.append("\n\tCertificate: ").append(getCertificate()); - buf.append("\n\tClove ID: ").append(getCloveId()); - buf.append("\n\tExpiration: ").append(getExpiration()); - buf.append("\n\tSource route style: ").append(getSourceRouteBlockAction()); - buf.append("\n\tSource route block: ").append(getSourceRouteBlock()); - buf.append("\n\tData: ").append(getData()); - buf.append("]"); - return buf.toString(); + buf.append("\n\tInstructions: ").append(getInstructions()); + buf.append("\n\tCertificate: ").append(getCertificate()); + buf.append("\n\tClove ID: ").append(getCloveId()); + buf.append("\n\tExpiration: ").append(getExpiration()); + buf.append("\n\tSource route style: ").append(getSourceRouteBlockAction()); + buf.append("\n\tSource route block: ").append(getSourceRouteBlock()); + buf.append("\n\tData: ").append(getData()); + buf.append("]"); + return buf.toString(); } } diff --git a/router/java/src/net/i2p/data/i2np/GarlicMessage.java b/router/java/src/net/i2p/data/i2np/GarlicMessage.java index 2971abc7b..21d802184 100644 --- a/router/java/src/net/i2p/data/i2np/GarlicMessage.java +++ b/router/java/src/net/i2p/data/i2np/GarlicMessage.java @@ -1,9 +1,9 @@ package net.i2p.data.i2np; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -15,6 +15,7 @@ import java.io.InputStream; import net.i2p.data.DataFormatException; import net.i2p.data.DataHelper; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Defines the wrapped garlic message @@ -26,33 +27,34 @@ public class GarlicMessage extends I2NPMessageImpl { public final static int MESSAGE_TYPE = 11; private byte[] _data; - public GarlicMessage() { - setData(null); + public GarlicMessage(I2PAppContext context) { + super(context); + setData(null); } public byte[] getData() { return _data; } public void setData(byte[] data) { _data = data; } public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException { - if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); + if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); try { - long len = DataHelper.readLong(in, 4); - _data = new byte[(int)len]; - int read = read(in, _data); - if (read != len) - throw new I2NPMessageException("Incorrect size read"); + long len = DataHelper.readLong(in, 4); + _data = new byte[(int)len]; + int read = read(in, _data); + if (read != len) + throw new I2NPMessageException("Incorrect size read"); } catch (DataFormatException dfe) { throw new I2NPMessageException("Unable to load the message data", dfe); } } protected byte[] writeMessage() throws I2NPMessageException, IOException { - if ( (_data == null) || (_data.length <= 0) ) throw new I2NPMessageException("Not enough data to write out"); - + if ( (_data == null) || (_data.length <= 0) ) throw new I2NPMessageException("Not enough data to write out"); + ByteArrayOutputStream os = new ByteArrayOutputStream(32); try { - DataHelper.writeLong(os, 4, _data.length); - os.write(_data); + DataHelper.writeLong(os, 4, _data.length); + os.write(_data); } catch (DataFormatException dfe) { throw new I2NPMessageException("Error writing out the message data", dfe); } @@ -62,7 +64,7 @@ public class 
GarlicMessage extends I2NPMessageImpl { public int getType() { return MESSAGE_TYPE; } public int hashCode() { - return DataHelper.hashCode(getData()); + return DataHelper.hashCode(getData()); } public boolean equals(Object object) { @@ -74,7 +76,7 @@ public class GarlicMessage extends I2NPMessageImpl { } } - public String toString() { + public String toString() { StringBuffer buf = new StringBuffer(); buf.append("[GarlicMessage: "); buf.append("\n\tData length: ").append(getData().length).append(" bytes"); diff --git a/router/java/src/net/i2p/data/i2np/I2NPMessageHandler.java b/router/java/src/net/i2p/data/i2np/I2NPMessageHandler.java index 196311610..aea8e2e99 100644 --- a/router/java/src/net/i2p/data/i2np/I2NPMessageHandler.java +++ b/router/java/src/net/i2p/data/i2np/I2NPMessageHandler.java @@ -1,9 +1,9 @@ package net.i2p.data.i2np; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -16,20 +16,25 @@ import net.i2p.data.DataFormatException; import net.i2p.data.DataHelper; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Handle messages from router to router * */ public class I2NPMessageHandler { - private final static Log _log = new Log(I2NPMessageHandler.class); + private Log _log; + private I2PAppContext _context; private long _lastReadBegin; private long _lastReadEnd; - public I2NPMessageHandler() {} + public I2NPMessageHandler(I2PAppContext context) { + _context = context; + _log = context.logManager().getLog(I2NPMessageHandler.class); + } /** * Read an I2NPMessage from the stream and return the fully populated object. - * + * * @throws IOException if there is an IO problem reading from the stream * @throws I2NPMessageException if there is a problem handling the particular * message - if it is an unknown type or has improper formatting, etc. @@ -37,10 +42,10 @@ public class I2NPMessageHandler { public I2NPMessage readMessage(InputStream in) throws IOException, I2NPMessageException { try { int type = (int)DataHelper.readLong(in, 1); - _lastReadBegin = Clock.getInstance().now(); + _lastReadBegin = System.currentTimeMillis(); I2NPMessage msg = createMessage(in, type); msg.readBytes(in, type); - _lastReadEnd = Clock.getInstance().now(); + _lastReadEnd = System.currentTimeMillis(); return msg; } catch (DataFormatException dfe) { throw new I2NPMessageException("Error reading the message", dfe); @@ -50,31 +55,31 @@ public class I2NPMessageHandler { public long getLastReadTime() { return _lastReadEnd - _lastReadBegin; } /** - * Yes, this is fairly ugly, but its the only place it ever happens. + * Yes, this is fairly ugly, but its the only place it ever happens. 
* */ - private static I2NPMessage createMessage(InputStream in, int type) throws IOException, I2NPMessageException { + private I2NPMessage createMessage(InputStream in, int type) throws IOException, I2NPMessageException { switch (type) { - case DatabaseStoreMessage.MESSAGE_TYPE: - return new DatabaseStoreMessage(); - case DatabaseLookupMessage.MESSAGE_TYPE: - return new DatabaseLookupMessage(); - case DatabaseSearchReplyMessage.MESSAGE_TYPE: - return new DatabaseSearchReplyMessage(); - case DeliveryStatusMessage.MESSAGE_TYPE: - return new DeliveryStatusMessage(); - case GarlicMessage.MESSAGE_TYPE: - return new GarlicMessage(); - case TunnelMessage.MESSAGE_TYPE: - return new TunnelMessage(); - case DataMessage.MESSAGE_TYPE: - return new DataMessage(); - case SourceRouteReplyMessage.MESSAGE_TYPE: - return new SourceRouteReplyMessage(); - case TunnelCreateMessage.MESSAGE_TYPE: - return new TunnelCreateMessage(); - case TunnelCreateStatusMessage.MESSAGE_TYPE: - return new TunnelCreateStatusMessage(); + case DatabaseStoreMessage.MESSAGE_TYPE: + return new DatabaseStoreMessage(_context); + case DatabaseLookupMessage.MESSAGE_TYPE: + return new DatabaseLookupMessage(_context); + case DatabaseSearchReplyMessage.MESSAGE_TYPE: + return new DatabaseSearchReplyMessage(_context); + case DeliveryStatusMessage.MESSAGE_TYPE: + return new DeliveryStatusMessage(_context); + case GarlicMessage.MESSAGE_TYPE: + return new GarlicMessage(_context); + case TunnelMessage.MESSAGE_TYPE: + return new TunnelMessage(_context); + case DataMessage.MESSAGE_TYPE: + return new DataMessage(_context); + case SourceRouteReplyMessage.MESSAGE_TYPE: + return new SourceRouteReplyMessage(_context); + case TunnelCreateMessage.MESSAGE_TYPE: + return new TunnelCreateMessage(_context); + case TunnelCreateStatusMessage.MESSAGE_TYPE: + return new TunnelCreateStatusMessage(_context); default: throw new I2NPMessageException("The type "+ type + " is an unknown I2NP message"); } @@ -82,7 +87,7 @@ public class 
I2NPMessageHandler { public static void main(String args[]) { try { - I2NPMessage msg = new I2NPMessageHandler().readMessage(new FileInputStream(args[0])); + I2NPMessage msg = new I2NPMessageHandler(I2PAppContext.getGlobalContext()).readMessage(new FileInputStream(args[0])); System.out.println(msg); } catch (Exception e) { e.printStackTrace(); diff --git a/router/java/src/net/i2p/data/i2np/I2NPMessageImpl.java b/router/java/src/net/i2p/data/i2np/I2NPMessageImpl.java index 581d89a61..f36a7fb63 100644 --- a/router/java/src/net/i2p/data/i2np/I2NPMessageImpl.java +++ b/router/java/src/net/i2p/data/i2np/I2NPMessageImpl.java @@ -1,9 +1,9 @@ package net.i2p.data.i2np; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -19,6 +19,7 @@ import net.i2p.data.DataStructureImpl; import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.I2PAppContext; /** * Defines the base message implementation. 
@@ -26,15 +27,18 @@ import net.i2p.util.RandomSource; * @author jrandom */ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPMessage { - private final static Log _log = new Log(I2NPMessageImpl.class); + private Log _log; + protected I2PAppContext _context; private Date _expiration; private long _uniqueId; public final static long DEFAULT_EXPIRATION_MS = 1*60*1000; // 1 minute by default - public I2NPMessageImpl() { - _expiration = new Date(Clock.getInstance().now() + DEFAULT_EXPIRATION_MS); - _uniqueId = RandomSource.getInstance().nextInt(Integer.MAX_VALUE); + public I2NPMessageImpl(I2PAppContext context) { + _context = context; + _log = context.logManager().getLog(I2NPMessageImpl.class); + _expiration = new Date(_context.clock().now() + DEFAULT_EXPIRATION_MS); + _uniqueId = _context.random().nextInt(Integer.MAX_VALUE); } /** @@ -45,8 +49,8 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM protected abstract byte[] writeMessage() throws I2NPMessageException, IOException; /** - * Read the body into the data structures, after the initial type byte and - * the uniqueId / expiration, using the current class's format as defined by + * Read the body into the data structures, after the initial type byte and + * the uniqueId / expiration, using the current class's format as defined by * the I2NP specification * * @param in stream to read from @@ -58,35 +62,35 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM protected abstract void readMessage(InputStream in, int type) throws I2NPMessageException, IOException; public void readBytes(InputStream in) throws DataFormatException, IOException { - try { - readBytes(in, -1); - } catch (I2NPMessageException ime) { - throw new DataFormatException("Bad bytes", ime); - } + try { + readBytes(in, -1); + } catch (I2NPMessageException ime) { + throw new DataFormatException("Bad bytes", ime); + } } public void readBytes(InputStream in, int 
type) throws I2NPMessageException, IOException { - try { - if (type < 0) - type = (int)DataHelper.readLong(in, 1); - _uniqueId = DataHelper.readLong(in, 4); - _expiration = DataHelper.readDate(in); - } catch (DataFormatException dfe) { - throw new I2NPMessageException("Error reading the message header", dfe); - } - _log.debug("Reading bytes: type = " + type + " / uniqueId : " + _uniqueId + " / expiration : " + _expiration); - readMessage(in, type); + try { + if (type < 0) + type = (int)DataHelper.readLong(in, 1); + _uniqueId = DataHelper.readLong(in, 4); + _expiration = DataHelper.readDate(in); + } catch (DataFormatException dfe) { + throw new I2NPMessageException("Error reading the message header", dfe); + } + _log.debug("Reading bytes: type = " + type + " / uniqueId : " + _uniqueId + " / expiration : " + _expiration); + readMessage(in, type); } public void writeBytes(OutputStream out) throws DataFormatException, IOException { - try { - DataHelper.writeLong(out, 1, getType()); - DataHelper.writeLong(out, 4, _uniqueId); - DataHelper.writeDate(out, _expiration); - _log.debug("Writing bytes: type = " + getType() + " / uniqueId : " + _uniqueId + " / expiration : " + _expiration); - byte[] data = writeMessage(); - out.write(data); - } catch (I2NPMessageException ime) { - throw new DataFormatException("Error writing out the I2NP message data", ime); - } + try { + DataHelper.writeLong(out, 1, getType()); + DataHelper.writeLong(out, 4, _uniqueId); + DataHelper.writeDate(out, _expiration); + _log.debug("Writing bytes: type = " + getType() + " / uniqueId : " + _uniqueId + " / expiration : " + _expiration); + byte[] data = writeMessage(); + out.write(data); + } catch (I2NPMessageException ime) { + throw new DataFormatException("Error writing out the I2NP message data", ime); + } } /** diff --git a/router/java/src/net/i2p/data/i2np/I2NPMessageReader.java b/router/java/src/net/i2p/data/i2np/I2NPMessageReader.java index 140e98a52..f34420c98 100644 --- 
a/router/java/src/net/i2p/data/i2np/I2NPMessageReader.java +++ b/router/java/src/net/i2p/data/i2np/I2NPMessageReader.java @@ -1,9 +1,9 @@ package net.i2p.data.i2np; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -13,9 +13,10 @@ import java.io.InputStream; import net.i2p.util.I2PThread; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** - * The I2NPMessageReader reads an InputStream (using + * The I2NPMessageReader reads an InputStream (using * {@link I2NPMessageHandler I2NPMessageHandler}) and passes out events to a registered * listener, where events are either messages being received, exceptions being * thrown, or the connection being closed. 
Routers should use this rather @@ -24,23 +25,26 @@ import net.i2p.util.Log; * @author jrandom */ public class I2NPMessageReader { - private final static Log _log = new Log(I2NPMessageReader.class); + private Log _log; + private RouterContext _context; private InputStream _stream; private I2NPMessageEventListener _listener; private I2NPMessageReaderRunner _reader; private Thread _readerThread; - public I2NPMessageReader(InputStream stream, I2NPMessageEventListener lsnr) { - this(stream, lsnr, "I2NP Reader"); + public I2NPMessageReader(RouterContext context, InputStream stream, I2NPMessageEventListener lsnr) { + this(context, stream, lsnr, "I2NP Reader"); } - public I2NPMessageReader(InputStream stream, I2NPMessageEventListener lsnr, String name) { - _stream = stream; + public I2NPMessageReader(RouterContext context, InputStream stream, I2NPMessageEventListener lsnr, String name) { + _context = context; + _log = context.logManager().getLog(I2NPMessageReader.class); + _stream = stream; setListener(lsnr); _reader = new I2NPMessageReaderRunner(); _readerThread = new I2PThread(_reader); - _readerThread.setName(name); - _readerThread.setDaemon(true); + _readerThread.setName(name); + _readerThread.setDaemon(true); } public void setListener(I2NPMessageEventListener lsnr) { _listener = lsnr; } @@ -50,7 +54,7 @@ public class I2NPMessageReader { * Instruct the reader to begin reading messages off the stream * */ - public void startReading() { _readerThread.start(); } + public void startReading() { _readerThread.start(); } /** * Have the already started reader pause its reading indefinitely * @@ -62,7 +66,7 @@ public class I2NPMessageReader { */ public void resumeReading() { _reader.resumeRunner(); } /** - * Cancel reading. + * Cancel reading. 
* */ public void stopReading() { _reader.cancelRunner(); } @@ -90,22 +94,22 @@ public class I2NPMessageReader { * */ public void disconnected(I2NPMessageReader reader); - } + } private class I2NPMessageReaderRunner implements Runnable { - private boolean _doRun; + private boolean _doRun; private boolean _stayAlive; - private I2NPMessageHandler _handler; + private I2NPMessageHandler _handler; public I2NPMessageReaderRunner() { _doRun = true; _stayAlive = true; - _handler = new I2NPMessageHandler(); + _handler = new I2NPMessageHandler(_context); } public void pauseRunner() { _doRun = false; } public void resumeRunner() { _doRun = true; } - public void cancelRunner() { + public void cancelRunner() { _doRun = false; - _stayAlive = false; + _stayAlive = false; } public void run() { while (_stayAlive) { @@ -114,16 +118,16 @@ public class I2NPMessageReader { try { I2NPMessage msg = _handler.readMessage(_stream); if (msg != null) { - long msToRead = _handler.getLastReadTime(); + long msToRead = _handler.getLastReadTime(); _listener.messageReceived(I2NPMessageReader.this, msg, msToRead); - } + } } catch (I2NPMessageException ime) { - //_log.warn("Error handling message", ime); + //_log.warn("Error handling message", ime); _listener.readError(I2NPMessageReader.this, ime); - _listener.disconnected(I2NPMessageReader.this); - cancelRunner(); + _listener.disconnected(I2NPMessageReader.this); + cancelRunner(); } catch (IOException ioe) { - _log.warn("IO Error handling message", ioe); + _log.warn("IO Error handling message", ioe); _listener.disconnected(I2NPMessageReader.this); cancelRunner(); } diff --git a/router/java/src/net/i2p/data/i2np/SourceRouteBlock.java b/router/java/src/net/i2p/data/i2np/SourceRouteBlock.java index 58f1de654..42eadfc06 100644 --- a/router/java/src/net/i2p/data/i2np/SourceRouteBlock.java +++ b/router/java/src/net/i2p/data/i2np/SourceRouteBlock.java @@ -26,6 +26,7 @@ import net.i2p.data.PublicKey; import net.i2p.data.SessionKey; import 
net.i2p.data.SessionTag; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** @@ -46,14 +47,14 @@ public class SourceRouteBlock extends DataStructureImpl { private long _decryptedExpiration; public SourceRouteBlock() { - setRouter(null); - setData(null); - setKey(null); - setTag((byte[])null); - _decryptedInstructions = null; - _decryptedMessageId = -1; - _decryptedCertificate = null; - _decryptedExpiration = -1; + setRouter(null); + setData(null); + setKey(null); + setTag((byte[])null); + _decryptedInstructions = null; + _decryptedMessageId = -1; + _decryptedCertificate = null; + _decryptedExpiration = -1; } /** @@ -92,9 +93,9 @@ public class SourceRouteBlock extends DataStructureImpl { public byte[] getTag() { return _tag; } public void setTag(SessionTag tag) { setTag(tag.getData()); } public void setTag(byte tag[]) { - if ( (tag != null) && (tag.length != SessionTag.BYTE_LENGTH) ) - throw new IllegalArgumentException("Tag must be either null or 32 bytes"); - _tag = tag; + if ( (tag != null) && (tag.length != SessionTag.BYTE_LENGTH) ) + throw new IllegalArgumentException("Tag must be either null or 32 bytes"); + _tag = tag; } /** @@ -126,100 +127,105 @@ public class SourceRouteBlock extends DataStructureImpl { * * @throws DataFormatException if the data is invalid or could not be encrypted */ - public void setData(DeliveryInstructions instructions, long messageId, Certificate cert, long expiration, PublicKey replyThrough) throws DataFormatException { - try { - ByteArrayOutputStream baos = new ByteArrayOutputStream(64); + public void setData(I2PAppContext ctx, DeliveryInstructions instructions, + long messageId, Certificate cert, long expiration, + PublicKey replyThrough) throws DataFormatException { + try { + ByteArrayOutputStream baos = new ByteArrayOutputStream(64); - _decryptedInstructions = instructions; - _decryptedMessageId = messageId; - _decryptedCertificate = cert; - _decryptedExpiration = expiration; - - instructions.writeBytes(baos); - 
DataHelper.writeLong(baos, 4, messageId); - cert.writeBytes(baos); - DataHelper.writeDate(baos, new Date(expiration)); + _decryptedInstructions = instructions; + _decryptedMessageId = messageId; + _decryptedCertificate = cert; + _decryptedExpiration = expiration; - int paddedSize = 256; - SessionKey sessKey = null; - SessionTag tag = null; - if (instructions.getDelayRequested()) { - // always use a new key if we're delaying, since the reply block may not be used within the - // window of a session - sessKey = KeyGenerator.getInstance().generateSessionKey(); - tag = null; - _log.debug("Delay requested - creating a new session key"); - } else { - sessKey = SessionKeyManager.getInstance().getCurrentKey(replyThrough); - if (sessKey == null) { - sessKey = KeyGenerator.getInstance().generateSessionKey(); - tag = null; - _log.debug("No delay requested, but no session key is known"); - } else { - tag = SessionKeyManager.getInstance().consumeNextAvailableTag(replyThrough, sessKey); - } - } - byte encData[] = ElGamalAESEngine.encrypt(baos.toByteArray(), replyThrough, sessKey, null, tag, paddedSize); - setData(encData); - } catch (IOException ioe) { - throw new DataFormatException("Error writing out the source route block data", ioe); - } catch (DataFormatException dfe) { - throw new DataFormatException("Error writing out the source route block data", dfe); - } + instructions.writeBytes(baos); + DataHelper.writeLong(baos, 4, messageId); + cert.writeBytes(baos); + DataHelper.writeDate(baos, new Date(expiration)); + + int paddedSize = 256; + SessionKey sessKey = null; + SessionTag tag = null; + if (instructions.getDelayRequested()) { + // always use a new key if we're delaying, since the reply block may not be used within the + // window of a session + sessKey = ctx.keyGenerator().generateSessionKey(); + tag = null; + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Delay requested - creating a new session key"); + } else { + sessKey = 
ctx.sessionKeyManager().getCurrentKey(replyThrough); + if (sessKey == null) { + sessKey = ctx.keyGenerator().generateSessionKey(); + tag = null; + if (_log.shouldLog(Log.DEBUG)) + _log.debug("No delay requested, but no session key is known"); + } else { + tag = ctx.sessionKeyManager().consumeNextAvailableTag(replyThrough, sessKey); + } + } + byte encData[] = ctx.elGamalAESEngine().encrypt(baos.toByteArray(), replyThrough, + sessKey, null, tag, paddedSize); + setData(encData); + } catch (IOException ioe) { + throw new DataFormatException("Error writing out the source route block data", ioe); + } catch (DataFormatException dfe) { + throw new DataFormatException("Error writing out the source route block data", dfe); + } } public void readBytes(InputStream in) throws DataFormatException, IOException { - _router = new Hash(); - _router.readBytes(in); - int size = (int)DataHelper.readLong(in, 2); - _data = new byte[size]; - int read = read(in, _data); - if (read != _data.length) - throw new DataFormatException("Incorrect # of bytes read for source route block: " + read); - _key = new SessionKey(); - _key.readBytes(in); - _tag = new byte[32]; - read = read(in, _tag); - if (read != _tag.length) - throw new DataFormatException("Incorrect # of bytes read for session tag: " + read); + _router = new Hash(); + _router.readBytes(in); + int size = (int)DataHelper.readLong(in, 2); + _data = new byte[size]; + int read = read(in, _data); + if (read != _data.length) + throw new DataFormatException("Incorrect # of bytes read for source route block: " + read); + _key = new SessionKey(); + _key.readBytes(in); + _tag = new byte[32]; + read = read(in, _tag); + if (read != _tag.length) + throw new DataFormatException("Incorrect # of bytes read for session tag: " + read); } public void writeBytes(OutputStream out) throws DataFormatException, IOException { if ( (_router == null) || (_data == null) || (_key == null) || (_tag == null) || (_tag.length != 32) ) - throw new 
DataFormatException("Insufficient data to write"); - _router.writeBytes(out); - DataHelper.writeLong(out, 2, _data.length); - out.write(_data); - _key.writeBytes(out); - out.write(_tag); + throw new DataFormatException("Insufficient data to write"); + _router.writeBytes(out); + DataHelper.writeLong(out, 2, _data.length); + out.write(_data); + _key.writeBytes(out); + out.write(_tag); } public boolean equals(Object obj) { if ( (obj == null) || !(obj instanceof SourceRouteBlock)) return false; - SourceRouteBlock block = (SourceRouteBlock)obj; - return DataHelper.eq(getRouter(), block.getRouter()) && - DataHelper.eq(getData(), block.getData()) && - DataHelper.eq(getKey(), block.getKey()) && - DataHelper.eq(getTag(), block.getTag()); + SourceRouteBlock block = (SourceRouteBlock)obj; + return DataHelper.eq(getRouter(), block.getRouter()) && + DataHelper.eq(getData(), block.getData()) && + DataHelper.eq(getKey(), block.getKey()) && + DataHelper.eq(getTag(), block.getTag()); } public int hashCode() { return DataHelper.hashCode(getRouter()) + - DataHelper.hashCode(getData()) + - DataHelper.hashCode(getKey()) + - DataHelper.hashCode(getTag()); + DataHelper.hashCode(getData()) + + DataHelper.hashCode(getKey()) + + DataHelper.hashCode(getTag()); } public String toString() { - StringBuffer buf = new StringBuffer(128); + StringBuffer buf = new StringBuffer(128); buf.append("[SourceRouteBlock: "); - buf.append("\n\tRouter: ").append(getRouter()); - buf.append("\n\tData: ").append(DataHelper.toString(getData(), getData().length)); - buf.append("\n\tTag: ").append(DataHelper.toString(getTag(), (getTag() != null ? getTag().length : 0))); - buf.append("\n\tKey: ").append(getKey()); - buf.append("]"); - return buf.toString(); + buf.append("\n\tRouter: ").append(getRouter()); + buf.append("\n\tData: ").append(DataHelper.toString(getData(), getData().length)); + buf.append("\n\tTag: ").append(DataHelper.toString(getTag(), (getTag() != null ? 
getTag().length : 0))); + buf.append("\n\tKey: ").append(getKey()); + buf.append("]"); + return buf.toString(); } } diff --git a/router/java/src/net/i2p/data/i2np/SourceRouteReplyMessage.java b/router/java/src/net/i2p/data/i2np/SourceRouteReplyMessage.java index 017ee6bb2..29c356469 100644 --- a/router/java/src/net/i2p/data/i2np/SourceRouteReplyMessage.java +++ b/router/java/src/net/i2p/data/i2np/SourceRouteReplyMessage.java @@ -19,6 +19,7 @@ import net.i2p.data.DataFormatException; import net.i2p.data.DataHelper; import net.i2p.data.PrivateKey; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Defines a message directed by a source route block to deliver a message to an @@ -35,14 +36,17 @@ public class SourceRouteReplyMessage extends I2NPMessageImpl { private long _decryptedMessageId; private Certificate _decryptedCertificate; private long _decryptedExpiration; + private I2NPMessageHandler _handler; - public SourceRouteReplyMessage() { - _encryptedHeader = null; - _message = null; - _decryptedInstructions = null; - _decryptedMessageId = -1; - _decryptedCertificate = null; - _decryptedExpiration = -1; + public SourceRouteReplyMessage(I2PAppContext context) { + super(context); + _handler = new I2NPMessageHandler(context); + _encryptedHeader = null; + _message = null; + _decryptedInstructions = null; + _decryptedMessageId = -1; + _decryptedCertificate = null; + _decryptedExpiration = -1; } /** @@ -77,54 +81,56 @@ public class SourceRouteReplyMessage extends I2NPMessageImpl { * @throws DataFormatException if the decryption fails or if the data is somehow malformed */ public void decryptHeader(PrivateKey key) throws DataFormatException { - if ( (_encryptedHeader == null) || (_encryptedHeader.length <= 0) ) - throw new DataFormatException("No header to decrypt"); - - byte decr[] = ElGamalAESEngine.decrypt(_encryptedHeader, key); - - if (decr == null) - throw new DataFormatException("Decrypted data is null"); - - try { - ByteArrayInputStream bais = new 
ByteArrayInputStream(decr); - - _decryptedInstructions = new DeliveryInstructions(); - _decryptedInstructions.readBytes(bais); - _decryptedMessageId = DataHelper.readLong(bais, 4); - _decryptedCertificate = new Certificate(); - _decryptedCertificate.readBytes(bais); - _decryptedExpiration = DataHelper.readDate(bais).getTime(); + if ( (_encryptedHeader == null) || (_encryptedHeader.length <= 0) ) + throw new DataFormatException("No header to decrypt"); - } catch (IOException ioe) { - throw new DataFormatException("Error reading the source route reply header", ioe); - } catch (DataFormatException dfe) { - throw new DataFormatException("Error reading the source route reply header", dfe); - } + byte decr[] = _context.elGamalAESEngine().decrypt(_encryptedHeader, key); + + if (decr == null) + throw new DataFormatException("Decrypted data is null"); + + try { + ByteArrayInputStream bais = new ByteArrayInputStream(decr); + + _decryptedInstructions = new DeliveryInstructions(); + _decryptedInstructions.readBytes(bais); + _decryptedMessageId = DataHelper.readLong(bais, 4); + _decryptedCertificate = new Certificate(); + _decryptedCertificate.readBytes(bais); + _decryptedExpiration = DataHelper.readDate(bais).getTime(); + + } catch (IOException ioe) { + throw new DataFormatException("Error reading the source route reply header", ioe); + } catch (DataFormatException dfe) { + throw new DataFormatException("Error reading the source route reply header", dfe); + } } public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException { - if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); + if (type != MESSAGE_TYPE) + throw new I2NPMessageException("Message type is incorrect for this message"); try { - int headerSize = (int)DataHelper.readLong(in, 2); - _encryptedHeader = new byte[headerSize]; - int read = read(in, _encryptedHeader); - if (read != headerSize) - throw new DataFormatException("Not enough bytes 
to read the header (read = " + read + ", required = " + headerSize + ")"); - _message = new I2NPMessageHandler().readMessage(in); + int headerSize = (int)DataHelper.readLong(in, 2); + _encryptedHeader = new byte[headerSize]; + int read = read(in, _encryptedHeader); + if (read != headerSize) + throw new DataFormatException("Not enough bytes to read the header (read = " + read + + ", required = " + headerSize + ")"); + _message = _handler.readMessage(in); } catch (DataFormatException dfe) { throw new I2NPMessageException("Unable to load the message data", dfe); } } protected byte[] writeMessage() throws I2NPMessageException, IOException { - if ( (_encryptedHeader == null) || (_message == null) ) - throw new I2NPMessageException("Not enough data to write out"); - - ByteArrayOutputStream os = new ByteArrayOutputStream(1024); + if ( (_encryptedHeader == null) || (_message == null) ) + throw new I2NPMessageException("Not enough data to write out"); + + ByteArrayOutputStream os = new ByteArrayOutputStream(1024); try { - DataHelper.writeLong(os, 2, _encryptedHeader.length); - os.write(_encryptedHeader); - _message.writeBytes(os); + DataHelper.writeLong(os, 2, _encryptedHeader.length); + os.write(_encryptedHeader); + _message.writeBytes(os); } catch (DataFormatException dfe) { throw new I2NPMessageException("Error writing out the message data", dfe); } @@ -134,15 +140,15 @@ public class SourceRouteReplyMessage extends I2NPMessageImpl { public int getType() { return MESSAGE_TYPE; } public int hashCode() { - return DataHelper.hashCode(_encryptedHeader) + - DataHelper.hashCode(_message); + return DataHelper.hashCode(_encryptedHeader) + + DataHelper.hashCode(_message); } public boolean equals(Object object) { if ( (object != null) && (object instanceof SourceRouteReplyMessage) ) { SourceRouteReplyMessage msg = (SourceRouteReplyMessage)object; return DataHelper.eq(_message,msg._message) && - DataHelper.eq(_encryptedHeader,msg._encryptedHeader); + 
DataHelper.eq(_encryptedHeader,msg._encryptedHeader); } else { return false; } diff --git a/router/java/src/net/i2p/data/i2np/TunnelCreateMessage.java b/router/java/src/net/i2p/data/i2np/TunnelCreateMessage.java index b6f851646..857b0517f 100644 --- a/router/java/src/net/i2p/data/i2np/TunnelCreateMessage.java +++ b/router/java/src/net/i2p/data/i2np/TunnelCreateMessage.java @@ -1,9 +1,9 @@ package net.i2p.data.i2np; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -18,6 +18,7 @@ import net.i2p.data.DataHelper; import net.i2p.data.Hash; import net.i2p.data.TunnelId; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Defines the message sent to a router to request that it participate in a @@ -52,23 +53,24 @@ public class TunnelCreateMessage extends I2NPMessageImpl { private final static long FLAG_DUMMY = 1 << 7; private final static long FLAG_REORDER = 1 << 6; - public TunnelCreateMessage() { - setParticipantType(-1); - setNextRouter(null); - setTunnelId(null); - setTunnelDurationSeconds(-1); - setConfigurationKey(null); - setMaxPeakMessagesPerMin(-1); - setMaxAvgMessagesPerMin(-1); - setMaxPeakBytesPerMin(-1); - setMaxAvgBytesPerMin(-1); - setIncludeDummyTraffic(false); - setReorderMessages(false); - setVerificationPublicKey(null); - setVerificationPrivateKey(null); - setTunnelKey(null); - setCertificate(null); - setReplyBlock(null); + public TunnelCreateMessage(I2PAppContext context) { + super(context); + setParticipantType(-1); + setNextRouter(null); + setTunnelId(null); + 
setTunnelDurationSeconds(-1); + setConfigurationKey(null); + setMaxPeakMessagesPerMin(-1); + setMaxAvgMessagesPerMin(-1); + setMaxPeakBytesPerMin(-1); + setMaxAvgBytesPerMin(-1); + setIncludeDummyTraffic(false); + setReorderMessages(false); + setVerificationPublicKey(null); + setVerificationPrivateKey(null); + setTunnelKey(null); + setCertificate(null); + setReplyBlock(null); } public void setParticipantType(int type) { _participantType = type; } @@ -105,41 +107,41 @@ public class TunnelCreateMessage extends I2NPMessageImpl { public SourceRouteBlock getReplyBlock() { return _replyBlock; } public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException { - if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); + if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); try { - _participantType = (int)DataHelper.readLong(in, 1); - if (_participantType != PARTICIPANT_TYPE_ENDPOINT) { - _nextRouter = new Hash(); - _nextRouter.readBytes(in); - } - _tunnelId = new TunnelId(); - _tunnelId.readBytes(in); - _tunnelDuration = DataHelper.readLong(in, 4); - _configKey = new TunnelConfigurationSessionKey(); - _configKey.readBytes(in); - _maxPeakMessagesPerMin = DataHelper.readLong(in, 4); - _maxAvgMessagesPerMin = DataHelper.readLong(in, 4); - _maxPeakBytesPerMin = DataHelper.readLong(in, 4); - _maxAvgBytesPerMin = DataHelper.readLong(in, 4); - - int flags = (int)DataHelper.readLong(in, 1); - _includeDummyTraffic = flagsIncludeDummy(flags); - _reorderMessages = flagsReorder(flags); - - _verificationPubKey = new TunnelSigningPublicKey(); - _verificationPubKey.readBytes(in); - if (_participantType == PARTICIPANT_TYPE_GATEWAY) { - _verificationPrivKey = new TunnelSigningPrivateKey(); - _verificationPrivKey.readBytes(in); - } - if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT) || (_participantType == PARTICIPANT_TYPE_GATEWAY) ) { - _tunnelKey = new 
TunnelSessionKey(); - _tunnelKey.readBytes(in); - } - _certificate = new Certificate(); - _certificate.readBytes(in); - _replyBlock = new SourceRouteBlock(); - _replyBlock.readBytes(in); + _participantType = (int)DataHelper.readLong(in, 1); + if (_participantType != PARTICIPANT_TYPE_ENDPOINT) { + _nextRouter = new Hash(); + _nextRouter.readBytes(in); + } + _tunnelId = new TunnelId(); + _tunnelId.readBytes(in); + _tunnelDuration = DataHelper.readLong(in, 4); + _configKey = new TunnelConfigurationSessionKey(); + _configKey.readBytes(in); + _maxPeakMessagesPerMin = DataHelper.readLong(in, 4); + _maxAvgMessagesPerMin = DataHelper.readLong(in, 4); + _maxPeakBytesPerMin = DataHelper.readLong(in, 4); + _maxAvgBytesPerMin = DataHelper.readLong(in, 4); + + int flags = (int)DataHelper.readLong(in, 1); + _includeDummyTraffic = flagsIncludeDummy(flags); + _reorderMessages = flagsReorder(flags); + + _verificationPubKey = new TunnelSigningPublicKey(); + _verificationPubKey.readBytes(in); + if (_participantType == PARTICIPANT_TYPE_GATEWAY) { + _verificationPrivKey = new TunnelSigningPrivateKey(); + _verificationPrivKey.readBytes(in); + } + if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT) || (_participantType == PARTICIPANT_TYPE_GATEWAY) ) { + _tunnelKey = new TunnelSessionKey(); + _tunnelKey.readBytes(in); + } + _certificate = new Certificate(); + _certificate.readBytes(in); + _replyBlock = new SourceRouteBlock(); + _replyBlock.readBytes(in); } catch (DataFormatException dfe) { throw new I2NPMessageException("Unable to load the message data", dfe); } @@ -148,146 +150,99 @@ public class TunnelCreateMessage extends I2NPMessageImpl { protected byte[] writeMessage() throws I2NPMessageException, IOException { ByteArrayOutputStream os = new ByteArrayOutputStream(32); try { - DataHelper.writeLong(os, 1, _participantType); - if (_participantType != PARTICIPANT_TYPE_ENDPOINT) { - _nextRouter.writeBytes(os); - } - _tunnelId.writeBytes(os); - DataHelper.writeLong(os, 4, 
_tunnelDuration); - _configKey.writeBytes(os); - - DataHelper.writeLong(os, 4, _maxPeakMessagesPerMin); - DataHelper.writeLong(os, 4, _maxAvgMessagesPerMin); - DataHelper.writeLong(os, 4, _maxPeakBytesPerMin); - DataHelper.writeLong(os, 4, _maxAvgBytesPerMin); - - long flags = getFlags(); - DataHelper.writeLong(os, 1, flags); - - _verificationPubKey.writeBytes(os); - if (_participantType == PARTICIPANT_TYPE_GATEWAY) { - _verificationPrivKey.writeBytes(os); - } - if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT) || (_participantType == PARTICIPANT_TYPE_GATEWAY) ) { - _tunnelKey.writeBytes(os); - } - _certificate.writeBytes(os); - _replyBlock.writeBytes(os); + DataHelper.writeLong(os, 1, _participantType); + if (_participantType != PARTICIPANT_TYPE_ENDPOINT) { + _nextRouter.writeBytes(os); + } + _tunnelId.writeBytes(os); + DataHelper.writeLong(os, 4, _tunnelDuration); + _configKey.writeBytes(os); + + DataHelper.writeLong(os, 4, _maxPeakMessagesPerMin); + DataHelper.writeLong(os, 4, _maxAvgMessagesPerMin); + DataHelper.writeLong(os, 4, _maxPeakBytesPerMin); + DataHelper.writeLong(os, 4, _maxAvgBytesPerMin); + + long flags = getFlags(); + DataHelper.writeLong(os, 1, flags); + + _verificationPubKey.writeBytes(os); + if (_participantType == PARTICIPANT_TYPE_GATEWAY) { + _verificationPrivKey.writeBytes(os); + } + if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT) || (_participantType == PARTICIPANT_TYPE_GATEWAY) ) { + _tunnelKey.writeBytes(os); + } + _certificate.writeBytes(os); + _replyBlock.writeBytes(os); } catch (Throwable t) { throw new I2NPMessageException("Error writing out the message data", t); } - /* - try { - DataHelper.writeLong(os, 1, _participantType); - if (_participantType != PARTICIPANT_TYPE_ENDPOINT) { - if (_nextRouter == null) - throw new I2NPMessageException("Next router is not defined"); - _nextRouter.writeBytes(os); - } - if (_tunnelId == null) - throw new I2NPMessageException("Tunnel ID is not defined"); - _tunnelId.writeBytes(os); - if 
(_tunnelDuration < 0) - throw new I2NPMessageException("Tunnel duration is negative"); - DataHelper.writeLong(os, 4, _tunnelDuration); - if (_configKey == null) - throw new I2NPMessageException("Configuration key is not defined"); - _configKey.writeBytes(os); - if ( (_maxPeakMessagesPerMin < 0) || (_maxAvgMessagesPerMin < 0) || - (_maxAvgMessagesPerMin < 0) || (_maxAvgBytesPerMin < 0) ) - throw new I2NPMessageException("Negative limits defined"); - - long flags = getFlags(); - DataHelper.writeLong(os, 1, flags); - - if (_verificationPubKey == null) - throw new I2NPMessageException("Verification public key is not defined"); - _verificationPubKey.writeBytes(os); - if (_participantType == PARTICIPANT_TYPE_GATEWAY) { - if (_verificationPrivKey == null) - throw new I2NPMessageException("Verification private key is needed and not defined"); - _verificationPrivKey.writeBytes(os); - } - if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT) || (_participantType == PARTICIPANT_TYPE_GATEWAY) ) { - if (_tunnelKey == null) - throw new I2NPMessageException("Tunnel key is needed and not defined"); - _tunnelKey.writeBytes(os); - } - if (_certificate == null) - throw new I2NPMessageException("Certificate is not defined"); - _certificate.writeBytes(os); - if (_replyBlock == null) - throw new I2NPMessageException("Reply block not defined"); - _replyBlock.writeBytes(os); - } catch (DataFormatException dfe) { - throw new I2NPMessageException("Error writing out the message data", dfe); - } - */ return os.toByteArray(); } private boolean flagsIncludeDummy(long flags) { - return (0 != (flags & FLAG_DUMMY)); + return (0 != (flags & FLAG_DUMMY)); } private boolean flagsReorder(long flags) { - return (0 != (flags & FLAG_REORDER)); + return (0 != (flags & FLAG_REORDER)); } private long getFlags() { - long val = 0L; - if (getIncludeDummyTraffic()) - val = val | FLAG_DUMMY; - if (getReorderMessages()) - val = val | FLAG_REORDER; - return val; + long val = 0L; + if (getIncludeDummyTraffic()) + 
val = val | FLAG_DUMMY; + if (getReorderMessages()) + val = val | FLAG_REORDER; + return val; } public int getType() { return MESSAGE_TYPE; } public int hashCode() { - return (int)(DataHelper.hashCode(getCertificate()) + - DataHelper.hashCode(getConfigurationKey()) + - DataHelper.hashCode(getNextRouter()) + - DataHelper.hashCode(getReplyBlock()) + - DataHelper.hashCode(getTunnelId()) + - DataHelper.hashCode(getTunnelKey()) + - DataHelper.hashCode(getVerificationPrivateKey()) + - DataHelper.hashCode(getVerificationPublicKey()) + - (getIncludeDummyTraffic() ? 1 : 0) + - getMaxAvgBytesPerMin() + - getMaxAvgMessagesPerMin() + - getMaxPeakBytesPerMin() + - getMaxPeakMessagesPerMin() + - getParticipantType() + - (getReorderMessages() ? 1 : 0) + - getTunnelDurationSeconds()); + return (int)(DataHelper.hashCode(getCertificate()) + + DataHelper.hashCode(getConfigurationKey()) + + DataHelper.hashCode(getNextRouter()) + + DataHelper.hashCode(getReplyBlock()) + + DataHelper.hashCode(getTunnelId()) + + DataHelper.hashCode(getTunnelKey()) + + DataHelper.hashCode(getVerificationPrivateKey()) + + DataHelper.hashCode(getVerificationPublicKey()) + + (getIncludeDummyTraffic() ? 1 : 0) + + getMaxAvgBytesPerMin() + + getMaxAvgMessagesPerMin() + + getMaxPeakBytesPerMin() + + getMaxPeakMessagesPerMin() + + getParticipantType() + + (getReorderMessages() ? 
1 : 0) + + getTunnelDurationSeconds()); } public boolean equals(Object object) { if ( (object != null) && (object instanceof TunnelCreateMessage) ) { TunnelCreateMessage msg = (TunnelCreateMessage)object; - return DataHelper.eq(getCertificate(), msg.getCertificate()) && - DataHelper.eq(getConfigurationKey(), msg.getConfigurationKey()) && - DataHelper.eq(getNextRouter(), msg.getNextRouter()) && - DataHelper.eq(getReplyBlock(), msg.getReplyBlock()) && - DataHelper.eq(getTunnelId(), msg.getTunnelId()) && - DataHelper.eq(getTunnelKey(), msg.getTunnelKey()) && - DataHelper.eq(getVerificationPrivateKey(), msg.getVerificationPrivateKey()) && - DataHelper.eq(getVerificationPublicKey(), msg.getVerificationPublicKey()) && - (getIncludeDummyTraffic() == msg.getIncludeDummyTraffic()) && - (getMaxAvgBytesPerMin() == msg.getMaxAvgBytesPerMin()) && - (getMaxAvgMessagesPerMin() == msg.getMaxAvgMessagesPerMin()) && - (getMaxPeakBytesPerMin() == msg.getMaxPeakBytesPerMin()) && - (getMaxPeakMessagesPerMin() == msg.getMaxPeakMessagesPerMin()) && - (getParticipantType() == msg.getParticipantType()) && - (getReorderMessages() == msg.getReorderMessages()) && - (getTunnelDurationSeconds() == msg.getTunnelDurationSeconds()); + return DataHelper.eq(getCertificate(), msg.getCertificate()) && + DataHelper.eq(getConfigurationKey(), msg.getConfigurationKey()) && + DataHelper.eq(getNextRouter(), msg.getNextRouter()) && + DataHelper.eq(getReplyBlock(), msg.getReplyBlock()) && + DataHelper.eq(getTunnelId(), msg.getTunnelId()) && + DataHelper.eq(getTunnelKey(), msg.getTunnelKey()) && + DataHelper.eq(getVerificationPrivateKey(), msg.getVerificationPrivateKey()) && + DataHelper.eq(getVerificationPublicKey(), msg.getVerificationPublicKey()) && + (getIncludeDummyTraffic() == msg.getIncludeDummyTraffic()) && + (getMaxAvgBytesPerMin() == msg.getMaxAvgBytesPerMin()) && + (getMaxAvgMessagesPerMin() == msg.getMaxAvgMessagesPerMin()) && + (getMaxPeakBytesPerMin() == msg.getMaxPeakBytesPerMin()) && + 
(getMaxPeakMessagesPerMin() == msg.getMaxPeakMessagesPerMin()) && + (getParticipantType() == msg.getParticipantType()) && + (getReorderMessages() == msg.getReorderMessages()) && + (getTunnelDurationSeconds() == msg.getTunnelDurationSeconds()); } else { return false; } } - public String toString() { + public String toString() { StringBuffer buf = new StringBuffer(); buf.append("[TunnelCreateMessage: "); buf.append("\n\tParticipant Type: ").append(getParticipantType()); diff --git a/router/java/src/net/i2p/data/i2np/TunnelCreateStatusMessage.java b/router/java/src/net/i2p/data/i2np/TunnelCreateStatusMessage.java index f18fc17b9..d8b276898 100644 --- a/router/java/src/net/i2p/data/i2np/TunnelCreateStatusMessage.java +++ b/router/java/src/net/i2p/data/i2np/TunnelCreateStatusMessage.java @@ -1,9 +1,9 @@ package net.i2p.data.i2np; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -17,9 +17,10 @@ import net.i2p.data.DataHelper; import net.i2p.data.Hash; import net.i2p.data.TunnelId; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** - * Defines the message a router sends to another router in reply to a + * Defines the message a router sends to another router in reply to a * TunnelCreateMessage * * @author jrandom @@ -37,10 +38,11 @@ public class TunnelCreateStatusMessage extends I2NPMessageImpl { public final static int STATUS_FAILED_CERTIFICATE = 3; public final static int STATUS_FAILED_DELETED = 100; - public TunnelCreateStatusMessage() { - setTunnelId(null); - setStatus(-1); - setFromHash(null); + public TunnelCreateStatusMessage(I2PAppContext context) { + super(context); + setTunnelId(null); + setStatus(-1); + setFromHash(null); } public TunnelId getTunnelId() { return _tunnelId; } @@ -56,26 +58,26 @@ public class TunnelCreateStatusMessage extends I2NPMessageImpl { public void setFromHash(Hash from) { _from = from; } public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException { - if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); + if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); try { - _tunnelId = new TunnelId(); - _tunnelId.readBytes(in); - _status = (int)DataHelper.readLong(in, 1); - _from = new Hash(); - _from.readBytes(in); + _tunnelId = new TunnelId(); + _tunnelId.readBytes(in); + _status = (int)DataHelper.readLong(in, 1); + _from = new Hash(); + _from.readBytes(in); } catch (DataFormatException dfe) { throw new I2NPMessageException("Unable to load the message data", dfe); } } protected byte[] writeMessage() throws I2NPMessageException, IOException { - if ( (_tunnelId == null) || (_from == null) ) throw new I2NPMessageException("Not enough data to write out"); - + if ( (_tunnelId == null) || (_from == null) ) throw new I2NPMessageException("Not enough data to write out"); + 
ByteArrayOutputStream os = new ByteArrayOutputStream(32); try { - _tunnelId.writeBytes(os); - DataHelper.writeLong(os, 1, (_status < 0 ? 255 : _status)); - _from.writeBytes(os); + _tunnelId.writeBytes(os); + DataHelper.writeLong(os, 1, (_status < 0 ? 255 : _status)); + _from.writeBytes(os); } catch (DataFormatException dfe) { throw new I2NPMessageException("Error writing out the message data", dfe); } @@ -85,23 +87,23 @@ public class TunnelCreateStatusMessage extends I2NPMessageImpl { public int getType() { return MESSAGE_TYPE; } public int hashCode() { - return DataHelper.hashCode(getTunnelId()) + - getStatus() + - DataHelper.hashCode(getFromHash()); + return DataHelper.hashCode(getTunnelId()) + + getStatus() + + DataHelper.hashCode(getFromHash()); } public boolean equals(Object object) { if ( (object != null) && (object instanceof TunnelCreateStatusMessage) ) { TunnelCreateStatusMessage msg = (TunnelCreateStatusMessage)object; return DataHelper.eq(getTunnelId(),msg.getTunnelId()) && - DataHelper.eq(getFromHash(),msg.getFromHash()) && - (getStatus() == msg.getStatus()); + DataHelper.eq(getFromHash(),msg.getFromHash()) && + (getStatus() == msg.getStatus()); } else { return false; } } - public String toString() { + public String toString() { StringBuffer buf = new StringBuffer(); buf.append("[TunnelCreateStatusMessage: "); buf.append("\n\tTunnel ID: ").append(getTunnelId()); diff --git a/router/java/src/net/i2p/data/i2np/TunnelMessage.java b/router/java/src/net/i2p/data/i2np/TunnelMessage.java index 26aeb38dc..a723b2074 100644 --- a/router/java/src/net/i2p/data/i2np/TunnelMessage.java +++ b/router/java/src/net/i2p/data/i2np/TunnelMessage.java @@ -1,9 +1,9 @@ package net.i2p.data.i2np; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. 
- * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -16,6 +16,7 @@ import net.i2p.data.DataFormatException; import net.i2p.data.DataHelper; import net.i2p.data.TunnelId; import net.i2p.util.Log; +import net.i2p.I2PAppContext; /** * Defines the message sent between routers for tunnel delivery @@ -34,11 +35,12 @@ public class TunnelMessage extends I2NPMessageImpl { private final static int FLAG_INCLUDESTRUCTURE = 0; private final static int FLAG_DONT_INCLUDESTRUCTURE = 1; - public TunnelMessage() { - setTunnelId(null); - setData(null); - setVerificationStructure(null); - setEncryptedDeliveryInstructions(null); + public TunnelMessage(I2PAppContext context) { + super(context); + setTunnelId(null); + setData(null); + setVerificationStructure(null); + setEncryptedDeliveryInstructions(null); } public TunnelId getTunnelId() { return _tunnelId; } @@ -54,85 +56,85 @@ public class TunnelMessage extends I2NPMessageImpl { public void setEncryptedDeliveryInstructions(byte instructions[]) { _encryptedInstructions = instructions; } public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException { - if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); + if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); try { - _tunnelId = new TunnelId(); - _tunnelId.readBytes(in); - _log.debug("Read tunnel message for tunnel " + _tunnelId); - _size = DataHelper.readLong(in, 4); - _log.debug("Read tunnel message size: " + _size); - if (_size < 0) throw new I2NPMessageException("Invalid size in the structure: " + _size); - _data = new byte[(int)_size]; - int read = read(in, _data); - if (read != _size) - throw 
new I2NPMessageException("Incorrect number of bytes read (" + read + ", expected " + _size); - int includeVerification = (int)DataHelper.readLong(in, 1); - if (includeVerification == FLAG_INCLUDESTRUCTURE) { - _verification = new TunnelVerificationStructure(); - _verification.readBytes(in); - int len = (int)DataHelper.readLong(in, 2); - _encryptedInstructions = new byte[len]; - read = read(in, _encryptedInstructions); - if (read != len) - throw new I2NPMessageException("Incorrect number of bytes read for instructions (" + read + ", expected " + len + ")"); - } + _tunnelId = new TunnelId(); + _tunnelId.readBytes(in); + _log.debug("Read tunnel message for tunnel " + _tunnelId); + _size = DataHelper.readLong(in, 4); + _log.debug("Read tunnel message size: " + _size); + if (_size < 0) throw new I2NPMessageException("Invalid size in the structure: " + _size); + _data = new byte[(int)_size]; + int read = read(in, _data); + if (read != _size) + throw new I2NPMessageException("Incorrect number of bytes read (" + read + ", expected " + _size); + int includeVerification = (int)DataHelper.readLong(in, 1); + if (includeVerification == FLAG_INCLUDESTRUCTURE) { + _verification = new TunnelVerificationStructure(); + _verification.readBytes(in); + int len = (int)DataHelper.readLong(in, 2); + _encryptedInstructions = new byte[len]; + read = read(in, _encryptedInstructions); + if (read != len) + throw new I2NPMessageException("Incorrect number of bytes read for instructions (" + read + ", expected " + len + ")"); + } } catch (DataFormatException dfe) { throw new I2NPMessageException("Unable to load the message data", dfe); } } protected byte[] writeMessage() throws I2NPMessageException, IOException { - if ( (_tunnelId == null) || (_data == null) || (_data.length <= 0) ) - throw new I2NPMessageException("Not enough data to write out"); - + if ( (_tunnelId == null) || (_data == null) || (_data.length <= 0) ) + throw new I2NPMessageException("Not enough data to write out"); + 
ByteArrayOutputStream os = new ByteArrayOutputStream(32); try { - _tunnelId.writeBytes(os); - _log.debug("Writing tunnel message for tunnel " + _tunnelId); - DataHelper.writeLong(os, 4, _data.length); - _log.debug("Writing tunnel message length: " + _data.length); - os.write(_data); - _log.debug("Writing tunnel message data"); - if ( (_verification == null) || (_encryptedInstructions == null) ) { - DataHelper.writeLong(os, 1, FLAG_DONT_INCLUDESTRUCTURE); - _log.debug("Writing DontIncludeStructure flag"); - } else { - DataHelper.writeLong(os, 1, FLAG_INCLUDESTRUCTURE); - _log.debug("Writing IncludeStructure flag, then the verification structure, then the E(instr).length [" + _encryptedInstructions.length + "], then the E(instr)"); - _verification.writeBytes(os); - DataHelper.writeLong(os, 2, _encryptedInstructions.length); - os.write(_encryptedInstructions); - } + _tunnelId.writeBytes(os); + _log.debug("Writing tunnel message for tunnel " + _tunnelId); + DataHelper.writeLong(os, 4, _data.length); + _log.debug("Writing tunnel message length: " + _data.length); + os.write(_data); + _log.debug("Writing tunnel message data"); + if ( (_verification == null) || (_encryptedInstructions == null) ) { + DataHelper.writeLong(os, 1, FLAG_DONT_INCLUDESTRUCTURE); + _log.debug("Writing DontIncludeStructure flag"); + } else { + DataHelper.writeLong(os, 1, FLAG_INCLUDESTRUCTURE); + _log.debug("Writing IncludeStructure flag, then the verification structure, then the E(instr).length [" + _encryptedInstructions.length + "], then the E(instr)"); + _verification.writeBytes(os); + DataHelper.writeLong(os, 2, _encryptedInstructions.length); + os.write(_encryptedInstructions); + } } catch (DataFormatException dfe) { throw new I2NPMessageException("Error writing out the message data", dfe); } - byte rv[] = os.toByteArray(); - _log.debug("Overall data being written: " + rv.length); + byte rv[] = os.toByteArray(); + _log.debug("Overall data being written: " + rv.length); return rv; } public 
int getType() { return MESSAGE_TYPE; } public int hashCode() { - return DataHelper.hashCode(getTunnelId()) + - DataHelper.hashCode(_data) + - DataHelper.hashCode(getVerificationStructure()) + - DataHelper.hashCode(getEncryptedDeliveryInstructions()); + return DataHelper.hashCode(getTunnelId()) + + DataHelper.hashCode(_data) + + DataHelper.hashCode(getVerificationStructure()) + + DataHelper.hashCode(getEncryptedDeliveryInstructions()); } public boolean equals(Object object) { if ( (object != null) && (object instanceof TunnelMessage) ) { TunnelMessage msg = (TunnelMessage)object; return DataHelper.eq(getTunnelId(),msg.getTunnelId()) && - DataHelper.eq(getVerificationStructure(),msg.getVerificationStructure()) && - DataHelper.eq(getData(),msg.getData()) && - DataHelper.eq(getEncryptedDeliveryInstructions(), msg.getEncryptedDeliveryInstructions()); + DataHelper.eq(getVerificationStructure(),msg.getVerificationStructure()) && + DataHelper.eq(getData(),msg.getData()) && + DataHelper.eq(getEncryptedDeliveryInstructions(), msg.getEncryptedDeliveryInstructions()); } else { return false; } } - public String toString() { + public String toString() { StringBuffer buf = new StringBuffer(); buf.append("[TunnelMessage: "); buf.append("\n\tTunnel ID: ").append(getTunnelId()); diff --git a/router/java/src/net/i2p/data/i2np/TunnelVerificationStructure.java b/router/java/src/net/i2p/data/i2np/TunnelVerificationStructure.java index 8309e62b5..3fbcac128 100644 --- a/router/java/src/net/i2p/data/i2np/TunnelVerificationStructure.java +++ b/router/java/src/net/i2p/data/i2np/TunnelVerificationStructure.java @@ -1,9 +1,9 @@ package net.i2p.data.i2np; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. 
- * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -21,19 +21,19 @@ import net.i2p.data.Signature; import net.i2p.data.SigningPrivateKey; import net.i2p.data.SigningPublicKey; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * * @author jrandom */ public class TunnelVerificationStructure extends DataStructureImpl { - private final static Log _log = new Log(TunnelVerificationStructure.class); private Hash _msgHash; private Signature _authSignature; - public TunnelVerificationStructure() { - setMessageHash(null); - setAuthorizationSignature(null); + public TunnelVerificationStructure() { + setMessageHash(null); + setAuthorizationSignature(null); } public Hash getMessageHash() { return _msgHash; } @@ -42,45 +42,45 @@ public class TunnelVerificationStructure extends DataStructureImpl { public Signature getAuthorizationSignature() { return _authSignature; } public void setAuthorizationSignature(Signature sig) { _authSignature = sig; } - public void sign(SigningPrivateKey key) { - if (_msgHash != null) { - Signature sig = DSAEngine.getInstance().sign(_msgHash.getData(), key); - setAuthorizationSignature(sig); - } + public void sign(RouterContext context, SigningPrivateKey key) { + if (_msgHash != null) { + Signature sig = context.dsa().sign(_msgHash.getData(), key); + setAuthorizationSignature(sig); + } } - public boolean verifySignature(SigningPublicKey key) { - if (_msgHash == null) return false; - return DSAEngine.getInstance().verifySignature(_authSignature, _msgHash.getData(), key); + public boolean verifySignature(RouterContext context, SigningPublicKey key) { + if (_msgHash == null) return false; + return context.dsa().verifySignature(_authSignature, _msgHash.getData(), key); } public 
void readBytes(InputStream in) throws DataFormatException, IOException { - _msgHash = new Hash(); - _msgHash.readBytes(in); - _authSignature = new Signature(); - _authSignature.readBytes(in); + _msgHash = new Hash(); + _msgHash.readBytes(in); + _authSignature = new Signature(); + _authSignature.readBytes(in); } public void writeBytes(OutputStream out) throws DataFormatException, IOException { - if (_authSignature == null) { - _authSignature = new Signature(); - _authSignature.setData(Signature.FAKE_SIGNATURE); - } + if (_authSignature == null) { + _authSignature = new Signature(); + _authSignature.setData(Signature.FAKE_SIGNATURE); + } if ( (_msgHash == null) || (_authSignature == null) ) throw new DataFormatException("Invalid data"); - _msgHash.writeBytes(out); - _authSignature.writeBytes(out); + _msgHash.writeBytes(out); + _authSignature.writeBytes(out); } public boolean equals(Object obj) { if ( (obj == null) || !(obj instanceof TunnelVerificationStructure)) return false; - TunnelVerificationStructure str = (TunnelVerificationStructure)obj; - return DataHelper.eq(getMessageHash(), str.getMessageHash()) && - DataHelper.eq(getAuthorizationSignature(), str.getAuthorizationSignature()); + TunnelVerificationStructure str = (TunnelVerificationStructure)obj; + return DataHelper.eq(getMessageHash(), str.getMessageHash()) && + DataHelper.eq(getAuthorizationSignature(), str.getAuthorizationSignature()); } public int hashCode() { - if ( (_msgHash == null) || (_authSignature == null) ) return 0; - return getMessageHash().hashCode() + getAuthorizationSignature().hashCode(); + if ( (_msgHash == null) || (_authSignature == null) ) return 0; + return getMessageHash().hashCode() + getAuthorizationSignature().hashCode(); } public String toString() { diff --git a/router/java/src/net/i2p/router/ClientManagerFacade.java b/router/java/src/net/i2p/router/ClientManagerFacade.java index 73d39024b..e44fd97cb 100644 --- a/router/java/src/net/i2p/router/ClientManagerFacade.java +++ 
b/router/java/src/net/i2p/router/ClientManagerFacade.java @@ -21,8 +21,6 @@ import net.i2p.router.client.ClientManagerFacadeImpl; * @author jrandom */ public abstract class ClientManagerFacade implements Service { - private static ClientManagerFacade _instance = new ClientManagerFacadeImpl(); - public static ClientManagerFacade getInstance() { return _instance; } /** * Request that a particular client authorize the Leases contained in the @@ -74,16 +72,19 @@ public abstract class ClientManagerFacade implements Service { } class DummyClientManagerFacade extends ClientManagerFacade { + private RouterContext _context; + public DummyClientManagerFacade(RouterContext ctx) { + _context = ctx; + } public boolean isLocal(Hash destHash) { return true; } public boolean isLocal(Destination dest) { return true; } public void reportAbuse(Destination dest, String reason, int severity) { } public void messageReceived(ClientMessage msg) {} - public void requestLeaseSet(Destination dest, LeaseSet set, long timeout, Job onCreateJob, Job onFailedJob) { - JobQueue.getInstance().addJob(onFailedJob); + public void requestLeaseSet(Destination dest, LeaseSet set, long timeout, + Job onCreateJob, Job onFailedJob) { + _context.jobQueue().addJob(onFailedJob); } - public void startup() { - //JobQueue.getInstance().addJob(new PollOutboundClientMessagesJob()); - } + public void startup() {} public void stopAcceptingClients() { } public void shutdown() {} diff --git a/router/java/src/net/i2p/router/ClientMessagePool.java b/router/java/src/net/i2p/router/ClientMessagePool.java index 05fd9d3c3..8abe694b3 100644 --- a/router/java/src/net/i2p/router/ClientMessagePool.java +++ b/router/java/src/net/i2p/router/ClientMessagePool.java @@ -25,15 +25,12 @@ import net.i2p.util.Log; * */ public class ClientMessagePool { - private final static Log _log = new Log(ClientMessagePool.class); - private static ClientMessagePool _instance = new ClientMessagePool(); - public static final ClientMessagePool 
getInstance() { return _instance; } - private List _inMessages; - private List _outMessages; + private Log _log; + private RouterContext _context; - private ClientMessagePool() { - _inMessages = new ArrayList(); - _outMessages = new ArrayList(); + public ClientMessagePool(RouterContext context) { + _context = context; + _log = _context.logManager().getLog(ClientMessagePool.class); } /** @@ -42,84 +39,13 @@ public class ClientMessagePool { * */ public void add(ClientMessage msg) { - if ( (ClientManagerFacade.getInstance().isLocal(msg.getDestination())) || - (ClientManagerFacade.getInstance().isLocal(msg.getDestinationHash())) ) { - _log.debug("Adding message for local delivery"); - ClientManagerFacade.getInstance().messageReceived(msg); - //synchronized (_inMessages) { - // _inMessages.add(msg); - //} - } else { - _log.debug("Adding message for remote delivery"); - //JobQueue.getInstance().addJob(new ProcessOutboundClientMessageJob(msg)); - JobQueue.getInstance().addJob(new OutboundClientMessageJob(msg)); - //synchronized (_outMessages) { - // _outMessages.add(msg); - //} - } - } - - /** - * Retrieve the next locally destined message, or null if none are available. - * - */ - public ClientMessage getNextLocal() { - synchronized (_inMessages) { - if (_inMessages.size() <= 0) return null; - return (ClientMessage)_inMessages.remove(0); - } - } - - /** - * Retrieve the next remotely destined message, or null if none are available. - * - */ - public ClientMessage getNextRemote() { - synchronized (_outMessages) { - if (_outMessages.size() <= 0) return null; - return (ClientMessage)_outMessages.remove(0); - } - } - - /** - * Determine how many locally bound messages are in the pool - * - */ - public int getLocalCount() { - synchronized (_inMessages) { - return _inMessages.size(); - } - } - - /** - * Determine how many remotely bound messages are in the pool. 
- * - */ - public int getRemoteCount() { - synchronized (_outMessages) { - return _outMessages.size(); - } - } - - public void dumpPoolInfo() { - StringBuffer buf = new StringBuffer(); - buf.append("\nDumping Client Message Pool. Local messages: ").append(getLocalCount()).append(" Remote messages: ").append(getRemoteCount()).append("\n"); - buf.append("Inbound messages\n"); - buf.append("----------------------------\n"); - synchronized (_inMessages) { - for (Iterator iter = _inMessages.iterator(); iter.hasNext();) { - ClientMessage msg = (ClientMessage)iter.next(); - buf.append(msg).append("\n\n"); - } - } - buf.append("Outbound messages\n"); - buf.append("----------------------------\n"); - synchronized (_outMessages) { - for (Iterator iter = _outMessages.iterator(); iter.hasNext();) { - ClientMessage msg = (ClientMessage)iter.next(); - buf.append(msg).append("\n\n"); - } - } - _log.debug(buf.toString()); + if ( (_context.clientManager().isLocal(msg.getDestination())) || + (_context.clientManager().isLocal(msg.getDestinationHash())) ) { + _log.debug("Adding message for local delivery"); + _context.clientManager().messageReceived(msg); + } else { + _log.debug("Adding message for remote delivery"); + _context.jobQueue().addJob(new OutboundClientMessageJob(_context, msg)); + } } } diff --git a/router/java/src/net/i2p/router/CommSystemFacade.java b/router/java/src/net/i2p/router/CommSystemFacade.java index 2f2eec573..d3342138f 100644 --- a/router/java/src/net/i2p/router/CommSystemFacade.java +++ b/router/java/src/net/i2p/router/CommSystemFacade.java @@ -11,20 +11,12 @@ package net.i2p.router; import java.util.HashSet; import java.util.Set; -import net.i2p.router.transport.CommSystemFacadeImpl; - /** * Manages the communication subsystem between peers, including connections, * listeners, transports, connection keys, etc. 
* */ public abstract class CommSystemFacade implements Service { - private static CommSystemFacade _instance = new CommSystemFacadeImpl(); - public static CommSystemFacade getInstance() { return _instance; } - - // getAddresses - // rotateAddress(address) - public abstract void processMessage(OutNetMessage msg); public String renderStatusHTML() { return ""; } diff --git a/router/java/src/net/i2p/router/GenerateStatusConsoleJob.java b/router/java/src/net/i2p/router/GenerateStatusConsoleJob.java deleted file mode 100644 index 8faa129c3..000000000 --- a/router/java/src/net/i2p/router/GenerateStatusConsoleJob.java +++ /dev/null @@ -1,62 +0,0 @@ -package net.i2p.router; -/* - * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat - * your children, but it might. Use at your own risk. - * - */ - -import java.io.FileOutputStream; -import java.io.IOException; - -import net.i2p.util.Log; - -public class GenerateStatusConsoleJob extends JobImpl { - private final static Log _log = new Log(GenerateStatusConsoleJob.class); - - private final static long REGENERATE_DELAY_MS = 60*1000; // once per minute update the console - public final static String CONFIG_CONSOLE_LOCATION = "routerConsoleFile"; - public final static String DEFAULT_CONSOLE_LOCATION = "routerConsole.html"; - - public final static String PARAM_GENERATE_CONFIG_CONSOLE = "router.generateConsole"; - public final static boolean DEFAULT_GENERATE_CONFIG_CONSOLE = true; - - private boolean shouldGenerateConsole() { - String str = Router.getInstance().getConfigSetting(PARAM_GENERATE_CONFIG_CONSOLE); - if ( (str == null) || (str.trim().length() <= 0) ) - return DEFAULT_GENERATE_CONFIG_CONSOLE; - if (Boolean.TRUE.toString().equalsIgnoreCase(str)) - return true; - else - return false; - } - - public String getName() { return 
"Generate Status Console"; } - public void runJob() { - if (shouldGenerateConsole()) { - String consoleHTML = Router.getInstance().renderStatusHTML(); - writeConsole(consoleHTML); - } - requeue(REGENERATE_DELAY_MS); - } - - private void writeConsole(String html) { - String loc = Router.getInstance().getConfigSetting(CONFIG_CONSOLE_LOCATION); - if (loc == null) - loc = DEFAULT_CONSOLE_LOCATION; - - FileOutputStream fos = null; - try { - fos = new FileOutputStream(loc); - fos.write(html.getBytes()); - fos.flush(); - } catch (IOException ioe) { - _log.error("Error writing out the console", ioe); - } finally { - if (fos != null) try { fos.close(); } catch (IOException ioe) {} - } - } - -} diff --git a/router/java/src/net/i2p/router/InNetMessagePool.java b/router/java/src/net/i2p/router/InNetMessagePool.java index 30777cf9b..1889b0ae7 100644 --- a/router/java/src/net/i2p/router/InNetMessagePool.java +++ b/router/java/src/net/i2p/router/InNetMessagePool.java @@ -26,17 +26,18 @@ import net.i2p.util.Log; * */ public class InNetMessagePool { - private final static Log _log = new Log(InNetMessagePool.class); - private static InNetMessagePool _instance = new InNetMessagePool(); - public final static InNetMessagePool getInstance() { return _instance; } + private Log _log; + private RouterContext _context; private List _messages; private Map _handlerJobBuilders; - private InNetMessagePool() { + public InNetMessagePool(RouterContext context) { + _context = context; _messages = new ArrayList(); _handlerJobBuilders = new HashMap(); - StatManager.getInstance().createRateStat("inNetPool.dropped", "How often do we drop a message", "InNetPool", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createRateStat("inNetPool.duplicate", "How often do we receive a duplicate message", "InNetPool", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + _log = _context.logManager().getLog(InNetMessagePool.class); + 
_context.statManager().createRateStat("inNetPool.dropped", "How often do we drop a message", "InNetPool", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createRateStat("inNetPool.duplicate", "How often do we receive a duplicate message", "InNetPool", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); } public HandlerJobBuilder registerHandlerJobBuilder(int i2npMessageType, HandlerJobBuilder builder) { @@ -57,15 +58,15 @@ public class InNetMessagePool { */ public int add(InNetMessage msg) { Date exp = msg.getMessage().getMessageExpiration(); - boolean valid = MessageValidator.getInstance().validateMessage(msg.getMessage().getUniqueId(), exp.getTime()); + boolean valid = _context.messageValidator().validateMessage(msg.getMessage().getUniqueId(), exp.getTime()); if (!valid) { if (_log.shouldLog(Log.WARN)) _log.warn("Duplicate message received [" + msg.getMessage().getUniqueId() + " expiring on " + exp + "]: " + msg.getMessage().getClass().getName()); - StatManager.getInstance().addRateData("inNetPool.dropped", 1, 0); - StatManager.getInstance().addRateData("inNetPool.duplicate", 1, 0); - MessageHistory.getInstance().droppedOtherMessage(msg.getMessage()); - MessageHistory.getInstance().messageProcessingError(msg.getMessage().getUniqueId(), + _context.statManager().addRateData("inNetPool.dropped", 1, 0); + _context.statManager().addRateData("inNetPool.duplicate", 1, 0); + _context.messageHistory().droppedOtherMessage(msg.getMessage()); + _context.messageHistory().messageProcessingError(msg.getMessage().getUniqueId(), msg.getMessage().getClass().getName(), "Duplicate/expired"); return -1; @@ -87,14 +88,14 @@ public class InNetMessagePool { Job job = builder.createJob(msg.getMessage(), msg.getFromRouter(), msg.getFromRouterHash(), msg.getReplyBlock()); if (job != null) { - JobQueue.getInstance().addJob(job); + _context.jobQueue().addJob(job); synchronized (_messages) { size = _messages.size(); } } } - List origMessages = 
OutboundMessageRegistry.getInstance().getOriginalMessages(msg.getMessage()); + List origMessages = _context.messageRegistry().getOriginalMessages(msg.getMessage()); if (_log.shouldLog(Log.DEBUG)) _log.debug("Original messages for inbound message: " + origMessages.size()); if (origMessages.size() > 1) { @@ -112,7 +113,7 @@ public class InNetMessagePool { if (job != null) { job.setMessage(msg.getMessage()); - JobQueue.getInstance().addJob(job); + _context.jobQueue().addJob(job); } } @@ -120,24 +121,24 @@ public class InNetMessagePool { // not handled as a reply if (size == -1) { // was not handled via HandlerJobBuilder - MessageHistory.getInstance().droppedOtherMessage(msg.getMessage()); + _context.messageHistory().droppedOtherMessage(msg.getMessage()); if (_log.shouldLog(Log.ERROR)) _log.error("Message " + msg.getMessage() + " was not handled by a HandlerJobBuilder - DROPPING: " + msg, new Exception("DROPPED MESSAGE")); - StatManager.getInstance().addRateData("inNetPool.dropped", 1, 0); + _context.statManager().addRateData("inNetPool.dropped", 1, 0); } else { String mtype = msg.getMessage().getClass().getName(); - MessageHistory.getInstance().receiveMessage(mtype, msg.getMessage().getUniqueId(), - msg.getMessage().getMessageExpiration(), - msg.getFromRouterHash(), true); + _context.messageHistory().receiveMessage(mtype, msg.getMessage().getUniqueId(), + msg.getMessage().getMessageExpiration(), + msg.getFromRouterHash(), true); return size; } } String mtype = msg.getMessage().getClass().getName(); - MessageHistory.getInstance().receiveMessage(mtype, msg.getMessage().getUniqueId(), - msg.getMessage().getMessageExpiration(), - msg.getFromRouterHash(), true); + _context.messageHistory().receiveMessage(mtype, msg.getMessage().getUniqueId(), + msg.getMessage().getMessageExpiration(), + msg.getFromRouterHash(), true); return size; } @@ -174,19 +175,4 @@ public class InNetMessagePool { return _messages.size(); } } - - public void dumpPoolInfo() { - if 
(!_log.shouldLog(Log.DEBUG)) return; - - StringBuffer buf = new StringBuffer(); - buf.append("\nDumping Inbound Network Message Pool. Total # message: ").append(getCount()).append("\n"); - synchronized (_messages) { - for (Iterator iter = _messages.iterator(); iter.hasNext();) { - InNetMessage msg = (InNetMessage)iter.next(); - buf.append("Message ").append(msg.getMessage()).append("\n\n"); - } - } - _log.debug(buf.toString()); - } - } diff --git a/router/java/src/net/i2p/router/JobImpl.java b/router/java/src/net/i2p/router/JobImpl.java index b4b3bf29a..6cc9d5f27 100644 --- a/router/java/src/net/i2p/router/JobImpl.java +++ b/router/java/src/net/i2p/router/JobImpl.java @@ -13,40 +13,42 @@ import net.i2p.util.Clock; * Base implementation of a Job */ public abstract class JobImpl implements Job { + protected RouterContext _context; private JobTiming _timing; private static int _idSrc = 0; private int _id; private Exception _addedBy; private long _madeReadyOn; - public JobImpl() { - _timing = new JobTiming(); - _id = ++_idSrc; - _addedBy = null; - _madeReadyOn = 0; + public JobImpl(RouterContext context) { + _context = context; + _timing = new JobTiming(context); + _id = ++_idSrc; + _addedBy = null; + _madeReadyOn = 0; } public int getJobId() { return _id; } public JobTiming getTiming() { return _timing; } public String toString() { - StringBuffer buf = new StringBuffer(128); - buf.append(super.toString()); - buf.append(": Job ").append(_id).append(": ").append(getName()); - return buf.toString(); + StringBuffer buf = new StringBuffer(128); + buf.append(super.toString()); + buf.append(": Job ").append(_id).append(": ").append(getName()); + return buf.toString(); } void addedToQueue() { - _addedBy = new Exception(); + _addedBy = new Exception(); } public Exception getAddedBy() { return _addedBy; } public long getMadeReadyOn() { return _madeReadyOn; } - public void madeReady() { _madeReadyOn = Clock.getInstance().now(); } + public void madeReady() { _madeReadyOn = 
_context.clock().now(); } public void dropped() {} protected void requeue(long delayMs) { - getTiming().setStartAfter(Clock.getInstance().now() + delayMs); - JobQueue.getInstance().addJob(this); + getTiming().setStartAfter(_context.clock().now() + delayMs); + _context.jobQueue().addJob(this); } } diff --git a/router/java/src/net/i2p/router/JobQueue.java b/router/java/src/net/i2p/router/JobQueue.java index 23e5efdba..3a22d0ac8 100644 --- a/router/java/src/net/i2p/router/JobQueue.java +++ b/router/java/src/net/i2p/router/JobQueue.java @@ -11,7 +11,7 @@ package net.i2p.router; import java.util.Date; import java.util.HashMap; import java.util.Iterator; -import java.util.LinkedList; +import java.util.ArrayList; import java.util.TreeMap; import net.i2p.router.message.HandleSourceRouteReplyMessageJob; @@ -29,22 +29,21 @@ import net.i2p.util.Log; * */ public class JobQueue { - private final static Log _log = new Log(JobQueue.class); - private static JobQueue _instance = new JobQueue(); - public static JobQueue getInstance() { return _instance; } + private Log _log; + private RouterContext _context; /** Integer (runnerId) to JobQueueRunner for created runners */ - private static HashMap _queueRunners; + private HashMap _queueRunners; /** a counter to identify a job runner */ private volatile static int _runnerId = 0; /** list of jobs that are ready to run ASAP */ - private LinkedList _readyJobs; + private ArrayList _readyJobs; /** list of jobs that are scheduled for running in the future */ - private LinkedList _timedJobs; + private ArrayList _timedJobs; /** when true, don't run any new jobs or update any limits, etc */ private boolean _paused; /** job name to JobStat for that job */ - private static TreeMap _jobStats; + private TreeMap _jobStats; /** how many job queue runners can go concurrently */ private int _maxRunners; private QueuePumper _pumper; @@ -92,8 +91,6 @@ public class JobQueue { private final static String PROP_MAX_WAITING_JOBS = "router.maxWaitingJobs"; 
static { - StatManager.getInstance().createRateStat("jobQueue.readyJobs", "How many ready and waiting jobs there are?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createRateStat("jobQueue.droppedJobs", "How many jobs do we drop due to insane overload?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); } /** @@ -102,20 +99,31 @@ public class JobQueue { */ private Object _runnerLock = new Object(); - private JobQueue() { - _alive = true; - _readyJobs = new LinkedList(); - _timedJobs = new LinkedList(); - _queueRunners = new HashMap(); - _paused = false; - _jobStats = new TreeMap(); - _allowParallelOperation = false; - _pumper = new QueuePumper(); - I2PThread pumperThread = new I2PThread(_pumper); - pumperThread.setDaemon(true); - pumperThread.setName("QueuePumper"); - pumperThread.setPriority(I2PThread.MIN_PRIORITY); - pumperThread.start(); + public JobQueue(RouterContext context) { + _context = context; + _log = context.logManager().getLog(JobQueue.class); + _context.statManager().createRateStat("jobQueue.readyJobs", + "How many ready and waiting jobs there are?", + "JobQueue", + new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createRateStat("jobQueue.droppedJobs", + "How many jobs do we drop due to insane overload?", + "JobQueue", + new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + + _alive = true; + _readyJobs = new ArrayList(); + _timedJobs = new ArrayList(); + _queueRunners = new HashMap(); + _paused = false; + _jobStats = new TreeMap(); + _allowParallelOperation = false; + _pumper = new QueuePumper(); + I2PThread pumperThread = new I2PThread(_pumper); + pumperThread.setDaemon(true); + pumperThread.setName("QueuePumper"); + pumperThread.setPriority(I2PThread.MIN_PRIORITY); + pumperThread.start(); } /** @@ -123,62 +131,63 @@ public class JobQueue { * */ public void addJob(Job job) { - if (job == null) return; - - if (job instanceof JobImpl) - 
((JobImpl)job).addedToQueue(); - - boolean isReady = false; - long numReady = 0; - boolean alreadyExists = false; - synchronized (_readyJobs) { - if (_readyJobs.contains(job)) - alreadyExists = true; - numReady = _readyJobs.size(); - } - if (!alreadyExists) { - synchronized (_timedJobs) { - if (_timedJobs.contains(job)) - alreadyExists = true; - } - } - - StatManager.getInstance().addRateData("jobQueue.readyJobs", numReady, 0); - if (shouldDrop(job, numReady)) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Dropping job due to overload! # ready jobs: " + numReady + ": job = " + job); - job.dropped(); - StatManager.getInstance().addRateData("jobQueue.droppedJobs", 1, 1); - awaken(1); - return; - } - - if (!alreadyExists) { - if (job.getTiming().getStartAfter() <= Clock.getInstance().now()) { - // don't skew us - its 'start after' its been queued, or later - job.getTiming().setStartAfter(Clock.getInstance().now()); - if (job instanceof JobImpl) - ((JobImpl)job).madeReady(); - synchronized (_readyJobs) { - _readyJobs.add(job); - isReady = true; - } - } else { - synchronized (_timedJobs) { - _timedJobs.add(job); - } - } - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Not adding already enqueued job " + job.getName()); - } - - if (isReady) { - // wake up at most one runner - awaken(1); - } - - return; + if (job == null) return; + + if (job instanceof JobImpl) + ((JobImpl)job).addedToQueue(); + + boolean isReady = false; + long numReady = 0; + boolean alreadyExists = false; + synchronized (_readyJobs) { + if (_readyJobs.contains(job)) + alreadyExists = true; + numReady = _readyJobs.size(); + } + if (!alreadyExists) { + synchronized (_timedJobs) { + if (_timedJobs.contains(job)) + alreadyExists = true; + } + } + + _context.statManager().addRateData("jobQueue.readyJobs", numReady, 0); + if (shouldDrop(job, numReady)) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Dropping job due to overload! 
# ready jobs: " + + numReady + ": job = " + job); + job.dropped(); + _context.statManager().addRateData("jobQueue.droppedJobs", 1, 1); + awaken(1); + return; + } + + if (!alreadyExists) { + if (job.getTiming().getStartAfter() <= _context.clock().now()) { + // don't skew us - its 'start after' its been queued, or later + job.getTiming().setStartAfter(_context.clock().now()); + if (job instanceof JobImpl) + ((JobImpl)job).madeReady(); + synchronized (_readyJobs) { + _readyJobs.add(job); + isReady = true; + } + } else { + synchronized (_timedJobs) { + _timedJobs.add(job); + } + } + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Not adding already enqueued job " + job.getName()); + } + + if (isReady) { + // wake up at most one runner + awaken(1); + } + + return; } /** @@ -189,35 +198,35 @@ public class JobQueue { * */ private boolean shouldDrop(Job job, long numReady) { - if (_maxWaitingJobs <= 0) return false; // dont ever drop jobs - if (!_allowParallelOperation) return false; // dont drop during startup [duh] - Class cls = job.getClass(); - if (numReady > _maxWaitingJobs) { - - // heavy cpu load, plus we're allowed to be unreliable with these two - // [but garlics can contain our payloads, so lets not drop them] - //if (cls == HandleGarlicMessageJob.class) - // return true; - if (cls == HandleSourceRouteReplyMessageJob.class) - return true; - - // lets not try to drop too many tunnel messages... 
- //if (cls == HandleTunnelMessageJob.class) - // return true; - - // we don't really *need* to answer DB lookup messages - if (cls == HandleDatabaseLookupMessageJob.class) - return true; - - // tunnels are a bitch, but its dropped() builds a pair of fake ones just in case - if (cls == RequestTunnelJob.class) - return true; - - // if we're already this loaded, dont take more tunnels - if (cls == HandleTunnelCreateMessageJob.class) - return true; - } - return false; + if (_maxWaitingJobs <= 0) return false; // dont ever drop jobs + if (!_allowParallelOperation) return false; // dont drop during startup [duh] + Class cls = job.getClass(); + if (numReady > _maxWaitingJobs) { + + // heavy cpu load, plus we're allowed to be unreliable with these two + // [but garlics can contain our payloads, so lets not drop them] + //if (cls == HandleGarlicMessageJob.class) + // return true; + if (cls == HandleSourceRouteReplyMessageJob.class) + return true; + + // lets not try to drop too many tunnel messages... 
+ //if (cls == HandleTunnelMessageJob.class) + // return true; + + // we don't really *need* to answer DB lookup messages + if (cls == HandleDatabaseLookupMessageJob.class) + return true; + + // tunnels are a bitch, but its dropped() builds a pair of fake ones just in case + if (cls == RequestTunnelJob.class) + return true; + + // if we're already this loaded, dont take more tunnels + if (cls == HandleTunnelCreateMessageJob.class) + return true; + } + return false; } public void allowParallelOperation() { _allowParallelOperation = true; } @@ -229,32 +238,33 @@ public class JobQueue { * */ Job getNext() { - while (_alive) { - while (_paused) { - try { Thread.sleep(30); } catch (InterruptedException ie) {} - } - Job rv = null; - int ready = 0; - synchronized (_readyJobs) { - ready = _readyJobs.size(); - if (ready > 0) - rv = (Job)_readyJobs.remove(0); - } - if (rv != null) { - // we found one, but there may be more, so wake up enough - // other runners - awaken(ready-1); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Using a ready job after waking up " + (ready-1) + " others"); - return rv; - } - try { - synchronized (_runnerLock) { - _runnerLock.wait(1000); - } - } catch (InterruptedException ie) {} - } - return null; + while (_alive) { + while (_paused) { + try { Thread.sleep(30); } catch (InterruptedException ie) {} + } + Job rv = null; + int ready = 0; + synchronized (_readyJobs) { + ready = _readyJobs.size(); + if (ready > 0) + rv = (Job)_readyJobs.remove(0); + } + if (rv != null) { + // we found one, but there may be more, so wake up enough + // other runners + awaken(ready-1); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Using a ready job after waking up " + (ready-1) + " others"); + return rv; + } + + try { + synchronized (_runnerLock) { + _runnerLock.wait(1000); + } + } catch (InterruptedException ie) {} + } + return null; } /** @@ -263,31 +273,31 @@ public class JobQueue { * */ private int checkJobTimings() { - boolean newJobsReady = false; - long now = 
Clock.getInstance().now(); - LinkedList toAdd = new LinkedList(); - synchronized (_timedJobs) { - for (int i = 0; i < _timedJobs.size(); i++) { - Job j = (Job)_timedJobs.get(i); - // find jobs due to start before now - if (j.getTiming().getStartAfter() <= now) { - if (j instanceof JobImpl) - ((JobImpl)j).madeReady(); + boolean newJobsReady = false; + long now = _context.clock().now(); + ArrayList toAdd = new ArrayList(4); + synchronized (_timedJobs) { + for (int i = 0; i < _timedJobs.size(); i++) { + Job j = (Job)_timedJobs.get(i); + // find jobs due to start before now + if (j.getTiming().getStartAfter() <= now) { + if (j instanceof JobImpl) + ((JobImpl)j).madeReady(); - toAdd.add(j); - _timedJobs.remove(i); - i--; // so the index stays consistent - } - } - } - - int ready = 0; - synchronized (_readyJobs) { - _readyJobs.addAll(toAdd); - ready = _readyJobs.size(); - } - - return ready; + toAdd.add(j); + _timedJobs.remove(i); + i--; // so the index stays consistent + } + } + } + + int ready = 0; + synchronized (_readyJobs) { + _readyJobs.addAll(toAdd); + ready = _readyJobs.size(); + } + + return ready; } /** @@ -299,39 +309,40 @@ public class JobQueue { * */ public void runQueue(int numThreads) { - synchronized (_queueRunners) { - // we're still starting up [serially] and we've got at least one runner, - // so dont do anything - if ( (_queueRunners.size() > 0) && (!_allowParallelOperation) ) return; - - // we've already enabled parallel operation, so grow to however many are - // specified - if (_queueRunners.size() < numThreads) { - if (_log.shouldLog(Log.INFO)) - _log.info("Increasing the number of queue runners from " + _queueRunners.size() + " to " + numThreads); - for (int i = _queueRunners.size(); i < numThreads; i++) { - JobQueueRunner runner = new JobQueueRunner(i); - _queueRunners.put(new Integer(i), runner); - Thread t = new I2PThread(runner); - t.setName("JobQueue"+(_runnerId++)); - t.setDaemon(false); - t.start(); - } - } else if (_queueRunners.size() == 
numThreads) { - // noop - } else { // numThreads < # runners, so shrink - //for (int i = _queueRunners.size(); i > numThreads; i++) { - // QueueRunner runner = (QueueRunner)_queueRunners.get(new Integer(i)); - // runner.stopRunning(); - //} - } - } + synchronized (_queueRunners) { + // we're still starting up [serially] and we've got at least one runner, + // so dont do anything + if ( (_queueRunners.size() > 0) && (!_allowParallelOperation) ) return; + + // we've already enabled parallel operation, so grow to however many are + // specified + if (_queueRunners.size() < numThreads) { + if (_log.shouldLog(Log.INFO)) + _log.info("Increasing the number of queue runners from " + + _queueRunners.size() + " to " + numThreads); + for (int i = _queueRunners.size(); i < numThreads; i++) { + JobQueueRunner runner = new JobQueueRunner(_context, i); + _queueRunners.put(new Integer(i), runner); + Thread t = new I2PThread(runner); + t.setName("JobQueue"+(_runnerId++)); + t.setDaemon(false); + t.start(); + } + } else if (_queueRunners.size() == numThreads) { + // noop + } else { // numThreads < # runners, so shrink + //for (int i = _queueRunners.size(); i > numThreads; i++) { + // QueueRunner runner = (QueueRunner)_queueRunners.get(new Integer(i)); + // runner.stopRunning(); + //} + } + } } //public void pauseQueue() { _paused = true; } //public void unpauseQueue() { _paused = false; } void removeRunner(int id) { _queueRunners.remove(new Integer(id)); } - + /** * Notify a sufficient number of waiting runners, and if necessary, increase @@ -339,29 +350,29 @@ public class JobQueue { * */ private void awaken(int numMadeReady) { - // notify a sufficient number of waiting runners - for (int i = 0; i < numMadeReady; i++) { - synchronized (_runnerLock) { - _runnerLock.notify(); - } - } - - int numRunners = 0; - synchronized (_queueRunners) { - numRunners = _queueRunners.size(); - } - - if (numRunners > 1) { - if (numMadeReady > numRunners) { - if (numMadeReady < _maxRunners) { - 
_log.info("Too much job contention (" + numMadeReady + " ready and waiting, " + numRunners + " runners exist), adding " + numMadeReady + " new runners (with max " + _maxRunners + ")"); - runQueue(numMadeReady); - } else { - _log.info("Too much job contention (" + numMadeReady + " ready and waiting, " + numRunners + " runners exist), increasing to our max of " + _maxRunners + " runners"); - runQueue(_maxRunners); - } - } - } + // notify a sufficient number of waiting runners + for (int i = 0; i < numMadeReady; i++) { + synchronized (_runnerLock) { + _runnerLock.notify(); + } + } + + int numRunners = 0; + synchronized (_queueRunners) { + numRunners = _queueRunners.size(); + } + + if (numRunners > 1) { + if (numMadeReady > numRunners) { + if (numMadeReady < _maxRunners) { + _log.info("Too much job contention (" + numMadeReady + " ready and waiting, " + numRunners + " runners exist), adding " + numMadeReady + " new runners (with max " + _maxRunners + ")"); + runQueue(numMadeReady); + } else { + _log.info("Too much job contention (" + numMadeReady + " ready and waiting, " + numRunners + " runners exist), increasing to our max of " + _maxRunners + " runners"); + runQueue(_maxRunners); + } + } + } } /** @@ -371,47 +382,47 @@ public class JobQueue { * */ private final class QueuePumper implements Runnable, Clock.ClockUpdateListener { - private long _lastLimitUpdated; - public QueuePumper() { - _lastLimitUpdated = 0; - Clock.getInstance().addUpdateListener(this); - } - public void run() { - try { - while (_alive) { - while (_paused) { - try { Thread.sleep(1000); } catch (InterruptedException ie) {} - } + private long _lastLimitUpdated; + public QueuePumper() { + _lastLimitUpdated = 0; + _context.clock().addUpdateListener(this); + } + public void run() { + try { + while (_alive) { + while (_paused) { + try { Thread.sleep(1000); } catch (InterruptedException ie) {} + } + + // periodically update our max runners limit + long now = _context.clock().now(); + if (now > 
_lastLimitUpdated + MAX_LIMIT_UPDATE_DELAY) { + if (_log.shouldLog(Log.INFO)) + _log.info("Updating the limits"); + updateMaxLimit(); + updateTimingLimits(); + _lastLimitUpdated = now; + } - // periodically update our max runners limit - long now = Clock.getInstance().now(); - if (now > _lastLimitUpdated + MAX_LIMIT_UPDATE_DELAY) { - if (_log.shouldLog(Log.INFO)) - _log.info("Updating the limits"); - updateMaxLimit(); - updateTimingLimits(); - _lastLimitUpdated = now; - } + // turn timed jobs into ready jobs + int numMadeReady = checkJobTimings(); - // turn timed jobs into ready jobs - int numMadeReady = checkJobTimings(); + awaken(numMadeReady); + + try { Thread.sleep(500); } catch (InterruptedException ie) {} + } + } catch (Throwable t) { + _context.clock().removeUpdateListener(this); + if (_log.shouldLog(Log.ERROR)) + _log.error("wtf, pumper killed", t); + } + } + + public void offsetChanged(long delta) { + if (_lastLimitUpdated > 0) + _lastLimitUpdated += delta; + } - awaken(numMadeReady); - - try { Thread.sleep(500); } catch (InterruptedException ie) {} - } - } catch (Throwable t) { - Clock.getInstance().removeUpdateListener(this); - if (_log.shouldLog(Log.ERROR)) - _log.error("wtf, pumper killed", t); - } - } - - public void offsetChanged(long delta) { - if (_lastLimitUpdated > 0) - _lastLimitUpdated += delta; - } - } /** @@ -420,57 +431,52 @@ public class JobQueue { * a warning (and if its really excessive, kill the router) */ void updateStats(Job job, long doStart, long origStartAfter, long duration) { - String key = job.getName(); - long lag = doStart - origStartAfter; // how long were we ready and waiting? - MessageHistory hist = MessageHistory.getInstance(); - long uptime = Router.getInstance().getUptime(); + String key = job.getName(); + long lag = doStart - origStartAfter; // how long were we ready and waiting? 
+ MessageHistory hist = _context.messageHistory(); + long uptime = _context.router().getUptime(); - synchronized (_jobStats) { - if (!_jobStats.containsKey(key)) - _jobStats.put(key, new JobStats(key)); - JobStats stats = (JobStats)_jobStats.get(key); + synchronized (_jobStats) { + if (!_jobStats.containsKey(key)) + _jobStats.put(key, new JobStats(key)); + JobStats stats = (JobStats)_jobStats.get(key); + + stats.jobRan(duration, lag); + } - stats.jobRan(duration, lag); - } + String dieMsg = null; - String dieMsg = null; - boolean dumpRunners = false; - - if (lag > _lagWarning) { - dieMsg = "Lag too long for job " + job.getName() + " [" + lag + "ms and a run time of " + duration + "ms]"; - dumpRunners = true; - } else if (duration > _runWarning) { - dieMsg = "Job run too long for job " + job.getName() + " [" + lag + "ms lag and run time of " + duration + "ms]"; - dumpRunners = true; - } + if (lag > _lagWarning) { + dieMsg = "Lag too long for job " + job.getName() + " [" + lag + "ms and a run time of " + duration + "ms]"; + } else if (duration > _runWarning) { + dieMsg = "Job run too long for job " + job.getName() + " [" + lag + "ms lag and run time of " + duration + "ms]"; + } - if (dieMsg != null) { - if (_log.shouldLog(Log.WARN)) - _log.warn(dieMsg); - if (hist != null) - hist.messageProcessingError(-1, JobQueue.class.getName(), dieMsg); - } - - if (dumpRunners) - dumpRunners(true); - - if ( (lag > _lagFatal) && (uptime > _warmupTime) ) { - // this is fscking bad - the network at this size shouldn't have this much real contention - // so we're going to DIE DIE DIE - if (_log.shouldLog(Log.WARN)) - _log.log(Log.WARN, "The router is either incredibly overloaded or (more likely) there's an error.", new Exception("ttttooooo mmmuuuccccchhhh llllaaagggg")); - //try { Thread.sleep(5000); } catch (InterruptedException ie) {} - //Router.getInstance().shutdown(); - return; - } - if ( (uptime > _warmupTime) && (duration > _runFatal) ) { - // slow CPUs can get hosed with 
ElGamal, but 10s is too much. - if (_log.shouldLog(Log.WARN)) - _log.log(Log.WARN, "The router is incredibly overloaded - either you have a 386, or (more likely) there's an error. ", new Exception("ttttooooo sssllloooowww")); - //try { Thread.sleep(5000); } catch (InterruptedException ie) {} - //Router.getInstance().shutdown(); - return; - } + if (dieMsg != null) { + if (_log.shouldLog(Log.WARN)) + _log.warn(dieMsg); + if (hist != null) + hist.messageProcessingError(-1, JobQueue.class.getName(), dieMsg); + } + + if ( (lag > _lagFatal) && (uptime > _warmupTime) ) { + // this is fscking bad - the network at this size shouldn't have this much real contention + // so we're going to DIE DIE DIE + if (_log.shouldLog(Log.WARN)) + _log.log(Log.WARN, "The router is either incredibly overloaded or (more likely) there's an error.", new Exception("ttttooooo mmmuuuccccchhhh llllaaagggg")); + //try { Thread.sleep(5000); } catch (InterruptedException ie) {} + //Router.getInstance().shutdown(); + return; + } + + if ( (uptime > _warmupTime) && (duration > _runFatal) ) { + // slow CPUs can get hosed with ElGamal, but 10s is too much. + if (_log.shouldLog(Log.WARN)) + _log.log(Log.WARN, "The router is incredibly overloaded - either you have a 386, or (more likely) there's an error. 
", new Exception("ttttooooo sssllloooowww")); + //try { Thread.sleep(5000); } catch (InterruptedException ie) {} + //Router.getInstance().shutdown(); + return; + } } //// @@ -482,18 +488,18 @@ public class JobQueue { * */ private void updateMaxLimit() { - String str = Router.getInstance().getConfigSetting(PROP_MAX_RUNNERS); - if (str != null) { - try { - _maxRunners = Integer.parseInt(str); - return; - } catch (NumberFormatException nfe) { - _log.error("Invalid maximum job runners [" + str + "]"); - } - } - if (_log.shouldLog(Log.INFO)) - _log.info("Defaulting the maximum job runners to " + DEFAULT_MAX_RUNNERS); - _maxRunners = DEFAULT_MAX_RUNNERS; + String str = _context.router().getConfigSetting(PROP_MAX_RUNNERS); + if (str != null) { + try { + _maxRunners = Integer.parseInt(str); + return; + } catch (NumberFormatException nfe) { + _log.error("Invalid maximum job runners [" + str + "]"); + } + } + if (_log.shouldLog(Log.INFO)) + _log.info("Defaulting the maximum job runners to " + DEFAULT_MAX_RUNNERS); + _maxRunners = DEFAULT_MAX_RUNNERS; } /** @@ -502,87 +508,87 @@ public class JobQueue { * */ private void updateTimingLimits() { - String str = Router.getInstance().getConfigSetting(PROP_LAG_WARNING); - if (str != null) { - try { - _lagWarning = Integer.parseInt(str); - } catch (NumberFormatException nfe) { - _log.error("Invalid job lag warning [" + str + "]"); - _lagWarning = DEFAULT_LAG_WARNING; - } - } else { - _lagWarning = DEFAULT_LAG_WARNING; - } - if (_log.shouldLog(Log.INFO)) - _log.info("Setting the warning job lag time to " + _lagWarning + "ms"); - - str = Router.getInstance().getConfigSetting(PROP_LAG_FATAL); - if (str != null) { - try { - _lagFatal = Integer.parseInt(str); - } catch (NumberFormatException nfe) { - _log.error("Invalid job lag fatal [" + str + "]"); - _lagFatal = DEFAULT_LAG_FATAL; - } - } else { - _lagFatal = DEFAULT_LAG_FATAL; - } - if (_log.shouldLog(Log.INFO)) - _log.info("Setting the fatal job lag time to " + _lagFatal + "ms"); - - 
str = Router.getInstance().getConfigSetting(PROP_RUN_WARNING); - if (str != null) { - try { - _runWarning = Integer.parseInt(str); - } catch (NumberFormatException nfe) { - _log.error("Invalid job run warning [" + str + "]"); - _runWarning = DEFAULT_RUN_WARNING; - } - } else { - _runWarning = DEFAULT_RUN_WARNING; - } - if (_log.shouldLog(Log.INFO)) - _log.info("Setting the warning job run time to " + _runWarning + "ms"); - - str = Router.getInstance().getConfigSetting(PROP_RUN_FATAL); - if (str != null) { - try { - _runFatal = Integer.parseInt(str); - } catch (NumberFormatException nfe) { - _log.error("Invalid job run fatal [" + str + "]"); - _runFatal = DEFAULT_RUN_FATAL; - } - } else { - _runFatal = DEFAULT_RUN_FATAL; - } - if (_log.shouldLog(Log.INFO)) - _log.info("Setting the fatal job run time to " + _runFatal + "ms"); - - str = Router.getInstance().getConfigSetting(PROP_WARMUM_TIME); - if (str != null) { - try { - _warmupTime = Integer.parseInt(str); - } catch (NumberFormatException nfe) { - _log.error("Invalid warmup time [" + str + "]"); - _warmupTime = DEFAULT_WARMUP_TIME; - } - } else { - _warmupTime = DEFAULT_WARMUP_TIME; - } - - str = Router.getInstance().getConfigSetting(PROP_MAX_WAITING_JOBS); - if (str != null) { - try { - _maxWaitingJobs = Integer.parseInt(str); - } catch (NumberFormatException nfe) { - _log.error("Invalid max waiting jobs [" + str + "]"); - _maxWaitingJobs = DEFAULT_MAX_WAITING_JOBS; - } - } else { - _maxWaitingJobs = DEFAULT_MAX_WAITING_JOBS; - } - if (_log.shouldLog(Log.INFO)) - _log.info("Setting the max waiting jobs to " + _maxWaitingJobs); + String str = _context.router().getConfigSetting(PROP_LAG_WARNING); + if (str != null) { + try { + _lagWarning = Integer.parseInt(str); + } catch (NumberFormatException nfe) { + _log.error("Invalid job lag warning [" + str + "]"); + _lagWarning = DEFAULT_LAG_WARNING; + } + } else { + _lagWarning = DEFAULT_LAG_WARNING; + } + if (_log.shouldLog(Log.INFO)) + _log.info("Setting the warning job 
lag time to " + _lagWarning + "ms"); + + str = _context.router().getConfigSetting(PROP_LAG_FATAL); + if (str != null) { + try { + _lagFatal = Integer.parseInt(str); + } catch (NumberFormatException nfe) { + _log.error("Invalid job lag fatal [" + str + "]"); + _lagFatal = DEFAULT_LAG_FATAL; + } + } else { + _lagFatal = DEFAULT_LAG_FATAL; + } + if (_log.shouldLog(Log.INFO)) + _log.info("Setting the fatal job lag time to " + _lagFatal + "ms"); + + str = _context.router().getConfigSetting(PROP_RUN_WARNING); + if (str != null) { + try { + _runWarning = Integer.parseInt(str); + } catch (NumberFormatException nfe) { + _log.error("Invalid job run warning [" + str + "]"); + _runWarning = DEFAULT_RUN_WARNING; + } + } else { + _runWarning = DEFAULT_RUN_WARNING; + } + if (_log.shouldLog(Log.INFO)) + _log.info("Setting the warning job run time to " + _runWarning + "ms"); + + str = _context.router().getConfigSetting(PROP_RUN_FATAL); + if (str != null) { + try { + _runFatal = Integer.parseInt(str); + } catch (NumberFormatException nfe) { + _log.error("Invalid job run fatal [" + str + "]"); + _runFatal = DEFAULT_RUN_FATAL; + } + } else { + _runFatal = DEFAULT_RUN_FATAL; + } + if (_log.shouldLog(Log.INFO)) + _log.info("Setting the fatal job run time to " + _runFatal + "ms"); + + str = _context.router().getConfigSetting(PROP_WARMUM_TIME); + if (str != null) { + try { + _warmupTime = Integer.parseInt(str); + } catch (NumberFormatException nfe) { + _log.error("Invalid warmup time [" + str + "]"); + _warmupTime = DEFAULT_WARMUP_TIME; + } + } else { + _warmupTime = DEFAULT_WARMUP_TIME; + } + + str = _context.router().getConfigSetting(PROP_MAX_WAITING_JOBS); + if (str != null) { + try { + _maxWaitingJobs = Integer.parseInt(str); + } catch (NumberFormatException nfe) { + _log.error("Invalid max waiting jobs [" + str + "]"); + _maxWaitingJobs = DEFAULT_MAX_WAITING_JOBS; + } + } else { + _maxWaitingJobs = DEFAULT_MAX_WAITING_JOBS; + } + if (_log.shouldLog(Log.INFO)) + _log.info("Setting the 
max waiting jobs to " + _maxWaitingJobs); } //// @@ -590,159 +596,124 @@ public class JobQueue { //// public String renderStatusHTML() { - LinkedList readyJobs = new LinkedList(); - LinkedList timedJobs = new LinkedList(); - LinkedList activeJobs = new LinkedList(); - synchronized (_readyJobs) { readyJobs.addAll(_readyJobs); } - synchronized (_timedJobs) { timedJobs.addAll(_timedJobs); } - synchronized (_queueRunners) { - for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext();) { - JobQueueRunner runner = (JobQueueRunner)iter.next(); - Job job = runner.getCurrentJob(); - if (job != null) - activeJobs.add(job.getName()); - } - } - StringBuffer buf = new StringBuffer(); - buf.append("

JobQueue

"); - buf.append("# runners: "); - synchronized (_queueRunners) { - buf.append(_queueRunners.size()); - } - buf.append("
\n"); - buf.append("# active jobs: ").append(activeJobs.size()).append("
    \n"); - for (int i = 0; i < activeJobs.size(); i++) { - buf.append("
  1. ").append(activeJobs.get(i)).append("
  2. \n"); - } - buf.append("
\n"); - buf.append("# ready/waiting jobs: ").append(readyJobs.size()).append(" (lots of these mean there's likely a big problem)
    \n"); - for (int i = 0; i < readyJobs.size(); i++) { - buf.append("
  1. ").append(readyJobs.get(i)).append("
  2. \n"); - } - buf.append("
\n"); - - buf.append("# timed jobs: ").append(timedJobs.size()).append("
    \n"); - TreeMap ordered = new TreeMap(); - for (int i = 0; i < timedJobs.size(); i++) { - Job j = (Job)timedJobs.get(i); - ordered.put(new Long(j.getTiming().getStartAfter()), j); - } - for (Iterator iter = ordered.values().iterator(); iter.hasNext(); ) { - Job j = (Job)iter.next(); - buf.append("
  1. ").append(j.getName()).append(" @ ").append(new Date(j.getTiming().getStartAfter())).append("
  2. \n"); - } - buf.append("
\n"); - buf.append(getJobStats()); - return buf.toString(); + ArrayList readyJobs = null; + ArrayList timedJobs = null; + ArrayList activeJobs = new ArrayList(4); + synchronized (_readyJobs) { readyJobs = new ArrayList(_readyJobs); } + synchronized (_timedJobs) { timedJobs = new ArrayList(_timedJobs); } + synchronized (_queueRunners) { + for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext();) { + JobQueueRunner runner = (JobQueueRunner)iter.next(); + Job job = runner.getCurrentJob(); + if (job != null) + activeJobs.add(job.getName()); + } + } + StringBuffer buf = new StringBuffer(20*1024); + buf.append("

JobQueue

"); + buf.append("# runners: "); + synchronized (_queueRunners) { + buf.append(_queueRunners.size()); + } + buf.append("
\n"); + buf.append("# active jobs: ").append(activeJobs.size()).append("
    \n"); + for (int i = 0; i < activeJobs.size(); i++) { + buf.append("
  1. ").append(activeJobs.get(i)).append("
  2. \n"); + } + buf.append("
\n"); + buf.append("# ready/waiting jobs: ").append(readyJobs.size()).append(" (lots of these mean there's likely a big problem)
    \n"); + for (int i = 0; i < readyJobs.size(); i++) { + buf.append("
  1. ").append(readyJobs.get(i)).append("
  2. \n"); + } + buf.append("
\n"); + + buf.append("# timed jobs: ").append(timedJobs.size()).append("
    \n"); + TreeMap ordered = new TreeMap(); + for (int i = 0; i < timedJobs.size(); i++) { + Job j = (Job)timedJobs.get(i); + ordered.put(new Long(j.getTiming().getStartAfter()), j); + } + for (Iterator iter = ordered.values().iterator(); iter.hasNext(); ) { + Job j = (Job)iter.next(); + buf.append("
  1. ").append(j.getName()).append(" @ "); + buf.append(new Date(j.getTiming().getStartAfter())).append("
  2. \n"); + } + buf.append("
\n"); + buf.append(getJobStats()); + return buf.toString(); } /** render the HTML for the job stats */ private String getJobStats() { - StringBuffer buf = new StringBuffer(1024); - buf.append("\n"); - buf.append(""); - buf.append(""); - buf.append("\n"); - long totRuns = 0; - long totExecTime = 0; - long avgExecTime = 0; - long maxExecTime = -1; - long minExecTime = -1; - long totPendingTime = 0; - long avgPendingTime = 0; - long maxPendingTime = -1; - long minPendingTime = -1; - - TreeMap tstats = null; - synchronized (_jobStats) { - tstats = (TreeMap)_jobStats.clone(); - } + StringBuffer buf = new StringBuffer(16*1024); + buf.append("
JobRunsTimeAvgMaxMinPendingAvgMaxMin
\n"); + buf.append(""); + buf.append(""); + buf.append("\n"); + long totRuns = 0; + long totExecTime = 0; + long avgExecTime = 0; + long maxExecTime = -1; + long minExecTime = -1; + long totPendingTime = 0; + long avgPendingTime = 0; + long maxPendingTime = -1; + long minPendingTime = -1; - for (Iterator iter = tstats.values().iterator(); iter.hasNext(); ) { - JobStats stats = (JobStats)iter.next(); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append("\n"); - totRuns += stats.getRuns(); - totExecTime += stats.getTotalTime(); - if (stats.getMaxTime() > maxExecTime) - maxExecTime = stats.getMaxTime(); - if ( (minExecTime < 0) || (minExecTime > stats.getMinTime()) ) - minExecTime = stats.getMinTime(); - totPendingTime += stats.getTotalPendingTime(); - if (stats.getMaxPendingTime() > maxPendingTime) - maxPendingTime = stats.getMaxPendingTime(); - if ( (minPendingTime < 0) || (minPendingTime > stats.getMinPendingTime()) ) - minPendingTime = stats.getMinPendingTime(); - } + TreeMap tstats = null; + synchronized (_jobStats) { + tstats = (TreeMap)_jobStats.clone(); + } - if (totRuns != 0) { - if (totExecTime != 0) - avgExecTime = totExecTime / totRuns; - if (totPendingTime != 0) - avgPendingTime = totPendingTime / totRuns; - } + for (Iterator iter = tstats.values().iterator(); iter.hasNext(); ) { + JobStats stats = (JobStats)iter.next(); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append("\n"); + totRuns += stats.getRuns(); + totExecTime += stats.getTotalTime(); + if (stats.getMaxTime() > maxExecTime) + maxExecTime = stats.getMaxTime(); + if ( (minExecTime < 0) || (minExecTime > stats.getMinTime()) ) + minExecTime = stats.getMinTime(); + totPendingTime += 
stats.getTotalPendingTime(); + if (stats.getMaxPendingTime() > maxPendingTime) + maxPendingTime = stats.getMaxPendingTime(); + if ( (minPendingTime < 0) || (minPendingTime > stats.getMinPendingTime()) ) + minPendingTime = stats.getMinPendingTime(); + } + + if (totRuns != 0) { + if (totExecTime != 0) + avgExecTime = totExecTime / totRuns; + if (totPendingTime != 0) + avgPendingTime = totPendingTime / totRuns; + } + + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append("\n"); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append("\n"); - - buf.append("
JobRunsTimeAvgMaxMinPendingAvgMaxMin
").append(stats.getName()).append("").append(stats.getRuns()).append("").append(stats.getTotalTime()).append("").append(stats.getAvgTime()).append("").append(stats.getMaxTime()).append("").append(stats.getMinTime()).append("").append(stats.getTotalPendingTime()).append("").append(stats.getAvgPendingTime()).append("").append(stats.getMaxPendingTime()).append("").append(stats.getMinPendingTime()).append("
").append(stats.getName()).append("").append(stats.getRuns()).append("").append(stats.getTotalTime()).append("").append(stats.getAvgTime()).append("").append(stats.getMaxTime()).append("").append(stats.getMinTime()).append("").append(stats.getTotalPendingTime()).append("").append(stats.getAvgPendingTime()).append("").append(stats.getMaxPendingTime()).append("").append(stats.getMinPendingTime()).append("

").append("SUMMARY").append("").append(totRuns).append("").append(totExecTime).append("").append(avgExecTime).append("").append(maxExecTime).append("").append(minExecTime).append("").append(totPendingTime).append("").append(avgPendingTime).append("").append(maxPendingTime).append("").append(minPendingTime).append("

").append("SUMMARY").append("").append(totRuns).append("").append(totExecTime).append("").append(avgExecTime).append("").append(maxExecTime).append("").append(minExecTime).append("").append(totPendingTime).append("").append(avgPendingTime).append("").append(maxPendingTime).append("").append(minPendingTime).append("
\n"); - return buf.toString(); - } - - /** - * Log what each queue runner is doing at the moment - * - */ - void dumpRunners() { dumpRunners(false); } - /** if asError, dump the job runners in an error message, else as a debug message */ - void dumpRunners(boolean asError) { - if (!asError && (!_log.shouldLog(Log.DEBUG)) ) return; - if (asError && (!_log.shouldLog(Log.WARN)) ) return; - StringBuffer buf = new StringBuffer(1024); - buf.append("Queue runners:\n"); - synchronized (_queueRunners) { - for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext(); ) { - JobQueueRunner runner = (JobQueueRunner)iter.next(); - Job job = runner.getCurrentJob(); - int id = runner.getRunnerId(); - buf.append("* Runner ").append(id).append(": \t"); - if (job == null) - buf.append("no job\n"); - else - buf.append(job.getName()).append('\n'); - } - } - synchronized (_timedJobs) { - buf.append("** Timed jobs: \t").append(_timedJobs.size()).append('\n'); - } - synchronized (_readyJobs) { - buf.append("** Ready jobs: \t").append(_readyJobs.size()).append('\n'); - } - - if (asError) - _log.warn(buf.toString()); - else - _log.debug(buf.toString()); + buf.append("\n"); + return buf.toString(); } } diff --git a/router/java/src/net/i2p/router/JobQueueRunner.java b/router/java/src/net/i2p/router/JobQueueRunner.java index c59300149..17f7656df 100644 --- a/router/java/src/net/i2p/router/JobQueueRunner.java +++ b/router/java/src/net/i2p/router/JobQueueRunner.java @@ -6,103 +6,105 @@ import net.i2p.util.Log; /** a do run run run a do run run */ class JobQueueRunner implements Runnable { - private final static Log _log = new Log(JobQueueRunner.class); + private Log _log; + private RouterContext _context; private boolean _keepRunning; private int _id; private long _numJobs; private Job _currentJob; - static { - StatManager.getInstance().createRateStat("jobQueue.jobRun", "How long jobs take", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - 
StatManager.getInstance().createRateStat("jobQueue.jobLag", "How long jobs have to wait before running", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createRateStat("jobQueue.jobWait", "How long does a job sat on the job queue?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createRateStat("jobQueue.jobRunnerInactive", "How long are runners inactive?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + public JobQueueRunner(RouterContext context, int id) { + _context = context; + _id = id; + _keepRunning = true; + _numJobs = 0; + _currentJob = null; + _log = _context.logManager().getLog(JobQueueRunner.class); + _context.statManager().createRateStat("jobQueue.jobRun", "How long jobs take", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createRateStat("jobQueue.jobLag", "How long jobs have to wait before running", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createRateStat("jobQueue.jobWait", "How long does a job sat on the job queue?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createRateStat("jobQueue.jobRunnerInactive", "How long are runners inactive?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); } - public JobQueueRunner(int id) { - _id = id; - _keepRunning = true; - _numJobs = 0; - _currentJob = null; - } public Job getCurrentJob() { return _currentJob; } public int getRunnerId() { return _id; } public void stopRunning() { _keepRunning = false; } public void run() { - long lastActive = Clock.getInstance().now();; - while ( (_keepRunning) && (JobQueue.getInstance().isAlive()) ) { - try { - Job job = JobQueue.getInstance().getNext(); - if (job == null) continue; - long now = Clock.getInstance().now(); - - long enqueuedTime = 0; - if (job instanceof JobImpl) { - long when = 
((JobImpl)job).getMadeReadyOn(); - if (when <= 0) { - _log.error("Job was not made ready?! " + job, new Exception("Not made ready?!")); - } else { - enqueuedTime = now - when; - } - } - - long betweenJobs = now - lastActive; - StatManager.getInstance().addRateData("jobQueue.jobRunnerInactive", betweenJobs, betweenJobs); - _currentJob = job; - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Runner " + _id + " running job " + job.getJobId() + ": " + job.getName()); - long origStartAfter = job.getTiming().getStartAfter(); - long doStart = Clock.getInstance().now(); - job.getTiming().start(); - runCurrentJob(); - job.getTiming().end(); - long duration = job.getTiming().getActualEnd() - job.getTiming().getActualStart(); + long lastActive = _context.clock().now(); + while ( (_keepRunning) && (_context.jobQueue().isAlive()) ) { + try { + Job job = _context.jobQueue().getNext(); + if (job == null) continue; + long now = _context.clock().now(); - long beforeUpdate = Clock.getInstance().now(); - JobQueue.getInstance().updateStats(job, doStart, origStartAfter, duration); - long diff = Clock.getInstance().now() - beforeUpdate; - - StatManager.getInstance().addRateData("jobQueue.jobRun", duration, duration); - StatManager.getInstance().addRateData("jobQueue.jobLag", doStart - origStartAfter, 0); - StatManager.getInstance().addRateData("jobQueue.jobWait", enqueuedTime, enqueuedTime); - - if (diff > 100) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Updating statistics for the job took too long [" + diff + "ms]"); - } - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Job duration " + duration + "ms for " + job.getName() + " with lag of " + (doStart-origStartAfter) + "ms"); - lastActive = Clock.getInstance().now(); - _currentJob = null; - } catch (Throwable t) { - if (_log.shouldLog(Log.CRIT)) - _log.log(Log.CRIT, "WTF, error running?", t); - } - } - if (_log.shouldLog(Log.CRIT)) - _log.log(Log.CRIT, "Queue runner " + _id + " exiting"); - JobQueue.getInstance().removeRunner(_id); + 
long enqueuedTime = 0; + if (job instanceof JobImpl) { + long when = ((JobImpl)job).getMadeReadyOn(); + if (when <= 0) { + _log.error("Job was not made ready?! " + job, + new Exception("Not made ready?!")); + } else { + enqueuedTime = now - when; + } + } + + long betweenJobs = now - lastActive; + _context.statManager().addRateData("jobQueue.jobRunnerInactive", betweenJobs, betweenJobs); + _currentJob = job; + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Runner " + _id + " running job " + job.getJobId() + ": " + job.getName()); + long origStartAfter = job.getTiming().getStartAfter(); + long doStart = _context.clock().now(); + job.getTiming().start(); + runCurrentJob(); + job.getTiming().end(); + long duration = job.getTiming().getActualEnd() - job.getTiming().getActualStart(); + long beforeUpdate = _context.clock().now(); + _context.jobQueue().updateStats(job, doStart, origStartAfter, duration); + long diff = _context.clock().now() - beforeUpdate; + + _context.statManager().addRateData("jobQueue.jobRun", duration, duration); + _context.statManager().addRateData("jobQueue.jobLag", doStart - origStartAfter, 0); + _context.statManager().addRateData("jobQueue.jobWait", enqueuedTime, enqueuedTime); + + if (diff > 100) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Updating statistics for the job took too long [" + diff + "ms]"); + } + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Job duration " + duration + "ms for " + job.getName() + + " with lag of " + (doStart-origStartAfter) + "ms"); + lastActive = _context.clock().now(); + _currentJob = null; + } catch (Throwable t) { + if (_log.shouldLog(Log.CRIT)) + _log.log(Log.CRIT, "WTF, error running?", t); + } + } + if (_log.shouldLog(Log.CRIT)) + _log.log(Log.CRIT, "Queue runner " + _id + " exiting"); + _context.jobQueue().removeRunner(_id); } private void runCurrentJob() { - try { - _currentJob.runJob(); - } catch (OutOfMemoryError oom) { - try { - if (_log.shouldLog(Log.CRIT)) - _log.log(Log.CRIT, "Router ran out of memory, 
shutting down", oom); - Router.getInstance().shutdown(); - } catch (Throwable t) { - System.err.println("***Router ran out of memory, shutting down hard"); - } - try { Thread.sleep(1000); } catch (InterruptedException ie) {} - System.exit(-1); - } catch (Throwable t) { - if (_log.shouldLog(Log.CRIT)) - _log.log(Log.CRIT, "Error processing job [" + _currentJob.getName() + "] on thread " + _id + ": " + t.getMessage(), t); - if (_log.shouldLog(Log.ERROR)) - _log.error("The above job was enqueued by: ", _currentJob.getAddedBy()); - JobQueue.getInstance().dumpRunners(true); - } + try { + _currentJob.runJob(); + } catch (OutOfMemoryError oom) { + try { + if (_log.shouldLog(Log.CRIT)) + _log.log(Log.CRIT, "Router ran out of memory, shutting down", oom); + _context.router().shutdown(); + } catch (Throwable t) { + System.err.println("***Router ran out of memory, shutting down hard"); + } + try { Thread.sleep(1000); } catch (InterruptedException ie) {} + System.exit(-1); + } catch (Throwable t) { + if (_log.shouldLog(Log.CRIT)) + _log.log(Log.CRIT, "Error processing job [" + _currentJob.getName() + + "] on thread " + _id + ": " + t.getMessage(), t); + if (_log.shouldLog(Log.ERROR)) + _log.error("The above job was enqueued by: ", _currentJob.getAddedBy()); + } } } diff --git a/router/java/src/net/i2p/router/JobTiming.java b/router/java/src/net/i2p/router/JobTiming.java index ab11e16f7..c623c38d9 100644 --- a/router/java/src/net/i2p/router/JobTiming.java +++ b/router/java/src/net/i2p/router/JobTiming.java @@ -1,14 +1,15 @@ package net.i2p.router; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. 
+ * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ import net.i2p.util.Clock; + /** * Define the timing requirements and statistics for a particular job * @@ -17,12 +18,14 @@ public class JobTiming implements Clock.ClockUpdateListener { private long _start; private long _actualStart; private long _actualEnd; + private RouterContext _context; - public JobTiming() { - _start = Clock.getInstance().now(); - _actualStart = 0; - _actualEnd = 0; - Clock.getInstance().addUpdateListener(this); + public JobTiming(RouterContext context) { + _context = context; + _start = context.clock().now(); + _actualStart = 0; + _actualEnd = 0; + context.clock().addUpdateListener(this); } /** @@ -42,7 +45,7 @@ public class JobTiming implements Clock.ClockUpdateListener { * Notify the timing that the job began * */ - public void start() { _actualStart = Clock.getInstance().now(); } + public void start() { _actualStart = _context.clock().now(); } /** * # of milliseconds after the epoch the job actually ended * @@ -53,17 +56,17 @@ public class JobTiming implements Clock.ClockUpdateListener { * Notify the timing that the job finished * */ - public void end() { - _actualEnd = Clock.getInstance().now(); - Clock.getInstance().removeUpdateListener(this); + public void end() { + _actualEnd = _context.clock().now(); + _context.clock().removeUpdateListener(this); } public void offsetChanged(long delta) { - if (_start != 0) - _start += delta; - if (_actualStart != 0) - _actualStart += delta; - if (_actualEnd != 0) - _actualEnd += delta; + if (_start != 0) + _start += delta; + if (_actualStart != 0) + _actualStart += delta; + if (_actualEnd != 0) + _actualEnd += delta; } } diff --git a/router/java/src/net/i2p/router/KeyManager.java b/router/java/src/net/i2p/router/KeyManager.java index cf25aa21c..28444eaa5 100644 --- a/router/java/src/net/i2p/router/KeyManager.java +++ b/router/java/src/net/i2p/router/KeyManager.java @@ -32,9 +32,8 
@@ import net.i2p.util.Log; * */ public class KeyManager { - private final static Log _log = new Log(KeyManager.class); - private static KeyManager _instance = new KeyManager(); - public static KeyManager getInstance() { return _instance; } + private Log _log; + private RouterContext _context; private PrivateKey _privateKey; private PublicKey _publicKey; private SigningPrivateKey _signingPrivateKey; @@ -49,13 +48,15 @@ public class KeyManager { private final static String KEYFILE_PUBLIC_SIGNING = "publicSigning.key"; private final static long DELAY = 30*1000; - private KeyManager() { - setPrivateKey(null); - setPublicKey(null); - setSigningPrivateKey(null); - setSigningPublicKey(null); - _leaseSetKeys = new HashMap(); - JobQueue.getInstance().addJob(new SynchronizeKeysJob()); + public KeyManager(RouterContext context) { + _context = context; + _log = _context.logManager().getLog(KeyManager.class); + setPrivateKey(null); + setPublicKey(null); + setSigningPrivateKey(null); + setSigningPublicKey(null); + _leaseSetKeys = new HashMap(); + _context.jobQueue().addJob(new SynchronizeKeysJob()); } /** Configure the router's private key */ @@ -72,119 +73,122 @@ public class KeyManager { public SigningPublicKey getSigningPublicKey() { return _signingPublicKey; } public void registerKeys(Destination dest, SigningPrivateKey leaseRevocationPrivateKey, PrivateKey endpointDecryptionKey) { - _log.info("Registering keys for destination " + dest.calculateHash().toBase64()); - LeaseSetKeys keys = new LeaseSetKeys(dest, leaseRevocationPrivateKey, endpointDecryptionKey); - synchronized (_leaseSetKeys) { - _leaseSetKeys.put(dest, keys); - } + _log.info("Registering keys for destination " + dest.calculateHash().toBase64()); + LeaseSetKeys keys = new LeaseSetKeys(dest, leaseRevocationPrivateKey, endpointDecryptionKey); + synchronized (_leaseSetKeys) { + _leaseSetKeys.put(dest, keys); + } } public LeaseSetKeys unregisterKeys(Destination dest) { - _log.info("Unregistering keys for 
destination " + dest.calculateHash().toBase64()); - synchronized (_leaseSetKeys) { - return (LeaseSetKeys)_leaseSetKeys.remove(dest); - } + _log.info("Unregistering keys for destination " + dest.calculateHash().toBase64()); + synchronized (_leaseSetKeys) { + return (LeaseSetKeys)_leaseSetKeys.remove(dest); + } } public LeaseSetKeys getKeys(Destination dest) { - synchronized (_leaseSetKeys) { - return (LeaseSetKeys)_leaseSetKeys.get(dest); - } + synchronized (_leaseSetKeys) { + return (LeaseSetKeys)_leaseSetKeys.get(dest); + } } public Set getAllKeys() { - HashSet keys = new HashSet(); - synchronized (_leaseSetKeys) { - keys.addAll(_leaseSetKeys.values()); - } - return keys; + HashSet keys = new HashSet(); + synchronized (_leaseSetKeys) { + keys.addAll(_leaseSetKeys.values()); + } + return keys; } private class SynchronizeKeysJob extends JobImpl { - public void runJob() { - String keyDir = Router.getInstance().getConfigSetting(PROP_KEYDIR); - if (keyDir == null) - keyDir = DEFAULT_KEYDIR; - File dir = new File(keyDir); - if (!dir.exists()) - dir.mkdirs(); - if (dir.exists() && dir.isDirectory() && dir.canRead() && dir.canWrite()) - syncKeys(dir); - - getTiming().setStartAfter(Clock.getInstance().now()+DELAY); - JobQueue.getInstance().addJob(this); - } - - private void syncKeys(File keyDir) { - syncPrivateKey(keyDir); - syncPublicKey(keyDir); - syncSigningKey(keyDir); - syncVerificationKey(keyDir); - } - - private void syncPrivateKey(File keyDir) { - File keyFile = new File(keyDir, KeyManager.KEYFILE_PRIVATE_ENC); - boolean exists = (_privateKey != null); - if (!exists) - _privateKey = new PrivateKey(); - _privateKey = (PrivateKey)syncKey(keyFile, _privateKey, exists); - } - private void syncPublicKey(File keyDir) { - File keyFile = new File(keyDir, KeyManager.KEYFILE_PUBLIC_ENC); - boolean exists = (_publicKey != null); - if (!exists) - _publicKey = new PublicKey(); - _publicKey = (PublicKey)syncKey(keyFile, _publicKey, exists); - } - - private void 
syncSigningKey(File keyDir) { - File keyFile = new File(keyDir, KeyManager.KEYFILE_PRIVATE_SIGNING); - boolean exists = (_signingPrivateKey != null); - if (!exists) - _signingPrivateKey = new SigningPrivateKey(); - _signingPrivateKey = (SigningPrivateKey)syncKey(keyFile, _signingPrivateKey, exists); - } - private void syncVerificationKey(File keyDir) { - File keyFile = new File(keyDir, KeyManager.KEYFILE_PUBLIC_SIGNING); - boolean exists = (_signingPublicKey != null); - if (!exists) - _signingPublicKey = new SigningPublicKey(); - _signingPublicKey = (SigningPublicKey)syncKey(keyFile, _signingPublicKey, exists); - } - - private DataStructure syncKey(File keyFile, DataStructure structure, boolean exists) { - FileOutputStream out = null; - FileInputStream in = null; - try { - if (exists) { - out = new FileOutputStream(keyFile); - structure.writeBytes(out); - return structure; - } else { - if (keyFile.exists()) { - in = new FileInputStream(keyFile); - structure.readBytes(in); - return structure; - } else { - // we don't have it, and its not on disk. oh well. 
- return null; - } - } - } catch (IOException ioe) { - _log.error("Error syncing the structure to " + keyFile.getAbsolutePath(), ioe); - } catch (DataFormatException dfe) { - _log.error("Error syncing the structure with " + keyFile.getAbsolutePath(), dfe); - } finally { - if (out != null) try { out.close(); } catch (IOException ioe) {} - if (in != null) try { in.close(); } catch (IOException ioe) {} - } - - if (exists) - return structure; - else - return null; - } - - public String getName() { return "Synchronize Keys to Disk"; } + public SynchronizeKeysJob() { + super(KeyManager.this._context); + } + public void runJob() { + String keyDir = KeyManager.this._context.router().getConfigSetting(PROP_KEYDIR); + if (keyDir == null) + keyDir = DEFAULT_KEYDIR; + File dir = new File(keyDir); + if (!dir.exists()) + dir.mkdirs(); + if (dir.exists() && dir.isDirectory() && dir.canRead() && dir.canWrite()) + syncKeys(dir); + + getTiming().setStartAfter(KeyManager.this._context.clock().now()+DELAY); + KeyManager.this._context.jobQueue().addJob(this); + } + + private void syncKeys(File keyDir) { + syncPrivateKey(keyDir); + syncPublicKey(keyDir); + syncSigningKey(keyDir); + syncVerificationKey(keyDir); + } + + private void syncPrivateKey(File keyDir) { + File keyFile = new File(keyDir, KeyManager.KEYFILE_PRIVATE_ENC); + boolean exists = (_privateKey != null); + if (!exists) + _privateKey = new PrivateKey(); + _privateKey = (PrivateKey)syncKey(keyFile, _privateKey, exists); + } + private void syncPublicKey(File keyDir) { + File keyFile = new File(keyDir, KeyManager.KEYFILE_PUBLIC_ENC); + boolean exists = (_publicKey != null); + if (!exists) + _publicKey = new PublicKey(); + _publicKey = (PublicKey)syncKey(keyFile, _publicKey, exists); + } + + private void syncSigningKey(File keyDir) { + File keyFile = new File(keyDir, KeyManager.KEYFILE_PRIVATE_SIGNING); + boolean exists = (_signingPrivateKey != null); + if (!exists) + _signingPrivateKey = new SigningPrivateKey(); + 
_signingPrivateKey = (SigningPrivateKey)syncKey(keyFile, _signingPrivateKey, exists); + } + private void syncVerificationKey(File keyDir) { + File keyFile = new File(keyDir, KeyManager.KEYFILE_PUBLIC_SIGNING); + boolean exists = (_signingPublicKey != null); + if (!exists) + _signingPublicKey = new SigningPublicKey(); + _signingPublicKey = (SigningPublicKey)syncKey(keyFile, _signingPublicKey, exists); + } + + private DataStructure syncKey(File keyFile, DataStructure structure, boolean exists) { + FileOutputStream out = null; + FileInputStream in = null; + try { + if (exists) { + out = new FileOutputStream(keyFile); + structure.writeBytes(out); + return structure; + } else { + if (keyFile.exists()) { + in = new FileInputStream(keyFile); + structure.readBytes(in); + return structure; + } else { + // we don't have it, and its not on disk. oh well. + return null; + } + } + } catch (IOException ioe) { + _log.error("Error syncing the structure to " + keyFile.getAbsolutePath(), ioe); + } catch (DataFormatException dfe) { + _log.error("Error syncing the structure with " + keyFile.getAbsolutePath(), dfe); + } finally { + if (out != null) try { out.close(); } catch (IOException ioe) {} + if (in != null) try { in.close(); } catch (IOException ioe) {} + } + + if (exists) + return structure; + else + return null; + } + + public String getName() { return "Synchronize Keys to Disk"; } } } diff --git a/router/java/src/net/i2p/router/MessageHistory.java b/router/java/src/net/i2p/router/MessageHistory.java index fde358b70..c2cf7edb5 100644 --- a/router/java/src/net/i2p/router/MessageHistory.java +++ b/router/java/src/net/i2p/router/MessageHistory.java @@ -23,13 +23,16 @@ import net.i2p.util.Log; * */ public class MessageHistory { - private final static Log _log = new Log(MessageHistory.class); - private static MessageHistory _instance; + private Log _log; + private RouterContext _context; private List _unwrittenEntries; // list of raw entries (strings) yet to be written private 
String _historyFile; // where to write private String _localIdent; // placed in each entry to uniquely identify the local router private boolean _doLog; // true == we want to log private boolean _doPause; // true == briefly stop writing data to the log (used while submitting it) + private ReinitializeJob _reinitializeJob; + private WriteJob _writeJob; + private SubmitMessageHistoryJob _submitMessageHistoryJob; private final static byte[] NL = System.getProperty("line.separator").getBytes(); private final static int FLUSH_SIZE = 1000; // write out at least once every 1000 entries @@ -41,21 +44,12 @@ public class MessageHistory { public final static String PROP_MESSAGE_HISTORY_FILENAME = "router.historyFilename"; public final static String DEFAULT_MESSAGE_HISTORY_FILENAME = "messageHistory.txt"; - public final static MessageHistory getInstance() { - if (_instance == null) - initialize(); - return _instance; - } - private final static void setInstance(MessageHistory hist) { - if (_instance != null) { - synchronized (_instance._unwrittenEntries) { - for (Iterator iter = _instance._unwrittenEntries.iterator(); iter.hasNext(); ) { - hist.addEntry((String)iter.next()); - } - _instance._unwrittenEntries.clear(); - } - } - _instance = hist; + public MessageHistory(RouterContext context) { + _context = context; + _reinitializeJob = new ReinitializeJob(); + _writeJob = new WriteJob(); + _submitMessageHistoryJob = new SubmitMessageHistoryJob(_context); + initialize(true); } void setDoLog(boolean log) { _doLog = log; } @@ -65,19 +59,19 @@ public class MessageHistory { String getFilename() { return _historyFile; } private void updateSettings() { - String keepHistory = Router.getInstance().getConfigSetting(PROP_KEEP_MESSAGE_HISTORY); - if (keepHistory != null) { - _doLog = Boolean.TRUE.toString().equalsIgnoreCase(keepHistory); - } else { - _doLog = DEFAULT_KEEP_MESSAGE_HISTORY; - } - - String filename = null; - if (_doLog) { - filename = 
Router.getInstance().getConfigSetting(PROP_MESSAGE_HISTORY_FILENAME); - if ( (filename == null) || (filename.trim().length() <= 0) ) - filename = DEFAULT_MESSAGE_HISTORY_FILENAME; - } + String keepHistory = _context.router().getConfigSetting(PROP_KEEP_MESSAGE_HISTORY); + if (keepHistory != null) { + _doLog = Boolean.TRUE.toString().equalsIgnoreCase(keepHistory); + } else { + _doLog = DEFAULT_KEEP_MESSAGE_HISTORY; + } + + String filename = null; + if (_doLog) { + filename = _context.router().getConfigSetting(PROP_MESSAGE_HISTORY_FILENAME); + if ( (filename == null) || (filename.trim().length() <= 0) ) + filename = DEFAULT_MESSAGE_HISTORY_FILENAME; + } } /** @@ -85,55 +79,38 @@ public class MessageHistory { * Call this whenever the router identity changes. * */ - public static void initialize() { - initialize(false); + public void initialize(boolean forceReinitialize) { + if (!forceReinitialize) return; + + if (_context.router().getRouterInfo() == null) { + _reinitializeJob.getTiming().setStartAfter(_context.clock().now()+5000); + _context.jobQueue().addJob(_reinitializeJob); + } else { + String filename = null; + filename = _context.router().getConfigSetting(PROP_MESSAGE_HISTORY_FILENAME); + if ( (filename == null) || (filename.trim().length() <= 0) ) + filename = DEFAULT_MESSAGE_HISTORY_FILENAME; + + _doLog = DEFAULT_KEEP_MESSAGE_HISTORY; + _historyFile = filename; + _localIdent = getName(_context.routerHash()); + _unwrittenEntries = new LinkedList(); + updateSettings(); + addEntry(getPrefix() + "** Router initialized (started up or changed identities)"); + _context.jobQueue().addJob(_writeJob); + _submitMessageHistoryJob.getTiming().setStartAfter(_context.clock().now() + 2*60*1000); + _context.jobQueue().addJob(_submitMessageHistoryJob); + } } - public static void initialize(boolean forceReinitialize) { - if ( (!forceReinitialize) && (_instance != null) ) return; - - if (Router.getInstance().getRouterInfo() == null) { - ReinitializeJob j = 
ReinitializeJob.getInstance(); - j.getTiming().setStartAfter(Clock.getInstance().now()+5000); - JobQueue.getInstance().addJob(j); - } else { - String filename = null; - filename = Router.getInstance().getConfigSetting(PROP_MESSAGE_HISTORY_FILENAME); - if ( (filename == null) || (filename.trim().length() <= 0) ) - filename = DEFAULT_MESSAGE_HISTORY_FILENAME; - MessageHistory hist = new MessageHistory(Router.getInstance().getRouterInfo().getIdentity().getHash(), filename); - setInstance(hist); - hist.updateSettings(); - getInstance().addEntry(getInstance().getPrefix() + "** Router initialized (started up or changed identities)"); - JobQueue.getInstance().addJob(new WriteJob()); - SubmitMessageHistoryJob histJob = new SubmitMessageHistoryJob(); - histJob.getTiming().setStartAfter(Clock.getInstance().now() + 2*60*1000); - JobQueue.getInstance().addJob(histJob); - } - } - - private static final class ReinitializeJob extends JobImpl { - private final static ReinitializeJob _jobInstance = new ReinitializeJob(); - public final static ReinitializeJob getInstance() { return _jobInstance; } - private ReinitializeJob() { - super(); - } - public void runJob() { - MessageHistory.initialize(); - } - public String getName() { return "Reinitialize message history"; } - } - - /** - * Create a component to monitor the message history of the router. 
- * - * @param localIdent Hash of local identity - * @param filename file to log trace info to - */ - private MessageHistory(Hash localIdent, String filename) { - _doLog = DEFAULT_KEEP_MESSAGE_HISTORY; - _historyFile = filename; - _localIdent = getName(localIdent); - _unwrittenEntries = new LinkedList(); + + private final class ReinitializeJob extends JobImpl { + private ReinitializeJob() { + super(MessageHistory.this._context); + } + public void runJob() { + initialize(true); + } + public String getName() { return "Reinitialize message history"; } } /** @@ -151,20 +128,20 @@ public class MessageHistory { * @param replyThrough the gateway of the tunnel that the sourceRoutePeer will be sending to */ public void requestTunnelCreate(TunnelId createTunnel, TunnelId outTunnel, Hash peerRequested, Hash nextPeer, Hash sourceRoutePeer, TunnelId replyTunnel, Hash replyThrough) { - if (!_doLog) return; - StringBuffer buf = new StringBuffer(128); - buf.append(getPrefix()); - buf.append("request [").append(getName(peerRequested)).append("] to create tunnel ["); - buf.append(createTunnel.getTunnelId()).append("] "); - if (nextPeer != null) - buf.append("(next [").append(getName(nextPeer)).append("]) "); - if (outTunnel != null) - buf.append("via [").append(outTunnel.getTunnelId()).append("] "); - if (sourceRoutePeer != null) - buf.append("with replies routed through [").append(getName(sourceRoutePeer)).append("] "); - if ( (replyTunnel != null) && (replyThrough != null) ) - buf.append("who forwards it through [").append(replyTunnel.getTunnelId()).append("] on [").append(getName(replyThrough)).append("]"); - addEntry(buf.toString()); + if (!_doLog) return; + StringBuffer buf = new StringBuffer(128); + buf.append(getPrefix()); + buf.append("request [").append(getName(peerRequested)).append("] to create tunnel ["); + buf.append(createTunnel.getTunnelId()).append("] "); + if (nextPeer != null) + buf.append("(next [").append(getName(nextPeer)).append("]) "); + if (outTunnel != null) 
+ buf.append("via [").append(outTunnel.getTunnelId()).append("] "); + if (sourceRoutePeer != null) + buf.append("with replies routed through [").append(getName(sourceRoutePeer)).append("] "); + if ( (replyTunnel != null) && (replyThrough != null) ) + buf.append("who forwards it through [").append(replyTunnel.getTunnelId()).append("] on [").append(getName(replyThrough)).append("]"); + addEntry(buf.toString()); } /** @@ -178,14 +155,14 @@ public class MessageHistory { * @param sourceRoutePeer peer through whom we should send our garlic routed ok through */ public void receiveTunnelCreate(TunnelId createTunnel, Hash nextPeer, Date expire, boolean ok, Hash sourceRoutePeer) { - if (!_doLog) return; - StringBuffer buf = new StringBuffer(128); - buf.append(getPrefix()); - buf.append("receive tunnel create [").append(createTunnel.getTunnelId()).append("] "); - if (nextPeer != null) - buf.append("(next [").append(getName(nextPeer)).append("]) "); - buf.append("ok? ").append(ok).append(" expiring on [").append(getTime(expire)).append("]"); - addEntry(buf.toString()); + if (!_doLog) return; + StringBuffer buf = new StringBuffer(128); + buf.append(getPrefix()); + buf.append("receive tunnel create [").append(createTunnel.getTunnelId()).append("] "); + if (nextPeer != null) + buf.append("(next [").append(getName(nextPeer)).append("]) "); + buf.append("ok? 
").append(ok).append(" expiring on [").append(getTime(expire)).append("]"); + addEntry(buf.toString()); } /** @@ -195,22 +172,22 @@ public class MessageHistory { * @param tunnel tunnel joined */ public void tunnelJoined(String state, TunnelInfo tunnel) { - if (!_doLog) return; - if (tunnel == null) return; - StringBuffer buf = new StringBuffer(128); - buf.append(getPrefix()); - buf.append("joining tunnel [").append(tunnel.getTunnelId().getTunnelId()).append("] as [").append(state).append("] "); - buf.append(" (next: "); - TunnelInfo cur = tunnel; - while (cur.getNextHopInfo() != null) { - buf.append('[').append(getName(cur.getNextHopInfo().getThisHop())); - buf.append("], "); - cur = cur.getNextHopInfo(); - } - if (cur.getNextHop() != null) - buf.append('[').append(getName(cur.getNextHop())).append(']'); - buf.append(") expiring on [").append(getTime(new Date(tunnel.getSettings().getExpiration()))).append("]"); - addEntry(buf.toString()); + if (!_doLog) return; + if (tunnel == null) return; + StringBuffer buf = new StringBuffer(128); + buf.append(getPrefix()); + buf.append("joining tunnel [").append(tunnel.getTunnelId().getTunnelId()).append("] as [").append(state).append("] "); + buf.append(" (next: "); + TunnelInfo cur = tunnel; + while (cur.getNextHopInfo() != null) { + buf.append('[').append(getName(cur.getNextHopInfo().getThisHop())); + buf.append("], "); + cur = cur.getNextHopInfo(); + } + if (cur.getNextHop() != null) + buf.append('[').append(getName(cur.getNextHop())).append(']'); + buf.append(") expiring on [").append(getTime(new Date(tunnel.getSettings().getExpiration()))).append("]"); + addEntry(buf.toString()); } /** @@ -219,12 +196,12 @@ public class MessageHistory { * @param tunnel tunnel failed */ public void tunnelFailed(TunnelId tunnel) { - if (!_doLog) return; - if (tunnel == null) return; - StringBuffer buf = new StringBuffer(128); - buf.append(getPrefix()); - buf.append("failing tunnel [").append(tunnel.getTunnelId()).append("]"); - 
addEntry(buf.toString()); + if (!_doLog) return; + if (tunnel == null) return; + StringBuffer buf = new StringBuffer(128); + buf.append(getPrefix()); + buf.append("failing tunnel [").append(tunnel.getTunnelId()).append("]"); + addEntry(buf.toString()); } /** @@ -235,24 +212,24 @@ public class MessageHistory { * @param timeToTest milliseconds to verify the tunnel */ public void tunnelValid(TunnelInfo tunnel, long timeToTest) { - if (!_doLog) return; - if (tunnel == null) return; - StringBuffer buf = new StringBuffer(128); - buf.append(getPrefix()); - buf.append("tunnel ").append(tunnel.getTunnelId().getTunnelId()).append(" tested ok after ").append(timeToTest).append("ms (containing "); - TunnelInfo cur = tunnel; - while (cur != null) { - buf.append('[').append(getName(cur.getThisHop())).append("], "); - if (cur.getNextHopInfo() != null) { - cur = cur.getNextHopInfo(); - } else { - if (cur.getNextHop() != null) - buf.append('[').append(getName(cur.getNextHop())).append(']'); - cur = null; - } - } - buf.append(')'); - addEntry(buf.toString()); + if (!_doLog) return; + if (tunnel == null) return; + StringBuffer buf = new StringBuffer(128); + buf.append(getPrefix()); + buf.append("tunnel ").append(tunnel.getTunnelId().getTunnelId()).append(" tested ok after ").append(timeToTest).append("ms (containing "); + TunnelInfo cur = tunnel; + while (cur != null) { + buf.append('[').append(getName(cur.getThisHop())).append("], "); + if (cur.getNextHopInfo() != null) { + cur = cur.getNextHopInfo(); + } else { + if (cur.getNextHop() != null) + buf.append('[').append(getName(cur.getNextHop())).append(']'); + cur = null; + } + } + buf.append(')'); + addEntry(buf.toString()); } /** @@ -260,15 +237,15 @@ public class MessageHistory { * */ public void tunnelRejected(Hash peer, TunnelId tunnel, Hash replyThrough, String reason) { - if (!_doLog) return; - if ( (tunnel == null) || (peer == null) ) return; - StringBuffer buf = new StringBuffer(128); - buf.append(getPrefix()); - 
buf.append("tunnel [").append(tunnel.getTunnelId()).append("] was rejected by ["); - buf.append(getName(peer)).append("] for [").append(reason).append("]"); - if (replyThrough != null) - buf.append(" with their reply intended to come through [").append(getName(replyThrough)).append("]"); - addEntry(buf.toString()); + if (!_doLog) return; + if ( (tunnel == null) || (peer == null) ) return; + StringBuffer buf = new StringBuffer(128); + buf.append(getPrefix()); + buf.append("tunnel [").append(tunnel.getTunnelId()).append("] was rejected by ["); + buf.append(getName(peer)).append("] for [").append(reason).append("]"); + if (replyThrough != null) + buf.append(" with their reply intended to come through [").append(getName(replyThrough)).append("]"); + addEntry(buf.toString()); } /** @@ -277,15 +254,15 @@ public class MessageHistory { * */ public void tunnelRequestTimedOut(Hash peer, TunnelId tunnel, Hash replyThrough) { - if (!_doLog) return; - if ( (tunnel == null) || (peer == null) ) return; - StringBuffer buf = new StringBuffer(128); - buf.append(getPrefix()); - buf.append("tunnel [").append(tunnel.getTunnelId()).append("] timed out on ["); - buf.append(getName(peer)).append("]"); - if (replyThrough != null) - buf.append(" with their reply intended to come through [").append(getName(replyThrough)).append("]"); - addEntry(buf.toString()); + if (!_doLog) return; + if ( (tunnel == null) || (peer == null) ) return; + StringBuffer buf = new StringBuffer(128); + buf.append(getPrefix()); + buf.append("tunnel [").append(tunnel.getTunnelId()).append("] timed out on ["); + buf.append(getName(peer)).append("]"); + if (replyThrough != null) + buf.append(" with their reply intended to come through [").append(getName(replyThrough)).append("]"); + addEntry(buf.toString()); } /** @@ -296,24 +273,24 @@ public class MessageHistory { * @param from peer that sent us this message (if known) */ public void droppedTunnelMessage(TunnelId id, Hash from) { - if (!_doLog) return; - StringBuffer 
buf = new StringBuffer(128); - buf.append(getPrefix()); - buf.append("dropped message for unknown tunnel [").append(id.getTunnelId()).append("] from [").append(getName(from)).append("]"); - addEntry(buf.toString()); + if (!_doLog) return; + StringBuffer buf = new StringBuffer(128); + buf.append(getPrefix()); + buf.append("dropped message for unknown tunnel [").append(id.getTunnelId()).append("] from [").append(getName(from)).append("]"); + addEntry(buf.toString()); } /** * We received another message we weren't waiting for and don't know how to handle */ public void droppedOtherMessage(I2NPMessage message) { - if (!_doLog) return; - if (message == null) return; - StringBuffer buf = new StringBuffer(512); - buf.append(getPrefix()); - buf.append("dropped [").append(message.getClass().getName()).append("] ").append(message.getUniqueId()); - buf.append(" [").append(message.toString()).append("]"); - addEntry(buf.toString()); + if (!_doLog) return; + if (message == null) return; + StringBuffer buf = new StringBuffer(512); + buf.append(getPrefix()); + buf.append("dropped [").append(message.getClass().getName()).append("] ").append(message.getUniqueId()); + buf.append(" [").append(message.toString()).append("]"); + addEntry(buf.toString()); } /** @@ -322,16 +299,16 @@ public class MessageHistory { * @param sentMessage message sent that didn't receive a reply */ public void replyTimedOut(OutNetMessage sentMessage) { - if (!_doLog) return; - if (sentMessage == null) return; - StringBuffer buf = new StringBuffer(512); - buf.append(getPrefix()); - buf.append("timed out waiting for a reply to [").append(sentMessage.getMessage().getClass().getName()); - buf.append("] [").append(sentMessage.getMessage().getUniqueId()).append("] expiring on ["); - if (sentMessage != null) - buf.append(getTime(new Date(sentMessage.getReplySelector().getExpiration()))); - buf.append("] ").append(sentMessage.getReplySelector().toString()); - addEntry(buf.toString()); + if (!_doLog) return; + if 
(sentMessage == null) return; + StringBuffer buf = new StringBuffer(512); + buf.append(getPrefix()); + buf.append("timed out waiting for a reply to [").append(sentMessage.getMessage().getClass().getName()); + buf.append("] [").append(sentMessage.getMessage().getUniqueId()).append("] expiring on ["); + if (sentMessage != null) + buf.append(getTime(new Date(sentMessage.getReplySelector().getExpiration()))); + buf.append("] ").append(sentMessage.getReplySelector().toString()); + addEntry(buf.toString()); } /** @@ -342,11 +319,11 @@ public class MessageHistory { * @param error error message related to the processing of the message */ public void messageProcessingError(long messageId, String messageType, String error) { - if (!_doLog) return; - StringBuffer buf = new StringBuffer(128); - buf.append(getPrefix()); - buf.append("Error processing [").append(messageType).append("] [").append(messageId).append("] failed with [").append(error).append("]"); - addEntry(buf.toString()); + if (!_doLog) return; + StringBuffer buf = new StringBuffer(128); + buf.append(getPrefix()); + buf.append("Error processing [").append(messageType).append("] [").append(messageId).append("] failed with [").append(error).append("]"); + addEntry(buf.toString()); } /** @@ -360,17 +337,17 @@ public class MessageHistory { * @param sentOk whether the message was sent successfully */ public void sendMessage(String messageType, long messageId, Date expiration, Hash peer, boolean sentOk) { - if (!_doLog) return; - StringBuffer buf = new StringBuffer(128); - buf.append(getPrefix()); - buf.append("send [").append(messageType).append("] message [").append(messageId).append("] "); - buf.append("to [").append(getName(peer)).append("] "); - buf.append("expiring on [").append(getTime(expiration)).append("] "); - if (sentOk) - buf.append("successfully"); - else - buf.append("failed"); - addEntry(buf.toString()); + if (!_doLog) return; + StringBuffer buf = new StringBuffer(128); + buf.append(getPrefix()); + 
buf.append("send [").append(messageType).append("] message [").append(messageId).append("] "); + buf.append("to [").append(getName(peer)).append("] "); + buf.append("expiring on [").append(getTime(expiration)).append("] "); + if (sentOk) + buf.append("successfully"); + else + buf.append("failed"); + addEntry(buf.toString()); } /** @@ -385,20 +362,20 @@ public class MessageHistory { * */ public void receiveMessage(String messageType, long messageId, Date expiration, Hash from, boolean isValid) { - if (!_doLog) return; - StringBuffer buf = new StringBuffer(128); - buf.append(getPrefix()); - buf.append("receive [").append(messageType).append("] with id [").append(messageId).append("] "); - if (from != null) - buf.append("from [").append(getName(from)).append("] "); - buf.append("expiring on [").append(getTime(expiration)).append("] valid? ").append(isValid); - addEntry(buf.toString()); - if (messageType.equals("net.i2p.data.i2np.TunnelMessage")) { - //_log.warn("ReceiveMessage tunnel message ["+messageId+"]", new Exception("Receive tunnel")); + if (!_doLog) return; + StringBuffer buf = new StringBuffer(128); + buf.append(getPrefix()); + buf.append("receive [").append(messageType).append("] with id [").append(messageId).append("] "); + if (from != null) + buf.append("from [").append(getName(from)).append("] "); + buf.append("expiring on [").append(getTime(expiration)).append("] valid? 
").append(isValid); + addEntry(buf.toString()); + if (messageType.equals("net.i2p.data.i2np.TunnelMessage")) { + //_log.warn("ReceiveMessage tunnel message ["+messageId+"]", new Exception("Receive tunnel")); } } public void receiveMessage(String messageType, long messageId, Date expiration, boolean isValid) { - receiveMessage(messageType, messageId, expiration, null, isValid); + receiveMessage(messageType, messageId, expiration, null, isValid); } /** @@ -410,12 +387,12 @@ public class MessageHistory { * @param containerMessageId the unique message id of the message */ public void wrap(String bodyMessageType, long bodyMessageId, String containerMessageType, long containerMessageId) { - if (!_doLog) return; - StringBuffer buf = new StringBuffer(128); - buf.append(getPrefix()); - buf.append("Wrap message [").append(bodyMessageType).append("] id [").append(bodyMessageId).append("] "); - buf.append("in [").append(containerMessageType).append("] id [").append(containerMessageId).append("]"); - addEntry(buf.toString()); + if (!_doLog) return; + StringBuffer buf = new StringBuffer(128); + buf.append(getPrefix()); + buf.append("Wrap message [").append(bodyMessageType).append("] id [").append(bodyMessageId).append("] "); + buf.append("in [").append(containerMessageType).append("] id [").append(containerMessageId).append("]"); + addEntry(buf.toString()); } /** @@ -423,11 +400,11 @@ public class MessageHistory { * */ public void receivePayloadMessage(long messageId) { - if (!_doLog) return; - StringBuffer buf = new StringBuffer(64); - buf.append(getPrefix()); - buf.append("Receive payload message [").append(messageId).append("]"); - addEntry(buf.toString()); + if (!_doLog) return; + StringBuffer buf = new StringBuffer(64); + buf.append(getPrefix()); + buf.append("Receive payload message [").append(messageId).append("]"); + addEntry(buf.toString()); } /** @@ -438,11 +415,11 @@ public class MessageHistory { * @param timeToSend how long it took to send the message */ public void 
sendPayloadMessage(long messageId, boolean successfullySent, long timeToSend) { - if (!_doLog) return; - StringBuffer buf = new StringBuffer(128); - buf.append(getPrefix()); - buf.append("Send payload message in [").append(messageId).append("] in [").append(timeToSend).append("] successfully? ").append(successfullySent); - addEntry(buf.toString()); + if (!_doLog) return; + StringBuffer buf = new StringBuffer(128); + buf.append(getPrefix()); + buf.append("Send payload message in [").append(messageId).append("] in [").append(timeToSend).append("] successfully? ").append(successfullySent); + addEntry(buf.toString()); } /** @@ -450,27 +427,27 @@ public class MessageHistory { * */ private final static String getName(Hash router) { - if (router == null) return "unknown"; - String str = router.toBase64(); - if ( (str == null) || (str.length() < 6) ) return "invalid"; - return str.substring(0, 6); + if (router == null) return "unknown"; + String str = router.toBase64(); + if ( (str == null) || (str.length() < 6) ) return "invalid"; + return str.substring(0, 6); } private final String getPrefix() { - StringBuffer buf = new StringBuffer(48); - buf.append(getTime(new Date(Clock.getInstance().now()))); - buf.append(' ').append(_localIdent).append(": "); - return buf.toString(); + StringBuffer buf = new StringBuffer(48); + buf.append(getTime(new Date(_context.clock().now()))); + buf.append(' ').append(_localIdent).append(": "); + return buf.toString(); } private final static SimpleDateFormat _fmt = new SimpleDateFormat("yy/MM/dd.HH:mm:ss.SSS"); static { - _fmt.setTimeZone(TimeZone.getTimeZone("GMT")); + _fmt.setTimeZone(TimeZone.getTimeZone("GMT")); } private final static String getTime(Date when) { - synchronized (_fmt) { - return _fmt.format(when); - } + synchronized (_fmt) { + return _fmt.format(when); + } } /** @@ -479,27 +456,27 @@ public class MessageHistory { * */ private void addEntry(String entry) { - if (entry == null) return; - int sz = 0; - synchronized 
(_unwrittenEntries) { - _unwrittenEntries.add(entry); - sz = _unwrittenEntries.size(); - } - if (sz > FLUSH_SIZE) - flushEntries(); + if (entry == null) return; + int sz = 0; + synchronized (_unwrittenEntries) { + _unwrittenEntries.add(entry); + sz = _unwrittenEntries.size(); + } + if (sz > FLUSH_SIZE) + flushEntries(); } /** * Write out any unwritten entries, and clear the pending list */ private void flushEntries() { - if (_doPause) return; - List entries = null; - synchronized (_unwrittenEntries) { - entries = new LinkedList(_unwrittenEntries); - _unwrittenEntries.clear(); - } - writeEntries(entries); + if (_doPause) return; + List entries = null; + synchronized (_unwrittenEntries) { + entries = new LinkedList(_unwrittenEntries); + _unwrittenEntries.clear(); + } + writeEntries(entries); } /** @@ -507,41 +484,46 @@ public class MessageHistory { * */ private void writeEntries(List entries) { - if (!_doLog) return; - FileOutputStream fos = null; - try { - fos = new FileOutputStream(_historyFile, true); - for (Iterator iter = entries.iterator(); iter.hasNext(); ) { - String entry = (String)iter.next(); - fos.write(entry.getBytes()); - fos.write(NL); - } - } catch (IOException ioe) { - _log.error("Error writing trace entries", ioe); - } finally { - if (fos != null) try { fos.close(); } catch (IOException ioe) {} - } + if (!_doLog) return; + FileOutputStream fos = null; + try { + fos = new FileOutputStream(_historyFile, true); + for (Iterator iter = entries.iterator(); iter.hasNext(); ) { + String entry = (String)iter.next(); + fos.write(entry.getBytes()); + fos.write(NL); + } + } catch (IOException ioe) { + _log.error("Error writing trace entries", ioe); + } finally { + if (fos != null) try { fos.close(); } catch (IOException ioe) {} + } } /** write out the message history once per minute, if not sooner */ private final static long WRITE_DELAY = 60*1000; - private static class WriteJob extends JobImpl { - public String getName() { return "Write History Entries"; } - 
public void runJob() { - MessageHistory.getInstance().flushEntries(); - MessageHistory.getInstance().updateSettings(); - requeue(WRITE_DELAY); - } + private class WriteJob extends JobImpl { + public WriteJob() { + super(MessageHistory.this._context); + } + public String getName() { return "Write History Entries"; } + public void runJob() { + flushEntries(); + updateSettings(); + requeue(WRITE_DELAY); + } } public static void main(String args[]) { - MessageHistory hist = new MessageHistory(new Hash(new byte[32]), "messageHistory.txt"); - MessageHistory.getInstance().setDoLog(false); - hist.addEntry("you smell before"); - hist.getInstance().setDoLog(true); - hist.addEntry("you smell after"); - hist.getInstance().setDoLog(false); - hist.addEntry("you smell finished"); - hist.flushEntries(); + RouterContext ctx = new RouterContext(null); + MessageHistory hist = new MessageHistory(ctx); + //, new Hash(new byte[32]), "messageHistory.txt"); + hist.setDoLog(false); + hist.addEntry("you smell before"); + hist.setDoLog(true); + hist.addEntry("you smell after"); + hist.setDoLog(false); + hist.addEntry("you smell finished"); + hist.flushEntries(); } } diff --git a/router/java/src/net/i2p/router/MessageValidator.java b/router/java/src/net/i2p/router/MessageValidator.java index 01191ef12..8dd8bc050 100644 --- a/router/java/src/net/i2p/router/MessageValidator.java +++ b/router/java/src/net/i2p/router/MessageValidator.java @@ -17,21 +17,29 @@ import net.i2p.util.Log; * */ public class MessageValidator { - private final static Log _log = new Log(MessageValidator.class); - private final static MessageValidator _instance = new MessageValidator(); - public final static MessageValidator getInstance() { return _instance; } - - /** + private Log _log; + private RouterContext _context; + /** * Expiration date (as a Long) to message id (as a Long). * The expiration date (key) must be unique, so on collision, increment the value. 
* This keeps messageIds around longer than they need to be, but hopefully not by much ;) * */ - private TreeMap _receivedIdExpirations = new TreeMap(); + private TreeMap _receivedIdExpirations; /** Message id (as a Long) */ - private Set _receivedIds = new HashSet(1024); + private Set _receivedIds; /** synchronize on this before adjusting the received id data */ - private Object _receivedIdLock = new Object(); + private Object _receivedIdLock; + + + public MessageValidator(RouterContext context) { + _log = context.logManager().getLog(MessageValidator.class); + _receivedIdExpirations = new TreeMap(); + _receivedIds = new HashSet(1024); + _receivedIdLock = new Object(); + _context = context; + } + /** * Determine if this message should be accepted as valid (not expired, not a duplicate) @@ -39,88 +47,87 @@ public class MessageValidator { * @return true if the message should be accepted as valid, false otherwise */ public boolean validateMessage(long messageId, long expiration) { - long now = Clock.getInstance().now(); - if (now - Router.CLOCK_FUDGE_FACTOR >= expiration) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Rejecting message " + messageId + " because it expired " + (now-expiration) + "ms ago"); - return false; - } - - boolean isDuplicate = noteReception(messageId, expiration); - if (isDuplicate) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Rejecting message " + messageId + " because it is a duplicate", new Exception("Duplicate origin")); - return false; - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Accepting message " + messageId + " because it is NOT a duplicate", new Exception("Original origin")); - return true; - } + long now = _context.clock().now(); + if (now - Router.CLOCK_FUDGE_FACTOR >= expiration) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Rejecting message " + messageId + " because it expired " + (now-expiration) + "ms ago"); + return false; + } + + boolean isDuplicate = noteReception(messageId, expiration); + if (isDuplicate) { + 
if (_log.shouldLog(Log.WARN)) + _log.warn("Rejecting message " + messageId + " because it is a duplicate", new Exception("Duplicate origin")); + return false; + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Accepting message " + messageId + " because it is NOT a duplicate", new Exception("Original origin")); + return true; + } } /** * Note that we've received the message (which has the expiration given). - * This functionality will need to be reworked for I2P 3.0 when we take into + * This functionality will need to be reworked for I2P 3.0 when we take into * consideration messages with significant user specified delays (since we dont * want to keep an infinite number of messages in RAM, etc) * * @return true if we HAVE already seen this message, false if not */ private boolean noteReception(long messageId, long messageExpiration) { - Long id = new Long(messageId); - synchronized (_receivedIdLock) { - locked_cleanReceivedIds(Clock.getInstance().now() - Router.CLOCK_FUDGE_FACTOR); - if (_receivedIds.contains(id)) { - return true; - } else { - long date = messageExpiration; - while (_receivedIdExpirations.containsKey(new Long(date))) - date++; - _receivedIdExpirations.put(new Long(date), id); - _receivedIds.add(id); - return false; - } - } + Long id = new Long(messageId); + synchronized (_receivedIdLock) { + locked_cleanReceivedIds(_context.clock().now() - Router.CLOCK_FUDGE_FACTOR); + if (_receivedIds.contains(id)) { + return true; + } else { + long date = messageExpiration; + while (_receivedIdExpirations.containsKey(new Long(date))) + date++; + _receivedIdExpirations.put(new Long(date), id); + _receivedIds.add(id); + return false; + } + } } - + /** - * Clean the ids that we no longer need to keep track of to prevent replay + * Clean the ids that we no longer need to keep track of to prevent replay * attacks. 
* - */ + */ private void cleanReceivedIds() { - long now = Clock.getInstance().now() - Router.CLOCK_FUDGE_FACTOR ; - synchronized (_receivedIdLock) { - locked_cleanReceivedIds(now); - } + long now = _context.clock().now() - Router.CLOCK_FUDGE_FACTOR ; + synchronized (_receivedIdLock) { + locked_cleanReceivedIds(now); + } } - + /** - * Clean the ids that we no longer need to keep track of to prevent replay + * Clean the ids that we no longer need to keep track of to prevent replay * attacks - only call this from within a block synchronized on the received ID lock. * */ private void locked_cleanReceivedIds(long now) { - Set toRemoveIds = new HashSet(4); - Set toRemoveDates = new HashSet(4); - for (Iterator iter = _receivedIdExpirations.keySet().iterator(); iter.hasNext(); ) { - Long date = (Long)iter.next(); - if (date.longValue() <= now) { - // no need to keep track of things in the past - toRemoveDates.add(date); - toRemoveIds.add(_receivedIdExpirations.get(date)); - } else { - // the expiration is in the future, we still need to keep track of - // it to prevent replays - break; - } - } - for (Iterator iter = toRemoveDates.iterator(); iter.hasNext(); ) - _receivedIdExpirations.remove(iter.next()); - for (Iterator iter = toRemoveIds.iterator(); iter.hasNext(); ) - _receivedIds.remove(iter.next()); - if (_log.shouldLog(Log.INFO)) - _log.info("Cleaned out " + toRemoveDates.size() + " expired messageIds, leaving " + _receivedIds.size() + " remaining"); + Set toRemoveIds = new HashSet(4); + Set toRemoveDates = new HashSet(4); + for (Iterator iter = _receivedIdExpirations.keySet().iterator(); iter.hasNext(); ) { + Long date = (Long)iter.next(); + if (date.longValue() <= now) { + // no need to keep track of things in the past + toRemoveDates.add(date); + toRemoveIds.add(_receivedIdExpirations.get(date)); + } else { + // the expiration is in the future, we still need to keep track of + // it to prevent replays + break; + } + } + for (Iterator iter = 
toRemoveDates.iterator(); iter.hasNext(); ) + _receivedIdExpirations.remove(iter.next()); + for (Iterator iter = toRemoveIds.iterator(); iter.hasNext(); ) + _receivedIds.remove(iter.next()); + if (_log.shouldLog(Log.INFO)) + _log.info("Cleaned out " + toRemoveDates.size() + " expired messageIds, leaving " + _receivedIds.size() + " remaining"); } - } diff --git a/router/java/src/net/i2p/router/NetworkDatabaseFacade.java b/router/java/src/net/i2p/router/NetworkDatabaseFacade.java index 7e78f2ae2..c00208dd6 100644 --- a/router/java/src/net/i2p/router/NetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/NetworkDatabaseFacade.java @@ -23,9 +23,6 @@ import net.i2p.router.networkdb.kademlia.KademliaNetworkDatabaseFacade; * */ public abstract class NetworkDatabaseFacade implements Service { - private static NetworkDatabaseFacade _instance = new KademliaNetworkDatabaseFacade(); // NetworkDatabaseFacadeImpl(); - public static NetworkDatabaseFacade getInstance() { return _instance; } - /** * Return the RouterInfo structures for the routers closest to the given key. 
* At most maxNumRouters will be returned @@ -54,33 +51,35 @@ public abstract class NetworkDatabaseFacade implements Service { class DummyNetworkDatabaseFacade extends NetworkDatabaseFacade { private Map _routers; + private RouterContext _context; - public DummyNetworkDatabaseFacade() { - _routers = new HashMap(); + public DummyNetworkDatabaseFacade(RouterContext ctx) { + _routers = new HashMap(); + _context = ctx; } public void shutdown() {} public void startup() { - RouterInfo info = Router.getInstance().getRouterInfo(); - _routers.put(info.getIdentity().getHash(), info); + RouterInfo info = _context.router().getRouterInfo(); + _routers.put(info.getIdentity().getHash(), info); } public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {} public LeaseSet lookupLeaseSetLocally(Hash key) { return null; } public void lookupRouterInfo(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) { - RouterInfo info = lookupRouterInfoLocally(key); - if (info == null) - JobQueue.getInstance().addJob(onFailedLookupJob); - else - JobQueue.getInstance().addJob(onFindJob); + RouterInfo info = lookupRouterInfoLocally(key); + if (info == null) + _context.jobQueue().addJob(onFailedLookupJob); + else + _context.jobQueue().addJob(onFindJob); } public RouterInfo lookupRouterInfoLocally(Hash key) { return (RouterInfo)_routers.get(key); } public void publish(LeaseSet localLeaseSet) {} public void publish(RouterInfo localRouterInfo) {} public LeaseSet store(Hash key, LeaseSet leaseSet) { return leaseSet; } public RouterInfo store(Hash key, RouterInfo routerInfo) { - _routers.put(key, routerInfo); - return routerInfo; + _routers.put(key, routerInfo); + return routerInfo; } public void unpublish(LeaseSet localLeaseSet) {} public void fail(Hash dbEntry) {} diff --git a/router/java/src/net/i2p/router/OutNetMessage.java b/router/java/src/net/i2p/router/OutNetMessage.java index 4e982f6f7..2639283c2 100644 --- 
a/router/java/src/net/i2p/router/OutNetMessage.java +++ b/router/java/src/net/i2p/router/OutNetMessage.java @@ -1,9 +1,9 @@ package net.i2p.router; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -32,7 +32,8 @@ import net.i2p.util.Log; * */ public class OutNetMessage { - private final static Log _log = new Log(OutNetMessage.class); + private Log _log; + private RouterContext _context; private RouterInfo _target; private I2NPMessage _message; private long _messageSize; @@ -49,53 +50,55 @@ public class OutNetMessage { private long _created; /** for debugging, contains a mapping of even name to Long (e.g. 
"begin sending", "handleOutbound", etc) */ private HashMap _timestamps; - /** - * contains a list of timestamp event names in the order they were fired - * (some JVMs have less than 10ms resolution, so the Long above doesn't guarantee order) + /** + * contains a list of timestamp event names in the order they were fired + * (some JVMs have less than 10ms resolution, so the Long above doesn't guarantee order) */ private List _timestampOrder; - public OutNetMessage() { - setTarget(null); - _message = null; - _messageSize = 0; - setPriority(-1); - setExpiration(-1); - setOnSendJob(null); - setOnFailedSendJob(null); - setOnReplyJob(null); - setOnFailedReplyJob(null); - setReplySelector(null); - _timestamps = new HashMap(8); - _timestampOrder = new LinkedList(); - _failedTransports = new HashSet(); - _sendBegin = 0; - _createdBy = new Exception("Created by"); - _created = Clock.getInstance().now(); - timestamp("Created"); + public OutNetMessage(RouterContext context) { + _context = context; + _log = context.logManager().getLog(OutNetMessage.class); + setTarget(null); + _message = null; + _messageSize = 0; + setPriority(-1); + setExpiration(-1); + setOnSendJob(null); + setOnFailedSendJob(null); + setOnReplyJob(null); + setOnFailedReplyJob(null); + setReplySelector(null); + _timestamps = new HashMap(8); + _timestampOrder = new LinkedList(); + _failedTransports = new HashSet(); + _sendBegin = 0; + _createdBy = new Exception("Created by"); + _created = context.clock().now(); + timestamp("Created"); } public void timestamp(String eventName) { - synchronized (_timestamps) { - _timestamps.put(eventName, new Long(Clock.getInstance().now())); - _timestampOrder.add(eventName); - } + synchronized (_timestamps) { + _timestamps.put(eventName, new Long(_context.clock().now())); + _timestampOrder.add(eventName); + } } public Map getTimestamps() { - synchronized (_timestamps) { - return (Map)_timestamps.clone(); - } + synchronized (_timestamps) { + return (Map)_timestamps.clone(); + } 
} public Long getTimestamp(String eventName) { - synchronized (_timestamps) { - return (Long)_timestamps.get(eventName); - } + synchronized (_timestamps) { + return (Long)_timestamps.get(eventName); + } } public Exception getCreatedBy() { return _createdBy; } /** - * Specifies the router to which the message should be delivered. + * Specifies the router to which the message should be delivered. * */ public RouterInfo getTarget() { return _target; } @@ -105,48 +108,48 @@ public class OutNetMessage { * */ public I2NPMessage getMessage() { return _message; } - public void setMessage(I2NPMessage msg) { - _message = msg; + public void setMessage(I2NPMessage msg) { + _message = msg; } - public long getMessageSize() { - if (_messageSize <= 0) { - try { - ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); // large enough to hold most messages - _message.writeBytes(baos); - long sz = baos.size(); - baos.reset(); - _messageSize = sz; - } catch (DataFormatException dfe) { - _log.error("Error serializing the I2NPMessage for the OutNetMessage", dfe); - } catch (IOException ioe) { - _log.error("Error serializing the I2NPMessage for the OutNetMessage", ioe); - } - } - return _messageSize; + public long getMessageSize() { + if (_messageSize <= 0) { + try { + ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); // large enough to hold most messages + _message.writeBytes(baos); + long sz = baos.size(); + baos.reset(); + _messageSize = sz; + } catch (DataFormatException dfe) { + _log.error("Error serializing the I2NPMessage for the OutNetMessage", dfe); + } catch (IOException ioe) { + _log.error("Error serializing the I2NPMessage for the OutNetMessage", ioe); + } + } + return _messageSize; } - public byte[] getMessageData() { - if (_message == null) { - return null; - } else { - try { - ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); // large enough to hold most messages - _message.writeBytes(baos); - byte data[] = baos.toByteArray(); - baos.reset(); 
- return data; - } catch (DataFormatException dfe) { - _log.error("Error serializing the I2NPMessage for the OutNetMessage", dfe); - } catch (IOException ioe) { - _log.error("Error serializing the I2NPMessage for the OutNetMessage", ioe); - } - return null; - } + public byte[] getMessageData() { + if (_message == null) { + return null; + } else { + try { + ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); // large enough to hold most messages + _message.writeBytes(baos); + byte data[] = baos.toByteArray(); + baos.reset(); + return data; + } catch (DataFormatException dfe) { + _log.error("Error serializing the I2NPMessage for the OutNetMessage", dfe); + } catch (IOException ioe) { + _log.error("Error serializing the I2NPMessage for the OutNetMessage", ioe); + } + return null; + } } /** * Specify the priority of the message, where higher numbers are higher - * priority. Higher priority messages should be delivered before lower + * priority. Higher priority messages should be delivered before lower * priority ones, though some algorithm may be used to avoid starvation. * */ @@ -154,15 +157,15 @@ public class OutNetMessage { public void setPriority(int priority) { _priority = priority; } /** * Specify the # ms since the epoch after which if the message has not been - * sent the OnFailedSend job should be fired and the message should be - * removed from the pool. If the message has already been sent, this + * sent the OnFailedSend job should be fired and the message should be + * removed from the pool. If the message has already been sent, this * expiration is ignored and the expiration from the ReplySelector is used. * */ public long getExpiration() { return _expiration; } public void setExpiration(long expiration) { _expiration = expiration; } /** - * After the message is successfully passed to the router specified, the + * After the message is successfully passed to the router specified, the * given job is enqueued. 
* */ @@ -199,74 +202,74 @@ public class OutNetMessage { /** when did the sending process begin */ public long getSendBegin() { return _sendBegin; } - public void beginSend() { _sendBegin = Clock.getInstance().now(); } + public void beginSend() { _sendBegin = _context.clock().now(); } public long getCreated() { return _created; } - public long getLifetime() { return Clock.getInstance().now() - _created; } + public long getLifetime() { return _context.clock().now() - _created; } public String toString() { - StringBuffer buf = new StringBuffer(128); - buf.append("[OutNetMessage contains "); - if (_message == null) { - buf.append("*no message*"); - } else { - buf.append("a ").append(_messageSize).append(" byte "); - buf.append(_message.getClass().getName()); - } - buf.append(" expiring on ").append(new Date(_expiration)); - buf.append(" failed delivery on transports ").append(_failedTransports); - if (_target == null) - buf.append(" targetting no one in particular..."); - else - buf.append(" targetting ").append(_target.getIdentity().getHash().toBase64()); - if (_onReply != null) - buf.append(" with onReply job: ").append(_onReply); - if (_onSend != null) - buf.append(" with onSend job: ").append(_onSend); - if (_onFailedReply != null) - buf.append(" with onFailedReply job: ").append(_onFailedReply); - if (_onFailedSend != null) - buf.append(" with onFailedSend job: ").append(_onFailedSend); - buf.append(" {timestamps: \n"); - synchronized (_timestamps) { - long lastWhen = -1; - for (int i = 0; i < _timestampOrder.size(); i++) { - String name = (String)_timestampOrder.get(i); - Long when = (Long)_timestamps.get(name); - buf.append("\t["); - long diff = when.longValue() - lastWhen; - if ( (lastWhen > 0) && (diff > 500) ) - buf.append("**"); - if (lastWhen > 0) - buf.append(diff); - else - buf.append(0); - buf.append("ms: \t").append(name).append('=').append(formatDate(when.longValue())).append("]\n"); - lastWhen = when.longValue(); - } - } - buf.append("}"); - 
buf.append("]"); - return buf.toString(); + StringBuffer buf = new StringBuffer(128); + buf.append("[OutNetMessage contains "); + if (_message == null) { + buf.append("*no message*"); + } else { + buf.append("a ").append(_messageSize).append(" byte "); + buf.append(_message.getClass().getName()); + } + buf.append(" expiring on ").append(new Date(_expiration)); + buf.append(" failed delivery on transports ").append(_failedTransports); + if (_target == null) + buf.append(" targetting no one in particular..."); + else + buf.append(" targetting ").append(_target.getIdentity().getHash().toBase64()); + if (_onReply != null) + buf.append(" with onReply job: ").append(_onReply); + if (_onSend != null) + buf.append(" with onSend job: ").append(_onSend); + if (_onFailedReply != null) + buf.append(" with onFailedReply job: ").append(_onFailedReply); + if (_onFailedSend != null) + buf.append(" with onFailedSend job: ").append(_onFailedSend); + buf.append(" {timestamps: \n"); + synchronized (_timestamps) { + long lastWhen = -1; + for (int i = 0; i < _timestampOrder.size(); i++) { + String name = (String)_timestampOrder.get(i); + Long when = (Long)_timestamps.get(name); + buf.append("\t["); + long diff = when.longValue() - lastWhen; + if ( (lastWhen > 0) && (diff > 500) ) + buf.append("**"); + if (lastWhen > 0) + buf.append(diff); + else + buf.append(0); + buf.append("ms: \t").append(name).append('=').append(formatDate(when.longValue())).append("]\n"); + lastWhen = when.longValue(); + } + } + buf.append("}"); + buf.append("]"); + return buf.toString(); } private final static SimpleDateFormat _fmt = new SimpleDateFormat("HH:mm:ss.SSS"); private final static String formatDate(long when) { - Date d = new Date(when); - synchronized (_fmt) { - return _fmt.format(d); - } + Date d = new Date(when); + synchronized (_fmt) { + return _fmt.format(d); + } } public int hashCode() { - int rv = 0; - rv += DataHelper.hashCode(_message); - rv += DataHelper.hashCode(_target); - // the others are 
pretty much inconsequential - return rv; + int rv = 0; + rv += DataHelper.hashCode(_message); + rv += DataHelper.hashCode(_target); + // the others are pretty much inconsequential + return rv; } public boolean equals(Object obj) { - return obj == this; // two OutNetMessages are different even if they contain the same message + return obj == this; // two OutNetMessages are different even if they contain the same message } } diff --git a/router/java/src/net/i2p/router/OutNetMessagePool.java b/router/java/src/net/i2p/router/OutNetMessagePool.java index 86897e2d2..1b76f90ad 100644 --- a/router/java/src/net/i2p/router/OutNetMessagePool.java +++ b/router/java/src/net/i2p/router/OutNetMessagePool.java @@ -26,13 +26,12 @@ import net.i2p.util.Log; * */ public class OutNetMessagePool { - private final static Log _log = new Log(OutNetMessagePool.class); - private static OutNetMessagePool _instance = new OutNetMessagePool(); - public static OutNetMessagePool getInstance() { return _instance; } - private TreeMap _messageLists; // priority --> List of OutNetMessage objects, where HIGHEST priority first + private Log _log; + private RouterContext _context; - private OutNetMessagePool() { - _messageLists = new TreeMap(new ReverseIntegerComparator()); + public OutNetMessagePool(RouterContext context) { + _context = context; + _log = _context.logManager().getLog(OutNetMessagePool.class); } /** @@ -40,19 +39,7 @@ public class OutNetMessagePool { * */ public OutNetMessage getNext() { - synchronized (_messageLists) { - if (_messageLists.size() <= 0) return null; - for (Iterator iter = _messageLists.keySet().iterator(); iter.hasNext(); ) { - Integer priority = (Integer)iter.next(); - List messages = (List)_messageLists.get(priority); - if (messages.size() > 0) { - _log.debug("Found a message of priority " + priority); - return (OutNetMessage)messages.remove(0); - } - } - // no messages of any priority - return null; - } + return null; } /** @@ -60,45 +47,35 @@ public class 
OutNetMessagePool { * */ public void add(OutNetMessage msg) { - boolean valid = validate(msg); - if (!valid) return; - if (true) { // skip the pool - MessageSelector selector = msg.getReplySelector(); - if (selector != null) { - OutboundMessageRegistry.getInstance().registerPending(msg); - } - CommSystemFacade.getInstance().processMessage(msg); - return; - } - - synchronized (_messageLists) { - Integer pri = new Integer(msg.getPriority()); - if ( (_messageLists.size() <= 0) || (!_messageLists.containsKey(pri)) ) - _messageLists.put(new Integer(msg.getPriority()), new ArrayList(32)); - List messages = (List)_messageLists.get(pri); - messages.add(msg); - } + boolean valid = validate(msg); + if (!valid) return; + MessageSelector selector = msg.getReplySelector(); + if (selector != null) { + _context.messageRegistry().registerPending(msg); + } + _context.commSystem().processMessage(msg); + return; } private boolean validate(OutNetMessage msg) { - if (msg == null) return false; - if (msg.getMessage() == null) { - _log.error("Null message in the OutNetMessage: " + msg, new Exception("Someone fucked up")); - return false; - } - if (msg.getTarget() == null) { - _log.error("No target in the OutNetMessage: " + msg, new Exception("Definitely a fuckup")); - return false; - } - if (msg.getPriority() < 0) { - _log.warn("Priority less than 0? sounds like nonsense to me... " + msg, new Exception("Negative priority")); - return false; - } - if (msg.getExpiration() <= Clock.getInstance().now()) { - _log.error("Already expired! 
wtf: " + msg, new Exception("Expired message")); - return false; - } - return true; + if (msg == null) return false; + if (msg.getMessage() == null) { + _log.error("Null message in the OutNetMessage: " + msg, new Exception("Someone fucked up")); + return false; + } + if (msg.getTarget() == null) { + _log.error("No target in the OutNetMessage: " + msg, new Exception("Definitely a fuckup")); + return false; + } + if (msg.getPriority() < 0) { + _log.warn("Priority less than 0? sounds like nonsense to me... " + msg, new Exception("Negative priority")); + return false; + } + if (msg.getExpiration() <= _context.clock().now()) { + _log.error("Already expired! wtf: " + msg, new Exception("Expired message")); + return false; + } + return true; } /** @@ -106,43 +83,14 @@ public class OutNetMessagePool { * */ public void clearExpired() { - long now = Clock.getInstance().now(); - List jobsToEnqueue = new ArrayList(); - synchronized (_messageLists) { - for (Iterator iter = _messageLists.values().iterator(); iter.hasNext();) { - List toRemove = new ArrayList(); - List messages = (List)iter.next(); - for (Iterator msgIter = messages.iterator(); msgIter.hasNext(); ) { - OutNetMessage msg = (OutNetMessage)msgIter.next(); - if (msg.getExpiration() <= now) { - _log.warn("Outbound network message expired: " + msg); - toRemove.add(msg); - jobsToEnqueue.add(msg.getOnFailedSendJob()); - } - } - messages.removeAll(toRemove); - } - } - for (int i = 0; i < jobsToEnqueue.size(); i++) { - Job j = (Job)jobsToEnqueue.get(i); - JobQueue.getInstance().addJob(j); - } + // noop } /** * Retrieve the number of messages, regardless of priority. * */ - public int getCount() { - int size = 0; - synchronized (_messageLists) { - for (Iterator iter = _messageLists.values().iterator(); iter.hasNext(); ) { - List lst = (List)iter.next(); - size += lst.size(); - } - } - return size; - } + public int getCount() { return 0; } /** * Retrieve the number of messages at the given priority. 
This can be used for @@ -150,45 +98,17 @@ public class OutNetMessagePool { * where all of these 'spare' messages are of the same priority. * */ - public int getCount(int priority) { - synchronized (_messageLists) { - Integer pri = new Integer(priority); - List messages = (List)_messageLists.get(pri); - if (messages == null) - return 0; - else - return messages.size(); - } - } + public int getCount(int priority) { return 0; } - public void dumpPoolInfo() { - StringBuffer buf = new StringBuffer(); - buf.append("\nDumping Outbound Network Message Pool. Total # message: ").append(getCount()).append("\n"); - synchronized (_messageLists) { - for (Iterator iter = _messageLists.keySet().iterator(); iter.hasNext();) { - Integer pri = (Integer)iter.next(); - List messages = (List)_messageLists.get(pri); - if (messages.size() > 0) { - buf.append("Messages of priority ").append(pri).append(": ").append(messages.size()).append("\n"); - buf.append("---------------------------\n"); - for (Iterator msgIter = messages.iterator(); msgIter.hasNext(); ) { - OutNetMessage msg = (OutNetMessage)msgIter.next(); - buf.append("Message ").append(msg.getMessage()).append("\n\n"); - } - buf.append("---------------------------\n"); - } - } - } - _log.debug(buf.toString()); - } + public void dumpPoolInfo() { return; } private static class ReverseIntegerComparator implements Comparator { - public int compare(Object lhs, Object rhs) { - if ( (lhs == null) || (rhs == null) ) return 0; // invalid, but never used - if ( !(lhs instanceof Integer) || !(rhs instanceof Integer)) return 0; - Integer lv = (Integer)lhs; - Integer rv = (Integer)rhs; - return - (lv.compareTo(rv)); - } + public int compare(Object lhs, Object rhs) { + if ( (lhs == null) || (rhs == null) ) return 0; // invalid, but never used + if ( !(lhs instanceof Integer) || !(rhs instanceof Integer)) return 0; + Integer lv = (Integer)lhs; + Integer rv = (Integer)rhs; + return - (lv.compareTo(rv)); + } } } diff --git 
a/router/java/src/net/i2p/router/PeerManagerFacade.java b/router/java/src/net/i2p/router/PeerManagerFacade.java index 474babb15..d75a73258 100644 --- a/router/java/src/net/i2p/router/PeerManagerFacade.java +++ b/router/java/src/net/i2p/router/PeerManagerFacade.java @@ -18,23 +18,19 @@ import net.i2p.router.peermanager.PeerManagerFacadeImpl; * includes periodically queueing up outbound messages to the peers to test them. * */ -public abstract class PeerManagerFacade implements Service { - private static PeerManagerFacade _instance = new PeerManagerFacadeImpl(); - public static PeerManagerFacade getInstance() { return _instance; } - +public interface PeerManagerFacade extends Service { /** * Select peers from the manager's existing routing tables according to * the specified criteria. This call DOES block. * * @return List of Hash objects of the RouterIdentity for matching peers */ - public abstract List selectPeers(PeerSelectionCriteria criteria); - public String renderStatusHTML() { return ""; } + public List selectPeers(PeerSelectionCriteria criteria); } -class DummyPeerManagerFacade extends PeerManagerFacade { +class DummyPeerManagerFacade implements PeerManagerFacade { public void shutdown() {} public void startup() {} - + public String renderStatusHTML() { return ""; } public List selectPeers(PeerSelectionCriteria criteria) { return null; } } diff --git a/router/java/src/net/i2p/router/ProfileManager.java b/router/java/src/net/i2p/router/ProfileManager.java index 57251e667..c49c85bd7 100644 --- a/router/java/src/net/i2p/router/ProfileManager.java +++ b/router/java/src/net/i2p/router/ProfileManager.java @@ -11,71 +11,64 @@ package net.i2p.router; import java.util.Properties; import net.i2p.data.Hash; -import net.i2p.router.peermanager.ProfileManagerImpl; -public abstract class ProfileManager { - private final static ProfileManager _instance = new ProfileManagerImpl(); - public static ProfileManager getInstance() { return _instance; } - - /** is this peer failing 
or already dropped? */ - public abstract boolean isFailing(Hash peer); - +public interface ProfileManager { /** * Note that it took msToSend to send a message of size bytesSent to the peer over the transport. * This should only be called if the transport considered the send successful. * */ - public abstract void messageSent(Hash peer, String transport, long msToSend, long bytesSent); + void messageSent(Hash peer, String transport, long msToSend, long bytesSent); /** * Note that the router failed to send a message to the peer over the transport specified * */ - public abstract void messageFailed(Hash peer, String transport); + void messageFailed(Hash peer, String transport); /** * Note that the router failed to send a message to the peer over any transport * */ - public abstract void messageFailed(Hash peer); + void messageFailed(Hash peer); /** * Note that there was some sort of communication error talking with the peer * */ - public abstract void commErrorOccurred(Hash peer); + void commErrorOccurred(Hash peer); /** * Note that the router agreed to participate in a tunnel * */ - public abstract void tunnelJoined(Hash peer, long responseTimeMs); + void tunnelJoined(Hash peer, long responseTimeMs); /** * Note that a router explicitly rejected joining a tunnel * */ - public abstract void tunnelRejected(Hash peer, long responseTimeMs); + void tunnelRejected(Hash peer, long responseTimeMs); /** * Note that the peer participated in a tunnel that failed. Its failure may not have * been the peer's fault however. 
* */ - public abstract void tunnelFailed(Hash peer); + void tunnelFailed(Hash peer); /** * Note that the peer was able to return the valid data for a db lookup * */ - public abstract void dbLookupSuccessful(Hash peer, long responseTimeMs); + void dbLookupSuccessful(Hash peer, long responseTimeMs); /** * Note that the peer was unable to reply to a db lookup - either with data or with * a lookupReply redirecting the user elsewhere * */ - public abstract void dbLookupFailed(Hash peer); + void dbLookupFailed(Hash peer); /** * Note that the peer replied to a db lookup with a redirect to other routers, where @@ -85,39 +78,39 @@ public abstract class ProfileManager { * asked them not to send us, but they did anyway * */ - public abstract void dbLookupReply(Hash peer, int newPeers, int oldPeers, int invalid, int duplicate, long responseTimeMs); + void dbLookupReply(Hash peer, int newPeers, int oldPeers, int invalid, int duplicate, long responseTimeMs); /** * Note that the local router received a db lookup from the given peer * */ - public abstract void dbLookupReceived(Hash peer); + void dbLookupReceived(Hash peer); /** * Note that the local router received an unprompted db store from the given peer * */ - public abstract void dbStoreReceived(Hash peer, boolean wasNewKey); + void dbStoreReceived(Hash peer, boolean wasNewKey); /** * Note that we've confirmed a successful send of db data to the peer (though we haven't * necessarily requested it again from them, so they /might/ be lying) * */ - public abstract void dbStoreSent(Hash peer, long responseTimeMs); + void dbStoreSent(Hash peer, long responseTimeMs); /** * Note that we were unable to confirm a successful send of db data to * the peer, at least not within our timeout period * */ - public abstract void dbStoreFailed(Hash peer); + void dbStoreFailed(Hash peer); /** * Note that the local router received a reference to the given peer, either * through an explicit dbStore or in a dbLookupReply */ - public abstract void 
heardAbout(Hash peer); + void heardAbout(Hash peer); /** * Note that the router received a message from the given peer on the specified @@ -126,8 +119,8 @@ public abstract class ProfileManager { * available * */ - public abstract void messageReceived(Hash peer, String style, long msToReceive, int bytesRead); + void messageReceived(Hash peer, String style, long msToReceive, int bytesRead); /** provide a simple summary of a number of peers, suitable for publication in the netDb */ - public abstract Properties summarizePeers(int numPeers); + Properties summarizePeers(int numPeers); } diff --git a/router/java/src/net/i2p/router/Router.java b/router/java/src/net/i2p/router/Router.java index 4d69ecfed..8161a5d85 100644 --- a/router/java/src/net/i2p/router/Router.java +++ b/router/java/src/net/i2p/router/Router.java @@ -54,14 +54,14 @@ import net.i2p.util.RandomSource; * */ public class Router { - private final static Log _log = new Log(Router.class); - private final static Router _instance = new Router(); - public static Router getInstance() { return _instance; } + private Log _log; + private RouterContext _context; private Properties _config; private String _configFilename; private RouterInfo _routerInfo; private long _started; private boolean _higherVersionSeen; + private SessionKeyPersistenceHelper _sessionKeyPersistenceHelper; public final static String PROP_CONFIG_FILE = "router.configLocation"; @@ -73,16 +73,19 @@ public class Router { public final static String PROP_KEYS_FILENAME = "router.keys.location"; public final static String PROP_KEYS_FILENAME_DEFAULT = "router.keys"; - private Router() { - _config = new Properties(); - _configFilename = System.getProperty(PROP_CONFIG_FILE, "router.config"); - _routerInfo = null; - _higherVersionSeen = false; + public Router() { // grumble about sun's java caching DNS entries *forever* System.setProperty("sun.net.inetaddr.ttl", "0"); System.setProperty("networkaddress.cache.ttl", "0"); // (no need for keepalive) 
System.setProperty("http.keepAlive", "false"); + _config = new Properties(); + _context = new RouterContext(this); + _configFilename = _context.getProperty(PROP_CONFIG_FILE, "router.config"); + _routerInfo = null; + _higherVersionSeen = false; + _log = _context.logManager().getLog(Router.class); + _sessionKeyPersistenceHelper = new SessionKeyPersistenceHelper(_context); } public String getConfigFilename() { return _configFilename; } @@ -97,7 +100,7 @@ public class Router { public void setRouterInfo(RouterInfo info) { _routerInfo = info; if (info != null) - JobQueue.getInstance().addJob(new PersistRouterInfoJob()); + _context.jobQueue().addJob(new PersistRouterInfoJob()); } /** @@ -110,10 +113,10 @@ public class Router { public long getWhenStarted() { return _started; } /** wall clock uptime */ - public long getUptime() { return Clock.getInstance().now() - Clock.getInstance().getOffset() - _started; } + public long getUptime() { return _context.clock().now() - _context.clock().getOffset() - _started; } private void runRouter() { - _started = Clock.getInstance().now(); + _started = _context.clock().now(); Runtime.getRuntime().addShutdownHook(new ShutdownHook()); I2PThread.setOOMEventListener(new I2PThread.OOMEventListener() { public void outOfMemory(OutOfMemoryError oom) { @@ -123,21 +126,22 @@ public class Router { }); setupHandlers(); startupQueue(); - JobQueue.getInstance().addJob(new CoallesceStatsJob()); - JobQueue.getInstance().addJob(new UpdateRoutingKeyModifierJob()); + _context.jobQueue().addJob(new CoallesceStatsJob()); + _context.jobQueue().addJob(new UpdateRoutingKeyModifierJob()); warmupCrypto(); - SessionKeyPersistenceHelper.getInstance().startup(); - JobQueue.getInstance().addJob(new StartupJob()); + _sessionKeyPersistenceHelper.startup(); + _context.jobQueue().addJob(new StartupJob(_context)); } /** * coallesce the stats framework every minute * */ - private final static class CoallesceStatsJob extends JobImpl { + private final class CoallesceStatsJob 
extends JobImpl { + public CoallesceStatsJob() { super(Router.this._context); } public String getName() { return "Coallesce stats"; } public void runJob() { - StatManager.getInstance().coallesceStats(); + Router.this._context.statManager().coallesceStats(); requeue(60*1000); } } @@ -147,15 +151,16 @@ public class Router { * This is done here because we want to make sure the key is updated before anyone * uses it. */ - private final static class UpdateRoutingKeyModifierJob extends JobImpl { + private final class UpdateRoutingKeyModifierJob extends JobImpl { private Calendar _cal = new GregorianCalendar(TimeZone.getTimeZone("GMT")); + public UpdateRoutingKeyModifierJob() { super(Router.this._context); } public String getName() { return "Update Routing Key Modifier"; } public void runJob() { - RoutingKeyGenerator.getInstance().generateDateBasedModData(); + Router.this._context.routingKeyGenerator().generateDateBasedModData(); requeue(getTimeTillMidnight()); } private long getTimeTillMidnight() { - long now = Clock.getInstance().now(); + long now = Router.this._context.clock().now(); _cal.setTime(new Date(now)); _cal.add(Calendar.DATE, 1); _cal.set(Calendar.HOUR_OF_DAY, 0); @@ -175,18 +180,18 @@ public class Router { } private void warmupCrypto() { - RandomSource.getInstance().nextBoolean(); + _context.random().nextBoolean(); new DHSessionKeyBuilder(); // load the class so it starts the precalc process } private void startupQueue() { - JobQueue.getInstance().runQueue(1); + _context.jobQueue().runQueue(1); } private void setupHandlers() { - InNetMessagePool.getInstance().registerHandlerJobBuilder(GarlicMessage.MESSAGE_TYPE, new GarlicMessageHandler()); - InNetMessagePool.getInstance().registerHandlerJobBuilder(TunnelMessage.MESSAGE_TYPE, new TunnelMessageHandler()); - InNetMessagePool.getInstance().registerHandlerJobBuilder(SourceRouteReplyMessage.MESSAGE_TYPE, new SourceRouteReplyMessageHandler()); + 
_context.inNetMessagePool().registerHandlerJobBuilder(GarlicMessage.MESSAGE_TYPE, new GarlicMessageHandler(_context)); + _context.inNetMessagePool().registerHandlerJobBuilder(TunnelMessage.MESSAGE_TYPE, new TunnelMessageHandler(_context)); + _context.inNetMessagePool().registerHandlerJobBuilder(SourceRouteReplyMessage.MESSAGE_TYPE, new SourceRouteReplyMessageHandler(_context)); } public String renderStatusHTML() { @@ -214,9 +219,9 @@ public class Router { if ( (_routerInfo != null) && (_routerInfo.getIdentity() != null) ) buf.append("Router: ").append(_routerInfo.getIdentity().getHash().toBase64()).append("
\n"); - buf.append("As of: ").append(new Date(Clock.getInstance().now())).append(" (uptime: ").append(DataHelper.formatDuration(getUptime())).append(")
\n"); + buf.append("As of: ").append(new Date(_context.clock().now())).append(" (uptime: ").append(DataHelper.formatDuration(getUptime())).append(")
\n"); buf.append("Started on: ").append(new Date(getWhenStarted())).append("
\n"); - buf.append("Clock offset: ").append(Clock.getInstance().getOffset()).append("ms (OS time: ").append(new Date(Clock.getInstance().now() - Clock.getInstance().getOffset())).append(")
\n"); + buf.append("Clock offset: ").append(_context.clock().getOffset()).append("ms (OS time: ").append(new Date(_context.clock().now() - _context.clock().getOffset())).append(")
\n"); long tot = Runtime.getRuntime().totalMemory()/1024; long free = Runtime.getRuntime().freeMemory()/1024; buf.append("Memory: In use: ").append((tot-free)).append("KB Free: ").append(free).append("KB
\n"); @@ -225,8 +230,8 @@ public class Router { buf.append("HIGHER VERSION SEEN - please check to see if there is a new release out
\n"); buf.append("

Bandwidth

\n"); - long sent = BandwidthLimiter.getInstance().getTotalSendBytes(); - long received = BandwidthLimiter.getInstance().getTotalReceiveBytes(); + long sent = _context.bandwidthLimiter().getTotalSendBytes(); + long received = _context.bandwidthLimiter().getTotalReceiveBytes(); buf.append("
    "); buf.append("
  • ").append(sent).append(" bytes sent, "); @@ -235,7 +240,7 @@ public class Router { DecimalFormat fmt = new DecimalFormat("##0.00"); // we use the unadjusted time, since thats what getWhenStarted is based off - long lifetime = Clock.getInstance().now()-Clock.getInstance().getOffset() - getWhenStarted(); + long lifetime = _context.clock().now()-_context.clock().getOffset() - getWhenStarted(); lifetime /= 1000; if ( (sent > 0) && (received > 0) ) { double sendKBps = sent / (lifetime*1024.0); @@ -246,7 +251,7 @@ public class Router { buf.append("
  • "); } - RateStat sendRate = StatManager.getInstance().getRate("transport.sendMessageSize"); + RateStat sendRate = _context.statManager().getRate("transport.sendMessageSize"); for (int i = 0; i < sendRate.getPeriods().length; i++) { Rate rate = sendRate.getRate(sendRate.getPeriods()[i]); double bytes = rate.getLastTotalValue() + rate.getCurrentTotalValue(); @@ -280,7 +285,7 @@ public class Router { buf.append(""); } - RateStat receiveRate = StatManager.getInstance().getRate("transport.receiveMessageSize"); + RateStat receiveRate = _context.statManager().getRate("transport.receiveMessageSize"); for (int i = 0; i < receiveRate.getPeriods().length; i++) { Rate rate = receiveRate.getRate(receiveRate.getPeriods()[i]); double bytes = rate.getLastTotalValue() + rate.getCurrentTotalValue(); @@ -321,23 +326,23 @@ public class Router { buf.append("\n"); buf.append("
    \n"); - buf.append(ClientManagerFacade.getInstance().renderStatusHTML()); + buf.append(_context.clientManager().renderStatusHTML()); buf.append("\n
    \n"); - buf.append(CommSystemFacade.getInstance().renderStatusHTML()); + buf.append(_context.commSystem().renderStatusHTML()); buf.append("\n
    \n"); - buf.append(PeerManagerFacade.getInstance().renderStatusHTML()); + buf.append(_context.peerManager().renderStatusHTML()); buf.append("\n
    \n"); - buf.append(TunnelManagerFacade.getInstance().renderStatusHTML()); + buf.append(_context.tunnelManager().renderStatusHTML()); buf.append("\n
    \n"); - buf.append(JobQueue.getInstance().renderStatusHTML()); + buf.append(_context.jobQueue().renderStatusHTML()); buf.append("\n
    \n"); - buf.append(Shitlist.getInstance().renderStatusHTML()); + buf.append(_context.shitlist().renderStatusHTML()); buf.append("\n
    \n"); - buf.append(OutboundMessageRegistry.getInstance().renderStatusHTML()); + buf.append(_context.messageRegistry().renderStatusHTML()); buf.append("\n
    \n"); - buf.append(NetworkDatabaseFacade.getInstance().renderStatusHTML()); + buf.append(_context.netDb().renderStatusHTML()); buf.append("\n
    \n"); - List msgs = LogConsoleBuffer.getInstance().getMostRecentMessages(); + List msgs = _context.logManager().getBuffer().getMostRecentMessages(); buf.append("\n

    Most recent console messages:

    \n"); for (Iterator iter = msgs.iterator(); iter.hasNext(); ) { String msg = (String)iter.next(); @@ -350,27 +355,28 @@ public class Router { } public void shutdown() { - try { JobQueue.getInstance().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the job queue", t); } - try { StatisticsManager.getInstance().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the stats manager", t); } - try { ClientManagerFacade.getInstance().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the client manager", t); } - try { TunnelManagerFacade.getInstance().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the tunnel manager", t); } - try { NetworkDatabaseFacade.getInstance().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the networkDb", t); } - try { CommSystemFacade.getInstance().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the comm system", t); } - try { PeerManagerFacade.getInstance().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the peer manager", t); } - try { SessionKeyPersistenceHelper.getInstance().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the session key manager", t); } + try { _context.jobQueue().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the job queue", t); } + try { _context.statPublisher().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the stats manager", t); } + try { _context.clientManager().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the client manager", t); } + try { _context.tunnelManager().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the tunnel manager", t); } + try { _context.netDb().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the networkDb", t); } + try { 
_context.commSystem().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the comm system", t); } + try { _context.peerManager().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the peer manager", t); } + try { _sessionKeyPersistenceHelper.shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the session key manager", t); } dumpStats(); _log.log(Log.CRIT, "Shutdown complete", new Exception("Shutdown")); - try { LogManager.getInstance().shutdown(); } catch (Throwable t) { } + try { _context.logManager().shutdown(); } catch (Throwable t) { } try { Thread.sleep(1000); } catch (InterruptedException ie) {} Runtime.getRuntime().halt(-1); } private void dumpStats() { - _log.log(Log.CRIT, "Lifetime stats:\n\n" + StatsGenerator.generateStatsPage()); + //_log.log(Log.CRIT, "Lifetime stats:\n\n" + StatsGenerator.generateStatsPage()); } public static void main(String args[]) { - Router.getInstance().runRouter(); + Router r = new Router(); + r.runRouter(); } private class ShutdownHook extends Thread { @@ -381,17 +387,18 @@ public class Router { } /** update the router.info file whenever its, er, updated */ - private static class PersistRouterInfoJob extends JobImpl { + private class PersistRouterInfoJob extends JobImpl { + public PersistRouterInfoJob() { super(Router.this._context); } public String getName() { return "Persist Updated Router Information"; } public void runJob() { if (_log.shouldLog(Log.DEBUG)) _log.debug("Persisting updated router info"); - String infoFilename = Router.getInstance().getConfigSetting(PROP_INFO_FILENAME); + String infoFilename = getConfigSetting(PROP_INFO_FILENAME); if (infoFilename == null) infoFilename = PROP_INFO_FILENAME_DEFAULT; - RouterInfo info = Router.getInstance().getRouterInfo(); + RouterInfo info = getRouterInfo(); FileOutputStream fos = null; try { diff --git a/router/java/src/net/i2p/router/RouterContext.java b/router/java/src/net/i2p/router/RouterContext.java 
new file mode 100644 index 000000000..eae75b25e --- /dev/null +++ b/router/java/src/net/i2p/router/RouterContext.java @@ -0,0 +1,465 @@ +package net.i2p.router; + +import net.i2p.data.Hash; +import net.i2p.router.client.ClientManagerFacadeImpl; +import net.i2p.router.transport.OutboundMessageRegistry; +import net.i2p.router.networkdb.kademlia.KademliaNetworkDatabaseFacade; +import net.i2p.router.transport.CommSystemFacadeImpl; +import net.i2p.router.transport.BandwidthLimiter; +import net.i2p.router.transport.TrivialBandwidthLimiter; +import net.i2p.router.tunnelmanager.PoolingTunnelManagerFacade; +import net.i2p.router.peermanager.ProfileOrganizer; +import net.i2p.router.peermanager.PeerManagerFacadeImpl; +import net.i2p.router.peermanager.ProfileManagerImpl; +import net.i2p.router.peermanager.Calculator; +import net.i2p.router.peermanager.IsFailingCalculator; +import net.i2p.router.peermanager.ReliabilityCalculator; +import net.i2p.router.peermanager.SpeedCalculator; +import net.i2p.router.peermanager.IntegrationCalculator; +import net.i2p.I2PAppContext; + +/** + * Build off the core I2P context to provide a root for a router instance to + * coordinate its resources. Router instances themselves should be sure to have + * their own RouterContext, and rooting off of it will allow multiple routers to + * operate in the same JVM without conflict (e.g. sessionTags wont get + * intermingled, nor will their netDbs, jobQueues, or bandwidth limiters). 
+ * + */ +public class RouterContext extends I2PAppContext { + private Router _router; + private ClientManagerFacade _clientManagerFacade; + private ClientMessagePool _clientMessagePool; + private JobQueue _jobQueue; + private InNetMessagePool _inNetMessagePool; + private OutNetMessagePool _outNetMessagePool; + private MessageHistory _messageHistory; + private OutboundMessageRegistry _messageRegistry; + private NetworkDatabaseFacade _netDb; + private KeyManager _keyManager; + private CommSystemFacade _commSystem; + private ProfileOrganizer _profileOrganizer; + private PeerManagerFacade _peerManagerFacade; + private ProfileManager _profileManager; + private BandwidthLimiter _bandwidthLimiter; + private TunnelManagerFacade _tunnelManager; + private StatisticsManager _statPublisher; + private Shitlist _shitlist; + private MessageValidator _messageValidator; + private Calculator _isFailingCalc; + private Calculator _integrationCalc; + private Calculator _speedCalc; + private Calculator _reliabilityCalc; + + public RouterContext(Router router) { + super(); + _router = router; + initAll(); + } + private void initAll() { + _clientManagerFacade = new ClientManagerFacadeImpl(this); + _clientMessagePool = new ClientMessagePool(this); + _jobQueue = new JobQueue(this); + _inNetMessagePool = new InNetMessagePool(this); + _outNetMessagePool = new OutNetMessagePool(this); + _messageHistory = new MessageHistory(this); + _messageRegistry = new OutboundMessageRegistry(this); + _netDb = new KademliaNetworkDatabaseFacade(this); + _keyManager = new KeyManager(this); + _commSystem = new CommSystemFacadeImpl(this); + _profileOrganizer = new ProfileOrganizer(this); + _peerManagerFacade = new PeerManagerFacadeImpl(this); + _profileManager = new ProfileManagerImpl(this); + _bandwidthLimiter = new TrivialBandwidthLimiter(this); + _tunnelManager = new PoolingTunnelManagerFacade(this); + _statPublisher = new StatisticsManager(this); + _shitlist = new Shitlist(this); + _messageValidator = new 
MessageValidator(this); + _isFailingCalc = new IsFailingCalculator(this); + _integrationCalc = new IntegrationCalculator(this); + _speedCalc = new SpeedCalculator(this); + _reliabilityCalc = new ReliabilityCalculator(this); + } + + /** what router is this context working for? */ + public Router router() { return _router; } + /** convenience method for querying the router's ident */ + public Hash routerHash() { return _router.getRouterInfo().getIdentity().getHash(); } + + /** + * How are we coordinating clients for the router? + */ + public ClientManagerFacade clientManager() { return _clientManagerFacade; } + /** + * Where do we toss messages for the clients (and where do we get client messages + * to forward on from)? + */ + public ClientMessagePool clientMessagePool() { return _clientMessagePool; } + /** + * Where do we get network messages from (aka where does the comm system dump what + * it reads)? + */ + public InNetMessagePool inNetMessagePool() { return _inNetMessagePool; } + /** + * Where do we put messages that the router wants to forwards onto the network? + */ + public OutNetMessagePool outNetMessagePool() { return _outNetMessagePool; } + /** + * Tracker component for monitoring what messages are wrapped in what containers + * and how they proceed through the network. This is fully for debugging, as when + * a large portion of the network tracks their messages through this messageHistory + * and submits their logs, we can correlate them and watch as messages flow from + * hop to hop. + */ + public MessageHistory messageHistory() { return _messageHistory; } + /** + * The registry is used by outbound messages to wait for replies. + */ + public OutboundMessageRegistry messageRegistry() { return _messageRegistry; } + /** + * Our db cache + */ + public NetworkDatabaseFacade netDb() { return _netDb; } + /** + * The actual driver of the router, where all jobs are enqueued and processed. 
+ */ + public JobQueue jobQueue() { return _jobQueue; } + /** + * Coordinates the router's ElGamal and DSA keys, as well as any keys given + * to it by clients as part of a LeaseSet. + */ + public KeyManager keyManager() { return _keyManager; } + /** + * How do we pass messages from our outNetMessagePool to another router + */ + public CommSystemFacade commSystem() { return _commSystem; } + /** + * Organize the peers we know about into various tiers, profiling their + * performance and sorting them accordingly. + */ + public ProfileOrganizer profileOrganizer() { return _profileOrganizer; } + /** + * Minimal interface for selecting peers for various tasks based on given + * criteria. This is kept seperate from the profile organizer since this + * logic is independent of how the peers are organized (or profiled even). + */ + public PeerManagerFacade peerManager() { return _peerManagerFacade; } + /** + * Expose a simple API for various router components to take note of + * particular events that a peer enacts (sends us a message, agrees to + * participate in a tunnel, etc). + */ + public ProfileManager profileManager() { return _profileManager; } + /** + * Coordinate this router's bandwidth limits + */ + public BandwidthLimiter bandwidthLimiter() { return _bandwidthLimiter; } + /** + * Coordinate this router's tunnels (its pools, participation, backup, etc). + * Any configuration for the tunnels is rooted from the context's properties + */ + public TunnelManagerFacade tunnelManager() { return _tunnelManager; } + /** + * If the router is configured to, gather up some particularly tasty morsels + * regarding the stats managed and offer to publish them into the routerInfo. + */ + public StatisticsManager statPublisher() { return _statPublisher; } + /** + * who does this peer hate? + */ + public Shitlist shitlist() { return _shitlist; } + /** + * The router keeps track of messages it receives to prevent duplicates, as + * well as other criteria for "validity". 
+ */ + public MessageValidator messageValidator() { return _messageValidator; } + + /** how do we rank the failure of profiles? */ + public Calculator isFailingCalculator() { return _isFailingCalc; } + /** how do we rank the integration of profiles? */ + public Calculator integrationCalculator() { return _integrationCalc; } + /** how do we rank the speed of profiles? */ + public Calculator speedCalculator() { return _speedCalc; } + /** how do we rank the reliability of profiles? */ + public Calculator reliabilityCalculator() { return _reliabilityCalc; } +} +/* +public class RouterContext extends I2PAppContext { + private Router _router; + private ClientManagerFacade _clientManagerFacade; + private ClientMessagePool _clientMessagePool; + private JobQueue _jobQueue; + private InNetMessagePool _inNetMessagePool; + private OutNetMessagePool _outNetMessagePool; + private MessageHistory _messageHistory; + private OutboundMessageRegistry _messageRegistry; + private NetworkDatabaseFacade _netDb; + private KeyManager _keyManager; + private CommSystemFacade _commSystem; + private ProfileOrganizer _profileOrganizer; + private PeerManagerFacade _peerManagerFacade; + private ProfileManager _profileManager; + private BandwidthLimiter _bandwidthLimiter; + private TunnelManagerFacade _tunnelManager; + private StatisticsManager _statPublisher; + private Shitlist _shitlist; + private MessageValidator _messageValidator; + private volatile boolean _clientManagerFacadeInitialized; + private volatile boolean _clientMessagePoolInitialized; + private volatile boolean _jobQueueInitialized; + private volatile boolean _inNetMessagePoolInitialized; + private volatile boolean _outNetMessagePoolInitialized; + private volatile boolean _messageHistoryInitialized; + private volatile boolean _messageRegistryInitialized; + private volatile boolean _netDbInitialized; + private volatile boolean _peerSelectorInitialized; + private volatile boolean _keyManagerInitialized; + private volatile boolean 
_commSystemInitialized; + private volatile boolean _profileOrganizerInitialized; + private volatile boolean _profileManagerInitialized; + private volatile boolean _peerManagerFacadeInitialized; + private volatile boolean _bandwidthLimiterInitialized; + private volatile boolean _tunnelManagerInitialized; + private volatile boolean _statPublisherInitialized; + private volatile boolean _shitlistInitialized; + private volatile boolean _messageValidatorInitialized; + + private Calculator _isFailingCalc = new IsFailingCalculator(this); + private Calculator _integrationCalc = new IntegrationCalculator(this); + private Calculator _speedCalc = new SpeedCalculator(this); + private Calculator _reliabilityCalc = new ReliabilityCalculator(this); + + public Calculator isFailingCalculator() { return _isFailingCalc; } + public Calculator integrationCalculator() { return _integrationCalc; } + public Calculator speedCalculator() { return _speedCalc; } + public Calculator reliabilityCalculator() { return _reliabilityCalc; } + + + public RouterContext(Router router) { + super(); + _router = router; + } + + public Router router() { return _router; } + public Hash routerHash() { return _router.getRouterInfo().getIdentity().getHash(); } + + public ClientManagerFacade clientManager() { + if (!_clientManagerFacadeInitialized) initializeClientManagerFacade(); + return _clientManagerFacade; + } + private void initializeClientManagerFacade() { + synchronized (this) { + if (_clientManagerFacade == null) { + _clientManagerFacade = new ClientManagerFacadeImpl(this); + } + _clientManagerFacadeInitialized = true; + } + } + + public ClientMessagePool clientMessagePool() { + if (!_clientMessagePoolInitialized) initializeClientMessagePool(); + return _clientMessagePool; + } + private void initializeClientMessagePool() { + synchronized (this) { + if (_clientMessagePool == null) { + _clientMessagePool = new ClientMessagePool(this); + } + _clientMessagePoolInitialized = true; + } + } + + public 
InNetMessagePool inNetMessagePool() { + if (!_inNetMessagePoolInitialized) initializeInNetMessagePool(); + return _inNetMessagePool; + } + private void initializeInNetMessagePool() { + synchronized (this) { + if (_inNetMessagePool == null) { + _inNetMessagePool = new InNetMessagePool(this); + } + _inNetMessagePoolInitialized = true; + } + } + + public OutNetMessagePool outNetMessagePool() { + if (!_outNetMessagePoolInitialized) initializeOutNetMessagePool(); + return _outNetMessagePool; + } + private void initializeOutNetMessagePool() { + synchronized (this) { + if (_outNetMessagePool == null) { + _outNetMessagePool = new OutNetMessagePool(this); + } + _outNetMessagePoolInitialized = true; + } + } + + public MessageHistory messageHistory() { + if (!_messageHistoryInitialized) initializeMessageHistory(); + return _messageHistory; + } + private void initializeMessageHistory() { + synchronized (this) { + if (_messageHistory == null) { + _messageHistory = new MessageHistory(this); + } + _messageHistoryInitialized = true; + } + } + + public OutboundMessageRegistry messageRegistry() { + if (!_messageRegistryInitialized) initializeMessageRegistry(); + return _messageRegistry; + } + private void initializeMessageRegistry() { + synchronized (this) { + if (_messageRegistry == null) + _messageRegistry = new OutboundMessageRegistry(this); + _messageRegistryInitialized = true; + } + } + + public NetworkDatabaseFacade netDb() { + if (!_netDbInitialized) initializeNetDb(); + return _netDb; + } + private void initializeNetDb() { + synchronized (this) { + if (_netDb == null) + _netDb = new KademliaNetworkDatabaseFacade(this); + _netDbInitialized = true; + } + } + + public JobQueue jobQueue() { + if (!_jobQueueInitialized) initializeJobQueue(); + return _jobQueue; + } + private void initializeJobQueue() { + synchronized (this) { + if (_jobQueue == null) { + _jobQueue= new JobQueue(this); + } + _jobQueueInitialized = true; + } + } + + public KeyManager keyManager() { + if 
(!_keyManagerInitialized) initializeKeyManager(); + return _keyManager; + } + private void initializeKeyManager() { + synchronized (this) { + if (_keyManager == null) + _keyManager = new KeyManager(this); + _keyManagerInitialized = true; + } + } + + public CommSystemFacade commSystem() { + if (!_commSystemInitialized) initializeCommSystem(); + return _commSystem; + } + private void initializeCommSystem() { + synchronized (this) { + if (_commSystem == null) + _commSystem = new CommSystemFacadeImpl(this); + _commSystemInitialized = true; + } + } + + public ProfileOrganizer profileOrganizer() { + if (!_profileOrganizerInitialized) initializeProfileOrganizer(); + return _profileOrganizer; + } + private void initializeProfileOrganizer() { + synchronized (this) { + if (_profileOrganizer == null) + _profileOrganizer = new ProfileOrganizer(this); + _profileOrganizerInitialized = true; + } + } + public PeerManagerFacade peerManager() { + if (!_peerManagerFacadeInitialized) initializePeerManager(); + return _peerManagerFacade; + } + private void initializePeerManager() { + synchronized (this) { + if (_peerManagerFacade == null) + _peerManagerFacade = new PeerManagerFacadeImpl(this); + _peerManagerFacadeInitialized = true; + } + } + + public BandwidthLimiter bandwidthLimiter() { + if (!_bandwidthLimiterInitialized) initializeBandwidthLimiter(); + return _bandwidthLimiter; + } + private void initializeBandwidthLimiter() { + synchronized (this) { + if (_bandwidthLimiter == null) + _bandwidthLimiter = new TrivialBandwidthLimiter(this); + _bandwidthLimiterInitialized = true; + } + } + + public TunnelManagerFacade tunnelManager() { + if (!_tunnelManagerInitialized) initializeTunnelManager(); + return _tunnelManager; + } + private void initializeTunnelManager() { + synchronized (this) { + if (_tunnelManager == null) + _tunnelManager = new PoolingTunnelManagerFacade(this); + _tunnelManagerInitialized = true; + } + } + public ProfileManager profileManager() { + if 
(!_profileManagerInitialized) initializeProfileManager(); + return _profileManager; + } + private void initializeProfileManager() { + synchronized (this) { + if (_profileManager == null) + _profileManager = new ProfileManagerImpl(this); + _profileManagerInitialized = true; + } + } + public StatisticsManager statPublisher() { + if (!_statPublisherInitialized) initializeStatPublisher(); + return _statPublisher; + } + private void initializeStatPublisher() { + synchronized (this) { + if (_statPublisher == null) + _statPublisher = new StatisticsManager(this); + _statPublisherInitialized = true; + } + } + + public Shitlist shitlist() { + if (!_shitlistInitialized) initializeShitlist(); + return _shitlist; + } + private void initializeShitlist() { + synchronized (this) { + if (_shitlist == null) + _shitlist = new Shitlist(this); + _shitlistInitialized = true; + } + } + + public MessageValidator messageValidator() { + if (!_messageValidatorInitialized) initializeMessageValidator(); + return _messageValidator; + } + private void initializeMessageValidator() { + synchronized (this) { + if (_messageValidator == null) + _messageValidator = new MessageValidator(this); + _messageValidatorInitialized = true; + } + } +} + */ \ No newline at end of file diff --git a/router/java/src/net/i2p/router/SessionKeyPersistenceHelper.java b/router/java/src/net/i2p/router/SessionKeyPersistenceHelper.java index 3f922d2cf..9ec8826a2 100644 --- a/router/java/src/net/i2p/router/SessionKeyPersistenceHelper.java +++ b/router/java/src/net/i2p/router/SessionKeyPersistenceHelper.java @@ -15,77 +15,83 @@ import net.i2p.util.Log; * */ public class SessionKeyPersistenceHelper implements Service { - private final static Log _log = new Log(SessionKeyPersistenceHelper.class); - private static SessionKeyPersistenceHelper _instance = new SessionKeyPersistenceHelper(); - public static SessionKeyPersistenceHelper getInstance() { return _instance; } + private Log _log; + private RouterContext _context; private 
final static long PERSIST_DELAY = 3*60*1000; private final static String SESSION_KEY_FILE = "sessionKeys.dat"; + public SessionKeyPersistenceHelper(RouterContext context) { + _context = context; + _log = _context.logManager().getLog(SessionKeyPersistenceHelper.class); + } + public void shutdown() { - writeState(); + writeState(); } public void startup() { - SessionKeyManager mgr = SessionKeyManager.getInstance(); - if (mgr instanceof PersistentSessionKeyManager) { - PersistentSessionKeyManager manager = (PersistentSessionKeyManager)mgr; - File f = new File(SESSION_KEY_FILE); - if (f.exists()) { - FileInputStream fin = null; - try { - fin = new FileInputStream(f); - manager.loadState(fin); - int expired = manager.aggressiveExpire(); - _log.debug("Session keys loaded [not error] with " + expired + " sets immediately expired"); - } catch (Throwable t) { - _log.error("Error reading in session key data", t); - } finally { - if (fin != null) try { fin.close(); } catch (IOException ioe) {} - } - } - JobQueue.getInstance().addJob(new SessionKeyWriterJob()); - } + SessionKeyManager mgr = _context.sessionKeyManager(); + if (mgr instanceof PersistentSessionKeyManager) { + PersistentSessionKeyManager manager = (PersistentSessionKeyManager)mgr; + File f = new File(SESSION_KEY_FILE); + if (f.exists()) { + FileInputStream fin = null; + try { + fin = new FileInputStream(f); + manager.loadState(fin); + int expired = manager.aggressiveExpire(); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Session keys loaded [not error] with " + expired + + " sets immediately expired"); + } catch (Throwable t) { + _log.error("Error reading in session key data", t); + } finally { + if (fin != null) try { fin.close(); } catch (IOException ioe) {} + } + } + _context.jobQueue().addJob(new SessionKeyWriterJob()); + } } - private static void writeState() { - Object o = SessionKeyManager.getInstance(); - if (!(o instanceof PersistentSessionKeyManager)) { - _log.error("Unable to persist the session key 
state - manager is " + o.getClass().getName()); - return; - } - PersistentSessionKeyManager mgr = (PersistentSessionKeyManager)o; - - // only need for synchronization is during shutdown() - synchronized (mgr) { - FileOutputStream fos = null; - try { - int expired = mgr.aggressiveExpire(); - if (expired > 0) { - _log.info("Agressive expired " + expired + " tag sets"); - } - fos = new FileOutputStream(SESSION_KEY_FILE); - mgr.saveState(fos); - fos.flush(); - _log.debug("Session keys written"); - } catch (Throwable t) { - _log.debug("Error writing session key state", t); - } finally { - if (fos != null) try { fos.close(); } catch (IOException ioe) {} - } - } + private void writeState() { + Object o = _context.sessionKeyManager(); + if (!(o instanceof PersistentSessionKeyManager)) { + _log.error("Unable to persist the session key state - manager is " + o.getClass().getName()); + return; + } + PersistentSessionKeyManager mgr = (PersistentSessionKeyManager)o; + + // only need for synchronization is during shutdown() + synchronized (mgr) { + FileOutputStream fos = null; + try { + int expired = mgr.aggressiveExpire(); + if (expired > 0) { + _log.info("Agressive expired " + expired + " tag sets"); + } + fos = new FileOutputStream(SESSION_KEY_FILE); + mgr.saveState(fos); + fos.flush(); + _log.debug("Session keys written"); + } catch (Throwable t) { + _log.debug("Error writing session key state", t); + } finally { + if (fos != null) try { fos.close(); } catch (IOException ioe) {} + } + } } public String renderStatusHTML() { return ""; } private class SessionKeyWriterJob extends JobImpl { - public SessionKeyWriterJob() { - super(); - getTiming().setStartAfter(PERSIST_DELAY); - } - public String getName() { return "Write Session Keys"; } - public void runJob() { - writeState(); - requeue(PERSIST_DELAY); - } + public SessionKeyWriterJob() { + super(SessionKeyPersistenceHelper.this._context); + getTiming().setStartAfter(PERSIST_DELAY); + } + public String getName() { return 
"Write Session Keys"; } + public void runJob() { + writeState(); + requeue(PERSIST_DELAY); + } } } diff --git a/router/java/src/net/i2p/router/Shitlist.java b/router/java/src/net/i2p/router/Shitlist.java index 942790dcb..747eb351b 100644 --- a/router/java/src/net/i2p/router/Shitlist.java +++ b/router/java/src/net/i2p/router/Shitlist.java @@ -1,9 +1,9 @@ package net.i2p.router; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -24,76 +24,82 @@ import net.i2p.util.Log; * */ public class Shitlist { - private final static Shitlist _instance = new Shitlist(); - public final static Shitlist getInstance() { return _instance; } - private final static Log _log = new Log(Shitlist.class); + private Log _log; + private RouterContext _context; private Map _shitlist; // H(routerIdent) --> Date public final static long SHITLIST_DURATION_MS = 4*60*1000; // 4 minute shitlist - private Shitlist() { - _shitlist = new HashMap(100); + public Shitlist(RouterContext context) { + _context = context; + _log = context.logManager().getLog(Shitlist.class); + _shitlist = new HashMap(100); } public boolean shitlistRouter(Hash peer) { - if (peer == null) return false; - boolean wasAlready = false; - if (_log.shouldLog(Log.INFO)) - _log.info("Shitlisting router " + peer.toBase64(), new Exception("Shitlist cause")); - - synchronized (_shitlist) { - Date oldDate = (Date)_shitlist.put(peer, new Date(Clock.getInstance().now())); - wasAlready = (null == oldDate); - } - NetworkDatabaseFacade.getInstance().fail(peer); - 
TunnelManagerFacade.getInstance().peerFailed(peer); - return wasAlready; + if (peer == null) return false; + if (_context.routerHash().equals(peer)) { + _log.error("wtf, why did we try to shitlist ourselves?", new Exception("shitfaced")); + return false; + } + boolean wasAlready = false; + if (_log.shouldLog(Log.INFO)) + _log.info("Shitlisting router " + peer.toBase64(), new Exception("Shitlist cause")); + + synchronized (_shitlist) { + Date oldDate = (Date)_shitlist.put(peer, new Date(_context.clock().now())); + wasAlready = (null == oldDate); + } + + _context.netDb().fail(peer); + _context.tunnelManager().peerFailed(peer); + return wasAlready; } public void unshitlistRouter(Hash peer) { - if (peer == null) return; - _log.info("Unshitlisting router " + peer.toBase64()); - synchronized (_shitlist) { - _shitlist.remove(peer); - } + if (peer == null) return; + _log.info("Unshitlisting router " + peer.toBase64()); + synchronized (_shitlist) { + _shitlist.remove(peer); + } } public boolean isShitlisted(Hash peer) { - Date shitlistDate = null; - synchronized (_shitlist) { - shitlistDate = (Date)_shitlist.get(peer); - } - if (shitlistDate == null) return false; - - // check validity - if (shitlistDate.getTime() > Clock.getInstance().now() - SHITLIST_DURATION_MS) { - return true; - } else { - unshitlistRouter(peer); - return false; - } + Date shitlistDate = null; + synchronized (_shitlist) { + shitlistDate = (Date)_shitlist.get(peer); + } + if (shitlistDate == null) return false; + + // check validity + if (shitlistDate.getTime() > _context.clock().now() - SHITLIST_DURATION_MS) { + return true; + } else { + unshitlistRouter(peer); + return false; + } } public String renderStatusHTML() { - StringBuffer buf = new StringBuffer(); - buf.append("

    Shitlist

    "); - Map shitlist = new HashMap(); - synchronized (_shitlist) { - shitlist.putAll(_shitlist); - } - buf.append("
      "); - - long limit = Clock.getInstance().now() - SHITLIST_DURATION_MS; - - for (Iterator iter = shitlist.keySet().iterator(); iter.hasNext(); ) { - Hash key = (Hash)iter.next(); - Date shitDate = (Date)shitlist.get(key); - if (shitDate.getTime() < limit) - unshitlistRouter(key); - else - buf.append("
    • ").append(key.toBase64()).append(" was shitlisted on ").append(shitDate).append("
    • \n"); - } - buf.append("
    \n"); - return buf.toString(); + StringBuffer buf = new StringBuffer(); + buf.append("

    Shitlist

    "); + Map shitlist = new HashMap(); + synchronized (_shitlist) { + shitlist.putAll(_shitlist); + } + buf.append("
      "); + + long limit = _context.clock().now() - SHITLIST_DURATION_MS; + + for (Iterator iter = shitlist.keySet().iterator(); iter.hasNext(); ) { + Hash key = (Hash)iter.next(); + Date shitDate = (Date)shitlist.get(key); + if (shitDate.getTime() < limit) + unshitlistRouter(key); + else + buf.append("
    • ").append(key.toBase64()).append(" was shitlisted on ").append(shitDate).append("
    • \n"); + } + buf.append("
    \n"); + return buf.toString(); } } diff --git a/router/java/src/net/i2p/router/StatisticsManager.java b/router/java/src/net/i2p/router/StatisticsManager.java index ddc74c717..987ab7be9 100644 --- a/router/java/src/net/i2p/router/StatisticsManager.java +++ b/router/java/src/net/i2p/router/StatisticsManager.java @@ -25,9 +25,8 @@ import net.i2p.util.Log; * */ public class StatisticsManager implements Service { - private final static Log _log = new Log(StatisticsManager.class); - private static StatisticsManager _instance = new StatisticsManager(); - public static StatisticsManager getInstance() { return _instance; } + private Log _log; + private RouterContext _context; private boolean _includePeerRankings; private int _publishedStats; @@ -36,13 +35,15 @@ public class StatisticsManager implements Service { public final static String PROP_MAX_PUBLISHED_PEERS = "router.publishPeerMax"; public final static int DEFAULT_MAX_PUBLISHED_PEERS = 20; - public StatisticsManager() { + public StatisticsManager(RouterContext context) { + _context = context; + _log = context.logManager().getLog(StatisticsManager.class); _includePeerRankings = false; } public void shutdown() {} public void startup() { - String val = Router.getInstance().getConfigSetting(PROP_PUBLISH_RANKINGS); + String val = _context.router().getConfigSetting(PROP_PUBLISH_RANKINGS); try { if (val == null) { if (_log.shouldLog(Log.INFO)) @@ -65,7 +66,7 @@ public class StatisticsManager implements Service { + "], so we're defaulting to FALSE"); _includePeerRankings = false; } - val = Router.getInstance().getConfigSetting(PROP_MAX_PUBLISHED_PEERS); + val = _context.router().getConfigSetting(PROP_MAX_PUBLISHED_PEERS); if (val == null) { _publishedStats = DEFAULT_MAX_PUBLISHED_PEERS; } else { @@ -90,7 +91,7 @@ public class StatisticsManager implements Service { stats.setProperty("core.id", CoreVersion.ID); if (_includePeerRankings) { - stats.putAll(ProfileManager.getInstance().summarizePeers(_publishedStats)); + 
stats.putAll(_context.profileManager().summarizePeers(_publishedStats)); includeRate("transport.sendProcessingTime", stats, new long[] { 60*1000, 60*60*1000 }); //includeRate("tcp.queueSize", stats); @@ -110,7 +111,7 @@ public class StatisticsManager implements Service { includeRate("netDb.successPeers", stats, new long[] { 60*60*1000 }); includeRate("transport.receiveMessageSize", stats, new long[] { 5*60*1000, 60*60*1000 }); includeRate("transport.sendMessageSize", stats, new long[] { 5*60*1000, 60*60*1000 }); - stats.setProperty("stat_uptime", DataHelper.formatDuration(Router.getInstance().getUptime())); + stats.setProperty("stat_uptime", DataHelper.formatDuration(_context.router().getUptime())); stats.setProperty("stat__rateKey", "avg;maxAvg;pctLifetime;[sat;satLim;maxSat;maxSatLim;][num;lifetimeFreq;maxFreq]"); _log.debug("Publishing peer rankings"); } else { @@ -126,7 +127,7 @@ public class StatisticsManager implements Service { includeRate(rateName, stats, null); } private void includeRate(String rateName, Properties stats, long selectedPeriods[]) { - RateStat rate = StatManager.getInstance().getRate(rateName); + RateStat rate = _context.statManager().getRate(rateName); if (rate == null) return; long periods[] = rate.getPeriods(); for (int i = 0; i < periods.length; i++) { diff --git a/router/java/src/net/i2p/router/SubmitMessageHistoryJob.java b/router/java/src/net/i2p/router/SubmitMessageHistoryJob.java index 0f9fe0d13..a6c67be2f 100644 --- a/router/java/src/net/i2p/router/SubmitMessageHistoryJob.java +++ b/router/java/src/net/i2p/router/SubmitMessageHistoryJob.java @@ -9,12 +9,13 @@ import net.i2p.util.Clock; import net.i2p.util.HTTPSendData; import net.i2p.util.I2PThread; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Job that, if its allowed to, will submit the data gathered by the MessageHistory * component to some URL so that the network can be debugged more easily. 
By default * it does not submit any data or touch the message history file, but if the router - * has the line "router.submitHistory=true", it will send the file that the + * has the line "router.submitHistory=true", it will send the file that the * MessageHistory component is configured to write to once an hour, post it to * http://i2p.net/cgi-bin/submitMessageHistory, and then delete that file * locally. This should only be used if the MessageHistory component is configured to @@ -22,14 +23,14 @@ import net.i2p.util.Log; * */ public class SubmitMessageHistoryJob extends JobImpl { - private final static Log _log = new Log(SubmitMessageHistoryJob.class); + private Log _log; /** default submitting data every hour */ - private final static long DEFAULT_REQUEUE_DELAY = 60*60*1000; - /** + private final static long DEFAULT_REQUEUE_DELAY = 60*60*1000; + /** * router config param for whether we want to autosubmit (and delete) the - * history data managed by MessageHistory - */ + * history data managed by MessageHistory + */ public final static String PARAM_SUBMIT_DATA = "router.submitHistory"; /** default value for whether we autosubmit the data */ public final static boolean DEFAULT_SUBMIT_DATA = true; @@ -38,14 +39,19 @@ public class SubmitMessageHistoryJob extends JobImpl { /** default location */ public final static String DEFAULT_SUBMIT_URL = "http://i2p.net/cgi-bin/submitMessageHistory"; + public SubmitMessageHistoryJob(RouterContext context) { + super(context); + _log = context.logManager().getLog(SubmitMessageHistoryJob.class); + } + public void runJob() { - if (shouldSubmit()) { - submit(); - } else { - _log.debug("Not submitting data"); - // if we didn't submit we can just requeue - requeue(getRequeueDelay()); - } + if (shouldSubmit()) { + submit(); + } else { + _log.debug("Not submitting data"); + // if we didn't submit we can just requeue + requeue(getRequeueDelay()); + } } /** @@ -53,64 +59,64 @@ public class SubmitMessageHistoryJob extends JobImpl { * to do 
the actual submission, enqueueing a new submit job when its done */ private void submit() { - I2PThread t = new I2PThread(new Runnable() { - public void run() { - _log.debug("Submitting data"); - MessageHistory.getInstance().setPauseFlushes(true); - String filename = MessageHistory.getInstance().getFilename(); - send(filename); - MessageHistory.getInstance().setPauseFlushes(false); - Job job = new SubmitMessageHistoryJob(); - job.getTiming().setStartAfter(Clock.getInstance().now() + getRequeueDelay()); - JobQueue.getInstance().addJob(job); - } - }); - t.setName("SubmitData"); - t.setPriority(I2PThread.MIN_PRIORITY); - t.setDaemon(true); - t.start(); + I2PThread t = new I2PThread(new Runnable() { + public void run() { + _log.debug("Submitting data"); + _context.messageHistory().setPauseFlushes(true); + String filename = _context.messageHistory().getFilename(); + send(filename); + _context.messageHistory().setPauseFlushes(false); + Job job = new SubmitMessageHistoryJob(_context); + job.getTiming().setStartAfter(_context.clock().now() + getRequeueDelay()); + _context.jobQueue().addJob(job); + } + }); + t.setName("SubmitData"); + t.setPriority(I2PThread.MIN_PRIORITY); + t.setDaemon(true); + t.start(); } private void send(String filename) { - String url = getURL(); - try { - File dataFile = new File(filename); - if (!dataFile.exists() || !dataFile.canRead()) { - _log.warn("Unable to read the message data file [" + dataFile.getAbsolutePath() + "]"); - return; - } - long size = dataFile.length(); - int expectedSend = 512; // 512 for HTTP overhead - if (size > 0) - expectedSend += (int)size/10; // compression - FileInputStream fin = new FileInputStream(dataFile); - BandwidthLimiter.getInstance().delayOutbound(null, expectedSend); - boolean sent = HTTPSendData.postData(url, size, fin); - fin.close(); - boolean deleted = dataFile.delete(); - _log.debug("Submitted " + size + " bytes? " + sent + " and deleted? 
" + deleted); - } catch (IOException ioe) { - _log.error("Error sending the data", ioe); - } + String url = getURL(); + try { + File dataFile = new File(filename); + if (!dataFile.exists() || !dataFile.canRead()) { + _log.warn("Unable to read the message data file [" + dataFile.getAbsolutePath() + "]"); + return; + } + long size = dataFile.length(); + int expectedSend = 512; // 512 for HTTP overhead + if (size > 0) + expectedSend += (int)size/10; // compression + FileInputStream fin = new FileInputStream(dataFile); + _context.bandwidthLimiter().delayOutbound(null, expectedSend); + boolean sent = HTTPSendData.postData(url, size, fin); + fin.close(); + boolean deleted = dataFile.delete(); + _log.debug("Submitted " + size + " bytes? " + sent + " and deleted? " + deleted); + } catch (IOException ioe) { + _log.error("Error sending the data", ioe); + } } private String getURL() { - String str = Router.getInstance().getConfigSetting(PARAM_SUBMIT_URL); - if ( (str == null) || (str.trim().length() <= 0) ) - return DEFAULT_SUBMIT_URL; - else - return str.trim(); + String str = _context.router().getConfigSetting(PARAM_SUBMIT_URL); + if ( (str == null) || (str.trim().length() <= 0) ) + return DEFAULT_SUBMIT_URL; + else + return str.trim(); } - private boolean shouldSubmit() { - String str = Router.getInstance().getConfigSetting(PARAM_SUBMIT_DATA); - if (str == null) { - _log.debug("History submit config not specified [" + PARAM_SUBMIT_DATA + "], default = " + DEFAULT_SUBMIT_DATA); - return DEFAULT_SUBMIT_DATA; - } else { - _log.debug("History submit config specified [" + str + "]"); - } - return Boolean.TRUE.toString().equals(str); + private boolean shouldSubmit() { + String str = _context.router().getConfigSetting(PARAM_SUBMIT_DATA); + if (str == null) { + _log.debug("History submit config not specified [" + PARAM_SUBMIT_DATA + "], default = " + DEFAULT_SUBMIT_DATA); + return DEFAULT_SUBMIT_DATA; + } else { + _log.debug("History submit config specified [" + str + "]"); + } + 
return Boolean.TRUE.toString().equals(str); } private long getRequeueDelay() { return DEFAULT_REQUEUE_DELAY; } public String getName() { return "Submit Message History"; } diff --git a/router/java/src/net/i2p/router/TunnelInfo.java b/router/java/src/net/i2p/router/TunnelInfo.java index 560b8fa82..7589cea71 100644 --- a/router/java/src/net/i2p/router/TunnelInfo.java +++ b/router/java/src/net/i2p/router/TunnelInfo.java @@ -1,9 +1,9 @@ package net.i2p.router; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -29,12 +29,13 @@ import net.i2p.data.i2np.TunnelConfigurationSessionKey; import net.i2p.data.i2np.TunnelSessionKey; import net.i2p.data.i2np.TunnelSigningPrivateKey; import net.i2p.data.i2np.TunnelSigningPublicKey; -import net.i2p.util.Clock; +import net.i2p.I2PAppContext; /** * Defines the information associated with a tunnel */ public class TunnelInfo extends DataStructureImpl { + private I2PAppContext _context; private TunnelId _id; private Hash _nextHop; private Hash _thisHop; @@ -50,21 +51,22 @@ public class TunnelInfo extends DataStructureImpl { private boolean _ready; private boolean _wasEverReady; - public TunnelInfo() { - setTunnelId(null); - setThisHop(null); - setNextHop(null); - setNextHopInfo(null); - _configurationKey = null; - _verificationKey = null; - _signingKey = null; - _encryptionKey = null; - setDestination(null); - setSettings(null); - _options = new Properties(); - _ready = false; - _wasEverReady = false; - _created = Clock.getInstance().now(); + public TunnelInfo(I2PAppContext context) { 
+ _context = context; + setTunnelId(null); + setThisHop(null); + setNextHop(null); + setNextHopInfo(null); + _configurationKey = null; + _verificationKey = null; + _signingKey = null; + _encryptionKey = null; + setDestination(null); + setSettings(null); + _options = new Properties(); + _ready = false; + _wasEverReady = false; + _created = _context.clock().now(); } public TunnelId getTunnelId() { return _id; } @@ -81,34 +83,34 @@ public class TunnelInfo extends DataStructureImpl { public TunnelConfigurationSessionKey getConfigurationKey() { return _configurationKey; } public void setConfigurationKey(TunnelConfigurationSessionKey key) { _configurationKey = key; } - public void setConfigurationKey(SessionKey key) { - TunnelConfigurationSessionKey tk = new TunnelConfigurationSessionKey(); - tk.setKey(key); - _configurationKey = tk; + public void setConfigurationKey(SessionKey key) { + TunnelConfigurationSessionKey tk = new TunnelConfigurationSessionKey(); + tk.setKey(key); + _configurationKey = tk; } public TunnelSigningPublicKey getVerificationKey() { return _verificationKey; } public void setVerificationKey(TunnelSigningPublicKey key) { _verificationKey = key; } - public void setVerificationKey(SigningPublicKey key) { - TunnelSigningPublicKey tk = new TunnelSigningPublicKey(); - tk.setKey(key); - _verificationKey = tk; + public void setVerificationKey(SigningPublicKey key) { + TunnelSigningPublicKey tk = new TunnelSigningPublicKey(); + tk.setKey(key); + _verificationKey = tk; } public TunnelSigningPrivateKey getSigningKey() { return _signingKey; } public void setSigningKey(TunnelSigningPrivateKey key) { _signingKey = key; } - public void setSigningKey(SigningPrivateKey key) { - TunnelSigningPrivateKey tk = new TunnelSigningPrivateKey(); - tk.setKey(key); - _signingKey = tk; + public void setSigningKey(SigningPrivateKey key) { + TunnelSigningPrivateKey tk = new TunnelSigningPrivateKey(); + tk.setKey(key); + _signingKey = tk; } public TunnelSessionKey 
getEncryptionKey() { return _encryptionKey; } public void setEncryptionKey(TunnelSessionKey key) { _encryptionKey = key; } - public void setEncryptionKey(SessionKey key) { - TunnelSessionKey tk = new TunnelSessionKey(); - tk.setKey(key); - _encryptionKey = tk; + public void setEncryptionKey(SessionKey key) { + TunnelSessionKey tk = new TunnelSessionKey(); + tk.setKey(key); + _encryptionKey = tk; } public Destination getDestination() { return _destination; } @@ -120,17 +122,17 @@ public class TunnelInfo extends DataStructureImpl { public Set getPropertyNames() { return new HashSet(_options.keySet()); } public TunnelSettings getSettings() { return _settings; } - public void setSettings(TunnelSettings settings) { _settings = settings; } - + public void setSettings(TunnelSettings settings) { _settings = settings; } + /** * Have all of the routers in this tunnel confirmed participation, and we're ok to * start sending messages through this tunnel? */ public boolean getIsReady() { return _ready; } - public void setIsReady(boolean ready) { - _ready = ready; - if (ready) - _wasEverReady = true; + public void setIsReady(boolean ready) { + _ready = ready; + if (ready) + _wasEverReady = true; } /** * true if this tunnel was ever working (aka rebuildable) @@ -145,204 +147,204 @@ public class TunnelInfo extends DataStructureImpl { * */ public final int getLength() { - int len = 0; - TunnelInfo info = this; - while (info != null) { - info = info.getNextHopInfo(); - len++; - } - return len; + int len = 0; + TunnelInfo info = this; + while (info != null) { + info = info.getNextHopInfo(); + len++; + } + return len; } public void readBytes(InputStream in) throws DataFormatException, IOException { - _options = DataHelper.readProperties(in); - Boolean includeDest = DataHelper.readBoolean(in); - if (includeDest.booleanValue()) { - _destination = new Destination(); - _destination.readBytes(in); - } else { - _destination = null; - } - Boolean includeThis = DataHelper.readBoolean(in); - 
if (includeThis.booleanValue()) { - _thisHop = new Hash(); - _thisHop.readBytes(in); - } else { - _thisHop = null; - } - Boolean includeNext = DataHelper.readBoolean(in); - if (includeNext.booleanValue()) { - _nextHop = new Hash(); - _nextHop.readBytes(in); - } else { - _nextHop = null; - } - Boolean includeNextInfo = DataHelper.readBoolean(in); - if (includeNextInfo.booleanValue()) { - _nextHopInfo = new TunnelInfo(); - _nextHopInfo.readBytes(in); - } else { - _nextHopInfo = null; - } - _id = new TunnelId(); - _id.readBytes(in); - Boolean includeConfigKey = DataHelper.readBoolean(in); - if (includeConfigKey.booleanValue()) { - _configurationKey = new TunnelConfigurationSessionKey(); - _configurationKey.readBytes(in); - } else { - _configurationKey = null; - } - Boolean includeEncryptionKey = DataHelper.readBoolean(in); - if (includeEncryptionKey.booleanValue()) { - _encryptionKey = new TunnelSessionKey(); - _encryptionKey.readBytes(in); - } else { - _encryptionKey = null; - } - Boolean includeSigningKey = DataHelper.readBoolean(in); - if (includeSigningKey.booleanValue()) { - _signingKey = new TunnelSigningPrivateKey(); - _signingKey.readBytes(in); - } else { - _signingKey = null; - } - Boolean includeVerificationKey = DataHelper.readBoolean(in); - if (includeVerificationKey.booleanValue()) { - _verificationKey = new TunnelSigningPublicKey(); - _verificationKey.readBytes(in); - } else { - _verificationKey = null; - } - _settings = new TunnelSettings(); - _settings.readBytes(in); - Boolean ready = DataHelper.readBoolean(in); - if (ready != null) - setIsReady(ready.booleanValue()); + _options = DataHelper.readProperties(in); + Boolean includeDest = DataHelper.readBoolean(in); + if (includeDest.booleanValue()) { + _destination = new Destination(); + _destination.readBytes(in); + } else { + _destination = null; + } + Boolean includeThis = DataHelper.readBoolean(in); + if (includeThis.booleanValue()) { + _thisHop = new Hash(); + _thisHop.readBytes(in); + } else { + 
_thisHop = null; + } + Boolean includeNext = DataHelper.readBoolean(in); + if (includeNext.booleanValue()) { + _nextHop = new Hash(); + _nextHop.readBytes(in); + } else { + _nextHop = null; + } + Boolean includeNextInfo = DataHelper.readBoolean(in); + if (includeNextInfo.booleanValue()) { + _nextHopInfo = new TunnelInfo(_context); + _nextHopInfo.readBytes(in); + } else { + _nextHopInfo = null; + } + _id = new TunnelId(); + _id.readBytes(in); + Boolean includeConfigKey = DataHelper.readBoolean(in); + if (includeConfigKey.booleanValue()) { + _configurationKey = new TunnelConfigurationSessionKey(); + _configurationKey.readBytes(in); + } else { + _configurationKey = null; + } + Boolean includeEncryptionKey = DataHelper.readBoolean(in); + if (includeEncryptionKey.booleanValue()) { + _encryptionKey = new TunnelSessionKey(); + _encryptionKey.readBytes(in); + } else { + _encryptionKey = null; + } + Boolean includeSigningKey = DataHelper.readBoolean(in); + if (includeSigningKey.booleanValue()) { + _signingKey = new TunnelSigningPrivateKey(); + _signingKey.readBytes(in); + } else { + _signingKey = null; + } + Boolean includeVerificationKey = DataHelper.readBoolean(in); + if (includeVerificationKey.booleanValue()) { + _verificationKey = new TunnelSigningPublicKey(); + _verificationKey.readBytes(in); + } else { + _verificationKey = null; + } + _settings = new TunnelSettings(_context); + _settings.readBytes(in); + Boolean ready = DataHelper.readBoolean(in); + if (ready != null) + setIsReady(ready.booleanValue()); } public void writeBytes(OutputStream out) throws DataFormatException, IOException { if (_id == null) throw new DataFormatException("Invalid tunnel ID: " + _id); - if (_options == null) throw new DataFormatException("Options are null"); - if (_settings == null) throw new DataFormatException("Settings are null"); - // everything else is optional in the serialization - - DataHelper.writeProperties(out, _options); - if (_destination != null) { - 
DataHelper.writeBoolean(out, Boolean.TRUE); - _destination.writeBytes(out); - } else { - DataHelper.writeBoolean(out, Boolean.FALSE); - } - if (_thisHop != null) { - DataHelper.writeBoolean(out, Boolean.TRUE); - _thisHop.writeBytes(out); - } else { - DataHelper.writeBoolean(out, Boolean.FALSE); - } - if (_nextHop != null) { - DataHelper.writeBoolean(out, Boolean.TRUE); - _nextHop.writeBytes(out); - } else { - DataHelper.writeBoolean(out, Boolean.FALSE); - } - if (_nextHopInfo != null) { - DataHelper.writeBoolean(out, Boolean.TRUE); - _nextHopInfo.writeBytes(out); - } else { - DataHelper.writeBoolean(out, Boolean.FALSE); - } - _id.writeBytes(out); - if (_configurationKey != null) { - DataHelper.writeBoolean(out, Boolean.TRUE); - _configurationKey.writeBytes(out); - } else { - DataHelper.writeBoolean(out, Boolean.FALSE); - } - if (_encryptionKey != null) { - DataHelper.writeBoolean(out, Boolean.TRUE); - _encryptionKey.writeBytes(out); - } else { - DataHelper.writeBoolean(out, Boolean.FALSE); - } - if (_signingKey != null) { - DataHelper.writeBoolean(out, Boolean.TRUE); - _signingKey.writeBytes(out); - } else { - DataHelper.writeBoolean(out, Boolean.FALSE); - } - if (_verificationKey != null) { - DataHelper.writeBoolean(out, Boolean.TRUE); - _verificationKey.writeBytes(out); - } else { - DataHelper.writeBoolean(out, Boolean.FALSE); - } - _settings.writeBytes(out); - DataHelper.writeBoolean(out, new Boolean(_ready)); + if (_options == null) throw new DataFormatException("Options are null"); + if (_settings == null) throw new DataFormatException("Settings are null"); + // everything else is optional in the serialization + + DataHelper.writeProperties(out, _options); + if (_destination != null) { + DataHelper.writeBoolean(out, Boolean.TRUE); + _destination.writeBytes(out); + } else { + DataHelper.writeBoolean(out, Boolean.FALSE); + } + if (_thisHop != null) { + DataHelper.writeBoolean(out, Boolean.TRUE); + _thisHop.writeBytes(out); + } else { + 
DataHelper.writeBoolean(out, Boolean.FALSE); + } + if (_nextHop != null) { + DataHelper.writeBoolean(out, Boolean.TRUE); + _nextHop.writeBytes(out); + } else { + DataHelper.writeBoolean(out, Boolean.FALSE); + } + if (_nextHopInfo != null) { + DataHelper.writeBoolean(out, Boolean.TRUE); + _nextHopInfo.writeBytes(out); + } else { + DataHelper.writeBoolean(out, Boolean.FALSE); + } + _id.writeBytes(out); + if (_configurationKey != null) { + DataHelper.writeBoolean(out, Boolean.TRUE); + _configurationKey.writeBytes(out); + } else { + DataHelper.writeBoolean(out, Boolean.FALSE); + } + if (_encryptionKey != null) { + DataHelper.writeBoolean(out, Boolean.TRUE); + _encryptionKey.writeBytes(out); + } else { + DataHelper.writeBoolean(out, Boolean.FALSE); + } + if (_signingKey != null) { + DataHelper.writeBoolean(out, Boolean.TRUE); + _signingKey.writeBytes(out); + } else { + DataHelper.writeBoolean(out, Boolean.FALSE); + } + if (_verificationKey != null) { + DataHelper.writeBoolean(out, Boolean.TRUE); + _verificationKey.writeBytes(out); + } else { + DataHelper.writeBoolean(out, Boolean.FALSE); + } + _settings.writeBytes(out); + DataHelper.writeBoolean(out, new Boolean(_ready)); } public String toString() { - StringBuffer buf = new StringBuffer(); - buf.append("[Tunnel ").append(_id.getTunnelId()); - TunnelInfo cur = this; - int i = 0; - while (cur != null) { - buf.append("\n*Hop ").append(i).append(": ").append(cur.getThisHop()); - if (cur.getEncryptionKey() != null) - buf.append("\n Encryption key: ").append(cur.getEncryptionKey()); - if (cur.getSigningKey() != null) - buf.append("\n Signing key: ").append(cur.getSigningKey()); - if (cur.getVerificationKey() != null) - buf.append("\n Verification key: ").append(cur.getVerificationKey()); - if (cur.getDestination() != null) - buf.append("\n Destination: ").append(cur.getDestination().calculateHash().toBase64()); - if (cur.getNextHop() != null) - buf.append("\n Next: ").append(cur.getNextHop()); - if (cur.getSettings() == 
null) - buf.append("\n Expiration: ").append("none"); - else - buf.append("\n Expiration: ").append(new Date(cur.getSettings().getExpiration())); - buf.append("\n Ready: ").append(getIsReady()); - cur = cur.getNextHopInfo(); - i++; - } - buf.append("]"); - return buf.toString(); + StringBuffer buf = new StringBuffer(); + buf.append("[Tunnel ").append(_id.getTunnelId()); + TunnelInfo cur = this; + int i = 0; + while (cur != null) { + buf.append("\n*Hop ").append(i).append(": ").append(cur.getThisHop()); + if (cur.getEncryptionKey() != null) + buf.append("\n Encryption key: ").append(cur.getEncryptionKey()); + if (cur.getSigningKey() != null) + buf.append("\n Signing key: ").append(cur.getSigningKey()); + if (cur.getVerificationKey() != null) + buf.append("\n Verification key: ").append(cur.getVerificationKey()); + if (cur.getDestination() != null) + buf.append("\n Destination: ").append(cur.getDestination().calculateHash().toBase64()); + if (cur.getNextHop() != null) + buf.append("\n Next: ").append(cur.getNextHop()); + if (cur.getSettings() == null) + buf.append("\n Expiration: ").append("none"); + else + buf.append("\n Expiration: ").append(new Date(cur.getSettings().getExpiration())); + buf.append("\n Ready: ").append(getIsReady()); + cur = cur.getNextHopInfo(); + i++; + } + buf.append("]"); + return buf.toString(); } public int hashCode() { - int rv = 0; - rv = 7*rv + DataHelper.hashCode(_options); - rv = 7*rv + DataHelper.hashCode(_destination); - rv = 7*rv + DataHelper.hashCode(_nextHop); - rv = 7*rv + DataHelper.hashCode(_thisHop); - rv = 7*rv + DataHelper.hashCode(_id); - rv = 7*rv + DataHelper.hashCode(_configurationKey); - rv = 7*rv + DataHelper.hashCode(_encryptionKey); - rv = 7*rv + DataHelper.hashCode(_signingKey); - rv = 7*rv + DataHelper.hashCode(_verificationKey); - rv = 7*rv + DataHelper.hashCode(_settings); - rv = 7*rv + (_ready ? 
0 : 1); - return rv; + int rv = 0; + rv = 7*rv + DataHelper.hashCode(_options); + rv = 7*rv + DataHelper.hashCode(_destination); + rv = 7*rv + DataHelper.hashCode(_nextHop); + rv = 7*rv + DataHelper.hashCode(_thisHop); + rv = 7*rv + DataHelper.hashCode(_id); + rv = 7*rv + DataHelper.hashCode(_configurationKey); + rv = 7*rv + DataHelper.hashCode(_encryptionKey); + rv = 7*rv + DataHelper.hashCode(_signingKey); + rv = 7*rv + DataHelper.hashCode(_verificationKey); + rv = 7*rv + DataHelper.hashCode(_settings); + rv = 7*rv + (_ready ? 0 : 1); + return rv; } public boolean equals(Object obj) { - if ( (obj != null) && (obj instanceof TunnelInfo) ) { - TunnelInfo info = (TunnelInfo)obj; - return DataHelper.eq(getConfigurationKey(), info.getConfigurationKey()) && - DataHelper.eq(getDestination(), info.getDestination()) && - getIsReady() == info.getIsReady() && - DataHelper.eq(getEncryptionKey(), info.getEncryptionKey()) && - DataHelper.eq(getNextHop(), info.getNextHop()) && - DataHelper.eq(getNextHopInfo(), info.getNextHopInfo()) && - DataHelper.eq(getSettings(), info.getSettings()) && - DataHelper.eq(getSigningKey(), info.getSigningKey()) && - DataHelper.eq(getThisHop(), info.getThisHop()) && - DataHelper.eq(getTunnelId(), info.getTunnelId()) && - DataHelper.eq(getVerificationKey(), info.getVerificationKey()) && - DataHelper.eq(_options, info._options); - } else { - return false; - } + if ( (obj != null) && (obj instanceof TunnelInfo) ) { + TunnelInfo info = (TunnelInfo)obj; + return DataHelper.eq(getConfigurationKey(), info.getConfigurationKey()) && + DataHelper.eq(getDestination(), info.getDestination()) && + getIsReady() == info.getIsReady() && + DataHelper.eq(getEncryptionKey(), info.getEncryptionKey()) && + DataHelper.eq(getNextHop(), info.getNextHop()) && + DataHelper.eq(getNextHopInfo(), info.getNextHopInfo()) && + DataHelper.eq(getSettings(), info.getSettings()) && + DataHelper.eq(getSigningKey(), info.getSigningKey()) && + DataHelper.eq(getThisHop(), 
info.getThisHop()) && + DataHelper.eq(getTunnelId(), info.getTunnelId()) && + DataHelper.eq(getVerificationKey(), info.getVerificationKey()) && + DataHelper.eq(_options, info._options); + } else { + return false; + } } } diff --git a/router/java/src/net/i2p/router/TunnelManagerFacade.java b/router/java/src/net/i2p/router/TunnelManagerFacade.java index 373ae92f2..50ec1e68e 100644 --- a/router/java/src/net/i2p/router/TunnelManagerFacade.java +++ b/router/java/src/net/i2p/router/TunnelManagerFacade.java @@ -19,29 +19,27 @@ import net.i2p.router.tunnelmanager.PoolingTunnelManagerFacade; * Build and maintain tunnels throughout the network. * */ -public abstract class TunnelManagerFacade implements Service { - private static TunnelManagerFacade _instance = new PoolingTunnelManagerFacade(); - public static TunnelManagerFacade getInstance() { return _instance; } - +public interface TunnelManagerFacade extends Service { + /** * React to a request to join the specified tunnel. * * @return true if the router will accept participation, else false. */ - public abstract boolean joinTunnel(TunnelInfo info); + boolean joinTunnel(TunnelInfo info); /** * Retrieve the information related to a particular tunnel * */ - public abstract TunnelInfo getTunnelInfo(TunnelId id); + TunnelInfo getTunnelInfo(TunnelId id); /** * Retrieve a set of tunnels from the existing ones for various purposes */ - public abstract List selectOutboundTunnelIds(TunnelSelectionCriteria criteria); + List selectOutboundTunnelIds(TunnelSelectionCriteria criteria); /** * Retrieve a set of tunnels from the existing ones for various purposes */ - public abstract List selectInboundTunnelIds(TunnelSelectionCriteria criteria); + List selectInboundTunnelIds(TunnelSelectionCriteria criteria); /** * Make sure appropriate outbound tunnels are in place, builds requested @@ -49,18 +47,18 @@ public abstract class TunnelManagerFacade implements Service { * validate the leaseSet, then publish it in the network database. 
* */ - public abstract void createTunnels(Destination destination, ClientTunnelSettings clientSettings, long timeoutMs); + void createTunnels(Destination destination, ClientTunnelSettings clientSettings, long timeoutMs); /** * Called when a peer becomes unreachable - go through all of the current * tunnels and rebuild them if we can, or drop them if we can't. * */ - public abstract void peerFailed(Hash peer); + void peerFailed(Hash peer); /** * True if the peer currently part of a tunnel * */ - public abstract boolean isInUse(Hash peer); + boolean isInUse(Hash peer); } diff --git a/router/java/src/net/i2p/router/TunnelSettings.java b/router/java/src/net/i2p/router/TunnelSettings.java index 9bb8075e3..cb4d5bc32 100644 --- a/router/java/src/net/i2p/router/TunnelSettings.java +++ b/router/java/src/net/i2p/router/TunnelSettings.java @@ -1,9 +1,9 @@ package net.i2p.router; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -16,13 +16,14 @@ import java.util.Date; import net.i2p.data.DataFormatException; import net.i2p.data.DataHelper; import net.i2p.data.DataStructureImpl; -import net.i2p.util.Clock; +import net.i2p.I2PAppContext; /** - * Wrap up the settings specified for a particular tunnel + * Wrap up the settings specified for a particular tunnel * */ public class TunnelSettings extends DataStructureImpl { + private I2PAppContext _context; private int _depth; private long _msgsPerMinuteAvg; private long _bytesPerMinuteAvg; @@ -33,18 +34,19 @@ public class TunnelSettings extends DataStructureImpl { private long _expiration; private long _created; - public TunnelSettings() { - _depth = 0; - _msgsPerMinuteAvg = 0; - _msgsPerMinutePeak = 0; - _bytesPerMinuteAvg = 0; - _bytesPerMinutePeak = 0; - _includeDummy = false; - _reorder = false; - _expiration = 0; - _created = Clock.getInstance().now(); + public TunnelSettings(I2PAppContext context) { + _context = context; + _depth = 0; + _msgsPerMinuteAvg = 0; + _msgsPerMinutePeak = 0; + _bytesPerMinuteAvg = 0; + _bytesPerMinutePeak = 0; + _includeDummy = false; + _reorder = false; + _expiration = 0; + _created = _context.clock().now(); } - + public int getDepth() { return _depth; } public void setDepth(int depth) { _depth = depth; } public long getMessagesPerMinuteAverage() { return _msgsPerMinuteAvg; } @@ -64,71 +66,71 @@ public class TunnelSettings extends DataStructureImpl { public long getCreated() { return _created; } public void readBytes(InputStream in) throws DataFormatException, IOException { - Boolean b = DataHelper.readBoolean(in); - if (b == null) throw new DataFormatException("Null includeDummy boolean value"); - _includeDummy = b.booleanValue(); - b = DataHelper.readBoolean(in); - if (b == null) throw new DataFormatException("Null reorder boolean value"); - _reorder = b.booleanValue(); - _depth = (int)DataHelper.readLong(in, 1); - _bytesPerMinuteAvg = DataHelper.readLong(in, 4); - _bytesPerMinutePeak = 
DataHelper.readLong(in, 4); - Date exp = DataHelper.readDate(in); - if (exp == null) - _expiration = 0; - else - _expiration = exp.getTime(); - _msgsPerMinuteAvg = DataHelper.readLong(in, 4); - _msgsPerMinutePeak = DataHelper.readLong(in, 4); - Date created = DataHelper.readDate(in); - if (created != null) - _created = created.getTime(); - else - _created = Clock.getInstance().now(); + Boolean b = DataHelper.readBoolean(in); + if (b == null) throw new DataFormatException("Null includeDummy boolean value"); + _includeDummy = b.booleanValue(); + b = DataHelper.readBoolean(in); + if (b == null) throw new DataFormatException("Null reorder boolean value"); + _reorder = b.booleanValue(); + _depth = (int)DataHelper.readLong(in, 1); + _bytesPerMinuteAvg = DataHelper.readLong(in, 4); + _bytesPerMinutePeak = DataHelper.readLong(in, 4); + Date exp = DataHelper.readDate(in); + if (exp == null) + _expiration = 0; + else + _expiration = exp.getTime(); + _msgsPerMinuteAvg = DataHelper.readLong(in, 4); + _msgsPerMinutePeak = DataHelper.readLong(in, 4); + Date created = DataHelper.readDate(in); + if (created != null) + _created = created.getTime(); + else + _created = _context.clock().now(); } public void writeBytes(OutputStream out) throws DataFormatException, IOException { - DataHelper.writeBoolean(out, _includeDummy ? Boolean.TRUE : Boolean.FALSE); - DataHelper.writeBoolean(out, _reorder ? Boolean.TRUE : Boolean.FALSE); - DataHelper.writeLong(out, 1, _depth); - DataHelper.writeLong(out, 4, _bytesPerMinuteAvg); - DataHelper.writeLong(out, 4, _bytesPerMinutePeak); - if (_expiration <= 0) - DataHelper.writeDate(out, new Date(0)); - else - DataHelper.writeDate(out, new Date(_expiration)); - DataHelper.writeLong(out, 4, _msgsPerMinuteAvg); - DataHelper.writeLong(out, 4, _msgsPerMinutePeak); - DataHelper.writeDate(out, new Date(_created)); + DataHelper.writeBoolean(out, _includeDummy ? Boolean.TRUE : Boolean.FALSE); + DataHelper.writeBoolean(out, _reorder ? 
Boolean.TRUE : Boolean.FALSE); + DataHelper.writeLong(out, 1, _depth); + DataHelper.writeLong(out, 4, _bytesPerMinuteAvg); + DataHelper.writeLong(out, 4, _bytesPerMinutePeak); + if (_expiration <= 0) + DataHelper.writeDate(out, new Date(0)); + else + DataHelper.writeDate(out, new Date(_expiration)); + DataHelper.writeLong(out, 4, _msgsPerMinuteAvg); + DataHelper.writeLong(out, 4, _msgsPerMinutePeak); + DataHelper.writeDate(out, new Date(_created)); } public int hashCode() { - int rv = 0; - rv += _includeDummy ? 100 : 0; - rv += _reorder ? 50 : 0; - rv += _depth; - rv += _bytesPerMinuteAvg; - rv += _bytesPerMinutePeak; - rv += _expiration; - rv += _msgsPerMinuteAvg; - rv += _msgsPerMinutePeak; - return rv; + int rv = 0; + rv += _includeDummy ? 100 : 0; + rv += _reorder ? 50 : 0; + rv += _depth; + rv += _bytesPerMinuteAvg; + rv += _bytesPerMinutePeak; + rv += _expiration; + rv += _msgsPerMinuteAvg; + rv += _msgsPerMinutePeak; + return rv; } public boolean equals(Object obj) { - if ( (obj != null) && (obj instanceof TunnelSettings) ) { - TunnelSettings settings = (TunnelSettings)obj; - return settings.getBytesPerMinuteAverage() == getBytesPerMinuteAverage() && - settings.getBytesPerMinutePeak() == getBytesPerMinutePeak() && - settings.getDepth() == getDepth() && - settings.getExpiration() == getExpiration() && - settings.getIncludeDummy() == getIncludeDummy() && - settings.getMessagesPerMinuteAverage() == getMessagesPerMinuteAverage() && - settings.getMessagesPerMinutePeak() == getMessagesPerMinutePeak() && - settings.getReorder() == getReorder(); - } else { - return false; - } + if ( (obj != null) && (obj instanceof TunnelSettings) ) { + TunnelSettings settings = (TunnelSettings)obj; + return settings.getBytesPerMinuteAverage() == getBytesPerMinuteAverage() && + settings.getBytesPerMinutePeak() == getBytesPerMinutePeak() && + settings.getDepth() == getDepth() && + settings.getExpiration() == getExpiration() && + settings.getIncludeDummy() == getIncludeDummy() && + 
settings.getMessagesPerMinuteAverage() == getMessagesPerMinuteAverage() && + settings.getMessagesPerMinutePeak() == getMessagesPerMinutePeak() && + settings.getReorder() == getReorder(); + } else { + return false; + } } } diff --git a/router/java/src/net/i2p/router/admin/AdminListener.java b/router/java/src/net/i2p/router/admin/AdminListener.java index 10be7be4b..455533d60 100644 --- a/router/java/src/net/i2p/router/admin/AdminListener.java +++ b/router/java/src/net/i2p/router/admin/AdminListener.java @@ -1,9 +1,9 @@ package net.i2p.router.admin; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -14,6 +14,7 @@ import java.net.Socket; import net.i2p.util.I2PThread; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Listen for connections on the specified port, and toss them onto the client manager's @@ -22,15 +23,18 @@ import net.i2p.util.Log; * @author jrandom */ public class AdminListener implements Runnable { - private final static Log _log = new Log(AdminListener.class); + private Log _log; + private RouterContext _context; private ServerSocket _socket; private int _port; private boolean _running; private long _nextFailDelay = 1000; - public AdminListener(int port) { - _port = port; - _running = false; + public AdminListener(RouterContext context, int port) { + _context = context; + _log = context.logManager().getLog(AdminListener.class); + _port = port; + _running = false; } public void setPort(int port) { _port = port; } @@ -39,50 +43,50 @@ public class AdminListener implements Runnable { /** max time to bind */ private final static int MAX_FAIL_DELAY = 5*60*1000; - /** + /** * Start up the socket listener, listens for connections, and - * fires those connections off via {@link #runConnection runConnection}. + * fires those connections off via {@link #runConnection runConnection}. * This only returns if the socket cannot be opened or there is a catastrophic * failure. 
* */ public void startup() { - _running = true; - int curDelay = 0; - while ( (_running) && (curDelay < MAX_FAIL_DELAY) ) { - try { - _log.info("Starting up listening for connections on port " + _port); - _socket = new ServerSocket(_port); - curDelay = 0; - while (_running) { - try { - Socket socket = _socket.accept(); - _log.debug("Connection received"); - runConnection(socket); - } catch (IOException ioe) { - _log.error("Server error accepting", ioe); - } catch (Throwable t) { - _log.error("Fatal error running client listener - killing the thread!", t); - return; - } - } - } catch (IOException ioe) { - _log.error("Error listening on port " + _port, ioe); - } - - if (_socket != null) { - try { _socket.close(); } catch (IOException ioe) {} - _socket = null; - } - - _log.error("Error listening, waiting " + _nextFailDelay + "ms before we try again"); - try { Thread.sleep(_nextFailDelay); } catch (InterruptedException ie) {} - curDelay += _nextFailDelay; - _nextFailDelay *= 5; - } - - _log.error("CANCELING ADMIN LISTENER. 
delay = " + curDelay, new Exception("ADMIN LISTENER cancelled!!!")); - _running = false; + _running = true; + int curDelay = 0; + while ( (_running) && (curDelay < MAX_FAIL_DELAY) ) { + try { + _log.info("Starting up listening for connections on port " + _port); + _socket = new ServerSocket(_port); + curDelay = 0; + while (_running) { + try { + Socket socket = _socket.accept(); + _log.debug("Connection received"); + runConnection(socket); + } catch (IOException ioe) { + _log.error("Server error accepting", ioe); + } catch (Throwable t) { + _log.error("Fatal error running client listener - killing the thread!", t); + return; + } + } + } catch (IOException ioe) { + _log.error("Error listening on port " + _port, ioe); + } + + if (_socket != null) { + try { _socket.close(); } catch (IOException ioe) {} + _socket = null; + } + + _log.error("Error listening, waiting " + _nextFailDelay + "ms before we try again"); + try { Thread.sleep(_nextFailDelay); } catch (InterruptedException ie) {} + curDelay += _nextFailDelay; + _nextFailDelay *= 5; + } + + _log.error("CANCELING ADMIN LISTENER. 
delay = " + curDelay, new Exception("ADMIN LISTENER cancelled!!!")); + _running = false; } /** @@ -90,20 +94,20 @@ public class AdminListener implements Runnable { * */ protected void runConnection(Socket socket) throws IOException { - AdminRunner runner = new AdminRunner(socket); - I2PThread t = new I2PThread(runner); - t.setName("Admin Runner"); - t.setPriority(Thread.MIN_PRIORITY); - t.setDaemon(true); - t.start(); + AdminRunner runner = new AdminRunner(_context, socket); + I2PThread t = new I2PThread(runner); + t.setName("Admin Runner"); + t.setPriority(Thread.MIN_PRIORITY); + t.setDaemon(true); + t.start(); } - public void shutdown() { - _running = false; - if (_socket != null) try { - _socket.close(); - _socket = null; - } catch (IOException ioe) {} + public void shutdown() { + _running = false; + if (_socket != null) try { + _socket.close(); + _socket = null; + } catch (IOException ioe) {} } public void run() { startup(); } } diff --git a/router/java/src/net/i2p/router/admin/AdminManager.java b/router/java/src/net/i2p/router/admin/AdminManager.java index e536ad95b..63ec69924 100644 --- a/router/java/src/net/i2p/router/admin/AdminManager.java +++ b/router/java/src/net/i2p/router/admin/AdminManager.java @@ -4,47 +4,52 @@ import net.i2p.router.Router; import net.i2p.router.Service; import net.i2p.util.I2PThread; import net.i2p.util.Log; +import net.i2p.router.RouterContext; public class AdminManager implements Service { - private final static Log _log = new Log(AdminManager.class); - private final static AdminManager _instance = new AdminManager(); - public final static AdminManager getInstance() { return _instance; } + private Log _log; + private RouterContext _context; public final static String PARAM_ADMIN_PORT = "router.adminPort"; public final static int DEFAULT_ADMIN_PORT = 7655; private AdminListener _listener; + public AdminManager(RouterContext context) { + _context = context; + _log = context.logManager().getLog(AdminManager.class); + } + public 
String renderStatusHTML() { return ""; } public void shutdown() { - if (_listener != null) { - _log.info("Shutting down admin listener"); - _listener.shutdown(); - _listener = null; - } + if (_listener != null) { + _log.info("Shutting down admin listener"); + _listener.shutdown(); + _listener = null; + } } public void startup() { - int port = DEFAULT_ADMIN_PORT; - String str = Router.getInstance().getConfigSetting(PARAM_ADMIN_PORT); - if (str != null) { - try { - int val = Integer.parseInt(str); - port = val; - } catch (NumberFormatException nfe) { - _log.warn("Invalid admin port specified [" + str + "]", nfe); - } - } - _log.info("Starting up admin listener on port " + port); - startup(port); + int port = DEFAULT_ADMIN_PORT; + String str = _context.router().getConfigSetting(PARAM_ADMIN_PORT); + if (str != null) { + try { + int val = Integer.parseInt(str); + port = val; + } catch (NumberFormatException nfe) { + _log.warn("Invalid admin port specified [" + str + "]", nfe); + } + } + _log.info("Starting up admin listener on port " + port); + startup(port); } private void startup(int port) { - _listener = new AdminListener(port); - I2PThread t = new I2PThread(_listener); - t.setName("Admin Listener"); - t.setDaemon(true); - t.setPriority(Thread.MIN_PRIORITY); - t.start(); + _listener = new AdminListener(_context, port); + I2PThread t = new I2PThread(_listener); + t.setName("Admin Listener"); + t.setDaemon(true); + t.setPriority(Thread.MIN_PRIORITY); + t.start(); } } diff --git a/router/java/src/net/i2p/router/admin/AdminRunner.java b/router/java/src/net/i2p/router/admin/AdminRunner.java index 86982ad0c..d9677e5c8 100644 --- a/router/java/src/net/i2p/router/admin/AdminRunner.java +++ b/router/java/src/net/i2p/router/admin/AdminRunner.java @@ -13,90 +13,96 @@ import net.i2p.data.Hash; import net.i2p.router.Router; import net.i2p.router.peermanager.ProfileOrganizer; import net.i2p.util.Log; +import net.i2p.router.RouterContext; class AdminRunner implements Runnable { - 
private final static Log _log = new Log(AdminRunner.class); + private Log _log; + private RouterContext _context; private Socket _socket; + private StatsGenerator _generator; - public AdminRunner(Socket socket) { - _socket = socket; + public AdminRunner(RouterContext context, Socket socket) { + _context = context; + _log = context.logManager().getLog(AdminRunner.class); + _socket = socket; + _generator = new StatsGenerator(context); } public void run() { - try { - BufferedReader in = new BufferedReader(new InputStreamReader(_socket.getInputStream())); - OutputStream out = _socket.getOutputStream(); - - String command = in.readLine(); - runCommand(command, out); - } catch (IOException ioe) { - _log.error("Error running admin command", ioe); - } + try { + BufferedReader in = new BufferedReader(new InputStreamReader(_socket.getInputStream())); + OutputStream out = _socket.getOutputStream(); + + String command = in.readLine(); + runCommand(command, out); + } catch (IOException ioe) { + _log.error("Error running admin command", ioe); + } } private void runCommand(String command, OutputStream out) throws IOException { - _log.debug("Command [" + command + "]"); - if (command.indexOf("favicon") >= 0) { - reply(out, "this is not a website"); - } else if (command.indexOf("routerStats.html") >= 0) { - reply(out, StatsGenerator.generateStatsPage()); - } else if (command.indexOf("/profile/") >= 0) { - replyText(out, getProfile(command)); - } else if (true || command.indexOf("routerConsole.html") > 0) { - reply(out, Router.getInstance().renderStatusHTML()); - } + _log.debug("Command [" + command + "]"); + if (command.indexOf("favicon") >= 0) { + reply(out, "this is not a website"); + } else if (command.indexOf("routerStats.html") >= 0) { + reply(out, _generator.generateStatsPage()); + } else if (command.indexOf("/profile/") >= 0) { + replyText(out, getProfile(command)); + } else if (true || command.indexOf("routerConsole.html") > 0) { + reply(out, 
_context.router().renderStatusHTML()); + } } private void reply(OutputStream out, String content) throws IOException { - StringBuffer reply = new StringBuffer(10240); - reply.append("HTTP/1.1 200 OK\n"); - reply.append("Connection: close\n"); - reply.append("Cache-control: no-cache\n"); - reply.append("Content-type: text/html\n\n"); - reply.append(content); - try { - out.write(reply.toString().getBytes()); - out.close(); - } catch (IOException ioe) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Error writing out the admin reply:\n" + content); - throw ioe; - } + StringBuffer reply = new StringBuffer(10240); + reply.append("HTTP/1.1 200 OK\n"); + reply.append("Connection: close\n"); + reply.append("Cache-control: no-cache\n"); + reply.append("Content-type: text/html\n\n"); + reply.append(content); + try { + out.write(reply.toString().getBytes()); + out.close(); + } catch (IOException ioe) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Error writing out the admin reply:\n" + content); + throw ioe; + } } private void replyText(OutputStream out, String content) throws IOException { - StringBuffer reply = new StringBuffer(10240); - reply.append("HTTP/1.1 200 OK\n"); - reply.append("Connection: close\n"); - reply.append("Cache-control: no-cache\n"); - reply.append("Content-type: text/plain\n\n"); - reply.append(content); - try { - out.write(reply.toString().getBytes()); - out.close(); - } catch (IOException ioe) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Error writing out the admin reply:\n" + content); - throw ioe; - } + StringBuffer reply = new StringBuffer(10240); + reply.append("HTTP/1.1 200 OK\n"); + reply.append("Connection: close\n"); + reply.append("Cache-control: no-cache\n"); + reply.append("Content-type: text/plain\n\n"); + reply.append(content); + try { + out.write(reply.toString().getBytes()); + out.close(); + } catch (IOException ioe) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Error writing out the admin reply:\n" + content); + throw ioe; + } } 
private String getProfile(String cmd) { - Set peers = ProfileOrganizer._getInstance().selectAllPeers(); - for (Iterator iter = peers.iterator(); iter.hasNext(); ) { - Hash peer = (Hash)iter.next(); - if (cmd.indexOf(peer.toBase64().substring(0,10)) >= 0) { - try { - ByteArrayOutputStream baos = new ByteArrayOutputStream(64*1024); - ProfileOrganizer._getInstance().exportProfile(peer, baos); - return new String(baos.toByteArray()); - } catch (IOException ioe) { - _log.error("Error exporting the profile", ioe); - return "Error exporting the peer profile\n"; - } - } - } - - return "No such peer is being profiled\n"; + Set peers = _context.profileOrganizer().selectAllPeers(); + for (Iterator iter = peers.iterator(); iter.hasNext(); ) { + Hash peer = (Hash)iter.next(); + if (cmd.indexOf(peer.toBase64().substring(0,10)) >= 0) { + try { + ByteArrayOutputStream baos = new ByteArrayOutputStream(64*1024); + _context.profileOrganizer().exportProfile(peer, baos); + return new String(baos.toByteArray()); + } catch (IOException ioe) { + _log.error("Error exporting the profile", ioe); + return "Error exporting the peer profile\n"; + } + } + } + + return "No such peer is being profiled\n"; } } diff --git a/router/java/src/net/i2p/router/admin/StatsGenerator.java b/router/java/src/net/i2p/router/admin/StatsGenerator.java index dfc5f4d56..55fb4d655 100644 --- a/router/java/src/net/i2p/router/admin/StatsGenerator.java +++ b/router/java/src/net/i2p/router/admin/StatsGenerator.java @@ -18,190 +18,196 @@ import net.i2p.stat.Rate; import net.i2p.stat.RateStat; import net.i2p.stat.StatManager; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Dump the stats to the web admin interface */ public class StatsGenerator { - private final static Log _log = new Log(StatsGenerator.class); - - public static String generateStatsPage() { - ByteArrayOutputStream baos = new ByteArrayOutputStream(32*1024); - try { - generateStatsPage(baos); - } catch (IOException ioe) { - 
_log.error("Error generating stats", ioe); - } - return new String(baos.toByteArray()); + private Log _log; + private RouterContext _context; + public StatsGenerator(RouterContext context) { + _context = context; + _log = context.logManager().getLog(StatsGenerator.class); } - public static void generateStatsPage(OutputStream out) throws IOException { - PrintWriter pw = new PrintWriter(out); - pw.println("I2P Router Stats"); - pw.println("

    Router statistics

    "); - pw.println("console | stats
    "); - Map groups = StatManager.getInstance().getStatsByGroup(); - - pw.println(""); - pw.println(""); - pw.println(""); - - pw.print("Statistics gathered during this router's uptime ("); - long uptime = Router.getInstance().getUptime(); - pw.print(DataHelper.formatDuration(uptime)); - pw.println("). The data gathered is quantized over a 1 minute period, so should just be used as an estimate

    "); - - for (Iterator iter = groups.keySet().iterator(); iter.hasNext(); ) { - String group = (String)iter.next(); - Set stats = (Set)groups.get(group); - pw.print("

    "); - pw.print(group); - pw.println("

    "); - pw.println("
      "); - for (Iterator statIter = stats.iterator(); statIter.hasNext(); ) { - String stat = (String)statIter.next(); - pw.print("
    • "); - pw.print(stat); - pw.println("
      "); - if (StatManager.getInstance().isFrequency(stat)) - renderFrequency(stat, pw); - else - renderRate(stat, pw); - } - pw.println("

    "); - } - pw.println(""); - pw.flush(); + public String generateStatsPage() { + ByteArrayOutputStream baos = new ByteArrayOutputStream(32*1024); + try { + generateStatsPage(baos); + } catch (IOException ioe) { + _log.error("Error generating stats", ioe); + } + return new String(baos.toByteArray()); } - private static void renderFrequency(String name, PrintWriter pw) throws IOException { - FrequencyStat freq = StatManager.getInstance().getFrequency(name); - pw.print(""); - pw.print(freq.getDescription()); - pw.println("
    "); - long periods[] = freq.getPeriods(); - Arrays.sort(periods); - for (int i = 0; i < periods.length; i++) { - renderPeriod(pw, periods[i], "frequency"); - Frequency curFreq = freq.getFrequency(periods[i]); - pw.print(" avg per period: ("); - pw.print(num(curFreq.getAverageEventsPerPeriod())); - pw.print(", max "); - pw.print(num(curFreq.getMaxAverageEventsPerPeriod())); - if ( (curFreq.getMaxAverageEventsPerPeriod() > 0) && (curFreq.getAverageEventsPerPeriod() > 0) ) { - pw.print(", current is "); - pw.print(pct(curFreq.getAverageEventsPerPeriod()/curFreq.getMaxAverageEventsPerPeriod())); - pw.print(" of max"); - } - pw.print(")"); - //buf.append(" avg interval between updates: (").append(num(curFreq.getAverageInterval())).append("ms, min "); - //buf.append(num(curFreq.getMinAverageInterval())).append("ms)"); - pw.print(" strict average per period: "); - pw.print(num(curFreq.getStrictAverageEventsPerPeriod())); - pw.print(" events (averaged "); - pw.print(" using the lifetime of "); - pw.print(num(curFreq.getEventCount())); - pw.print(" events)"); - pw.println("
    "); - } - pw.println("
    "); + public void generateStatsPage(OutputStream out) throws IOException { + PrintWriter pw = new PrintWriter(out); + pw.println("I2P Router Stats"); + pw.println("

    Router statistics

    "); + pw.println("console | stats
    "); + Map groups = _context.statManager().getStatsByGroup(); + + pw.println(""); + pw.println(""); + pw.println(""); + + pw.print("Statistics gathered during this router's uptime ("); + long uptime = _context.router().getUptime(); + pw.print(DataHelper.formatDuration(uptime)); + pw.println("). The data gathered is quantized over a 1 minute period, so should just be used as an estimate

    "); + + for (Iterator iter = groups.keySet().iterator(); iter.hasNext(); ) { + String group = (String)iter.next(); + Set stats = (Set)groups.get(group); + pw.print("

    "); + pw.print(group); + pw.println("

    "); + pw.println("
      "); + for (Iterator statIter = stats.iterator(); statIter.hasNext(); ) { + String stat = (String)statIter.next(); + pw.print("
    • "); + pw.print(stat); + pw.println("
      "); + if (_context.statManager().isFrequency(stat)) + renderFrequency(stat, pw); + else + renderRate(stat, pw); + } + pw.println("

    "); + } + pw.println(""); + pw.flush(); } - private static void renderRate(String name, PrintWriter pw) throws IOException { - RateStat rate = StatManager.getInstance().getRate(name); - pw.print(""); - pw.print(rate.getDescription()); - pw.println("
    "); - long periods[] = rate.getPeriods(); - Arrays.sort(periods); - pw.println("
      "); - for (int i = 0; i < periods.length; i++) { - pw.println("
    • "); - renderPeriod(pw, periods[i], "rate"); - Rate curRate = rate.getRate(periods[i]); - pw.print( "avg value: ("); - pw.print(num(curRate.getAverageValue())); - pw.print(" peak "); - pw.print(num(curRate.getExtremeAverageValue())); - pw.print(", ["); - pw.print(pct(curRate.getPercentageOfExtremeValue())); - pw.print(" of max"); - pw.print(", and "); - pw.print(pct(curRate.getPercentageOfLifetimeValue())); - pw.print(" of lifetime average]"); - - pw.print(")"); - pw.print(" highest total period value: ("); - pw.print(num(curRate.getExtremeTotalValue())); - pw.print(")"); - if (curRate.getLifetimeTotalEventTime() > 0) { - pw.print(" saturation: ("); - pw.print(pct(curRate.getLastEventSaturation())); - pw.print(")"); - pw.print(" saturated limit: ("); - pw.print(num(curRate.getLastSaturationLimit())); - pw.print(")"); - pw.print(" peak saturation: ("); - pw.print(pct(curRate.getExtremeEventSaturation())); - pw.print(")"); - pw.print(" peak saturated limit: ("); - pw.print(num(curRate.getExtremeSaturationLimit())); - pw.print(")"); - } - pw.print(" events per period: "); - pw.print(num(curRate.getLastEventCount())); - long numPeriods = curRate.getLifetimePeriods(); - if (numPeriods > 0) { - double avgFrequency = curRate.getLifetimeEventCount() / (double)numPeriods; - double peakFrequency = curRate.getExtremeEventCount(); - pw.print(" (lifetime average: "); - pw.print(num(avgFrequency)); - pw.print(", peak average: "); - pw.print(num(curRate.getExtremeEventCount())); - pw.println(")"); - } - pw.print("
    • "); - if (i + 1 == periods.length) { - // last one, so lets display the strict average - pw.print("
    • lifetime average value: "); - pw.print(num(curRate.getLifetimeAverageValue())); - pw.print(" over "); - pw.print(num(curRate.getLifetimeEventCount())); - pw.println(" events
    • "); - } - } - pw.print("
    "); - pw.println("
    "); + private void renderFrequency(String name, PrintWriter pw) throws IOException { + FrequencyStat freq = _context.statManager().getFrequency(name); + pw.print(""); + pw.print(freq.getDescription()); + pw.println("
    "); + long periods[] = freq.getPeriods(); + Arrays.sort(periods); + for (int i = 0; i < periods.length; i++) { + renderPeriod(pw, periods[i], "frequency"); + Frequency curFreq = freq.getFrequency(periods[i]); + pw.print(" avg per period: ("); + pw.print(num(curFreq.getAverageEventsPerPeriod())); + pw.print(", max "); + pw.print(num(curFreq.getMaxAverageEventsPerPeriod())); + if ( (curFreq.getMaxAverageEventsPerPeriod() > 0) && (curFreq.getAverageEventsPerPeriod() > 0) ) { + pw.print(", current is "); + pw.print(pct(curFreq.getAverageEventsPerPeriod()/curFreq.getMaxAverageEventsPerPeriod())); + pw.print(" of max"); + } + pw.print(")"); + //buf.append(" avg interval between updates: (").append(num(curFreq.getAverageInterval())).append("ms, min "); + //buf.append(num(curFreq.getMinAverageInterval())).append("ms)"); + pw.print(" strict average per period: "); + pw.print(num(curFreq.getStrictAverageEventsPerPeriod())); + pw.print(" events (averaged "); + pw.print(" using the lifetime of "); + pw.print(num(curFreq.getEventCount())); + pw.print(" events)"); + pw.println("
    "); + } + pw.println("
    "); + } + + private void renderRate(String name, PrintWriter pw) throws IOException { + RateStat rate = _context.statManager().getRate(name); + pw.print(""); + pw.print(rate.getDescription()); + pw.println("
    "); + long periods[] = rate.getPeriods(); + Arrays.sort(periods); + pw.println("
      "); + for (int i = 0; i < periods.length; i++) { + pw.println("
    • "); + renderPeriod(pw, periods[i], "rate"); + Rate curRate = rate.getRate(periods[i]); + pw.print( "avg value: ("); + pw.print(num(curRate.getAverageValue())); + pw.print(" peak "); + pw.print(num(curRate.getExtremeAverageValue())); + pw.print(", ["); + pw.print(pct(curRate.getPercentageOfExtremeValue())); + pw.print(" of max"); + pw.print(", and "); + pw.print(pct(curRate.getPercentageOfLifetimeValue())); + pw.print(" of lifetime average]"); + + pw.print(")"); + pw.print(" highest total period value: ("); + pw.print(num(curRate.getExtremeTotalValue())); + pw.print(")"); + if (curRate.getLifetimeTotalEventTime() > 0) { + pw.print(" saturation: ("); + pw.print(pct(curRate.getLastEventSaturation())); + pw.print(")"); + pw.print(" saturated limit: ("); + pw.print(num(curRate.getLastSaturationLimit())); + pw.print(")"); + pw.print(" peak saturation: ("); + pw.print(pct(curRate.getExtremeEventSaturation())); + pw.print(")"); + pw.print(" peak saturated limit: ("); + pw.print(num(curRate.getExtremeSaturationLimit())); + pw.print(")"); + } + pw.print(" events per period: "); + pw.print(num(curRate.getLastEventCount())); + long numPeriods = curRate.getLifetimePeriods(); + if (numPeriods > 0) { + double avgFrequency = curRate.getLifetimeEventCount() / (double)numPeriods; + double peakFrequency = curRate.getExtremeEventCount(); + pw.print(" (lifetime average: "); + pw.print(num(avgFrequency)); + pw.print(", peak average: "); + pw.print(num(curRate.getExtremeEventCount())); + pw.println(")"); + } + pw.print("
    • "); + if (i + 1 == periods.length) { + // last one, so lets display the strict average + pw.print("
    • lifetime average value: "); + pw.print(num(curRate.getLifetimeAverageValue())); + pw.print(" over "); + pw.print(num(curRate.getLifetimeEventCount())); + pw.println(" events
    • "); + } + } + pw.print("
    "); + pw.println("
    "); } private static void renderPeriod(PrintWriter pw, long period, String name) throws IOException { - pw.print(""); - pw.print(DataHelper.formatDuration(period)); - pw.print(" "); - pw.print(name); - pw.print(": "); + pw.print(""); + pw.print(DataHelper.formatDuration(period)); + pw.print(" "); + pw.print(name); + pw.print(": "); } private final static DecimalFormat _fmt = new DecimalFormat("###,##0.00"); diff --git a/router/java/src/net/i2p/router/client/ClientConnectionRunner.java b/router/java/src/net/i2p/router/client/ClientConnectionRunner.java index 478c92482..659c13864 100644 --- a/router/java/src/net/i2p/router/client/ClientConnectionRunner.java +++ b/router/java/src/net/i2p/router/client/ClientConnectionRunner.java @@ -34,6 +34,7 @@ import net.i2p.router.Job; import net.i2p.router.JobImpl; import net.i2p.router.JobQueue; import net.i2p.router.NetworkDatabaseFacade; +import net.i2p.router.RouterContext; import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.RandomSource; @@ -44,7 +45,8 @@ import net.i2p.util.RandomSource; * @author jrandom */ public class ClientConnectionRunner { - private final static Log _log = new Log(ClientConnectionRunner.class); + private Log _log; + private RouterContext _context; private ClientManager _manager; /** socket for this particular peer connection */ private Socket _socket; @@ -76,14 +78,16 @@ public class ClientConnectionRunner { * Create a new runner against the given socket * */ - public ClientConnectionRunner(ClientManager manager, Socket socket) { - _manager = manager; - _socket = socket; - _config = null; - _messages = new HashMap(); - _alreadyProcessed = new LinkedList(); - _acceptedPending = new HashSet(); - _dead = false; + public ClientConnectionRunner(RouterContext context, ClientManager manager, Socket socket) { + _context = context; + _log = _context.logManager().getLog(ClientConnectionRunner.class); + _manager = manager; + _socket = socket; + _config = null; + _messages = new HashMap(); 
+ _alreadyProcessed = new LinkedList(); + _acceptedPending = new HashSet(); + _dead = false; } /** @@ -93,35 +97,37 @@ public class ClientConnectionRunner { * */ public void startRunning() { - try { - _reader = new I2CPMessageReader(_socket.getInputStream(), new ClientMessageEventListener(this)); - _out = _socket.getOutputStream(); - _reader.startReading(); - } catch (IOException ioe) { - _log.error("Error starting up the runner", ioe); - } + try { + _reader = new I2CPMessageReader(_socket.getInputStream(), new ClientMessageEventListener(_context, this)); + _out = _socket.getOutputStream(); + _reader.startReading(); + } catch (IOException ioe) { + _log.error("Error starting up the runner", ioe); + } } /** die a horrible death */ void stopRunning() { - if (_dead) return; - _log.error("Stop the I2CP connection! current leaseSet: " + _currentLeaseSet, new Exception("Stop client connection")); - _dead = true; - // we need these keys to unpublish the leaseSet - if (_reader != null) _reader.stopReading(); - if (_socket != null) try { _socket.close(); } catch (IOException ioe) { } - synchronized (_messages) { - _messages.clear(); - } - _manager.unregisterConnection(this); - if (_currentLeaseSet != null) - NetworkDatabaseFacade.getInstance().unpublish(_currentLeaseSet); - _leaseRequest = null; - synchronized (_alreadyProcessed) { - _alreadyProcessed.clear(); - } - _config = null; - _manager = null; + if (_dead) return; + _log.error("Stop the I2CP connection! 
current leaseSet: " + + _currentLeaseSet, new Exception("Stop client connection")); + _dead = true; + // we need these keys to unpublish the leaseSet + if (_reader != null) _reader.stopReading(); + if (_socket != null) try { _socket.close(); } catch (IOException ioe) { } + synchronized (_messages) { + _messages.clear(); + } + _manager.unregisterConnection(this); + if (_currentLeaseSet != null) + _context.netDb().unpublish(_currentLeaseSet); + _leaseRequest = null; + synchronized (_alreadyProcessed) { + _alreadyProcessed.clear(); + } + _config = null; + _manager = null; + _context = null; } /** current client's config */ @@ -144,43 +150,43 @@ public class ClientConnectionRunner { void removePayload(MessageId id) { synchronized (_messages) { _messages.remove(id); } } void sessionEstablished(SessionConfig config) { - _config = config; - _manager.destinationEstablished(this); + _config = config; + _manager.destinationEstablished(this); } void updateMessageDeliveryStatus(MessageId id, boolean delivered) { - if (_dead) return; - JobQueue.getInstance().addJob(new MessageDeliveryStatusUpdate(id, delivered)); + if (_dead) return; + _context.jobQueue().addJob(new MessageDeliveryStatusUpdate(id, delivered)); } /** * called after a new leaseSet is granted by the client, the NetworkDb has been * updated. This takes care of all the LeaseRequestState stuff (including firing any jobs) */ void leaseSetCreated(LeaseSet ls) { - if (_leaseRequest == null) { - _log.error("LeaseRequest is null and we've received a new lease?! WTF"); - return; - } else { - _leaseRequest.setIsSuccessful(true); - if (_leaseRequest.getOnGranted() != null) - JobQueue.getInstance().addJob(_leaseRequest.getOnGranted()); - _leaseRequest = null; - _currentLeaseSet = ls; - } + if (_leaseRequest == null) { + _log.error("LeaseRequest is null and we've received a new lease?! 
WTF"); + return; + } else { + _leaseRequest.setIsSuccessful(true); + if (_leaseRequest.getOnGranted() != null) + _context.jobQueue().addJob(_leaseRequest.getOnGranted()); + _leaseRequest = null; + _currentLeaseSet = ls; + } } void disconnectClient(String reason) { - _log.error("Disconnecting the client: " + reason, new Exception("Disconnecting!")); - DisconnectMessage msg = new DisconnectMessage(); - msg.setReason(reason); - try { - doSend(msg); - } catch (I2CPMessageException ime) { - _log.error("Error writing out the disconnect message", ime); - } catch (IOException ioe) { - _log.error("Error writing out the disconnect message", ioe); - } - stopRunning(); + _log.error("Disconnecting the client: " + reason, new Exception("Disconnecting!")); + DisconnectMessage msg = new DisconnectMessage(); + msg.setReason(reason); + try { + doSend(msg); + } catch (I2CPMessageException ime) { + _log.error("Error writing out the disconnect message", ime); + } catch (IOException ioe) { + _log.error("Error writing out the disconnect message", ioe); + } + stopRunning(); } /** @@ -190,17 +196,20 @@ public class ClientConnectionRunner { * */ MessageId distributeMessage(SendMessageMessage message) { - Payload payload = message.getPayload(); - Destination dest = message.getDestination(); - MessageId id = new MessageId(); - id.setMessageId(getNextMessageId()); - synchronized (_acceptedPending) { - _acceptedPending.add(id); - } - _log.debug("** Recieving message [" + id.getMessageId() + "] with payload of size [" + payload.getSize() + "]" + " for session [" + _sessionId.getSessionId() + "]"); - // the following blocks as described above - _manager.distributeMessage(_config.getDestination(), message.getDestination(), message.getPayload(), id); - return id; + Payload payload = message.getPayload(); + Destination dest = message.getDestination(); + MessageId id = new MessageId(); + id.setMessageId(getNextMessageId()); + synchronized (_acceptedPending) { + _acceptedPending.add(id); + } + if 
(_log.shouldLog(Log.DEBUG)) + _log.debug("** Recieving message [" + id.getMessageId() + "] with payload of size [" + + payload.getSize() + "]" + " for session [" + _sessionId.getSessionId() + + "]"); + // the following blocks as described above + _manager.distributeMessage(_config.getDestination(), message.getDestination(), message.getPayload(), id); + return id; } /** @@ -209,23 +218,25 @@ public class ClientConnectionRunner { * */ void ackSendMessage(MessageId id, long nonce) { - _log.debug("Acking message send [accepted]" + id + " / " + nonce + " for sessionId " + _sessionId, new Exception("sendAccepted")); - MessageStatusMessage status = new MessageStatusMessage(); - status.setMessageId(id); - status.setSessionId(_sessionId); - status.setSize(0L); - status.setNonce(nonce); - status.setStatus(MessageStatusMessage.STATUS_SEND_ACCEPTED); - try { - doSend(status); - synchronized (_acceptedPending) { - _acceptedPending.remove(id); - } - } catch (I2CPMessageException ime) { - _log.error("Error writing out the message status message", ime); - } catch (IOException ioe) { - _log.error("Error writing out the message status message", ioe); - } + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Acking message send [accepted]" + id + " / " + nonce + " for sessionId " + + _sessionId, new Exception("sendAccepted")); + MessageStatusMessage status = new MessageStatusMessage(); + status.setMessageId(id); + status.setSessionId(_sessionId); + status.setSize(0L); + status.setNonce(nonce); + status.setStatus(MessageStatusMessage.STATUS_SEND_ACCEPTED); + try { + doSend(status); + synchronized (_acceptedPending) { + _acceptedPending.remove(id); + } + } catch (I2CPMessageException ime) { + _log.error("Error writing out the message status message", ime); + } catch (IOException ioe) { + _log.error("Error writing out the message status message", ioe); + } } /** @@ -233,8 +244,8 @@ public class ClientConnectionRunner { * */ void receiveMessage(Destination toDest, Destination fromDest, Payload 
payload) { - if (_dead) return; - JobQueue.getInstance().addJob(new MessageReceivedJob(this, toDest, fromDest, payload)); + if (_dead) return; + _context.jobQueue().addJob(new MessageReceivedJob(_context, this, toDest, fromDest, payload)); } /** @@ -242,8 +253,8 @@ public class ClientConnectionRunner { * */ public void reportAbuse(String reason, int severity) { - if (_dead) return; - JobQueue.getInstance().addJob(new ReportAbuseJob(this, reason, severity)); + if (_dead) return; + _context.jobQueue().addJob(new ReportAbuseJob(_context, this, reason, severity)); } /** @@ -259,13 +270,13 @@ public class ClientConnectionRunner { * @param onFailedJob Job to run after the timeout passes without receiving authorization */ void requestLeaseSet(LeaseSet set, long expirationTime, Job onCreateJob, Job onFailedJob) { - if (_dead) return; - JobQueue.getInstance().addJob(new RequestLeaseSetJob(this, set, expirationTime, onCreateJob, onFailedJob)); + if (_dead) return; + _context.jobQueue().addJob(new RequestLeaseSetJob(_context, this, set, expirationTime, onCreateJob, onFailedJob)); } void disconnected() { - _log.error("Disconnected", new Exception("Disconnected?")); - stopRunning(); + _log.error("Disconnected", new Exception("Disconnected?")); + stopRunning(); } //// @@ -276,29 +287,30 @@ public class ClientConnectionRunner { * */ void doSend(I2CPMessage msg) throws I2CPMessageException, IOException { - if (_out == null) throw new I2CPMessageException("Output stream is not initialized"); - long before = Clock.getInstance().now(); - try { - synchronized (_out) { - msg.writeMessage(_out); - _out.flush(); - } - } catch (I2CPMessageException ime) { - _log.error("Message exception sending I2CP message", ime); - throw ime; - } catch (IOException ioe) { - _log.error("IO exception sending I2CP message", ioe); - throw ioe; - } catch (Throwable t) { - _log.log(Log.CRIT, "Unhandled exception sending I2CP message", t); - throw new IOException("Unhandled exception sending I2CP message: " + 
t.getMessage()); - } finally { - long after = Clock.getInstance().now(); - long lag = after - before; - if (lag > 300) { - _log.error("synchronization on the i2cp message send took too long (" + lag + "ms): " + msg, new Exception("I2CP Lag")); - } - } + if (_out == null) throw new I2CPMessageException("Output stream is not initialized"); + long before = _context.clock().now(); + try { + synchronized (_out) { + msg.writeMessage(_out); + _out.flush(); + } + } catch (I2CPMessageException ime) { + _log.error("Message exception sending I2CP message", ime); + throw ime; + } catch (IOException ioe) { + _log.error("IO exception sending I2CP message", ioe); + throw ioe; + } catch (Throwable t) { + _log.log(Log.CRIT, "Unhandled exception sending I2CP message", t); + throw new IOException("Unhandled exception sending I2CP message: " + t.getMessage()); + } finally { + long after = _context.clock().now(); + long lag = after - before; + if (lag > 300) { + _log.error("synchronization on the i2cp message send took too long (" + lag + + "ms): " + msg, new Exception("I2CP Lag")); + } + } } // this *should* be mod 65536, but UnsignedInteger is still b0rked. 
FIXME @@ -307,12 +319,12 @@ public class ClientConnectionRunner { private static Object _messageIdLock = new Object(); static int getNextMessageId() { - synchronized (_messageIdLock) { - int messageId = (++_messageId)%MAX_MESSAGE_ID; - if (_messageId >= MAX_MESSAGE_ID) - _messageId = 0; - return messageId; - } + synchronized (_messageIdLock) { + int messageId = (++_messageId)%MAX_MESSAGE_ID; + if (_messageId >= MAX_MESSAGE_ID) + _messageId = 0; + return messageId; + } } /** @@ -321,20 +333,20 @@ public class ClientConnectionRunner { * */ private boolean alreadyAccepted(MessageId id) { - if (_dead) return false; - boolean isPending = false; - int pending = 0; - String buf = null; - synchronized (_acceptedPending) { - if (_acceptedPending.contains(id)) - isPending = true; - pending = _acceptedPending.size(); - buf = _acceptedPending.toString(); - } - if (pending >= 1) { - _log.warn("Pending acks: " + pending + ": " + buf); - } - return !isPending; + if (_dead) return false; + boolean isPending = false; + int pending = 0; + String buf = null; + synchronized (_acceptedPending) { + if (_acceptedPending.contains(id)) + isPending = true; + pending = _acceptedPending.size(); + buf = _acceptedPending.toString(); + } + if (pending >= 1) { + _log.warn("Pending acks: " + pending + ": " + buf); + } + return !isPending; } /** @@ -346,59 +358,73 @@ public class ClientConnectionRunner { private final static long REQUEUE_DELAY = 500; private class MessageDeliveryStatusUpdate extends JobImpl { - private MessageId _messageId; - private boolean _success; - private long _lastTried; - public MessageDeliveryStatusUpdate(MessageId id, boolean success) { - _messageId = id; - _success = success; - _lastTried = 0; - } - - public String getName() { return "Update Delivery Status"; } - public void runJob() { - if (_dead) return; - - MessageStatusMessage msg = new MessageStatusMessage(); - msg.setMessageId(_messageId); - msg.setSessionId(_sessionId); - msg.setNonce(2); - msg.setSize(0); - if 
(_success) - msg.setStatus(MessageStatusMessage.STATUS_SEND_GUARANTEED_SUCCESS); - else - msg.setStatus(MessageStatusMessage.STATUS_SEND_GUARANTEED_FAILURE); + private MessageId _messageId; + private boolean _success; + private long _lastTried; + public MessageDeliveryStatusUpdate(MessageId id, boolean success) { + super(ClientConnectionRunner.this._context); + _messageId = id; + _success = success; + _lastTried = 0; + } - if (!alreadyAccepted(_messageId)) { - _log.warn("Almost send an update for message " + _messageId + " to " + MessageStatusMessage.getStatusString(msg.getStatus()) + " for session [" + _sessionId.getSessionId() + "] before they knew the messageId! delaying .5s"); - _lastTried = Clock.getInstance().now(); - requeue(REQUEUE_DELAY); - return; - } + public String getName() { return "Update Delivery Status"; } + public void runJob() { + if (_dead) return; - synchronized (_alreadyProcessed) { - if (_alreadyProcessed.contains(_messageId)) { - _log.warn("Status already updated"); - return; - } else { - _alreadyProcessed.add(_messageId); - while (_alreadyProcessed.size() > 10) - _alreadyProcessed.remove(0); - } - } - - if (_lastTried > 0) - _log.info("Updating message status for message " + _messageId + " to " + MessageStatusMessage.getStatusString(msg.getStatus()) + " for session [" + _sessionId.getSessionId() + "] (with nonce=2), retrying after [" + (Clock.getInstance().now() - _lastTried) + "]", getAddedBy()); - else - _log.debug("Updating message status for message " + _messageId + " to " + MessageStatusMessage.getStatusString(msg.getStatus()) + " for session [" + _sessionId.getSessionId() + "] (with nonce=2)"); + MessageStatusMessage msg = new MessageStatusMessage(); + msg.setMessageId(_messageId); + msg.setSessionId(_sessionId); + msg.setNonce(2); + msg.setSize(0); + if (_success) + msg.setStatus(MessageStatusMessage.STATUS_SEND_GUARANTEED_SUCCESS); + else + msg.setStatus(MessageStatusMessage.STATUS_SEND_GUARANTEED_FAILURE); - try { - doSend(msg); - 
} catch (I2CPMessageException ime) { - _log.warn("Error updating the status for message ID " + _messageId, ime); - } catch (IOException ioe) { - _log.warn("Error updating the status for message ID " + _messageId, ioe); - } - } + if (!alreadyAccepted(_messageId)) { + _log.warn("Almost send an update for message " + _messageId + " to " + + MessageStatusMessage.getStatusString(msg.getStatus()) + + " for session [" + _sessionId.getSessionId() + + "] before they knew the messageId! delaying .5s"); + _lastTried = ClientConnectionRunner.this._context.clock().now(); + requeue(REQUEUE_DELAY); + return; + } + + synchronized (_alreadyProcessed) { + if (_alreadyProcessed.contains(_messageId)) { + _log.warn("Status already updated"); + return; + } else { + _alreadyProcessed.add(_messageId); + while (_alreadyProcessed.size() > 10) + _alreadyProcessed.remove(0); + } + } + + if (_lastTried > 0) { + if (_log.shouldLog(Log.DEBUG)) + _log.info("Updating message status for message " + _messageId + " to " + + MessageStatusMessage.getStatusString(msg.getStatus()) + + " for session [" + _sessionId.getSessionId() + + "] (with nonce=2), retrying after [" + + (ClientConnectionRunner.this._context.clock().now() - _lastTried) + + "]", getAddedBy()); + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Updating message status for message " + _messageId + " to " + + MessageStatusMessage.getStatusString(msg.getStatus()) + + " for session [" + _sessionId.getSessionId() + "] (with nonce=2)"); + } + + try { + doSend(msg); + } catch (I2CPMessageException ime) { + _log.warn("Error updating the status for message ID " + _messageId, ime); + } catch (IOException ioe) { + _log.warn("Error updating the status for message ID " + _messageId, ioe); + } + } } } diff --git a/router/java/src/net/i2p/router/client/ClientListenerRunner.java b/router/java/src/net/i2p/router/client/ClientListenerRunner.java index 664f17b8b..3db930154 100644 --- a/router/java/src/net/i2p/router/client/ClientListenerRunner.java 
+++ b/router/java/src/net/i2p/router/client/ClientListenerRunner.java @@ -14,6 +14,7 @@ import java.net.Socket; import net.i2p.client.I2PClient; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Listen for connections on the specified port, and toss them onto the client manager's @@ -22,17 +23,20 @@ import net.i2p.util.Log; * @author jrandom */ public class ClientListenerRunner implements Runnable { - private final static Log _log = new Log(ClientListenerRunner.class); + private Log _log; + private RouterContext _context; private ClientManager _manager; private ServerSocket _socket; private int _port; private boolean _running; private long _nextFailDelay = 1000; - public ClientListenerRunner(ClientManager manager, int port) { - _manager = manager; - _port = port; - _running = false; + public ClientListenerRunner(RouterContext context, ClientManager manager, int port) { + _context = context; + _log = _context.logManager().getLog(ClientListenerRunner.class); + _manager = manager; + _port = port; + _running = false; } public void setPort(int port) { _port = port; } @@ -49,83 +53,83 @@ public class ClientListenerRunner implements Runnable { * */ public void runServer() { - _running = true; - int curDelay = 0; - while ( (_running) && (curDelay < MAX_FAIL_DELAY) ) { - try { - _log.info("Starting up listening for connections on port " + _port); - _socket = new ServerSocket(_port); - curDelay = 0; - while (_running) { - try { - Socket socket = _socket.accept(); - if (validate(socket)) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Connection received"); - runConnection(socket); - } else { - socket.close(); - if (_log.shouldLog(Log.WARN)) - _log.warn("Refused connection from " + socket.getInetAddress().toString()); - } - } catch (IOException ioe) { - _log.error("Server error accepting", ioe); - } catch (Throwable t) { - _log.error("Fatal error running client listener - killing the thread!", t); - return; - } - } - } catch (IOException ioe) { - 
_log.error("Error listening on port " + _port, ioe); - } - - if (_socket != null) { - try { _socket.close(); } catch (IOException ioe) {} - _socket = null; - } - - _log.error("Error listening, waiting " + _nextFailDelay + "ms before we try again"); - try { Thread.sleep(_nextFailDelay); } catch (InterruptedException ie) {} - curDelay += _nextFailDelay; - _nextFailDelay *= 5; - } - - _log.error("CANCELING I2CP LISTEN. delay = " + curDelay, new Exception("I2CP Listen cancelled!!!")); - _running = false; + _running = true; + int curDelay = 0; + while ( (_running) && (curDelay < MAX_FAIL_DELAY) ) { + try { + _log.info("Starting up listening for connections on port " + _port); + _socket = new ServerSocket(_port); + curDelay = 0; + while (_running) { + try { + Socket socket = _socket.accept(); + if (validate(socket)) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Connection received"); + runConnection(socket); + } else { + socket.close(); + if (_log.shouldLog(Log.WARN)) + _log.warn("Refused connection from " + socket.getInetAddress()); + } + } catch (IOException ioe) { + _log.error("Server error accepting", ioe); + } catch (Throwable t) { + _log.error("Fatal error running client listener - killing the thread!", t); + return; + } + } + } catch (IOException ioe) { + _log.error("Error listening on port " + _port, ioe); + } + + if (_socket != null) { + try { _socket.close(); } catch (IOException ioe) {} + _socket = null; + } + + _log.error("Error listening, waiting " + _nextFailDelay + "ms before we try again"); + try { Thread.sleep(_nextFailDelay); } catch (InterruptedException ie) {} + curDelay += _nextFailDelay; + _nextFailDelay *= 5; + } + + _log.error("CANCELING I2CP LISTEN. 
delay = " + curDelay, new Exception("I2CP Listen cancelled!!!")); + _running = false; } /** give the i2cp client 5 seconds to show that they're really i2cp clients */ private final static int CONNECT_TIMEOUT = 5*1000; private boolean validate(Socket socket) { - try { - socket.setSoTimeout(CONNECT_TIMEOUT); - int read = socket.getInputStream().read(); - if (read != I2PClient.PROTOCOL_BYTE) - return false; - socket.setSoTimeout(0); - return true; - } catch (IOException ioe) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Peer did not authenticate themselves as I2CP quickly enough, dropping"); - return false; - } + try { + socket.setSoTimeout(CONNECT_TIMEOUT); + int read = socket.getInputStream().read(); + if (read != I2PClient.PROTOCOL_BYTE) + return false; + socket.setSoTimeout(0); + return true; + } catch (IOException ioe) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Peer did not authenticate themselves as I2CP quickly enough, dropping"); + return false; + } } /** * Handle the connection by passing it off to a {@link ClientConnectionRunner ClientConnectionRunner} * */ protected void runConnection(Socket socket) throws IOException { - ClientConnectionRunner runner = new ClientConnectionRunner(_manager, socket); - _manager.registerConnection(runner); + ClientConnectionRunner runner = new ClientConnectionRunner(_context, _manager, socket); + _manager.registerConnection(runner); } public void stopListening() { - _running = false; - if (_socket != null) try { - _socket.close(); - _socket = null; - } catch (IOException ioe) {} + _running = false; + if (_socket != null) try { + _socket.close(); + _socket = null; + } catch (IOException ioe) {} } public void run() { runServer(); } } diff --git a/router/java/src/net/i2p/router/client/ClientManager.java b/router/java/src/net/i2p/router/client/ClientManager.java index da3f731d3..6c478cb0b 100644 --- a/router/java/src/net/i2p/router/client/ClientManager.java +++ b/router/java/src/net/i2p/router/client/ClientManager.java @@ 
-29,6 +29,7 @@ import net.i2p.stat.StatManager; import net.i2p.util.Clock; import net.i2p.util.I2PThread; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Coordinate connections and various tasks @@ -36,108 +37,109 @@ import net.i2p.util.Log; * @author jrandom */ public class ClientManager { - private final static Log _log = new Log(ClientManager.class); + private Log _log; private ClientListenerRunner _listener; private HashMap _runners; // Destination --> ClientConnectionRunner private Set _pendingRunners; // ClientConnectionRunner for clients w/out a Dest yet + private RouterContext _context; /** ms to wait before rechecking for inbound messages to deliver to clients */ private final static int INBOUND_POLL_INTERVAL = 300; - static { - StatManager.getInstance().createRateStat("client.receiveMessageSize", "How large are messages received by the client?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - } - - public ClientManager(int port) { - _runners = new HashMap(); - _pendingRunners = new HashSet(); - _listener = new ClientListenerRunner(this, port); - Thread t = new I2PThread(_listener); - t.setName("ClientListener"); - t.setDaemon(true); - t.start(); - - //JobQueue.getInstance().addJob(new CheckInboundMessagesJob()); + public ClientManager(RouterContext context, int port) { + _context = context; + _log = context.logManager().getLog(ClientManager.class); + _context.statManager().createRateStat("client.receiveMessageSize", + "How large are messages received by the client?", + "Client Messages", + new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + _runners = new HashMap(); + _pendingRunners = new HashSet(); + _listener = new ClientListenerRunner(_context, this, port); + Thread t = new I2PThread(_listener); + t.setName("ClientListener"); + t.setDaemon(true); + t.start(); } public void shutdown() { - _log.info("Shutting down the ClientManager"); - _listener.stopListening(); - Set runners = new HashSet(); - 
synchronized (_runners) { - for (Iterator iter = _runners.values().iterator(); iter.hasNext();) { - ClientConnectionRunner runner = (ClientConnectionRunner)iter.next(); - runners.add(runner); - } - } - synchronized (_pendingRunners) { - for (Iterator iter = _pendingRunners.iterator(); iter.hasNext();) { - ClientConnectionRunner runner = (ClientConnectionRunner)iter.next(); - runners.add(runner); - } - } - for (Iterator iter = runners.iterator(); iter.hasNext(); ) { - ClientConnectionRunner runner = (ClientConnectionRunner)iter.next(); - runner.stopRunning(); - } + _log.info("Shutting down the ClientManager"); + _listener.stopListening(); + Set runners = new HashSet(); + synchronized (_runners) { + for (Iterator iter = _runners.values().iterator(); iter.hasNext();) { + ClientConnectionRunner runner = (ClientConnectionRunner)iter.next(); + runners.add(runner); + } + } + synchronized (_pendingRunners) { + for (Iterator iter = _pendingRunners.iterator(); iter.hasNext();) { + ClientConnectionRunner runner = (ClientConnectionRunner)iter.next(); + runners.add(runner); + } + } + for (Iterator iter = runners.iterator(); iter.hasNext(); ) { + ClientConnectionRunner runner = (ClientConnectionRunner)iter.next(); + runner.stopRunning(); + } } public void registerConnection(ClientConnectionRunner runner) { - synchronized (_pendingRunners) { - _pendingRunners.add(runner); - } - runner.startRunning(); + synchronized (_pendingRunners) { + _pendingRunners.add(runner); + } + runner.startRunning(); } public void unregisterConnection(ClientConnectionRunner runner) { - _log.warn("Unregistering (dropping) a client connection"); - synchronized (_pendingRunners) { - _pendingRunners.remove(runner); - } - if ( (runner.getConfig() != null) && (runner.getConfig().getDestination() != null) ) { - // after connection establishment - synchronized (_runners) { - _runners.remove(runner.getConfig().getDestination()); - } - } + _log.warn("Unregistering (dropping) a client connection"); + synchronized 
(_pendingRunners) { + _pendingRunners.remove(runner); + } + if ( (runner.getConfig() != null) && (runner.getConfig().getDestination() != null) ) { + // after connection establishment + synchronized (_runners) { + _runners.remove(runner.getConfig().getDestination()); + } + } } public void destinationEstablished(ClientConnectionRunner runner) { - synchronized (_pendingRunners) { - _pendingRunners.remove(runner); - } - synchronized (_runners) { - _runners.put(runner.getConfig().getDestination(), runner); - } + synchronized (_pendingRunners) { + _pendingRunners.remove(runner); + } + synchronized (_runners) { + _runners.put(runner.getConfig().getDestination(), runner); + } } void distributeMessage(Destination fromDest, Destination toDest, Payload payload, MessageId msgId) { - // check if there is a runner for it - ClientConnectionRunner runner = getRunner(toDest); - if (runner != null) { - _log.debug("Message " + msgId + " is targeting a local destination. distribute it as such"); - runner.receiveMessage(toDest, fromDest, payload); - if (fromDest != null) { - ClientConnectionRunner sender = getRunner(fromDest); - if (sender != null) { - sender.updateMessageDeliveryStatus(msgId, true); - } else { - _log.log(Log.CRIT, "Um, wtf, we're sending a local message, but we can't find who sent it?", new Exception("wtf")); - } - } - } else { - // remote. w00t - _log.debug("Message " + msgId + " is targeting a REMOTE destination! 
Added to the client message pool"); - runner = getRunner(fromDest); - ClientMessage msg = new ClientMessage(); - msg.setDestination(toDest); - msg.setPayload(payload); - msg.setReceptionInfo(null); - msg.setSenderConfig(runner.getConfig()); - msg.setFromDestination(runner.getConfig().getDestination()); - msg.setMessageId(msgId); - ClientMessagePool.getInstance().add(msg); - } + // check if there is a runner for it + ClientConnectionRunner runner = getRunner(toDest); + if (runner != null) { + _log.debug("Message " + msgId + " is targeting a local destination. distribute it as such"); + runner.receiveMessage(toDest, fromDest, payload); + if (fromDest != null) { + ClientConnectionRunner sender = getRunner(fromDest); + if (sender != null) { + sender.updateMessageDeliveryStatus(msgId, true); + } else { + _log.log(Log.CRIT, "Um, wtf, we're sending a local message, but we can't find who sent it?", new Exception("wtf")); + } + } + } else { + // remote. w00t + _log.debug("Message " + msgId + " is targeting a REMOTE destination! Added to the client message pool"); + runner = getRunner(fromDest); + ClientMessage msg = new ClientMessage(); + msg.setDestination(toDest); + msg.setPayload(payload); + msg.setReceptionInfo(null); + msg.setSenderConfig(runner.getConfig()); + msg.setFromDestination(runner.getConfig().getDestination()); + msg.setMessageId(msgId); + _context.clientMessagePool().add(msg); + } } @@ -155,39 +157,40 @@ public class ClientManager { * @param onFailedJob Job to run after the timeout passes without receiving authorization */ public void requestLeaseSet(Destination dest, LeaseSet set, long timeout, Job onCreateJob, Job onFailedJob) { - ClientConnectionRunner runner = getRunner(dest); - if (runner == null) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Cannot request the lease set, as we can't find a client runner for " + dest.calculateHash().toBase64() + ". 
disconnected?"); - JobQueue.getInstance().addJob(onFailedJob); - } else { - runner.requestLeaseSet(set, Clock.getInstance().now() + timeout, onCreateJob, onFailedJob); - } + ClientConnectionRunner runner = getRunner(dest); + if (runner == null) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Cannot request the lease set, as we can't find a client runner for " + + dest.calculateHash().toBase64() + ". disconnected?"); + _context.jobQueue().addJob(onFailedJob); + } else { + runner.requestLeaseSet(set, _context.clock().now() + timeout, onCreateJob, onFailedJob); + } } public boolean isLocal(Destination dest) { - synchronized (_runners) { - return (_runners.containsKey(dest)); - } + synchronized (_runners) { + return (_runners.containsKey(dest)); + } } public boolean isLocal(Hash destHash) { - if (destHash == null) return false; - Set dests = new HashSet(); - synchronized (_runners) { - dests.addAll(_runners.keySet()); - } - for (Iterator iter = dests.iterator(); iter.hasNext();) { - Destination d = (Destination)iter.next(); - if (d.calculateHash().equals(destHash)) return true; - } - return false; + if (destHash == null) return false; + Set dests = new HashSet(); + synchronized (_runners) { + dests.addAll(_runners.keySet()); + } + for (Iterator iter = dests.iterator(); iter.hasNext();) { + Destination d = (Destination)iter.next(); + if (d.calculateHash().equals(destHash)) return true; + } + return false; } private ClientConnectionRunner getRunner(Destination dest) { - synchronized (_runners) { - return (ClientConnectionRunner)_runners.get(dest); - } + synchronized (_runners) { + return (ClientConnectionRunner)_runners.get(dest); + } } /** @@ -195,111 +198,118 @@ public class ClientManager { * */ public SessionConfig getClientSessionConfig(Destination dest) { - ClientConnectionRunner runner = getRunner(dest); - if (runner != null) - return runner.getConfig(); - else - return null; + ClientConnectionRunner runner = getRunner(dest); + if (runner != null) + return 
runner.getConfig(); + else + return null; } private ClientConnectionRunner getRunner(Hash destHash) { - if (destHash == null) - return null; - Set dests = new HashSet(); - synchronized (_runners) { - dests.addAll(_runners.keySet()); - } - for (Iterator iter = dests.iterator(); iter.hasNext(); ) { - Destination d = (Destination)iter.next(); - if (d.calculateHash().equals(destHash)) - return getRunner(d); - } - return null; + if (destHash == null) + return null; + Set dests = new HashSet(); + synchronized (_runners) { + dests.addAll(_runners.keySet()); + } + for (Iterator iter = dests.iterator(); iter.hasNext(); ) { + Destination d = (Destination)iter.next(); + if (d.calculateHash().equals(destHash)) + return getRunner(d); + } + return null; } public void messageDeliveryStatusUpdate(Destination fromDest, MessageId id, boolean delivered) { - ClientConnectionRunner runner = getRunner(fromDest); - if (runner != null) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Delivering status [" + (delivered?"success":"failure") + "] to " + fromDest.calculateHash().toBase64() + " for message " + id); - runner.updateMessageDeliveryStatus(id, delivered); - } else { - if (_log.shouldLog(Log.WARN)) - _log.warn("Cannot deliver status [" + (delivered?"success":"failure") + "] to " + fromDest.calculateHash().toBase64() + " for message " + id); - } + ClientConnectionRunner runner = getRunner(fromDest); + if (runner != null) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Delivering status [" + (delivered?"success":"failure") + "] to " + + fromDest.calculateHash().toBase64() + " for message " + id); + runner.updateMessageDeliveryStatus(id, delivered); + } else { + if (_log.shouldLog(Log.WARN)) + _log.warn("Cannot deliver status [" + (delivered?"success":"failure") + "] to " + + fromDest.calculateHash().toBase64() + " for message " + id); + } } private Set getRunnerDestinations() { - Set dests = new HashSet(); - synchronized (_runners) { - dests.addAll(_runners.keySet()); - } - return 
dests; + Set dests = new HashSet(); + synchronized (_runners) { + dests.addAll(_runners.keySet()); + } + return dests; } public void reportAbuse(Destination dest, String reason, int severity) { - if (dest != null) { - ClientConnectionRunner runner = getRunner(dest); - if (runner != null) { - runner.reportAbuse(reason, severity); - } - } else { - Set dests = getRunnerDestinations(); - for (Iterator iter = dests.iterator(); iter.hasNext(); ) { - Destination d = (Destination)iter.next(); - reportAbuse(d, reason, severity); - } - } + if (dest != null) { + ClientConnectionRunner runner = getRunner(dest); + if (runner != null) { + runner.reportAbuse(reason, severity); + } + } else { + Set dests = getRunnerDestinations(); + for (Iterator iter = dests.iterator(); iter.hasNext(); ) { + Destination d = (Destination)iter.next(); + reportAbuse(d, reason, severity); + } + } } public String renderStatusHTML() { - StringBuffer buf = new StringBuffer(); - buf.append("

    Clients

      "); - Map runners = null; - synchronized (_runners) { - runners = (Map)_runners.clone(); - } - for (Iterator iter = runners.keySet().iterator(); iter.hasNext(); ) { - Destination dest = (Destination)iter.next(); - ClientConnectionRunner runner = (ClientConnectionRunner)runners.get(dest); - buf.append("
    • ").append(dest.calculateHash().toBase64()).append("
    • \n"); - // toss out some general warnings - if (runner.getLeaseSet() == null) - buf.append("No leases! If you didn't just start a client, please restart it (and perhaps check your router's logs for ERROR messages)
      \n"); - else if (runner.getLeaseSet().getEarliestLeaseDate() < Clock.getInstance().now()) - buf.append("wtf, lease has already expired! please restart your client
      \n"); - buf.append("
      \n");
      -	    buf.append(runner.getLeaseSet()).append("
      \n"); - } - buf.append("
    \n"); - return buf.toString(); + StringBuffer buf = new StringBuffer(); + buf.append("

    Clients

      "); + Map runners = null; + synchronized (_runners) { + runners = (Map)_runners.clone(); + } + for (Iterator iter = runners.keySet().iterator(); iter.hasNext(); ) { + Destination dest = (Destination)iter.next(); + ClientConnectionRunner runner = (ClientConnectionRunner)runners.get(dest); + buf.append("
    • ").append(dest.calculateHash().toBase64()).append("
    • \n"); + // toss out some general warnings + if (runner.getLeaseSet() == null) + buf.append("No leases! If you didn't just start a client, please restart it (and perhaps check your router's logs for ERROR messages)
      \n"); + else if (runner.getLeaseSet().getEarliestLeaseDate() < _context.clock().now()) + buf.append("wtf, lease has already expired! please wait a minute, and if this message remains, restart your client
      \n"); + buf.append("
      \n");
      +            buf.append(runner.getLeaseSet()).append("
      \n"); + } + buf.append("
    \n"); + return buf.toString(); } public void messageReceived(ClientMessage msg) { - JobQueue.getInstance().addJob(new HandleJob(msg)); + _context.jobQueue().addJob(new HandleJob(msg)); } private class HandleJob extends JobImpl { - private ClientMessage _msg; - public HandleJob(ClientMessage msg) { - _msg = msg; - } - public String getName() { return "Handle Inbound Client Messages"; } - public void runJob() { - ClientConnectionRunner runner = null; - if (_msg.getDestination() != null) - runner = getRunner(_msg.getDestination()); - else - runner = getRunner(_msg.getDestinationHash()); - - if (runner != null) { - StatManager.getInstance().addRateData("client.receiveMessageSize", _msg.getPayload().getSize(), 0); - runner.receiveMessage(_msg.getDestination(), null, _msg.getPayload()); - } else { - // no client connection... - // we should pool these somewhere... - _log.warn("Message received but we don't have a connection to " + _msg.getDestination() + "/" + _msg.getDestinationHash() + " currently. DROPPED"); - } - } + private ClientMessage _msg; + public HandleJob(ClientMessage msg) { + super(ClientManager.this._context); + _msg = msg; + } + public String getName() { return "Handle Inbound Client Messages"; } + public void runJob() { + ClientConnectionRunner runner = null; + if (_msg.getDestination() != null) + runner = getRunner(_msg.getDestination()); + else + runner = getRunner(_msg.getDestinationHash()); + + if (runner != null) { + HandleJob.this._context.statManager().addRateData("client.receiveMessageSize", + _msg.getPayload().getSize(), 0); + runner.receiveMessage(_msg.getDestination(), null, _msg.getPayload()); + } else { + // no client connection... + // we should pool these somewhere... + if (_log.shouldLog(Log.WARN)) + _log.warn("Message received but we don't have a connection to " + + _msg.getDestination() + "/" + _msg.getDestinationHash() + + " currently. 
DROPPED"); + } + } } } diff --git a/router/java/src/net/i2p/router/client/ClientManagerFacadeImpl.java b/router/java/src/net/i2p/router/client/ClientManagerFacadeImpl.java index 1619242e7..6b303926d 100644 --- a/router/java/src/net/i2p/router/client/ClientManagerFacadeImpl.java +++ b/router/java/src/net/i2p/router/client/ClientManagerFacadeImpl.java @@ -17,6 +17,7 @@ import net.i2p.router.ClientManagerFacade; import net.i2p.router.ClientMessage; import net.i2p.router.Job; import net.i2p.router.Router; +import net.i2p.router.RouterContext; import net.i2p.util.Log; /** @@ -27,33 +28,35 @@ import net.i2p.util.Log; public class ClientManagerFacadeImpl extends ClientManagerFacade { private final static Log _log = new Log(ClientManagerFacadeImpl.class); private ClientManager _manager; + private RouterContext _context; public final static String PROP_CLIENT_PORT = "i2cp.port"; public final static int DEFAULT_PORT = 7654; - public ClientManagerFacadeImpl() { - _manager = null; - _log.debug("Client manager facade created"); + public ClientManagerFacadeImpl(RouterContext context) { + _context = context; + _manager = null; + _log.debug("Client manager facade created"); } public void startup() { - _log.info("Starting up the client subsystem"); - String portStr = Router.getInstance().getConfigSetting(PROP_CLIENT_PORT); - if (portStr != null) { - try { - int port = Integer.parseInt(portStr); - _manager = new ClientManager(port); - } catch (NumberFormatException nfe) { - _log.error("Error setting the port: " + portStr + " is not valid", nfe); - _manager = new ClientManager(DEFAULT_PORT); - } - } else { - _manager = new ClientManager(DEFAULT_PORT); - } + _log.info("Starting up the client subsystem"); + String portStr = _context.router().getConfigSetting(PROP_CLIENT_PORT); + if (portStr != null) { + try { + int port = Integer.parseInt(portStr); + _manager = new ClientManager(_context, port); + } catch (NumberFormatException nfe) { + _log.error("Error setting the port: " + portStr + 
" is not valid", nfe); + _manager = new ClientManager(_context, DEFAULT_PORT); + } + } else { + _manager = new ClientManager(_context, DEFAULT_PORT); + } } public void shutdown() { - if (_manager != null) - _manager.shutdown(); + if (_manager != null) + _manager.shutdown(); } /** @@ -70,10 +73,10 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade { * @param onFailedJob Job to run after the timeout passes without receiving authorization */ public void requestLeaseSet(Destination dest, LeaseSet set, long timeout, Job onCreateJob, Job onFailedJob) { - if (_manager != null) - _manager.requestLeaseSet(dest, set, timeout, onCreateJob, onFailedJob); - else - _log.error("Null manager on requestLeaseSet!"); + if (_manager != null) + _manager.requestLeaseSet(dest, set, timeout, onCreateJob, onFailedJob); + else + _log.error("Null manager on requestLeaseSet!"); } /** @@ -85,10 +88,10 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade { * @param severity How severe the abuse is, with 0 being not severe and 255 is the max */ public void reportAbuse(Destination dest, String reason, int severity) { - if (_manager != null) - _manager.reportAbuse(dest, reason, severity); - else - _log.error("Null manager on reportAbuse!"); + if (_manager != null) + _manager.reportAbuse(dest, reason, severity); + else + _log.error("Null manager on reportAbuse!"); } /** * Determine if the destination specified is managed locally. This call @@ -97,12 +100,12 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade { * @param dest Destination to be checked */ public boolean isLocal(Destination dest) { - if (_manager != null) - return _manager.isLocal(dest); - else { - _log.debug("Null manager on isLocal(dest)!"); - return false; - } + if (_manager != null) + return _manager.isLocal(dest); + else { + _log.debug("Null manager on isLocal(dest)!"); + return false; + } } /** * Determine if the destination specified is managed locally. 
This call @@ -111,26 +114,26 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade { * @param destHash Hash of Destination to be checked */ public boolean isLocal(Hash destHash) { - if (_manager != null) - return _manager.isLocal(destHash); - else { - _log.debug("Null manager on isLocal(hash)!"); - return false; - } + if (_manager != null) + return _manager.isLocal(destHash); + else { + _log.debug("Null manager on isLocal(hash)!"); + return false; + } } public void messageDeliveryStatusUpdate(Destination fromDest, MessageId id, boolean delivered) { - if (_manager != null) - _manager.messageDeliveryStatusUpdate(fromDest, id, delivered); - else - _log.error("Null manager on messageDeliveryStatusUpdate!"); + if (_manager != null) + _manager.messageDeliveryStatusUpdate(fromDest, id, delivered); + else + _log.error("Null manager on messageDeliveryStatusUpdate!"); } public void messageReceived(ClientMessage msg) { - if (_manager != null) - _manager.messageReceived(msg); - else - _log.error("Null manager on messageReceived!"); + if (_manager != null) + _manager.messageReceived(msg); + else + _log.error("Null manager on messageReceived!"); } /** @@ -138,20 +141,20 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade { * */ public SessionConfig getClientSessionConfig(Destination dest) { - if (_manager != null) - return _manager.getClientSessionConfig(dest); - else { - _log.error("Null manager on getClientSessionConfig!"); - return null; - } + if (_manager != null) + return _manager.getClientSessionConfig(dest); + else { + _log.error("Null manager on getClientSessionConfig!"); + return null; + } } public String renderStatusHTML() { - if (_manager != null) - return _manager.renderStatusHTML(); - else { - _log.error("Null manager on renderStatusHTML!"); - return null; - } + if (_manager != null) + return _manager.renderStatusHTML(); + else { + _log.error("Null manager on renderStatusHTML!"); + return null; + } } } diff --git 
a/router/java/src/net/i2p/router/client/ClientMessageEventListener.java b/router/java/src/net/i2p/router/client/ClientMessageEventListener.java index 0f2e7e275..90605d246 100644 --- a/router/java/src/net/i2p/router/client/ClientMessageEventListener.java +++ b/router/java/src/net/i2p/router/client/ClientMessageEventListener.java @@ -32,6 +32,7 @@ import net.i2p.router.NetworkDatabaseFacade; import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.router.RouterContext; /** * Receive events from the client and handle them accordingly (updating the runner when @@ -39,11 +40,14 @@ import net.i2p.util.RandomSource; * */ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventListener { - private static final Log _log = new Log(ClientMessageEventListener.class); + private Log _log; + private RouterContext _context; private ClientConnectionRunner _runner; - public ClientMessageEventListener(ClientConnectionRunner runner) { - _runner = runner; + public ClientMessageEventListener(RouterContext context, ClientConnectionRunner runner) { + _context = context; + _log = _context.logManager().getLog(ClientMessageEventListener.class); + _runner = runner; } /** @@ -51,36 +55,37 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi * */ public void messageReceived(I2CPMessageReader reader, I2CPMessage message) { - if (_runner.isDead()) return; - _log.info("Message recieved: \n" + message); - switch (message.getType()) { - case GetDateMessage.MESSAGE_TYPE: - handleGetDate(reader, (GetDateMessage)message); - break; - case SetDateMessage.MESSAGE_TYPE: - handleSetDate(reader, (SetDateMessage)message); - break; - case CreateSessionMessage.MESSAGE_TYPE: - handleCreateSession(reader, (CreateSessionMessage)message); - break; - case SendMessageMessage.MESSAGE_TYPE: - handleSendMessage(reader, (SendMessageMessage)message); - break; - case ReceiveMessageBeginMessage.MESSAGE_TYPE: - 
handleReceiveBegin(reader, (ReceiveMessageBeginMessage)message); - break; - case ReceiveMessageEndMessage.MESSAGE_TYPE: - handleReceiveEnd(reader, (ReceiveMessageEndMessage)message); - break; - case CreateLeaseSetMessage.MESSAGE_TYPE: - handleCreateLeaseSet(reader, (CreateLeaseSetMessage)message); - break; - case DestroySessionMessage.MESSAGE_TYPE: - handleDestroySession(reader, (DestroySessionMessage)message); - break; - default: - _log.warn("Unhandled I2CP type received: " + message.getType()); - } + if (_runner.isDead()) return; + if (_log.shouldLog(Log.INFO)) + _log.info("Message recieved: \n" + message); + switch (message.getType()) { + case GetDateMessage.MESSAGE_TYPE: + handleGetDate(reader, (GetDateMessage)message); + break; + case SetDateMessage.MESSAGE_TYPE: + handleSetDate(reader, (SetDateMessage)message); + break; + case CreateSessionMessage.MESSAGE_TYPE: + handleCreateSession(reader, (CreateSessionMessage)message); + break; + case SendMessageMessage.MESSAGE_TYPE: + handleSendMessage(reader, (SendMessageMessage)message); + break; + case ReceiveMessageBeginMessage.MESSAGE_TYPE: + handleReceiveBegin(reader, (ReceiveMessageBeginMessage)message); + break; + case ReceiveMessageEndMessage.MESSAGE_TYPE: + handleReceiveEnd(reader, (ReceiveMessageEndMessage)message); + break; + case CreateLeaseSetMessage.MESSAGE_TYPE: + handleCreateLeaseSet(reader, (CreateLeaseSetMessage)message); + break; + case DestroySessionMessage.MESSAGE_TYPE: + handleDestroySession(reader, (DestroySessionMessage)message); + break; + default: + _log.warn("Unhandled I2CP type received: " + message.getType()); + } } /** @@ -88,27 +93,27 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi * */ public void readError(I2CPMessageReader reader, Exception error) { - if (_runner.isDead()) return; - _log.error("Error occurred", error); - _runner.stopRunning(); + if (_runner.isDead()) return; + _log.error("Error occurred", error); + _runner.stopRunning(); } public void 
disconnected(I2CPMessageReader reader) { - if (_runner.isDead()) return; - _runner.disconnected(); + if (_runner.isDead()) return; + _runner.disconnected(); } private void handleGetDate(I2CPMessageReader reader, GetDateMessage message) { - try { - _runner.doSend(new SetDateMessage()); - } catch (I2CPMessageException ime) { - _log.error("Error writing out the setDate message", ime); - } catch (IOException ioe) { - _log.error("Error writing out the setDate message", ioe); - } + try { + _runner.doSend(new SetDateMessage()); + } catch (I2CPMessageException ime) { + _log.error("Error writing out the setDate message", ime); + } catch (IOException ioe) { + _log.error("Error writing out the setDate message", ioe); + } } private void handleSetDate(I2CPMessageReader reader, SetDateMessage message) { - Clock.getInstance().setNow(message.getDate().getTime()); + _context.clock().setNow(message.getDate().getTime()); } @@ -117,30 +122,30 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi * */ private void handleCreateSession(I2CPMessageReader reader, CreateSessionMessage message) { - if (message.getSessionConfig().verifySignature()) { - _log.debug("Signature verified correctly on create session message"); - } else { - _log.error("Signature verification *FAILED* on a create session message. Hijack attempt?"); - _runner.disconnectClient("Invalid signature on CreateSessionMessage"); - return; - } + if (message.getSessionConfig().verifySignature()) { + _log.debug("Signature verified correctly on create session message"); + } else { + _log.error("Signature verification *FAILED* on a create session message. 
Hijack attempt?"); + _runner.disconnectClient("Invalid signature on CreateSessionMessage"); + return; + } - SessionStatusMessage msg = new SessionStatusMessage(); - SessionId sessionId = new SessionId(); - sessionId.setSessionId(getNextSessionId()); - _runner.setSessionId(sessionId); - msg.setSessionId(sessionId); - msg.setStatus(SessionStatusMessage.STATUS_CREATED); - try { - _runner.doSend(msg); - _runner.sessionEstablished(message.getSessionConfig()); - } catch (I2CPMessageException ime) { - _log.error("Error writing out the session status message", ime); - } catch (IOException ioe) { - _log.error("Error writing out the session status message", ioe); - } - - JobQueue.getInstance().addJob(new CreateSessionJob(_runner)); + SessionStatusMessage msg = new SessionStatusMessage(); + SessionId sessionId = new SessionId(); + sessionId.setSessionId(getNextSessionId()); + _runner.setSessionId(sessionId); + msg.setSessionId(sessionId); + msg.setStatus(SessionStatusMessage.STATUS_CREATED); + try { + _runner.doSend(msg); + _runner.sessionEstablished(message.getSessionConfig()); + } catch (I2CPMessageException ime) { + _log.error("Error writing out the session status message", ime); + } catch (IOException ioe) { + _log.error("Error writing out the session status message", ioe); + } + + _context.jobQueue().addJob(new CreateSessionJob(_context, _runner)); } @@ -150,9 +155,9 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi * */ private void handleSendMessage(I2CPMessageReader reader, SendMessageMessage message) { - _log.debug("handleSendMessage called"); - MessageId id = _runner.distributeMessage(message); - _runner.ackSendMessage(id, message.getNonce()); + _log.debug("handleSendMessage called"); + MessageId id = _runner.distributeMessage(message); + _runner.ackSendMessage(id, message.getNonce()); } @@ -161,24 +166,25 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi * */ private void 
handleReceiveBegin(I2CPMessageReader reader, ReceiveMessageBeginMessage message) { - if (_runner.isDead()) return; - _log.debug("Handling recieve begin: id = " + message.getMessageId()); - MessagePayloadMessage msg = new MessagePayloadMessage(); - msg.setMessageId(message.getMessageId()); - msg.setSessionId(_runner.getSessionId()); - Payload payload = _runner.getPayload(message.getMessageId()); - if (payload == null) { - _log.error("Payload for message id [" + message.getMessageId() + "] is null! Unknown message id?"); - return; - } - msg.setPayload(payload); - try { - _runner.doSend(msg); - } catch (IOException ioe) { - _log.error("Error delivering the payload", ioe); - } catch (I2CPMessageException ime) { - _log.error("Error delivering the payload", ime); - } + if (_runner.isDead()) return; + _log.debug("Handling recieve begin: id = " + message.getMessageId()); + MessagePayloadMessage msg = new MessagePayloadMessage(); + msg.setMessageId(message.getMessageId()); + msg.setSessionId(_runner.getSessionId()); + Payload payload = _runner.getPayload(message.getMessageId()); + if (payload == null) { + _log.error("Payload for message id [" + message.getMessageId() + + "] is null! 
Unknown message id?"); + return; + } + msg.setPayload(payload); + try { + _runner.doSend(msg); + } catch (IOException ioe) { + _log.error("Error delivering the payload", ioe); + } catch (I2CPMessageException ime) { + _log.error("Error delivering the payload", ime); + } } /** @@ -188,26 +194,26 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi * */ private void handleReceiveEnd(I2CPMessageReader reader, ReceiveMessageEndMessage message) { - _runner.removePayload(message.getMessageId()); + _runner.removePayload(message.getMessageId()); } private void handleDestroySession(I2CPMessageReader reader, DestroySessionMessage message) { - _log.info("Destroying client session " + _runner.getSessionId()); - _runner.stopRunning(); + _log.info("Destroying client session " + _runner.getSessionId()); + _runner.stopRunning(); } private void handleCreateLeaseSet(I2CPMessageReader reader, CreateLeaseSetMessage message) { - if ( (message.getLeaseSet() == null) || (message.getPrivateKey() == null) || (message.getSigningPrivateKey() == null) ) { - _log.error("Null lease set granted: " + message); - return; - } - - _log.info("New lease set granted for destination " + message.getLeaseSet().getDestination().calculateHash().toBase64()); - KeyManager.getInstance().registerKeys(message.getLeaseSet().getDestination(), message.getSigningPrivateKey(), message.getPrivateKey()); - NetworkDatabaseFacade.getInstance().publish(message.getLeaseSet()); - - // leaseSetCreated takes care of all the LeaseRequestState stuff (including firing any jobs) - _runner.leaseSetCreated(message.getLeaseSet()); + if ( (message.getLeaseSet() == null) || (message.getPrivateKey() == null) || (message.getSigningPrivateKey() == null) ) { + _log.error("Null lease set granted: " + message); + return; + } + + _log.info("New lease set granted for destination " + message.getLeaseSet().getDestination().calculateHash().toBase64()); + 
_context.keyManager().registerKeys(message.getLeaseSet().getDestination(), message.getSigningPrivateKey(), message.getPrivateKey()); + _context.netDb().publish(message.getLeaseSet()); + + // leaseSetCreated takes care of all the LeaseRequestState stuff (including firing any jobs) + _runner.leaseSetCreated(message.getLeaseSet()); } // this *should* be mod 65536, but UnsignedInteger is still b0rked. FIXME @@ -218,11 +224,11 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi /** generate a new sessionId */ private final static int getNextSessionId() { - synchronized (_sessionIdLock) { - int id = (++_id)%MAX_SESSION_ID; - if (_id >= MAX_SESSION_ID) - _id = 0; - return id; - } + synchronized (_sessionIdLock) { + int id = (++_id)%MAX_SESSION_ID; + if (_id >= MAX_SESSION_ID) + _id = 0; + return id; + } } } diff --git a/router/java/src/net/i2p/router/client/CreateSessionJob.java b/router/java/src/net/i2p/router/client/CreateSessionJob.java index 80012135e..33f80e9d0 100644 --- a/router/java/src/net/i2p/router/client/CreateSessionJob.java +++ b/router/java/src/net/i2p/router/client/CreateSessionJob.java @@ -1,9 +1,9 @@ package net.i2p.router.client; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -15,6 +15,7 @@ import net.i2p.router.ClientTunnelSettings; import net.i2p.router.JobImpl; import net.i2p.router.TunnelManagerFacade; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Given an established connection, walk through the process of establishing the @@ -24,39 +25,41 @@ import net.i2p.util.Log; * */ class CreateSessionJob extends JobImpl { - private final static Log _log = new Log(CreateSessionJob.class); + private Log _log; private ClientConnectionRunner _runner; private final static long LEASE_CREATION_TIMEOUT = 30*1000; - public CreateSessionJob(ClientConnectionRunner runner) { - _runner = runner; + public CreateSessionJob(RouterContext context, ClientConnectionRunner runner) { + super(context); + _log = context.logManager().getLog(CreateSessionJob.class); + _runner = runner; } public String getName() { return "Request tunnels for a new client"; } public void runJob() { - SessionConfig cfg = _runner.getConfig(); - if ( (cfg == null) || (cfg.getDestination() == null) ) return; - if (_log.shouldLog(Log.INFO)) - _log.info("Requesting lease set for destination " + cfg.getDestination().calculateHash().toBase64()); - ClientTunnelSettings settings = new ClientTunnelSettings(); - Properties props = new Properties(); - - // We're NOT going to force all clients to use the router's defaults, since that may be - // excessive. This means that unless the user says otherwise, we'll be satisfied with whatever - // is available. Otherwise, when the router starts up, if there aren't sufficient tunnels with the - // adequate number of hops, the user will have to wait. Once peer profiles are persistent, we can - // reenable this, since on startup we'll have a sufficient number of high enough ranked peers to - // tunnel through. (perhaps). 
- - // XXX take the router's defaults - // XXX props.putAll(Router.getInstance().getConfigMap()); - - // override them by the client's settings - props.putAll(_runner.getConfig().getOptions()); - - // and load 'em up (using anything not yet set as the software defaults) - settings.readFromProperties(props); - TunnelManagerFacade.getInstance().createTunnels(_runner.getConfig().getDestination(), settings, LEASE_CREATION_TIMEOUT); + SessionConfig cfg = _runner.getConfig(); + if ( (cfg == null) || (cfg.getDestination() == null) ) return; + if (_log.shouldLog(Log.INFO)) + _log.info("Requesting lease set for destination " + cfg.getDestination().calculateHash().toBase64()); + ClientTunnelSettings settings = new ClientTunnelSettings(); + Properties props = new Properties(); + + // We're NOT going to force all clients to use the router's defaults, since that may be + // excessive. This means that unless the user says otherwise, we'll be satisfied with whatever + // is available. Otherwise, when the router starts up, if there aren't sufficient tunnels with the + // adequate number of hops, the user will have to wait. Once peer profiles are persistent, we can + // reenable this, since on startup we'll have a sufficient number of high enough ranked peers to + // tunnel through. (perhaps). 
+ + // XXX take the router's defaults + // XXX props.putAll(Router.getInstance().getConfigMap()); + + // override them by the client's settings + props.putAll(_runner.getConfig().getOptions()); + + // and load 'em up (using anything not yet set as the software defaults) + settings.readFromProperties(props); + _context.tunnelManager().createTunnels(_runner.getConfig().getDestination(), settings, LEASE_CREATION_TIMEOUT); } } diff --git a/router/java/src/net/i2p/router/client/MessageReceivedJob.java b/router/java/src/net/i2p/router/client/MessageReceivedJob.java index b3856c3bf..7b76a64ac 100644 --- a/router/java/src/net/i2p/router/client/MessageReceivedJob.java +++ b/router/java/src/net/i2p/router/client/MessageReceivedJob.java @@ -1,9 +1,9 @@ package net.i2p.router.client; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -16,6 +16,7 @@ import net.i2p.data.i2cp.I2CPMessageException; import net.i2p.data.i2cp.MessageId; import net.i2p.data.i2cp.MessageStatusMessage; import net.i2p.router.JobImpl; +import net.i2p.router.RouterContext; import net.i2p.util.Log; /** @@ -23,27 +24,29 @@ import net.i2p.util.Log; * */ class MessageReceivedJob extends JobImpl { - private final static Log _log = new Log(MessageReceivedJob.class); + private Log _log; private ClientConnectionRunner _runner; private Destination _to; private Destination _from; private Payload _payload; - public MessageReceivedJob(ClientConnectionRunner runner, Destination toDest, Destination fromDest, Payload payload) { - _runner = runner; - _to = toDest; - _from = fromDest; - _payload = payload; + public MessageReceivedJob(RouterContext ctx, ClientConnectionRunner runner, Destination toDest, Destination fromDest, Payload payload) { + super(ctx); + _log = ctx.logManager().getLog(MessageReceivedJob.class); + _runner = runner; + _to = toDest; + _from = fromDest; + _payload = payload; } - + public String getName() { return "Deliver New Message"; } public void runJob() { - if (_runner.isDead()) return; - MessageId id = new MessageId(); - id.setMessageId(ClientConnectionRunner.getNextMessageId()); - _runner.setPayload(id, _payload); - messageAvailable(id, _payload.getSize()); + if (_runner.isDead()) return; + MessageId id = new MessageId(); + id.setMessageId(ClientConnectionRunner.getNextMessageId()); + _runner.setPayload(id, _payload); + messageAvailable(id, _payload.getSize()); } - + /** * Deliver notification to the client that the given message is available. 
* This is synchronous and returns true if the notification was sent safely, @@ -51,19 +54,19 @@ class MessageReceivedJob extends JobImpl { * */ public void messageAvailable(MessageId id, long size) { - _log.debug("Sending message available: " + id + " to sessionId " + _runner.getSessionId() + " (with nonce=1)", new Exception("available")); - MessageStatusMessage msg = new MessageStatusMessage(); - msg.setMessageId(id); - msg.setSessionId(_runner.getSessionId()); - msg.setSize(size); - msg.setNonce(1); - msg.setStatus(MessageStatusMessage.STATUS_AVAILABLE); - try { - _runner.doSend(msg); - } catch (I2CPMessageException ime) { - _log.error("Error writing out the message status message", ime); - } catch (IOException ioe) { - _log.error("Error writing out the message status message", ioe); - } + _log.debug("Sending message available: " + id + " to sessionId " + _runner.getSessionId() + " (with nonce=1)", new Exception("available")); + MessageStatusMessage msg = new MessageStatusMessage(); + msg.setMessageId(id); + msg.setSessionId(_runner.getSessionId()); + msg.setSize(size); + msg.setNonce(1); + msg.setStatus(MessageStatusMessage.STATUS_AVAILABLE); + try { + _runner.doSend(msg); + } catch (I2CPMessageException ime) { + _log.error("Error writing out the message status message", ime); + } catch (IOException ioe) { + _log.error("Error writing out the message status message", ioe); + } } } diff --git a/router/java/src/net/i2p/router/client/ReportAbuseJob.java b/router/java/src/net/i2p/router/client/ReportAbuseJob.java index b5690ebf5..06c259012 100644 --- a/router/java/src/net/i2p/router/client/ReportAbuseJob.java +++ b/router/java/src/net/i2p/router/client/ReportAbuseJob.java @@ -1,9 +1,9 @@ package net.i2p.router.client; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. 
- * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -16,40 +16,43 @@ import net.i2p.data.i2cp.I2CPMessageException; import net.i2p.data.i2cp.ReportAbuseMessage; import net.i2p.router.JobImpl; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Async job to send an abuse message to the client * */ class ReportAbuseJob extends JobImpl { - private final static Log _log = new Log(ReportAbuseJob.class); + private Log _log; private ClientConnectionRunner _runner; private String _reason; private int _severity; - public ReportAbuseJob(ClientConnectionRunner runner, String reason, int severity) { - _runner = runner; - _reason = reason; - _severity = severity; + public ReportAbuseJob(RouterContext context, ClientConnectionRunner runner, String reason, int severity) { + super(context); + _log = context.logManager().getLog(ReportAbuseJob.class); + _runner = runner; + _reason = reason; + _severity = severity; } - + public String getName() { return "Report Abuse"; } public void runJob() { - if (_runner.isDead()) return; - AbuseReason res = new AbuseReason(); - res.setReason(_reason); - AbuseSeverity sev = new AbuseSeverity(); - sev.setSeverity(_severity); - ReportAbuseMessage msg = new ReportAbuseMessage(); - msg.setMessageId(null); - msg.setReason(res); - msg.setSessionId(_runner.getSessionId()); - msg.setSeverity(sev); - try { - _runner.doSend(msg); - } catch (I2CPMessageException ime) { - _log.error("Error reporting abuse", ime); - } catch (IOException ioe) { - _log.error("Error reporting abuse", ioe); - } + if (_runner.isDead()) return; + AbuseReason res = new AbuseReason(); + res.setReason(_reason); + AbuseSeverity sev = new AbuseSeverity(); + sev.setSeverity(_severity); + ReportAbuseMessage msg = 
new ReportAbuseMessage(); + msg.setMessageId(null); + msg.setReason(res); + msg.setSessionId(_runner.getSessionId()); + msg.setSeverity(sev); + try { + _runner.doSend(msg); + } catch (I2CPMessageException ime) { + _log.error("Error reporting abuse", ime); + } catch (IOException ioe) { + _log.error("Error reporting abuse", ioe); + } } } diff --git a/router/java/src/net/i2p/router/client/RequestLeaseSetJob.java b/router/java/src/net/i2p/router/client/RequestLeaseSetJob.java index 2c8344642..19e845c10 100644 --- a/router/java/src/net/i2p/router/client/RequestLeaseSetJob.java +++ b/router/java/src/net/i2p/router/client/RequestLeaseSetJob.java @@ -1,9 +1,9 @@ package net.i2p.router.client; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -19,110 +19,114 @@ import net.i2p.router.JobImpl; import net.i2p.router.JobQueue; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Async job to walk the client through generating a lease set. First sends it - * to the client and then queues up a CheckLeaseRequestStatus job for + * to the client and then queues up a CheckLeaseRequestStatus job for * processing after the expiration. When that CheckLeaseRequestStatus is run, * if the client still hasn't provided the signed leaseSet, fire off the onFailed * job from the intermediary LeaseRequestState and drop the client. 
* */ class RequestLeaseSetJob extends JobImpl { - private static final Log _log = new Log(RequestLeaseSetJob.class); + private Log _log; private ClientConnectionRunner _runner; private LeaseSet _ls; private long _expiration; private Job _onCreate; private Job _onFail; - public RequestLeaseSetJob(ClientConnectionRunner runner, LeaseSet set, long expiration, Job onCreate, Job onFail) { - _runner = runner; - _ls = set; - _expiration = expiration; - _onCreate = onCreate; - _onFail = onFail; + public RequestLeaseSetJob(RouterContext ctx, ClientConnectionRunner runner, LeaseSet set, long expiration, Job onCreate, Job onFail) { + super(ctx); + _log = ctx.logManager().getLog(RequestLeaseSetJob.class); + _runner = runner; + _ls = set; + _expiration = expiration; + _onCreate = onCreate; + _onFail = onFail; } - + public String getName() { return "Request Lease Set"; } public void runJob() { - if (_runner.isDead()) return; - LeaseRequestState oldReq = _runner.getLeaseRequest(); - if (oldReq != null) { - if (oldReq.getExpiration() > Clock.getInstance().now()) { - _log.error("Old *current* leaseRequest already exists! Why are we trying to request too quickly?", getAddedBy()); - return; - } else { - _log.error("Old *expired* leaseRequest exists! Why did the old request not get killed? 
(expiration = " + new Date(oldReq.getExpiration()) + ")", getAddedBy()); - } - } - - LeaseRequestState state = new LeaseRequestState(_onCreate, _onFail, _expiration, _ls); - - RequestLeaseSetMessage msg = new RequestLeaseSetMessage(); - Date end = null; - // get the earliest end date - for (int i = 0; i < state.getRequested().getLeaseCount(); i++) { - if ( (end == null) || (end.getTime() > state.getRequested().getLease(i).getEndDate().getTime()) ) - end = state.getRequested().getLease(i).getEndDate(); - } - - msg.setEndDate(end); - msg.setSessionId(_runner.getSessionId()); - - for (int i = 0; i < state.getRequested().getLeaseCount(); i++) { - msg.addEndpoint(state.getRequested().getLease(i).getRouterIdentity(), state.getRequested().getLease(i).getTunnelId()); - } - - try { - _runner.setLeaseRequest(state); - _runner.doSend(msg); - JobQueue.getInstance().addJob(new CheckLeaseRequestStatus(state)); - return; - } catch (I2CPMessageException ime) { - _log.error("Error sending I2CP message requesting the lease set", ime); - state.setIsSuccessful(false); - _runner.setLeaseRequest(null); - _runner.disconnectClient("I2CP error requesting leaseSet"); - return; - } catch (IOException ioe) { - _log.error("Error sending I2CP message requesting the lease set", ioe); - state.setIsSuccessful(false); - _runner.setLeaseRequest(null); - _runner.disconnectClient("IO error requesting leaseSet"); - return; - } + if (_runner.isDead()) return; + LeaseRequestState oldReq = _runner.getLeaseRequest(); + if (oldReq != null) { + if (oldReq.getExpiration() > _context.clock().now()) { + _log.error("Old *current* leaseRequest already exists! Why are we trying to request too quickly?", getAddedBy()); + return; + } else { + _log.error("Old *expired* leaseRequest exists! Why did the old request not get killed? 
(expiration = " + new Date(oldReq.getExpiration()) + ")", getAddedBy()); + } + } + + LeaseRequestState state = new LeaseRequestState(_onCreate, _onFail, _expiration, _ls); + + RequestLeaseSetMessage msg = new RequestLeaseSetMessage(); + Date end = null; + // get the earliest end date + for (int i = 0; i < state.getRequested().getLeaseCount(); i++) { + if ( (end == null) || (end.getTime() > state.getRequested().getLease(i).getEndDate().getTime()) ) + end = state.getRequested().getLease(i).getEndDate(); + } + + msg.setEndDate(end); + msg.setSessionId(_runner.getSessionId()); + + for (int i = 0; i < state.getRequested().getLeaseCount(); i++) { + msg.addEndpoint(state.getRequested().getLease(i).getRouterIdentity(), state.getRequested().getLease(i).getTunnelId()); + } + + try { + _runner.setLeaseRequest(state); + _runner.doSend(msg); + _context.jobQueue().addJob(new CheckLeaseRequestStatus(state)); + return; + } catch (I2CPMessageException ime) { + _log.error("Error sending I2CP message requesting the lease set", ime); + state.setIsSuccessful(false); + _runner.setLeaseRequest(null); + _runner.disconnectClient("I2CP error requesting leaseSet"); + return; + } catch (IOException ioe) { + _log.error("Error sending I2CP message requesting the lease set", ioe); + state.setIsSuccessful(false); + _runner.setLeaseRequest(null); + _runner.disconnectClient("IO error requesting leaseSet"); + return; + } } - + /** - * Schedule this job to be run after the request's expiration, so that if + * Schedule this job to be run after the request's expiration, so that if * it wasn't yet successful, we fire off the failure job and disconnect the * client (but if it was, noop) * */ private class CheckLeaseRequestStatus extends JobImpl { - private LeaseRequestState _req; - - public CheckLeaseRequestStatus(LeaseRequestState state) { - _req = state; - getTiming().setStartAfter(state.getExpiration()); - } - - public void runJob() { - if (_runner.isDead()) return; - if (_req.getIsSuccessful()) { - 
// we didn't fail - return; - } else { - _log.error("Failed to receive a leaseSet in the time allotted (" + new Date(_req.getExpiration()) + ")"); - _runner.disconnectClient("Took too long to request leaseSet"); - if (_req.getOnFailed() != null) - JobQueue.getInstance().addJob(_req.getOnFailed()); - - // only zero out the request if its the one we know about - if (_req == _runner.getLeaseRequest()) - _runner.setLeaseRequest(null); - } - } - public String getName() { return "Check LeaseRequest Status"; } + private LeaseRequestState _req; + + public CheckLeaseRequestStatus(LeaseRequestState state) { + super(RequestLeaseSetJob.this._context); + _req = state; + getTiming().setStartAfter(state.getExpiration()); + } + + public void runJob() { + if (_runner.isDead()) return; + if (_req.getIsSuccessful()) { + // we didn't fail + return; + } else { + _log.error("Failed to receive a leaseSet in the time allotted (" + new Date(_req.getExpiration()) + ")"); + _runner.disconnectClient("Took too long to request leaseSet"); + if (_req.getOnFailed() != null) + RequestLeaseSetJob.this._context.jobQueue().addJob(_req.getOnFailed()); + + // only zero out the request if its the one we know about + if (_req == _runner.getLeaseRequest()) + _runner.setLeaseRequest(null); + } + } + public String getName() { return "Check LeaseRequest Status"; } } } diff --git a/router/java/src/net/i2p/router/message/BuildCreateTunnelMessageJob.java b/router/java/src/net/i2p/router/message/BuildCreateTunnelMessageJob.java deleted file mode 100644 index cf5c6dadc..000000000 --- a/router/java/src/net/i2p/router/message/BuildCreateTunnelMessageJob.java +++ /dev/null @@ -1,67 +0,0 @@ -package net.i2p.router.message; -/* - * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat - * your children, but it might. 
Use at your own risk. - * - */ - -import net.i2p.data.Hash; -import net.i2p.data.RouterInfo; -import net.i2p.router.Job; -import net.i2p.router.JobImpl; -import net.i2p.router.TunnelInfo; -import net.i2p.util.Log; - -/** - * Build a TunnelCreateMessage that is sent to the target requesting that they - * participate in the tunnel. If they reply back saying they will, fire off the - * onCreateSuccessful job, otherwise fire off the onCreateFailed job after a timeout. - * The test message is sent at the specified priority. - * - * The message algorithm is: - * = check to see if we have working outbound tunnels - * - if true, send a tunnel message out the tunnel containing a garlic aimed directly at the peer in question. - * - if false, send a message garlic'ed through a few routers before reaching the peer in question. - * - * the source route block will always point at an inbound tunnel - even if there aren't any real ones (in - * which case, the tunnel gateway is the local router) - * - */ -class BuildCreateTunnelMessageJob extends JobImpl { - private final static Log _log = new Log(BuildCreateTunnelMessageJob.class); - private RouterInfo _target; - private Hash _replyTo; - private TunnelInfo _tunnelConfig; - private Job _onCreateSuccessful; - private Job _onCreateFailed; - private long _timeoutMs; - private int _priority; - - /** - * - * @param target router to participate in the tunnel - * @param replyTo our address - * @param info data regarding the tunnel configuration - * @param onCreateSuccessfulJob after the peer replies back saying they'll participate - * @param onCreateFailedJob after the peer replies back saying they won't participate, or timeout - * @param timeoutMs how long to wait before timing out - * @param priority how high priority to send this test - */ - public BuildCreateTunnelMessageJob(RouterInfo target, Hash replyTo, TunnelInfo info, Job onCreateSuccessfulJob, Job onCreateFailedJob, long timeoutMs, int priority) { - super(); - _target = target; 
- _replyTo = replyTo; - _tunnelConfig = info; - _onCreateSuccessful = onCreateSuccessfulJob; - _onCreateFailed = onCreateFailedJob; - _timeoutMs = timeoutMs; - _priority = priority; - } - - public String getName() { return "Build Create Tunnel Message"; } - public void runJob() {} -} - diff --git a/router/java/src/net/i2p/router/message/BuildTestMessageJob.java b/router/java/src/net/i2p/router/message/BuildTestMessageJob.java index f46fb4ae2..40031b193 100644 --- a/router/java/src/net/i2p/router/message/BuildTestMessageJob.java +++ b/router/java/src/net/i2p/router/message/BuildTestMessageJob.java @@ -1,9 +1,9 @@ package net.i2p.router.message; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -30,16 +30,17 @@ import net.i2p.router.Router; import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.router.RouterContext; /** * Build a test message that will be sent to the target to make sure they're alive. - * Once that is verified, onSendJob is enqueued. If their reachability isn't + * Once that is verified, onSendJob is enqueued. If their reachability isn't * known (or they're unreachable) within timeoutMs, onSendFailedJob is enqueued. * The test message is sent at the specified priority. 
* */ public class BuildTestMessageJob extends JobImpl { - private final static Log _log = new Log(BuildTestMessageJob.class); + private Log _log; private RouterInfo _target; private Hash _replyTo; private Job _onSend; @@ -47,7 +48,7 @@ public class BuildTestMessageJob extends JobImpl { private long _timeoutMs; private int _priority; private long _testMessageKey; - + /** * * @param target router being tested @@ -56,144 +57,149 @@ public class BuildTestMessageJob extends JobImpl { * @param timeoutMs how long to wait before timing out * @param priority how high priority to send this test */ - public BuildTestMessageJob(RouterInfo target, Hash replyTo, Job onSendJob, Job onSendFailedJob, long timeoutMs, int priority) { - super(); - _target = target; - _replyTo = replyTo; - _onSend = onSendJob; - _onSendFailed = onSendFailedJob; - _timeoutMs = timeoutMs; - _priority = priority; - _testMessageKey = -1; + public BuildTestMessageJob(RouterContext ctx, RouterInfo target, Hash replyTo, + Job onSendJob, Job onSendFailedJob, long timeoutMs, int priority) { + super(ctx); + _log = ctx.logManager().getLog(BuildTestMessageJob.class); + _target = target; + _replyTo = replyTo; + _onSend = onSendJob; + _onSendFailed = onSendFailedJob; + _timeoutMs = timeoutMs; + _priority = priority; + _testMessageKey = -1; } public String getName() { return "Build Test Message"; } public void runJob() { - // This is a test message - build a garlic with a DeliveryStatusMessage that - // first goes to the peer then back to us. 
- if (_log.shouldLog(Log.DEBUG)) - _log.debug("Building garlic message to test " + _target.getIdentity().getHash().toBase64()); - GarlicConfig config = buildGarlicCloveConfig(); - // TODO: make the last params on this specify the correct sessionKey and tags used - ReplyJob replyJob = new JobReplyJob(_onSend, config.getRecipient().getIdentity().getPublicKey(), config.getId(), null, new HashSet()); - MessageSelector sel = buildMessageSelector(); - SendGarlicJob job = new SendGarlicJob(config, null, _onSendFailed, replyJob, _onSendFailed, _timeoutMs, _priority, sel); - JobQueue.getInstance().addJob(job); + // This is a test message - build a garlic with a DeliveryStatusMessage that + // first goes to the peer then back to us. + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Building garlic message to test " + _target.getIdentity().getHash().toBase64()); + GarlicConfig config = buildGarlicCloveConfig(); + // TODO: make the last params on this specify the correct sessionKey and tags used + ReplyJob replyJob = new JobReplyJob(_context, _onSend, config.getRecipient().getIdentity().getPublicKey(), config.getId(), null, new HashSet()); + MessageSelector sel = buildMessageSelector(); + SendGarlicJob job = new SendGarlicJob(_context, config, null, _onSendFailed, replyJob, _onSendFailed, _timeoutMs, _priority, sel); + _context.jobQueue().addJob(job); } private MessageSelector buildMessageSelector() { - return new TestMessageSelector(_testMessageKey, _timeoutMs + Clock.getInstance().now()); + return new TestMessageSelector(_testMessageKey, _timeoutMs + _context.clock().now()); } private GarlicConfig buildGarlicCloveConfig() { - _testMessageKey = RandomSource.getInstance().nextInt(Integer.MAX_VALUE); - if (_log.shouldLog(Log.INFO)) - _log.info("Test message key: " + _testMessageKey); - GarlicConfig config = new GarlicConfig(); - - PayloadGarlicConfig ackClove = buildAckClove(); - config.addClove(ackClove); - - DeliveryInstructions instructions = new DeliveryInstructions(); - 
instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_ROUTER); - instructions.setDelayRequested(false); - instructions.setDelaySeconds(0); - instructions.setEncrypted(false); - instructions.setEncryptionKey(null); - instructions.setRouter(_target.getIdentity().getHash()); - instructions.setTunnelId(null); - - config.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); - config.setDeliveryInstructions(instructions); - config.setId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE)); - config.setExpiration(_timeoutMs+Clock.getInstance().now()+2*Router.CLOCK_FUDGE_FACTOR); - config.setRecipient(_target); - config.setRequestAck(false); - - return config; + _testMessageKey = _context.random().nextInt(Integer.MAX_VALUE); + if (_log.shouldLog(Log.INFO)) + _log.info("Test message key: " + _testMessageKey); + GarlicConfig config = new GarlicConfig(); + + PayloadGarlicConfig ackClove = buildAckClove(); + config.addClove(ackClove); + + DeliveryInstructions instructions = new DeliveryInstructions(); + instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_ROUTER); + instructions.setDelayRequested(false); + instructions.setDelaySeconds(0); + instructions.setEncrypted(false); + instructions.setEncryptionKey(null); + instructions.setRouter(_target.getIdentity().getHash()); + instructions.setTunnelId(null); + + config.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); + config.setDeliveryInstructions(instructions); + config.setId(_context.random().nextInt(Integer.MAX_VALUE)); + config.setExpiration(_timeoutMs+_context.clock().now()+2*Router.CLOCK_FUDGE_FACTOR); + config.setRecipient(_target); + config.setRequestAck(false); + + return config; } /** * Build a clove that sends a DeliveryStatusMessage to us */ private PayloadGarlicConfig buildAckClove() { - PayloadGarlicConfig ackClove = new PayloadGarlicConfig(); - - DeliveryInstructions ackInstructions = new DeliveryInstructions(); - 
ackInstructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_ROUTER); - ackInstructions.setRouter(_replyTo); // yikes! - ackInstructions.setDelayRequested(false); - ackInstructions.setDelaySeconds(0); - ackInstructions.setEncrypted(false); - - DeliveryStatusMessage msg = new DeliveryStatusMessage(); - msg.setArrival(new Date(Clock.getInstance().now())); - msg.setMessageId(_testMessageKey); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Delivery status message key: " + _testMessageKey + " arrival: " + msg.getArrival()); - - ackClove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); - ackClove.setDeliveryInstructions(ackInstructions); - ackClove.setExpiration(_timeoutMs+Clock.getInstance().now()); - ackClove.setId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE)); - ackClove.setPayload(msg); - ackClove.setRecipient(_target); - ackClove.setRequestAck(false); - - return ackClove; + PayloadGarlicConfig ackClove = new PayloadGarlicConfig(); + + DeliveryInstructions ackInstructions = new DeliveryInstructions(); + ackInstructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_ROUTER); + ackInstructions.setRouter(_replyTo); // yikes! 
+ ackInstructions.setDelayRequested(false); + ackInstructions.setDelaySeconds(0); + ackInstructions.setEncrypted(false); + + DeliveryStatusMessage msg = new DeliveryStatusMessage(_context); + msg.setArrival(new Date(_context.clock().now())); + msg.setMessageId(_testMessageKey); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Delivery status message key: " + _testMessageKey + " arrival: " + msg.getArrival()); + + ackClove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); + ackClove.setDeliveryInstructions(ackInstructions); + ackClove.setExpiration(_timeoutMs+_context.clock().now()); + ackClove.setId(_context.random().nextInt(Integer.MAX_VALUE)); + ackClove.setPayload(msg); + ackClove.setRecipient(_target); + ackClove.setRequestAck(false); + + return ackClove; } /** * Search inbound messages for delivery status messages with our key */ private final static class TestMessageSelector implements MessageSelector { - private long _testMessageKey; - private long _timeout; - public TestMessageSelector(long key, long timeout) { - _testMessageKey = key; - _timeout = timeout; - } - public boolean continueMatching() { return false; } - public long getExpiration() { return _timeout; } - public boolean isMatch(I2NPMessage inMsg) { - if (inMsg.getType() == DeliveryStatusMessage.MESSAGE_TYPE) { - return ((DeliveryStatusMessage)inMsg).getMessageId() == _testMessageKey; - } else { - return false; - } - } + private long _testMessageKey; + private long _timeout; + public TestMessageSelector(long key, long timeout) { + _testMessageKey = key; + _timeout = timeout; + } + public boolean continueMatching() { return false; } + public long getExpiration() { return _timeout; } + public boolean isMatch(I2NPMessage inMsg) { + if (inMsg.getType() == DeliveryStatusMessage.MESSAGE_TYPE) { + return ((DeliveryStatusMessage)inMsg).getMessageId() == _testMessageKey; + } else { + return false; + } + } } /** * On reply, fire off the specified job * */ - private final static class 
JobReplyJob extends JobImpl implements ReplyJob { - private Job _job; - private PublicKey _target; - private long _msgId; - private Set _sessionTagsDelivered; - private SessionKey _keyDelivered; - public JobReplyJob(Job job, PublicKey target, long msgId, SessionKey keyUsed, Set tagsDelivered) { - _job = job; - _target = target; - _msgId = msgId; - _keyDelivered = keyUsed; - _sessionTagsDelivered = tagsDelivered; - } - public String getName() { return "Reply To Test Message Received"; } - public void runJob() { - if ( (_keyDelivered != null) && (_sessionTagsDelivered != null) && (_sessionTagsDelivered.size() > 0) ) - SessionKeyManager.getInstance().tagsDelivered(_target, _keyDelivered, _sessionTagsDelivered); - - JobQueue.getInstance().addJob(_job); - } - - public void setMessage(I2NPMessage message) { - // ignored, this is just a ping - } - + private static final class JobReplyJob extends JobImpl implements ReplyJob { + private Job _job; + private PublicKey _target; + private long _msgId; + private Set _sessionTagsDelivered; + private SessionKey _keyDelivered; + public JobReplyJob(RouterContext ctx, Job job, PublicKey target, long msgId, SessionKey keyUsed, Set tagsDelivered) { + super(ctx); + _job = job; + _target = target; + _msgId = msgId; + _keyDelivered = keyUsed; + _sessionTagsDelivered = tagsDelivered; + } + public String getName() { return "Reply To Test Message Received"; } + public void runJob() { + if ( (_keyDelivered != null) && + (_sessionTagsDelivered != null) && + (_sessionTagsDelivered.size() > 0) ) + _context.sessionKeyManager().tagsDelivered(_target, _keyDelivered, _sessionTagsDelivered); + + _context.jobQueue().addJob(_job); + } + + public void setMessage(I2NPMessage message) { + // ignored, this is just a ping + } + } } diff --git a/router/java/src/net/i2p/router/message/GarlicMessageBuilder.java b/router/java/src/net/i2p/router/message/GarlicMessageBuilder.java index c537e4434..885b265ee 100644 --- 
a/router/java/src/net/i2p/router/message/GarlicMessageBuilder.java +++ b/router/java/src/net/i2p/router/message/GarlicMessageBuilder.java @@ -1,9 +1,9 @@ package net.i2p.router.message; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -28,179 +28,181 @@ import net.i2p.data.i2np.I2NPMessage; import net.i2p.data.i2np.SourceRouteBlock; import net.i2p.router.MessageHistory; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Build garlic messages based on a GarlicConfig * */ public class GarlicMessageBuilder { - private final static Log _log = new Log(GarlicMessageBuilder.class); - - public static GarlicMessage buildMessage(GarlicConfig config) { - return buildMessage(config, new SessionKey(), new HashSet()); + public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config) { + return buildMessage(ctx, config, new SessionKey(), new HashSet()); } - public static GarlicMessage buildMessage(GarlicConfig config, SessionKey wrappedKey, Set wrappedTags) { - if (config == null) - throw new IllegalArgumentException("Null config specified"); - - PublicKey key = config.getRecipientPublicKey(); - if (key == null) { - if (config.getRecipient() == null) { - throw new IllegalArgumentException("Null recipient specified"); - } else if (config.getRecipient().getIdentity() == null) { - throw new IllegalArgumentException("Null recipient.identity specified"); - } else if (config.getRecipient().getIdentity().getPublicKey() == null) { - throw new IllegalArgumentException("Null 
recipient.identity.publicKey specified"); - } else - key = config.getRecipient().getIdentity().getPublicKey(); - } - GarlicMessage msg = new GarlicMessage(); - - noteWrap(msg, config); - - _log.info("Encrypted with public key " + key + " to expire on " + new Date(config.getExpiration())); - - byte cloveSet[] = buildCloveSet(config); - - SessionKey curKey = SessionKeyManager.getInstance().getCurrentKey(key); - if (curKey == null) - curKey = SessionKeyManager.getInstance().createSession(key); - wrappedKey.setData(curKey.getData()); - - int availTags = SessionKeyManager.getInstance().getAvailableTags(key, curKey); - _log.debug("Available tags for encryption to " + key + ": " + availTags); - - if (availTags < 10) { // arbitrary threshold - for (int i = 0; i < 20; i++) - wrappedTags.add(new SessionTag(true)); - _log.info("Less than 10 tags are available (" + availTags + "), so we're including 20 more"); - } else if (SessionKeyManager.getInstance().getAvailableTimeLeft(key, curKey) < 30*1000) { - // if we have > 10 tags, but they expire in under 30 seconds, we want more - for (int i = 0; i < 20; i++) - wrappedTags.add(new SessionTag(true)); - _log.info("Tags are almost expired, adding 20 new ones"); - } else { - // always tack on at least one more - not necessary. 
- //wrappedTags.add(new SessionTag(true)); - } - SessionTag curTag = SessionKeyManager.getInstance().consumeNextAvailableTag(key, curKey); - byte encData[] = ElGamalAESEngine.encrypt(cloveSet, key, curKey, wrappedTags, curTag, 1024); - msg.setData(encData); - Date exp = new Date(config.getExpiration()); - msg.setMessageExpiration(exp); - return msg; + public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set wrappedTags) { + Log log = ctx.logManager().getLog(GarlicMessageBuilder.class); + if (config == null) + throw new IllegalArgumentException("Null config specified"); + + PublicKey key = config.getRecipientPublicKey(); + if (key == null) { + if (config.getRecipient() == null) { + throw new IllegalArgumentException("Null recipient specified"); + } else if (config.getRecipient().getIdentity() == null) { + throw new IllegalArgumentException("Null recipient.identity specified"); + } else if (config.getRecipient().getIdentity().getPublicKey() == null) { + throw new IllegalArgumentException("Null recipient.identity.publicKey specified"); + } else + key = config.getRecipient().getIdentity().getPublicKey(); + } + GarlicMessage msg = new GarlicMessage(ctx); + + noteWrap(ctx, msg, config); + + log.info("Encrypted with public key " + key + " to expire on " + new Date(config.getExpiration())); + + byte cloveSet[] = buildCloveSet(ctx, config); + + SessionKey curKey = ctx.sessionKeyManager().getCurrentKey(key); + if (curKey == null) + curKey = ctx.sessionKeyManager().createSession(key); + wrappedKey.setData(curKey.getData()); + + int availTags = ctx.sessionKeyManager().getAvailableTags(key, curKey); + log.debug("Available tags for encryption to " + key + ": " + availTags); + + if (availTags < 10) { // arbitrary threshold + for (int i = 0; i < 20; i++) + wrappedTags.add(new SessionTag(true)); + log.info("Less than 10 tags are available (" + availTags + "), so we're including 20 more"); + } else if 
(ctx.sessionKeyManager().getAvailableTimeLeft(key, curKey) < 30*1000) { + // if we have > 10 tags, but they expire in under 30 seconds, we want more + for (int i = 0; i < 20; i++) + wrappedTags.add(new SessionTag(true)); + log.info("Tags are almost expired, adding 20 new ones"); + } else { + // always tack on at least one more - not necessary. + //wrappedTags.add(new SessionTag(true)); + } + SessionTag curTag = ctx.sessionKeyManager().consumeNextAvailableTag(key, curKey); + byte encData[] = ctx.elGamalAESEngine().encrypt(cloveSet, key, curKey, wrappedTags, curTag, 1024); + msg.setData(encData); + Date exp = new Date(config.getExpiration()); + msg.setMessageExpiration(exp); + return msg; } - private static void noteWrap(GarlicMessage wrapper, GarlicConfig contained) { - for (int i = 0; i < contained.getCloveCount(); i++) { - GarlicConfig config = contained.getClove(i); - if (config instanceof PayloadGarlicConfig) { - I2NPMessage msg = ((PayloadGarlicConfig)config).getPayload(); - String bodyType = msg.getClass().getName(); - MessageHistory.getInstance().wrap(bodyType, msg.getUniqueId(), GarlicMessage.class.getName(), wrapper.getUniqueId()); - } - } + private static void noteWrap(RouterContext ctx, GarlicMessage wrapper, GarlicConfig contained) { + for (int i = 0; i < contained.getCloveCount(); i++) { + GarlicConfig config = contained.getClove(i); + if (config instanceof PayloadGarlicConfig) { + I2NPMessage msg = ((PayloadGarlicConfig)config).getPayload(); + String bodyType = msg.getClass().getName(); + ctx.messageHistory().wrap(bodyType, msg.getUniqueId(), GarlicMessage.class.getName(), wrapper.getUniqueId()); + } + } } /** - * Build an unencrypted set of cloves specified by the config. + * Build an unencrypted set of cloves specified by the config. 
* */ - private static byte[] buildCloveSet(GarlicConfig config) { - ByteArrayOutputStream baos = new ByteArrayOutputStream(1024); - try { - if (config instanceof PayloadGarlicConfig) { - DataHelper.writeLong(baos, 1, 1); - baos.write(buildClove((PayloadGarlicConfig)config)); - } else { - DataHelper.writeLong(baos, 1, config.getCloveCount()); - for (int i = 0; i < config.getCloveCount(); i++) { - GarlicConfig c = config.getClove(i); - byte clove[] = null; - if (c instanceof PayloadGarlicConfig) { - _log.debug("Subclove IS a payload garlic clove"); - clove = buildClove((PayloadGarlicConfig)c); - } else { - _log.debug("Subclove IS NOT a payload garlic clove"); - clove = buildClove(c); - } - if (clove == null) - throw new DataFormatException("Unable to build clove"); - else - baos.write(clove); - } - } - config.getCertificate().writeBytes(baos); - DataHelper.writeLong(baos, 4, config.getId()); - DataHelper.writeDate(baos, new Date(config.getExpiration())); - } catch (IOException ioe) { - _log.error("Error building the clove set", ioe); - } catch (DataFormatException dfe) { - _log.error("Error building the clove set", dfe); - } - return baos.toByteArray(); + private static byte[] buildCloveSet(RouterContext ctx, GarlicConfig config) { + ByteArrayOutputStream baos = new ByteArrayOutputStream(1024); + Log log = ctx.logManager().getLog(GarlicMessageBuilder.class); + try { + if (config instanceof PayloadGarlicConfig) { + DataHelper.writeLong(baos, 1, 1); + baos.write(buildClove(ctx, (PayloadGarlicConfig)config)); + } else { + DataHelper.writeLong(baos, 1, config.getCloveCount()); + for (int i = 0; i < config.getCloveCount(); i++) { + GarlicConfig c = config.getClove(i); + byte clove[] = null; + if (c instanceof PayloadGarlicConfig) { + log.debug("Subclove IS a payload garlic clove"); + clove = buildClove(ctx, (PayloadGarlicConfig)c); + } else { + log.debug("Subclove IS NOT a payload garlic clove"); + clove = buildClove(ctx, c); + } + if (clove == null) + throw new 
DataFormatException("Unable to build clove"); + else + baos.write(clove); + } + } + config.getCertificate().writeBytes(baos); + DataHelper.writeLong(baos, 4, config.getId()); + DataHelper.writeDate(baos, new Date(config.getExpiration())); + } catch (IOException ioe) { + log.error("Error building the clove set", ioe); + } catch (DataFormatException dfe) { + log.error("Error building the clove set", dfe); + } + return baos.toByteArray(); } - private static byte[] buildClove(PayloadGarlicConfig config) throws DataFormatException, IOException { - GarlicClove clove = new GarlicClove(); - clove.setData(config.getPayload()); - return buildCommonClove(clove, config); + private static byte[] buildClove(RouterContext ctx, PayloadGarlicConfig config) throws DataFormatException, IOException { + GarlicClove clove = new GarlicClove(ctx); + clove.setData(config.getPayload()); + return buildCommonClove(ctx, clove, config); } - private static byte[] buildClove(GarlicConfig config) throws DataFormatException, IOException { - GarlicClove clove = new GarlicClove(); - GarlicMessage msg = buildMessage(config); - if (msg == null) - throw new DataFormatException("Unable to build message from clove config"); - clove.setData(msg); - return buildCommonClove(clove, config); + private static byte[] buildClove(RouterContext ctx, GarlicConfig config) throws DataFormatException, IOException { + GarlicClove clove = new GarlicClove(ctx); + GarlicMessage msg = buildMessage(ctx, config); + if (msg == null) + throw new DataFormatException("Unable to build message from clove config"); + clove.setData(msg); + return buildCommonClove(ctx, clove, config); } - private static byte[] buildCommonClove(GarlicClove clove, GarlicConfig config) throws DataFormatException, IOException { - clove.setCertificate(config.getCertificate()); - clove.setCloveId(config.getId()); - clove.setExpiration(new Date(config.getExpiration())); - clove.setInstructions(config.getDeliveryInstructions()); - 
specifySourceRouteBlock(clove, config); - ByteArrayOutputStream baos = new ByteArrayOutputStream(1024); - clove.writeBytes(baos); - return baos.toByteArray(); + private static byte[] buildCommonClove(RouterContext ctx, GarlicClove clove, GarlicConfig config) throws DataFormatException, IOException { + clove.setCertificate(config.getCertificate()); + clove.setCloveId(config.getId()); + clove.setExpiration(new Date(config.getExpiration())); + clove.setInstructions(config.getDeliveryInstructions()); + specifySourceRouteBlock(ctx, clove, config); + ByteArrayOutputStream baos = new ByteArrayOutputStream(1024); + clove.writeBytes(baos); + return baos.toByteArray(); } - private static void specifySourceRouteBlock(GarlicClove clove, GarlicConfig config) throws DataFormatException { - boolean includeBlock = false; - if (config.getRequestAck()) { - clove.setSourceRouteBlockAction(GarlicClove.ACTION_STATUS); - includeBlock = true; - } else if (config.getReplyInstructions() != null) { - clove.setSourceRouteBlockAction(GarlicClove.ACTION_MESSAGE_SPECIFIC); - includeBlock = true; - } else { - clove.setSourceRouteBlockAction(GarlicClove.ACTION_NONE); - } - - if (includeBlock) { - _log.debug("Specifying source route block"); - - SessionKey replySessionKey = KeyGenerator.getInstance().generateSessionKey(); - SessionTag tag = new SessionTag(true); - - // make it so we'll read the session tag correctly and use the right session key - HashSet tags = new HashSet(1); - tags.add(tag); - SessionKeyManager.getInstance().tagsReceived(replySessionKey, tags); - - SourceRouteBlock block = new SourceRouteBlock(); - PublicKey pk = config.getReplyThroughRouter().getIdentity().getPublicKey(); - block.setData(config.getReplyInstructions(), config.getReplyBlockMessageId(), - config.getReplyBlockCertificate(), config.getReplyBlockExpiration(), pk); - block.setRouter(config.getReplyThroughRouter().getIdentity().getHash()); - block.setKey(replySessionKey); - block.setTag(tag); - 
clove.setSourceRouteBlock(block); - } else { - clove.setSourceRouteBlock(null); - } + private static void specifySourceRouteBlock(RouterContext ctx, GarlicClove clove, GarlicConfig config) throws DataFormatException { + Log log = ctx.logManager().getLog(GarlicMessageBuilder.class); + boolean includeBlock = false; + if (config.getRequestAck()) { + clove.setSourceRouteBlockAction(GarlicClove.ACTION_STATUS); + includeBlock = true; + } else if (config.getReplyInstructions() != null) { + clove.setSourceRouteBlockAction(GarlicClove.ACTION_MESSAGE_SPECIFIC); + includeBlock = true; + } else { + clove.setSourceRouteBlockAction(GarlicClove.ACTION_NONE); + } + + if (includeBlock) { + log.debug("Specifying source route block"); + + SessionKey replySessionKey = ctx.keyGenerator().generateSessionKey(); + SessionTag tag = new SessionTag(true); + + // make it so we'll read the session tag correctly and use the right session key + HashSet tags = new HashSet(1); + tags.add(tag); + ctx.sessionKeyManager().tagsReceived(replySessionKey, tags); + + SourceRouteBlock block = new SourceRouteBlock(); + PublicKey pk = config.getReplyThroughRouter().getIdentity().getPublicKey(); + block.setData(ctx, config.getReplyInstructions(), config.getReplyBlockMessageId(), + config.getReplyBlockCertificate(), config.getReplyBlockExpiration(), pk); + block.setRouter(config.getReplyThroughRouter().getIdentity().getHash()); + block.setKey(replySessionKey); + block.setTag(tag); + clove.setSourceRouteBlock(block); + } else { + clove.setSourceRouteBlock(null); + } } } diff --git a/router/java/src/net/i2p/router/message/GarlicMessageHandler.java b/router/java/src/net/i2p/router/message/GarlicMessageHandler.java index 9817086b0..3274e2315 100644 --- a/router/java/src/net/i2p/router/message/GarlicMessageHandler.java +++ b/router/java/src/net/i2p/router/message/GarlicMessageHandler.java @@ -1,9 +1,9 @@ package net.i2p.router.message; /* * free (adj.): unencumbered; not under the control of others - * Written by 
jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -15,17 +15,23 @@ import net.i2p.data.i2np.I2NPMessage; import net.i2p.data.i2np.SourceRouteBlock; import net.i2p.router.HandlerJobBuilder; import net.i2p.router.Job; +import net.i2p.router.RouterContext; /** * HandlerJobBuilder to build jobs to handle GarlicMessages * */ public class GarlicMessageHandler implements HandlerJobBuilder { + private RouterContext _context; + + public GarlicMessageHandler(RouterContext context) { + _context = context; + } public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock) { - // ignore the reply block for the moment - HandleGarlicMessageJob job = new HandleGarlicMessageJob((GarlicMessage)receivedMessage, from, fromHash); - return job; + // ignore the reply block for the moment + HandleGarlicMessageJob job = new HandleGarlicMessageJob(_context, (GarlicMessage)receivedMessage, from, fromHash); + return job; } } diff --git a/router/java/src/net/i2p/router/message/GarlicMessageParser.java b/router/java/src/net/i2p/router/message/GarlicMessageParser.java index eb4650edd..019e9f806 100644 --- a/router/java/src/net/i2p/router/message/GarlicMessageParser.java +++ b/router/java/src/net/i2p/router/message/GarlicMessageParser.java @@ -1,9 +1,9 @@ package net.i2p.router.message; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. 
- * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -22,71 +22,75 @@ import net.i2p.data.PrivateKey; import net.i2p.data.i2np.GarlicClove; import net.i2p.data.i2np.GarlicMessage; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** - * Read a GarlicMessage, decrypt it, and return the resulting CloveSet + * Read a GarlicMessage, decrypt it, and return the resulting CloveSet * */ public class GarlicMessageParser { - private final static Log _log = new Log(GarlicMessageParser.class); - private static GarlicMessageParser _instance = new GarlicMessageParser(); - public static GarlicMessageParser getInstance() { return _instance; } - private GarlicMessageParser() {} + private Log _log; + private RouterContext _context; + + public GarlicMessageParser(RouterContext context) { + _context = context; + _log = _context.logManager().getLog(GarlicMessageParser.class); + } public CloveSet getGarlicCloves(GarlicMessage message, PrivateKey encryptionKey) { - byte encData[] = message.getData(); - byte decrData[] = null; - try { - _log.debug("Decrypting with private key " + encryptionKey); - decrData = ElGamalAESEngine.decrypt(encData, encryptionKey); - } catch (DataFormatException dfe) { - _log.warn("Error decrypting", dfe); - } - if (decrData == null) { - _log.debug("Decryption of garlic message failed"); - return null; - } else { - return readCloveSet(decrData); - } + byte encData[] = message.getData(); + byte decrData[] = null; + try { + _log.debug("Decrypting with private key " + encryptionKey); + decrData = _context.elGamalAESEngine().decrypt(encData, encryptionKey); + } catch (DataFormatException dfe) { + _log.warn("Error decrypting", dfe); + } + if (decrData == null) { + _log.debug("Decryption of 
garlic message failed"); + return null; + } else { + return readCloveSet(decrData); + } } private CloveSet readCloveSet(byte data[]) { - Set cloves = new HashSet(); - ByteArrayInputStream bais = new ByteArrayInputStream(data); - try { - CloveSet set = new CloveSet(); - - int numCloves = (int)DataHelper.readLong(bais, 1); - _log.debug("# cloves to read: " + numCloves); - for (int i = 0; i < numCloves; i++) { - _log.debug("Reading clove " + i); - try { - GarlicClove clove = new GarlicClove(); - clove.readBytes(bais); - set.addClove(clove); - } catch (DataFormatException dfe) { - _log.warn("Unable to read clove " + i, dfe); - } catch (IOException ioe) { - _log.warn("Unable to read clove " + i, ioe); - } - _log.debug("After reading clove " + i); - } - Certificate cert = new Certificate(); - cert.readBytes(bais); - long msgId = DataHelper.readLong(bais, 4); - Date expiration = DataHelper.readDate(bais); - - set.setCertificate(cert); - set.setMessageId(msgId); - set.setExpiration(expiration.getTime()); - - return set; - } catch (IOException ioe) { - _log.error("Error reading clove set", ioe); - return null; - } catch (DataFormatException dfe) { - _log.error("Error reading clove set", dfe); - return null; - } + Set cloves = new HashSet(); + ByteArrayInputStream bais = new ByteArrayInputStream(data); + try { + CloveSet set = new CloveSet(); + + int numCloves = (int)DataHelper.readLong(bais, 1); + _log.debug("# cloves to read: " + numCloves); + for (int i = 0; i < numCloves; i++) { + _log.debug("Reading clove " + i); + try { + GarlicClove clove = new GarlicClove(_context); + clove.readBytes(bais); + set.addClove(clove); + } catch (DataFormatException dfe) { + _log.warn("Unable to read clove " + i, dfe); + } catch (IOException ioe) { + _log.warn("Unable to read clove " + i, ioe); + } + _log.debug("After reading clove " + i); + } + Certificate cert = new Certificate(); + cert.readBytes(bais); + long msgId = DataHelper.readLong(bais, 4); + Date expiration = 
DataHelper.readDate(bais); + + set.setCertificate(cert); + set.setMessageId(msgId); + set.setExpiration(expiration.getTime()); + + return set; + } catch (IOException ioe) { + _log.error("Error reading clove set", ioe); + return null; + } catch (DataFormatException dfe) { + _log.error("Error reading clove set", dfe); + return null; + } } } diff --git a/router/java/src/net/i2p/router/message/HandleGarlicMessageJob.java b/router/java/src/net/i2p/router/message/HandleGarlicMessageJob.java index e8a6277b8..718df9455 100644 --- a/router/java/src/net/i2p/router/message/HandleGarlicMessageJob.java +++ b/router/java/src/net/i2p/router/message/HandleGarlicMessageJob.java @@ -28,6 +28,7 @@ import net.i2p.router.Router; import net.i2p.stat.StatManager; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Unencrypt a garlic message and handle each of the cloves - locally destined @@ -37,40 +38,42 @@ import net.i2p.util.Log; * */ public class HandleGarlicMessageJob extends JobImpl { - private final static Log _log = new Log(HandleGarlicMessageJob.class); + private Log _log; private GarlicMessage _message; private RouterIdentity _from; private Hash _fromHash; - private static Map _cloves; // map of clove Id --> Expiration of cloves we've already seen - - static { - StatManager.getInstance().createRateStat("crypto.garlic.decryptFail", "How often garlic messages are undecryptable", "Encryption", new long[] { 5*60*1000, 60*60*1000, 24*60*60*1000 }); - } - + private Map _cloves; // map of clove Id --> Expiration of cloves we've already seen + private MessageHandler _handler; + private GarlicMessageParser _parser; + private final static int FORWARD_PRIORITY = 50; - public HandleGarlicMessageJob(GarlicMessage msg, RouterIdentity from, Hash fromHash) { - super(); + public HandleGarlicMessageJob(RouterContext context, GarlicMessage msg, RouterIdentity from, Hash fromHash) { + super(context); + _log = 
context.logManager().getLog(HandleGarlicMessageJob.class); + _context.statManager().createRateStat("crypto.garlic.decryptFail", "How often garlic messages are undecryptable", "Encryption", new long[] { 5*60*1000, 60*60*1000, 24*60*60*1000 }); if (_log.shouldLog(Log.DEBUG)) _log.debug("New handle garlicMessageJob called w/ message from [" + from + "]", new Exception("Debug")); _message = msg; _from = from; _fromHash = fromHash; _cloves = new HashMap(); + _handler = new MessageHandler(context); + _parser = new GarlicMessageParser(context); } public String getName() { return "Handle Inbound Garlic Message"; } public void runJob() { - CloveSet set = GarlicMessageParser.getInstance().getGarlicCloves(_message, KeyManager.getInstance().getPrivateKey()); + CloveSet set = _parser.getGarlicCloves(_message, _context.keyManager().getPrivateKey()); if (set == null) { - Set keys = KeyManager.getInstance().getAllKeys(); + Set keys = _context.keyManager().getAllKeys(); if (_log.shouldLog(Log.DEBUG)) _log.debug("Decryption with the router's key failed, now try with the " + keys.size() + " leaseSet keys"); // our router key failed, which means that it was either encrypted wrong // or it was encrypted to a LeaseSet's PublicKey for (Iterator iter = keys.iterator(); iter.hasNext();) { LeaseSetKeys lskeys = (LeaseSetKeys)iter.next(); - set = GarlicMessageParser.getInstance().getGarlicCloves(_message, lskeys.getDecryptionKey()); + set = _parser.getGarlicCloves(_message, lskeys.getDecryptionKey()); if (set != null) { if (_log.shouldLog(Log.DEBUG)) _log.debug("Decrypted garlic message with lease set key for destination " @@ -96,14 +99,14 @@ public class HandleGarlicMessageJob extends JobImpl { _log.error("CloveMessageParser failed to decrypt the message [" + _message.getUniqueId() + "] to us when received from [" + _fromHash + "] / [" + _from + "]", new Exception("Decrypt garlic failed")); - StatManager.getInstance().addRateData("crypto.garlic.decryptFail", 1, 0); - 
MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(), + _context.statManager().addRateData("crypto.garlic.decryptFail", 1, 0); + _context.messageHistory().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Garlic could not be decrypted"); } } - private static boolean isKnown(long cloveId) { + private boolean isKnown(long cloveId) { boolean known = false; synchronized (_cloves) { known = _cloves.containsKey(new Long(cloveId)); @@ -113,11 +116,11 @@ public class HandleGarlicMessageJob extends JobImpl { return known; } - private static void cleanupCloves() { + private void cleanupCloves() { // this should be in its own thread perhaps? and maybe _cloves should be // synced to disk? List toRemove = new ArrayList(32); - long now = Clock.getInstance().now(); + long now = _context.clock().now(); synchronized (_cloves) { for (Iterator iter = _cloves.keySet().iterator(); iter.hasNext();) { Long id = (Long)iter.next(); @@ -131,7 +134,7 @@ public class HandleGarlicMessageJob extends JobImpl { } } - private static boolean isValid(GarlicClove clove) { + private boolean isValid(GarlicClove clove) { if (isKnown(clove.getCloveId())) { _log.error("Duplicate garlic clove received - replay attack in progress? [cloveId = " + clove.getCloveId() + " expiration = " + clove.getExpiration()); @@ -140,7 +143,7 @@ public class HandleGarlicMessageJob extends JobImpl { _log.debug("Clove " + clove.getCloveId() + " expiring on " + clove.getExpiration() + " is not known"); } - long now = Clock.getInstance().now(); + long now = _context.clock().now(); if (clove.getExpiration().getTime() < now) { if (clove.getExpiration().getTime() < now + Router.CLOCK_FUDGE_FACTOR) { _log.warn("Expired garlic received, but within our fudge factor [" @@ -149,7 +152,7 @@ public class HandleGarlicMessageJob extends JobImpl { if (_log.shouldLog(Log.DEBUG)) _log.error("Expired garlic clove received - replay attack in progress? 
[cloveId = " + clove.getCloveId() + " expiration = " + clove.getExpiration() - + " now = " + (new Date(Clock.getInstance().now()))); + + " now = " + (new Date(_context.clock().now()))); return false; } } @@ -168,15 +171,15 @@ public class HandleGarlicMessageJob extends JobImpl { } boolean requestAck = (clove.getSourceRouteBlockAction() == GarlicClove.ACTION_STATUS); long sendExpiration = clove.getExpiration().getTime(); - MessageHandler.getInstance().handleMessage(clove.getInstructions(), clove.getData(), - requestAck, clove.getSourceRouteBlock(), - clove.getCloveId(), _from, _fromHash, - sendExpiration, FORWARD_PRIORITY); + _handler.handleMessage(clove.getInstructions(), clove.getData(), + requestAck, clove.getSourceRouteBlock(), + clove.getCloveId(), _from, _fromHash, + sendExpiration, FORWARD_PRIORITY); } public void dropped() { - MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(), - _message.getClass().getName(), - "Dropped due to overload"); + _context.messageHistory().messageProcessingError(_message.getUniqueId(), + _message.getClass().getName(), + "Dropped due to overload"); } } diff --git a/router/java/src/net/i2p/router/message/HandleSourceRouteReplyMessageJob.java b/router/java/src/net/i2p/router/message/HandleSourceRouteReplyMessageJob.java index d3fcbd6dc..6bb2439c7 100644 --- a/router/java/src/net/i2p/router/message/HandleSourceRouteReplyMessageJob.java +++ b/router/java/src/net/i2p/router/message/HandleSourceRouteReplyMessageJob.java @@ -26,6 +26,7 @@ import net.i2p.router.MessageHistory; import net.i2p.router.Router; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Handle a source route reply - decrypt the instructions and forward the message @@ -33,111 +34,126 @@ import net.i2p.util.Log; * */ public class HandleSourceRouteReplyMessageJob extends JobImpl { - private final static Log _log = new Log(HandleSourceRouteReplyMessageJob.class); + private Log _log; private 
SourceRouteReplyMessage _message; private RouterIdentity _from; private Hash _fromHash; - private static Map _seenMessages; // Long msgId --> Date seen + private Map _seenMessages; // Long msgId --> Date seen + private MessageHandler _handler; public final static int PRIORITY = 150; - - public HandleSourceRouteReplyMessageJob(SourceRouteReplyMessage msg, RouterIdentity from, Hash fromHash) { - super(); - _message = msg; - _from = from; - _fromHash = fromHash; - _seenMessages = new HashMap(); + + public HandleSourceRouteReplyMessageJob(RouterContext context, SourceRouteReplyMessage msg, RouterIdentity from, Hash fromHash) { + super(context); + _log = _context.logManager().getLog(HandleSourceRouteReplyMessageJob.class); + _message = msg; + _from = from; + _fromHash = fromHash; + _seenMessages = new HashMap(); + _handler = new MessageHandler(context); } public String getName() { return "Handle Source Route Reply Message"; } public void runJob() { - try { - long before = Clock.getInstance().now(); - _message.decryptHeader(KeyManager.getInstance().getPrivateKey()); - long after = Clock.getInstance().now(); - if ( (after-before) > 1000) { - _log.warn("Took more than a second (" + (after-before) + ") to decrypt the sourceRoute header"); - } else { - _log.debug("Took LESS than a second (" + (after-before) + ") to decrypt the sourceRoute header"); - } - } catch (DataFormatException dfe) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Error decrypting the source route message's header (message " + _message.getUniqueId() + ")", dfe); - if (_log.shouldLog(Log.WARN)) - _log.warn("Message header could not be decrypted: " + _message, getAddedBy()); - MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Source route message header could not be decrypted"); - return; - } - - if (!isValid()) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Error validating source route message, dropping: " + _message); - return; - } - - 
DeliveryInstructions instructions = _message.getDecryptedInstructions(); - - long now = Clock.getInstance().now(); - long expiration = _message.getDecryptedExpiration(); - // if its expiring really soon, jack the expiration 30 seconds - if (expiration < now+10*1000) - expiration = now + 60*1000; - - boolean requestAck = false; - MessageHandler.getInstance().handleMessage(instructions, _message.getMessage(), requestAck, null, - _message.getDecryptedMessageId(), _from, _fromHash, expiration, PRIORITY); + try { + long before = _context.clock().now(); + _message.decryptHeader(_context.keyManager().getPrivateKey()); + long after = _context.clock().now(); + if ( (after-before) > 1000) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Took more than a second (" + (after-before) + + ") to decrypt the sourceRoute header"); + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Took LESS than a second (" + (after-before) + + ") to decrypt the sourceRoute header"); + } + } catch (DataFormatException dfe) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Error decrypting the source route message's header (message " + + _message.getUniqueId() + ")", dfe); + if (_log.shouldLog(Log.WARN)) + _log.warn("Message header could not be decrypted: " + _message, getAddedBy()); + _context.messageHistory().messageProcessingError(_message.getUniqueId(), + _message.getClass().getName(), + "Source route message header could not be decrypted"); + return; + } + + if (!isValid()) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Error validating source route message, dropping: " + _message); + return; + } + + DeliveryInstructions instructions = _message.getDecryptedInstructions(); + + long now = _context.clock().now(); + long expiration = _message.getDecryptedExpiration(); + // if its expiring really soon, jack the expiration 30 seconds + if (expiration < now+10*1000) + expiration = now + 60*1000; + + boolean requestAck = false; + _handler.handleMessage(instructions, _message.getMessage(), 
requestAck, null, + _message.getDecryptedMessageId(), _from, _fromHash, expiration, PRIORITY); } private boolean isValid() { - long now = Clock.getInstance().now(); - if (_message.getDecryptedExpiration() < now) { - if (_message.getDecryptedExpiration() < now + Router.CLOCK_FUDGE_FACTOR) { - _log.info("Expired message received, but within our fudge factor"); - } else { - _log.error("Source route reply message expired. Replay attack? msgId = " + _message.getDecryptedMessageId() + " expiration = " + new Date(_message.getDecryptedExpiration())); - return false; - } - } - if (!isValidMessageId(_message.getDecryptedMessageId(), _message.getDecryptedExpiration())) { - _log.error("Source route reply message already received! Replay attack? msgId = " + _message.getDecryptedMessageId() + " expiration = " + new Date(_message.getDecryptedExpiration())); - return false; - } - return true; + long now = _context.clock().now(); + if (_message.getDecryptedExpiration() < now) { + if (_message.getDecryptedExpiration() < now + Router.CLOCK_FUDGE_FACTOR) { + _log.info("Expired message received, but within our fudge factor"); + } else { + _log.error("Source route reply message expired. Replay attack? msgId = " + + _message.getDecryptedMessageId() + " expiration = " + + new Date(_message.getDecryptedExpiration())); + return false; + } + } + if (!isValidMessageId(_message.getDecryptedMessageId(), _message.getDecryptedExpiration())) { + _log.error("Source route reply message already received! Replay attack? 
msgId = " + + _message.getDecryptedMessageId() + " expiration = " + + new Date(_message.getDecryptedExpiration())); + return false; + } + return true; } - private static boolean isValidMessageId(long msgId, long expiration) { - synchronized (_seenMessages) { - if (_seenMessages.containsKey(new Long(msgId))) - return false; - - - _seenMessages.put(new Long(msgId), new Date(expiration)); - } - // essentially random - if ((msgId % 10) == 0) { - cleanupMessages(); - } - return true; + private boolean isValidMessageId(long msgId, long expiration) { + synchronized (_seenMessages) { + if (_seenMessages.containsKey(new Long(msgId))) + return false; + + _seenMessages.put(new Long(msgId), new Date(expiration)); + } + // essentially random + if ((msgId % 10) == 0) { + cleanupMessages(); + } + return true; } - private static void cleanupMessages() { - // this should be in its own thread perhaps, or job? and maybe _seenMessages should be - // synced to disk? - List toRemove = new ArrayList(32); - long now = Clock.getInstance().now()-Router.CLOCK_FUDGE_FACTOR; - synchronized (_seenMessages) { - for (Iterator iter = _seenMessages.keySet().iterator(); iter.hasNext();) { - Long id = (Long)iter.next(); - Date exp = (Date)_seenMessages.get(id); - if (now > exp.getTime()) - toRemove.add(id); - } - for (int i = 0; i < toRemove.size(); i++) - _seenMessages.remove(toRemove.get(i)); - } + private void cleanupMessages() { + // this should be in its own thread perhaps, or job? and maybe _seenMessages should be + // synced to disk? 
+ List toRemove = new ArrayList(32); + long now = _context.clock().now()-Router.CLOCK_FUDGE_FACTOR; + synchronized (_seenMessages) { + for (Iterator iter = _seenMessages.keySet().iterator(); iter.hasNext();) { + Long id = (Long)iter.next(); + Date exp = (Date)_seenMessages.get(id); + if (now > exp.getTime()) + toRemove.add(id); + } + for (int i = 0; i < toRemove.size(); i++) + _seenMessages.remove(toRemove.get(i)); + } } public void dropped() { - MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Dropped due to overload"); + _context.messageHistory().messageProcessingError(_message.getUniqueId(), + _message.getClass().getName(), + "Dropped due to overload"); } } diff --git a/router/java/src/net/i2p/router/message/HandleTunnelMessageJob.java b/router/java/src/net/i2p/router/message/HandleTunnelMessageJob.java index 465cc6241..aea4d1ba4 100644 --- a/router/java/src/net/i2p/router/message/HandleTunnelMessageJob.java +++ b/router/java/src/net/i2p/router/message/HandleTunnelMessageJob.java @@ -45,26 +45,26 @@ import net.i2p.router.TunnelManagerFacade; import net.i2p.stat.StatManager; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; public class HandleTunnelMessageJob extends JobImpl { - private final static Log _log = new Log(HandleTunnelMessageJob.class); + private Log _log; private TunnelMessage _message; private RouterIdentity _from; private Hash _fromHash; - private final static I2NPMessageHandler _handler = new I2NPMessageHandler(); + private I2NPMessageHandler _handler; private final static long FORWARD_TIMEOUT = 60*1000; private final static int FORWARD_PRIORITY = 400; - static { - StatManager.getInstance().createRateStat("tunnel.unknownTunnelTimeLeft", "How much time is left on tunnel messages we receive that are for unknown tunnels?", "Tunnels", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - 
StatManager.getInstance().createRateStat("tunnel.gatewayMessageSize", "How large are the messages we are forwarding on as an inbound gateway?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createRateStat("tunnel.relayMessageSize", "How large are the messages we are forwarding on as a participant in a tunnel?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createRateStat("tunnel.endpointMessageSize", "How large are the messages we are forwarding in as an outbound endpoint?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - } - - public HandleTunnelMessageJob(TunnelMessage msg, RouterIdentity from, Hash fromHash) { - super(); + public HandleTunnelMessageJob(RouterContext ctx, TunnelMessage msg, RouterIdentity from, Hash fromHash) { + super(ctx); + _log = ctx.logManager().getLog(HandleTunnelMessageJob.class); + _handler = new I2NPMessageHandler(ctx); + ctx.statManager().createRateStat("tunnel.unknownTunnelTimeLeft", "How much time is left on tunnel messages we receive that are for unknown tunnels?", "Tunnels", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("tunnel.gatewayMessageSize", "How large are the messages we are forwarding on as an inbound gateway?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("tunnel.relayMessageSize", "How large are the messages we are forwarding on as a participant in a tunnel?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("tunnel.endpointMessageSize", "How large are the messages we are forwarding in as an outbound endpoint?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); _message = msg; _from = from; _fromHash = fromHash; @@ -73,18 +73,18 @@ public class HandleTunnelMessageJob extends JobImpl { public String getName() { return "Handle Inbound Tunnel 
Message"; } public void runJob() { TunnelId id = _message.getTunnelId(); - TunnelInfo info = TunnelManagerFacade.getInstance().getTunnelInfo(id); + TunnelInfo info = _context.tunnelManager().getTunnelInfo(id); if (info == null) { Hash from = _fromHash; if (_from != null) from = _from.getHash(); - MessageHistory.getInstance().droppedTunnelMessage(id, from); + _context.messageHistory().droppedTunnelMessage(id, from); if (_log.shouldLog(Log.ERROR)) _log.error("Received a message for an unknown tunnel [" + id.getTunnelId() + "], dropping it: " + _message, getAddedBy()); - long timeRemaining = _message.getMessageExpiration().getTime() - Clock.getInstance().now(); - StatManager.getInstance().addRateData("tunnel.unknownTunnelTimeLeft", timeRemaining, 0); + long timeRemaining = _message.getMessageExpiration().getTime() - _context.clock().now(); + _context.statManager().addRateData("tunnel.unknownTunnelTimeLeft", timeRemaining, 0); return; } @@ -92,8 +92,8 @@ public class HandleTunnelMessageJob extends JobImpl { if (info == null) { if (_log.shouldLog(Log.ERROR)) _log.error("We are not part of a known tunnel?? wtf! 
drop.", getAddedBy()); - long timeRemaining = _message.getMessageExpiration().getTime() - Clock.getInstance().now(); - StatManager.getInstance().addRateData("tunnel.unknownTunnelTimeLeft", timeRemaining, 0); + long timeRemaining = _message.getMessageExpiration().getTime() - _context.clock().now(); + _context.statManager().addRateData("tunnel.unknownTunnelTimeLeft", timeRemaining, 0); return; } else { if (_log.shouldLog(Log.DEBUG)) @@ -108,7 +108,7 @@ public class HandleTunnelMessageJob extends JobImpl { _log.debug("We are the gateway to tunnel " + id.getTunnelId()); byte data[] = _message.getData(); I2NPMessage msg = getBody(data); - JobQueue.getInstance().addJob(new HandleGatewayMessageJob(msg, info, data.length)); + _context.jobQueue().addJob(new HandleGatewayMessageJob(msg, info, data.length)); return; } else { if (_log.shouldLog(Log.DEBUG)) @@ -116,23 +116,23 @@ public class HandleTunnelMessageJob extends JobImpl { if (_log.shouldLog(Log.DEBUG)) _log.debug("Process locally"); if (info.getDestination() != null) { - if (!ClientManagerFacade.getInstance().isLocal(info.getDestination())) { + if (!_context.clientManager().isLocal(info.getDestination())) { if (_log.shouldLog(Log.WARN)) _log.warn("Received a message on a tunnel allocated to a client that has disconnected - dropping it!"); if (_log.shouldLog(Log.DEBUG)) _log.debug("Dropping message for disconnected client: " + _message); - MessageHistory.getInstance().droppedOtherMessage(_message); - MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(), - _message.getClass().getName(), - "Disconnected client"); + _context.messageHistory().droppedOtherMessage(_message); + _context.messageHistory().messageProcessingError(_message.getUniqueId(), + _message.getClass().getName(), + "Disconnected client"); return; } } I2NPMessage body = getBody(_message.getData()); if (body != null) { - JobQueue.getInstance().addJob(new HandleLocallyJob(body, info)); + _context.jobQueue().addJob(new 
HandleLocallyJob(body, info)); return; } else { if (_log.shouldLog(Log.ERROR)) @@ -152,7 +152,7 @@ public class HandleTunnelMessageJob extends JobImpl { } else { // participant TunnelVerificationStructure struct = _message.getVerificationStructure(); - boolean ok = struct.verifySignature(info.getVerificationKey().getKey()); + boolean ok = struct.verifySignature(_context, info.getVerificationKey().getKey()); if (!ok) { if (_log.shouldLog(Log.WARN)) _log.warn("Failed tunnel verification! Spoofing / tagging attack? " + _message, getAddedBy()); @@ -164,16 +164,18 @@ public class HandleTunnelMessageJob extends JobImpl { + " received where we're not the gateway and there are remaining hops, so forward it on to " + info.getNextHop().toBase64() + " via SendTunnelMessageJob"); - StatManager.getInstance().addRateData("tunnel.relayMessageSize", - _message.getData().length, 0); + _context.statManager().addRateData("tunnel.relayMessageSize", + _message.getData().length, 0); - JobQueue.getInstance().addJob(new SendMessageDirectJob(_message, info.getNextHop(), - Clock.getInstance().now() + FORWARD_TIMEOUT, FORWARD_PRIORITY)); + _context.jobQueue().addJob(new SendMessageDirectJob(_context, _message, + info.getNextHop(), + _context.clock().now() + FORWARD_TIMEOUT, + FORWARD_PRIORITY)); return; } else { if (_log.shouldLog(Log.DEBUG)) _log.debug("No more hops, unwrap and follow the instructions"); - JobQueue.getInstance().addJob(new HandleEndpointJob(info)); + _context.jobQueue().addJob(new HandleEndpointJob(info)); return; } } @@ -210,20 +212,20 @@ public class HandleTunnelMessageJob extends JobImpl { _log.error("Unable to recover the body from the tunnel", getAddedBy()); return; } else { - JobQueue.getInstance().addJob(new ProcessBodyLocallyJob(body, instructions, ourPlace)); + _context.jobQueue().addJob(new ProcessBodyLocallyJob(body, instructions, ourPlace)); } } } private void honorInstructions(DeliveryInstructions instructions, I2NPMessage body) { - 
StatManager.getInstance().addRateData("tunnel.endpointMessageSize", _message.getData().length, 0); + _context.statManager().addRateData("tunnel.endpointMessageSize", _message.getData().length, 0); switch (instructions.getDeliveryMode()) { case DeliveryInstructions.DELIVERY_MODE_LOCAL: sendToLocal(body); break; case DeliveryInstructions.DELIVERY_MODE_ROUTER: - if (Router.getInstance().getRouterInfo().getIdentity().getHash().equals(instructions.getRouter())) { + if (_context.routerHash().equals(instructions.getRouter())) { if (_log.shouldLog(Log.DEBUG)) _log.debug("Delivery instructions point at a router, but we're that router, so send to local"); sendToLocal(body); @@ -244,7 +246,7 @@ public class HandleTunnelMessageJob extends JobImpl { private void sendToDest(Hash dest, I2NPMessage body) { if (body instanceof DataMessage) { - boolean isLocal = ClientManagerFacade.getInstance().isLocal(dest); + boolean isLocal = _context.clientManager().isLocal(dest); if (isLocal) { deliverMessage(null, dest, (DataMessage)body); return; @@ -265,17 +267,17 @@ public class HandleTunnelMessageJob extends JobImpl { if (_log.shouldLog(Log.DEBUG)) _log.debug("Sending on to requested tunnel " + id.getTunnelId() + " on router " + router.toBase64()); - TunnelMessage msg = new TunnelMessage(); + TunnelMessage msg = new TunnelMessage(_context); msg.setTunnelId(id); try { ByteArrayOutputStream baos = new ByteArrayOutputStream(1024); body.writeBytes(baos); msg.setData(baos.toByteArray()); - long exp = Clock.getInstance().now() + FORWARD_TIMEOUT; - JobQueue.getInstance().addJob(new SendMessageDirectJob(msg, router, exp, FORWARD_PRIORITY)); + long exp = _context.clock().now() + FORWARD_TIMEOUT; + _context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, router, exp, FORWARD_PRIORITY)); String bodyType = body.getClass().getName(); - MessageHistory.getInstance().wrap(bodyType, body.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId()); + _context.messageHistory().wrap(bodyType, 
body.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId()); } catch (DataFormatException dfe) { if (_log.shouldLog(Log.ERROR)) _log.error("Error writing out the message to forward to the tunnel", dfe); @@ -289,8 +291,8 @@ public class HandleTunnelMessageJob extends JobImpl { // TODO: we may want to send it via a tunnel later on, but for now, direct will do. if (_log.shouldLog(Log.DEBUG)) _log.debug("Sending on to requested router " + router.toBase64()); - long exp = Clock.getInstance().now() + FORWARD_TIMEOUT; - JobQueue.getInstance().addJob(new SendMessageDirectJob(body, router, exp, FORWARD_PRIORITY)); + long exp = _context.clock().now() + FORWARD_TIMEOUT; + _context.jobQueue().addJob(new SendMessageDirectJob(_context, body, router, exp, FORWARD_PRIORITY)); } private void sendToLocal(I2NPMessage body) { @@ -298,18 +300,18 @@ public class HandleTunnelMessageJob extends JobImpl { msg.setMessage(body); msg.setFromRouter(_from); msg.setFromRouterHash(_fromHash); - InNetMessagePool.getInstance().add(msg); + _context.inNetMessagePool().add(msg); } private void deliverMessage(Destination dest, Hash destHash, DataMessage msg) { - boolean valid = MessageValidator.getInstance().validateMessage(msg.getUniqueId(), msg.getMessageExpiration().getTime()); + boolean valid = _context.messageValidator().validateMessage(msg.getUniqueId(), msg.getMessageExpiration().getTime()); if (!valid) { if (_log.shouldLog(Log.WARN)) _log.warn("Duplicate data message received [" + msg.getUniqueId() + " expiring on " + msg.getMessageExpiration() + "]"); - MessageHistory.getInstance().droppedOtherMessage(msg); - MessageHistory.getInstance().messageProcessingError(msg.getUniqueId(), msg.getClass().getName(), - "Duplicate payload"); + _context.messageHistory().droppedOtherMessage(msg); + _context.messageHistory().messageProcessingError(msg.getUniqueId(), msg.getClass().getName(), + "Duplicate payload"); return; } @@ -327,9 +329,9 @@ public class HandleTunnelMessageJob extends JobImpl { 
cmsg.setPayload(payload); cmsg.setReceptionInfo(info); - MessageHistory.getInstance().receivePayloadMessage(msg.getUniqueId()); + _context.messageHistory().receivePayloadMessage(msg.getUniqueId()); // if the destination isn't local, the ClientMessagePool forwards it off as an OutboundClientMessageJob - ClientMessagePool.getInstance().add(cmsg); + _context.clientMessagePool().add(cmsg); } private I2NPMessage getBody(byte body[]) { @@ -347,9 +349,9 @@ public class HandleTunnelMessageJob extends JobImpl { private I2NPMessage decryptBody(byte encryptedMessage[], SessionKey key) { byte iv[] = new byte[16]; - Hash h = SHA256Generator.getInstance().calculateHash(key.getData()); + Hash h = _context.sha().calculateHash(key.getData()); System.arraycopy(h.getData(), 0, iv, 0, iv.length); - byte decrypted[] = AESEngine.getInstance().safeDecrypt(encryptedMessage, key, iv); + byte decrypted[] = _context.AESEngine().safeDecrypt(encryptedMessage, key, iv); if (decrypted == null) { if (_log.shouldLog(Log.ERROR)) _log.error("Error decrypting the message", getAddedBy()); @@ -361,9 +363,9 @@ public class HandleTunnelMessageJob extends JobImpl { private DeliveryInstructions getInstructions(byte encryptedInstructions[], SessionKey key) { try { byte iv[] = new byte[16]; - Hash h = SHA256Generator.getInstance().calculateHash(key.getData()); + Hash h = _context.sha().calculateHash(key.getData()); System.arraycopy(h.getData(), 0, iv, 0, iv.length); - byte decrypted[] = AESEngine.getInstance().safeDecrypt(encryptedInstructions, key, iv); + byte decrypted[] = _context.AESEngine().safeDecrypt(encryptedInstructions, key, iv); if (decrypted == null) { if (_log.shouldLog(Log.ERROR)) _log.error("Error decrypting the instructions", getAddedBy()); @@ -383,7 +385,7 @@ public class HandleTunnelMessageJob extends JobImpl { } private TunnelInfo getUs(TunnelInfo info) { - Hash us = Router.getInstance().getRouterInfo().getIdentity().getHash(); + Hash us = _context.routerHash(); while (info != null) { if 
(us.equals(info.getThisHop())) return info; @@ -406,7 +408,7 @@ public class HandleTunnelMessageJob extends JobImpl { return false; } - if (!vstruct.verifySignature(info.getVerificationKey().getKey())) { + if (!vstruct.verifySignature(_context, info.getVerificationKey().getKey())) { if (_log.shouldLog(Log.ERROR)) _log.error("Received a tunnel message with an invalid signature!"); // shitlist the sender? @@ -414,7 +416,7 @@ public class HandleTunnelMessageJob extends JobImpl { } // now validate the message - Hash msgHash = SHA256Generator.getInstance().calculateHash(_message.getData()); + Hash msgHash = _context.sha().calculateHash(_message.getData()); if (msgHash.equals(vstruct.getMessageHash())) { // hash matches. good. return true; @@ -427,8 +429,8 @@ public class HandleTunnelMessageJob extends JobImpl { } public void dropped() { - MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), - "Dropped due to overload"); + _context.messageHistory().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), + "Dropped due to overload"); } //// @@ -442,17 +444,19 @@ public class HandleTunnelMessageJob extends JobImpl { private TunnelInfo _info; public HandleGatewayMessageJob(I2NPMessage body, TunnelInfo tunnel, int length) { + super(HandleTunnelMessageJob.this._context); _body = body; _length = length; _info = tunnel; } public void runJob() { + RouterContext ctx = HandleTunnelMessageJob.this._context; if (_body != null) { - StatManager.getInstance().addRateData("tunnel.gatewayMessageSize", _length, 0); + ctx.statManager().addRateData("tunnel.gatewayMessageSize", _length, 0); if (_log.shouldLog(Log.INFO)) _log.info("Message for tunnel " + _info.getTunnelId() + " received at the gateway (us), and since its > 0 length, forward the " + _body.getClass().getName() + " message on to " + _info.getNextHop().toBase64() + " via SendTunnelMessageJob"); - JobQueue.getInstance().addJob(new 
SendTunnelMessageJob(_body, _info.getTunnelId(), null, null, null, null, FORWARD_TIMEOUT, FORWARD_PRIORITY)); + ctx.jobQueue().addJob(new SendTunnelMessageJob(ctx, _body, _info.getTunnelId(), null, null, null, null, FORWARD_TIMEOUT, FORWARD_PRIORITY)); } else { if (_log.shouldLog(Log.WARN)) _log.warn("Body of the message for the tunnel could not be parsed"); @@ -469,6 +473,7 @@ public class HandleTunnelMessageJob extends JobImpl { private TunnelInfo _info; public HandleLocallyJob(I2NPMessage body, TunnelInfo tunnel) { + super(HandleTunnelMessageJob.this._context); _body = body; _info = tunnel; } @@ -491,7 +496,7 @@ public class HandleTunnelMessageJob extends JobImpl { msg.setFromRouter(_from); msg.setFromRouterHash(_fromHash); msg.setMessage(_body); - InNetMessagePool.getInstance().add(msg); + HandleLocallyJob.this._context.inNetMessagePool().add(msg); if (_log.shouldLog(Log.DEBUG)) _log.debug("Message added to Inbound network pool for local processing: " + _message); } @@ -503,6 +508,7 @@ public class HandleTunnelMessageJob extends JobImpl { private class HandleEndpointJob extends JobImpl { private TunnelInfo _info; public HandleEndpointJob(TunnelInfo info) { + super(HandleTunnelMessageJob.this._context); _info = info; } public void runJob() { @@ -517,6 +523,7 @@ public class HandleTunnelMessageJob extends JobImpl { private TunnelInfo _ourPlace; private DeliveryInstructions _instructions; public ProcessBodyLocallyJob(I2NPMessage body, DeliveryInstructions instructions, TunnelInfo ourPlace) { + super(HandleTunnelMessageJob.this._context); _body = body; _instructions = instructions; _ourPlace = ourPlace; diff --git a/router/java/src/net/i2p/router/message/MessageHandler.java b/router/java/src/net/i2p/router/message/MessageHandler.java index 85ec2f043..ea3b8ae8f 100644 --- a/router/java/src/net/i2p/router/message/MessageHandler.java +++ b/router/java/src/net/i2p/router/message/MessageHandler.java @@ -32,6 +32,7 @@ import net.i2p.router.MessageValidator; import 
net.i2p.router.Router; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Implement the inbound message processing logic to forward based on delivery instructions and @@ -39,141 +40,165 @@ import net.i2p.util.Log; * */ class MessageHandler { - private final static Log _log = new Log(MessageHandler.class); - private static MessageHandler _instance = new MessageHandler(); - public static MessageHandler getInstance() { return _instance; } + private Log _log; + private RouterContext _context; + + public MessageHandler(RouterContext ctx) { + _context = ctx; + _log = _context.logManager().getLog(MessageHandler.class); + } - public void handleMessage(DeliveryInstructions instructions, I2NPMessage message, boolean requestAck, SourceRouteBlock replyBlock, - long replyId, RouterIdentity from, Hash fromHash, long expiration, int priority) { - switch (instructions.getDeliveryMode()) { - case DeliveryInstructions.DELIVERY_MODE_LOCAL: - _log.debug("Instructions for LOCAL DELIVERY"); - if (message.getType() == DataMessage.MESSAGE_TYPE) { - handleLocalDestination(instructions, message, fromHash); - } else { - handleLocalRouter(message, from, fromHash, replyBlock, requestAck); - } - break; - case DeliveryInstructions.DELIVERY_MODE_ROUTER: - _log.debug("Instructions for ROUTER DELIVERY to " + instructions.getRouter().toBase64()); - if (Router.getInstance().getRouterInfo().getIdentity().getHash().equals(instructions.getRouter())) { - handleLocalRouter(message, from, fromHash, replyBlock, requestAck); - } else { - handleRemoteRouter(message, instructions, expiration, priority); - } - break; - case DeliveryInstructions.DELIVERY_MODE_DESTINATION: - _log.debug("Instructions for DESTINATION DELIVERY to " + instructions.getDestination().toBase64()); - if (ClientManagerFacade.getInstance().isLocal(instructions.getDestination())) { - handleLocalDestination(instructions, message, fromHash); - } else { - _log.error("Instructions requests forwarding 
on to a non-local destination. Not yet supported"); - } - break; - case DeliveryInstructions.DELIVERY_MODE_TUNNEL: - _log.debug("Instructions for TUNNEL DELIVERY to" + instructions.getTunnelId().getTunnelId() + " on " + instructions.getRouter().toBase64()); - handleTunnel(instructions, expiration, message, priority); - break; - default: - _log.error("Message has instructions that are not yet implemented: mode = " + instructions.getDeliveryMode()); - } - - if (requestAck) { - _log.debug("SEND ACK REQUESTED"); - sendAck(replyBlock, replyId); - } else { - _log.debug("No ack requested"); - } + public void handleMessage(DeliveryInstructions instructions, I2NPMessage message, + boolean requestAck, SourceRouteBlock replyBlock, + long replyId, RouterIdentity from, Hash fromHash, + long expiration, int priority) { + switch (instructions.getDeliveryMode()) { + case DeliveryInstructions.DELIVERY_MODE_LOCAL: + _log.debug("Instructions for LOCAL DELIVERY"); + if (message.getType() == DataMessage.MESSAGE_TYPE) { + handleLocalDestination(instructions, message, fromHash); + } else { + handleLocalRouter(message, from, fromHash, replyBlock, requestAck); + } + break; + case DeliveryInstructions.DELIVERY_MODE_ROUTER: + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Instructions for ROUTER DELIVERY to " + + instructions.getRouter().toBase64()); + if (_context.routerHash().equals(instructions.getRouter())) { + handleLocalRouter(message, from, fromHash, replyBlock, requestAck); + } else { + handleRemoteRouter(message, instructions, expiration, priority); + } + break; + case DeliveryInstructions.DELIVERY_MODE_DESTINATION: + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Instructions for DESTINATION DELIVERY to " + + instructions.getDestination().toBase64()); + if (_context.clientManager().isLocal(instructions.getDestination())) { + handleLocalDestination(instructions, message, fromHash); + } else { + _log.error("Instructions requests forwarding on to a non-local destination. 
Not yet supported"); + } + break; + case DeliveryInstructions.DELIVERY_MODE_TUNNEL: + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Instructions for TUNNEL DELIVERY to" + + instructions.getTunnelId().getTunnelId() + " on " + + instructions.getRouter().toBase64()); + handleTunnel(instructions, expiration, message, priority); + break; + default: + _log.error("Message has instructions that are not yet implemented: mode = " + instructions.getDeliveryMode()); + } + + if (requestAck) { + _log.debug("SEND ACK REQUESTED"); + sendAck(replyBlock, replyId); + } else { + _log.debug("No ack requested"); + } } private void sendAck(SourceRouteBlock replyBlock, long replyId) { - _log.info("Queueing up ack job via reply block " + replyBlock); - Job ackJob = new SendMessageAckJob(replyBlock, replyId); - JobQueue.getInstance().addJob(ackJob); + _log.info("Queueing up ack job via reply block " + replyBlock); + Job ackJob = new SendMessageAckJob(_context, replyBlock, replyId); + _context.jobQueue().addJob(ackJob); } private void handleLocalRouter(I2NPMessage message, RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock, boolean ackUsed) { - _log.info("Handle " + message.getClass().getName() + " to a local router - toss it on the inbound network pool"); - InNetMessage msg = new InNetMessage(); - msg.setFromRouter(from); - msg.setFromRouterHash(fromHash); - msg.setMessage(message); - if (!ackUsed) - msg.setReplyBlock(replyBlock); - InNetMessagePool.getInstance().add(msg); + _log.info("Handle " + message.getClass().getName() + " to a local router - toss it on the inbound network pool"); + InNetMessage msg = new InNetMessage(); + msg.setFromRouter(from); + msg.setFromRouterHash(fromHash); + msg.setMessage(message); + if (!ackUsed) + msg.setReplyBlock(replyBlock); + _context.inNetMessagePool().add(msg); } - private void handleRemoteRouter(I2NPMessage message, DeliveryInstructions instructions, long expiration, int priority) { + private void handleRemoteRouter(I2NPMessage message, 
DeliveryInstructions instructions, + long expiration, int priority) { + boolean valid = _context.messageValidator().validateMessage(message.getUniqueId(), message.getMessageExpiration().getTime()); + if (!valid) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Duplicate / expired message received to remote router [" + message.getUniqueId() + " expiring on " + message.getMessageExpiration() + "]"); + _context.messageHistory().droppedOtherMessage(message); + _context.messageHistory().messageProcessingError(message.getUniqueId(), message.getClass().getName(), "Duplicate/expired to remote router"); + return; + } - boolean valid = MessageValidator.getInstance().validateMessage(message.getUniqueId(), message.getMessageExpiration().getTime()); - if (!valid) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Duplicate / expired message received to remote router [" + message.getUniqueId() + " expiring on " + message.getMessageExpiration() + "]"); - MessageHistory.getInstance().droppedOtherMessage(message); - MessageHistory.getInstance().messageProcessingError(message.getUniqueId(), message.getClass().getName(), "Duplicate/expired to remote router"); - return; - } - - _log.info("Handle " + message.getClass().getName() + " to a remote router " + instructions.getRouter().toBase64() + " - fire a SendMessageDirectJob"); - SendMessageDirectJob j = new SendMessageDirectJob(message, instructions.getRouter(), expiration, priority); - JobQueue.getInstance().addJob(j); + if (_log.shouldLog(Log.INFO)) + _log.info("Handle " + message.getClass().getName() + " to a remote router " + + instructions.getRouter().toBase64() + " - fire a SendMessageDirectJob"); + SendMessageDirectJob j = new SendMessageDirectJob(_context, message, instructions.getRouter(), expiration, priority); + _context.jobQueue().addJob(j); } private void handleTunnel(DeliveryInstructions instructions, long expiration, I2NPMessage message, int priority) { - Hash to = instructions.getRouter(); - long timeoutMs = expiration - 
Clock.getInstance().now(); - TunnelId tunnelId = instructions.getTunnelId(); - - if (!Router.getInstance().getRouterInfo().getIdentity().getHash().equals(to)) { - // don't validate locally targetted tunnel messages, since then we'd have to tweak - // around message validation thats already in place for SendMessageDirectJob - boolean valid = MessageValidator.getInstance().validateMessage(message.getUniqueId(), message.getMessageExpiration().getTime()); - if (!valid) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Duplicate / expired tunnel message received [" + message.getUniqueId() + " expiring on " + message.getMessageExpiration() + "]"); - MessageHistory.getInstance().droppedOtherMessage(message); - MessageHistory.getInstance().messageProcessingError(message.getUniqueId(), message.getClass().getName(), "Duplicate/expired"); - return; - } - } + Hash to = instructions.getRouter(); + long timeoutMs = expiration - _context.clock().now(); + TunnelId tunnelId = instructions.getTunnelId(); + + if (!_context.routerHash().equals(to)) { + // don't validate locally targetted tunnel messages, since then we'd have to tweak + // around message validation thats already in place for SendMessageDirectJob + boolean valid = _context.messageValidator().validateMessage(message.getUniqueId(), message.getMessageExpiration().getTime()); + if (!valid) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Duplicate / expired tunnel message received [" + message.getUniqueId() + " expiring on " + message.getMessageExpiration() + "]"); + _context.messageHistory().droppedOtherMessage(message); + _context.messageHistory().messageProcessingError(message.getUniqueId(), + message.getClass().getName(), + "Duplicate/expired"); + return; + } + } - _log.info("Handle " + message.getClass().getName() + " to send to remote tunnel " + tunnelId.getTunnelId() + " on router " + to.toBase64()); - TunnelMessage msg = new TunnelMessage(); - ByteArrayOutputStream baos = new ByteArrayOutputStream(1024); - try { - 
message.writeBytes(baos); - msg.setData(baos.toByteArray()); - msg.setTunnelId(tunnelId); - _log.debug("Placing message of type " + message.getClass().getName() + " into the new tunnel message bound for " + tunnelId.getTunnelId() + " on " + to.toBase64()); - JobQueue.getInstance().addJob(new SendMessageDirectJob(msg, to, expiration, priority)); - - String bodyType = message.getClass().getName(); - MessageHistory.getInstance().wrap(bodyType, message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId()); - } catch (Exception e) { - _log.warn("Unable to forward on according to the instructions to the remote tunnel", e); - } + if (_log.shouldLog(Log.INFO)) + _log.info("Handle " + message.getClass().getName() + " to send to remote tunnel " + + tunnelId.getTunnelId() + " on router " + to.toBase64()); + TunnelMessage msg = new TunnelMessage(_context); + ByteArrayOutputStream baos = new ByteArrayOutputStream(1024); + try { + message.writeBytes(baos); + msg.setData(baos.toByteArray()); + msg.setTunnelId(tunnelId); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Placing message of type " + message.getClass().getName() + + " into the new tunnel message bound for " + tunnelId.getTunnelId() + + " on " + to.toBase64()); + _context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, to, expiration, priority)); + + String bodyType = message.getClass().getName(); + _context.messageHistory().wrap(bodyType, message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId()); + } catch (Exception e) { + _log.warn("Unable to forward on according to the instructions to the remote tunnel", e); + } } private void handleLocalDestination(DeliveryInstructions instructions, I2NPMessage message, Hash fromHash) { - boolean valid = MessageValidator.getInstance().validateMessage(message.getUniqueId(), message.getMessageExpiration().getTime()); - if (!valid) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Duplicate / expired client message received [" + message.getUniqueId() + 
" expiring on " + message.getMessageExpiration() + "]"); - MessageHistory.getInstance().droppedOtherMessage(message); - MessageHistory.getInstance().messageProcessingError(message.getUniqueId(), message.getClass().getName(), "Duplicate/expired client message"); - return; - } - - _log.debug("Handle " + message.getClass().getName() + " to a local destination - build a ClientMessage and pool it"); - ClientMessage msg = new ClientMessage(); - msg.setDestinationHash(instructions.getDestination()); - Payload payload = new Payload(); - payload.setEncryptedData(((DataMessage)message).getData()); - msg.setPayload(payload); - MessageReceptionInfo info = new MessageReceptionInfo(); - info.setFromPeer(fromHash); - msg.setReceptionInfo(info); - MessageHistory.getInstance().receivePayloadMessage(message.getUniqueId()); - ClientMessagePool.getInstance().add(msg); + boolean valid = _context.messageValidator().validateMessage(message.getUniqueId(), message.getMessageExpiration().getTime()); + if (!valid) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Duplicate / expired client message received [" + message.getUniqueId() + " expiring on " + message.getMessageExpiration() + "]"); + _context.messageHistory().droppedOtherMessage(message); + _context.messageHistory().messageProcessingError(message.getUniqueId(), message.getClass().getName(), "Duplicate/expired client message"); + return; + } + + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Handle " + message.getClass().getName() + + " to a local destination - build a ClientMessage and pool it"); + ClientMessage msg = new ClientMessage(); + msg.setDestinationHash(instructions.getDestination()); + Payload payload = new Payload(); + payload.setEncryptedData(((DataMessage)message).getData()); + msg.setPayload(payload); + MessageReceptionInfo info = new MessageReceptionInfo(); + info.setFromPeer(fromHash); + msg.setReceptionInfo(info); + _context.messageHistory().receivePayloadMessage(message.getUniqueId()); + 
_context.clientMessagePool().add(msg); } } diff --git a/router/java/src/net/i2p/router/message/OutboundClientMessageJob.java b/router/java/src/net/i2p/router/message/OutboundClientMessageJob.java index 7f4ee85f7..4828abd06 100644 --- a/router/java/src/net/i2p/router/message/OutboundClientMessageJob.java +++ b/router/java/src/net/i2p/router/message/OutboundClientMessageJob.java @@ -39,30 +39,31 @@ import net.i2p.stat.StatManager; import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.router.RouterContext; /** * Send a client message, taking into consideration the fact that there may be * multiple inbound tunnels that the target provides. This job sends it to one * of them and if it doesnt get a confirmation within 15 seconds (SEND_TIMEOUT_MS), - * it tries the next, continuing on until a confirmation is received, the full + * it tries the next, continuing on until a confirmation is received, the full * timeout has been reached (60 seconds, or the ms defined in the client's or - * router's "clientMessageTimeout" option). + * router's "clientMessageTimeout" option). * - * After sending through all of the leases without success, if there's still - * time left it fails the leaseSet itself, does a new search for that leaseSet, - * and continues sending down any newly found leases. + * After sending through all of the leases without success, if there's still + * time left it fails the leaseSet itself, does a new search for that leaseSet, + * and continues sending down any newly found leases. * */ public class OutboundClientMessageJob extends JobImpl { - private final static Log _log = new Log(OutboundClientMessageJob.class); + private Log _log; private OutboundClientMessageStatus _status; private NextStepJob _nextStep; private LookupLeaseSetFailedJob _lookupLeaseSetFailed; private long _overallExpiration; - - /** + + /** * final timeout (in milliseconds) that the outbound message will fail in. 
- * This can be overridden in the router.config or the client's session config + * This can be overridden in the router.config or the client's session config * (the client's session config takes precedence) */ public final static String OVERALL_TIMEOUT_MS_PARAM = "clientMessageTimeout"; @@ -76,286 +77,285 @@ public class OutboundClientMessageJob extends JobImpl { /** dont search for the lease more than 3 times */ private final static int MAX_LEASE_LOOKUPS = 3; - static { - StatManager.getInstance().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createRateStat("client.sendAttemptAverage", "How many different tunnels do we have to try when sending a client message?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - } - /** * Send the sucker */ - public OutboundClientMessageJob(ClientMessage msg) { - super(); - - long timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT; - - String param = msg.getSenderConfig().getOptions().getProperty(OVERALL_TIMEOUT_MS_PARAM); - if (param == null) - param = Router.getInstance().getConfigSetting(OVERALL_TIMEOUT_MS_PARAM); - if (param != null) { - try { - timeoutMs = Long.parseLong(param); - } catch (NumberFormatException nfe) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Invalid client message timeout specified [" + param + "], defaulting to " + OVERALL_TIMEOUT_MS_DEFAULT, nfe); - timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT; - } - } - - _overallExpiration = timeoutMs + Clock.getInstance().now(); - _status = new OutboundClientMessageStatus(msg); - _nextStep = new NextStepJob(); - _lookupLeaseSetFailed = new LookupLeaseSetFailedJob(); + public OutboundClientMessageJob(RouterContext 
ctx, ClientMessage msg) { + super(ctx); + _log = ctx.logManager().getLog(OutboundClientMessageJob.class); + + ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("client.sendAttemptAverage", "How many different tunnels do we have to try when sending a client message?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + + long timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT; + + String param = msg.getSenderConfig().getOptions().getProperty(OVERALL_TIMEOUT_MS_PARAM); + if (param == null) + param = ctx.router().getConfigSetting(OVERALL_TIMEOUT_MS_PARAM); + if (param != null) { + try { + timeoutMs = Long.parseLong(param); + } catch (NumberFormatException nfe) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Invalid client message timeout specified [" + param + "], defaulting to " + OVERALL_TIMEOUT_MS_DEFAULT, nfe); + timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT; + } + } + + _overallExpiration = timeoutMs + _context.clock().now(); + _status = new OutboundClientMessageStatus(msg); + _nextStep = new NextStepJob(); + _lookupLeaseSetFailed = new LookupLeaseSetFailedJob(); } public String getName() { return "Outbound client message"; } public void runJob() { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Send outbound client message job beginning"); - buildClove(); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Clove built"); - Hash to = _status.getTo().calculateHash(); - long timeoutMs = _overallExpiration - Clock.getInstance().now(); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Send outbound client message - sending off leaseSet lookup job"); - _status.incrementLookups(); - 
NetworkDatabaseFacade.getInstance().lookupLeaseSet(to, _nextStep, _lookupLeaseSetFailed, timeoutMs); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Send outbound client message job beginning"); + buildClove(); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Clove built"); + Hash to = _status.getTo().calculateHash(); + long timeoutMs = _overallExpiration - _context.clock().now(); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Send outbound client message - sending off leaseSet lookup job"); + _status.incrementLookups(); + _context.netDb().lookupLeaseSet(to, _nextStep, _lookupLeaseSetFailed, timeoutMs); } /** * Continue on sending through the next tunnel */ private void sendNext() { - if (_log.shouldLog(Log.DEBUG)) { - _log.debug("sendNext() called with " + _status.getNumSent() + " already sent"); - } - - if (_status.getSuccess()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("sendNext() - already successful!"); - return; - } - if (_status.getFailure()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("sendNext() - already failed!"); - return; - } - - long now = Clock.getInstance().now(); - if (now >= _overallExpiration) { - if (_log.shouldLog(Log.WARN)) - _log.warn("sendNext() - Expired"); - dieFatal(); - return; - } - - Lease nextLease = getNextLease(); - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Send outbound client message - next lease found for [" + _status.getTo().calculateHash().toBase64() + "] - " + nextLease); - - if (nextLease == null) { - if (_log.shouldLog(Log.WARN)) - _log.warn("No more leases, and we still haven't heard back from the peer, refetching the leaseSet to try again"); - _status.setLeaseSet(null); - long remainingMs = _overallExpiration - Clock.getInstance().now(); - if (_status.getNumLookups() < MAX_LEASE_LOOKUPS) { - _status.incrementLookups(); - Hash to = _status.getMessage().getDestination().calculateHash(); - _status.clearAlreadySent(); - NetworkDatabaseFacade.getInstance().fail(to); - 
NetworkDatabaseFacade.getInstance().lookupLeaseSet(to, _nextStep, _lookupLeaseSetFailed, remainingMs); - return; - } else { - if (_log.shouldLog(Log.WARN)) - _log.warn("sendNext() - max # lease lookups exceeded! " + _status.getNumLookups()); - dieFatal(); - return; - } - } - - JobQueue.getInstance().addJob(new SendJob(nextLease)); + if (_log.shouldLog(Log.DEBUG)) { + _log.debug("sendNext() called with " + _status.getNumSent() + " already sent"); + } + + if (_status.getSuccess()) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("sendNext() - already successful!"); + return; + } + if (_status.getFailure()) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("sendNext() - already failed!"); + return; + } + + long now = _context.clock().now(); + if (now >= _overallExpiration) { + if (_log.shouldLog(Log.WARN)) + _log.warn("sendNext() - Expired"); + dieFatal(); + return; + } + + Lease nextLease = getNextLease(); + + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Send outbound client message - next lease found for [" + _status.getTo().calculateHash().toBase64() + "] - " + nextLease); + + if (nextLease == null) { + if (_log.shouldLog(Log.WARN)) + _log.warn("No more leases, and we still haven't heard back from the peer, refetching the leaseSet to try again"); + _status.setLeaseSet(null); + long remainingMs = _overallExpiration - _context.clock().now(); + if (_status.getNumLookups() < MAX_LEASE_LOOKUPS) { + _status.incrementLookups(); + Hash to = _status.getMessage().getDestination().calculateHash(); + _status.clearAlreadySent(); + _context.netDb().fail(to); + _context.netDb().lookupLeaseSet(to, _nextStep, _lookupLeaseSetFailed, remainingMs); + return; + } else { + if (_log.shouldLog(Log.WARN)) + _log.warn("sendNext() - max # lease lookups exceeded! 
" + _status.getNumLookups()); + dieFatal(); + return; + } + } + + _context.jobQueue().addJob(new SendJob(nextLease)); } - /** - * fetch the next lease that we should try sending through, or null if there - * are no remaining leases available (or there weren't any in the first place...). - * This implements the logic to determine which lease should be next by picking a + /** + * fetch the next lease that we should try sending through, or null if there + * are no remaining leases available (or there weren't any in the first place...). + * This implements the logic to determine which lease should be next by picking a * random one that has been failing the least (e.g. if there are 3 leases in the leaseSet * and one has failed, the other two are randomly chosen as the 'next') * */ - private Lease getNextLease() { - LeaseSet ls = _status.getLeaseSet(); - if (ls == null) { - ls = NetworkDatabaseFacade.getInstance().lookupLeaseSetLocally(_status.getTo().calculateHash()); - if (ls == null) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Lookup locally didn't find the leaseSet"); - return null; - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Lookup locally DID find the leaseSet"); - } - _status.setLeaseSet(ls); - } - long now = Clock.getInstance().now(); - - // get the possible leases - List leases = new ArrayList(4); - for (int i = 0; i < ls.getLeaseCount(); i++) { - Lease lease = ls.getLease(i); - if (lease.isExpired(Router.CLOCK_FUDGE_FACTOR)) { - if (_log.shouldLog(Log.WARN)) - _log.warn("getNextLease() - expired lease! 
- " + lease); - continue; - } - - if (!_status.alreadySent(lease.getRouterIdentity().getHash(), lease.getTunnelId())) { - leases.add(lease); - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("getNextLease() - skipping lease we've already sent it down - " + lease); - } - } - - // randomize the ordering (so leases with equal # of failures per next sort are randomly ordered) - Collections.shuffle(leases); - - // ordered by lease number of failures - TreeMap orderedLeases = new TreeMap(); - for (Iterator iter = leases.iterator(); iter.hasNext(); ) { - Lease lease = (Lease)iter.next(); - long id = lease.getNumFailure(); - while (orderedLeases.containsKey(new Long(id))) - id++; - orderedLeases.put(new Long(id), lease); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("getNextLease() - ranking lease we havent sent it down as " + id); - } - - if (orderedLeases.size() <= 0) { - if (_log.shouldLog(Log.WARN)) - _log.warn("No leases in the ordered set found! all = " + leases.size()); - return null; - } else { - return (Lease)orderedLeases.get(orderedLeases.firstKey()); - } + private Lease getNextLease() { + LeaseSet ls = _status.getLeaseSet(); + if (ls == null) { + ls = _context.netDb().lookupLeaseSetLocally(_status.getTo().calculateHash()); + if (ls == null) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Lookup locally didn't find the leaseSet"); + return null; + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Lookup locally DID find the leaseSet"); + } + _status.setLeaseSet(ls); + } + long now = _context.clock().now(); + + // get the possible leases + List leases = new ArrayList(4); + for (int i = 0; i < ls.getLeaseCount(); i++) { + Lease lease = ls.getLease(i); + if (lease.isExpired(Router.CLOCK_FUDGE_FACTOR)) { + if (_log.shouldLog(Log.WARN)) + _log.warn("getNextLease() - expired lease! 
- " + lease); + continue; + } + + if (!_status.alreadySent(lease.getRouterIdentity().getHash(), lease.getTunnelId())) { + leases.add(lease); + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("getNextLease() - skipping lease we've already sent it down - " + lease); + } + } + + // randomize the ordering (so leases with equal # of failures per next sort are randomly ordered) + Collections.shuffle(leases); + + // ordered by lease number of failures + TreeMap orderedLeases = new TreeMap(); + for (Iterator iter = leases.iterator(); iter.hasNext(); ) { + Lease lease = (Lease)iter.next(); + long id = lease.getNumFailure(); + while (orderedLeases.containsKey(new Long(id))) + id++; + orderedLeases.put(new Long(id), lease); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("getNextLease() - ranking lease we havent sent it down as " + id); + } + + if (orderedLeases.size() <= 0) { + if (_log.shouldLog(Log.WARN)) + _log.warn("No leases in the ordered set found! all = " + leases.size()); + return null; + } else { + return (Lease)orderedLeases.get(orderedLeases.firstKey()); + } } /** * Send the message to the specified tunnel by creating a new garlic message containing - * the (already created) payload clove as well as a new delivery status message. This garlic + * the (already created) payload clove as well as a new delivery status message. This garlic * message is sent out one of our tunnels, destined for the lease (tunnel+router) specified, and the delivery - * status message is targetting one of our free inbound tunnels as well. We use a new + * status message is targetting one of our free inbound tunnels as well. 
We use a new * reply selector to keep an eye out for that delivery status message's token * */ private void send(Lease lease) { - // send it as a garlic with a DeliveryStatusMessage clove and a message selector w/ successJob on reply - long token = RandomSource.getInstance().nextInt(Integer.MAX_VALUE); - PublicKey key = _status.getLeaseSet().getEncryptionKey(); - SessionKey sessKey = new SessionKey(); - Set tags = new HashSet(); - GarlicMessage msg = OutboundClientMessageJobHelper.createGarlicMessage(token, _overallExpiration, key, _status.getClove(), _status.getTo(), sessKey, tags, true); - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("send(lease) - token expected " + token); - - _status.sent(lease.getRouterIdentity().getHash(), lease.getTunnelId()); - - SendSuccessJob onReply = new SendSuccessJob(lease, sessKey, tags); - SendTimeoutJob onFail = new SendTimeoutJob(lease); - ReplySelector selector = new ReplySelector(token); - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Placing GarlicMessage into the new tunnel message bound for " + lease.getTunnelId() + " on " + lease.getRouterIdentity().getHash().toBase64()); - - TunnelId outTunnelId = selectOutboundTunnel(); - if (outTunnelId != null) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Sending tunnel message out " + outTunnelId + " to " + lease.getTunnelId() + " on " + lease.getRouterIdentity().getHash().toBase64()); - SendTunnelMessageJob j = new SendTunnelMessageJob(msg, outTunnelId, lease.getRouterIdentity().getHash(), lease.getTunnelId(), null, onReply, onFail, selector, SEND_TIMEOUT_MS, SEND_PRIORITY); - JobQueue.getInstance().addJob(j); - } else { - if (_log.shouldLog(Log.ERROR)) - _log.error("Could not find any outbound tunnels to send the payload through... 
wtf?"); - JobQueue.getInstance().addJob(onFail); - } + // send it as a garlic with a DeliveryStatusMessage clove and a message selector w/ successJob on reply + long token = _context.random().nextInt(Integer.MAX_VALUE); + PublicKey key = _status.getLeaseSet().getEncryptionKey(); + SessionKey sessKey = new SessionKey(); + Set tags = new HashSet(); + GarlicMessage msg = OutboundClientMessageJobHelper.createGarlicMessage(_context, token, _overallExpiration, key, _status.getClove(), _status.getTo(), sessKey, tags, true); + + if (_log.shouldLog(Log.DEBUG)) + _log.debug("send(lease) - token expected " + token); + + _status.sent(lease.getRouterIdentity().getHash(), lease.getTunnelId()); + + SendSuccessJob onReply = new SendSuccessJob(lease, sessKey, tags); + SendTimeoutJob onFail = new SendTimeoutJob(lease); + ReplySelector selector = new ReplySelector(token); + + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Placing GarlicMessage into the new tunnel message bound for " + lease.getTunnelId() + " on " + lease.getRouterIdentity().getHash().toBase64()); + + TunnelId outTunnelId = selectOutboundTunnel(); + if (outTunnelId != null) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Sending tunnel message out " + outTunnelId + " to " + lease.getTunnelId() + " on " + lease.getRouterIdentity().getHash().toBase64()); + SendTunnelMessageJob j = new SendTunnelMessageJob(_context, msg, outTunnelId, lease.getRouterIdentity().getHash(), lease.getTunnelId(), null, onReply, onFail, selector, SEND_TIMEOUT_MS, SEND_PRIORITY); + _context.jobQueue().addJob(j); + } else { + if (_log.shouldLog(Log.ERROR)) + _log.error("Could not find any outbound tunnels to send the payload through... 
wtf?"); + _context.jobQueue().addJob(onFail); + } } /** - * Pick an arbitrary outbound tunnel to send the message through, or null if + * Pick an arbitrary outbound tunnel to send the message through, or null if * there aren't any around * */ private TunnelId selectOutboundTunnel() { - TunnelSelectionCriteria crit = new TunnelSelectionCriteria(); - crit.setMaximumTunnelsRequired(1); - crit.setMinimumTunnelsRequired(1); - List tunnelIds = TunnelManagerFacade.getInstance().selectOutboundTunnelIds(crit); - if (tunnelIds.size() <= 0) - return null; - else - return (TunnelId)tunnelIds.get(0); + TunnelSelectionCriteria crit = new TunnelSelectionCriteria(); + crit.setMaximumTunnelsRequired(1); + crit.setMinimumTunnelsRequired(1); + List tunnelIds = _context.tunnelManager().selectOutboundTunnelIds(crit); + if (tunnelIds.size() <= 0) + return null; + else + return (TunnelId)tunnelIds.get(0); } - /** + /** * give up the ghost, this message just aint going through. tell the client to fuck off. * * this is safe to call multiple times (only tells the client once) */ private void dieFatal() { - if (_status.getSuccess()) return; - boolean alreadyFailed = _status.failed(); - long sendTime = Clock.getInstance().now() - _status.getStart(); - ClientMessage msg = _status.getMessage(); - if (alreadyFailed) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("dieFatal() - already failed sending " + msg.getMessageId()+ ", no need to do it again", new Exception("Duplicate death?")); - return; - } else { - if (_log.shouldLog(Log.ERROR)) - _log.error("Failed to send the message " + msg.getMessageId() + " after " + _status.getNumSent() + " sends and " + _status.getNumLookups() + " lookups (and " + sendTime + "ms)", new Exception("Message send failure")); - } - - MessageHistory.getInstance().sendPayloadMessage(msg.getMessageId().getMessageId(), false, sendTime); - ClientManagerFacade.getInstance().messageDeliveryStatusUpdate(msg.getFromDestination(), msg.getMessageId(), false); - 
StatManager.getInstance().updateFrequency("client.sendMessageFailFrequency"); - StatManager.getInstance().addRateData("client.sendAttemptAverage", _status.getNumSent(), sendTime); + if (_status.getSuccess()) return; + boolean alreadyFailed = _status.failed(); + long sendTime = _context.clock().now() - _status.getStart(); + ClientMessage msg = _status.getMessage(); + if (alreadyFailed) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("dieFatal() - already failed sending " + msg.getMessageId()+ ", no need to do it again", new Exception("Duplicate death?")); + return; + } else { + if (_log.shouldLog(Log.ERROR)) + _log.error("Failed to send the message " + msg.getMessageId() + " after " + _status.getNumSent() + " sends and " + _status.getNumLookups() + " lookups (and " + sendTime + "ms)", new Exception("Message send failure")); + } + + _context.messageHistory().sendPayloadMessage(msg.getMessageId().getMessageId(), false, sendTime); + _context.clientManager().messageDeliveryStatusUpdate(msg.getFromDestination(), msg.getMessageId(), false); + _context.statManager().updateFrequency("client.sendMessageFailFrequency"); + _context.statManager().addRateData("client.sendAttemptAverage", _status.getNumSent(), sendTime); } /** build the payload clove that will be used for all of the messages, placing the clove in the status structure */ private void buildClove() { - PayloadGarlicConfig clove = new PayloadGarlicConfig(); - - DeliveryInstructions instructions = new DeliveryInstructions(); - instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_DESTINATION); - instructions.setDestination(_status.getTo().calculateHash()); - - instructions.setDelayRequested(false); - instructions.setDelaySeconds(0); - instructions.setEncrypted(false); - - clove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); - clove.setDeliveryInstructions(instructions); - clove.setExpiration(_overallExpiration); - clove.setId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE)); 
- - DataMessage msg = new DataMessage(); - msg.setData(_status.getMessage().getPayload().getEncryptedData()); - - clove.setPayload(msg); - clove.setRecipientPublicKey(null); - clove.setRequestAck(false); - - _status.setClove(clove); - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Built payload clove with id " + clove.getId()); + PayloadGarlicConfig clove = new PayloadGarlicConfig(); + + DeliveryInstructions instructions = new DeliveryInstructions(); + instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_DESTINATION); + instructions.setDestination(_status.getTo().calculateHash()); + + instructions.setDelayRequested(false); + instructions.setDelaySeconds(0); + instructions.setEncrypted(false); + + clove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); + clove.setDeliveryInstructions(instructions); + clove.setExpiration(_overallExpiration); + clove.setId(_context.random().nextInt(Integer.MAX_VALUE)); + + DataMessage msg = new DataMessage(_context); + msg.setData(_status.getMessage().getPayload().getEncryptedData()); + + clove.setPayload(msg); + clove.setRecipientPublicKey(null); + clove.setRequestAck(false); + + _status.setClove(clove); + + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Built payload clove with id " + clove.getId()); } /** @@ -363,162 +363,171 @@ public class OutboundClientMessageJob extends JobImpl { * */ private class OutboundClientMessageStatus { - private ClientMessage _msg; - private PayloadGarlicConfig _clove; - private LeaseSet _leaseSet; - private Set _sent; - private int _numLookups; - private boolean _success; - private boolean _failure; - private long _start; - private int _previousSent; - - public OutboundClientMessageStatus(ClientMessage msg) { - _msg = msg; - _clove = null; - _leaseSet = null; - _sent = new HashSet(4); - _success = false; - _failure = false; - _numLookups = 0; - _previousSent = 0; - _start = Clock.getInstance().now(); - } - - /** raw payload */ - public Payload getPayload() { return 
_msg.getPayload(); } - /** clove, if we've built it */ - public PayloadGarlicConfig getClove() { return _clove; } - public void setClove(PayloadGarlicConfig clove) { _clove = clove; } - public ClientMessage getMessage() { return _msg; } - /** date we started the process on */ - public long getStart() { return _start; } - - public int getNumLookups() { return _numLookups; } - public void incrementLookups() { _numLookups++; } - public void clearAlreadySent() { - synchronized (_sent) { - _previousSent += _sent.size(); - _sent.clear(); - } - } - - /** who sent the message? */ - public Destination getFrom() { return _msg.getFromDestination(); } - /** who is the message going to? */ - public Destination getTo() { return _msg.getDestination(); } - /** what is the target's current leaseSet (or null if we don't know yet) */ - public LeaseSet getLeaseSet() { return _leaseSet; } - public void setLeaseSet(LeaseSet ls) { _leaseSet = ls; } - /** have we already sent the message down this tunnel? */ - public boolean alreadySent(Hash gateway, TunnelId tunnelId) { - Tunnel t = new Tunnel(gateway, tunnelId); - synchronized (_sent) { - return _sent.contains(t); - } - } - public void sent(Hash gateway, TunnelId tunnelId) { - Tunnel t = new Tunnel(gateway, tunnelId); - synchronized (_sent) { - _sent.add(t); - } - } - /** how many messages have we sent through various leases? */ - public int getNumSent() { - synchronized (_sent) { - return _sent.size() + _previousSent; - } - } - /** did we totally fail? */ - public boolean getFailure() { return _failure; } - /** we failed. returns true if we had already failed before */ - public boolean failed() { - boolean already = _failure; - _failure = true; - return already; - } - /** have we totally succeeded? */ - public boolean getSuccess() { return _success; } - /** we succeeded. 
returns true if we had already succeeded before */ - public boolean success() { - boolean already = _success; - _success = true; - return already; - } - - /** represent a unique tunnel at any given time */ - private class Tunnel { - private Hash _gateway; - private TunnelId _tunnel; - - public Tunnel(Hash tunnelGateway, TunnelId tunnel) { - _gateway = tunnelGateway; - _tunnel = tunnel; - } - - public Hash getGateway() { return _gateway; } - public TunnelId getTunnel() { return _tunnel; } - - public int hashCode() { - int rv = 0; - if (_gateway != null) - rv += _gateway.hashCode(); - if (_tunnel != null) - rv += 7*_tunnel.getTunnelId(); - return rv; - } - - public boolean equals(Object o) { - if (o == null) return false; - if (o.getClass() != Tunnel.class) return false; - Tunnel t = (Tunnel)o; - return (getTunnel() == t.getTunnel()) && - getGateway().equals(t.getGateway()); - } - } + private ClientMessage _msg; + private PayloadGarlicConfig _clove; + private LeaseSet _leaseSet; + private Set _sent; + private int _numLookups; + private boolean _success; + private boolean _failure; + private long _start; + private int _previousSent; + + public OutboundClientMessageStatus(ClientMessage msg) { + _msg = msg; + _clove = null; + _leaseSet = null; + _sent = new HashSet(4); + _success = false; + _failure = false; + _numLookups = 0; + _previousSent = 0; + _start = _context.clock().now(); + } + + /** raw payload */ + public Payload getPayload() { return _msg.getPayload(); } + /** clove, if we've built it */ + public PayloadGarlicConfig getClove() { return _clove; } + public void setClove(PayloadGarlicConfig clove) { _clove = clove; } + public ClientMessage getMessage() { return _msg; } + /** date we started the process on */ + public long getStart() { return _start; } + + public int getNumLookups() { return _numLookups; } + public void incrementLookups() { _numLookups++; } + public void clearAlreadySent() { + synchronized (_sent) { + _previousSent += _sent.size(); + 
_sent.clear(); + } + } + + /** who sent the message? */ + public Destination getFrom() { return _msg.getFromDestination(); } + /** who is the message going to? */ + public Destination getTo() { return _msg.getDestination(); } + /** what is the target's current leaseSet (or null if we don't know yet) */ + public LeaseSet getLeaseSet() { return _leaseSet; } + public void setLeaseSet(LeaseSet ls) { _leaseSet = ls; } + /** have we already sent the message down this tunnel? */ + public boolean alreadySent(Hash gateway, TunnelId tunnelId) { + Tunnel t = new Tunnel(gateway, tunnelId); + synchronized (_sent) { + return _sent.contains(t); + } + } + public void sent(Hash gateway, TunnelId tunnelId) { + Tunnel t = new Tunnel(gateway, tunnelId); + synchronized (_sent) { + _sent.add(t); + } + } + /** how many messages have we sent through various leases? */ + public int getNumSent() { + synchronized (_sent) { + return _sent.size() + _previousSent; + } + } + /** did we totally fail? */ + public boolean getFailure() { return _failure; } + /** we failed. returns true if we had already failed before */ + public boolean failed() { + boolean already = _failure; + _failure = true; + return already; + } + /** have we totally succeeded? */ + public boolean getSuccess() { return _success; } + /** we succeeded. 
returns true if we had already succeeded before */ + public boolean success() { + boolean already = _success; + _success = true; + return already; + } + + /** represent a unique tunnel at any given time */ + private class Tunnel { + private Hash _gateway; + private TunnelId _tunnel; + + public Tunnel(Hash tunnelGateway, TunnelId tunnel) { + _gateway = tunnelGateway; + _tunnel = tunnel; + } + + public Hash getGateway() { return _gateway; } + public TunnelId getTunnel() { return _tunnel; } + + public int hashCode() { + int rv = 0; + if (_gateway != null) + rv += _gateway.hashCode(); + if (_tunnel != null) + rv += 7*_tunnel.getTunnelId(); + return rv; + } + + public boolean equals(Object o) { + if (o == null) return false; + if (o.getClass() != Tunnel.class) return false; + Tunnel t = (Tunnel)o; + return (getTunnel() == t.getTunnel()) && + getGateway().equals(t.getGateway()); + } + } } - + /** * Keep an eye out for any of the delivery status message tokens that have been * sent down the various tunnels to deliver this message * */ private class ReplySelector implements MessageSelector { - private long _pendingToken; - public ReplySelector(long token) { - _pendingToken = token; - } - - public boolean continueMatching() { return false; } - public long getExpiration() { return _overallExpiration; } - - public boolean isMatch(I2NPMessage inMsg) { - if (inMsg.getType() == DeliveryStatusMessage.MESSAGE_TYPE) { - return _pendingToken == ((DeliveryStatusMessage)inMsg).getMessageId(); - } else { - return false; - } - } + private long _pendingToken; + public ReplySelector(long token) { + _pendingToken = token; + } + + public boolean continueMatching() { return false; } + public long getExpiration() { return _overallExpiration; } + + public boolean isMatch(I2NPMessage inMsg) { + if (inMsg.getType() == DeliveryStatusMessage.MESSAGE_TYPE) { + return _pendingToken == ((DeliveryStatusMessage)inMsg).getMessageId(); + } else { + return false; + } + } } /** queued by the db lookup 
success and the send timeout to get us to try the next lease */ private class NextStepJob extends JobImpl { - public String getName() { return "Process next step for outbound client message"; } - public void runJob() { sendNext(); } + public NextStepJob() { + super(OutboundClientMessageJob.this._context); + } + public String getName() { return "Process next step for outbound client message"; } + public void runJob() { sendNext(); } } /** we couldn't even find the leaseSet, fuck off */ private class LookupLeaseSetFailedJob extends JobImpl { - public String getName() { return "Lookup for outbound client message failed"; } - public void runJob() { dieFatal(); } + public LookupLeaseSetFailedJob() { + super(OutboundClientMessageJob.this._context); + } + public String getName() { return "Lookup for outbound client message failed"; } + public void runJob() { dieFatal(); } } /** send a message to a lease */ private class SendJob extends JobImpl { - private Lease _lease; - public SendJob(Lease lease) { _lease = lease; } - public String getName() { return "Send outbound client message through the lease"; } - public void runJob() { send(_lease); } + private Lease _lease; + public SendJob(Lease lease) { + super(OutboundClientMessageJob.this._context); + _lease = lease; + } + public String getName() { return "Send outbound client message through the lease"; } + public void runJob() { send(_lease); } } /** @@ -527,48 +536,49 @@ public class OutboundClientMessageJob extends JobImpl { * */ private class SendSuccessJob extends JobImpl implements ReplyJob { - private Lease _lease; - private SessionKey _key; - private Set _tags; - - /** - * Create a new success job that will be fired when the message encrypted with - * the given session key and bearing the specified tags are confirmed delivered. 
- * - */ - public SendSuccessJob(Lease lease, SessionKey key, Set tags) { - _lease = lease; - _key = key; - _tags = tags; - } - - public String getName() { return "Send client message successful to a lease"; } - public void runJob() { - long sendTime = Clock.getInstance().now() - _status.getStart(); - boolean alreadySuccessful = _status.success(); - MessageId msgId = _status.getMessage().getMessageId(); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("SUCCESS! Message delivered completely for message " + msgId + " after " + sendTime + "ms [for " + _status.getMessage().getMessageId() + "]"); - - if ( (_key != null) && (_tags != null) && (_tags.size() > 0) ) { - SessionKeyManager.getInstance().tagsDelivered(_status.getLeaseSet().getEncryptionKey(), _key, _tags); - } - - if (alreadySuccessful) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Success is a duplicate for " + _status.getMessage().getMessageId() + ", dont notify again..."); - return; - } - long dataMsgId = _status.getClove().getId(); - MessageHistory.getInstance().sendPayloadMessage(dataMsgId, true, sendTime); - ClientManagerFacade.getInstance().messageDeliveryStatusUpdate(_status.getFrom(), msgId, true); - _lease.setNumSuccess(_lease.getNumSuccess()+1); - - StatManager.getInstance().addRateData("client.sendMessageSize", _status.getMessage().getPayload().getSize(), sendTime); - StatManager.getInstance().addRateData("client.sendAttemptAverage", _status.getNumSent(), sendTime); - } - - public void setMessage(I2NPMessage msg) {} + private Lease _lease; + private SessionKey _key; + private Set _tags; + + /** + * Create a new success job that will be fired when the message encrypted with + * the given session key and bearing the specified tags are confirmed delivered. 
+ * + */ + public SendSuccessJob(Lease lease, SessionKey key, Set tags) { + super(OutboundClientMessageJob.this._context); + _lease = lease; + _key = key; + _tags = tags; + } + + public String getName() { return "Send client message successful to a lease"; } + public void runJob() { + long sendTime = _context.clock().now() - _status.getStart(); + boolean alreadySuccessful = _status.success(); + MessageId msgId = _status.getMessage().getMessageId(); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("SUCCESS! Message delivered completely for message " + msgId + " after " + sendTime + "ms [for " + _status.getMessage().getMessageId() + "]"); + + if ( (_key != null) && (_tags != null) && (_tags.size() > 0) ) { + SendSuccessJob.this._context.sessionKeyManager().tagsDelivered(_status.getLeaseSet().getEncryptionKey(), _key, _tags); + } + + if (alreadySuccessful) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Success is a duplicate for " + _status.getMessage().getMessageId() + ", dont notify again..."); + return; + } + long dataMsgId = _status.getClove().getId(); + SendSuccessJob.this._context.messageHistory().sendPayloadMessage(dataMsgId, true, sendTime); + SendSuccessJob.this._context.clientManager().messageDeliveryStatusUpdate(_status.getFrom(), msgId, true); + _lease.setNumSuccess(_lease.getNumSuccess()+1); + + SendSuccessJob.this._context.statManager().addRateData("client.sendMessageSize", _status.getMessage().getPayload().getSize(), sendTime); + SendSuccessJob.this._context.statManager().addRateData("client.sendAttemptAverage", _status.getNumSent(), sendTime); + } + + public void setMessage(I2NPMessage msg) {} } /** @@ -577,18 +587,19 @@ public class OutboundClientMessageJob extends JobImpl { * */ private class SendTimeoutJob extends JobImpl { - private Lease _lease; - - public SendTimeoutJob(Lease lease) { - _lease = lease; - } - - public String getName() { return "Send client message timed out through a lease"; } - public void runJob() { - if 
(_log.shouldLog(Log.DEBUG)) - _log.debug("Soft timeout through the lease " + _lease); - _lease.setNumFailure(_lease.getNumFailure()+1); - sendNext(); - } + private Lease _lease; + + public SendTimeoutJob(Lease lease) { + super(OutboundClientMessageJob.this._context); + _lease = lease; + } + + public String getName() { return "Send client message timed out through a lease"; } + public void runJob() { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Soft timeout through the lease " + _lease); + _lease.setNumFailure(_lease.getNumFailure()+1); + sendNext(); + } } } diff --git a/router/java/src/net/i2p/router/message/OutboundClientMessageJobHelper.java b/router/java/src/net/i2p/router/message/OutboundClientMessageJobHelper.java index 3f17f0c4e..28f553cca 100644 --- a/router/java/src/net/i2p/router/message/OutboundClientMessageJobHelper.java +++ b/router/java/src/net/i2p/router/message/OutboundClientMessageJobHelper.java @@ -1,9 +1,9 @@ package net.i2p.router.message; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -30,17 +30,16 @@ import net.i2p.router.TunnelSelectionCriteria; import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.router.RouterContext; /** * Handle a particular client message that is destined for a remote destination. * */ class OutboundClientMessageJobHelper { - private static Log _log = new Log(OutboundClientMessageJobHelper.class); - /** * Build a garlic message that will be delivered to the router on which the target is located. 
- * Inside the message are two cloves: one containing the payload with instructions for + * Inside the message are two cloves: one containing the payload with instructions for * delivery to the (now local) destination, and the other containing a DeliveryStatusMessage with * instructions for delivery to an inbound tunnel of this router. * @@ -52,128 +51,130 @@ class OutboundClientMessageJobHelper { * For now, its just a tunneled DeliveryStatusMessage * */ - static GarlicMessage createGarlicMessage(long replyToken, long expiration, PublicKey recipientPK, Payload data, Destination dest, SessionKey wrappedKey, Set wrappedTags, boolean requireAck) { - PayloadGarlicConfig dataClove = buildDataClove(data, dest, expiration); - return createGarlicMessage(replyToken, expiration, recipientPK, dataClove, dest, wrappedKey, wrappedTags, requireAck); + static GarlicMessage createGarlicMessage(RouterContext ctx, long replyToken, long expiration, PublicKey recipientPK, Payload data, Destination dest, SessionKey wrappedKey, Set wrappedTags, boolean requireAck) { + PayloadGarlicConfig dataClove = buildDataClove(ctx, data, dest, expiration); + return createGarlicMessage(ctx, replyToken, expiration, recipientPK, dataClove, dest, wrappedKey, wrappedTags, requireAck); } /** * Allow the app to specify the data clove directly, which enables OutboundClientMessage to resend the * same payload (including expiration and unique id) in different garlics (down different tunnels) * */ - static GarlicMessage createGarlicMessage(long replyToken, long expiration, PublicKey recipientPK, PayloadGarlicConfig dataClove, Destination dest, SessionKey wrappedKey, Set wrappedTags, boolean requireAck) { - GarlicConfig config = createGarlicConfig(replyToken, expiration, recipientPK, dataClove, dest, requireAck); - GarlicMessage msg = GarlicMessageBuilder.buildMessage(config, wrappedKey, wrappedTags); - return msg; + static GarlicMessage createGarlicMessage(RouterContext ctx, long replyToken, long expiration, 
PublicKey recipientPK, PayloadGarlicConfig dataClove, Destination dest, SessionKey wrappedKey, Set wrappedTags, boolean requireAck) { + GarlicConfig config = createGarlicConfig(ctx, replyToken, expiration, recipientPK, dataClove, dest, requireAck); + GarlicMessage msg = GarlicMessageBuilder.buildMessage(ctx, config, wrappedKey, wrappedTags); + return msg; } - private static GarlicConfig createGarlicConfig(long replyToken, long expiration, PublicKey recipientPK, PayloadGarlicConfig dataClove, Destination dest, boolean requireAck) { - _log.debug("Reply token: " + replyToken); - GarlicConfig config = new GarlicConfig(); - - config.addClove(dataClove); - - if (requireAck) { - PayloadGarlicConfig ackClove = buildAckClove(replyToken, expiration); - config.addClove(ackClove); - } - - DeliveryInstructions instructions = new DeliveryInstructions(); - instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL); - instructions.setDelayRequested(false); - instructions.setDelaySeconds(0); - instructions.setEncrypted(false); - instructions.setEncryptionKey(null); - instructions.setRouter(null); - instructions.setTunnelId(null); - - config.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); - config.setDeliveryInstructions(instructions); - config.setId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE)); - config.setExpiration(expiration+2*Router.CLOCK_FUDGE_FACTOR); - config.setRecipientPublicKey(recipientPK); - config.setRequestAck(false); - - _log.info("Creating garlic config to be encrypted to " + recipientPK + " for destination " + dest.calculateHash().toBase64()); - - return config; + private static GarlicConfig createGarlicConfig(RouterContext ctx, long replyToken, long expiration, PublicKey recipientPK, PayloadGarlicConfig dataClove, Destination dest, boolean requireAck) { + Log log = ctx.logManager().getLog(OutboundClientMessageJobHelper.class); + log.debug("Reply token: " + replyToken); + GarlicConfig config = new GarlicConfig(); + + 
config.addClove(dataClove); + + if (requireAck) { + PayloadGarlicConfig ackClove = buildAckClove(ctx, replyToken, expiration); + config.addClove(ackClove); + } + + DeliveryInstructions instructions = new DeliveryInstructions(); + instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL); + instructions.setDelayRequested(false); + instructions.setDelaySeconds(0); + instructions.setEncrypted(false); + instructions.setEncryptionKey(null); + instructions.setRouter(null); + instructions.setTunnelId(null); + + config.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); + config.setDeliveryInstructions(instructions); + config.setId(ctx.random().nextInt(Integer.MAX_VALUE)); + config.setExpiration(expiration+2*Router.CLOCK_FUDGE_FACTOR); + config.setRecipientPublicKey(recipientPK); + config.setRequestAck(false); + + log.info("Creating garlic config to be encrypted to " + recipientPK + " for destination " + dest.calculateHash().toBase64()); + + return config; } /** * Build a clove that sends a DeliveryStatusMessage to us */ - private static PayloadGarlicConfig buildAckClove(long replyToken, long expiration) { - PayloadGarlicConfig ackClove = new PayloadGarlicConfig(); - - Hash replyToTunnelRouter = null; // inbound tunnel gateway - TunnelId replyToTunnelId = null; // tunnel id on that gateway - - TunnelSelectionCriteria criteria = new TunnelSelectionCriteria(); - criteria.setMaximumTunnelsRequired(1); - criteria.setMinimumTunnelsRequired(1); - criteria.setReliabilityPriority(50); // arbitrary. fixme - criteria.setAnonymityPriority(50); // arbitrary. fixme - criteria.setLatencyPriority(50); // arbitrary. 
fixme - List tunnelIds = TunnelManagerFacade.getInstance().selectInboundTunnelIds(criteria); - if (tunnelIds.size() <= 0) { - _log.error("No inbound tunnels to receive an ack through!?"); - return null; - } - replyToTunnelId = (TunnelId)tunnelIds.get(0); - TunnelInfo info = TunnelManagerFacade.getInstance().getTunnelInfo(replyToTunnelId); - replyToTunnelRouter = info.getThisHop(); // info is the chain, and the first hop is the gateway - _log.debug("Ack for the data message will come back along tunnel " + replyToTunnelId + ":\n" + info); - - DeliveryInstructions ackInstructions = new DeliveryInstructions(); - ackInstructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_TUNNEL); - ackInstructions.setRouter(replyToTunnelRouter); - ackInstructions.setTunnelId(replyToTunnelId); - ackInstructions.setDelayRequested(false); - ackInstructions.setDelaySeconds(0); - ackInstructions.setEncrypted(false); - - DeliveryStatusMessage msg = new DeliveryStatusMessage(); - msg.setArrival(new Date(Clock.getInstance().now())); - msg.setMessageId(replyToken); - _log.debug("Delivery status message key: " + replyToken + " arrival: " + msg.getArrival()); - - ackClove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); - ackClove.setDeliveryInstructions(ackInstructions); - ackClove.setExpiration(expiration); - ackClove.setId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE)); - ackClove.setPayload(msg); - ackClove.setRecipient(Router.getInstance().getRouterInfo()); - ackClove.setRequestAck(false); - - _log.debug("Delivery status message is targetting us [" + ackClove.getRecipient().getIdentity().getHash().toBase64() + "] via tunnel " + replyToTunnelId.getTunnelId() + " on " + replyToTunnelRouter.toBase64()); - - return ackClove; - } + private static PayloadGarlicConfig buildAckClove(RouterContext ctx, long replyToken, long expiration) { + Log log = ctx.logManager().getLog(OutboundClientMessageJobHelper.class); + PayloadGarlicConfig ackClove = new 
PayloadGarlicConfig(); + Hash replyToTunnelRouter = null; // inbound tunnel gateway + TunnelId replyToTunnelId = null; // tunnel id on that gateway + + TunnelSelectionCriteria criteria = new TunnelSelectionCriteria(); + criteria.setMaximumTunnelsRequired(1); + criteria.setMinimumTunnelsRequired(1); + criteria.setReliabilityPriority(50); // arbitrary. fixme + criteria.setAnonymityPriority(50); // arbitrary. fixme + criteria.setLatencyPriority(50); // arbitrary. fixme + List tunnelIds = ctx.tunnelManager().selectInboundTunnelIds(criteria); + if (tunnelIds.size() <= 0) { + log.error("No inbound tunnels to receive an ack through!?"); + return null; + } + replyToTunnelId = (TunnelId)tunnelIds.get(0); + TunnelInfo info = ctx.tunnelManager().getTunnelInfo(replyToTunnelId); + replyToTunnelRouter = info.getThisHop(); // info is the chain, and the first hop is the gateway + log.debug("Ack for the data message will come back along tunnel " + replyToTunnelId + ":\n" + info); + + DeliveryInstructions ackInstructions = new DeliveryInstructions(); + ackInstructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_TUNNEL); + ackInstructions.setRouter(replyToTunnelRouter); + ackInstructions.setTunnelId(replyToTunnelId); + ackInstructions.setDelayRequested(false); + ackInstructions.setDelaySeconds(0); + ackInstructions.setEncrypted(false); + + DeliveryStatusMessage msg = new DeliveryStatusMessage(ctx); + msg.setArrival(new Date(ctx.clock().now())); + msg.setMessageId(replyToken); + log.debug("Delivery status message key: " + replyToken + " arrival: " + msg.getArrival()); + + ackClove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); + ackClove.setDeliveryInstructions(ackInstructions); + ackClove.setExpiration(expiration); + ackClove.setId(ctx.random().nextInt(Integer.MAX_VALUE)); + ackClove.setPayload(msg); + ackClove.setRecipient(ctx.router().getRouterInfo()); + ackClove.setRequestAck(false); + + log.debug("Delivery status message is targetting us [" + 
ackClove.getRecipient().getIdentity().getHash().toBase64() + "] via tunnel " + replyToTunnelId.getTunnelId() + " on " + replyToTunnelRouter.toBase64()); + + return ackClove; + } + /** * Build a clove that sends the payload to the destination */ - static PayloadGarlicConfig buildDataClove(Payload data, Destination dest, long expiration) { - PayloadGarlicConfig clove = new PayloadGarlicConfig(); - - DeliveryInstructions instructions = new DeliveryInstructions(); - instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_DESTINATION); - instructions.setDestination(dest.calculateHash()); - - instructions.setDelayRequested(false); - instructions.setDelaySeconds(0); - instructions.setEncrypted(false); - - clove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); - clove.setDeliveryInstructions(instructions); - clove.setExpiration(expiration); - clove.setId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE)); - DataMessage msg = new DataMessage(); - msg.setData(data.getEncryptedData()); - clove.setPayload(msg); - clove.setRecipientPublicKey(null); - clove.setRequestAck(false); - - return clove; + static PayloadGarlicConfig buildDataClove(RouterContext ctx, Payload data, Destination dest, long expiration) { + PayloadGarlicConfig clove = new PayloadGarlicConfig(); + + DeliveryInstructions instructions = new DeliveryInstructions(); + instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_DESTINATION); + instructions.setDestination(dest.calculateHash()); + + instructions.setDelayRequested(false); + instructions.setDelaySeconds(0); + instructions.setEncrypted(false); + + clove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); + clove.setDeliveryInstructions(instructions); + clove.setExpiration(expiration); + clove.setId(ctx.random().nextInt(Integer.MAX_VALUE)); + DataMessage msg = new DataMessage(ctx); + msg.setData(data.getEncryptedData()); + clove.setPayload(msg); + clove.setRecipientPublicKey(null); + 
clove.setRequestAck(false); + + return clove; } } diff --git a/router/java/src/net/i2p/router/message/SendGarlicJob.java b/router/java/src/net/i2p/router/message/SendGarlicJob.java index 5268039e2..6c186d6aa 100644 --- a/router/java/src/net/i2p/router/message/SendGarlicJob.java +++ b/router/java/src/net/i2p/router/message/SendGarlicJob.java @@ -1,9 +1,9 @@ package net.i2p.router.message; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -23,13 +23,14 @@ import net.i2p.router.ReplyJob; import net.i2p.router.Router; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Build a garlic message from config, encrypt it, and enqueue it for delivery. * */ public class SendGarlicJob extends JobImpl { - private final static Log _log = new Log(SendGarlicJob.class); + private Log _log; //private RouterInfo _target; private GarlicConfig _config; private Job _onSend; @@ -42,7 +43,7 @@ public class SendGarlicJob extends JobImpl { private GarlicMessage _message; private SessionKey _wrappedKey; private Set _wrappedTags; - + /** * * @param config ??? @@ -54,69 +55,73 @@ public class SendGarlicJob extends JobImpl { * @param priority how high priority to send this test * @param replySelector ??? 
*/ - public SendGarlicJob(GarlicConfig config, Job onSend, Job onSendFailed, ReplyJob onReply, Job onReplyFailed, long timeoutMs, int priority, MessageSelector replySelector) { - this(config, onSend, onSendFailed, onReply, onReplyFailed, timeoutMs, priority, replySelector, new SessionKey(), new HashSet()); + public SendGarlicJob(RouterContext ctx, GarlicConfig config, Job onSend, Job onSendFailed, ReplyJob onReply, Job onReplyFailed, long timeoutMs, int priority, MessageSelector replySelector) { + this(ctx, config, onSend, onSendFailed, onReply, onReplyFailed, timeoutMs, priority, replySelector, new SessionKey(), new HashSet()); } - public SendGarlicJob(GarlicConfig config, Job onSend, Job onSendFailed, ReplyJob onReply, Job onReplyFailed, long timeoutMs, int priority, MessageSelector replySelector, SessionKey wrappedKey, Set wrappedTags) { - super(); - if (config == null) throw new IllegalArgumentException("No config specified"); - if (config.getRecipient() == null) throw new IllegalArgumentException("No recipient in the config"); - //_target = target; - _config = config; - _onSend = onSend; - _onSendFailed = onSendFailed; - _onReply = onReply; - _onReplyFailed = onReplyFailed; - _timeoutMs = timeoutMs; - _priority = priority; - _replySelector = replySelector; - _message = null; - _wrappedKey = wrappedKey; - _wrappedTags = wrappedTags; + public SendGarlicJob(RouterContext ctx, GarlicConfig config, Job onSend, Job onSendFailed, ReplyJob onReply, Job onReplyFailed, long timeoutMs, int priority, MessageSelector replySelector, SessionKey wrappedKey, Set wrappedTags) { + super(ctx); + _log = ctx.logManager().getLog(SendGarlicJob.class); + if (config == null) throw new IllegalArgumentException("No config specified"); + if (config.getRecipient() == null) throw new IllegalArgumentException("No recipient in the config"); + //_target = target; + _config = config; + _onSend = onSend; + _onSendFailed = onSendFailed; + _onReply = onReply; + _onReplyFailed = onReplyFailed; + 
_timeoutMs = timeoutMs; + _priority = priority; + _replySelector = replySelector; + _message = null; + _wrappedKey = wrappedKey; + _wrappedTags = wrappedTags; } public String getName() { return "Build Garlic Message"; } public void runJob() { - long before = Clock.getInstance().now(); - _message = GarlicMessageBuilder.buildMessage(_config, _wrappedKey, _wrappedTags); - long after = Clock.getInstance().now(); - if ( (after - before) > 1000) { - _log.warn("Building the garlic took too long [" + (after-before)+" ms]", getAddedBy()); - } else { - _log.debug("Building the garlic was fast! " + (after - before) + " ms"); - } - JobQueue.getInstance().addJob(new SendJob()); + long before = _context.clock().now(); + _message = GarlicMessageBuilder.buildMessage(_context, _config, _wrappedKey, _wrappedTags); + long after = _context.clock().now(); + if ( (after - before) > 1000) { + _log.warn("Building the garlic took too long [" + (after-before)+" ms]", getAddedBy()); + } else { + _log.debug("Building the garlic was fast! 
" + (after - before) + " ms"); + } + _context.jobQueue().addJob(new SendJob()); } private class SendJob extends JobImpl { - public String getName() { return "Send Built Garlic Message"; } - public void runJob() { - if (_config.getRecipient() != null) - _log.info("sending garlic to recipient " + _config.getRecipient().getIdentity().getHash().toBase64()); - else - _log.info("sending garlic to public key " + _config.getRecipientPublicKey()); - sendGarlic(); - } + public SendJob() { + super(SendGarlicJob.this._context); + } + public String getName() { return "Send Built Garlic Message"; } + public void runJob() { + if (_config.getRecipient() != null) + _log.info("sending garlic to recipient " + _config.getRecipient().getIdentity().getHash().toBase64()); + else + _log.info("sending garlic to public key " + _config.getRecipientPublicKey()); + sendGarlic(); + } } private void sendGarlic() { - OutNetMessage msg = new OutNetMessage(); - long when = _message.getMessageExpiration().getTime() + Router.CLOCK_FUDGE_FACTOR; - msg.setExpiration(when); - msg.setMessage(_message); - msg.setOnFailedReplyJob(_onReplyFailed); - msg.setOnFailedSendJob(_onSendFailed); - msg.setOnReplyJob(_onReply); - msg.setOnSendJob(_onSend); - msg.setPriority(_priority); - msg.setReplySelector(_replySelector); - msg.setTarget(_config.getRecipient()); - //_log.info("Sending garlic message to [" + _config.getRecipient() + "] encrypted with " + _config.getRecipientPublicKey() + " or " + _config.getRecipient().getIdentity().getPublicKey()); - //_log.debug("Garlic config data:\n" + _config); - //msg.setTarget(_target); - OutNetMessagePool.getInstance().add(msg); - _log.debug("Garlic message added to outbound network message pool"); + OutNetMessage msg = new OutNetMessage(_context); + long when = _message.getMessageExpiration().getTime() + Router.CLOCK_FUDGE_FACTOR; + msg.setExpiration(when); + msg.setMessage(_message); + msg.setOnFailedReplyJob(_onReplyFailed); + msg.setOnFailedSendJob(_onSendFailed); + 
msg.setOnReplyJob(_onReply); + msg.setOnSendJob(_onSend); + msg.setPriority(_priority); + msg.setReplySelector(_replySelector); + msg.setTarget(_config.getRecipient()); + //_log.info("Sending garlic message to [" + _config.getRecipient() + "] encrypted with " + _config.getRecipientPublicKey() + " or " + _config.getRecipient().getIdentity().getPublicKey()); + //_log.debug("Garlic config data:\n" + _config); + //msg.setTarget(_target); + _context.outNetMessagePool().add(msg); + _log.debug("Garlic message added to outbound network message pool"); } } diff --git a/router/java/src/net/i2p/router/message/SendMessageAckJob.java b/router/java/src/net/i2p/router/message/SendMessageAckJob.java index c62d16657..c102d6fee 100644 --- a/router/java/src/net/i2p/router/message/SendMessageAckJob.java +++ b/router/java/src/net/i2p/router/message/SendMessageAckJob.java @@ -1,9 +1,9 @@ package net.i2p.router.message; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -16,6 +16,7 @@ import net.i2p.data.i2np.SourceRouteBlock; import net.i2p.router.JobImpl; import net.i2p.router.JobQueue; import net.i2p.util.Clock; +import net.i2p.router.RouterContext; /** * Send a DeliveryStatusMessage to the location specified in the source route block @@ -30,29 +31,29 @@ public class SendMessageAckJob extends JobImpl { public final static int ACK_PRIORITY = 100; - public SendMessageAckJob(SourceRouteBlock block, long ackId) { - super(); - _block = block; - _ackId = ackId; + public SendMessageAckJob(RouterContext ctx, SourceRouteBlock block, long ackId) { + super(ctx); + _block = block; + _ackId = ackId; } public void runJob() { - JobQueue.getInstance().addJob(new SendReplyMessageJob(_block, createAckMessage(), ACK_PRIORITY)); + _context.jobQueue().addJob(new SendReplyMessageJob(_context, _block, createAckMessage(), ACK_PRIORITY)); } /** - * Create whatever should be delivered to the intermediary hop so that - * a DeliveryStatusMessage gets to the intended recipient. + * Create whatever should be delivered to the intermediary hop so that + * a DeliveryStatusMessage gets to the intended recipient. * * Currently this doesn't garlic encrypt the DeliveryStatusMessage with * the block's tag and sessionKey, but it could. 
* */ protected I2NPMessage createAckMessage() { - DeliveryStatusMessage statusMessage = new DeliveryStatusMessage(); - statusMessage.setArrival(new Date(Clock.getInstance().now())); - statusMessage.setMessageId(_ackId); - return statusMessage; + DeliveryStatusMessage statusMessage = new DeliveryStatusMessage(_context); + statusMessage.setArrival(new Date(_context.clock().now())); + statusMessage.setMessageId(_ackId); + return statusMessage; } public String getName() { return "Send Message Ack"; } diff --git a/router/java/src/net/i2p/router/message/SendMessageDirectJob.java b/router/java/src/net/i2p/router/message/SendMessageDirectJob.java index ed595174e..da05be5ae 100644 --- a/router/java/src/net/i2p/router/message/SendMessageDirectJob.java +++ b/router/java/src/net/i2p/router/message/SendMessageDirectJob.java @@ -27,9 +27,10 @@ import net.i2p.router.Router; import net.i2p.router.transport.OutboundMessageRegistry; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; public class SendMessageDirectJob extends JobImpl { - private final static Log _log = new Log(SendMessageDirectJob.class); + private Log _log; private I2NPMessage _message; private Hash _targetHash; private RouterInfo _router; @@ -44,114 +45,135 @@ public class SendMessageDirectJob extends JobImpl { private final static long DEFAULT_TIMEOUT = 60*1000; - public SendMessageDirectJob(I2NPMessage message, Hash toPeer, long expiration, int priority) { - this(message, toPeer, null, null, null, null, expiration, priority); + public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, long expiration, int priority) { + this(ctx, message, toPeer, null, null, null, null, expiration, priority); } - public SendMessageDirectJob(I2NPMessage message, Hash toPeer, int priority) { - this(message, toPeer, DEFAULT_TIMEOUT+Clock.getInstance().now(), priority); + public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, int priority) { + 
this(ctx, message, toPeer, DEFAULT_TIMEOUT+ctx.clock().now(), priority); } - public SendMessageDirectJob(I2NPMessage message, Hash toPeer, ReplyJob onSuccess, Job onFail, MessageSelector selector, long expiration, int priority) { - this(message, toPeer, null, onSuccess, onFail, selector, expiration, priority); + public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, ReplyJob onSuccess, Job onFail, MessageSelector selector, long expiration, int priority) { + this(ctx, message, toPeer, null, onSuccess, onFail, selector, expiration, priority); } - public SendMessageDirectJob(I2NPMessage message, Hash toPeer, Job onSend, ReplyJob onSuccess, Job onFail, MessageSelector selector, long expiration, int priority) { - super(); - _message = message; - _targetHash = toPeer; - _router = null; - _expiration = expiration; - _priority = priority; - _alreadySearched = false; - _onSend = onSend; - _onSuccess = onSuccess; - _onFail = onFail; - _selector = selector; - if (message == null) - throw new IllegalArgumentException("Attempt to send a null message"); - if (_targetHash == null) - throw new IllegalArgumentException("Attempt to send a message to a null peer"); - _sent = false; - long remaining = expiration - Clock.getInstance().now(); - if (remaining < 50*1000) { - _log.info("Sending message to expire in " + remaining + "ms containing " + message.getUniqueId() + " (a " + message.getClass().getName() + ")", new Exception("SendDirect from")); - } + public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, Job onSend, ReplyJob onSuccess, Job onFail, MessageSelector selector, long expiration, int priority) { + super(ctx); + _log = _context.logManager().getLog(SendMessageDirectJob.class); + _message = message; + _targetHash = toPeer; + _router = null; + _expiration = expiration; + _priority = priority; + _alreadySearched = false; + _onSend = onSend; + _onSuccess = onSuccess; + _onFail = onFail; + _selector = selector; + if (message == 
null) + throw new IllegalArgumentException("Attempt to send a null message"); + if (_targetHash == null) + throw new IllegalArgumentException("Attempt to send a message to a null peer"); + _sent = false; + long remaining = expiration - _context.clock().now(); + if (remaining < 50*1000) { + _log.info("Sending message to expire in " + remaining + "ms containing " + message.getUniqueId() + " (a " + message.getClass().getName() + ")", new Exception("SendDirect from")); + } } public String getName() { return "Send Message Direct"; } public void runJob() { - long now = Clock.getInstance().now(); - if (_expiration == 0) - _expiration = now + DEFAULT_TIMEOUT; - - if (_expiration - 30*1000 < now) { - _log.info("Soon to expire sendDirect of " + _message.getClass().getName() + " [expiring in " + (_expiration-now) + "]", getAddedBy()); - } - - if (_expiration < now) { - _log.warn("Timed out sending message " + _message + " directly (expiration = " + new Date(_expiration) + ") to " + _targetHash.toBase64(), getAddedBy()); - return; - } - if (_router != null) { - _log.debug("Router specified, sending"); - send(); - } else { - _router = NetworkDatabaseFacade.getInstance().lookupRouterInfoLocally(_targetHash); - if (_router != null) { - _log.debug("Router not specified but lookup found it"); - send(); - } else { - if (!_alreadySearched) { - _log.debug("Router not specified, so we're looking for it..."); - NetworkDatabaseFacade.getInstance().lookupRouterInfo(_targetHash, this, this, _expiration - Clock.getInstance().now()); - _alreadySearched = true; - } else { - _log.error("Unable to find the router to send to: " + _targetHash + " message: " + _message, getAddedBy()); - } - } - } + long now = _context.clock().now(); + if (_expiration == 0) + _expiration = now + DEFAULT_TIMEOUT; + + if (_expiration - 30*1000 < now) { + _log.info("Soon to expire sendDirect of " + _message.getClass().getName() + + " [expiring in " + (_expiration-now) + "]", getAddedBy()); + } + + if (_expiration < 
now) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Timed out sending message " + _message + " directly (expiration = " + + new Date(_expiration) + ") to " + _targetHash.toBase64(), getAddedBy()); + return; + } + if (_router != null) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Router specified, sending"); + send(); + } else { + _router = _context.netDb().lookupRouterInfoLocally(_targetHash); + if (_router != null) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Router not specified but lookup found it"); + send(); + } else { + if (!_alreadySearched) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Router not specified, so we're looking for it..."); + _context.netDb().lookupRouterInfo(_targetHash, this, this, + _expiration - _context.clock().now()); + _alreadySearched = true; + } else { + if (_log.shouldLog(Log.ERROR)) + _log.error("Unable to find the router to send to: " + _targetHash + + " message: " + _message, getAddedBy()); + } + } + } } private void send() { - if (_sent) { _log.warn("Not resending!", new Exception("blah")); return; } - _sent = true; - if (Router.getInstance().getRouterInfo().getIdentity().getHash().equals(_router.getIdentity().getHash())) { - if (_selector != null) { - OutNetMessage outM = new OutNetMessage(); - outM.setExpiration(_expiration); - outM.setMessage(_message); - outM.setOnFailedReplyJob(_onFail); - outM.setOnFailedSendJob(_onFail); - outM.setOnReplyJob(_onSuccess); - outM.setOnSendJob(_onSend); - outM.setPriority(_priority); - outM.setReplySelector(_selector); - outM.setTarget(_router); - OutboundMessageRegistry.getInstance().registerPending(outM); - } - - if (_onSend != null) - JobQueue.getInstance().addJob(_onSend); - - InNetMessage msg = new InNetMessage(); - msg.setFromRouter(_router.getIdentity()); - msg.setMessage(_message); - InNetMessagePool.getInstance().add(msg); - - _log.debug("Adding " + _message.getClass().getName() + " to inbound message pool as it was destined for ourselves"); - //_log.debug("debug", 
_createdBy); - } else { - OutNetMessage msg = new OutNetMessage(); - msg.setExpiration(_expiration); - msg.setMessage(_message); - msg.setOnFailedReplyJob(_onFail); - msg.setOnFailedSendJob(_onFail); - msg.setOnReplyJob(_onSuccess); - msg.setOnSendJob(_onSend); - msg.setPriority(_priority); - msg.setReplySelector(_selector); - msg.setTarget(_router); - OutNetMessagePool.getInstance().add(msg); - _log.debug("Adding " + _message.getClass().getName() + " to outbound message pool targeting " + _router.getIdentity().getHash().toBase64()); - //_log.debug("Message pooled: " + _message); - } + if (_sent) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Not resending!", new Exception("blah")); + return; + } + _sent = true; + Hash to = _router.getIdentity().getHash(); + Hash us = _context.router().getRouterInfo().getIdentity().getHash(); + if (us.equals(to)) { + if (_selector != null) { + OutNetMessage outM = new OutNetMessage(_context); + outM.setExpiration(_expiration); + outM.setMessage(_message); + outM.setOnFailedReplyJob(_onFail); + outM.setOnFailedSendJob(_onFail); + outM.setOnReplyJob(_onSuccess); + outM.setOnSendJob(_onSend); + outM.setPriority(_priority); + outM.setReplySelector(_selector); + outM.setTarget(_router); + _context.messageRegistry().registerPending(outM); + } + + if (_onSend != null) + _context.jobQueue().addJob(_onSend); + + InNetMessage msg = new InNetMessage(); + msg.setFromRouter(_router.getIdentity()); + msg.setMessage(_message); + _context.inNetMessagePool().add(msg); + + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Adding " + _message.getClass().getName() + + " to inbound message pool as it was destined for ourselves"); + //_log.debug("debug", _createdBy); + } else { + OutNetMessage msg = new OutNetMessage(_context); + msg.setExpiration(_expiration); + msg.setMessage(_message); + msg.setOnFailedReplyJob(_onFail); + msg.setOnFailedSendJob(_onFail); + msg.setOnReplyJob(_onSuccess); + msg.setOnSendJob(_onSend); + msg.setPriority(_priority); + 
msg.setReplySelector(_selector); + msg.setTarget(_router); + _context.outNetMessagePool().add(msg); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Adding " + _message.getClass().getName() + + " to outbound message pool targeting " + + _router.getIdentity().getHash().toBase64()); + //_log.debug("Message pooled: " + _message); + } } } diff --git a/router/java/src/net/i2p/router/message/SendReplyMessageJob.java b/router/java/src/net/i2p/router/message/SendReplyMessageJob.java index f47c66cc8..0a560be71 100644 --- a/router/java/src/net/i2p/router/message/SendReplyMessageJob.java +++ b/router/java/src/net/i2p/router/message/SendReplyMessageJob.java @@ -1,9 +1,9 @@ package net.i2p.router.message; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -14,34 +14,36 @@ import net.i2p.data.i2np.SourceRouteReplyMessage; import net.i2p.router.JobImpl; import net.i2p.router.JobQueue; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** - * Send a SourceRouteReplyMessage to the location specified in the source route block. - * This uses the simplest technique (don't garlic, and send direct to where the - * SourceRouteBlock requested), but it could instead garlic it and send it via a + * Send a SourceRouteReplyMessage to the location specified in the source route block. 
+ * This uses the simplest technique (don't garlic, and send direct to where the + * SourceRouteBlock requested), but it could instead garlic it and send it via a * tunnel or garlic route it additionally) * */ public class SendReplyMessageJob extends JobImpl { - private final static Log _log = new Log(SendReplyMessageJob.class); + private Log _log; private SourceRouteBlock _block; private I2NPMessage _message; private int _priority; - public SendReplyMessageJob(SourceRouteBlock block, I2NPMessage message, int priority) { - super(); - _block = block; - _message = message; - _priority = priority; + public SendReplyMessageJob(RouterContext context, SourceRouteBlock block, I2NPMessage message, int priority) { + super(context); + _log = context.logManager().getLog(SendReplyMessageJob.class); + _block = block; + _message = message; + _priority = priority; } public void runJob() { - SourceRouteReplyMessage msg = new SourceRouteReplyMessage(); - msg.setMessage(_message); - msg.setEncryptedHeader(_block.getData()); - msg.setMessageExpiration(_message.getMessageExpiration()); - - send(msg); + SourceRouteReplyMessage msg = new SourceRouteReplyMessage(_context); + msg.setMessage(_message); + msg.setEncryptedHeader(_block.getData()); + msg.setMessageExpiration(_message.getMessageExpiration()); + + send(msg); } /** @@ -54,9 +56,9 @@ public class SendReplyMessageJob extends JobImpl { * */ protected void send(I2NPMessage msg) { - _log.info("Sending reply with " + _message.getClass().getName() + " in a sourceRouteeplyMessage to " + _block.getRouter().toBase64()); - SendMessageDirectJob j = new SendMessageDirectJob(msg, _block.getRouter(), _priority); - JobQueue.getInstance().addJob(j); + _log.info("Sending reply with " + _message.getClass().getName() + " in a sourceRouteeplyMessage to " + _block.getRouter().toBase64()); + SendMessageDirectJob j = new SendMessageDirectJob(_context, msg, _block.getRouter(), _priority); + _context.jobQueue().addJob(j); } public String getName() { 
return "Send Reply Message"; } diff --git a/router/java/src/net/i2p/router/message/SendTunnelMessageJob.java b/router/java/src/net/i2p/router/message/SendTunnelMessageJob.java index c5684aa75..eb58fc234 100644 --- a/router/java/src/net/i2p/router/message/SendTunnelMessageJob.java +++ b/router/java/src/net/i2p/router/message/SendTunnelMessageJob.java @@ -1,9 +1,9 @@ package net.i2p.router.message; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -46,13 +46,14 @@ import net.i2p.router.TunnelManagerFacade; import net.i2p.router.transport.OutboundMessageRegistry; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Send a message down a tunnel that we are the gateway for * */ public class SendTunnelMessageJob extends JobImpl { - private final static Log _log = new Log(SendTunnelMessageJob.class); + private Log _log; private I2NPMessage _message; private Hash _destRouter; private TunnelId _tunnelId; @@ -65,360 +66,361 @@ public class SendTunnelMessageJob extends JobImpl { private long _expiration; private int _priority; - public SendTunnelMessageJob(I2NPMessage msg, TunnelId tunnelId, Job onSend, ReplyJob onReply, Job onFailure, MessageSelector selector, long timeoutMs, int priority) { - this(msg, tunnelId, null, null, onSend, onReply, onFailure, selector, timeoutMs, priority); + public SendTunnelMessageJob(RouterContext ctx, I2NPMessage msg, TunnelId tunnelId, Job onSend, ReplyJob onReply, Job onFailure, MessageSelector selector, long timeoutMs, int priority) { + this(ctx, 
msg, tunnelId, null, null, onSend, onReply, onFailure, selector, timeoutMs, priority); } - - public SendTunnelMessageJob(I2NPMessage msg, TunnelId tunnelId, Hash targetRouter, TunnelId targetTunnelId, Job onSend, ReplyJob onReply, Job onFailure, MessageSelector selector, long timeoutMs, int priority) { - super(); - if (msg == null) - throw new IllegalArgumentException("wtf, null message? sod off"); - _message = msg; - _destRouter = targetRouter; - _tunnelId = tunnelId; - _targetTunnelId = targetTunnelId; - _onSend = onSend; - _onReply = onReply; - _onFailure = onFailure; - _selector = selector; - _timeout = timeoutMs; - _priority = priority; - - if (timeoutMs < 50*1000) { - _log.info("Sending tunnel message to expire in " + timeoutMs + "ms containing " + msg.getUniqueId() + " (a " + msg.getClass().getName() + ")", new Exception("SendTunnel from")); - } - //_log.info("Send tunnel message " + msg.getClass().getName() + " to " + _destRouter + " over " + _tunnelId + " targetting tunnel " + _targetTunnelId, new Exception("SendTunnel from")); - _expiration = Clock.getInstance().now() + timeoutMs; + + public SendTunnelMessageJob(RouterContext ctx, I2NPMessage msg, TunnelId tunnelId, Hash targetRouter, TunnelId targetTunnelId, Job onSend, ReplyJob onReply, Job onFailure, MessageSelector selector, long timeoutMs, int priority) { + super(ctx); + _log = ctx.logManager().getLog(SendTunnelMessageJob.class); + if (msg == null) + throw new IllegalArgumentException("wtf, null message? 
sod off"); + _message = msg; + _destRouter = targetRouter; + _tunnelId = tunnelId; + _targetTunnelId = targetTunnelId; + _onSend = onSend; + _onReply = onReply; + _onFailure = onFailure; + _selector = selector; + _timeout = timeoutMs; + _priority = priority; + + if (timeoutMs < 50*1000) { + _log.info("Sending tunnel message to expire in " + timeoutMs + "ms containing " + msg.getUniqueId() + " (a " + msg.getClass().getName() + ")", new Exception("SendTunnel from")); + } + //_log.info("Send tunnel message " + msg.getClass().getName() + " to " + _destRouter + " over " + _tunnelId + " targetting tunnel " + _targetTunnelId, new Exception("SendTunnel from")); + _expiration = _context.clock().now() + timeoutMs; } public void runJob() { - TunnelInfo info = TunnelManagerFacade.getInstance().getTunnelInfo(_tunnelId); - if (info == null) { - _log.debug("Message for unknown tunnel [" + _tunnelId + "] received, forward to " + _destRouter); - if ( (_tunnelId == null) || (_destRouter == null) ) { - _log.error("Someone br0ke us. 
where is this message supposed to go again?", getAddedBy()); - return; - } - TunnelMessage msg = new TunnelMessage(); - try { - ByteArrayOutputStream baos = new ByteArrayOutputStream(1024); - _message.writeBytes(baos); - msg.setData(baos.toByteArray()); - msg.setTunnelId(_tunnelId); - msg.setMessageExpiration(new Date(_expiration)); - JobQueue.getInstance().addJob(new SendMessageDirectJob(msg, _destRouter, _onSend, _onReply, _onFailure, _selector, _expiration, _priority)); - - String bodyType = _message.getClass().getName(); - MessageHistory.getInstance().wrap(bodyType, _message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId()); - } catch (IOException ioe) { - _log.error("Error writing out the tunnel message to send to the tunnel", ioe); - } catch (DataFormatException dfe) { - _log.error("Error writing out the tunnel message to send to the tunnel", dfe); - } - return; - } - - if (isEndpoint(info)) { - _log.info("Tunnel message where we're both the gateway and the endpoint - honor instructions"); - honorInstructions(info); - return; - } else if (isGateway(info)) { - handleAsGateway(info); - return; - } else { - handleAsParticipant(info); - return; - } + TunnelInfo info = _context.tunnelManager().getTunnelInfo(_tunnelId); + if (info == null) { + _log.debug("Message for unknown tunnel [" + _tunnelId + "] received, forward to " + _destRouter); + if ( (_tunnelId == null) || (_destRouter == null) ) { + _log.error("Someone br0ke us. 
where is this message supposed to go again?", getAddedBy()); + return; + } + TunnelMessage msg = new TunnelMessage(_context); + try { + ByteArrayOutputStream baos = new ByteArrayOutputStream(1024); + _message.writeBytes(baos); + msg.setData(baos.toByteArray()); + msg.setTunnelId(_tunnelId); + msg.setMessageExpiration(new Date(_expiration)); + _context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, _destRouter, _onSend, _onReply, _onFailure, _selector, _expiration, _priority)); + + String bodyType = _message.getClass().getName(); + _context.messageHistory().wrap(bodyType, _message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId()); + } catch (IOException ioe) { + _log.error("Error writing out the tunnel message to send to the tunnel", ioe); + } catch (DataFormatException dfe) { + _log.error("Error writing out the tunnel message to send to the tunnel", dfe); + } + return; + } + + if (isEndpoint(info)) { + _log.info("Tunnel message where we're both the gateway and the endpoint - honor instructions"); + honorInstructions(info); + return; + } else if (isGateway(info)) { + handleAsGateway(info); + return; + } else { + handleAsParticipant(info); + return; + } } private void handleAsGateway(TunnelInfo info) { - // since we are the gateway, we don't need to verify the data structures - TunnelInfo us = getUs(info); - if (us == null) { - _log.error("We are not participating in this /known/ tunnel - was the router reset?"); - if (_onFailure != null) - JobQueue.getInstance().addJob(_onFailure); - } else { - // we're the gateway, so sign, encrypt, and forward to info.getNextHop() - TunnelMessage msg = prepareMessage(info); - if (msg == null) { - if (_log.shouldLog(Log.ERROR)) - _log.error("wtf, unable to prepare a tunnel message to the next hop, when we're the gateway and hops remain? 
tunnel: " + info); - if (_onFailure != null) - JobQueue.getInstance().addJob(_onFailure); - return; - } - _log.debug("Tunnel message created: " + msg + " out of encrypted message: " + _message); - long now = Clock.getInstance().now(); - if (_expiration < now + 15*1000) { - _log.warn("Adding a tunnel message that will expire shortly [" + new Date(_expiration) + "]", getAddedBy()); - } - msg.setMessageExpiration(new Date(_expiration)); - JobQueue.getInstance().addJob(new SendMessageDirectJob(msg, info.getNextHop(), _onSend, _onReply, _onFailure, _selector, _expiration, _priority)); - } + // since we are the gateway, we don't need to verify the data structures + TunnelInfo us = getUs(info); + if (us == null) { + _log.error("We are not participating in this /known/ tunnel - was the router reset?"); + if (_onFailure != null) + _context.jobQueue().addJob(_onFailure); + } else { + // we're the gateway, so sign, encrypt, and forward to info.getNextHop() + TunnelMessage msg = prepareMessage(info); + if (msg == null) { + if (_log.shouldLog(Log.ERROR)) + _log.error("wtf, unable to prepare a tunnel message to the next hop, when we're the gateway and hops remain? tunnel: " + info); + if (_onFailure != null) + _context.jobQueue().addJob(_onFailure); + return; + } + _log.debug("Tunnel message created: " + msg + " out of encrypted message: " + _message); + long now = _context.clock().now(); + if (_expiration < now + 15*1000) { + _log.warn("Adding a tunnel message that will expire shortly [" + new Date(_expiration) + "]", getAddedBy()); + } + msg.setMessageExpiration(new Date(_expiration)); + _context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, info.getNextHop(), _onSend, _onReply, _onFailure, _selector, _expiration, _priority)); + } } private void handleAsParticipant(TunnelInfo info) { - // SendTunnelMessageJob shouldn't be used for participants! - if (_log.shouldLog(Log.DEBUG)) - _log.debug("SendTunnelMessageJob for a participant... 
", getAddedBy()); - - if (!(_message instanceof TunnelMessage)) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Cannot inject non-tunnel messages as a participant!" + _message, getAddedBy()); - if (_onFailure != null) - JobQueue.getInstance().addJob(_onFailure); - return; - } - - TunnelMessage msg = (TunnelMessage)_message; - - TunnelVerificationStructure struct = msg.getVerificationStructure(); - if ( (info.getVerificationKey() == null) || (info.getVerificationKey().getKey() == null) ) { - if (_log.shouldLog(Log.ERROR)) - _log.error("No verification key for the participant? tunnel: " + info, getAddedBy()); - if (_onFailure != null) - JobQueue.getInstance().addJob(_onFailure); - return; - } - - boolean ok = struct.verifySignature(info.getVerificationKey().getKey()); - if (!ok) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Failed tunnel verification! Spoofing / tagging attack? " + _message, getAddedBy()); - if (_onFailure != null) - JobQueue.getInstance().addJob(_onFailure); - return; - } else { - if (info.getNextHop() != null) { - if (_log.shouldLog(Log.INFO)) - _log.info("Message for tunnel " + info.getTunnelId().getTunnelId() + " received where we're not the gateway and there are remaining hops, so forward it on to " - + info.getNextHop().toBase64() + " via SendMessageDirectJob"); - JobQueue.getInstance().addJob(new SendMessageDirectJob(msg, info.getNextHop(), _onSend, null, _onFailure, null, _message.getMessageExpiration().getTime(), _priority)); - return; - } else { - if (_log.shouldLog(Log.ERROR)) - _log.error("Should not be reached - participant, but no more hops?!"); - if (_onFailure != null) - JobQueue.getInstance().addJob(_onFailure); - return; - } - } + // SendTunnelMessageJob shouldn't be used for participants! + if (_log.shouldLog(Log.DEBUG)) + _log.debug("SendTunnelMessageJob for a participant... 
", getAddedBy()); + + if (!(_message instanceof TunnelMessage)) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Cannot inject non-tunnel messages as a participant!" + _message, getAddedBy()); + if (_onFailure != null) + _context.jobQueue().addJob(_onFailure); + return; + } + + TunnelMessage msg = (TunnelMessage)_message; + + TunnelVerificationStructure struct = msg.getVerificationStructure(); + if ( (info.getVerificationKey() == null) || (info.getVerificationKey().getKey() == null) ) { + if (_log.shouldLog(Log.ERROR)) + _log.error("No verification key for the participant? tunnel: " + info, getAddedBy()); + if (_onFailure != null) + _context.jobQueue().addJob(_onFailure); + return; + } + + boolean ok = struct.verifySignature(_context, info.getVerificationKey().getKey()); + if (!ok) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Failed tunnel verification! Spoofing / tagging attack? " + _message, getAddedBy()); + if (_onFailure != null) + _context.jobQueue().addJob(_onFailure); + return; + } else { + if (info.getNextHop() != null) { + if (_log.shouldLog(Log.INFO)) + _log.info("Message for tunnel " + info.getTunnelId().getTunnelId() + " received where we're not the gateway and there are remaining hops, so forward it on to " + + info.getNextHop().toBase64() + " via SendMessageDirectJob"); + _context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, info.getNextHop(), _onSend, null, _onFailure, null, _message.getMessageExpiration().getTime(), _priority)); + return; + } else { + if (_log.shouldLog(Log.ERROR)) + _log.error("Should not be reached - participant, but no more hops?!"); + if (_onFailure != null) + _context.jobQueue().addJob(_onFailure); + return; + } + } } /** find our place in the tunnel */ private TunnelInfo getUs(TunnelInfo info) { - Hash us = Router.getInstance().getRouterInfo().getIdentity().getHash(); - TunnelInfo lastUs = null; - while (info != null) { - if (us.equals(info.getThisHop())) - lastUs = info; - info = info.getNextHopInfo(); - } - 
return lastUs; + Hash us = _context.routerHash(); + TunnelInfo lastUs = null; + while (info != null) { + if (us.equals(info.getThisHop())) + lastUs = info; + info = info.getNextHopInfo(); + } + return lastUs; } /** are we the endpoint for the tunnel? */ private boolean isEndpoint(TunnelInfo info) { - TunnelInfo us = getUs(info); - if (us == null) return false; - return (us.getNextHop() == null); + TunnelInfo us = getUs(info); + if (us == null) return false; + return (us.getNextHop() == null); } /** are we the gateway for the tunnel? */ private boolean isGateway(TunnelInfo info) { - TunnelInfo us = getUs(info); - if (us == null) return false; - return (us.getSigningKey() != null); // only the gateway can sign + TunnelInfo us = getUs(info); + if (us == null) return false; + return (us.getSigningKey() != null); // only the gateway can sign } private TunnelMessage prepareMessage(TunnelInfo info) { - TunnelMessage msg = new TunnelMessage(); - - SessionKey key = KeyGenerator.getInstance().generateSessionKey(); - - DeliveryInstructions instructions = new DeliveryInstructions(); - instructions.setDelayRequested(false); - instructions.setEncrypted(true); - instructions.setEncryptionKey(key); - - // if we aren't told where to send it, have it be processed locally at the endpoint - // but if we are, have the endpoint forward it appropriately. 
- // note that this algorithm does not currently support instructing the endpoint to send to a Destination - if (_destRouter != null) { - instructions.setRouter(_destRouter); - if (_targetTunnelId != null) { - _log.debug("Instructions target tunnel " + _targetTunnelId + " on router " + _destRouter.calculateHash()); - instructions.setTunnelId(_targetTunnelId); - instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_TUNNEL); - } else { - _log.debug("Instructions target router " + _destRouter.toBase64()); - instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_ROUTER); - } - } else { - if (_message instanceof DataMessage) { - _log.debug("Instructions are for local message delivery at the endpoint with a DataMessage to be sent to a Destination"); - instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL); - } else { - _log.debug("Instructions are for local delivery at the endpoint targetting the now-local router"); - instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL); - } - } - - if (info == null) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Tunnel info is null to send message " + _message); - return null; - } else if ( (info.getEncryptionKey() == null) || (info.getEncryptionKey().getKey() == null) ) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Tunnel encryption key is null when we're the gateway?! 
info: " + info); - return null; - } - - byte encryptedInstructions[] = encrypt(instructions, info.getEncryptionKey().getKey(), 512); - byte encryptedMessage[] = encrypt(_message, key, 1024); - TunnelVerificationStructure verification = createVerificationStructure(encryptedMessage, info); - - String bodyType = _message.getClass().getName(); - MessageHistory.getInstance().wrap(bodyType, _message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId()); - - _log.debug("Tunnel message prepared: instructions = " + instructions); - - msg.setData(encryptedMessage); - msg.setEncryptedDeliveryInstructions(encryptedInstructions); - msg.setTunnelId(_tunnelId); - msg.setVerificationStructure(verification); - return msg; + TunnelMessage msg = new TunnelMessage(_context); + + SessionKey key = _context.keyGenerator().generateSessionKey(); + + DeliveryInstructions instructions = new DeliveryInstructions(); + instructions.setDelayRequested(false); + instructions.setEncrypted(true); + instructions.setEncryptionKey(key); + + // if we aren't told where to send it, have it be processed locally at the endpoint + // but if we are, have the endpoint forward it appropriately. 
+ // note that this algorithm does not currently support instructing the endpoint to send to a Destination + if (_destRouter != null) { + instructions.setRouter(_destRouter); + if (_targetTunnelId != null) { + _log.debug("Instructions target tunnel " + _targetTunnelId + " on router " + _destRouter.calculateHash()); + instructions.setTunnelId(_targetTunnelId); + instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_TUNNEL); + } else { + _log.debug("Instructions target router " + _destRouter.toBase64()); + instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_ROUTER); + } + } else { + if (_message instanceof DataMessage) { + _log.debug("Instructions are for local message delivery at the endpoint with a DataMessage to be sent to a Destination"); + instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL); + } else { + _log.debug("Instructions are for local delivery at the endpoint targetting the now-local router"); + instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL); + } + } + + if (info == null) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Tunnel info is null to send message " + _message); + return null; + } else if ( (info.getEncryptionKey() == null) || (info.getEncryptionKey().getKey() == null) ) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Tunnel encryption key is null when we're the gateway?! 
info: " + info); + return null; + } + + byte encryptedInstructions[] = encrypt(instructions, info.getEncryptionKey().getKey(), 512); + byte encryptedMessage[] = encrypt(_message, key, 1024); + TunnelVerificationStructure verification = createVerificationStructure(encryptedMessage, info); + + String bodyType = _message.getClass().getName(); + _context.messageHistory().wrap(bodyType, _message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId()); + + _log.debug("Tunnel message prepared: instructions = " + instructions); + + msg.setData(encryptedMessage); + msg.setEncryptedDeliveryInstructions(encryptedInstructions); + msg.setTunnelId(_tunnelId); + msg.setVerificationStructure(verification); + return msg; } - + private TunnelVerificationStructure createVerificationStructure(byte encryptedMessage[], TunnelInfo info) { - TunnelVerificationStructure struct = new TunnelVerificationStructure(); - struct.setMessageHash(SHA256Generator.getInstance().calculateHash(encryptedMessage)); - struct.sign(info.getSigningKey().getKey()); - return struct; + TunnelVerificationStructure struct = new TunnelVerificationStructure(); + struct.setMessageHash(_context.sha().calculateHash(encryptedMessage)); + struct.sign(_context, info.getSigningKey().getKey()); + return struct; } private byte[] encrypt(DataStructure struct, SessionKey key, int paddedSize) { - try { - ByteArrayOutputStream baos = new ByteArrayOutputStream(paddedSize); - struct.writeBytes(baos); - - byte iv[] = new byte[16]; - Hash h = SHA256Generator.getInstance().calculateHash(key.getData()); - System.arraycopy(h.getData(), 0, iv, 0, iv.length); - return AESEngine.getInstance().safeEncrypt(baos.toByteArray(), key, iv, paddedSize); - } catch (IOException ioe) { - _log.error("Error writing out data to encrypt", ioe); - } catch (DataFormatException dfe) { - _log.error("Error formatting data to encrypt", dfe); - } - return null; + try { + ByteArrayOutputStream baos = new ByteArrayOutputStream(paddedSize); + 
struct.writeBytes(baos); + + byte iv[] = new byte[16]; + Hash h = _context.sha().calculateHash(key.getData()); + System.arraycopy(h.getData(), 0, iv, 0, iv.length); + return _context.AESEngine().safeEncrypt(baos.toByteArray(), key, iv, paddedSize); + } catch (IOException ioe) { + _log.error("Error writing out data to encrypt", ioe); + } catch (DataFormatException dfe) { + _log.error("Error formatting data to encrypt", dfe); + } + return null; } - + private void honorInstructions(TunnelInfo info) { - if (_selector != null) - createFakeOutNetMessage(); - - if (_onSend != null) { - _log.debug("Firing onSend as we're honoring the instructions"); - JobQueue.getInstance().addJob(_onSend); - } - - // since we are the gateway, we don't need to decrypt the delivery instructions or the payload - - RouterIdentity ident = Router.getInstance().getRouterInfo().getIdentity(); - - if (_destRouter != null) { - I2NPMessage msg = null; - if (_targetTunnelId != null) { - _log.debug("Forward " + _message.getClass().getName() + " message off to remote tunnel " + _targetTunnelId.getTunnelId() + " on router " + _destRouter.toBase64()); - TunnelMessage tmsg = new TunnelMessage(); - tmsg.setEncryptedDeliveryInstructions(null); - tmsg.setTunnelId(_targetTunnelId); - tmsg.setVerificationStructure(null); - ByteArrayOutputStream baos = new ByteArrayOutputStream(1024); - try { - _message.writeBytes(baos); - } catch (IOException ioe) { - _log.error("Error writing out the message to be forwarded...??", ioe); - } catch (DataFormatException dfe) { - _log.error("Error writing message to be forwarded...???", dfe); - } - tmsg.setData(baos.toByteArray()); - msg = tmsg; - } else { - _log.debug("Forward " + _message.getClass().getName() + " message off to remote router " + _destRouter.toBase64()); - msg = _message; - } - long now = Clock.getInstance().now(); - //if (_expiration < now) { - _expiration = now + Router.CLOCK_FUDGE_FACTOR; - //_log.info("Fudging the message send so it expires in the fudge 
factor..."); - //} - - if (_expiration - 30*1000 < now) { - _log.error("Why are we trying to send a " + _message.getClass().getName() + " message with " + (_expiration-now) + "ms left?", getAddedBy()); - } - - String bodyType = _message.getClass().getName(); - MessageHistory.getInstance().wrap(bodyType, _message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId()); - - // don't specify a selector, since createFakeOutNetMessage already does that - JobQueue.getInstance().addJob(new SendMessageDirectJob(msg, _destRouter, _onSend, _onReply, _onFailure, null, _expiration, _priority)); - } else { - if ( (info.getDestination() == null) || !(_message instanceof DataMessage) ) { - // its a network message targeting us... - _log.debug("Destination is null or its not a DataMessage - pass it off to the InNetMessagePool"); - InNetMessage msg = new InNetMessage(); - msg.setFromRouter(ident); - msg.setFromRouterHash(ident.getHash()); - msg.setMessage(_message); - msg.setReplyBlock(null); - InNetMessagePool.getInstance().add(msg); - } else { - _log.debug("Destination is not null and it is a DataMessage - pop it into the ClientMessagePool"); - DataMessage msg = (DataMessage)_message; - boolean valid = MessageValidator.getInstance().validateMessage(msg.getUniqueId(), msg.getMessageExpiration().getTime()); - if (!valid) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Duplicate data message received [" + msg.getUniqueId() + " expiring on " + msg.getMessageExpiration() + "]"); - MessageHistory.getInstance().droppedOtherMessage(msg); - MessageHistory.getInstance().messageProcessingError(msg.getUniqueId(), msg.getClass().getName(), "Duplicate"); - return; - } - - Payload payload = new Payload(); - payload.setEncryptedData(msg.getData()); - - MessageReceptionInfo receptionInfo = new MessageReceptionInfo(); - receptionInfo.setFromPeer(ident.getHash()); - receptionInfo.setFromTunnel(_tunnelId); - - ClientMessage clientMessage = new ClientMessage(); - 
clientMessage.setDestination(info.getDestination()); - clientMessage.setPayload(payload); - clientMessage.setReceptionInfo(receptionInfo); - ClientMessagePool.getInstance().add(clientMessage); - MessageHistory.getInstance().receivePayloadMessage(msg.getUniqueId()); - } - } + if (_selector != null) + createFakeOutNetMessage(); + + if (_onSend != null) { + _log.debug("Firing onSend as we're honoring the instructions"); + _context.jobQueue().addJob(_onSend); + } + + // since we are the gateway, we don't need to decrypt the delivery instructions or the payload + + RouterIdentity ident = _context.router().getRouterInfo().getIdentity(); + + if (_destRouter != null) { + I2NPMessage msg = null; + if (_targetTunnelId != null) { + _log.debug("Forward " + _message.getClass().getName() + " message off to remote tunnel " + _targetTunnelId.getTunnelId() + " on router " + _destRouter.toBase64()); + TunnelMessage tmsg = new TunnelMessage(_context); + tmsg.setEncryptedDeliveryInstructions(null); + tmsg.setTunnelId(_targetTunnelId); + tmsg.setVerificationStructure(null); + ByteArrayOutputStream baos = new ByteArrayOutputStream(1024); + try { + _message.writeBytes(baos); + } catch (IOException ioe) { + _log.error("Error writing out the message to be forwarded...??", ioe); + } catch (DataFormatException dfe) { + _log.error("Error writing message to be forwarded...???", dfe); + } + tmsg.setData(baos.toByteArray()); + msg = tmsg; + } else { + _log.debug("Forward " + _message.getClass().getName() + " message off to remote router " + _destRouter.toBase64()); + msg = _message; + } + long now = _context.clock().now(); + //if (_expiration < now) { + _expiration = now + Router.CLOCK_FUDGE_FACTOR; + //_log.info("Fudging the message send so it expires in the fudge factor..."); + //} + + if (_expiration - 30*1000 < now) { + _log.error("Why are we trying to send a " + _message.getClass().getName() + " message with " + (_expiration-now) + "ms left?", getAddedBy()); + } + + String bodyType = 
_message.getClass().getName(); + _context.messageHistory().wrap(bodyType, _message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId()); + + // don't specify a selector, since createFakeOutNetMessage already does that + _context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, _destRouter, _onSend, _onReply, _onFailure, null, _expiration, _priority)); + } else { + if ( (info.getDestination() == null) || !(_message instanceof DataMessage) ) { + // its a network message targeting us... + _log.debug("Destination is null or its not a DataMessage - pass it off to the InNetMessagePool"); + InNetMessage msg = new InNetMessage(); + msg.setFromRouter(ident); + msg.setFromRouterHash(ident.getHash()); + msg.setMessage(_message); + msg.setReplyBlock(null); + _context.inNetMessagePool().add(msg); + } else { + _log.debug("Destination is not null and it is a DataMessage - pop it into the ClientMessagePool"); + DataMessage msg = (DataMessage)_message; + boolean valid = _context.messageValidator().validateMessage(msg.getUniqueId(), msg.getMessageExpiration().getTime()); + if (!valid) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Duplicate data message received [" + msg.getUniqueId() + " expiring on " + msg.getMessageExpiration() + "]"); + _context.messageHistory().droppedOtherMessage(msg); + _context.messageHistory().messageProcessingError(msg.getUniqueId(), msg.getClass().getName(), "Duplicate"); + return; + } + + Payload payload = new Payload(); + payload.setEncryptedData(msg.getData()); + + MessageReceptionInfo receptionInfo = new MessageReceptionInfo(); + receptionInfo.setFromPeer(ident.getHash()); + receptionInfo.setFromTunnel(_tunnelId); + + ClientMessage clientMessage = new ClientMessage(); + clientMessage.setDestination(info.getDestination()); + clientMessage.setPayload(payload); + clientMessage.setReceptionInfo(receptionInfo); + _context.clientMessagePool().add(clientMessage); + _context.messageHistory().receivePayloadMessage(msg.getUniqueId()); + } + 
} } private void createFakeOutNetMessage() { - // now we create a fake outNetMessage to go onto the registry so we can select - _log.debug("Registering a fake outNetMessage for the message tunneled locally since we have a selector"); - OutNetMessage outM = new OutNetMessage(); - outM.setExpiration(_expiration); - outM.setMessage(_message); - outM.setOnFailedReplyJob(_onFailure); - outM.setOnFailedSendJob(_onFailure); - outM.setOnReplyJob(_onReply); - outM.setOnSendJob(_onSend); - outM.setPriority(_priority); - outM.setReplySelector(_selector); - outM.setTarget(null); - OutboundMessageRegistry.getInstance().registerPending(outM); + // now we create a fake outNetMessage to go onto the registry so we can select + _log.debug("Registering a fake outNetMessage for the message tunneled locally since we have a selector"); + OutNetMessage outM = new OutNetMessage(_context); + outM.setExpiration(_expiration); + outM.setMessage(_message); + outM.setOnFailedReplyJob(_onFailure); + outM.setOnFailedSendJob(_onFailure); + outM.setOnReplyJob(_onReply); + outM.setOnSendJob(_onSend); + outM.setPriority(_priority); + outM.setReplySelector(_selector); + outM.setTarget(null); + _context.messageRegistry().registerPending(outM); } public String getName() { return "Send Tunnel Message"; } diff --git a/router/java/src/net/i2p/router/message/SourceRouteReplyMessageHandler.java b/router/java/src/net/i2p/router/message/SourceRouteReplyMessageHandler.java index d8d99431e..286215d17 100644 --- a/router/java/src/net/i2p/router/message/SourceRouteReplyMessageHandler.java +++ b/router/java/src/net/i2p/router/message/SourceRouteReplyMessageHandler.java @@ -1,9 +1,9 @@ package net.i2p.router.message; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. 
- * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -15,17 +15,22 @@ import net.i2p.data.i2np.SourceRouteBlock; import net.i2p.data.i2np.SourceRouteReplyMessage; import net.i2p.router.HandlerJobBuilder; import net.i2p.router.Job; +import net.i2p.router.RouterContext; /** * HandlerJobBuilder to build jobs to handle SourceRouteReplyMessages * */ public class SourceRouteReplyMessageHandler implements HandlerJobBuilder { + private RouterContext _context; + public SourceRouteReplyMessageHandler(RouterContext context) { + _context = context; + } public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock) { - // ignore the replyBlock for now - HandleSourceRouteReplyMessageJob job = new HandleSourceRouteReplyMessageJob((SourceRouteReplyMessage)receivedMessage, from, fromHash); - return job; + // ignore the replyBlock for now + HandleSourceRouteReplyMessageJob job = new HandleSourceRouteReplyMessageJob(_context, (SourceRouteReplyMessage)receivedMessage, from, fromHash); + return job; } } diff --git a/router/java/src/net/i2p/router/message/TunnelMessageHandler.java b/router/java/src/net/i2p/router/message/TunnelMessageHandler.java index 613d269d6..352828365 100644 --- a/router/java/src/net/i2p/router/message/TunnelMessageHandler.java +++ b/router/java/src/net/i2p/router/message/TunnelMessageHandler.java @@ -1,9 +1,9 @@ package net.i2p.router.message; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. 
- * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -15,17 +15,22 @@ import net.i2p.data.i2np.SourceRouteBlock; import net.i2p.data.i2np.TunnelMessage; import net.i2p.router.HandlerJobBuilder; import net.i2p.router.Job; +import net.i2p.router.RouterContext; /** * HandlerJobBuilder to build jobs to handle TunnelMessages * */ public class TunnelMessageHandler implements HandlerJobBuilder { + private RouterContext _context; + public TunnelMessageHandler(RouterContext context) { + _context = context; + } public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock) { - // ignore the replyBlock for now - HandleTunnelMessageJob job = new HandleTunnelMessageJob((TunnelMessage)receivedMessage, from, fromHash); - return job; + // ignore the replyBlock for now + HandleTunnelMessageJob job = new HandleTunnelMessageJob(_context, (TunnelMessage)receivedMessage, from, fromHash); + return job; } } diff --git a/router/java/src/net/i2p/router/networkdb/DatabaseLookupMessageHandler.java b/router/java/src/net/i2p/router/networkdb/DatabaseLookupMessageHandler.java index 50ac040ec..ab42e2d37 100644 --- a/router/java/src/net/i2p/router/networkdb/DatabaseLookupMessageHandler.java +++ b/router/java/src/net/i2p/router/networkdb/DatabaseLookupMessageHandler.java @@ -15,20 +15,22 @@ import net.i2p.data.i2np.I2NPMessage; import net.i2p.data.i2np.SourceRouteBlock; import net.i2p.router.HandlerJobBuilder; import net.i2p.router.Job; -import net.i2p.stat.StatManager; +import net.i2p.router.RouterContext; /** * Build a HandleDatabaseLookupMessageJob whenever a DatabaseLookupMessage arrives * */ public class DatabaseLookupMessageHandler implements HandlerJobBuilder { - static { - 
StatManager.getInstance().createRateStat("netDb.lookupsReceived", "How many netDb lookups have we received?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + private RouterContext _context; + public DatabaseLookupMessageHandler(RouterContext context) { + _context = context; + _context.statManager().createRateStat("netDb.lookupsReceived", "How many netDb lookups have we received?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); } public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock) { - StatManager.getInstance().addRateData("netDb.lookupsReceived", 1, 0); + _context.statManager().addRateData("netDb.lookupsReceived", 1, 0); // ignore the reply block for the moment - return new HandleDatabaseLookupMessageJob((DatabaseLookupMessage)receivedMessage, from, fromHash); + return new HandleDatabaseLookupMessageJob(_context, (DatabaseLookupMessage)receivedMessage, from, fromHash); } } diff --git a/router/java/src/net/i2p/router/networkdb/DatabaseSearchReplyMessageHandler.java b/router/java/src/net/i2p/router/networkdb/DatabaseSearchReplyMessageHandler.java index 9471c8f95..bf903f003 100644 --- a/router/java/src/net/i2p/router/networkdb/DatabaseSearchReplyMessageHandler.java +++ b/router/java/src/net/i2p/router/networkdb/DatabaseSearchReplyMessageHandler.java @@ -1,9 +1,9 @@ package net.i2p.router.networkdb; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -15,14 +15,19 @@ import net.i2p.data.i2np.I2NPMessage; import net.i2p.data.i2np.SourceRouteBlock; import net.i2p.router.HandlerJobBuilder; import net.i2p.router.Job; +import net.i2p.router.RouterContext; /** * Build a HandleDatabaseSearchReplyMessageJob whenever a DatabaseSearchReplyMessage arrives * */ public class DatabaseSearchReplyMessageHandler implements HandlerJobBuilder { + private RouterContext _context; + public DatabaseSearchReplyMessageHandler(RouterContext context) { + _context = context; + } public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock) { - // ignore the reply block for now - return new HandleDatabaseSearchReplyMessageJob((DatabaseSearchReplyMessage)receivedMessage, from, fromHash); + // ignore the reply block for now + return new HandleDatabaseSearchReplyMessageJob(_context, (DatabaseSearchReplyMessage)receivedMessage, from, fromHash); } } diff --git a/router/java/src/net/i2p/router/networkdb/DatabaseStoreMessageHandler.java b/router/java/src/net/i2p/router/networkdb/DatabaseStoreMessageHandler.java index 7130d608d..d1d311631 100644 --- a/router/java/src/net/i2p/router/networkdb/DatabaseStoreMessageHandler.java +++ b/router/java/src/net/i2p/router/networkdb/DatabaseStoreMessageHandler.java @@ -1,9 +1,9 @@ package net.i2p.router.networkdb; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -15,14 +15,19 @@ import net.i2p.data.i2np.I2NPMessage; import net.i2p.data.i2np.SourceRouteBlock; import net.i2p.router.HandlerJobBuilder; import net.i2p.router.Job; +import net.i2p.router.RouterContext; /** * Create a HandleDatabaseStoreMessageJob whenever a DatabaseStoreMessage arrives * */ public class DatabaseStoreMessageHandler implements HandlerJobBuilder { + private RouterContext _context; + public DatabaseStoreMessageHandler(RouterContext context) { + _context = context; + } public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock) { - // ignore the reply block for the moment - return new HandleDatabaseStoreMessageJob((DatabaseStoreMessage)receivedMessage, from, fromHash); + // ignore the reply block for the moment + return new HandleDatabaseStoreMessageJob(_context, (DatabaseStoreMessage)receivedMessage, from, fromHash); } } diff --git a/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java b/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java index f99b56f1f..20fbe43d0 100644 --- a/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java +++ b/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java @@ -37,7 +37,7 @@ import net.i2p.router.message.SendMessageDirectJob; import net.i2p.router.message.SendTunnelMessageJob; import net.i2p.util.Clock; import net.i2p.util.Log; -import net.i2p.stat.StatManager; +import net.i2p.router.RouterContext; /** * Handle a lookup for a key received from a remote peer. 
Needs to be implemented @@ -45,20 +45,19 @@ import net.i2p.stat.StatManager; * */ public class HandleDatabaseLookupMessageJob extends JobImpl { - private final static Log _log = new Log(HandleDatabaseLookupMessageJob.class); + private Log _log; private DatabaseLookupMessage _message; private RouterIdentity _from; private Hash _fromHash; private final static int MAX_ROUTERS_RETURNED = 3; private final static int REPLY_TIMEOUT = 60*1000; private final static int MESSAGE_PRIORITY = 300; - - static { - StatManager.getInstance().createRateStat("netDb.lookupsHandled", "How many netDb lookups have we handled?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createRateStat("netDb.lookupsMatched", "How many netDb lookups did we have the data for?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - } - public HandleDatabaseLookupMessageJob(DatabaseLookupMessage receivedMessage, RouterIdentity from, Hash fromHash) { + public HandleDatabaseLookupMessageJob(RouterContext ctx, DatabaseLookupMessage receivedMessage, RouterIdentity from, Hash fromHash) { + super(ctx); + _log = _context.logManager().getLog(HandleDatabaseLookupMessageJob.class); + _context.statManager().createRateStat("netDb.lookupsHandled", "How many netDb lookups have we handled?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createRateStat("netDb.lookupsMatched", "How many netDb lookups did we have the data for?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); _message = receivedMessage; _from = from; _fromHash = fromHash; @@ -77,14 +76,14 @@ public class HandleDatabaseLookupMessageJob extends JobImpl { } // might as well grab what they sent us - NetworkDatabaseFacade.getInstance().store(fromKey, _message.getFrom()); + _context.netDb().store(fromKey, _message.getFrom()); // whatdotheywant? 
handleRequest(fromKey); } private void handleRequest(Hash fromKey) { - LeaseSet ls = NetworkDatabaseFacade.getInstance().lookupLeaseSetLocally(_message.getSearchKey()); + LeaseSet ls = _context.netDb().lookupLeaseSetLocally(_message.getSearchKey()); if (ls != null) { // send that lease set to the _message.getFromHash peer if (_log.shouldLog(Log.DEBUG)) @@ -92,7 +91,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl { + " locally as a lease set. sending to " + fromKey.toBase64()); sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel()); } else { - RouterInfo info = NetworkDatabaseFacade.getInstance().lookupRouterInfoLocally(_message.getSearchKey()); + RouterInfo info = _context.netDb().lookupRouterInfoLocally(_message.getSearchKey()); if (info != null) { // send that routerInfo to the _message.getFromHash peer if (_log.shouldLog(Log.DEBUG)) @@ -101,8 +100,9 @@ public class HandleDatabaseLookupMessageJob extends JobImpl { sendData(_message.getSearchKey(), info, fromKey, _message.getReplyTunnel()); } else { // not found locally - return closest peer routerInfo structs - Set routerInfoSet = NetworkDatabaseFacade.getInstance().findNearestRouters(_message.getSearchKey(), - MAX_ROUTERS_RETURNED, _message.getDontIncludePeers()); + Set routerInfoSet = _context.netDb().findNearestRouters(_message.getSearchKey(), + MAX_ROUTERS_RETURNED, + _message.getDontIncludePeers()); if (_log.shouldLog(Log.DEBUG)) _log.debug("We do not have key " + _message.getSearchKey().toBase64() + " locally. 
sending back " + routerInfoSet.size() + " peers to " + fromKey.toBase64()); @@ -115,7 +115,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl { if (_log.shouldLog(Log.DEBUG)) _log.debug("Sending data matching key key " + key.toBase64() + " to peer " + toPeer.toBase64() + " tunnel " + replyTunnel); - DatabaseStoreMessage msg = new DatabaseStoreMessage(); + DatabaseStoreMessage msg = new DatabaseStoreMessage(_context); msg.setKey(key); if (data instanceof LeaseSet) { msg.setLeaseSet((LeaseSet)data); @@ -124,8 +124,8 @@ public class HandleDatabaseLookupMessageJob extends JobImpl { msg.setRouterInfo((RouterInfo)data); msg.setValueType(DatabaseStoreMessage.KEY_TYPE_ROUTERINFO); } - StatManager.getInstance().addRateData("netDb.lookupsMatched", 1, 0); - StatManager.getInstance().addRateData("netDb.lookupsHandled", 1, 0); + _context.statManager().addRateData("netDb.lookupsMatched", 1, 0); + _context.statManager().addRateData("netDb.lookupsHandled", 1, 0); sendMessage(msg, toPeer, replyTunnel); } @@ -133,15 +133,15 @@ public class HandleDatabaseLookupMessageJob extends JobImpl { if (_log.shouldLog(Log.DEBUG)) _log.debug("Sending closest routers to key " + key.toBase64() + ": # peers = " + routerInfoSet.size() + " tunnel " + replyTunnel); - DatabaseSearchReplyMessage msg = new DatabaseSearchReplyMessage(); - msg.setFromHash(Router.getInstance().getRouterInfo().getIdentity().getHash()); + DatabaseSearchReplyMessage msg = new DatabaseSearchReplyMessage(_context); + msg.setFromHash(_context.router().getRouterInfo().getIdentity().getHash()); msg.setSearchKey(key); if (routerInfoSet.size() <= 0) { // always include something, so lets toss ourselves in there - routerInfoSet.add(Router.getInstance().getRouterInfo()); + routerInfoSet.add(_context.router().getRouterInfo()); } msg.addReplies(routerInfoSet); - StatManager.getInstance().addRateData("netDb.lookupsHandled", 1, 0); + _context.statManager().addRateData("netDb.lookupsHandled", 1, 0); sendMessage(msg, toPeer, 
replyTunnel); // should this go via garlic messages instead? } @@ -152,21 +152,21 @@ public class HandleDatabaseLookupMessageJob extends JobImpl { } else { if (_log.shouldLog(Log.DEBUG)) _log.debug("Sending reply directly to " + toPeer); - send = new SendMessageDirectJob(message, toPeer, REPLY_TIMEOUT+Clock.getInstance().now(), MESSAGE_PRIORITY); + send = new SendMessageDirectJob(_context, message, toPeer, REPLY_TIMEOUT+_context.clock().now(), MESSAGE_PRIORITY); } - NetworkDatabaseFacade.getInstance().lookupRouterInfo(toPeer, send, null, REPLY_TIMEOUT); + _context.netDb().lookupRouterInfo(toPeer, send, null, REPLY_TIMEOUT); } private void sendThroughTunnel(I2NPMessage message, Hash toPeer, TunnelId replyTunnel) { - TunnelInfo info = TunnelManagerFacade.getInstance().getTunnelInfo(replyTunnel); + TunnelInfo info = _context.tunnelManager().getTunnelInfo(replyTunnel); // the sendTunnelMessageJob can't handle injecting into the tunnel anywhere but the beginning // (and if we are the beginning, we have the signing key) if ( (info == null) || (info.getSigningKey() != null)) { if (_log.shouldLog(Log.DEBUG)) _log.debug("Sending reply through " + replyTunnel + " on " + toPeer); - JobQueue.getInstance().addJob(new SendTunnelMessageJob(message, replyTunnel, toPeer, null, null, null, null, null, REPLY_TIMEOUT, MESSAGE_PRIORITY)); + _context.jobQueue().addJob(new SendTunnelMessageJob(_context, message, replyTunnel, toPeer, null, null, null, null, null, REPLY_TIMEOUT, MESSAGE_PRIORITY)); } else { // its a tunnel we're participating in, but we're NOT the gateway, so sendToGateway(message, toPeer, replyTunnel, info); @@ -183,19 +183,19 @@ public class HandleDatabaseLookupMessageJob extends JobImpl { return; } - long expiration = REPLY_TIMEOUT + Clock.getInstance().now(); + long expiration = REPLY_TIMEOUT + _context.clock().now(); - TunnelMessage msg = new TunnelMessage(); + TunnelMessage msg = new TunnelMessage(_context); try { ByteArrayOutputStream baos = new 
ByteArrayOutputStream(1024); message.writeBytes(baos); msg.setData(baos.toByteArray()); msg.setTunnelId(replyTunnel); msg.setMessageExpiration(new Date(expiration)); - JobQueue.getInstance().addJob(new SendMessageDirectJob(msg, toPeer, null, null, null, null, expiration, MESSAGE_PRIORITY)); + _context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, toPeer, null, null, null, null, expiration, MESSAGE_PRIORITY)); String bodyType = message.getClass().getName(); - MessageHistory.getInstance().wrap(bodyType, message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId()); + _context.messageHistory().wrap(bodyType, message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId()); } catch (IOException ioe) { if (_log.shouldLog(Log.ERROR)) _log.error("Error writing out the tunnel message to send to the tunnel", ioe); @@ -208,8 +208,8 @@ public class HandleDatabaseLookupMessageJob extends JobImpl { public String getName() { return "Handle Database Lookup Message"; } public void dropped() { - MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(), - _message.getClass().getName(), - "Dropped due to overload"); + _context.messageHistory().messageProcessingError(_message.getUniqueId(), + _message.getClass().getName(), + "Dropped due to overload"); } } diff --git a/router/java/src/net/i2p/router/networkdb/HandleDatabaseSearchReplyMessageJob.java b/router/java/src/net/i2p/router/networkdb/HandleDatabaseSearchReplyMessageJob.java index e40e8089e..4fc976c4e 100644 --- a/router/java/src/net/i2p/router/networkdb/HandleDatabaseSearchReplyMessageJob.java +++ b/router/java/src/net/i2p/router/networkdb/HandleDatabaseSearchReplyMessageJob.java @@ -1,9 +1,9 @@ package net.i2p.router.networkdb; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. 
- * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -16,55 +16,60 @@ import net.i2p.router.JobImpl; import net.i2p.router.JobQueue; import net.i2p.router.NetworkDatabaseFacade; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Receive DatabaseSearchReplyMessage data and store it in the local net db * */ public class HandleDatabaseSearchReplyMessageJob extends JobImpl { - private final static Log _log = new Log(HandleDatabaseSearchReplyMessageJob.class); + private Log _log; private DatabaseSearchReplyMessage _message; private RouterIdentity _from; private Hash _fromHash; - public HandleDatabaseSearchReplyMessageJob(DatabaseSearchReplyMessage receivedMessage, RouterIdentity from, Hash fromHash) { - _message = receivedMessage; - _from = from; - _fromHash = fromHash; + public HandleDatabaseSearchReplyMessageJob(RouterContext context, DatabaseSearchReplyMessage receivedMessage, RouterIdentity from, Hash fromHash) { + super(context); + _log = context.logManager().getLog(HandleDatabaseSearchReplyMessageJob.class); + _message = receivedMessage; + _from = from; + _fromHash = fromHash; } public void runJob() { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Handling database search reply message for key " + _message.getSearchKey().toBase64() + " with " + _message.getNumReplies() + " replies"); - if (_message.getNumReplies() > 0) - JobQueue.getInstance().addJob(new HandlePeerJob(0)); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Handling database search reply message for key " + _message.getSearchKey().toBase64() + " with " + _message.getNumReplies() + " replies"); + if (_message.getNumReplies() > 0) + _context.jobQueue().addJob(new HandlePeerJob(0)); } /** - * Partial job - take each reply entry, 
store it, then requeue again until all + * Partial job - take each reply entry, store it, then requeue again until all * of the entries are stored. This prevents a single reply from swamping the jobqueue * */ private final class HandlePeerJob extends JobImpl { - private int _curReply; - public HandlePeerJob(int reply) { - _curReply = reply; - } - public void runJob() { - boolean remaining = handle(); - if (remaining) - requeue(0); - } - - private boolean handle() { - RouterInfo info = _message.getReply(_curReply); - if (_log.shouldLog(Log.INFO)) - _log.info("On search for " + _message.getSearchKey().toBase64() + ", received " + info.getIdentity().getHash().toBase64()); - NetworkDatabaseFacade.getInstance().store(info.getIdentity().getHash(), info); - _curReply++; - return _message.getNumReplies() > _curReply; - } - public String getName() { return "Handle search reply value"; } + private int _curReply; + public HandlePeerJob(int reply) { + super(HandleDatabaseSearchReplyMessageJob.this._context); + _curReply = reply; + } + public void runJob() { + boolean remaining = handle(); + if (remaining) + requeue(0); + } + + private boolean handle() { + RouterInfo info = _message.getReply(_curReply); + if (_log.shouldLog(Log.INFO)) + _log.info("On search for " + _message.getSearchKey().toBase64() + ", received " + info.getIdentity().getHash().toBase64()); + + HandlePeerJob.this._context.netDb().store(info.getIdentity().getHash(), info); + _curReply++; + return _message.getNumReplies() > _curReply; + } + public String getName() { return "Handle search reply value"; } } public String getName() { return "Handle Database Search Reply Message"; } diff --git a/router/java/src/net/i2p/router/networkdb/HandleDatabaseStoreMessageJob.java b/router/java/src/net/i2p/router/networkdb/HandleDatabaseStoreMessageJob.java index 85be0f9b0..61548a0f5 100644 --- a/router/java/src/net/i2p/router/networkdb/HandleDatabaseStoreMessageJob.java +++ 
b/router/java/src/net/i2p/router/networkdb/HandleDatabaseStoreMessageJob.java @@ -19,22 +19,22 @@ import net.i2p.router.NetworkDatabaseFacade; import net.i2p.router.ProfileManager; import net.i2p.util.Log; import net.i2p.stat.StatManager; +import net.i2p.router.RouterContext; /** * Receive DatabaseStoreMessage data and store it in the local net db * */ public class HandleDatabaseStoreMessageJob extends JobImpl { - private final static Log _log = new Log(HandleDatabaseStoreMessageJob.class); + private Log _log; private DatabaseStoreMessage _message; private RouterIdentity _from; private Hash _fromHash; - - static { - StatManager.getInstance().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - } - public HandleDatabaseStoreMessageJob(DatabaseStoreMessage receivedMessage, RouterIdentity from, Hash fromHash) { + public HandleDatabaseStoreMessageJob(RouterContext ctx, DatabaseStoreMessage receivedMessage, RouterIdentity from, Hash fromHash) { + super(ctx); + _log = ctx.logManager().getLog(HandleDatabaseStoreMessageJob.class); + ctx.statManager().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); _message = receivedMessage; _from = from; _fromHash = fromHash; @@ -46,15 +46,15 @@ public class HandleDatabaseStoreMessageJob extends JobImpl { boolean wasNew = false; if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) { - Object match = NetworkDatabaseFacade.getInstance().store(_message.getKey(), _message.getLeaseSet()); + Object match = _context.netDb().store(_message.getKey(), _message.getLeaseSet()); wasNew = (null == match); } else if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) { if (_log.shouldLog(Log.INFO)) _log.info("Handling dbStore of router " + _message.getKey() + " with publishDate of " + new 
Date(_message.getRouterInfo().getPublished())); - Object match = NetworkDatabaseFacade.getInstance().store(_message.getKey(), _message.getRouterInfo()); + Object match = _context.netDb().store(_message.getKey(), _message.getRouterInfo()); wasNew = (null == match); - ProfileManager.getInstance().heardAbout(_message.getKey()); + _context.profileManager().heardAbout(_message.getKey()); } else { if (_log.shouldLog(Log.ERROR)) _log.error("Invalid DatabaseStoreMessage data type - " + _message.getValueType() @@ -63,13 +63,13 @@ public class HandleDatabaseStoreMessageJob extends JobImpl { if (_from != null) _fromHash = _from.getHash(); if (_fromHash != null) - ProfileManager.getInstance().dbStoreReceived(_fromHash, wasNew); - StatManager.getInstance().addRateData("netDb.storeHandled", 1, 0); + _context.profileManager().dbStoreReceived(_fromHash, wasNew); + _context.statManager().addRateData("netDb.storeHandled", 1, 0); } public String getName() { return "Handle Database Store Message"; } public void dropped() { - MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Dropped due to overload"); + _context.messageHistory().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Dropped due to overload"); } } diff --git a/router/java/src/net/i2p/router/networkdb/PublishLocalRouterInfoJob.java b/router/java/src/net/i2p/router/networkdb/PublishLocalRouterInfoJob.java index 9ad1ae4c3..89abad881 100644 --- a/router/java/src/net/i2p/router/networkdb/PublishLocalRouterInfoJob.java +++ b/router/java/src/net/i2p/router/networkdb/PublishLocalRouterInfoJob.java @@ -22,33 +22,42 @@ import net.i2p.router.StatisticsManager; import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.router.RouterContext; /** * Publish the local router's RouterInfo every 5 to 10 minutes * */ public class PublishLocalRouterInfoJob extends JobImpl { - private final static Log _log = new 
Log(PublishLocalRouterInfoJob.class); + private Log _log; final static long PUBLISH_DELAY = 5*60*1000; // every 5 to 10 minutes (since we randomize) + public PublishLocalRouterInfoJob(RouterContext ctx) { + super(ctx); + _log = ctx.logManager().getLog(PublishLocalRouterInfoJob.class); + } + public String getName() { return "Publish Local Router Info"; } public void runJob() { - RouterInfo ri = new RouterInfo(Router.getInstance().getRouterInfo()); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Old routerInfo contains " + ri.getAddresses().size() + " addresses and " + ri.getOptions().size() + " options"); - Properties stats = StatisticsManager.getInstance().publishStatistics(); - try { - ri.setPublished(Clock.getInstance().now()); - ri.setOptions(stats); - ri.setAddresses(CommSystemFacade.getInstance().createAddresses()); - ri.sign(KeyManager.getInstance().getSigningPrivateKey()); - Router.getInstance().setRouterInfo(ri); - if (_log.shouldLog(Log.INFO)) - _log.info("Newly updated routerInfo is published with " + stats.size() + "/" + ri.getOptions().size() + " options on " + new Date(ri.getPublished())); - NetworkDatabaseFacade.getInstance().publish(ri); - } catch (DataFormatException dfe) { - _log.error("Error signing the updated local router info!", dfe); - } - requeue(PUBLISH_DELAY + RandomSource.getInstance().nextInt((int)PUBLISH_DELAY)); + RouterInfo ri = new RouterInfo(_context.router().getRouterInfo()); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Old routerInfo contains " + ri.getAddresses().size() + + " addresses and " + ri.getOptions().size() + " options"); + Properties stats = _context.statPublisher().publishStatistics(); + try { + ri.setPublished(_context.clock().now()); + ri.setOptions(stats); + ri.setAddresses(_context.commSystem().createAddresses()); + ri.sign(_context.keyManager().getSigningPrivateKey()); + _context.router().setRouterInfo(ri); + if (_log.shouldLog(Log.INFO)) + _log.info("Newly updated routerInfo is published with " + stats.size() + + 
"/" + ri.getOptions().size() + " options on " + + new Date(ri.getPublished())); + _context.netDb().publish(ri); + } catch (DataFormatException dfe) { + _log.error("Error signing the updated local router info!", dfe); + } + requeue(PUBLISH_DELAY + _context.random().nextInt((int)PUBLISH_DELAY)); } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/DataPublisherJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/DataPublisherJob.java index 3225e779e..6890d57ce 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/DataPublisherJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/DataPublisherJob.java @@ -1,9 +1,9 @@ package net.i2p.router.networkdb.kademlia; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -20,68 +20,70 @@ import net.i2p.router.JobQueue; import net.i2p.router.Router; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; class DataPublisherJob extends JobImpl { - private final static Log _log = new Log(DataPublisherJob.class); + private Log _log; private KademliaNetworkDatabaseFacade _facade; private final static long RERUN_DELAY_MS = 30*1000; private final static int MAX_SEND_PER_RUN = 5; // publish no more than 5 at a time private final static long STORE_TIMEOUT = 60*1000; // give 'er a minute to send the data - public DataPublisherJob(KademliaNetworkDatabaseFacade facade) { - super(); - _facade = facade; - getTiming().setStartAfter(Clock.getInstance().now()+RERUN_DELAY_MS); // not immediate... 
+ public DataPublisherJob(RouterContext ctx, KademliaNetworkDatabaseFacade facade) { + super(ctx); + _log = ctx.logManager().getLog(DataPublisherJob.class); + _facade = facade; + getTiming().setStartAfter(ctx.clock().now()+RERUN_DELAY_MS); // not immediate... } public String getName() { return "Data Publisher Job"; } - public void runJob() { - Set toSend = selectKeysToSend(); - _log.info("Keys being published in this timeslice: " + toSend); - for (Iterator iter = toSend.iterator(); iter.hasNext(); ) { - Hash key = (Hash)iter.next(); - DataStructure data = _facade.getDataStore().get(key); - if (data == null) { - _log.warn("Trying to send a key we dont have? " + key); - continue; - } - if (data instanceof LeaseSet) { - LeaseSet ls = (LeaseSet)data; - if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) { - _log.warn("Not publishing a lease that isn't current - " + key, new Exception("Publish expired lease?")); - } - } - StoreJob store = new StoreJob(_facade, key, data, null, null, STORE_TIMEOUT); - JobQueue.getInstance().addJob(store); - } - requeue(RERUN_DELAY_MS); + public void runJob() { + Set toSend = selectKeysToSend(); + _log.info("Keys being published in this timeslice: " + toSend); + for (Iterator iter = toSend.iterator(); iter.hasNext(); ) { + Hash key = (Hash)iter.next(); + DataStructure data = _facade.getDataStore().get(key); + if (data == null) { + _log.warn("Trying to send a key we dont have? 
" + key); + continue; + } + if (data instanceof LeaseSet) { + LeaseSet ls = (LeaseSet)data; + if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) { + _log.warn("Not publishing a lease that isn't current - " + key, new Exception("Publish expired lease?")); + } + } + StoreJob store = new StoreJob(_context, _facade, key, data, null, null, STORE_TIMEOUT); + _context.jobQueue().addJob(store); + } + requeue(RERUN_DELAY_MS); } private Set selectKeysToSend() { - Set explicit = _facade.getExplicitSendKeys(); - Set toSend = new HashSet(MAX_SEND_PER_RUN); - if (explicit.size() < MAX_SEND_PER_RUN) { - toSend.addAll(explicit); - _facade.removeFromExplicitSend(explicit); - - Set passive = _facade.getPassivelySendKeys(); - Set psend = new HashSet(passive.size()); - for (Iterator iter = passive.iterator(); iter.hasNext(); ) { - if (toSend.size() >= MAX_SEND_PER_RUN) break; - Hash key = (Hash)iter.next(); - toSend.add(key); - psend.add(key); - } - _facade.removeFromPassiveSend(psend); - } else { - for (Iterator iter = explicit.iterator(); iter.hasNext(); ) { - if (toSend.size() >= MAX_SEND_PER_RUN) break; - Hash key = (Hash)iter.next(); - toSend.add(key); - } - _facade.removeFromExplicitSend(toSend); - } - - return toSend; + Set explicit = _facade.getExplicitSendKeys(); + Set toSend = new HashSet(MAX_SEND_PER_RUN); + if (explicit.size() < MAX_SEND_PER_RUN) { + toSend.addAll(explicit); + _facade.removeFromExplicitSend(explicit); + + Set passive = _facade.getPassivelySendKeys(); + Set psend = new HashSet(passive.size()); + for (Iterator iter = passive.iterator(); iter.hasNext(); ) { + if (toSend.size() >= MAX_SEND_PER_RUN) break; + Hash key = (Hash)iter.next(); + toSend.add(key); + psend.add(key); + } + _facade.removeFromPassiveSend(psend); + } else { + for (Iterator iter = explicit.iterator(); iter.hasNext(); ) { + if (toSend.size() >= MAX_SEND_PER_RUN) break; + Hash key = (Hash)iter.next(); + toSend.add(key); + } + _facade.removeFromExplicitSend(toSend); + } + + return toSend; } } diff 
--git a/router/java/src/net/i2p/router/networkdb/kademlia/DataRepublishingSelectorJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/DataRepublishingSelectorJob.java index e5ac6c50d..717284e25 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/DataRepublishingSelectorJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/DataRepublishingSelectorJob.java @@ -1,9 +1,9 @@ package net.i2p.router.networkdb.kademlia; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -21,21 +21,22 @@ import net.i2p.router.Router; import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.router.RouterContext; class DataRepublishingSelectorJob extends JobImpl { - private final static Log _log = new Log(DataRepublishingSelectorJob.class); + private Log _log; private KademliaNetworkDatabaseFacade _facade; - + private final static long RERUN_DELAY_MS = 1*60*1000; public final static int MAX_PASSIVE_POOL_SIZE = 30; // no need to have the pool be too big - + /** * For every bucket away from us, resend period increases by 5 minutes - so we resend * our own key every 5 minutes, and keys very far from us every 2.5 hours, increasing * linearly */ public final static long RESEND_BUCKET_FACTOR = 5*60*1000; - + /** * % chance any peer not specializing in the lease's key will broadcast it on each pass * of this job /after/ waiting 5 minutes (one RESENT_BUCKET_FACTOR). 
In other words, @@ -44,66 +45,67 @@ class DataRepublishingSelectorJob extends JobImpl { * */ private final static int LEASE_REBROADCAST_PROBABILITY = 5; - /** + /** * LEASE_REBROADCAST_PROBABILITY out of LEASE_REBROADCAST_PROBABILITY_SCALE chance. */ private final static int LEASE_REBROADCAST_PROBABILITY_SCALE = 1000; - - public DataRepublishingSelectorJob(KademliaNetworkDatabaseFacade facade) { - super(); - _facade = facade; - getTiming().setStartAfter(Clock.getInstance().now()+RERUN_DELAY_MS); // not immediate... + + public DataRepublishingSelectorJob(RouterContext ctx, KademliaNetworkDatabaseFacade facade) { + super(ctx); + _log = ctx.logManager().getLog(DataRepublishingSelectorJob.class); + _facade = facade; + getTiming().setStartAfter(ctx.clock().now()+RERUN_DELAY_MS); // not immediate... } - + public String getName() { return "Data Publisher Job"; } - public void runJob() { - Set toSend = selectKeysToSend(); - _log.info("Keys being queued up for publishing: " + toSend); - _facade.queueForPublishing(toSend); - requeue(RERUN_DELAY_MS); + public void runJob() { + Set toSend = selectKeysToSend(); + _log.info("Keys being queued up for publishing: " + toSend); + _facade.queueForPublishing(toSend); + requeue(RERUN_DELAY_MS); } - + /** - * Run through the entire data store, ranking how much we want to send each + * Run through the entire data store, ranking how much we want to send each * data point, and returning the ones we most want to send so that they can * be placed in the passive send pool (without making the passive pool greater * than the limit) * */ private Set selectKeysToSend() { - Set alreadyQueued = new HashSet(128); - alreadyQueued.addAll(_facade.getPassivelySendKeys()); - - int toAdd = MAX_PASSIVE_POOL_SIZE - alreadyQueued.size(); - _log.debug("Keys we need to queue up to fill the passive send pool: " + toAdd); - if (toAdd <= 0) return new HashSet(); - - alreadyQueued.addAll(_facade.getExplicitSendKeys()); - - Set keys = 
_facade.getDataStore().getKeys(); - keys.removeAll(alreadyQueued); - - _log.debug("Total number of keys in the datastore: " + keys.size()); - - TreeMap toSend = new TreeMap(); - for (Iterator iter = keys.iterator(); iter.hasNext(); ) { - Hash key = (Hash)iter.next(); - Long lastPublished = _facade.getLastSent(key); - long publishRank = rankPublishNeed(key, lastPublished); - _log.debug("Publish rank for " + key + ": " + publishRank); - if (publishRank > 0) { - while (toSend.containsKey(new Long(publishRank))) - publishRank++; - toSend.put(new Long(publishRank), key); - } - } - Set rv = new HashSet(toAdd); - for (Iterator iter = toSend.values().iterator(); iter.hasNext(); ) { - if (rv.size() > toAdd) break; - Hash key = (Hash)iter.next(); - rv.add(key); - } - return rv; + Set alreadyQueued = new HashSet(128); + alreadyQueued.addAll(_facade.getPassivelySendKeys()); + + int toAdd = MAX_PASSIVE_POOL_SIZE - alreadyQueued.size(); + _log.debug("Keys we need to queue up to fill the passive send pool: " + toAdd); + if (toAdd <= 0) return new HashSet(); + + alreadyQueued.addAll(_facade.getExplicitSendKeys()); + + Set keys = _facade.getDataStore().getKeys(); + keys.removeAll(alreadyQueued); + + _log.debug("Total number of keys in the datastore: " + keys.size()); + + TreeMap toSend = new TreeMap(); + for (Iterator iter = keys.iterator(); iter.hasNext(); ) { + Hash key = (Hash)iter.next(); + Long lastPublished = _facade.getLastSent(key); + long publishRank = rankPublishNeed(key, lastPublished); + _log.debug("Publish rank for " + key + ": " + publishRank); + if (publishRank > 0) { + while (toSend.containsKey(new Long(publishRank))) + publishRank++; + toSend.put(new Long(publishRank), key); + } + } + Set rv = new HashSet(toAdd); + for (Iterator iter = toSend.values().iterator(); iter.hasNext(); ) { + if (rv.size() > toAdd) break; + Hash key = (Hash)iter.next(); + rv.add(key); + } + return rv; } /** @@ -112,49 +114,49 @@ class DataRepublishingSelectorJob extends JobImpl { * */ 
private long rankPublishNeed(Hash key, Long lastPublished) { - int bucket = _facade.getKBuckets().pickBucket(key); - long sendPeriod = (bucket+1) * RESEND_BUCKET_FACTOR; - long now = Clock.getInstance().now(); - if (lastPublished.longValue() < now-sendPeriod) { - RouterInfo ri = _facade.lookupRouterInfoLocally(key); - if (ri != null) { - if (ri.isCurrent(2 * ExpireRoutersJob.EXPIRE_DELAY)) { - // last time it was sent was before the last send period - return KBucketSet.NUM_BUCKETS - bucket; - } else { - _log.info("Not republishing router " + key + " since it is really old [" + (now-ri.getPublished()) + "ms]"); - return -2; - } - } else { - LeaseSet ls = _facade.lookupLeaseSetLocally(key); - if (ls != null) { - if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) { - // last time it was sent was before the last send period - return KBucketSet.NUM_BUCKETS - bucket; - } else { - _log.info("Not republishing leaseSet " + key + " since it is really old [" + (now-ls.getEarliestLeaseDate()) + "ms]"); - return -3; - } - } else { - _log.info("Key " + key + " is not a leaseSet or routerInfo, definitely not publishing it"); - return -5; - } - } - } else { - // its been published since the last period we want to publish it - - if (now - RESEND_BUCKET_FACTOR > lastPublished.longValue()) { - if (_facade.lookupRouterInfoLocally(key) != null) { - // randomize the chance of rebroadcast for leases if we haven't - // sent it within 5 minutes - int val = RandomSource.getInstance().nextInt(LEASE_REBROADCAST_PROBABILITY_SCALE); - if (val <= LEASE_REBROADCAST_PROBABILITY) { - _log.info("Randomized rebroadcast of leases tells us to send " + key + ": " + val); - return 1; - } - } - } - return -1; - } + int bucket = _facade.getKBuckets().pickBucket(key); + long sendPeriod = (bucket+1) * RESEND_BUCKET_FACTOR; + long now = _context.clock().now(); + if (lastPublished.longValue() < now-sendPeriod) { + RouterInfo ri = _facade.lookupRouterInfoLocally(key); + if (ri != null) { + if (ri.isCurrent(2 * 
ExpireRoutersJob.EXPIRE_DELAY)) { + // last time it was sent was before the last send period + return KBucketSet.NUM_BUCKETS - bucket; + } else { + _log.info("Not republishing router " + key + " since it is really old [" + (now-ri.getPublished()) + "ms]"); + return -2; + } + } else { + LeaseSet ls = _facade.lookupLeaseSetLocally(key); + if (ls != null) { + if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) { + // last time it was sent was before the last send period + return KBucketSet.NUM_BUCKETS - bucket; + } else { + _log.info("Not republishing leaseSet " + key + " since it is really old [" + (now-ls.getEarliestLeaseDate()) + "ms]"); + return -3; + } + } else { + _log.info("Key " + key + " is not a leaseSet or routerInfo, definitely not publishing it"); + return -5; + } + } + } else { + // its been published since the last period we want to publish it + + if (now - RESEND_BUCKET_FACTOR > lastPublished.longValue()) { + if (_facade.lookupRouterInfoLocally(key) != null) { + // randomize the chance of rebroadcast for leases if we haven't + // sent it within 5 minutes + int val = _context.random().nextInt(LEASE_REBROADCAST_PROBABILITY_SCALE); + if (val <= LEASE_REBROADCAST_PROBABILITY) { + _log.info("Randomized rebroadcast of leases tells us to send " + key + ": " + val); + return 1; + } + } + } + return -1; + } } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/ExpireLeasesJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/ExpireLeasesJob.java index d909eaaaa..4d33a8055 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/ExpireLeasesJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/ExpireLeasesJob.java @@ -1,9 +1,9 @@ package net.i2p.router.networkdb.kademlia; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. 
- * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -17,57 +17,59 @@ import net.i2p.data.LeaseSet; import net.i2p.router.JobImpl; import net.i2p.router.Router; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** - * Periodically search through all leases to find expired ones, failing those - * keys and firing up a new search for each (in case we want it later, might as + * Periodically search through all leases to find expired ones, failing those + * keys and firing up a new search for each (in case we want it later, might as * well preemptively fetch it) * */ class ExpireLeasesJob extends JobImpl { - private final static Log _log = new Log(ExpireLeasesJob.class); + private Log _log; private KademliaNetworkDatabaseFacade _facade; - + private final static long RERUN_DELAY_MS = 1*60*1000; - - public ExpireLeasesJob(KademliaNetworkDatabaseFacade facade) { - super(); - _facade = facade; + + public ExpireLeasesJob(RouterContext ctx, KademliaNetworkDatabaseFacade facade) { + super(ctx); + _log = ctx.logManager().getLog(ExpireLeasesJob.class); + _facade = facade; } - + public String getName() { return "Expire Lease Sets Job"; } - public void runJob() { - Set toExpire = selectKeysToExpire(); - _log.info("Leases to expire: " + toExpire); - for (Iterator iter = toExpire.iterator(); iter.hasNext(); ) { - Hash key = (Hash)iter.next(); - _facade.fail(key); - _log.info("Lease " + key + " is expiring, so lets look for it again", new Exception("Expire and search")); - _facade.lookupLeaseSet(key, null, null, RERUN_DELAY_MS); - } - //_facade.queueForExploration(toExpire); // don't do explicit searches, just explore passively - requeue(RERUN_DELAY_MS); + public void runJob() { + Set toExpire = 
selectKeysToExpire(); + _log.info("Leases to expire: " + toExpire); + for (Iterator iter = toExpire.iterator(); iter.hasNext(); ) { + Hash key = (Hash)iter.next(); + _facade.fail(key); + _log.info("Lease " + key + " is expiring, so lets look for it again", new Exception("Expire and search")); + _facade.lookupLeaseSet(key, null, null, RERUN_DELAY_MS); + } + //_facade.queueForExploration(toExpire); // don't do explicit searches, just explore passively + requeue(RERUN_DELAY_MS); } - + /** * Run through the entire data store, finding all expired leaseSets (ones that * don't have any leases that haven't yet passed, even with the CLOCK_FUDGE_FACTOR) * */ private Set selectKeysToExpire() { - Set keys = _facade.getDataStore().getKeys(); - Set toExpire = new HashSet(128); - for (Iterator iter = keys.iterator(); iter.hasNext(); ) { - Hash key = (Hash)iter.next(); - Object obj = _facade.getDataStore().get(key); - if (obj instanceof LeaseSet) { - LeaseSet ls = (LeaseSet)obj; - if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) - toExpire.add(key); - else - _log.debug("Lease " + ls.getDestination().calculateHash() + " is current, no need to expire"); - } - } - return toExpire; + Set keys = _facade.getDataStore().getKeys(); + Set toExpire = new HashSet(128); + for (Iterator iter = keys.iterator(); iter.hasNext(); ) { + Hash key = (Hash)iter.next(); + Object obj = _facade.getDataStore().get(key); + if (obj instanceof LeaseSet) { + LeaseSet ls = (LeaseSet)obj; + if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) + toExpire.add(key); + else + _log.debug("Lease " + ls.getDestination().calculateHash() + " is current, no need to expire"); + } + } + return toExpire; } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/ExpireRoutersJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/ExpireRoutersJob.java index 3f5207f66..d7e94d2d3 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/ExpireRoutersJob.java +++ 
b/router/java/src/net/i2p/router/networkdb/kademlia/ExpireRoutersJob.java @@ -1,9 +1,9 @@ package net.i2p.router.networkdb.kademlia; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -19,46 +19,48 @@ import net.i2p.router.JobImpl; import net.i2p.router.TunnelManagerFacade; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** - * Go through the routing table pick routers that are performing poorly or - * is out of date, but don't expire routers we're actively tunneling through. + * Go through the routing table pick routers that are performing poorly or + * is out of date, but don't expire routers we're actively tunneling through. * If a peer is performing worse than some threshold (via profile.rankLiveliness) * drop it and don't ask any questions. If a peer isn't ranked really poorly, but - * we just haven't heard from it in a while, drop it and add it to the set of + * we just haven't heard from it in a while, drop it and add it to the set of * keys we want the netDb to explore. * */ class ExpireRoutersJob extends JobImpl { - private final static Log _log = new Log(ExpireRoutersJob.class); + private Log _log; private KademliaNetworkDatabaseFacade _facade; - + private final static long RERUN_DELAY_MS = 30*1000; - /** - * If a routerInfo structure isn't updated within an hour, drop it + /** + * If a routerInfo structure isn't updated within an hour, drop it * and search for a later version. This value should be large enough * to deal with the Router.CLOCK_FUDGE_FACTOR. 
*/ public final static long EXPIRE_DELAY = 60*60*1000; - public ExpireRoutersJob(KademliaNetworkDatabaseFacade facade) { - super(); - _facade = facade; + public ExpireRoutersJob(RouterContext ctx, KademliaNetworkDatabaseFacade facade) { + super(ctx); + _log = ctx.logManager().getLog(ExpireRoutersJob.class); + _facade = facade; } - + public String getName() { return "Expire Routers Job"; } - public void runJob() { - Set toExpire = selectKeysToExpire(); - _log.info("Routers to expire (drop and try to refetch): " + toExpire); - for (Iterator iter = toExpire.iterator(); iter.hasNext(); ) { - Hash key = (Hash)iter.next(); - _facade.fail(key); - } - _facade.queueForExploration(toExpire); - - requeue(RERUN_DELAY_MS); + public void runJob() { + Set toExpire = selectKeysToExpire(); + _log.info("Routers to expire (drop and try to refetch): " + toExpire); + for (Iterator iter = toExpire.iterator(); iter.hasNext(); ) { + Hash key = (Hash)iter.next(); + _facade.fail(key); + } + _facade.queueForExploration(toExpire); + + requeue(RERUN_DELAY_MS); } - + /** * Run through all of the known peers and pick ones that have really old @@ -67,40 +69,40 @@ class ExpireRoutersJob extends JobImpl { * */ private Set selectKeysToExpire() { - Set possible = getNotInUse(); - Set expiring = new HashSet(16); - long earliestPublishDate = Clock.getInstance().now() - EXPIRE_DELAY; - - for (Iterator iter = possible.iterator(); iter.hasNext(); ) { - Hash key = (Hash)iter.next(); - RouterInfo ri = _facade.lookupRouterInfoLocally(key); - if (ri != null) { - if (!ri.isCurrent(EXPIRE_DELAY)) { - if (_log.shouldLog(Log.INFO)) - _log.info("Expiring RouterInfo for " + key.toBase64() + " [published on " + new Date(ri.getPublished()) + "]"); - expiring.add(key); - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Not expiring routerInfo for " + key.toBase64() + " [published on " + new Date(ri.getPublished()) + "]"); - } - } - } - - return expiring; + Set possible = getNotInUse(); + Set expiring = new 
HashSet(16); + long earliestPublishDate = _context.clock().now() - EXPIRE_DELAY; + + for (Iterator iter = possible.iterator(); iter.hasNext(); ) { + Hash key = (Hash)iter.next(); + RouterInfo ri = _facade.lookupRouterInfoLocally(key); + if (ri != null) { + if (!ri.isCurrent(EXPIRE_DELAY)) { + if (_log.shouldLog(Log.INFO)) + _log.info("Expiring RouterInfo for " + key.toBase64() + " [published on " + new Date(ri.getPublished()) + "]"); + expiring.add(key); + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Not expiring routerInfo for " + key.toBase64() + " [published on " + new Date(ri.getPublished()) + "]"); + } + } + } + + return expiring; } /** all peers not in use by tunnels */ private Set getNotInUse() { - Set possible = new HashSet(16); - for (Iterator iter = _facade.getAllRouters().iterator(); iter.hasNext(); ) { - Hash peer = (Hash)iter.next(); - if (!TunnelManagerFacade.getInstance().isInUse(peer)) { - possible.add(peer); - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Peer is in use: " + peer.toBase64()); - } - } - return possible; + Set possible = new HashSet(16); + for (Iterator iter = _facade.getAllRouters().iterator(); iter.hasNext(); ) { + Hash peer = (Hash)iter.next(); + if (!_context.tunnelManager().isInUse(peer)) { + possible.add(peer); + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Peer is in use: " + peer.toBase64()); + } + } + return possible; } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/ExploreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/ExploreJob.java index e7454fff8..687c2c957 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/ExploreJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/ExploreJob.java @@ -1,9 +1,9 @@ package net.i2p.router.networkdb.kademlia; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. 
- * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -19,37 +19,41 @@ import net.i2p.data.TunnelId; import net.i2p.data.i2np.DatabaseLookupMessage; import net.i2p.router.Router; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** - * Search for a particular key iteratively until we either find a value, we run + * Search for a particular key iteratively until we either find a value, we run * out of peers, or the bucket the key belongs in has sufficient values in it. * Well, we're skipping the 'bucket gets filled up' test for now, since it'll never * get used (at least for a while). * */ class ExploreJob extends SearchJob { - private final Log _log = new Log(ExploreJob.class); + private Log _log; + private PeerSelector _peerSelector; /** how long each exploration should run for (currently a trivial 20 seconds) */ private final static long MAX_EXPLORE_TIME = 30*1000; - + /** how many of the peers closest to the key being explored do we want to explicitly say "dont send me this"? */ private final static int NUM_CLOSEST_TO_IGNORE = 3; - + /** * Create a new search for the routingKey specified - * + * */ - public ExploreJob(KademliaNetworkDatabaseFacade facade, Hash key) { - // note that we're treating the last param (isLease) as *false* since we're just exploring. - // if this collides with an actual leaseSet's key, neat, but that wouldn't imply we're actually - // attempting to send that lease a message! - super(facade, key, null, null, MAX_EXPLORE_TIME, false, false); + public ExploreJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key) { + // note that we're treating the last param (isLease) as *false* since we're just exploring. 
+ // if this collides with an actual leaseSet's key, neat, but that wouldn't imply we're actually + // attempting to send that lease a message! + super(context, facade, key, null, null, MAX_EXPLORE_TIME, false, false); + _log = context.logManager().getLog(ExploreJob.class); + _peerSelector = new PeerSelector(context); } - + /** * Build the database search message, but unlike the normal searches, we're more explicit in - * what we /dont/ want. We don't just ask them to ignore the peers we've already searched + * what we /dont/ want. We don't just ask them to ignore the peers we've already searched * on, but to ignore a number of the peers we already know about (in the target key's bucket) as well. * * Perhaps we may want to ignore other keys too, such as the ones in nearby @@ -59,29 +63,29 @@ class ExploreJob extends SearchJob { * * @param replyTunnelId tunnel to receive replies through * @param replyGateway gateway for the reply tunnel - * @param expiration when the search should stop + * @param expiration when the search should stop */ protected DatabaseLookupMessage buildMessage(TunnelId replyTunnelId, RouterInfo replyGateway, long expiration) { - DatabaseLookupMessage msg = new DatabaseLookupMessage(); - msg.setSearchKey(getState().getTarget()); - msg.setFrom(replyGateway); - msg.setDontIncludePeers(getState().getAttempted()); - msg.setMessageExpiration(new Date(expiration)); - msg.setReplyTunnel(replyTunnelId); - - Set attempted = getState().getAttempted(); - List peers = PeerSelector.getInstance().selectNearestExplicit(getState().getTarget(), NUM_CLOSEST_TO_IGNORE, attempted, getFacade().getKBuckets()); - Set toSkip = new HashSet(64); - toSkip.addAll(attempted); - toSkip.addAll(peers); - msg.setDontIncludePeers(toSkip); - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Peers we don't want to hear about: " + toSkip); - - return msg; + DatabaseLookupMessage msg = new DatabaseLookupMessage(_context); + msg.setSearchKey(getState().getTarget()); + 
msg.setFrom(replyGateway); + msg.setDontIncludePeers(getState().getAttempted()); + msg.setMessageExpiration(new Date(expiration)); + msg.setReplyTunnel(replyTunnelId); + + Set attempted = getState().getAttempted(); + List peers = _peerSelector.selectNearestExplicit(getState().getTarget(), NUM_CLOSEST_TO_IGNORE, attempted, getFacade().getKBuckets()); + Set toSkip = new HashSet(64); + toSkip.addAll(attempted); + toSkip.addAll(peers); + msg.setDontIncludePeers(toSkip); + + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Peers we don't want to hear about: " + toSkip); + + return msg; } - + /** * We're looking for a router, so lets build the lookup message (no need to tunnel route either, so just have @@ -89,13 +93,13 @@ class ExploreJob extends SearchJob { * */ protected DatabaseLookupMessage buildMessage(long expiration) { - return buildMessage(null, Router.getInstance().getRouterInfo(), expiration); + return buildMessage(null, _context.router().getRouterInfo(), expiration); } /* * We could override searchNext to see if we actually fill up a kbucket before - * the search expires, but, c'mon, the keyspace is just too bloody massive, and + * the search expires, but, c'mon, the keyspace is just too bloody massive, and * buckets wont be filling anytime soon, so might as well just use the SearchJob's * searchNext * diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/ExploreKeySelectorJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/ExploreKeySelectorJob.java index d43b547c1..bd713f5ef 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/ExploreKeySelectorJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/ExploreKeySelectorJob.java @@ -1,9 +1,9 @@ package net.i2p.router.networkdb.kademlia; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. 
- * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -15,65 +15,67 @@ import java.util.Set; import net.i2p.data.Hash; import net.i2p.router.JobImpl; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** - * Go through the kbuckets and generate random keys for routers in buckets not + * Go through the kbuckets and generate random keys for routers in buckets not * yet full, attempting to keep a pool of keys we can explore with (at least one * per bucket) * */ class ExploreKeySelectorJob extends JobImpl { - private final static Log _log = new Log(ExploreKeySelectorJob.class); + private Log _log; private KademliaNetworkDatabaseFacade _facade; - + private final static long RERUN_DELAY_MS = 60*1000; - public ExploreKeySelectorJob(KademliaNetworkDatabaseFacade facade) { - super(); - _facade = facade; + public ExploreKeySelectorJob(RouterContext context, KademliaNetworkDatabaseFacade facade) { + super(context); + _log = context.logManager().getLog(ExploreKeySelectorJob.class); + _facade = facade; } - + public String getName() { return "Explore Key Selector Job"; } - public void runJob() { - Set toExplore = selectKeysToExplore(); - _log.info("Filling the explorer pool with: " + toExplore); - if (toExplore != null) - _facade.queueForExploration(toExplore); - requeue(RERUN_DELAY_MS); + public void runJob() { + Set toExplore = selectKeysToExplore(); + _log.info("Filling the explorer pool with: " + toExplore); + if (toExplore != null) + _facade.queueForExploration(toExplore); + requeue(RERUN_DELAY_MS); } - + /** * Run through all kbuckets with too few routers and generate a random key * for it, with a maximum number of keys limited by the exploration pool size * */ private Set selectKeysToExplore() { - Set 
alreadyQueued = _facade.getExploreKeys(); - if (alreadyQueued.size() > KBucketSet.NUM_BUCKETS) return null; - Set toExplore = new HashSet(KBucketSet.NUM_BUCKETS - alreadyQueued.size()); - for (int i = 0; i < KBucketSet.NUM_BUCKETS; i++) { - KBucket bucket = _facade.getKBuckets().getBucket(i); - if (bucket.getKeyCount() < KBucketSet.BUCKET_SIZE) { - boolean already = false; - for (Iterator iter = alreadyQueued.iterator(); iter.hasNext(); ) { - Hash key = (Hash)iter.next(); - if (bucket.shouldContain(key)) { - already = true; - _log.debug("Bucket " + i + " is already queued for exploration \t" + key); - break; - } - } - if (!already) { - // no keys are queued for exploring this still-too-small bucket yet - Hash key = bucket.generateRandomKey(); - _log.debug("Bucket " + i + " is NOT queued for exploration, and it only has " + bucket.getKeyCount() + " keys, so explore with \t" + key); - toExplore.add(key); - } - } else { - _log.debug("Bucket " + i + " already has enough keys (" + bucket.getKeyCount() + "), no need to explore further"); - } - } - return toExplore; + Set alreadyQueued = _facade.getExploreKeys(); + if (alreadyQueued.size() > KBucketSet.NUM_BUCKETS) return null; + Set toExplore = new HashSet(KBucketSet.NUM_BUCKETS - alreadyQueued.size()); + for (int i = 0; i < KBucketSet.NUM_BUCKETS; i++) { + KBucket bucket = _facade.getKBuckets().getBucket(i); + if (bucket.getKeyCount() < KBucketSet.BUCKET_SIZE) { + boolean already = false; + for (Iterator iter = alreadyQueued.iterator(); iter.hasNext(); ) { + Hash key = (Hash)iter.next(); + if (bucket.shouldContain(key)) { + already = true; + _log.debug("Bucket " + i + " is already queued for exploration \t" + key); + break; + } + } + if (!already) { + // no keys are queued for exploring this still-too-small bucket yet + Hash key = bucket.generateRandomKey(); + _log.debug("Bucket " + i + " is NOT queued for exploration, and it only has " + bucket.getKeyCount() + " keys, so explore with \t" + key); + toExplore.add(key); + 
} + } else { + _log.debug("Bucket " + i + " already has enough keys (" + bucket.getKeyCount() + "), no need to explore further"); + } + } + return toExplore; } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/KBucketImpl.java b/router/java/src/net/i2p/router/networkdb/kademlia/KBucketImpl.java index 5f64b6be4..5fb26ab70 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/KBucketImpl.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/KBucketImpl.java @@ -1,9 +1,9 @@ package net.i2p.router.networkdb.kademlia; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -16,9 +16,10 @@ import net.i2p.data.DataHelper; import net.i2p.data.Hash; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.I2PAppContext; class KBucketImpl implements KBucket { - private final static Log _log = new Log(KBucketImpl.class); + private Log _log; private Set _entries; // PeerInfo structures private Hash _local; private int _begin; // if any bits equal or higher to this bit (in big endian order), @@ -26,96 +27,99 @@ class KBucketImpl implements KBucket { private BigInteger _lowerBounds; // lowest distance allowed from local private BigInteger _upperBounds; // one higher than the highest distance allowed from local private int _size; // integer value of the number of bits that can fit between lower and upper bounds + private I2PAppContext _context; - public KBucketImpl(Hash local) { - _entries = new HashSet(); - _local = local; + public KBucketImpl(I2PAppContext context, Hash local) { + _context = context; + _log = context.logManager().getLog(KBucketImpl.class); + _entries = new HashSet(); + _local = local; } public int getRangeBegin() { return _begin; } public int getRangeEnd() { return _end; } - public void setRange(int lowOrderBitLimit, int highOrderBitLimit) { - _begin = lowOrderBitLimit; - _end = highOrderBitLimit; - if (_begin == 0) - _lowerBounds = BigInteger.ZERO; - else - _lowerBounds = BigInteger.ZERO.setBit(_begin); - _upperBounds = BigInteger.ZERO.setBit(_end); - BigInteger diff = _upperBounds.subtract(_lowerBounds); - _size = diff.bitLength(); - StringBuffer buf = new StringBuffer(1024); - buf.append("Set range: ").append(lowOrderBitLimit).append(" through ").append(highOrderBitLimit).append('\n'); - buf.append("Local key, lowest allowed key, and highest allowed key: \n"); - Hash low = getRangeBeginKey(); - Hash high = getRangeEndKey(); - if ( (_local == null) || (_local.getData() == null) ) - buf.append(toString(Hash.FAKE_HASH.getData())).append('\n'); - else - 
buf.append(toString(_local.getData())).append('\n'); - buf.append(toString(low.getData())).append('\n'); - buf.append(toString(high.getData())); - //_log.debug(buf.toString()); + public void setRange(int lowOrderBitLimit, int highOrderBitLimit) { + _begin = lowOrderBitLimit; + _end = highOrderBitLimit; + if (_begin == 0) + _lowerBounds = BigInteger.ZERO; + else + _lowerBounds = BigInteger.ZERO.setBit(_begin); + _upperBounds = BigInteger.ZERO.setBit(_end); + BigInteger diff = _upperBounds.subtract(_lowerBounds); + _size = diff.bitLength(); + StringBuffer buf = new StringBuffer(1024); + buf.append("Set range: ").append(lowOrderBitLimit).append(" through ").append(highOrderBitLimit).append('\n'); + buf.append("Local key, lowest allowed key, and highest allowed key: \n"); + Hash low = getRangeBeginKey(); + Hash high = getRangeEndKey(); + if ( (_local == null) || (_local.getData() == null) ) + buf.append(toString(Hash.FAKE_HASH.getData())).append('\n'); + else + buf.append(toString(_local.getData())).append('\n'); + buf.append(toString(low.getData())).append('\n'); + buf.append(toString(high.getData())); + //_log.debug(buf.toString()); } - public int getKeyCount() { - synchronized (_entries) { - return _entries.size(); - } + public int getKeyCount() { + synchronized (_entries) { + return _entries.size(); + } } - + public Hash getLocal() { return _local; } public void setLocal(Hash local) { _local = local; } private byte[] distanceFromLocal(Hash key) { - return DataHelper.xor(key.getData(), _local.getData()); + return DataHelper.xor(key.getData(), _local.getData()); } public boolean shouldContain(Hash key) { - // woohah, incredibly excessive object creation! whee! 
- BigInteger kv = new BigInteger(1, distanceFromLocal(key)); - int lowComp = kv.compareTo(_lowerBounds); - int highComp = kv.compareTo(_upperBounds); - - //_log.debug("kv.compareTo(low) = " + lowComp + " kv.compareTo(high) " + highComp); - - if ( (lowComp >= 0) && (highComp < 0) ) return true; - return false; + // woohah, incredibly excessive object creation! whee! + BigInteger kv = new BigInteger(1, distanceFromLocal(key)); + int lowComp = kv.compareTo(_lowerBounds); + int highComp = kv.compareTo(_upperBounds); + + //_log.debug("kv.compareTo(low) = " + lowComp + " kv.compareTo(high) " + highComp); + + if ( (lowComp >= 0) && (highComp < 0) ) return true; + return false; } public Set getEntries() { - Set entries = new HashSet(64); - synchronized (_entries) { - entries.addAll(_entries); - } - return entries; + Set entries = new HashSet(64); + synchronized (_entries) { + entries.addAll(_entries); + } + return entries; } public Set getEntries(Set toIgnoreHashes) { - Set entries = new HashSet(64); - synchronized (_entries) { - entries.addAll(_entries); - entries.removeAll(toIgnoreHashes); - } - return entries; + Set entries = new HashSet(64); + synchronized (_entries) { + entries.addAll(_entries); + entries.removeAll(toIgnoreHashes); + } + return entries; } public void setEntries(Set entries) { - synchronized (_entries) { - _entries.clear(); - _entries.addAll(entries); - } + synchronized (_entries) { + _entries.clear(); + _entries.addAll(entries); + } } public int add(Hash peer) { - synchronized (_entries) { - _entries.add(peer); - return _entries.size(); - } + synchronized (_entries) { + _entries.add(peer); + return _entries.size(); + } } public boolean remove(Hash peer) { - synchronized (_entries) { - return _entries.remove(peer); - } + synchronized (_entries) { + return _entries.remove(peer); + } } /** @@ -123,164 +127,166 @@ class KBucketImpl implements KBucket { * */ public Hash generateRandomKey() { - BigInteger variance = new BigInteger(_size-1, 
RandomSource.getInstance()); - variance = variance.add(_lowerBounds); - //_log.debug("Random variance for " + _size + " bits: " + variance); - byte data[] = variance.toByteArray(); - byte hash[] = new byte[Hash.HASH_LENGTH]; - if (data.length <= Hash.HASH_LENGTH) { - System.arraycopy(data, 0, hash, hash.length - data.length, data.length); - } else { - System.arraycopy(data, data.length - hash.length, hash, 0, hash.length); - } - Hash key = new Hash(hash); - data = distanceFromLocal(key); - hash = new byte[Hash.HASH_LENGTH]; - if (data.length <= Hash.HASH_LENGTH) { - System.arraycopy(data, 0, hash, hash.length - data.length, data.length); - } else { - System.arraycopy(data, data.length - hash.length, hash, 0, hash.length); - } - key = new Hash(hash); - return key; + BigInteger variance = new BigInteger(_size-1, _context.random()); + variance = variance.add(_lowerBounds); + //_log.debug("Random variance for " + _size + " bits: " + variance); + byte data[] = variance.toByteArray(); + byte hash[] = new byte[Hash.HASH_LENGTH]; + if (data.length <= Hash.HASH_LENGTH) { + System.arraycopy(data, 0, hash, hash.length - data.length, data.length); + } else { + System.arraycopy(data, data.length - hash.length, hash, 0, hash.length); + } + Hash key = new Hash(hash); + data = distanceFromLocal(key); + hash = new byte[Hash.HASH_LENGTH]; + if (data.length <= Hash.HASH_LENGTH) { + System.arraycopy(data, 0, hash, hash.length - data.length, data.length); + } else { + System.arraycopy(data, data.length - hash.length, hash, 0, hash.length); + } + key = new Hash(hash); + return key; } public Hash getRangeBeginKey() { - BigInteger lowerBounds = _lowerBounds; - if ( (_local != null) && (_local.getData() != null) ) { - lowerBounds = lowerBounds.xor(new BigInteger(1, _local.getData())); - } - - byte data[] = lowerBounds.toByteArray(); - byte hash[] = new byte[Hash.HASH_LENGTH]; - if (data.length <= Hash.HASH_LENGTH) { - System.arraycopy(data, 0, hash, hash.length - data.length, data.length); 
- } else { - System.arraycopy(data, data.length - hash.length, hash, 0, hash.length); - } - Hash key = new Hash(hash); - return key; + BigInteger lowerBounds = _lowerBounds; + if ( (_local != null) && (_local.getData() != null) ) { + lowerBounds = lowerBounds.xor(new BigInteger(1, _local.getData())); + } + + byte data[] = lowerBounds.toByteArray(); + byte hash[] = new byte[Hash.HASH_LENGTH]; + if (data.length <= Hash.HASH_LENGTH) { + System.arraycopy(data, 0, hash, hash.length - data.length, data.length); + } else { + System.arraycopy(data, data.length - hash.length, hash, 0, hash.length); + } + Hash key = new Hash(hash); + return key; } public Hash getRangeEndKey() { - BigInteger upperBounds = _upperBounds; - if ( (_local != null) && (_local.getData() != null) ) { - upperBounds = upperBounds.xor(new BigInteger(1, _local.getData())); - } - byte data[] = upperBounds.toByteArray(); - byte hash[] = new byte[Hash.HASH_LENGTH]; - if (data.length <= Hash.HASH_LENGTH) { - System.arraycopy(data, 0, hash, hash.length - data.length, data.length); - } else { - System.arraycopy(data, data.length - hash.length, hash, 0, hash.length); - } - Hash key = new Hash(hash); - return key; + BigInteger upperBounds = _upperBounds; + if ( (_local != null) && (_local.getData() != null) ) { + upperBounds = upperBounds.xor(new BigInteger(1, _local.getData())); + } + byte data[] = upperBounds.toByteArray(); + byte hash[] = new byte[Hash.HASH_LENGTH]; + if (data.length <= Hash.HASH_LENGTH) { + System.arraycopy(data, 0, hash, hash.length - data.length, data.length); + } else { + System.arraycopy(data, data.length - hash.length, hash, 0, hash.length); + } + Hash key = new Hash(hash); + return key; } public String toString() { - StringBuffer buf = new StringBuffer(1024); - buf.append("KBucketImpl: "); - synchronized (_entries) { - buf.append(_entries.toString()).append("\n"); - } - buf.append("Low bit: ").append(_begin).append(" high bit: ").append(_end).append('\n'); - buf.append("Local key: 
\n"); - if ( (_local != null) && (_local.getData() != null) ) - buf.append(toString(_local.getData())).append('\n'); - else - buf.append("[undefined]\n"); - buf.append("Low and high keys:\n"); - buf.append(toString(getRangeBeginKey().getData())).append('\n'); - buf.append(toString(getRangeEndKey().getData())).append('\n'); - buf.append("Low and high deltas:\n"); - buf.append(_lowerBounds.toString(2)).append('\n'); - buf.append(_upperBounds.toString(2)).append('\n'); - return buf.toString(); + StringBuffer buf = new StringBuffer(1024); + buf.append("KBucketImpl: "); + synchronized (_entries) { + buf.append(_entries.toString()).append("\n"); + } + buf.append("Low bit: ").append(_begin).append(" high bit: ").append(_end).append('\n'); + buf.append("Local key: \n"); + if ( (_local != null) && (_local.getData() != null) ) + buf.append(toString(_local.getData())).append('\n'); + else + buf.append("[undefined]\n"); + buf.append("Low and high keys:\n"); + buf.append(toString(getRangeBeginKey().getData())).append('\n'); + buf.append(toString(getRangeEndKey().getData())).append('\n'); + buf.append("Low and high deltas:\n"); + buf.append(_lowerBounds.toString(2)).append('\n'); + buf.append(_upperBounds.toString(2)).append('\n'); + return buf.toString(); } /** * Test harness to make sure its assigning keys to the right buckets * - */ + */ public static void main(String args[]) { - testRand2(); - testRand(); - - try { Thread.sleep(10000); } catch (InterruptedException ie) {} + testRand2(); + testRand(); + + try { Thread.sleep(10000); } catch (InterruptedException ie) {} } private static void testRand() { - StringBuffer buf = new StringBuffer(2048); - int low = 1; - int high = 3; - KBucketImpl bucket = new KBucketImpl(Hash.FAKE_HASH); - bucket.setRange(low, high); - Hash lowerBoundKey = bucket.getRangeBeginKey(); - Hash upperBoundKey = bucket.getRangeEndKey(); - for (int i = 0; i < 100; i++) { - Hash rnd = bucket.generateRandomKey(); - 
//buf.append(toString(rnd.getData())).append('\n'); - boolean ok = bucket.shouldContain(rnd); - if (!ok) { - byte diff[] = DataHelper.xor(rnd.getData(), bucket.getLocal().getData()); - BigInteger dv = new BigInteger(1, diff); - _log.error("WTF! bucket doesn't want: \n" + toString(rnd.getData()) + "\nDelta: \n" + toString(diff) + "\nDelta val: \n" + dv.toString(2) + "\nBucket: \n"+bucket, new Exception("WTF")); - try { Thread.sleep(1000); } catch (Exception e) {} - System.exit(0); - } else { - //_log.debug("Ok, bucket wants: \n" + toString(rnd.getData())); - } - //_log.info("Low/High:\n" + toString(lowBounds.toByteArray()) + "\n" + toString(highBounds.toByteArray())); - } - _log.info("Passed 100 random key generations against the null hash"); + StringBuffer buf = new StringBuffer(2048); + int low = 1; + int high = 3; + Log log = I2PAppContext.getGlobalContext().logManager().getLog(KBucketImpl.class); + KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), Hash.FAKE_HASH); + bucket.setRange(low, high); + Hash lowerBoundKey = bucket.getRangeBeginKey(); + Hash upperBoundKey = bucket.getRangeEndKey(); + for (int i = 0; i < 100; i++) { + Hash rnd = bucket.generateRandomKey(); + //buf.append(toString(rnd.getData())).append('\n'); + boolean ok = bucket.shouldContain(rnd); + if (!ok) { + byte diff[] = DataHelper.xor(rnd.getData(), bucket.getLocal().getData()); + BigInteger dv = new BigInteger(1, diff); + log.error("WTF! 
bucket doesn't want: \n" + toString(rnd.getData()) + "\nDelta: \n" + toString(diff) + "\nDelta val: \n" + dv.toString(2) + "\nBucket: \n"+bucket, new Exception("WTF")); + try { Thread.sleep(1000); } catch (Exception e) {} + System.exit(0); + } else { + //_log.debug("Ok, bucket wants: \n" + toString(rnd.getData())); + } + //_log.info("Low/High:\n" + toString(lowBounds.toByteArray()) + "\n" + toString(highBounds.toByteArray())); + } + log.info("Passed 100 random key generations against the null hash"); } private static void testRand2() { - StringBuffer buf = new StringBuffer(1024*1024*16); - int low = 1; - int high = 200; - byte hash[] = new byte[Hash.HASH_LENGTH]; - RandomSource.getInstance().nextBytes(hash); - KBucketImpl bucket = new KBucketImpl(new Hash(hash)); - bucket.setRange(low, high); - Hash lowerBoundKey = bucket.getRangeBeginKey(); - Hash upperBoundKey = bucket.getRangeEndKey(); - for (int i = 0; i < 1000; i++) { - Hash rnd = bucket.generateRandomKey(); - buf.append(toString(rnd.getData())).append('\n'); - boolean ok = bucket.shouldContain(rnd); - if (!ok) { - byte diff[] = DataHelper.xor(rnd.getData(), bucket.getLocal().getData()); - BigInteger dv = new BigInteger(1, diff); - _log.error("WTF! 
bucket doesn't want: \n" + toString(rnd.getData()) + "\nDelta: \n" + toString(diff) + "\nDelta val: \n" + dv.toString(2) + "\nBucket: \n"+bucket, new Exception("WTF")); - try { Thread.sleep(1000); } catch (Exception e) {} - System.exit(0); - } else { - //_log.debug("Ok, bucket wants: \n" + toString(rnd.getData())); - } - } - _log.info("Passed 1000 random key generations against a random hash\n" + buf.toString()); + Log log = I2PAppContext.getGlobalContext().logManager().getLog(KBucketImpl.class); + StringBuffer buf = new StringBuffer(1024*1024*16); + int low = 1; + int high = 200; + byte hash[] = new byte[Hash.HASH_LENGTH]; + RandomSource.getInstance().nextBytes(hash); + KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), new Hash(hash)); + bucket.setRange(low, high); + Hash lowerBoundKey = bucket.getRangeBeginKey(); + Hash upperBoundKey = bucket.getRangeEndKey(); + for (int i = 0; i < 1000; i++) { + Hash rnd = bucket.generateRandomKey(); + buf.append(toString(rnd.getData())).append('\n'); + boolean ok = bucket.shouldContain(rnd); + if (!ok) { + byte diff[] = DataHelper.xor(rnd.getData(), bucket.getLocal().getData()); + BigInteger dv = new BigInteger(1, diff); + log.error("WTF! 
bucket doesn't want: \n" + toString(rnd.getData()) + "\nDelta: \n" + toString(diff) + "\nDelta val: \n" + dv.toString(2) + "\nBucket: \n"+bucket, new Exception("WTF")); + try { Thread.sleep(1000); } catch (Exception e) {} + System.exit(0); + } else { + //_log.debug("Ok, bucket wants: \n" + toString(rnd.getData())); + } + } + log.info("Passed 1000 random key generations against a random hash\n" + buf.toString()); } private final static String toString(byte b[]) { - StringBuffer buf = new StringBuffer(b.length); - for (int i = 0; i < b.length; i++) { - buf.append(toString(b[i])); - buf.append(" "); - } - return buf.toString(); + StringBuffer buf = new StringBuffer(b.length); + for (int i = 0; i < b.length; i++) { + buf.append(toString(b[i])); + buf.append(" "); + } + return buf.toString(); } private final static String toString(byte b) { - StringBuffer buf = new StringBuffer(8); - for (int i = 7; i >= 0; i--) { - boolean bb = (0 != (b & (1<= 0; i--) { + boolean bb = (0 != (b & (1<= 0) { - int oldSize = _buckets[bucket].getKeyCount(); - int numInBucket = _buckets[bucket].add(peer); - if (numInBucket > BUCKET_SIZE) { - // perhaps queue up coallesce job? naaahh.. lets let 'er grow for now - } - _log.debug("Peer " + peer + " added to bucket " + bucket); - return oldSize != numInBucket; - } else { - throw new IllegalArgumentException("Unable to pick a bucket. wtf!"); - } + int bucket = pickBucket(peer); + if (bucket >= 0) { + int oldSize = _buckets[bucket].getKeyCount(); + int numInBucket = _buckets[bucket].add(peer); + if (numInBucket > BUCKET_SIZE) { + // perhaps queue up coallesce job? naaahh.. lets let 'er grow for now + } + _log.debug("Peer " + peer + " added to bucket " + bucket); + return oldSize != numInBucket; + } else { + throw new IllegalArgumentException("Unable to pick a bucket. 
wtf!"); + } } - public int size() { - int size = 0; - for (int i = 0; i < _buckets.length; i++) - size += _buckets[i].getKeyCount(); - return size; + public int size() { + int size = 0; + for (int i = 0; i < _buckets.length; i++) + size += _buckets[i].getKeyCount(); + return size; } public boolean remove(Hash entry) { - int bucket = pickBucket(entry); - KBucket kbucket = getBucket(bucket); - boolean removed = kbucket.remove(entry); - return removed; + int bucket = pickBucket(entry); + KBucket kbucket = getBucket(bucket); + boolean removed = kbucket.remove(entry); + return removed; } public Set getAll() { return getAll(new HashSet()); } public Set getAll(Set toIgnore) { - HashSet all = new HashSet(1024); - for (int i = 0; i < _buckets.length; i++) { - all.addAll(_buckets[i].getEntries(toIgnore)); - } - return all; + HashSet all = new HashSet(1024); + for (int i = 0; i < _buckets.length; i++) { + all.addAll(_buckets[i].getEntries(toIgnore)); + } + return all; } public int pickBucket(Hash key) { - for (int i = 0; i < NUM_BUCKETS; i++) { - if (_buckets[i].shouldContain(key)) - return i; - } - _log.error("Key does not fit in any bucket?! WTF!\nKey : [" + toString(key.getData()) + "]\nDelta: ["+ toString(DataHelper.xor(_us.getData(), key.getData())) + "]\nUs : [" + toString(_us.getData()) + "]", new Exception("WTF")); - displayBuckets(); - return -1; + for (int i = 0; i < NUM_BUCKETS; i++) { + if (_buckets[i].shouldContain(key)) + return i; + } + _log.error("Key does not fit in any bucket?! 
WTF!\nKey : [" + toString(key.getData()) + "]\nDelta: ["+ toString(DataHelper.xor(_us.getData(), key.getData())) + "]\nUs : [" + toString(_us.getData()) + "]", new Exception("WTF")); + displayBuckets(); + return -1; } public KBucket getBucket(int bucket) { return _buckets[bucket]; } - + protected void createBuckets() { - _buckets = new KBucket[NUM_BUCKETS]; - for (int i = 0; i < NUM_BUCKETS-1; i++) { - _buckets[i] = createBucket(i*BASE, (i+1)*BASE); - } - _buckets[NUM_BUCKETS-1] = createBucket(BASE*(NUM_BUCKETS-1), BASE*(NUM_BUCKETS) + 1); + _buckets = new KBucket[NUM_BUCKETS]; + for (int i = 0; i < NUM_BUCKETS-1; i++) { + _buckets[i] = createBucket(i*BASE, (i+1)*BASE); + } + _buckets[NUM_BUCKETS-1] = createBucket(BASE*(NUM_BUCKETS-1), BASE*(NUM_BUCKETS) + 1); } protected KBucket createBucket(int start, int end) { - KBucket bucket = new KBucketImpl(_us); - bucket.setRange(start, end); - _log.debug("Creating a bucket from " + start + " to " + (end)); - return bucket; + KBucket bucket = new KBucketImpl(_context, _us); + bucket.setRange(start, end); + _log.debug("Creating a bucket from " + start + " to " + (end)); + return bucket; } public void displayBuckets() { - _log.info(toString()); + _log.info(toString()); } public String toString() { - BigInteger us = new BigInteger(1, _us.getData()); - StringBuffer buf = new StringBuffer(1024); - buf.append("Bucket set rooted on: ").append(us.toString()).append(" (aka ").append(us.toString(2)).append("): \n"); - for (int i = 0; i < NUM_BUCKETS; i++) { - buf.append("* Bucket ").append(i).append("/").append(NUM_BUCKETS-1).append(": )\n"); - buf.append("Start: ").append("2^").append(_buckets[i].getRangeBegin()).append(")\n"); - buf.append("End: ").append("2^").append(_buckets[i].getRangeEnd()).append(")\n"); - buf.append("Contents:").append(_buckets[i].toString()).append("\n"); - } - - return buf.toString(); + BigInteger us = new BigInteger(1, _us.getData()); + StringBuffer buf = new StringBuffer(1024); + buf.append("Bucket set 
rooted on: ").append(us.toString()).append(" (aka ").append(us.toString(2)).append("): \n"); + for (int i = 0; i < NUM_BUCKETS; i++) { + buf.append("* Bucket ").append(i).append("/").append(NUM_BUCKETS-1).append(": )\n"); + buf.append("Start: ").append("2^").append(_buckets[i].getRangeBegin()).append(")\n"); + buf.append("End: ").append("2^").append(_buckets[i].getRangeEnd()).append(")\n"); + buf.append("Contents:").append(_buckets[i].toString()).append("\n"); + } + + return buf.toString(); } final static String toString(byte b[]) { - byte val[] = new byte[Hash.HASH_LENGTH]; - if (b.length < 32) - System.arraycopy(b, 0, val, Hash.HASH_LENGTH-b.length-1, b.length); - else - System.arraycopy(b, Hash.HASH_LENGTH-b.length, val, 0, val.length); - StringBuffer buf = new StringBuffer(KEYSIZE_BITS); - for (int i = 0; i < val.length; i++) { - for (int j = 7; j >= 0; j--) { - boolean bb = (0 != (val[i] & (1<= 0; j--) { + boolean bb = (0 != (val[i] & (1<Kademlia Network DB Contents\n"); - if (!_initialized) { - buf.append("Not initialized\n"); - return buf.toString(); - } - Set leases = getLeases(); - buf.append("

    Leases

    \n"); - buf.append("
    \n"); - for (Iterator iter = leases.iterator(); iter.hasNext(); ) { - LeaseSet ls = (LeaseSet)iter.next(); - Hash key = ls.getDestination().calculateHash(); - buf.append(""); - - if (getLastSent(key).longValue() > 0) - buf.append(""); - else - buf.append(""); - buf.append("\n"); - } - buf.append("
    ").append(key.toBase64()).append("Last sent successfully: ").append(new Date(getLastSent(key).longValue())).append("
    Last sent successfully: never
    \n").append(ls.toString()).append("
    \n"); - - Hash us = Router.getInstance().getRouterInfo().getIdentity().getHash(); - Set routers = getRouters(); - buf.append("

    Routers

    \n"); - buf.append("\n"); - for (Iterator iter = routers.iterator(); iter.hasNext(); ) { - RouterInfo ri = (RouterInfo)iter.next(); - Hash key = ri.getIdentity().getHash(); - boolean isUs = key.equals(us); - if (isUs) { - buf.append(""); - buf.append(""); - } else { - buf.append(""); - if (getLastSent(key).longValue() > 0) - buf.append(""); - else - buf.append(""); - buf.append(""); - } - buf.append("\n"); - } - buf.append("
    ").append(key.toBase64()).append("Last sent successfully: ").append(new Date(getLastSent(key).longValue())).append("
    ").append(key.toBase64()).append("Last sent successfully: ").append(new Date(getLastSent(key).longValue())).append("Last sent successfully: neverProfile
    \n").append(ri.toString()).append("
    \n"); - - return buf.toString(); + StringBuffer buf = new StringBuffer(); + buf.append("

    Kademlia Network DB Contents

    \n"); + if (!_initialized) { + buf.append("Not initialized\n"); + return buf.toString(); + } + Set leases = getLeases(); + buf.append("

    Leases

    \n"); + buf.append("\n"); + for (Iterator iter = leases.iterator(); iter.hasNext(); ) { + LeaseSet ls = (LeaseSet)iter.next(); + Hash key = ls.getDestination().calculateHash(); + buf.append(""); + + if (getLastSent(key).longValue() > 0) + buf.append(""); + else + buf.append(""); + buf.append("\n"); + } + buf.append("
    ").append(key.toBase64()).append("Last sent successfully: ").append(new Date(getLastSent(key).longValue())).append("
    Last sent successfully: never
    \n").append(ls.toString()).append("
    \n"); + + Hash us = _context.routerHash(); + Set routers = getRouters(); + buf.append("

    Routers

    \n"); + buf.append("\n"); + for (Iterator iter = routers.iterator(); iter.hasNext(); ) { + RouterInfo ri = (RouterInfo)iter.next(); + Hash key = ri.getIdentity().getHash(); + boolean isUs = key.equals(us); + if (isUs) { + buf.append(""); + buf.append(""); + } else { + buf.append(""); + if (getLastSent(key).longValue() > 0) + buf.append(""); + else + buf.append(""); + buf.append(""); + } + buf.append("\n"); + } + buf.append("
    ").append(key.toBase64()).append("Last sent successfully: ").append(new Date(getLastSent(key).longValue())).append("
    ").append(key.toBase64()).append("Last sent successfully: ").append(new Date(getLastSent(key).longValue())).append("Last sent successfully: neverProfile
    \n").append(ri.toString()).append("
    \n"); + + return buf.toString(); } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java b/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java index 5dee5ed04..0ac1f883e 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java @@ -23,11 +23,16 @@ import net.i2p.data.Hash; import net.i2p.router.ProfileManager; import net.i2p.router.Router; import net.i2p.util.Log; +import net.i2p.router.RouterContext; class PeerSelector { - private final static Log _log = new Log(PeerSelector.class); - private static final PeerSelector _instance = new PeerSelector(); - public static final PeerSelector getInstance() { return _instance; } + private Log _log; + private RouterContext _context; + + public PeerSelector(RouterContext ctx) { + _context = ctx; + _log = _context.logManager().getLog(PeerSelector.class); + } /** * Search through the kbucket set to find the most reliable peers close to the @@ -36,9 +41,9 @@ class PeerSelector { * @return ordered list of Hash objects */ public List selectMostReliablePeers(Hash key, int numClosest, Set alreadyChecked, KBucketSet kbuckets) { - // get the peers closest to the key - List nearest = selectNearestExplicit(key, numClosest, alreadyChecked, kbuckets); - return nearest; + // get the peers closest to the key + List nearest = selectNearestExplicit(key, numClosest, alreadyChecked, kbuckets); + return nearest; } /** @@ -49,26 +54,29 @@ class PeerSelector { * @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined) */ public List selectNearestExplicit(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) { - if (peersToIgnore == null) - peersToIgnore = new HashSet(1); - peersToIgnore.add(Router.getInstance().getRouterInfo().getIdentity().getHash()); - Set allHashes = kbuckets.getAll(peersToIgnore); - removeFailingPeers(allHashes); - Map 
diffMap = new HashMap(allHashes.size()); - for (Iterator iter = allHashes.iterator(); iter.hasNext(); ) { - Hash cur = (Hash)iter.next(); - BigInteger diff = getDistance(key, cur); - diffMap.put(diff, cur); - } - // n*log(n) - Map sortedMap = new TreeMap(diffMap); - List peerHashes = new ArrayList(maxNumRouters); - for (Iterator iter = sortedMap.values().iterator(); iter.hasNext(); ) { - if (peerHashes.size() >= maxNumRouters) break; - peerHashes.add(iter.next()); - } - _log.debug("Searching for " + maxNumRouters + " peers close to " + key + ": " + peerHashes + " (not including " + peersToIgnore + ") [allHashes.size = " + allHashes.size() + "]"); - return peerHashes; + if (peersToIgnore == null) + peersToIgnore = new HashSet(1); + peersToIgnore.add(_context.router().getRouterInfo().getIdentity().getHash()); + Set allHashes = kbuckets.getAll(peersToIgnore); + removeFailingPeers(allHashes); + Map diffMap = new HashMap(allHashes.size()); + for (Iterator iter = allHashes.iterator(); iter.hasNext(); ) { + Hash cur = (Hash)iter.next(); + BigInteger diff = getDistance(key, cur); + diffMap.put(diff, cur); + } + // n*log(n) + Map sortedMap = new TreeMap(diffMap); + List peerHashes = new ArrayList(maxNumRouters); + for (Iterator iter = sortedMap.values().iterator(); iter.hasNext(); ) { + if (peerHashes.size() >= maxNumRouters) break; + peerHashes.add(iter.next()); + } + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Searching for " + maxNumRouters + " peers close to " + key + ": " + + peerHashes + " (not including " + peersToIgnore + ") [allHashes.size = " + + allHashes.size() + "]"); + return peerHashes; } /** @@ -76,22 +84,22 @@ class PeerSelector { * */ private void removeFailingPeers(Set peerHashes) { - List failing = new ArrayList(16); - for (Iterator iter = peerHashes.iterator(); iter.hasNext(); ) { - Hash cur = (Hash)iter.next(); - if (ProfileManager.getInstance().isFailing(cur)) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Peer " + cur.toBase64() + " is failing, 
don't include them in the peer selection"); - failing.add(cur); - } - } - peerHashes.removeAll(failing); + List failing = new ArrayList(16); + for (Iterator iter = peerHashes.iterator(); iter.hasNext(); ) { + Hash cur = (Hash)iter.next(); + if (_context.profileOrganizer().isFailing(cur)) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Peer " + cur.toBase64() + " is failing, don't include them in the peer selection"); + failing.add(cur); + } + } + peerHashes.removeAll(failing); } protected BigInteger getDistance(Hash targetKey, Hash routerInQuestion) { - // plain XOR of the key and router - byte diff[] = DataHelper.xor(routerInQuestion.getData(), targetKey.getData()); - return new BigInteger(1, diff); + // plain XOR of the key and router + byte diff[] = DataHelper.xor(routerInQuestion.getData(), targetKey.getData()); + return new BigInteger(1, diff); } /** @@ -102,10 +110,10 @@ class PeerSelector { * @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined) */ public List selectNearest(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) { - // sure, this may not be exactly correct per kademlia (peers on the border of a kbucket in strict kademlia - // would behave differently) but I can see no reason to keep around an /additional/ more complicated algorithm. - // later if/when selectNearestExplicit gets costly, we may revisit this (since kbuckets let us cache the distance() - // into a simple bucket selection algo + random select rather than an n*log(n) op) - return selectNearestExplicit(key, maxNumRouters, peersToIgnore, kbuckets); + // sure, this may not be exactly correct per kademlia (peers on the border of a kbucket in strict kademlia + // would behave differently) but I can see no reason to keep around an /additional/ more complicated algorithm. 
+ // later if/when selectNearestExplicit gets costly, we may revisit this (since kbuckets let us cache the distance() + // into a simple bucket selection algo + random select rather than an n*log(n) op) + return selectNearestExplicit(key, maxNumRouters, peersToIgnore, kbuckets); } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java b/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java index f51818c80..4dd0c8100 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java @@ -1,9 +1,9 @@ package net.i2p.router.networkdb.kademlia; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -24,226 +24,231 @@ import net.i2p.router.JobImpl; import net.i2p.router.JobQueue; import net.i2p.router.Router; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Write out keys to disk when we get them and periodically read ones we don't know * about into memory, with newly read routers are also added to the routing table. 
- * + * */ class PersistentDataStore extends TransientDataStore { - private final static Log _log = new Log(PersistentDataStore.class); + private Log _log; private String _dbDir; private KademliaNetworkDatabaseFacade _facade; private final static int READ_DELAY = 60*1000; - public PersistentDataStore(String dbDir, KademliaNetworkDatabaseFacade facade) { - super(); - _dbDir = dbDir; - _facade = facade; - JobQueue.getInstance().addJob(new ReadJob()); + public PersistentDataStore(RouterContext ctx, String dbDir, KademliaNetworkDatabaseFacade facade) { + super(ctx); + _log = ctx.logManager().getLog(PersistentDataStore.class); + _dbDir = dbDir; + _facade = facade; + _context.jobQueue().addJob(new ReadJob()); } public DataStructure remove(Hash key) { - JobQueue.getInstance().addJob(new RemoveJob(key)); - return super.remove(key); + _context.jobQueue().addJob(new RemoveJob(key)); + return super.remove(key); } - + public void put(Hash key, DataStructure data) { - if ( (data == null) || (key == null) ) return; - super.put(key, data); - JobQueue.getInstance().addJob(new WriteJob(key, data)); + if ( (data == null) || (key == null) ) return; + super.put(key, data); + _context.jobQueue().addJob(new WriteJob(key, data)); } private void accept(LeaseSet ls) { - super.put(ls.getDestination().calculateHash(), ls); + super.put(ls.getDestination().calculateHash(), ls); } private void accept(RouterInfo ri) { - Hash key = ri.getIdentity().getHash(); - super.put(key, ri); - // add recently loaded routers to the routing table - _facade.getKBuckets().add(key); + Hash key = ri.getIdentity().getHash(); + super.put(key, ri); + // add recently loaded routers to the routing table + _facade.getKBuckets().add(key); } private class RemoveJob extends JobImpl { - private Hash _key; - public RemoveJob(Hash key) { - _key = key; - } - public String getName() { return "Remove Key"; } - public void runJob() { - _log.info("Removing key " + _key, getAddedBy()); - try { - File dbDir = getDbDir(); - 
removeFile(_key, dbDir); - } catch (IOException ioe) { - _log.error("Error removing key " + _key, ioe); - } - } + private Hash _key; + public RemoveJob(Hash key) { + super(PersistentDataStore.this._context); + _key = key; + } + public String getName() { return "Remove Key"; } + public void runJob() { + _log.info("Removing key " + _key, getAddedBy()); + try { + File dbDir = getDbDir(); + removeFile(_key, dbDir); + } catch (IOException ioe) { + _log.error("Error removing key " + _key, ioe); + } + } } - + private class WriteJob extends JobImpl { - private Hash _key; - private DataStructure _data; - public WriteJob(Hash key, DataStructure data) { - super(); - _key = key; - _data = data; - } - public String getName() { return "DB Writer Job"; } - public void runJob() { - _log.info("Writing key " + _key); - FileOutputStream fos = null; - try { - String filename = null; - File dbDir = getDbDir(); - - if (_data instanceof LeaseSet) - filename = getLeaseSetName(_key); - else if (_data instanceof RouterInfo) - filename = getRouterInfoName(_key); - else - throw new IOException("We don't know how to write objects of type " + _data.getClass().getName()); - - fos = new FileOutputStream(new File(dbDir, filename)); - try { - _data.writeBytes(fos); - } catch (DataFormatException dfe) { - _log.error("Error writing out malformed object as " + _key + ": " + _data, dfe); - File f = new File(dbDir, filename); - f.delete(); - } - } catch (IOException ioe) { - _log.error("Error writing out the object", ioe); - } finally { - if (fos != null) try { fos.close(); } catch (IOException ioe) {} - } - } + private Hash _key; + private DataStructure _data; + public WriteJob(Hash key, DataStructure data) { + super(PersistentDataStore.this._context); + _key = key; + _data = data; + } + public String getName() { return "DB Writer Job"; } + public void runJob() { + _log.info("Writing key " + _key); + FileOutputStream fos = null; + try { + String filename = null; + File dbDir = getDbDir(); + + if (_data 
instanceof LeaseSet) + filename = getLeaseSetName(_key); + else if (_data instanceof RouterInfo) + filename = getRouterInfoName(_key); + else + throw new IOException("We don't know how to write objects of type " + _data.getClass().getName()); + + fos = new FileOutputStream(new File(dbDir, filename)); + try { + _data.writeBytes(fos); + } catch (DataFormatException dfe) { + _log.error("Error writing out malformed object as " + _key + ": " + _data, dfe); + File f = new File(dbDir, filename); + f.delete(); + } + } catch (IOException ioe) { + _log.error("Error writing out the object", ioe); + } finally { + if (fos != null) try { fos.close(); } catch (IOException ioe) {} + } + } } private class ReadJob extends JobImpl { - public ReadJob() { - super(); - } - public String getName() { return "DB Read Job"; } - public void runJob() { - _log.info("Rereading new files"); - readFiles(); - requeue(READ_DELAY); - } - - private void readFiles() { - try { - File dbDir = getDbDir(); - File leaseSetFiles[] = dbDir.listFiles(LeaseSetFilter.getInstance()); - if (leaseSetFiles != null) { - for (int i = 0; i < leaseSetFiles.length; i++) { - Hash key = getLeaseSetHash(leaseSetFiles[i].getName()); - if ( (key != null) && (!isKnown(key)) ) - JobQueue.getInstance().addJob(new ReadLeaseJob(leaseSetFiles[i])); - } - } - File routerInfoFiles[] = dbDir.listFiles(RouterInfoFilter.getInstance()); - if (routerInfoFiles != null) { - for (int i = 0; i < routerInfoFiles.length; i++) { - Hash key = getRouterInfoHash(routerInfoFiles[i].getName()); - if ( (key != null) && (!isKnown(key)) ) - JobQueue.getInstance().addJob(new ReadRouterJob(routerInfoFiles[i])); - } - } - } catch (IOException ioe) { - _log.error("Error reading files in the db dir", ioe); - } - } - + public ReadJob() { + super(PersistentDataStore.this._context); + } + public String getName() { return "DB Read Job"; } + public void runJob() { + _log.info("Rereading new files"); + readFiles(); + requeue(READ_DELAY); + } + + private void 
readFiles() { + try { + File dbDir = getDbDir(); + File leaseSetFiles[] = dbDir.listFiles(LeaseSetFilter.getInstance()); + if (leaseSetFiles != null) { + for (int i = 0; i < leaseSetFiles.length; i++) { + Hash key = getLeaseSetHash(leaseSetFiles[i].getName()); + if ( (key != null) && (!isKnown(key)) ) + PersistentDataStore.this._context.jobQueue().addJob(new ReadLeaseJob(leaseSetFiles[i])); + } + } + File routerInfoFiles[] = dbDir.listFiles(RouterInfoFilter.getInstance()); + if (routerInfoFiles != null) { + for (int i = 0; i < routerInfoFiles.length; i++) { + Hash key = getRouterInfoHash(routerInfoFiles[i].getName()); + if ( (key != null) && (!isKnown(key)) ) + PersistentDataStore.this._context.jobQueue().addJob(new ReadRouterJob(routerInfoFiles[i])); + } + } + } catch (IOException ioe) { + _log.error("Error reading files in the db dir", ioe); + } + } + } private class ReadLeaseJob extends JobImpl { - private File _leaseFile; - public ReadLeaseJob(File leaseFile) { - _leaseFile = leaseFile; - } - public String getName() { return "Read LeaseSet"; } - public void runJob() { - try { - FileInputStream fis = null; - boolean corrupt = false; - try { - fis = new FileInputStream(_leaseFile); - LeaseSet ls = new LeaseSet(); - ls.readBytes(fis); - if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) { - _log.info("Reading in new LeaseSet: " + ls.getDestination().calculateHash()); - accept(ls); - } else { - _log.warn("Expired LeaseSet found for " + ls.getDestination().calculateHash() + ": Deleting"); - corrupt = true; - } - } catch (DataFormatException dfe) { - _log.warn("Error reading the leaseSet from " + _leaseFile.getAbsolutePath(), dfe); - corrupt = true; - } catch (FileNotFoundException fnfe) { - _log.debug("Deleted prior to read.. 
a race during expiration / load"); - corrupt = false; - } finally { - if (fis != null) try { fis.close(); } catch (IOException ioe) {} - } - if (corrupt) _leaseFile.delete(); - } catch (IOException ioe) { - _log.warn("Error reading the leaseSet from " + _leaseFile.getAbsolutePath(), ioe); - } - } + private File _leaseFile; + public ReadLeaseJob(File leaseFile) { + super(PersistentDataStore.this._context); + _leaseFile = leaseFile; + } + public String getName() { return "Read LeaseSet"; } + public void runJob() { + try { + FileInputStream fis = null; + boolean corrupt = false; + try { + fis = new FileInputStream(_leaseFile); + LeaseSet ls = new LeaseSet(); + ls.readBytes(fis); + if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) { + _log.info("Reading in new LeaseSet: " + ls.getDestination().calculateHash()); + accept(ls); + } else { + _log.warn("Expired LeaseSet found for " + ls.getDestination().calculateHash() + ": Deleting"); + corrupt = true; + } + } catch (DataFormatException dfe) { + _log.warn("Error reading the leaseSet from " + _leaseFile.getAbsolutePath(), dfe); + corrupt = true; + } catch (FileNotFoundException fnfe) { + _log.debug("Deleted prior to read.. 
a race during expiration / load"); + corrupt = false; + } finally { + if (fis != null) try { fis.close(); } catch (IOException ioe) {} + } + if (corrupt) _leaseFile.delete(); + } catch (IOException ioe) { + _log.warn("Error reading the leaseSet from " + _leaseFile.getAbsolutePath(), ioe); + } + } } private class ReadRouterJob extends JobImpl { - private File _routerFile; - public ReadRouterJob(File routerFile) { - _routerFile = routerFile; - } - public String getName() { return "Read RouterInfo"; } - public void runJob() { - try { - FileInputStream fis = null; - boolean corrupt = false; - try { - fis = new FileInputStream(_routerFile); - RouterInfo ri = new RouterInfo(); - ri.readBytes(fis); - if (ri.isValid()) { - _log.info("Reading in new RouterInfo: " + ri.getIdentity().getHash()); - accept(ri); - } else { - _log.warn("Invalid routerInfo found for " + ri.getIdentity().getHash() + ": " + ri); - corrupt = true; - } - } catch (DataFormatException dfe) { - _log.warn("Error reading the routerInfo from " + _routerFile.getAbsolutePath(), dfe); - corrupt = true; - } finally { - if (fis != null) try { fis.close(); } catch (IOException ioe) {} - } - if (corrupt) _routerFile.delete(); - } catch (IOException ioe) { - _log.warn("Error reading the RouterInfo from " + _routerFile.getAbsolutePath(), ioe); - } - } + private File _routerFile; + public ReadRouterJob(File routerFile) { + super(PersistentDataStore.this._context); + _routerFile = routerFile; + } + public String getName() { return "Read RouterInfo"; } + public void runJob() { + try { + FileInputStream fis = null; + boolean corrupt = false; + try { + fis = new FileInputStream(_routerFile); + RouterInfo ri = new RouterInfo(); + ri.readBytes(fis); + if (ri.isValid()) { + _log.info("Reading in new RouterInfo: " + ri.getIdentity().getHash()); + accept(ri); + } else { + _log.warn("Invalid routerInfo found for " + ri.getIdentity().getHash() + ": " + ri); + corrupt = true; + } + } catch (DataFormatException dfe) { + 
_log.warn("Error reading the routerInfo from " + _routerFile.getAbsolutePath(), dfe); + corrupt = true; + } finally { + if (fis != null) try { fis.close(); } catch (IOException ioe) {} + } + if (corrupt) _routerFile.delete(); + } catch (IOException ioe) { + _log.warn("Error reading the RouterInfo from " + _routerFile.getAbsolutePath(), ioe); + } + } } private File getDbDir() throws IOException { - File f = new File(_dbDir); - if (!f.exists()) { - boolean created = f.mkdirs(); - if (!created) - throw new IOException("Unable to create the DB directory [" + f.getAbsolutePath() + "]"); - } - if (!f.isDirectory()) - throw new IOException("DB directory [" + f.getAbsolutePath() + "] is not a directory!"); - if (!f.canRead()) - throw new IOException("DB directory [" + f.getAbsolutePath() + "] is not readable!"); - if (!f.canWrite()) - throw new IOException("DB directory [" + f.getAbsolutePath() + "] is not writable!"); - return f; + File f = new File(_dbDir); + if (!f.exists()) { + boolean created = f.mkdirs(); + if (!created) + throw new IOException("Unable to create the DB directory [" + f.getAbsolutePath() + "]"); + } + if (!f.isDirectory()) + throw new IOException("DB directory [" + f.getAbsolutePath() + "] is not a directory!"); + if (!f.canRead()) + throw new IOException("DB directory [" + f.getAbsolutePath() + "] is not readable!"); + if (!f.canWrite()) + throw new IOException("DB directory [" + f.getAbsolutePath() + "] is not writable!"); + return f; } private final static String LEASESET_PREFIX = "leaseSet-"; @@ -252,72 +257,72 @@ class PersistentDataStore extends TransientDataStore { private final static String ROUTERINFO_SUFFIX = ".dat"; private String getLeaseSetName(Hash hash) { - return LEASESET_PREFIX + hash.toBase64() + LEASESET_SUFFIX; + return LEASESET_PREFIX + hash.toBase64() + LEASESET_SUFFIX; } private String getRouterInfoName(Hash hash) { - return ROUTERINFO_PREFIX + hash.toBase64() + ROUTERINFO_SUFFIX; + return ROUTERINFO_PREFIX + hash.toBase64() + 
ROUTERINFO_SUFFIX; } private Hash getLeaseSetHash(String filename) { - return getHash(filename, LEASESET_PREFIX, LEASESET_SUFFIX); + return getHash(filename, LEASESET_PREFIX, LEASESET_SUFFIX); } - + private Hash getRouterInfoHash(String filename) { - return getHash(filename, ROUTERINFO_PREFIX, ROUTERINFO_SUFFIX); + return getHash(filename, ROUTERINFO_PREFIX, ROUTERINFO_SUFFIX); } - + private Hash getHash(String filename, String prefix, String suffix) { - try { - String key = filename.substring(prefix.length()); - key = key.substring(0, key.length() - suffix.length()); - Hash h = new Hash(); - h.fromBase64(key); - return h; - } catch (Exception e) { - _log.warn("Unable to fetch the key from [" + filename + "]", e); - return null; - } + try { + String key = filename.substring(prefix.length()); + key = key.substring(0, key.length() - suffix.length()); + Hash h = new Hash(); + h.fromBase64(key); + return h; + } catch (Exception e) { + _log.warn("Unable to fetch the key from [" + filename + "]", e); + return null; + } } - + private void removeFile(Hash key, File dir) throws IOException { - String lsName = getLeaseSetName(key); - String riName = getRouterInfoName(key); - File f = new File(dir, lsName); - if (f.exists()) { - boolean removed = f.delete(); - if (!removed) - _log.warn("Unable to remove lease set at " + f.getAbsolutePath()); - else - _log.info("Removed lease set at " + f.getAbsolutePath()); - return; - } - f = new File(dir, riName); - if (f.exists()) { - boolean removed = f.delete(); - if (!removed) - _log.warn("Unable to remove router info at " + f.getAbsolutePath()); - else - _log.info("Removed router info at " + f.getAbsolutePath()); - return; - } + String lsName = getLeaseSetName(key); + String riName = getRouterInfoName(key); + File f = new File(dir, lsName); + if (f.exists()) { + boolean removed = f.delete(); + if (!removed) + _log.warn("Unable to remove lease set at " + f.getAbsolutePath()); + else + _log.info("Removed lease set at " + 
f.getAbsolutePath()); + return; + } + f = new File(dir, riName); + if (f.exists()) { + boolean removed = f.delete(); + if (!removed) + _log.warn("Unable to remove router info at " + f.getAbsolutePath()); + else + _log.info("Removed router info at " + f.getAbsolutePath()); + return; + } } private final static class LeaseSetFilter implements FilenameFilter { - private static final FilenameFilter _instance = new LeaseSetFilter(); - public static final FilenameFilter getInstance() { return _instance; } - public boolean accept(File dir, String name) { - if (name == null) return false; - name = name.toUpperCase(); - return (name.startsWith(LEASESET_PREFIX.toUpperCase()) && name.endsWith(LEASESET_SUFFIX.toUpperCase())); - } + private static final FilenameFilter _instance = new LeaseSetFilter(); + public static final FilenameFilter getInstance() { return _instance; } + public boolean accept(File dir, String name) { + if (name == null) return false; + name = name.toUpperCase(); + return (name.startsWith(LEASESET_PREFIX.toUpperCase()) && name.endsWith(LEASESET_SUFFIX.toUpperCase())); + } } private final static class RouterInfoFilter implements FilenameFilter { - private static final FilenameFilter _instance = new RouterInfoFilter(); - public static final FilenameFilter getInstance() { return _instance; } - public boolean accept(File dir, String name) { - if (name == null) return false; - name = name.toUpperCase(); - return (name.startsWith(ROUTERINFO_PREFIX.toUpperCase()) && name.endsWith(ROUTERINFO_SUFFIX.toUpperCase())); - } + private static final FilenameFilter _instance = new RouterInfoFilter(); + public static final FilenameFilter getInstance() { return _instance; } + public boolean accept(File dir, String name) { + if (name == null) return false; + name = name.toUpperCase(); + return (name.startsWith(ROUTERINFO_PREFIX.toUpperCase()) && name.endsWith(ROUTERINFO_SUFFIX.toUpperCase())); + } } } diff --git 
a/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java index e3cf23378..395376d97 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java @@ -19,6 +19,8 @@ import net.i2p.router.JobQueue; import net.i2p.router.Router; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; +import net.i2p.I2PException; /** * Run periodically for each locally created leaseSet to cause it to be republished @@ -26,53 +28,43 @@ import net.i2p.util.Log; * */ public class RepublishLeaseSetJob extends JobImpl { - private final static Log _log = new Log(RepublishLeaseSetJob.class); + private Log _log; private final static long REPUBLISH_LEASESET_DELAY = 60*1000; // 5 mins private Hash _dest; private KademliaNetworkDatabaseFacade _facade; - /** - * maintain a set of dest hashes that we're already publishing, - * so we don't go overboard. This is clunky, so if it gets any more - * complicated this will go into a 'manager' function rather than part of - * a job. 
- */ - private final static Set _pending = new HashSet(16); - public static boolean alreadyRepublishing(Hash dest) { - synchronized (_pending) { - return _pending.contains(dest); - } - } - - public RepublishLeaseSetJob(KademliaNetworkDatabaseFacade facade, Hash destHash) { - super(); - _facade = facade; - _dest = destHash; - synchronized (_pending) { - _pending.add(destHash); - } - getTiming().setStartAfter(Clock.getInstance().now()+REPUBLISH_LEASESET_DELAY); + public RepublishLeaseSetJob(RouterContext ctx, KademliaNetworkDatabaseFacade facade, Hash destHash) { + super(ctx); + _log = ctx.logManager().getLog(RepublishLeaseSetJob.class); + _facade = facade; + _dest = destHash; + getTiming().setStartAfter(ctx.clock().now()+REPUBLISH_LEASESET_DELAY); } public String getName() { return "Republish a local leaseSet"; } public void runJob() { - if (ClientManagerFacade.getInstance().isLocal(_dest)) { - LeaseSet ls = _facade.lookupLeaseSetLocally(_dest); - if (ls != null) { - _log.warn("Client " + _dest + " is local, so we're republishing it"); - if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) { - _log.warn("Not publishing a LOCAL lease that isn't current - " + _dest, new Exception("Publish expired LOCAL lease?")); - } else { - JobQueue.getInstance().addJob(new StoreJob(_facade, _dest, ls, null, null, REPUBLISH_LEASESET_DELAY)); - } - } else { - _log.warn("Client " + _dest + " is local, but we can't find a valid LeaseSet? 
perhaps its being rebuilt?"); - } - requeue(REPUBLISH_LEASESET_DELAY); - } else { - _log.info("Client " + _dest + " is no longer local, so no more republishing their leaseSet"); - synchronized (_pending) { - _pending.remove(_dest); - } - } + try { + if (_context.clientManager().isLocal(_dest)) { + LeaseSet ls = _facade.lookupLeaseSetLocally(_dest); + if (ls != null) { + _log.warn("Client " + _dest + " is local, so we're republishing it"); + if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) { + _log.warn("Not publishing a LOCAL lease that isn't current - " + _dest, new Exception("Publish expired LOCAL lease?")); + } else { + _context.jobQueue().addJob(new StoreJob(_context, _facade, _dest, ls, null, null, REPUBLISH_LEASESET_DELAY)); + } + } else { + _log.warn("Client " + _dest + " is local, but we can't find a valid LeaseSet? perhaps its being rebuilt?"); + } + requeue(REPUBLISH_LEASESET_DELAY); + return; + } else { + _log.info("Client " + _dest + " is no longer local, so no more republishing their leaseSet"); + } + _facade.stopPublishing(_dest); + } catch (RuntimeException re) { + _log.error("Uncaught error republishing the leaseSet", re); + _facade.stopPublishing(_dest); + throw re; + } } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java index 189904aff..9674f04c6 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java @@ -34,6 +34,7 @@ import net.i2p.router.message.SendTunnelMessageJob; import net.i2p.stat.StatManager; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Search for a particular key iteratively until we either find a value or we @@ -41,7 +42,7 @@ import net.i2p.util.Log; * */ class SearchJob extends JobImpl { - private final Log _log = new Log(SearchJob.class); + private Log _log; private KademliaNetworkDatabaseFacade 
_facade; private SearchState _state; private Job _onSuccess; @@ -51,34 +52,35 @@ class SearchJob extends JobImpl { private boolean _keepStats; private boolean _isLease; private Job _pendingRequeueJob; + private PeerSelector _peerSelector; public final static int SEARCH_BREDTH = 3; // 3 peers at a time public final static int SEARCH_PRIORITY = 400; // large because the search is probably for a real search private static final long PER_PEER_TIMEOUT = 30*1000; - - static { - StatManager.getInstance().createRateStat("netDb.successTime", "How long a successful search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createRateStat("netDb.failedTime", "How long a failed search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createRateStat("netDb.failedPeers", "How many peers are contacted in a failed search", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l }); - } /** * Create a new search for the routingKey specified * */ - public SearchJob(KademliaNetworkDatabaseFacade facade, Hash key, Job onSuccess, Job onFailure, long timeoutMs, boolean keepStats, boolean isLease) { + public SearchJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key, Job onSuccess, Job onFailure, long timeoutMs, boolean keepStats, boolean isLease) { + super(context); if ( (key == null) || (key.getData() == null) ) throw new IllegalArgumentException("Search for null key? 
wtf"); + _log = _context.logManager().getLog(SearchJob.class); _facade = facade; - _state = new SearchState(key); + _state = new SearchState(_context, key); _onSuccess = onSuccess; _onFailure = onFailure; _timeoutMs = timeoutMs; _keepStats = keepStats; _isLease = isLease; - _expiration = Clock.getInstance().now() + timeoutMs; - } + _peerSelector = new PeerSelector(_context); + _expiration = _context.clock().now() + timeoutMs; + _context.statManager().createRateStat("netDb.successTime", "How long a successful search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createRateStat("netDb.failedPeers", "How many peers are contacted in a failed search", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l }); + } public void runJob() { if (_log.shouldLog(Log.INFO)) @@ -125,7 +127,7 @@ class SearchJob extends JobImpl { private boolean isLocal() { return _facade.getDataStore().isKnown(_state.getTarget()); } private boolean isExpired() { - return Clock.getInstance().now() >= _expiration; + return _context.clock().now() >= _expiration; } /** @@ -187,16 +189,21 @@ class SearchJob extends JobImpl { private void requeuePending() { if (_pendingRequeueJob == null) - _pendingRequeueJob = new JobImpl() { - public String getName() { return "Requeue search with pending"; } - public void runJob() { searchNext(); } - }; - long now = Clock.getInstance().now(); - if (_pendingRequeueJob.getTiming().getStartAfter() < now) - _pendingRequeueJob.getTiming().setStartAfter(now+5*1000); - JobQueue.getInstance().addJob(_pendingRequeueJob); + _pendingRequeueJob = new RequeuePending(); + long now = 
_context.clock().now(); + if (_pendingRequeueJob.getTiming().getStartAfter() < now) + _pendingRequeueJob.getTiming().setStartAfter(now+5*1000); + _context.jobQueue().addJob(_pendingRequeueJob); } + private class RequeuePending extends JobImpl { + public RequeuePending() { + super(SearchJob.this._context); + } + public String getName() { return "Requeue search with pending"; } + public void runJob() { searchNext(); } + } + /** * Set of Hash structures for routers we want to check next. This is the 'interesting' part of * the algorithm. But to keep you on your toes, we've refactored it to the PeerSelector.selectNearestExplicit @@ -204,10 +211,10 @@ class SearchJob extends JobImpl { * @return ordered list of Hash objects */ private List getClosestRouters(Hash key, int numClosest, Set alreadyChecked) { - Hash rkey = RoutingKeyGenerator.getInstance().getRoutingKey(key); + Hash rkey = _context.routingKeyGenerator().getRoutingKey(key); if (_log.shouldLog(Log.DEBUG)) _log.debug(getJobId() + ": Current routing key for " + key + ": " + rkey); - return PeerSelector.getInstance().selectNearestExplicit(rkey, numClosest, alreadyChecked, _facade.getKBuckets()); + return _peerSelector.selectNearestExplicit(rkey, numClosest, alreadyChecked, _facade.getKBuckets()); } /** @@ -215,7 +222,7 @@ class SearchJob extends JobImpl { * */ protected void sendSearch(RouterInfo router) { - if (router.getIdentity().equals(Router.getInstance().getRouterInfo().getIdentity())) { + if (router.getIdentity().equals(_context.router().getRouterInfo().getIdentity())) { // don't search ourselves if (_log.shouldLog(Log.ERROR)) _log.error(getJobId() + ": Dont send search to ourselves - why did we try?"); @@ -241,26 +248,26 @@ class SearchJob extends JobImpl { TunnelId inTunnelId = getInboundTunnelId(); if (inTunnelId == null) { _log.error("No tunnels to get search replies through! 
wtf!"); - JobQueue.getInstance().addJob(new FailedJob(router)); + _context.jobQueue().addJob(new FailedJob(router)); return; } - TunnelInfo inTunnel = TunnelManagerFacade.getInstance().getTunnelInfo(inTunnelId); - RouterInfo inGateway = NetworkDatabaseFacade.getInstance().lookupRouterInfoLocally(inTunnel.getThisHop()); + TunnelInfo inTunnel = _context.tunnelManager().getTunnelInfo(inTunnelId); + RouterInfo inGateway = _context.netDb().lookupRouterInfoLocally(inTunnel.getThisHop()); if (inGateway == null) { _log.error("We can't find the gateway to our inbound tunnel?! wtf"); - JobQueue.getInstance().addJob(new FailedJob(router)); + _context.jobQueue().addJob(new FailedJob(router)); return; } - long expiration = Clock.getInstance().now() + PER_PEER_TIMEOUT; // getTimeoutMs(); + long expiration = _context.clock().now() + PER_PEER_TIMEOUT; // getTimeoutMs(); DatabaseLookupMessage msg = buildMessage(inTunnelId, inGateway, expiration); TunnelId outTunnelId = getOutboundTunnelId(); if (outTunnelId == null) { _log.error("No tunnels to send search out through! 
wtf!"); - JobQueue.getInstance().addJob(new FailedJob(router)); + _context.jobQueue().addJob(new FailedJob(router)); return; } @@ -270,18 +277,18 @@ class SearchJob extends JobImpl { + msg.getFrom().getIdentity().getHash().toBase64() + "] via tunnel [" + msg.getReplyTunnel() + "]"); - SearchMessageSelector sel = new SearchMessageSelector(router, _expiration, _state); + SearchMessageSelector sel = new SearchMessageSelector(_context, router, _expiration, _state); long timeoutMs = PER_PEER_TIMEOUT; // getTimeoutMs(); - SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(router, _state, _facade, this); - SendTunnelMessageJob j = new SendTunnelMessageJob(msg, outTunnelId, router.getIdentity().getHash(), + SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(_context, router, _state, _facade, this); + SendTunnelMessageJob j = new SendTunnelMessageJob(_context, msg, outTunnelId, router.getIdentity().getHash(), null, null, reply, new FailedJob(router), sel, timeoutMs, SEARCH_PRIORITY); - JobQueue.getInstance().addJob(j); + _context.jobQueue().addJob(j); } /** we're searching for a router, so we can just send direct */ protected void sendRouterSearch(RouterInfo router) { - long expiration = Clock.getInstance().now() + PER_PEER_TIMEOUT; // getTimeoutMs(); + long expiration = _context.clock().now() + PER_PEER_TIMEOUT; // getTimeoutMs(); DatabaseLookupMessage msg = buildMessage(expiration); @@ -289,12 +296,12 @@ class SearchJob extends JobImpl { _log.info(getJobId() + ": Sending router search to " + router.getIdentity().getHash().toBase64() + " for " + msg.getSearchKey().toBase64() + " w/ replies to us [" + msg.getFrom().getIdentity().getHash().toBase64() + "]"); - SearchMessageSelector sel = new SearchMessageSelector(router, _expiration, _state); + SearchMessageSelector sel = new SearchMessageSelector(_context, router, _expiration, _state); long timeoutMs = PER_PEER_TIMEOUT; - SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(router, 
_state, _facade, this); - SendMessageDirectJob j = new SendMessageDirectJob(msg, router.getIdentity().getHash(), + SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(_context, router, _state, _facade, this); + SendMessageDirectJob j = new SendMessageDirectJob(_context, msg, router.getIdentity().getHash(), reply, new FailedJob(router), sel, expiration, SEARCH_PRIORITY); - JobQueue.getInstance().addJob(j); + _context.jobQueue().addJob(j); } /** @@ -306,7 +313,7 @@ class SearchJob extends JobImpl { TunnelSelectionCriteria crit = new TunnelSelectionCriteria(); crit.setMaximumTunnelsRequired(1); crit.setMinimumTunnelsRequired(1); - List tunnelIds = TunnelManagerFacade.getInstance().selectOutboundTunnelIds(crit); + List tunnelIds = _context.tunnelManager().selectOutboundTunnelIds(crit); if (tunnelIds.size() <= 0) { return null; } @@ -323,7 +330,7 @@ class SearchJob extends JobImpl { TunnelSelectionCriteria crit = new TunnelSelectionCriteria(); crit.setMaximumTunnelsRequired(1); crit.setMinimumTunnelsRequired(1); - List tunnelIds = TunnelManagerFacade.getInstance().selectInboundTunnelIds(crit); + List tunnelIds = _context.tunnelManager().selectInboundTunnelIds(crit); if (tunnelIds.size() <= 0) { return null; } @@ -338,7 +345,7 @@ class SearchJob extends JobImpl { * @param expiration when the search should stop */ protected DatabaseLookupMessage buildMessage(TunnelId replyTunnelId, RouterInfo replyGateway, long expiration) { - DatabaseLookupMessage msg = new DatabaseLookupMessage(); + DatabaseLookupMessage msg = new DatabaseLookupMessage(_context); msg.setSearchKey(_state.getTarget()); msg.setFrom(replyGateway); msg.setDontIncludePeers(_state.getAttempted()); @@ -353,9 +360,9 @@ class SearchJob extends JobImpl { * */ protected DatabaseLookupMessage buildMessage(long expiration) { - DatabaseLookupMessage msg = new DatabaseLookupMessage(); + DatabaseLookupMessage msg = new DatabaseLookupMessage(_context); msg.setSearchKey(_state.getTarget()); - 
msg.setFrom(Router.getInstance().getRouterInfo()); + msg.setFrom(_context.router().getRouterInfo()); msg.setDontIncludePeers(_state.getAttempted()); msg.setMessageExpiration(new Date(expiration)); msg.setReplyTunnel(null); @@ -365,7 +372,7 @@ class SearchJob extends JobImpl { void replyFound(DatabaseSearchReplyMessage message, Hash peer) { long duration = _state.replyFound(peer); // this processing can take a while, so split 'er up - JobQueue.getInstance().addJob(new SearchReplyJob((DatabaseSearchReplyMessage)message, peer, duration)); + _context.jobQueue().addJob(new SearchReplyJob((DatabaseSearchReplyMessage)message, peer, duration)); } private final class SearchReplyJob extends JobImpl { @@ -378,6 +385,7 @@ class SearchJob extends JobImpl { private int _duplicatePeers; private long _duration; public SearchReplyJob(DatabaseSearchReplyMessage message, Hash peer, long duration) { + super(SearchJob.this._context); _msg = message; _peer = peer; _curIndex = 0; @@ -389,8 +397,8 @@ class SearchJob extends JobImpl { public String getName() { return "Process Reply for Kademlia Search"; } public void runJob() { if (_curIndex >= _msg.getNumReplies()) { - ProfileManager.getInstance().dbLookupReply(_peer, _newPeers, _seenPeers, - _invalidPeers, _duplicatePeers, _duration); + _context.profileManager().dbLookupReply(_peer, _newPeers, _seenPeers, + _invalidPeers, _duplicatePeers, _duration); } else { RouterInfo ri = _msg.getReply(_curIndex); if (ri.isValid()) { @@ -435,7 +443,7 @@ class SearchJob extends JobImpl { * */ public FailedJob(RouterInfo peer, boolean penalizePeer) { - super(); + super(SearchJob.this._context); _penalizePeer = penalizePeer; _peer = peer.getIdentity().getHash(); } @@ -444,7 +452,7 @@ class SearchJob extends JobImpl { if (_penalizePeer) { if (_log.shouldLog(Log.WARN)) _log.warn("Penalizing peer for timeout on search: " + _peer.toBase64()); - ProfileManager.getInstance().dbLookupFailed(_peer); + _context.profileManager().dbLookupFailed(_peer); } else { if 
(_log.shouldLog(Log.ERROR)) _log.error("NOT (!!) Penalizing peer for timeout on search: " + _peer.toBase64()); @@ -464,12 +472,12 @@ class SearchJob extends JobImpl { _log.debug(getJobId() + ": State of successful search: " + _state); if (_keepStats) { - long time = Clock.getInstance().now() - _state.getWhenStarted(); - StatManager.getInstance().addRateData("netDb.successTime", time, 0); - StatManager.getInstance().addRateData("netDb.successPeers", _state.getAttempted().size(), time); + long time = _context.clock().now() - _state.getWhenStarted(); + _context.statManager().addRateData("netDb.successTime", time, 0); + _context.statManager().addRateData("netDb.successPeers", _state.getAttempted().size(), time); } if (_onSuccess != null) - JobQueue.getInstance().addJob(_onSuccess); + _context.jobQueue().addJob(_onSuccess); } /** @@ -482,12 +490,12 @@ class SearchJob extends JobImpl { _log.debug(getJobId() + ": State of failed search: " + _state); if (_keepStats) { - long time = Clock.getInstance().now() - _state.getWhenStarted(); - StatManager.getInstance().addRateData("netDb.failedTime", time, 0); - StatManager.getInstance().addRateData("netDb.failedPeers", _state.getAttempted().size(), time); + long time = _context.clock().now() - _state.getWhenStarted(); + _context.statManager().addRateData("netDb.failedTime", time, 0); + _context.statManager().addRateData("netDb.failedPeers", _state.getAttempted().size(), time); } if (_onFailure != null) - JobQueue.getInstance().addJob(_onFailure); + _context.jobQueue().addJob(_onFailure); } public String getName() { return "Kademlia NetDb Search"; } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/SearchMessageSelector.java b/router/java/src/net/i2p/router/networkdb/kademlia/SearchMessageSelector.java index a2d8ccfa2..8dce57170 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/SearchMessageSelector.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/SearchMessageSelector.java @@ -6,81 +6,84 @@ 
import net.i2p.data.i2np.DatabaseSearchReplyMessage; import net.i2p.data.i2np.DatabaseStoreMessage; import net.i2p.data.i2np.I2NPMessage; import net.i2p.router.MessageSelector; -import net.i2p.util.Clock; +import net.i2p.router.RouterContext; import net.i2p.util.Log; /** - * Check to see the message is a reply from the peer regarding the current + * Check to see the message is a reply from the peer regarding the current * search * */ class SearchMessageSelector implements MessageSelector { - private final static Log _log = new Log(SearchMessageSelector.class); + private Log _log; + private RouterContext _context; private static int __searchSelectorId = 0; private Hash _peer; private boolean _found; private int _id; private long _exp; private SearchState _state; - - public SearchMessageSelector(RouterInfo peer, long expiration, SearchState state) { - _peer = peer.getIdentity().getHash(); - _found = false; - _exp = expiration; - _state = state; - _id = ++__searchSelectorId; - if (_log.shouldLog(Log.DEBUG)) - _log.debug("[" + _id + "] Created: " + toString()); + + public SearchMessageSelector(RouterContext context, RouterInfo peer, long expiration, SearchState state) { + _context = context; + _log = context.logManager().getLog(SearchMessageSelector.class); + _peer = peer.getIdentity().getHash(); + _found = false; + _exp = expiration; + _state = state; + _id = ++__searchSelectorId; + if (_log.shouldLog(Log.DEBUG)) + _log.debug("[" + _id + "] Created: " + toString()); } - + public String toString() { return "Search selector [" + _id + "] looking for a reply from " + _peer + " with regards to " + _state.getTarget(); } - - public boolean continueMatching() { - if (_found) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("[" + _id + "] Dont continue matching! 
looking for a reply from " + _peer + " with regards to " + _state.getTarget()); - return false; - } - long now = Clock.getInstance().now(); - return now < _exp; + + public boolean continueMatching() { + if (_found) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("[" + _id + "] Dont continue matching! looking for a reply from " + _peer + " with regards to " + _state.getTarget()); + return false; + } + long now = _context.clock().now(); + return now < _exp; } public long getExpiration() { return _exp; } public boolean isMatch(I2NPMessage message) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("[" + _id + "] isMatch("+message.getClass().getName() + ") [want dbStore or dbSearchReply from " + _peer + " for " + _state.getTarget() + "]"); - if (message instanceof DatabaseStoreMessage) { - DatabaseStoreMessage msg = (DatabaseStoreMessage)message; - if (msg.getKey().equals(_state.getTarget())) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("[" + _id + "] Was a DBStore of the key we're looking for. 
May not have been from who we're checking against though, but DBStore doesn't include that info"); - _found = true; - return true; - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("[" + _id + "] DBStore of a key we're not looking for"); - return false; - } - } else if (message instanceof DatabaseSearchReplyMessage) { - DatabaseSearchReplyMessage msg = (DatabaseSearchReplyMessage)message; - if (_peer.equals(msg.getFromHash())) { - if (msg.getSearchKey().equals(_state.getTarget())) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("[" + _id + "] Was a DBSearchReply from who we're checking with for a key we're looking for"); - _found = true; - return true; - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("[" + _id + "] Was a DBSearchReply from who we're checking with but NOT for the key we're looking for"); - return false; - } - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("[" + _id + "] DBSearchReply from someone we are not checking with [" + msg.getFromHash() + ", not " + _state.getTarget() + "]"); - return false; - } - } else { - //_log.debug("Not a DbStore or DbSearchReply"); - return false; - } + if (_log.shouldLog(Log.DEBUG)) + _log.debug("[" + _id + "] isMatch("+message.getClass().getName() + ") [want dbStore or dbSearchReply from " + _peer + " for " + _state.getTarget() + "]"); + if (message instanceof DatabaseStoreMessage) { + DatabaseStoreMessage msg = (DatabaseStoreMessage)message; + if (msg.getKey().equals(_state.getTarget())) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("[" + _id + "] Was a DBStore of the key we're looking for. 
May not have been from who we're checking against though, but DBStore doesn't include that info"); + _found = true; + return true; + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("[" + _id + "] DBStore of a key we're not looking for"); + return false; + } + } else if (message instanceof DatabaseSearchReplyMessage) { + DatabaseSearchReplyMessage msg = (DatabaseSearchReplyMessage)message; + if (_peer.equals(msg.getFromHash())) { + if (msg.getSearchKey().equals(_state.getTarget())) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("[" + _id + "] Was a DBSearchReply from who we're checking with for a key we're looking for"); + _found = true; + return true; + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("[" + _id + "] Was a DBSearchReply from who we're checking with but NOT for the key we're looking for"); + return false; + } + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("[" + _id + "] DBSearchReply from someone we are not checking with [" + msg.getFromHash() + ", not " + _state.getTarget() + "]"); + return false; + } + } else { + //_log.debug("Not a DbStore or DbSearchReply"); + return false; + } } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/SearchState.java b/router/java/src/net/i2p/router/networkdb/kademlia/SearchState.java index 8a221c2c1..fd3ad4774 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/SearchState.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/SearchState.java @@ -9,12 +9,14 @@ import java.util.Set; import net.i2p.data.Hash; import net.i2p.util.Clock; +import net.i2p.router.RouterContext; /** * Data related to a particular search * */ class SearchState { + private RouterContext _context; private HashSet _pendingPeers; private HashMap _pendingPeerTimes; private HashSet _attemptedPeers; @@ -23,137 +25,138 @@ class SearchState { private Hash _searchKey; private volatile long _completed; private volatile long _started; - - public SearchState(Hash key) { - _searchKey = key; - 
_pendingPeers = new HashSet(16); - _attemptedPeers = new HashSet(16); - _failedPeers = new HashSet(16); - _successfulPeers = new HashSet(16); - _pendingPeerTimes = new HashMap(16); - _completed = -1; - _started = Clock.getInstance().now(); + + public SearchState(RouterContext context, Hash key) { + _context = context; + _searchKey = key; + _pendingPeers = new HashSet(16); + _attemptedPeers = new HashSet(16); + _failedPeers = new HashSet(16); + _successfulPeers = new HashSet(16); + _pendingPeerTimes = new HashMap(16); + _completed = -1; + _started = _context.clock().now(); } - + public Hash getTarget() { return _searchKey; } - public Set getPending() { - synchronized (_pendingPeers) { - return (Set)_pendingPeers.clone(); - } + public Set getPending() { + synchronized (_pendingPeers) { + return (Set)_pendingPeers.clone(); + } } - public Set getAttempted() { - synchronized (_attemptedPeers) { - return (Set)_attemptedPeers.clone(); - } + public Set getAttempted() { + synchronized (_attemptedPeers) { + return (Set)_attemptedPeers.clone(); + } } public boolean wasAttempted(Hash peer) { - synchronized (_attemptedPeers) { - return _attemptedPeers.contains(peer); - } + synchronized (_attemptedPeers) { + return _attemptedPeers.contains(peer); + } } - public Set getSuccessful() { - synchronized (_successfulPeers) { - return (Set)_successfulPeers.clone(); - } + public Set getSuccessful() { + synchronized (_successfulPeers) { + return (Set)_successfulPeers.clone(); + } } - public Set getFailed() { - synchronized (_failedPeers) { - return (Set)_failedPeers.clone(); - } + public Set getFailed() { + synchronized (_failedPeers) { + return (Set)_failedPeers.clone(); + } } public boolean completed() { return _completed != -1; } - public void complete(boolean completed) { - if (completed) - _completed = Clock.getInstance().now(); + public void complete(boolean completed) { + if (completed) + _completed = _context.clock().now(); } - + public long getWhenStarted() { return _started; } 
public long getWhenCompleted() { return _completed; } - + public void addPending(Collection pending) { - synchronized (_pendingPeers) { - _pendingPeers.addAll(pending); - for (Iterator iter = pending.iterator(); iter.hasNext(); ) - _pendingPeerTimes.put(iter.next(), new Long(Clock.getInstance().now())); - } - synchronized (_attemptedPeers) { - _attemptedPeers.addAll(pending); - } + synchronized (_pendingPeers) { + _pendingPeers.addAll(pending); + for (Iterator iter = pending.iterator(); iter.hasNext(); ) + _pendingPeerTimes.put(iter.next(), new Long(_context.clock().now())); + } + synchronized (_attemptedPeers) { + _attemptedPeers.addAll(pending); + } } - + /** how long did it take to get the reply, or -1 if we don't know */ public long dataFound(Hash peer) { - long rv = -1; - synchronized (_pendingPeers) { - _pendingPeers.remove(peer); - Long when = (Long)_pendingPeerTimes.remove(peer); - if (when != null) - rv = Clock.getInstance().now() - when.longValue(); - } - synchronized (_successfulPeers) { - _successfulPeers.add(peer); - } - return rv; + long rv = -1; + synchronized (_pendingPeers) { + _pendingPeers.remove(peer); + Long when = (Long)_pendingPeerTimes.remove(peer); + if (when != null) + rv = _context.clock().now() - when.longValue(); + } + synchronized (_successfulPeers) { + _successfulPeers.add(peer); + } + return rv; } - + /** how long did it take to get the reply, or -1 if we dont know */ public long replyFound(Hash peer) { - synchronized (_pendingPeers) { - _pendingPeers.remove(peer); - Long when = (Long)_pendingPeerTimes.remove(peer); - if (when != null) - return Clock.getInstance().now() - when.longValue(); - else - return -1; - } + synchronized (_pendingPeers) { + _pendingPeers.remove(peer); + Long when = (Long)_pendingPeerTimes.remove(peer); + if (when != null) + return _context.clock().now() - when.longValue(); + else + return -1; + } } - + public void replyTimeout(Hash peer) { - synchronized (_pendingPeers) { - _pendingPeers.remove(peer); - 
_pendingPeerTimes.remove(peer); - } - synchronized (_failedPeers) { - _failedPeers.add(peer); - } + synchronized (_pendingPeers) { + _pendingPeers.remove(peer); + _pendingPeerTimes.remove(peer); + } + synchronized (_failedPeers) { + _failedPeers.add(peer); + } } - - public String toString() { - StringBuffer buf = new StringBuffer(256); - buf.append("Searching for ").append(_searchKey); - buf.append(" "); - if (_completed <= 0) - buf.append(" completed? false "); - else - buf.append(" completed on ").append(new Date(_completed)); - buf.append(" Attempted: "); - synchronized (_attemptedPeers) { - for (Iterator iter = _attemptedPeers.iterator(); iter.hasNext(); ) { - Hash peer = (Hash)iter.next(); - buf.append(peer.toBase64()).append(" "); - } - } - buf.append(" Pending: "); - synchronized (_pendingPeers) { - for (Iterator iter = _pendingPeers.iterator(); iter.hasNext(); ) { - Hash peer = (Hash)iter.next(); - buf.append(peer.toBase64()).append(" "); - } - } - buf.append(" Failed: "); - synchronized (_failedPeers) { - for (Iterator iter = _failedPeers.iterator(); iter.hasNext(); ) { - Hash peer = (Hash)iter.next(); - buf.append(peer.toBase64()).append(" "); - } - } - buf.append(" Successful: "); - synchronized (_successfulPeers) { - for (Iterator iter = _successfulPeers.iterator(); iter.hasNext(); ) { - Hash peer = (Hash)iter.next(); - buf.append(peer.toBase64()).append(" "); - } - } - return buf.toString(); + + public String toString() { + StringBuffer buf = new StringBuffer(256); + buf.append("Searching for ").append(_searchKey); + buf.append(" "); + if (_completed <= 0) + buf.append(" completed? 
false "); + else + buf.append(" completed on ").append(new Date(_completed)); + buf.append(" Attempted: "); + synchronized (_attemptedPeers) { + for (Iterator iter = _attemptedPeers.iterator(); iter.hasNext(); ) { + Hash peer = (Hash)iter.next(); + buf.append(peer.toBase64()).append(" "); + } + } + buf.append(" Pending: "); + synchronized (_pendingPeers) { + for (Iterator iter = _pendingPeers.iterator(); iter.hasNext(); ) { + Hash peer = (Hash)iter.next(); + buf.append(peer.toBase64()).append(" "); + } + } + buf.append(" Failed: "); + synchronized (_failedPeers) { + for (Iterator iter = _failedPeers.iterator(); iter.hasNext(); ) { + Hash peer = (Hash)iter.next(); + buf.append(peer.toBase64()).append(" "); + } + } + buf.append(" Successful: "); + synchronized (_successfulPeers) { + for (Iterator iter = _successfulPeers.iterator(); iter.hasNext(); ) { + Hash peer = (Hash)iter.next(); + buf.append(peer.toBase64()).append(" "); + } + } + return buf.toString(); } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/SearchUpdateReplyFoundJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/SearchUpdateReplyFoundJob.java index a6fea7533..6317d78ed 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/SearchUpdateReplyFoundJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/SearchUpdateReplyFoundJob.java @@ -11,58 +11,60 @@ import net.i2p.router.JobImpl; import net.i2p.router.ProfileManager; import net.i2p.router.ReplyJob; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Called after a match to a db search is found * */ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob { - private final static Log _log = new Log(SearchUpdateReplyFoundJob.class); + private Log _log; private I2NPMessage _message; private Hash _peer; private SearchState _state; private KademliaNetworkDatabaseFacade _facade; private SearchJob _job; - - public SearchUpdateReplyFoundJob(RouterInfo peer, SearchState state, 
KademliaNetworkDatabaseFacade facade, SearchJob job) { - super(); - _peer = peer.getIdentity().getHash(); - _state = state; - _facade = facade; - _job = job; + + public SearchUpdateReplyFoundJob(RouterContext context, RouterInfo peer, SearchState state, KademliaNetworkDatabaseFacade facade, SearchJob job) { + super(context); + _log = context.logManager().getLog(SearchUpdateReplyFoundJob.class); + _peer = peer.getIdentity().getHash(); + _state = state; + _facade = facade; + _job = job; } - + public String getName() { return "Update Reply Found for Kademlia Search"; } public void runJob() { - if (_log.shouldLog(Log.INFO)) - _log.info(getJobId() + ": Reply from " + _peer + " with message " + _message.getClass().getName()); - - if (_message instanceof DatabaseStoreMessage) { - long timeToReply = _state.dataFound(_peer); - - DatabaseStoreMessage msg = (DatabaseStoreMessage)_message; - if (msg.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) { - _facade.store(msg.getKey(), msg.getLeaseSet()); - } else if (msg.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) { - if (_log.shouldLog(Log.INFO)) - _log.info(getJobId() + ": dbStore received on search containing router " + msg.getKey() + " with publishDate of " + new Date(msg.getRouterInfo().getPublished())); - _facade.store(msg.getKey(), msg.getRouterInfo()); - } else { - if (_log.shouldLog(Log.ERROR)) - _log.error(getJobId() + ": Unknown db store type?!@ " + msg.getValueType()); - } - - ProfileManager.getInstance().dbLookupSuccessful(_peer, timeToReply); - } else if (_message instanceof DatabaseSearchReplyMessage) { - _job.replyFound((DatabaseSearchReplyMessage)_message, _peer); - } else { - if (_log.shouldLog(Log.ERROR)) - _log.error(getJobId() + ": WTF, reply job matched a strange message: " + _message); - return; - } - - _job.searchNext(); + if (_log.shouldLog(Log.INFO)) + _log.info(getJobId() + ": Reply from " + _peer + " with message " + _message.getClass().getName()); + + if (_message instanceof 
DatabaseStoreMessage) { + long timeToReply = _state.dataFound(_peer); + + DatabaseStoreMessage msg = (DatabaseStoreMessage)_message; + if (msg.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) { + _facade.store(msg.getKey(), msg.getLeaseSet()); + } else if (msg.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) { + if (_log.shouldLog(Log.INFO)) + _log.info(getJobId() + ": dbStore received on search containing router " + msg.getKey() + " with publishDate of " + new Date(msg.getRouterInfo().getPublished())); + _facade.store(msg.getKey(), msg.getRouterInfo()); + } else { + if (_log.shouldLog(Log.ERROR)) + _log.error(getJobId() + ": Unknown db store type?!@ " + msg.getValueType()); + } + + _context.profileManager().dbLookupSuccessful(_peer, timeToReply); + } else if (_message instanceof DatabaseSearchReplyMessage) { + _job.replyFound((DatabaseSearchReplyMessage)_message, _peer); + } else { + if (_log.shouldLog(Log.ERROR)) + _log.error(getJobId() + ": WTF, reply job matched a strange message: " + _message); + return; + } + + _job.searchNext(); } - - public void setMessage(I2NPMessage message) { _message = message; } + + public void setMessage(I2NPMessage message) { _message = message; } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/StartExplorersJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/StartExplorersJob.java index 8b603f301..1a86c1e84 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/StartExplorersJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/StartExplorersJob.java @@ -1,9 +1,9 @@ package net.i2p.router.networkdb.kademlia; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. 
- * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -16,50 +16,52 @@ import net.i2p.data.Hash; import net.i2p.router.JobImpl; import net.i2p.router.JobQueue; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Fire off search jobs for random keys from the explore pool, up to MAX_PER_RUN - * at a time. + * at a time. * */ class StartExplorersJob extends JobImpl { - private final static Log _log = new Log(StartExplorersJob.class); + private Log _log; private KademliaNetworkDatabaseFacade _facade; - + private final static long RERUN_DELAY_MS = 3*60*1000; // every 3 minutes, explore MAX_PER_RUN keys private final static int MAX_PER_RUN = 3; // don't explore more than 1 bucket at a time - public StartExplorersJob(KademliaNetworkDatabaseFacade facade) { - super(); - _facade = facade; + public StartExplorersJob(RouterContext context, KademliaNetworkDatabaseFacade facade) { + super(context); + _log = context.logManager().getLog(StartExplorersJob.class); + _facade = facade; } - + public String getName() { return "Start Explorers Job"; } - public void runJob() { - Set toExplore = selectKeysToExplore(); - _log.debug("Keys to explore during this run: " + toExplore); - _facade.removeFromExploreKeys(toExplore); - for (Iterator iter = toExplore.iterator(); iter.hasNext(); ) { - Hash key = (Hash)iter.next(); - //_log.info("Starting explorer for " + key, new Exception("Exploring!")); - JobQueue.getInstance().addJob(new ExploreJob(_facade, key)); - } - requeue(RERUN_DELAY_MS); + public void runJob() { + Set toExplore = selectKeysToExplore(); + _log.debug("Keys to explore during this run: " + toExplore); + _facade.removeFromExploreKeys(toExplore); + for (Iterator iter = toExplore.iterator(); iter.hasNext(); 
) { + Hash key = (Hash)iter.next(); + //_log.info("Starting explorer for " + key, new Exception("Exploring!")); + _context.jobQueue().addJob(new ExploreJob(_context, _facade, key)); + } + requeue(RERUN_DELAY_MS); } - + /** * Run through the explore pool and pick out some values * */ private Set selectKeysToExplore() { - Set queued = _facade.getExploreKeys(); - if (queued.size() <= MAX_PER_RUN) - return queued; - Set rv = new HashSet(MAX_PER_RUN); - for (Iterator iter = queued.iterator(); iter.hasNext(); ) { - if (rv.size() >= MAX_PER_RUN) break; - rv.add(iter.next()); - } - return rv; + Set queued = _facade.getExploreKeys(); + if (queued.size() <= MAX_PER_RUN) + return queued; + Set rv = new HashSet(MAX_PER_RUN); + for (Iterator iter = queued.iterator(); iter.hasNext(); ) { + if (rv.size() >= MAX_PER_RUN) break; + rv.add(iter.next()); + } + return rv; } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java index 83b9d2d47..5dce76e07 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java @@ -49,15 +49,17 @@ import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.RandomSource; import net.i2p.stat.StatManager; +import net.i2p.router.RouterContext; class StoreJob extends JobImpl { - private final Log _log = new Log(StoreJob.class); + private Log _log; private KademliaNetworkDatabaseFacade _facade; private StoreState _state; private Job _onSuccess; private Job _onFailure; private long _timeoutMs; private long _expiration; + private PeerSelector _peerSelector; private final static int PARALLELIZATION = 1; // how many sent at a time private final static int REDUNDANCY = 2; // we want the data sent to 2 peers @@ -72,22 +74,23 @@ class StoreJob extends JobImpl { */ private final static int EXPLORATORY_REDUNDANCY = 1; private final static int STORE_PRIORITY = 100; - - static { - 
StatManager.getInstance().createRateStat("netDb.storeSent", "How many netDb store messages have we sent?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - } - + /** * Create a new search for the routingKey specified * */ - public StoreJob(KademliaNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs) { + public StoreJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key, + DataStructure data, Job onSuccess, Job onFailure, long timeoutMs) { + super(context); + _log = context.logManager().getLog(StoreJob.class); + _context.statManager().createRateStat("netDb.storeSent", "How many netDb store messages have we sent?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); _facade = facade; _state = new StoreState(key, data); _onSuccess = onSuccess; _onFailure = onFailure; _timeoutMs = timeoutMs; - _expiration = Clock.getInstance().now() + timeoutMs; + _expiration = context.clock().now() + timeoutMs; + _peerSelector = new PeerSelector(context); } public String getName() { return "Kademlia NetDb Store";} @@ -96,7 +99,7 @@ class StoreJob extends JobImpl { } protected boolean isExpired() { - return Clock.getInstance().now() >= _expiration; + return _context.clock().now() >= _expiration; } /** @@ -169,10 +172,11 @@ class StoreJob extends JobImpl { * @return ordered list of Hash objects */ protected List getClosestRouters(Hash key, int numClosest, Set alreadyChecked) { - Hash rkey = RoutingKeyGenerator.getInstance().getRoutingKey(key); + Hash rkey = _context.routingKeyGenerator().getRoutingKey(key); if (_log.shouldLog(Log.DEBUG)) _log.debug("Current routing key for " + key + ": " + rkey); - return PeerSelector.getInstance().selectNearestExplicit(rkey, numClosest, alreadyChecked, _facade.getKBuckets()); + + return _peerSelector.selectNearestExplicit(rkey, numClosest, alreadyChecked, _facade.getKBuckets()); } /** @@ -181,7 +185,7 @@ class StoreJob extends 
JobImpl { * */ protected void sendStore(RouterInfo router) { - DatabaseStoreMessage msg = new DatabaseStoreMessage(); + DatabaseStoreMessage msg = new DatabaseStoreMessage(_context); msg.setKey(_state.getTarget()); if (_state.getData() instanceof RouterInfo) msg.setRouterInfo((RouterInfo)_state.getData()); @@ -189,9 +193,9 @@ class StoreJob extends JobImpl { msg.setLeaseSet((LeaseSet)_state.getData()); else throw new IllegalArgumentException("Storing an unknown data type! " + _state.getData()); - msg.setMessageExpiration(new Date(Clock.getInstance().now() + _timeoutMs)); + msg.setMessageExpiration(new Date(_context.clock().now() + _timeoutMs)); - if (router.getIdentity().equals(Router.getInstance().getRouterInfo().getIdentity())) { + if (router.getIdentity().equals(_context.router().getRouterInfo().getIdentity())) { // don't send it to ourselves if (_log.shouldLog(Log.ERROR)) _log.error("Dont send store to ourselves - why did we try?"); @@ -214,7 +218,7 @@ class StoreJob extends JobImpl { TunnelInfo info = null; TunnelId outboundTunnelId = selectOutboundTunnel(); if (outboundTunnelId != null) - info = TunnelManagerFacade.getInstance().getTunnelInfo(outboundTunnelId); + info = _context.tunnelManager().getTunnelInfo(outboundTunnelId); if (info == null) { if (_log.shouldLog(Log.ERROR)) _log.error("selectOutboundTunnel didn't find a valid tunnel! 
outboundTunnelId = " @@ -226,11 +230,11 @@ class StoreJob extends JobImpl { + " is going to " + peer.getIdentity().getHash() + " via outbound tunnel: " + info); // send it out our outboundTunnelId with instructions for our endpoint to forward it // to the router specified (though no particular tunnelId on the target) - Job j = new SendTunnelMessageJob(msg, outboundTunnelId, peer.getIdentity().getHash(), - null, sent, null, fail, null, _expiration-Clock.getInstance().now(), + Job j = new SendTunnelMessageJob(_context, msg, outboundTunnelId, peer.getIdentity().getHash(), + null, sent, null, fail, null, _expiration-_context.clock().now(), STORE_PRIORITY); - JobQueue.getInstance().addJob(j); - StatManager.getInstance().addRateData("netDb.storeSent", 1, 0); + _context.jobQueue().addJob(j); + _context.statManager().addRateData("netDb.storeSent", 1, 0); } private TunnelId selectOutboundTunnel() { @@ -240,7 +244,7 @@ class StoreJob extends JobImpl { criteria.setReliabilityPriority(20); criteria.setMaximumTunnelsRequired(1); criteria.setMinimumTunnelsRequired(1); - List tunnelIds = TunnelManagerFacade.getInstance().selectOutboundTunnelIds(criteria); + List tunnelIds = _context.tunnelManager().selectOutboundTunnelIds(criteria); if (tunnelIds.size() <= 0) { _log.error("No outbound tunnels?!"); return null; @@ -263,7 +267,7 @@ class StoreJob extends JobImpl { private Hash _peer; public OptimisticSendSuccess(RouterInfo peer) { - super(); + super(StoreJob.this._context); _peer = peer.getIdentity().getHash(); } @@ -291,12 +295,12 @@ class StoreJob extends JobImpl { protected class FailedJob extends JobImpl { private Hash _peer; public FailedJob(RouterInfo peer) { - super(); + super(StoreJob.this._context); _peer = peer.getIdentity().getHash(); } public void runJob() { _state.replyTimeout(_peer); - ProfileManager.getInstance().dbStoreFailed(_peer); + _context.profileManager().dbStoreFailed(_peer); sendNext(); } public String getName() { return "Kademlia Store Failed"; } @@ -352,7 
+356,7 @@ class StoreJob extends JobImpl { if (_log.shouldLog(Log.DEBUG)) _log.debug("State of successful send: " + _state); if (_onSuccess != null) - JobQueue.getInstance().addJob(_onSuccess); + _context.jobQueue().addJob(_onSuccess); _facade.noteKeySent(_state.getTarget()); } @@ -365,10 +369,10 @@ class StoreJob extends JobImpl { if (_log.shouldLog(Log.DEBUG)) _log.debug("State of failed send: " + _state, new Exception("Who failed me?")); if (_onFailure != null) - JobQueue.getInstance().addJob(_onFailure); + _context.jobQueue().addJob(_onFailure); } - protected static class StoreState { + protected class StoreState { private Hash _key; private DataStructure _data; private HashSet _pendingPeers; @@ -390,7 +394,7 @@ class StoreJob extends JobImpl { _successfulPeers = new HashSet(16); _successfulExploratoryPeers = new HashSet(16); _completed = -1; - _started = Clock.getInstance().now(); + _started = _context.clock().now(); } public Hash getTarget() { return _key; } @@ -423,7 +427,7 @@ class StoreJob extends JobImpl { public boolean completed() { return _completed != -1; } public void complete(boolean completed) { if (completed) - _completed = Clock.getInstance().now(); + _completed = _context.clock().now(); } public long getWhenStarted() { return _started; } @@ -433,7 +437,7 @@ class StoreJob extends JobImpl { synchronized (_pendingPeers) { _pendingPeers.addAll(pending); for (Iterator iter = pending.iterator(); iter.hasNext(); ) - _pendingPeerTimes.put(iter.next(), new Long(Clock.getInstance().now())); + _pendingPeerTimes.put(iter.next(), new Long(_context.clock().now())); } synchronized (_attemptedPeers) { _attemptedPeers.addAll(pending); @@ -446,7 +450,7 @@ class StoreJob extends JobImpl { _pendingPeers.remove(peer); Long when = (Long)_pendingPeerTimes.remove(peer); if (when != null) - rv = Clock.getInstance().now() - when.longValue(); + rv = _context.clock().now() - when.longValue(); } synchronized (_successfulPeers) { _successfulPeers.add(peer); @@ -460,7 +464,7 
@@ class StoreJob extends JobImpl { _pendingPeers.remove(peer); Long when = (Long)_pendingPeerTimes.remove(peer); if (when != null) - rv = Clock.getInstance().now() - when.longValue(); + rv = _context.clock().now() - when.longValue(); } synchronized (_successfulExploratoryPeers) { _successfulExploratoryPeers.add(peer); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/TransientDataStore.java b/router/java/src/net/i2p/router/networkdb/kademlia/TransientDataStore.java index 4f082ac31..af7250a26 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/TransientDataStore.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/TransientDataStore.java @@ -1,9 +1,9 @@ package net.i2p.router.networkdb.kademlia; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -23,33 +23,37 @@ import net.i2p.data.RouterInfo; import net.i2p.router.ProfileManager; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; class TransientDataStore implements DataStore { - private final static Log _log = new Log(TransientDataStore.class); + private Log _log; private Map _data; // hash --> DataStructure + protected RouterContext _context; - public TransientDataStore() { - _data = new HashMap(1024); - if (_log.shouldLog(Log.INFO)) - _log.info("Data Store initialized"); + public TransientDataStore(RouterContext ctx) { + _context = ctx; + _log = ctx.logManager().getLog(TransientDataStore.class); + _data = new HashMap(1024); + if (_log.shouldLog(Log.INFO)) + _log.info("Data Store initialized"); } public Set getKeys() { - synchronized (_data) { - return new HashSet(_data.keySet()); - } + synchronized (_data) { + return new HashSet(_data.keySet()); + } } - public DataStructure get(Hash key) { - synchronized (_data) { - return (DataStructure)_data.get(key); - } + public DataStructure get(Hash key) { + synchronized (_data) { + return (DataStructure)_data.get(key); + } } public boolean isKnown(Hash key) { - synchronized (_data) { - return _data.containsKey(key); - } + synchronized (_data) { + return _data.containsKey(key); + } } /** nothing published more than 5 minutes in the future */ @@ -58,95 +62,95 @@ class TransientDataStore implements DataStore { private final static long MAX_FUTURE_EXPIRATION_DATE = 3*60*60*1000; public void put(Hash key, DataStructure data) { - if (data == null) return; - _log.debug("Storing key " + key); - Object old = null; - synchronized (_data) { - old = _data.put(key, data); - } - if (data instanceof RouterInfo) { - ProfileManager.getInstance().heardAbout(key); - RouterInfo ri = (RouterInfo)data; - if (old != null) { - RouterInfo ori = (RouterInfo)old; - if (ri.getPublished() < ori.getPublished()) { - if (_log.shouldLog(Log.INFO)) - _log.info("Almost clobbered an old router! 
" + key + ": [old published on " + new Date(ori.getPublished()) + " new on " + new Date(ri.getPublished()) + "]"); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Number of router options for " + key + ": " + ri.getOptions().size() + " (old one had: " + ori.getOptions().size() + ")", new Exception("Updated routerInfo")); - synchronized (_data) { - _data.put(key, old); - } - } else if (ri.getPublished() > Clock.getInstance().now() + MAX_FUTURE_PUBLISH_DATE) { - if (_log.shouldLog(Log.INFO)) - _log.info("Hmm, someone tried to give us something with the publication date really far in the future (" + new Date(ri.getPublished()) + "), dropping it"); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Number of router options for " + key + ": " + ri.getOptions().size() + " (old one had: " + ori.getOptions().size() + ")", new Exception("Updated routerInfo")); - synchronized (_data) { - _data.put(key, old); - } - } else { - if (_log.shouldLog(Log.INFO)) - _log.info("Updated the old router for " + key + ": [old published on " + new Date(ori.getPublished()) + " new on " + new Date(ri.getPublished()) + "]"); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Number of router options for " + key + ": " + ri.getOptions().size() + " (old one had: " + ori.getOptions().size() + ")", new Exception("Updated routerInfo")); - } - } else { - if (_log.shouldLog(Log.INFO)) - _log.info("Brand new router for " + key + ": published on " + new Date(ri.getPublished())); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Number of router options for " + key + ": " + ri.getOptions().size(), new Exception("Updated routerInfo")); - } - } else if (data instanceof LeaseSet) { - LeaseSet ls = (LeaseSet)data; - if (old != null) { - LeaseSet ols = (LeaseSet)old; - if (ls.getEarliestLeaseDate() < ols.getEarliestLeaseDate()) { - if (_log.shouldLog(Log.INFO)) - _log.info("Almost clobbered an old leaseSet! 
" + key + ": [old published on " + new Date(ols.getEarliestLeaseDate()) + " new on " + new Date(ls.getEarliestLeaseDate()) + "]"); - synchronized (_data) { - _data.put(key, old); - } - } else if (ls.getEarliestLeaseDate() > Clock.getInstance().now() + MAX_FUTURE_EXPIRATION_DATE) { - if (_log.shouldLog(Log.INFO)) - _log.info("Hmm, someone tried to give us something with the expiration date really far in the future (" + new Date(ls.getEarliestLeaseDate()) + "), dropping it"); - synchronized (_data) { - _data.put(key, old); - } - } - } - } + if (data == null) return; + _log.debug("Storing key " + key); + Object old = null; + synchronized (_data) { + old = _data.put(key, data); + } + if (data instanceof RouterInfo) { + _context.profileManager().heardAbout(key); + RouterInfo ri = (RouterInfo)data; + if (old != null) { + RouterInfo ori = (RouterInfo)old; + if (ri.getPublished() < ori.getPublished()) { + if (_log.shouldLog(Log.INFO)) + _log.info("Almost clobbered an old router! " + key + ": [old published on " + new Date(ori.getPublished()) + " new on " + new Date(ri.getPublished()) + "]"); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Number of router options for " + key + ": " + ri.getOptions().size() + " (old one had: " + ori.getOptions().size() + ")", new Exception("Updated routerInfo")); + synchronized (_data) { + _data.put(key, old); + } + } else if (ri.getPublished() > _context.clock().now() + MAX_FUTURE_PUBLISH_DATE) { + if (_log.shouldLog(Log.INFO)) + _log.info("Hmm, someone tried to give us something with the publication date really far in the future (" + new Date(ri.getPublished()) + "), dropping it"); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Number of router options for " + key + ": " + ri.getOptions().size() + " (old one had: " + ori.getOptions().size() + ")", new Exception("Updated routerInfo")); + synchronized (_data) { + _data.put(key, old); + } + } else { + if (_log.shouldLog(Log.INFO)) + _log.info("Updated the old router for " + key + ": [old 
published on " + new Date(ori.getPublished()) + " new on " + new Date(ri.getPublished()) + "]"); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Number of router options for " + key + ": " + ri.getOptions().size() + " (old one had: " + ori.getOptions().size() + ")", new Exception("Updated routerInfo")); + } + } else { + if (_log.shouldLog(Log.INFO)) + _log.info("Brand new router for " + key + ": published on " + new Date(ri.getPublished())); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Number of router options for " + key + ": " + ri.getOptions().size(), new Exception("Updated routerInfo")); + } + } else if (data instanceof LeaseSet) { + LeaseSet ls = (LeaseSet)data; + if (old != null) { + LeaseSet ols = (LeaseSet)old; + if (ls.getEarliestLeaseDate() < ols.getEarliestLeaseDate()) { + if (_log.shouldLog(Log.INFO)) + _log.info("Almost clobbered an old leaseSet! " + key + ": [old published on " + new Date(ols.getEarliestLeaseDate()) + " new on " + new Date(ls.getEarliestLeaseDate()) + "]"); + synchronized (_data) { + _data.put(key, old); + } + } else if (ls.getEarliestLeaseDate() > _context.clock().now() + MAX_FUTURE_EXPIRATION_DATE) { + if (_log.shouldLog(Log.INFO)) + _log.info("Hmm, someone tried to give us something with the expiration date really far in the future (" + new Date(ls.getEarliestLeaseDate()) + "), dropping it"); + synchronized (_data) { + _data.put(key, old); + } + } + } + } } - public int hashCode() { - return DataHelper.hashCode(_data); + public int hashCode() { + return DataHelper.hashCode(_data); } public boolean equals(Object obj) { - if ( (obj == null) || (obj.getClass() != getClass()) ) return false; - TransientDataStore ds = (TransientDataStore)obj; - return DataHelper.eq(ds._data, _data); + if ( (obj == null) || (obj.getClass() != getClass()) ) return false; + TransientDataStore ds = (TransientDataStore)obj; + return DataHelper.eq(ds._data, _data); } public String toString() { - StringBuffer buf = new StringBuffer(); - buf.append("Transient 
DataStore: ").append(_data.size()).append("\nKeys: "); - Map data = new HashMap(); - synchronized (_data) { - data.putAll(_data); - } - for (Iterator iter = data.keySet().iterator(); iter.hasNext();) { - Hash key = (Hash)iter.next(); - DataStructure dp = (DataStructure)data.get(key); - buf.append("\n\t*Key: ").append(key.toString()).append("\n\tContent: ").append(dp.toString()); - } - buf.append("\n"); - return buf.toString(); + StringBuffer buf = new StringBuffer(); + buf.append("Transient DataStore: ").append(_data.size()).append("\nKeys: "); + Map data = new HashMap(); + synchronized (_data) { + data.putAll(_data); + } + for (Iterator iter = data.keySet().iterator(); iter.hasNext();) { + Hash key = (Hash)iter.next(); + DataStructure dp = (DataStructure)data.get(key); + buf.append("\n\t*Key: ").append(key.toString()).append("\n\tContent: ").append(dp.toString()); + } + buf.append("\n"); + return buf.toString(); } public DataStructure remove(Hash key) { - synchronized (_data) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Removing key " + key.toBase64()); - return (DataStructure)_data.remove(key); - } + synchronized (_data) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Removing key " + key.toBase64()); + return (DataStructure)_data.remove(key); + } } } diff --git a/router/java/src/net/i2p/router/peermanager/Calculator.java b/router/java/src/net/i2p/router/peermanager/Calculator.java index 8796d4688..99fb99fd8 100644 --- a/router/java/src/net/i2p/router/peermanager/Calculator.java +++ b/router/java/src/net/i2p/router/peermanager/Calculator.java @@ -6,17 +6,7 @@ package net.i2p.router.peermanager; * as to coordinate via statics the four known aspects. 
* */ -class Calculator { - private static Calculator _isFailingCalc = new IsFailingCalculator(); - private static Calculator _integrationCalc = new IntegrationCalculator(); - private static Calculator _speedCalc = new SpeedCalculator(); - private static Calculator _reliabilityCalc = new ReliabilityCalculator(); - - public static Calculator getIsFailingCalculator() { return _isFailingCalc; } - public static Calculator getIntegrationCalculator() { return _integrationCalc; } - public static Calculator getSpeedCalculator() { return _speedCalc; } - public static Calculator getReliabilityCalculator() { return _reliabilityCalc; } - +public class Calculator { /** * Evaluate the profile according to the current metric */ @@ -24,5 +14,5 @@ class Calculator { /** * Evaluate the profile according to the current metric */ - public boolean calcBoolean(PeerProfile profile) { return true; } + public boolean calcBoolean(PeerProfile profile) { return false; } } diff --git a/router/java/src/net/i2p/router/peermanager/DBHistory.java b/router/java/src/net/i2p/router/peermanager/DBHistory.java index 2a41eebb2..d3d1cbda5 100644 --- a/router/java/src/net/i2p/router/peermanager/DBHistory.java +++ b/router/java/src/net/i2p/router/peermanager/DBHistory.java @@ -5,7 +5,7 @@ import java.io.OutputStream; import java.util.Properties; import net.i2p.stat.RateStat; -import net.i2p.util.Clock; +import net.i2p.router.RouterContext; import net.i2p.util.Log; /** @@ -13,7 +13,8 @@ import net.i2p.util.Log; * */ public class DBHistory { - private static final Log _log = new Log(DBHistory.class); + private Log _log; + private RouterContext _context; private long _successfulLookups; private long _failedLookups; private RateStat _failedLookupRate; @@ -26,23 +27,25 @@ public class DBHistory { private long _lastLookupReceived; private long _unpromptedDbStoreNew; private long _unpromptedDbStoreOld; - - public DBHistory() { - _successfulLookups = 0; - _failedLookups = 0; - _failedLookupRate = null; - 
_lookupReplyNew = 0; - _lookupReplyOld = 0; - _lookupReplyDuplicate = 0; - _lookupReplyInvalid = 0; - _lookupsReceived = 0; - _avgDelayBetweenLookupsReceived = 0; - _lastLookupReceived = -1; - _unpromptedDbStoreNew = 0; - _unpromptedDbStoreOld = 0; - createRates(); + + public DBHistory(RouterContext context) { + _context = context; + _log = context.logManager().getLog(DBHistory.class); + _successfulLookups = 0; + _failedLookups = 0; + _failedLookupRate = null; + _lookupReplyNew = 0; + _lookupReplyOld = 0; + _lookupReplyDuplicate = 0; + _lookupReplyInvalid = 0; + _lookupsReceived = 0; + _avgDelayBetweenLookupsReceived = 0; + _lastLookupReceived = -1; + _unpromptedDbStoreNew = 0; + _unpromptedDbStoreOld = 0; + createRates(); } - + /** how many times we have sent them a db lookup and received the value back from them */ public long getSuccessfulLookups() { return _successfulLookups; } /** how many times we have sent them a db lookup and not received the value or a lookup reply */ @@ -65,26 +68,26 @@ public class DBHistory { public long getUnpromptedDbStoreNew() { return _unpromptedDbStoreNew; } /** how many times have they sent us data we didn't ask for but that we have seen? */ public long getUnpromptedDbStoreOld() { return _unpromptedDbStoreOld; } - /** + /** * how often does the peer fail to reply to a lookup request, broken into 1 hour and 1 day periods. * */ public RateStat getFailedLookupRate() { return _failedLookupRate; } - + /** * Note that the peer was not only able to respond to the lookup, but sent us * the data we wanted! 
* */ public void lookupSuccessful() { - _successfulLookups++; + _successfulLookups++; } /** * Note that the peer failed to respond to the db lookup in any way */ public void lookupFailed() { - _failedLookups++; - _failedLookupRate.addData(1, 0); + _failedLookups++; + _failedLookupRate.addData(1, 0); } /** * Receive a lookup reply from the peer, where they gave us the specified info @@ -96,40 +99,40 @@ public class DBHistory { * themselves if they don't know anyone else) */ public void lookupReply(int newPeers, int oldPeers, int invalid, int duplicate) { - _lookupReplyNew += newPeers; - _lookupReplyOld += oldPeers; - _lookupReplyInvalid += invalid; - _lookupReplyDuplicate += duplicate; + _lookupReplyNew += newPeers; + _lookupReplyOld += oldPeers; + _lookupReplyInvalid += invalid; + _lookupReplyDuplicate += duplicate; } /** * Note that the peer sent us a lookup * */ public void lookupReceived() { - long now = Clock.getInstance().now(); - long delay = now - _lastLookupReceived; - _lastLookupReceived = now; - _lookupsReceived++; - if (_avgDelayBetweenLookupsReceived <= 0) { - _avgDelayBetweenLookupsReceived = delay; - } else { - if (delay > _avgDelayBetweenLookupsReceived) - _avgDelayBetweenLookupsReceived = _avgDelayBetweenLookupsReceived + (delay / _lookupsReceived); - else - _avgDelayBetweenLookupsReceived = _avgDelayBetweenLookupsReceived - (delay / _lookupsReceived); - } + long now = _context.clock().now(); + long delay = now - _lastLookupReceived; + _lastLookupReceived = now; + _lookupsReceived++; + if (_avgDelayBetweenLookupsReceived <= 0) { + _avgDelayBetweenLookupsReceived = delay; + } else { + if (delay > _avgDelayBetweenLookupsReceived) + _avgDelayBetweenLookupsReceived = _avgDelayBetweenLookupsReceived + (delay / _lookupsReceived); + else + _avgDelayBetweenLookupsReceived = _avgDelayBetweenLookupsReceived - (delay / _lookupsReceived); + } } /** * Note that the peer sent us a data point without us asking for it * @param wasNew whether we already knew about 
this data point or not */ public void unpromptedStoreReceived(boolean wasNew) { - if (wasNew) - _unpromptedDbStoreNew++; - else - _unpromptedDbStoreOld++; + if (wasNew) + _unpromptedDbStoreNew++; + else + _unpromptedDbStoreOld++; } - + public void setSuccessfulLookups(long num) { _successfulLookups = num; } public void setFailedLookups(long num) { _failedLookups = num; } public void setLookupReplyNew(long num) { _lookupReplyNew = num; } @@ -142,75 +145,75 @@ public class DBHistory { public void setUnpromptedDbStoreNew(long num) { _unpromptedDbStoreNew = num; } public void setUnpromptedDbStoreOld(long num) { _unpromptedDbStoreOld = num; } - public void coallesceStats() { - _log.debug("Coallescing stats"); - _failedLookupRate.coallesceStats(); + public void coallesceStats() { + _log.debug("Coallescing stats"); + _failedLookupRate.coallesceStats(); } private final static String NL = System.getProperty("line.separator"); - + public void store(OutputStream out) throws IOException { - StringBuffer buf = new StringBuffer(512); - buf.append(NL); - buf.append("#################").append(NL); - buf.append("# DB history").append(NL); - buf.append("###").append(NL); - add(buf, "successfulLookups", _successfulLookups, "How many times have they successfully given us what we wanted when looking for it?"); - add(buf, "failedLookups", _failedLookups, "How many times have we sent them a db lookup and they didn't reply?"); - add(buf, "lookupsReceived", _lookupsReceived, "How many lookups have they sent us?"); - add(buf, "lookupReplyDuplicate", _lookupReplyDuplicate, "How many of their reply values to our lookups were something we asked them not to send us?"); - add(buf, "lookupReplyInvalid", _lookupReplyInvalid, "How many of their reply values to our lookups were invalid (expired, forged, corrupted)?"); - add(buf, "lookupReplyNew", _lookupReplyNew, "How many of their reply values to our lookups were brand new to us?"); - add(buf, "lookupReplyOld", _lookupReplyOld, "How many of their 
reply values to our lookups were something we had seen before?"); - add(buf, "unpromptedDbStoreNew", _unpromptedDbStoreNew, "How times have they sent us something we didn't ask for and hadn't seen before?"); - add(buf, "unpromptedDbStoreOld", _unpromptedDbStoreOld, "How times have they sent us something we didn't ask for but have seen before?"); - add(buf, "lastLookupReceived", _lastLookupReceived, "When was the last time they send us a lookup? (milliseconds since the epoch)"); - add(buf, "avgDelayBetweenLookupsReceived", _avgDelayBetweenLookupsReceived, "How long is it typically between each db lookup they send us? (in milliseconds)"); - out.write(buf.toString().getBytes()); - _failedLookupRate.store(out, "dbHistory.failedLookupRate"); - _log.debug("Writing out dbHistory.failedLookupRate"); + StringBuffer buf = new StringBuffer(512); + buf.append(NL); + buf.append("#################").append(NL); + buf.append("# DB history").append(NL); + buf.append("###").append(NL); + add(buf, "successfulLookups", _successfulLookups, "How many times have they successfully given us what we wanted when looking for it?"); + add(buf, "failedLookups", _failedLookups, "How many times have we sent them a db lookup and they didn't reply?"); + add(buf, "lookupsReceived", _lookupsReceived, "How many lookups have they sent us?"); + add(buf, "lookupReplyDuplicate", _lookupReplyDuplicate, "How many of their reply values to our lookups were something we asked them not to send us?"); + add(buf, "lookupReplyInvalid", _lookupReplyInvalid, "How many of their reply values to our lookups were invalid (expired, forged, corrupted)?"); + add(buf, "lookupReplyNew", _lookupReplyNew, "How many of their reply values to our lookups were brand new to us?"); + add(buf, "lookupReplyOld", _lookupReplyOld, "How many of their reply values to our lookups were something we had seen before?"); + add(buf, "unpromptedDbStoreNew", _unpromptedDbStoreNew, "How times have they sent us something we didn't ask for and 
hadn't seen before?"); + add(buf, "unpromptedDbStoreOld", _unpromptedDbStoreOld, "How times have they sent us something we didn't ask for but have seen before?"); + add(buf, "lastLookupReceived", _lastLookupReceived, "When was the last time they send us a lookup? (milliseconds since the epoch)"); + add(buf, "avgDelayBetweenLookupsReceived", _avgDelayBetweenLookupsReceived, "How long is it typically between each db lookup they send us? (in milliseconds)"); + out.write(buf.toString().getBytes()); + _failedLookupRate.store(out, "dbHistory.failedLookupRate"); + _log.debug("Writing out dbHistory.failedLookupRate"); } - + private void add(StringBuffer buf, String name, long val, String description) { - buf.append("# ").append(name.toUpperCase()).append(NL).append("# ").append(description).append(NL); - buf.append("dbHistory.").append(name).append('=').append(val).append(NL).append(NL); + buf.append("# ").append(name.toUpperCase()).append(NL).append("# ").append(description).append(NL); + buf.append("dbHistory.").append(name).append('=').append(val).append(NL).append(NL); } - + public void load(Properties props) { - _successfulLookups = getLong(props, "dbHistory.successfulLookups"); - _failedLookups = getLong(props, "dbHistory.failedLookups"); - _lookupsReceived = getLong(props, "dbHistory.lookupsReceived"); - _lookupReplyDuplicate = getLong(props, "dbHistory.lookupReplyDuplicate"); - _lookupReplyInvalid = getLong(props, "dbHistory.lookupReplyInvalid"); - _lookupReplyNew = getLong(props, "dbHistory.lookupReplyNew"); - _lookupReplyOld = getLong(props, "dbHistory.lookupReplyOld"); - _unpromptedDbStoreNew = getLong(props, "dbHistory.unpromptedDbStoreNew"); - _unpromptedDbStoreOld = getLong(props, "dbHistory.unpromptedDbStoreOld"); - _lastLookupReceived = getLong(props, "dbHistory.lastLookupReceived"); - _avgDelayBetweenLookupsReceived = getLong(props, "dbHistory.avgDelayBetweenLookupsReceived"); - try { - _failedLookupRate.load(props, "dbHistory.failedLookupRate", true); - 
_log.debug("Loading dbHistory.failedLookupRate"); - } catch (IllegalArgumentException iae) { - _log.warn("DB History failed lookup rate is corrupt, resetting", iae); - createRates(); - } + _successfulLookups = getLong(props, "dbHistory.successfulLookups"); + _failedLookups = getLong(props, "dbHistory.failedLookups"); + _lookupsReceived = getLong(props, "dbHistory.lookupsReceived"); + _lookupReplyDuplicate = getLong(props, "dbHistory.lookupReplyDuplicate"); + _lookupReplyInvalid = getLong(props, "dbHistory.lookupReplyInvalid"); + _lookupReplyNew = getLong(props, "dbHistory.lookupReplyNew"); + _lookupReplyOld = getLong(props, "dbHistory.lookupReplyOld"); + _unpromptedDbStoreNew = getLong(props, "dbHistory.unpromptedDbStoreNew"); + _unpromptedDbStoreOld = getLong(props, "dbHistory.unpromptedDbStoreOld"); + _lastLookupReceived = getLong(props, "dbHistory.lastLookupReceived"); + _avgDelayBetweenLookupsReceived = getLong(props, "dbHistory.avgDelayBetweenLookupsReceived"); + try { + _failedLookupRate.load(props, "dbHistory.failedLookupRate", true); + _log.debug("Loading dbHistory.failedLookupRate"); + } catch (IllegalArgumentException iae) { + _log.warn("DB History failed lookup rate is corrupt, resetting", iae); + createRates(); + } } private void createRates() { - _failedLookupRate = new RateStat("dbHistory.failedLookupRate", "How often does this peer to respond to a lookup?", "dbHistory", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + _failedLookupRate = new RateStat("dbHistory.failedLookupRate", "How often does this peer to respond to a lookup?", "dbHistory", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); } private final static long getLong(Properties props, String key) { - String val = props.getProperty(key); - if (val != null) { - try { - return Long.parseLong(val); - } catch (NumberFormatException nfe) { - return 0; - } - } - return 0; + String val = props.getProperty(key); + if (val != null) { + try { + return Long.parseLong(val); + } catch 
(NumberFormatException nfe) { + return 0; + } + } + return 0; } } diff --git a/router/java/src/net/i2p/router/peermanager/EvaluateProfilesJob.java b/router/java/src/net/i2p/router/peermanager/EvaluateProfilesJob.java index faa20d653..c9df93012 100644 --- a/router/java/src/net/i2p/router/peermanager/EvaluateProfilesJob.java +++ b/router/java/src/net/i2p/router/peermanager/EvaluateProfilesJob.java @@ -8,43 +8,47 @@ import net.i2p.data.Hash; import net.i2p.router.JobImpl; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Run across all of the profiles, coallescing the stats and reorganizing them - * into appropriate groups. The stat coallesce must be run at least once a minute, + * into appropriate groups. The stat coallesce must be run at least once a minute, * so if the group reorg wants to get changed, this may want to be split into two * jobs. * */ class EvaluateProfilesJob extends JobImpl { - private final static Log _log = new Log(EvaluateProfilesJob.class); + private Log _log; - public EvaluateProfilesJob() {} + public EvaluateProfilesJob(RouterContext ctx) { + super(ctx); + _log = ctx.logManager().getLog(EvaluateProfilesJob.class); + } public String getName() { return "Evaluate peer profiles"; } public void runJob() { - try { - long start = Clock.getInstance().now(); - Set allPeers = ProfileOrganizer.getInstance().selectAllPeers(); - long afterSelect = Clock.getInstance().now(); - for (Iterator iter = allPeers.iterator(); iter.hasNext(); ) { - Hash peer = (Hash)iter.next(); - PeerProfile profile = ProfileOrganizer.getInstance().getProfile(peer); - if (profile != null) - profile.coallesceStats(); - } - long afterCoallesce = Clock.getInstance().now(); - ProfileOrganizer.getInstance().reorganize(); - long afterReorganize = Clock.getInstance().now(); - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Profiles coallesced and reorganized. 
total: " + allPeers.size() + ", selectAll: " + (afterSelect-start) + "ms, coallesce: " + (afterCoallesce-afterSelect) + "ms, reorganize: " + (afterReorganize-afterSelect)); - } catch (Throwable t) { - _log.log(Log.CRIT, "Error evaluating profiles", t); - } finally { - requeue(30*1000); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Requeued for " + new Date(getTiming().getStartAfter())); - } + try { + long start = _context.clock().now(); + Set allPeers = _context.profileOrganizer().selectAllPeers(); + long afterSelect = _context.clock().now(); + for (Iterator iter = allPeers.iterator(); iter.hasNext(); ) { + Hash peer = (Hash)iter.next(); + PeerProfile profile = _context.profileOrganizer().getProfile(peer); + if (profile != null) + profile.coallesceStats(); + } + long afterCoallesce = _context.clock().now(); + _context.profileOrganizer().reorganize(); + long afterReorganize = _context.clock().now(); + + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Profiles coallesced and reorganized. total: " + allPeers.size() + ", selectAll: " + (afterSelect-start) + "ms, coallesce: " + (afterCoallesce-afterSelect) + "ms, reorganize: " + (afterReorganize-afterSelect)); + } catch (Throwable t) { + _log.log(Log.CRIT, "Error evaluating profiles", t); + } finally { + requeue(30*1000); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Requeued for " + new Date(getTiming().getStartAfter())); + } } } diff --git a/router/java/src/net/i2p/router/peermanager/IntegrationCalculator.java b/router/java/src/net/i2p/router/peermanager/IntegrationCalculator.java index 569d187c3..0c6f6b8bc 100644 --- a/router/java/src/net/i2p/router/peermanager/IntegrationCalculator.java +++ b/router/java/src/net/i2p/router/peermanager/IntegrationCalculator.java @@ -1,18 +1,25 @@ package net.i2p.router.peermanager; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** - * Determine how well integrated the peer is - how likely they will be useful + * Determine how well integrated the peer is - how likely 
they will be useful * to us if we are trying to get further connected. * */ -class IntegrationCalculator extends Calculator { - private final static Log _log = new Log(IntegrationCalculator.class); +public class IntegrationCalculator extends Calculator { + private Log _log; + private RouterContext _context; + + public IntegrationCalculator(RouterContext context) { + _context = context; + _log = context.logManager().getLog(IntegrationCalculator.class); + } public double calc(PeerProfile profile) { - long val = profile.getDbIntroduction().getRate(24*60*60*1000l).getCurrentEventCount(); - val += profile.getIntegrationBonus(); - return val; + long val = profile.getDbIntroduction().getRate(24*60*60*1000l).getCurrentEventCount(); + val += profile.getIntegrationBonus(); + return val; } } diff --git a/router/java/src/net/i2p/router/peermanager/IsFailingCalculator.java b/router/java/src/net/i2p/router/peermanager/IsFailingCalculator.java index 4bda17112..3a39f77b8 100644 --- a/router/java/src/net/i2p/router/peermanager/IsFailingCalculator.java +++ b/router/java/src/net/i2p/router/peermanager/IsFailingCalculator.java @@ -2,12 +2,13 @@ package net.i2p.router.peermanager; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Simple boolean calculation to determine whether the given profile is "failing" - - * meaning we shouldn't bother trying to get them to do something (however, if we + * meaning we shouldn't bother trying to get them to do something. However, if we * have a specific need to contact them in particular - e.g. instructions in a garlic - * or leaseSet - we will try). The currently implemented algorithm determines that + * or leaseSet - we will try. The currently implemented algorithm determines that * a profile is failing if withing the last few minutes, they've done something bad:
      *
    • It has a comm error (TCP disconnect, etc) in the last minute or two
    • *
    • They've failed to respond to a db message in the last minute or two
    • @@ -16,35 +17,41 @@ import net.i2p.util.Log; *
    * */ -class IsFailingCalculator extends Calculator { - private final static Log _log = new Log(IsFailingCalculator.class); +public class IsFailingCalculator extends Calculator { + private Log _log; + private RouterContext _context; /** if they haven't b0rked in the last 5 minutes, they're ok */ private final static long GRACE_PERIOD = 5*60*1000; + public IsFailingCalculator(RouterContext context) { + _context = context; + _log = context.logManager().getLog(IsFailingCalculator.class); + } + public boolean calcBoolean(PeerProfile profile) { - // have we failed in the last 119 seconds? - if ( (profile.getCommError().getRate(60*1000).getCurrentEventCount() > 0) || - (profile.getCommError().getRate(60*1000).getLastEventCount() > 0) ) { - return true; - } else { - if ( (profile.getDBHistory().getFailedLookupRate().getRate(60*1000).getCurrentEventCount() > 0) || - (profile.getDBHistory().getFailedLookupRate().getRate(60*1000).getLastEventCount() > 0) ) { - // are they overloaded (or disconnected)? - return true; - } - - long recently = Clock.getInstance().now() - GRACE_PERIOD; - - if (profile.getTunnelHistory().getLastRejected() >= recently) { - // have they refused to participate in a tunnel in the last 5 minutes? - return true; - } - - if (profile.getLastSendFailed() >= recently) - return true; - - return false; - } + // have we failed in the last 119 seconds? + if ( (profile.getCommError().getRate(60*1000).getCurrentEventCount() > 0) || + (profile.getCommError().getRate(60*1000).getLastEventCount() > 0) ) { + return true; + } else { + if ( (profile.getDBHistory().getFailedLookupRate().getRate(60*1000).getCurrentEventCount() > 0) || + (profile.getDBHistory().getFailedLookupRate().getRate(60*1000).getLastEventCount() > 0) ) { + // are they overloaded (or disconnected)? 
+ return true; + } + + long recently = _context.clock().now() - GRACE_PERIOD; + + if (profile.getTunnelHistory().getLastRejected() >= recently) { + // have they refused to participate in a tunnel in the last 5 minutes? + return true; + } + + if (profile.getLastSendFailed() >= recently) + return true; + + return false; + } } } diff --git a/router/java/src/net/i2p/router/peermanager/PeerManager.java b/router/java/src/net/i2p/router/peermanager/PeerManager.java index 9c0625ed8..09f70b393 100644 --- a/router/java/src/net/i2p/router/peermanager/PeerManager.java +++ b/router/java/src/net/i2p/router/peermanager/PeerManager.java @@ -1,9 +1,9 @@ package net.i2p.router.peermanager; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -18,48 +18,55 @@ import net.i2p.router.NetworkDatabaseFacade; import net.i2p.router.PeerSelectionCriteria; import net.i2p.router.Router; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Manage the current state of the statistics * */ class PeerManager { - private final static Log _log = new Log(PeerManager.class); - private ProfileOrganizer _organizer = ProfileOrganizer.getInstance(); + private Log _log; + private RouterContext _context; + private ProfileOrganizer _organizer; + private ProfilePersistenceHelper _persistenceHelper; - public PeerManager() { - _organizer.setUs(Router.getInstance().getRouterInfo().getIdentity().getHash()); - loadProfiles(); - JobQueue.getInstance().addJob(new EvaluateProfilesJob()); - JobQueue.getInstance().addJob(new PersistProfilesJob(this)); + public PeerManager(RouterContext context) { + _context = context; + _log = context.logManager().getLog(PeerManager.class); + _persistenceHelper = new ProfilePersistenceHelper(context); + _organizer = context.profileOrganizer(); + _organizer.setUs(context.routerHash()); + loadProfiles(); + _context.jobQueue().addJob(new EvaluateProfilesJob(_context)); + _context.jobQueue().addJob(new PersistProfilesJob(_context, this)); } void storeProfiles() { - Set peers = selectPeers(); - for (Iterator iter = peers.iterator(); iter.hasNext(); ) { - Hash peer = (Hash)iter.next(); - storeProfile(peer); - } + Set peers = selectPeers(); + for (Iterator iter = peers.iterator(); iter.hasNext(); ) { + Hash peer = (Hash)iter.next(); + storeProfile(peer); + } } Set selectPeers() { - return _organizer.getInstance().selectAllPeers(); + return _organizer.selectAllPeers(); } void storeProfile(Hash peer) { - if (peer == null) return; - PeerProfile prof = _organizer.getInstance().getProfile(peer); - if (prof == null) return; - ProfilePersistenceHelper.getInstance().writeProfile(prof); + if (peer == null) return; + PeerProfile prof = _organizer.getProfile(peer); + if (prof == null) return; + 
_persistenceHelper.writeProfile(prof); } void loadProfiles() { - Set profiles = ProfilePersistenceHelper.getInstance().readProfiles(); - for (Iterator iter = profiles.iterator(); iter.hasNext();) { - PeerProfile prof = (PeerProfile)iter.next(); - if (prof != null) { - _organizer.addProfile(prof); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Profile for " + prof.getPeer().toBase64() + " loaded"); - } - } + Set profiles = _persistenceHelper.readProfiles(); + for (Iterator iter = profiles.iterator(); iter.hasNext();) { + PeerProfile prof = (PeerProfile)iter.next(); + if (prof != null) { + _organizer.addProfile(prof); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Profile for " + prof.getPeer().toBase64() + " loaded"); + } + } } /** @@ -67,51 +74,51 @@ class PeerManager { * */ Set selectPeers(PeerSelectionCriteria criteria) { - int numPasses = 0; - Set rv = new HashSet(criteria.getMinimumRequired()); - Set exclude = new HashSet(1); - exclude.add(Router.getInstance().getRouterInfo().getIdentity().getHash()); - while (rv.size() < criteria.getMinimumRequired()) { - Set curVals = new HashSet(criteria.getMinimumRequired()); - switch (criteria.getPurpose()) { - case PeerSelectionCriteria.PURPOSE_TEST: - _organizer.selectWellIntegratedPeers(criteria.getMinimumRequired(), exclude, curVals); - break; - case PeerSelectionCriteria.PURPOSE_TUNNEL: - _organizer.selectFastAndReliablePeers(criteria.getMinimumRequired(), exclude, curVals); - break; - case PeerSelectionCriteria.PURPOSE_SOURCE_ROUTE: - _organizer.selectReliablePeers(criteria.getMinimumRequired(), exclude, curVals); - break; - case PeerSelectionCriteria.PURPOSE_GARLIC: - _organizer.selectReliablePeers(criteria.getMinimumRequired(), exclude, curVals); - break; - default: - break; - } - if (curVals.size() <= 0) { - if (_log.shouldLog(Log.WARN)) - _log.warn("We ran out of peers when looking for reachable ones after finding " + rv.size()); - break; - } else { - for (Iterator iter = curVals.iterator(); iter.hasNext(); ) { - 
Hash peer = (Hash)iter.next(); - if (null != NetworkDatabaseFacade.getInstance().lookupRouterInfoLocally(peer)) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Peer " + peer.toBase64() + " is locally known, so we'll allow its selection"); - rv.add(peer); - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Peer " + peer.toBase64() + " is NOT locally known, disallowing its selection"); - } - } - exclude.addAll(curVals); - } - numPasses++; - } - if (_log.shouldLog(Log.INFO)) - _log.info("Peers selected after " + numPasses + ": " + rv); - return rv; + int numPasses = 0; + Set rv = new HashSet(criteria.getMinimumRequired()); + Set exclude = new HashSet(1); + exclude.add(_context.routerHash()); + while (rv.size() < criteria.getMinimumRequired()) { + Set curVals = new HashSet(criteria.getMinimumRequired()); + switch (criteria.getPurpose()) { + case PeerSelectionCriteria.PURPOSE_TEST: + _organizer.selectWellIntegratedPeers(criteria.getMinimumRequired(), exclude, curVals); + break; + case PeerSelectionCriteria.PURPOSE_TUNNEL: + _organizer.selectFastAndReliablePeers(criteria.getMinimumRequired(), exclude, curVals); + break; + case PeerSelectionCriteria.PURPOSE_SOURCE_ROUTE: + _organizer.selectReliablePeers(criteria.getMinimumRequired(), exclude, curVals); + break; + case PeerSelectionCriteria.PURPOSE_GARLIC: + _organizer.selectReliablePeers(criteria.getMinimumRequired(), exclude, curVals); + break; + default: + break; + } + if (curVals.size() <= 0) { + if (_log.shouldLog(Log.WARN)) + _log.warn("We ran out of peers when looking for reachable ones after finding " + rv.size()); + break; + } else { + for (Iterator iter = curVals.iterator(); iter.hasNext(); ) { + Hash peer = (Hash)iter.next(); + if (null != _context.netDb().lookupRouterInfoLocally(peer)) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Peer " + peer.toBase64() + " is locally known, so we'll allow its selection"); + rv.add(peer); + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Peer " + 
peer.toBase64() + " is NOT locally known, disallowing its selection"); + } + } + exclude.addAll(curVals); + } + numPasses++; + } + if (_log.shouldLog(Log.INFO)) + _log.info("Peers selected after " + numPasses + ": " + rv); + return rv; } public String renderStatusHTML() { return _organizer.renderStatusHTML(); } diff --git a/router/java/src/net/i2p/router/peermanager/PeerManagerFacadeImpl.java b/router/java/src/net/i2p/router/peermanager/PeerManagerFacadeImpl.java index f3189cfeb..800f9035a 100644 --- a/router/java/src/net/i2p/router/peermanager/PeerManagerFacadeImpl.java +++ b/router/java/src/net/i2p/router/peermanager/PeerManagerFacadeImpl.java @@ -1,9 +1,9 @@ package net.i2p.router.peermanager; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -15,28 +15,37 @@ import net.i2p.router.PeerManagerFacade; import net.i2p.router.PeerSelectionCriteria; import net.i2p.router.Router; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Base implementation that has simple algorithms and periodically saves state * */ -public class PeerManagerFacadeImpl extends PeerManagerFacade { - private final static Log _log = new Log(PeerManagerFacadeImpl.class); +public class PeerManagerFacadeImpl implements PeerManagerFacade { + private Log _log; private PeerManager _manager; + private RouterContext _context; + private ProfilePersistenceHelper _persistenceHelper; - public void startup() { - _log.info("Starting up the peer manager"); - _manager = new PeerManager(); - ProfilePersistenceHelper.getInstance().setUs(Router.getInstance().getRouterInfo().getIdentity().getHash()); + public PeerManagerFacadeImpl(RouterContext ctx) { + _context = ctx; + _log = ctx.logManager().getLog(PeerManagerFacadeImpl.class); + _persistenceHelper = new ProfilePersistenceHelper(ctx); } - public void shutdown() { - _log.info("Shutting down the peer manager"); - _manager.storeProfiles(); + public void startup() { + _log.info("Starting up the peer manager"); + _manager = new PeerManager(_context); + _persistenceHelper.setUs(_context.routerHash()); + } + + public void shutdown() { + _log.info("Shutting down the peer manager"); + _manager.storeProfiles(); } public List selectPeers(PeerSelectionCriteria criteria) { - return new ArrayList(_manager.selectPeers(criteria)); + return new ArrayList(_manager.selectPeers(criteria)); } public String renderStatusHTML() { return _manager.renderStatusHTML(); } diff --git a/router/java/src/net/i2p/router/peermanager/PeerProfile.java b/router/java/src/net/i2p/router/peermanager/PeerProfile.java index c3e97c2c2..bd559706f 100644 --- a/router/java/src/net/i2p/router/peermanager/PeerProfile.java +++ b/router/java/src/net/i2p/router/peermanager/PeerProfile.java @@ -3,9 +3,11 @@ package 
net.i2p.router.peermanager; import net.i2p.data.Hash; import net.i2p.stat.RateStat; import net.i2p.util.Log; +import net.i2p.router.RouterContext; class PeerProfile { - private final static Log _log = new Log(PeerProfile.class); + private Log _log; + private RouterContext _context; // whoozaat? private Hash _peer; // general peer stats @@ -37,54 +39,56 @@ class PeerProfile { // does this peer profile contain expanded data, or just the basics? private boolean _expanded; - public PeerProfile() { - this(null, true); + public PeerProfile(RouterContext context) { + this(context, null, true); } - public PeerProfile(Hash peer) { - this(peer, true); + public PeerProfile(RouterContext context, Hash peer) { + this(context, peer, true); } - public PeerProfile(Hash peer, boolean expand) { - _expanded = false; - _speedValue = 0; - _reliabilityValue = 0; - _integrationValue = 0; - _isFailing = false; - _peer = peer; - if (expand) - expandProfile(); + public PeerProfile(RouterContext context, Hash peer, boolean expand) { + _context = context; + _log = context.logManager().getLog(PeerProfile.class); + _expanded = false; + _speedValue = 0; + _reliabilityValue = 0; + _integrationValue = 0; + _isFailing = false; + _peer = peer; + if (expand) + expandProfile(); } /** what peer is being profiled */ public Hash getPeer() { return _peer; } public void setPeer(Hash peer) { _peer = peer; } - /** - * are we keeping an expanded profile on the peer, or just the bare minimum? + /** + * are we keeping an expanded profile on the peer, or just the bare minimum? * If we aren't keeping the expanded profile, all of the rates as well as the * TunnelHistory and DBHistory will not be available. 
* */ public boolean getIsExpanded() { return _expanded; } - + /** * Is this peer active at the moment (sending/receiving messages within the last * 5 minutes) */ public boolean getIsActive() { - if ( (getSendSuccessSize().getRate(5*60*1000).getCurrentEventCount() > 0) || - (getSendSuccessSize().getRate(5*60*1000).getLastEventCount() > 0) || - (getReceiveSize().getRate(5*60*1000).getCurrentEventCount() > 0) || - (getReceiveSize().getRate(5*60*1000).getLastEventCount() > 0) ) - return true; - else - return false; - } - + if ( (getSendSuccessSize().getRate(5*60*1000).getCurrentEventCount() > 0) || + (getSendSuccessSize().getRate(5*60*1000).getLastEventCount() > 0) || + (getReceiveSize().getRate(5*60*1000).getCurrentEventCount() > 0) || + (getReceiveSize().getRate(5*60*1000).getLastEventCount() > 0) ) + return true; + else + return false; + } + /** when did we first hear about this peer? */ public long getFirstHeardAbout() { return _firstHeardAbout; } public void setFirstHeardAbout(long when) { _firstHeardAbout = when; } - + /** when did we last hear about this peer? */ public long getLastHeardAbout() { return _lastHeardAbout; } public void setLastHeardAbout(long when) { _lastHeardAbout = when; } @@ -100,7 +104,7 @@ class PeerProfile { /** when did we last hear from the peer? 
*/ public long getLastHeardFrom() { return _lastHeardFrom; } public void setLastHeardFrom(long when) { _lastHeardFrom = when; } - + /** history of tunnel activity with the peer */ public TunnelHistory getTunnelHistory() { return _tunnelHistory; } public void setTunnelHistory(TunnelHistory history) { _tunnelHistory = history; } @@ -108,7 +112,7 @@ class PeerProfile { /** history of db activity with the peer */ public DBHistory getDBHistory() { return _dbHistory; } public void setDBHistory(DBHistory hist) { _dbHistory = hist; } - + /** how large successfully sent messages are, calculated over a 1 minute, 1 hour, and 1 day period */ public RateStat getSendSuccessSize() { return _sendSuccessSize; } /** how large messages that could not be sent were, calculated over a 1 minute, 1 hour, and 1 day period */ @@ -124,30 +128,30 @@ class PeerProfile { /** how many new peers we get from dbSearchReplyMessages or dbStore messages, calculated over a 1 hour, 1 day, and 1 week period */ public RateStat getDbIntroduction() { return _dbIntroduction; } - /** - * extra factor added to the speed ranking - this can be updated in the profile + /** + * extra factor added to the speed ranking - this can be updated in the profile * written to disk to affect how the algorithm ranks speed. Negative values are * penalties */ public long getSpeedBonus() { return _speedBonus; } public void setSpeedBonus(long bonus) { _speedBonus = bonus; } - /** - * extra factor added to the reliability ranking - this can be updated in the profile + /** + * extra factor added to the reliability ranking - this can be updated in the profile * written to disk to affect how the algorithm ranks reliability. 
Negative values are * penalties */ public long getReliabilityBonus() { return _reliabilityBonus; } public void setReliabilityBonus(long bonus) { _reliabilityBonus = bonus; } - /** - * extra factor added to the integration ranking - this can be updated in the profile + /** + * extra factor added to the integration ranking - this can be updated in the profile * written to disk to affect how the algorithm ranks integration. Negative values are * penalties */ public long getIntegrationBonus() { return _integrationBonus; } public void setIntegrationBonus(long bonus) { _integrationBonus = bonus; } - + /** * How fast is the peer, taking into consideration both throughput and latency. * This may even be made to take into consideration current rates vs. estimated @@ -157,7 +161,7 @@ class PeerProfile { public double getSpeedValue() { return _speedValue; } /** * How likely are they to stay up and pass on messages over the next few minutes? - * Positive numbers means more likely, negative numbers means its probably not + * Positive numbers means more likely, negative numbers means its probably not * even worth trying. * */ @@ -167,97 +171,97 @@ class PeerProfile { * told us that we didn't already know). Higher numbers means better integrated * */ - public double getIntegrationValue() { return _integrationValue; } - /** - * is this peer actively failing (aka not worth touching)? + public double getIntegrationValue() { return _integrationValue; } + /** + * is this peer actively failing (aka not worth touching)? */ public boolean getIsFailing() { return _isFailing; } - /** + /** * when the given peer is performing so poorly that we don't want to bother keeping * extensive stats on them, call this to discard excess data points. Specifically, * this drops the rates, the tunnelHistory, and the dbHistory. 
* */ public void shrinkProfile() { - _sendSuccessSize = null; - _sendFailureSize = null; - _receiveSize = null; - _dbResponseTime = null; - _tunnelCreateResponseTime = null; - _commError = null; - _dbIntroduction = null; - _tunnelHistory = null; - _dbHistory = null; - - _expanded = false; + _sendSuccessSize = null; + _sendFailureSize = null; + _receiveSize = null; + _dbResponseTime = null; + _tunnelCreateResponseTime = null; + _commError = null; + _dbIntroduction = null; + _tunnelHistory = null; + _dbHistory = null; + + _expanded = false; } - /** - * When the given peer is performing well enough that we want to keep detailed + /** + * When the given peer is performing well enough that we want to keep detailed * stats on them again, call this to set up the info we dropped during shrinkProfile. - * This will not however overwrite any existing data, so it can be safely called + * This will not however overwrite any existing data, so it can be safely called * repeatedly * */ public void expandProfile() { - if (_sendSuccessSize == null) - _sendSuccessSize = new RateStat("sendSuccessSize", "How large successfully sent messages are", "profile", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - if (_sendFailureSize == null) - _sendFailureSize = new RateStat("sendFailureSize", "How large messages that could not be sent were", "profile", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000 } ); - if (_receiveSize == null) - _receiveSize = new RateStat("receiveSize", "How large received messages are", "profile", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000 } ); - if (_dbResponseTime == null) - _dbResponseTime = new RateStat("dbResponseTime", "how long it takes to get a db response from the peer (in milliseconds)", "profile", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000 } ); - if (_tunnelCreateResponseTime == null) - _tunnelCreateResponseTime = new RateStat("tunnelCreateResponseTime", "how long it takes to get a tunnel create response from 
the peer (in milliseconds)", "profile", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000 } ); - if (_commError == null) - _commError = new RateStat("commErrorRate", "how long between communication errors with the peer (e.g. disconnection)", "profile", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000 } ); - if (_dbIntroduction == null) - _dbIntroduction = new RateStat("dbIntroduction", "how many new peers we get from dbSearchReplyMessages or dbStore messages", "profile", new long[] { 60*60*1000l, 24*60*60*1000l, 7*24*60*60*1000l }); - - if (_tunnelHistory == null) - _tunnelHistory = new TunnelHistory(); - if (_dbHistory == null) - _dbHistory = new DBHistory(); - - _expanded = true; + if (_sendSuccessSize == null) + _sendSuccessSize = new RateStat("sendSuccessSize", "How large successfully sent messages are", "profile", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + if (_sendFailureSize == null) + _sendFailureSize = new RateStat("sendFailureSize", "How large messages that could not be sent were", "profile", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000 } ); + if (_receiveSize == null) + _receiveSize = new RateStat("receiveSize", "How large received messages are", "profile", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000 } ); + if (_dbResponseTime == null) + _dbResponseTime = new RateStat("dbResponseTime", "how long it takes to get a db response from the peer (in milliseconds)", "profile", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000 } ); + if (_tunnelCreateResponseTime == null) + _tunnelCreateResponseTime = new RateStat("tunnelCreateResponseTime", "how long it takes to get a tunnel create response from the peer (in milliseconds)", "profile", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000 } ); + if (_commError == null) + _commError = new RateStat("commErrorRate", "how long between communication errors with the peer (e.g. 
disconnection)", "profile", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000 } ); + if (_dbIntroduction == null) + _dbIntroduction = new RateStat("dbIntroduction", "how many new peers we get from dbSearchReplyMessages or dbStore messages", "profile", new long[] { 60*60*1000l, 24*60*60*1000l, 7*24*60*60*1000l }); + + if (_tunnelHistory == null) + _tunnelHistory = new TunnelHistory(_context); + if (_dbHistory == null) + _dbHistory = new DBHistory(_context); + + _expanded = true; } /** update the stats and rates (this should be called once a minute) */ public void coallesceStats() { - if (!_expanded) return; - _commError.coallesceStats(); - _dbIntroduction.coallesceStats(); - _dbResponseTime.coallesceStats(); - _receiveSize.coallesceStats(); - _sendFailureSize.coallesceStats(); - _sendSuccessSize.coallesceStats(); - _tunnelCreateResponseTime.coallesceStats(); - _dbHistory.coallesceStats(); - - _speedValue = calculateSpeed(); - _reliabilityValue = calculateReliability(); - _integrationValue = calculateIntegration(); - _isFailing = calculateIsFailing(); - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Coallesced: speed [" + _speedValue + "] reliability [" + _reliabilityValue + "] integration [" + _integrationValue + "] failing? [" + _isFailing + "]"); + if (!_expanded) return; + _commError.coallesceStats(); + _dbIntroduction.coallesceStats(); + _dbResponseTime.coallesceStats(); + _receiveSize.coallesceStats(); + _sendFailureSize.coallesceStats(); + _sendSuccessSize.coallesceStats(); + _tunnelCreateResponseTime.coallesceStats(); + _dbHistory.coallesceStats(); + + _speedValue = calculateSpeed(); + _reliabilityValue = calculateReliability(); + _integrationValue = calculateIntegration(); + _isFailing = calculateIsFailing(); + + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Coallesced: speed [" + _speedValue + "] reliability [" + _reliabilityValue + "] integration [" + _integrationValue + "] failing? 
[" + _isFailing + "]"); } - private double calculateSpeed() { return Calculator.getSpeedCalculator().calc(this); } - private double calculateReliability() { return Calculator.getReliabilityCalculator().calc(this); } - private double calculateIntegration() { return Calculator.getIntegrationCalculator().calc(this); } - private boolean calculateIsFailing() { return Calculator.getIsFailingCalculator().calcBoolean(this); } + private double calculateSpeed() { return _context.speedCalculator().calc(this); } + private double calculateReliability() { return _context.reliabilityCalculator().calc(this); } + private double calculateIntegration() { return _context.integrationCalculator().calc(this); } + private boolean calculateIsFailing() { return _context.isFailingCalculator().calcBoolean(this); } void setIsFailing(boolean val) { _isFailing = val; } public int hashCode() { return (_peer == null ? 0 : _peer.hashCode()); } public boolean equals(Object obj) { - if (obj == null) return false; - if (obj.getClass() != PeerProfile.class) return false; - if (_peer == null) return false; - PeerProfile prof = (PeerProfile)obj; - return _peer.equals(prof.getPeer()); + if (obj == null) return false; + if (obj.getClass() != PeerProfile.class) return false; + if (_peer == null) return false; + PeerProfile prof = (PeerProfile)obj; + return _peer.equals(prof.getPeer()); } public String toString() { return "Profile: " + getPeer().toBase64(); } @@ -267,51 +271,52 @@ class PeerProfile { * */ public static void main(String args[]) { - testProfileSize(100, 0); // 560KB - testProfileSize(1000, 0); // 3.9MB - testProfileSize(10000, 0); // 37MB - testProfileSize(0, 10000); // 2.2MB - testProfileSize(0, 100000); // 21MB - testProfileSize(0, 300000); // 63MB + RouterContext ctx = new RouterContext(null); + testProfileSize(ctx, 100, 0); // 560KB + testProfileSize(ctx, 1000, 0); // 3.9MB + testProfileSize(ctx, 10000, 0); // 37MB + testProfileSize(ctx, 0, 10000); // 2.2MB + testProfileSize(ctx, 0, 
100000); // 21MB + testProfileSize(ctx, 0, 300000); // 63MB } - private static void testProfileSize(int numExpanded, int numCompact) { - Runtime.getRuntime().gc(); - PeerProfile profs[] = new PeerProfile[numExpanded]; - PeerProfile profsCompact[] = new PeerProfile[numCompact]; - long used = Runtime.getRuntime().totalMemory()-Runtime.getRuntime().freeMemory(); - long usedPer = used / (numExpanded+numCompact); - System.out.println(numExpanded + "/" + numCompact + ": create array - Used: " + used + " bytes (or " + usedPer + " bytes per array entry)"); - - int i = 0; - int j = 0; - try { - for (; i < numExpanded; i++) - profs[i] = new PeerProfile(new Hash(new byte[Hash.HASH_LENGTH])); - } catch (OutOfMemoryError oom) { - profs = null; - profsCompact = null; - Runtime.getRuntime().gc(); - System.out.println("Ran out of memory when creating profile " + i); - return; - } - try { - for (; i < numCompact; i++) - profsCompact[i] = new PeerProfile(new Hash(new byte[Hash.HASH_LENGTH]), false); - } catch (OutOfMemoryError oom) { - profs = null; - profsCompact = null; - Runtime.getRuntime().gc(); - System.out.println("Ran out of memory when creating compacted profile " + i); - return; - } - - Runtime.getRuntime().gc(); - long usedObjects = Runtime.getRuntime().totalMemory()-Runtime.getRuntime().freeMemory(); - usedPer = usedObjects / (numExpanded+numCompact); - System.out.println(numExpanded + "/" + numCompact + ": create objects - Used: " + usedObjects + " bytes (or " + usedPer + " bytes per profile)"); - profs = null; - profsCompact = null; - Runtime.getRuntime().gc(); + private static void testProfileSize(RouterContext ctx, int numExpanded, int numCompact) { + Runtime.getRuntime().gc(); + PeerProfile profs[] = new PeerProfile[numExpanded]; + PeerProfile profsCompact[] = new PeerProfile[numCompact]; + long used = Runtime.getRuntime().totalMemory()-Runtime.getRuntime().freeMemory(); + long usedPer = used / (numExpanded+numCompact); + System.out.println(numExpanded + "/" + 
numCompact + ": create array - Used: " + used + " bytes (or " + usedPer + " bytes per array entry)"); + + int i = 0; + int j = 0; + try { + for (; i < numExpanded; i++) + profs[i] = new PeerProfile(ctx, new Hash(new byte[Hash.HASH_LENGTH])); + } catch (OutOfMemoryError oom) { + profs = null; + profsCompact = null; + Runtime.getRuntime().gc(); + System.out.println("Ran out of memory when creating profile " + i); + return; + } + try { + for (; i < numCompact; i++) + profsCompact[i] = new PeerProfile(ctx, new Hash(new byte[Hash.HASH_LENGTH]), false); + } catch (OutOfMemoryError oom) { + profs = null; + profsCompact = null; + Runtime.getRuntime().gc(); + System.out.println("Ran out of memory when creating compacted profile " + i); + return; + } + + Runtime.getRuntime().gc(); + long usedObjects = Runtime.getRuntime().totalMemory()-Runtime.getRuntime().freeMemory(); + usedPer = usedObjects / (numExpanded+numCompact); + System.out.println(numExpanded + "/" + numCompact + ": create objects - Used: " + usedObjects + " bytes (or " + usedPer + " bytes per profile)"); + profs = null; + profsCompact = null; + Runtime.getRuntime().gc(); } } diff --git a/router/java/src/net/i2p/router/peermanager/PersistProfilesJob.java b/router/java/src/net/i2p/router/peermanager/PersistProfilesJob.java index a71af150d..f04d449e3 100644 --- a/router/java/src/net/i2p/router/peermanager/PersistProfilesJob.java +++ b/router/java/src/net/i2p/router/peermanager/PersistProfilesJob.java @@ -7,47 +7,50 @@ import net.i2p.data.Hash; import net.i2p.router.JobImpl; import net.i2p.router.JobQueue; import net.i2p.util.Clock; +import net.i2p.router.RouterContext; class PersistProfilesJob extends JobImpl { private PeerManager _mgr; private final static long PERSIST_DELAY = 10*60*1000; - public PersistProfilesJob(PeerManager mgr) { - _mgr = mgr; - getTiming().setStartAfter(Clock.getInstance().now() + PERSIST_DELAY); + public PersistProfilesJob(RouterContext ctx, PeerManager mgr) { + super(ctx); + _mgr = mgr; + 
getTiming().setStartAfter(_context.clock().now() + PERSIST_DELAY); } public String getName() { return "Persist profiles"; } public void runJob() { - Set peers = _mgr.selectPeers(); - Hash hashes[] = new Hash[peers.size()]; - int i = 0; - for (Iterator iter = peers.iterator(); iter.hasNext(); ) - hashes[i] = (Hash)iter.next(); - JobQueue.getInstance().addJob(new PersistProfileJob(hashes)); + Set peers = _mgr.selectPeers(); + Hash hashes[] = new Hash[peers.size()]; + int i = 0; + for (Iterator iter = peers.iterator(); iter.hasNext(); ) + hashes[i] = (Hash)iter.next(); + _context.jobQueue().addJob(new PersistProfileJob(hashes)); } private class PersistProfileJob extends JobImpl { - private Hash _peers[]; - private int _cur; - public PersistProfileJob(Hash peers[]) { - _peers = peers; - _cur = 0; - } - public void runJob() { - if (_cur < _peers.length) { - _mgr.storeProfile(_peers[_cur]); - _cur++; - } - if (_cur >= _peers.length) { - // no more left, requeue up the main persist-em-all job - PersistProfilesJob.this.getTiming().setStartAfter(Clock.getInstance().now() + PERSIST_DELAY); - JobQueue.getInstance().addJob(PersistProfilesJob.this); - } else { - // we've got peers left to persist, so requeue the persist profile job - JobQueue.getInstance().addJob(PersistProfileJob.this); - } - } - public String getName() { return "Persist profile"; } + private Hash _peers[]; + private int _cur; + public PersistProfileJob(Hash peers[]) { + super(PersistProfilesJob.this._context); + _peers = peers; + _cur = 0; + } + public void runJob() { + if (_cur < _peers.length) { + _mgr.storeProfile(_peers[_cur]); + _cur++; + } + if (_cur >= _peers.length) { + // no more left, requeue up the main persist-em-all job + PersistProfilesJob.this.getTiming().setStartAfter(_context.clock().now() + PERSIST_DELAY); + PersistProfilesJob.this._context.jobQueue().addJob(PersistProfilesJob.this); + } else { + // we've got peers left to persist, so requeue the persist profile job + 
PersistProfilesJob.this._context.jobQueue().addJob(PersistProfileJob.this); + } + } + public String getName() { return "Persist profile"; } } } diff --git a/router/java/src/net/i2p/router/peermanager/ProfileManagerImpl.java b/router/java/src/net/i2p/router/peermanager/ProfileManagerImpl.java index 21139f84f..16d93acf9 100644 --- a/router/java/src/net/i2p/router/peermanager/ProfileManagerImpl.java +++ b/router/java/src/net/i2p/router/peermanager/ProfileManagerImpl.java @@ -1,9 +1,9 @@ package net.i2p.router.peermanager; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -20,19 +20,14 @@ import net.i2p.data.Hash; import net.i2p.router.ProfileManager; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; -public class ProfileManagerImpl extends ProfileManager { - private final static Log _log = new Log(ProfileManagerImpl.class); - public ProfileManagerImpl() {} +public class ProfileManagerImpl implements ProfileManager { + private Log _log; + private RouterContext _context; - - /** is this peer failing or already dropped? 
*/ - public boolean isFailing(Hash peer) { - PeerProfile prof = getProfile(peer); - if (prof == null) - return true; - else - return prof.getIsFailing(); + public ProfileManagerImpl(RouterContext context) { + _context = context; } /** @@ -41,10 +36,10 @@ public class ProfileManagerImpl extends ProfileManager { * */ public void messageSent(Hash peer, String transport, long msToSend, long bytesSent) { - PeerProfile data = getProfile(peer); - if (data == null) return; - data.setLastSendSuccessful(Clock.getInstance().now()); - data.getSendSuccessSize().addData(bytesSent, msToSend); + PeerProfile data = getProfile(peer); + if (data == null) return; + data.setLastSendSuccessful(_context.clock().now()); + data.getSendSuccessSize().addData(bytesSent, msToSend); } /** @@ -52,10 +47,10 @@ public class ProfileManagerImpl extends ProfileManager { * */ public void messageFailed(Hash peer, String transport) { - PeerProfile data = getProfile(peer); - if (data == null) return; - data.setLastSendFailed(Clock.getInstance().now()); - data.getSendFailureSize().addData(0, 0); // yeah, should be a frequency... + PeerProfile data = getProfile(peer); + if (data == null) return; + data.setLastSendFailed(_context.clock().now()); + data.getSendFailureSize().addData(0, 0); // yeah, should be a frequency... } /** @@ -63,10 +58,10 @@ public class ProfileManagerImpl extends ProfileManager { * */ public void messageFailed(Hash peer) { - PeerProfile data = getProfile(peer); - if (data == null) return; - data.setLastSendFailed(Clock.getInstance().now()); - data.getSendFailureSize().addData(0, 0); // yeah, should be a frequency... + PeerProfile data = getProfile(peer); + if (data == null) return; + data.setLastSendFailed(_context.clock().now()); + data.getSendFailureSize().addData(0, 0); // yeah, should be a frequency... 
} /** @@ -74,13 +69,13 @@ public class ProfileManagerImpl extends ProfileManager { * */ public void commErrorOccurred(Hash peer) { - if (_log.shouldLog(Log.INFO)) - _log.info("Comm error occurred for peer " + peer.toBase64(), new Exception("Comm error")); - PeerProfile data = getProfile(peer); - if (data == null) return; - data.setLastSendFailed(Clock.getInstance().now()); - data.getSendFailureSize().addData(0, 0); // yeah, should be a frequency... - data.getCommError().addData(0, 0); // see above + if (_log.shouldLog(Log.INFO)) + _log.info("Comm error occurred for peer " + peer.toBase64(), new Exception("Comm error")); + PeerProfile data = getProfile(peer); + if (data == null) return; + data.setLastSendFailed(_context.clock().now()); + data.getSendFailureSize().addData(0, 0); // yeah, should be a frequency... + data.getCommError().addData(0, 0); // see above } /** @@ -88,11 +83,11 @@ public class ProfileManagerImpl extends ProfileManager { * */ public void tunnelJoined(Hash peer, long responseTimeMs) { - PeerProfile data = getProfile(peer); - if (data == null) return; - data.getTunnelCreateResponseTime().addData(responseTimeMs, responseTimeMs); - data.setLastHeardFrom(Clock.getInstance().now()); - data.getTunnelHistory().incrementAgreedTo(); + PeerProfile data = getProfile(peer); + if (data == null) return; + data.getTunnelCreateResponseTime().addData(responseTimeMs, responseTimeMs); + data.setLastHeardFrom(_context.clock().now()); + data.getTunnelHistory().incrementAgreedTo(); } /** @@ -100,10 +95,10 @@ public class ProfileManagerImpl extends ProfileManager { * */ public void tunnelRejected(Hash peer, long responseTimeMs) { - PeerProfile data = getProfile(peer); - if (data == null) return; - data.setLastHeardFrom(Clock.getInstance().now()); - data.getTunnelHistory().incrementRejected(); + PeerProfile data = getProfile(peer); + if (data == null) return; + data.setLastHeardFrom(_context.clock().now()); + data.getTunnelHistory().incrementRejected(); } /** @@ -112,10 
+107,10 @@ public class ProfileManagerImpl extends ProfileManager { * */ public void tunnelFailed(Hash peer) { - PeerProfile data = getProfile(peer); - if (data == null) return; - data.setLastHeardFrom(Clock.getInstance().now()); - data.getTunnelHistory().incrementFailed(); + PeerProfile data = getProfile(peer); + if (data == null) return; + data.setLastHeardFrom(_context.clock().now()); + data.getTunnelHistory().incrementFailed(); } /** @@ -123,12 +118,12 @@ public class ProfileManagerImpl extends ProfileManager { * */ public void dbLookupSuccessful(Hash peer, long responseTimeMs) { - PeerProfile data = getProfile(peer); - if (data == null) return; - data.setLastHeardFrom(Clock.getInstance().now()); - data.getDbResponseTime().addData(responseTimeMs, responseTimeMs); - DBHistory hist = data.getDBHistory(); - hist.lookupSuccessful(); + PeerProfile data = getProfile(peer); + if (data == null) return; + data.setLastHeardFrom(_context.clock().now()); + data.getDbResponseTime().addData(responseTimeMs, responseTimeMs); + DBHistory hist = data.getDBHistory(); + hist.lookupSuccessful(); } /** @@ -137,10 +132,10 @@ public class ProfileManagerImpl extends ProfileManager { * */ public void dbLookupFailed(Hash peer) { - PeerProfile data = getProfile(peer); - if (data == null) return; - DBHistory hist = data.getDBHistory(); - hist.lookupFailed(); + PeerProfile data = getProfile(peer); + if (data == null) return; + DBHistory hist = data.getDBHistory(); + hist.lookupFailed(); } /** @@ -152,13 +147,13 @@ public class ProfileManagerImpl extends ProfileManager { * */ public void dbLookupReply(Hash peer, int newPeers, int oldPeers, int invalid, int duplicate, long responseTimeMs) { - PeerProfile data = getProfile(peer); - if (data == null) return; - data.setLastHeardFrom(Clock.getInstance().now()); - data.getDbResponseTime().addData(responseTimeMs, responseTimeMs); - data.getDbIntroduction().addData(newPeers, responseTimeMs); - DBHistory hist = data.getDBHistory(); - 
hist.lookupReply(newPeers, oldPeers, invalid, duplicate); + PeerProfile data = getProfile(peer); + if (data == null) return; + data.setLastHeardFrom(_context.clock().now()); + data.getDbResponseTime().addData(responseTimeMs, responseTimeMs); + data.getDbIntroduction().addData(newPeers, responseTimeMs); + DBHistory hist = data.getDBHistory(); + hist.lookupReply(newPeers, oldPeers, invalid, duplicate); } /** @@ -166,11 +161,11 @@ public class ProfileManagerImpl extends ProfileManager { * */ public void dbLookupReceived(Hash peer) { - PeerProfile data = getProfile(peer); - if (data == null) return; - data.setLastHeardFrom(Clock.getInstance().now()); - DBHistory hist = data.getDBHistory(); - hist.lookupReceived(); + PeerProfile data = getProfile(peer); + if (data == null) return; + data.setLastHeardFrom(_context.clock().now()); + DBHistory hist = data.getDBHistory(); + hist.lookupReceived(); } /** @@ -178,11 +173,11 @@ public class ProfileManagerImpl extends ProfileManager { * */ public void dbStoreReceived(Hash peer, boolean wasNewKey) { - PeerProfile data = getProfile(peer); - if (data == null) return; - data.setLastHeardFrom(Clock.getInstance().now()); - DBHistory hist = data.getDBHistory(); - hist.unpromptedStoreReceived(wasNewKey); + PeerProfile data = getProfile(peer); + if (data == null) return; + data.setLastHeardFrom(_context.clock().now()); + DBHistory hist = data.getDBHistory(); + hist.unpromptedStoreReceived(wasNewKey); } /** @@ -191,23 +186,23 @@ public class ProfileManagerImpl extends ProfileManager { * */ public void dbStoreSent(Hash peer, long responseTimeMs) { - PeerProfile data = getProfile(peer); - if (data == null) return; - long now = Clock.getInstance().now(); - data.setLastSendSuccessful(now); - data.setLastHeardFrom(now); - // we could do things like update some sort of "how many successful stores we've sent them"... - // naah.. 
dont really care now + PeerProfile data = getProfile(peer); + if (data == null) return; + long now = _context.clock().now(); + data.setLastSendSuccessful(now); + data.setLastHeardFrom(now); + // we could do things like update some sort of "how many successful stores we've sent them"... + // naah.. dont really care now } /** - * Note that we were unable to confirm a successful send of db data to + * Note that we were unable to confirm a successful send of db data to * the peer, at least not within our timeout period * */ public void dbStoreFailed(Hash peer) { - // we could do things like update some sort of "how many successful stores we've - // failed to send them"... + // we could do things like update some sort of "how many successful stores we've + // failed to send them"... } /** @@ -215,9 +210,9 @@ public class ProfileManagerImpl extends ProfileManager { * through an explicit dbStore or in a dbLookupReply */ public void heardAbout(Hash peer) { - PeerProfile data = getProfile(peer); - if (data == null) return; - data.setLastHeardAbout(Clock.getInstance().now()); + PeerProfile data = getProfile(peer); + if (data == null) return; + data.setLastHeardAbout(_context.clock().now()); } /** @@ -228,63 +223,63 @@ public class ProfileManagerImpl extends ProfileManager { * */ public void messageReceived(Hash peer, String style, long msToReceive, int bytesRead) { - PeerProfile data = getProfile(peer); - if (data == null) return; - data.setLastHeardFrom(Clock.getInstance().now()); - data.getReceiveSize().addData(bytesRead, msToReceive); + PeerProfile data = getProfile(peer); + if (data == null) return; + data.setLastHeardFrom(_context.clock().now()); + data.getReceiveSize().addData(bytesRead, msToReceive); } - - private PeerProfile getProfile(Hash peer) { - PeerProfile prof = ProfileOrganizer.getInstance().getProfile(peer); - if (prof == null) { - prof = new PeerProfile(peer); - ProfileOrganizer.getInstance().addProfile(prof); - } - return prof; + + private PeerProfile 
getProfile(Hash peer) { + PeerProfile prof = _context.profileOrganizer().getProfile(peer); + if (prof == null) { + prof = new PeerProfile(_context, peer); + _context.profileOrganizer().addProfile(prof); + } + return prof; } /** provide a simple summary of a number of peers, suitable for publication in the netDb */ public Properties summarizePeers(int numPeers) { - Set peers = new HashSet(numPeers); - // lets get the fastest ones we've got (this fails over to include just plain reliable, - // or even notFailing peers if there aren't enough fast ones) - ProfileOrganizer.getInstance().selectFastAndReliablePeers(numPeers, null, peers); - Properties props = new Properties(); - for (Iterator iter = peers.iterator(); iter.hasNext(); ) { - Hash peer = (Hash)iter.next(); - PeerProfile prof = getProfile(peer); - if (prof == null) continue; - - StringBuffer buf = new StringBuffer(64); - - buf.append("status: "); - if (ProfileOrganizer.getInstance().isFastAndReliable(peer)) { - buf.append("fastReliable"); - } else if (ProfileOrganizer.getInstance().isReliable(peer)) { - buf.append("reliable"); - } else if (ProfileOrganizer.getInstance().isFailing(peer)) { - buf.append("failing"); - } else { - buf.append("notFailing"); - } - - if (ProfileOrganizer.getInstance().isWellIntegrated(peer)) - buf.append("Integrated "); - else - buf.append(" "); - - buf.append("reliability: ").append(num(prof.getReliabilityValue())).append(" "); - buf.append("speed: ").append(num(prof.getSpeedValue())).append(" "); - buf.append("integration: ").append(num(prof.getIntegrationValue())); - - props.setProperty("profile." 
+ peer.toBase64().replace('=', '_'), buf.toString()); - } - return props; + Set peers = new HashSet(numPeers); + // lets get the fastest ones we've got (this fails over to include just plain reliable, + // or even notFailing peers if there aren't enough fast ones) + _context.profileOrganizer().selectFastAndReliablePeers(numPeers, null, peers); + Properties props = new Properties(); + for (Iterator iter = peers.iterator(); iter.hasNext(); ) { + Hash peer = (Hash)iter.next(); + PeerProfile prof = getProfile(peer); + if (prof == null) continue; + + StringBuffer buf = new StringBuffer(64); + + buf.append("status: "); + if (_context.profileOrganizer().isFastAndReliable(peer)) { + buf.append("fastReliable"); + } else if (_context.profileOrganizer().isReliable(peer)) { + buf.append("reliable"); + } else if (_context.profileOrganizer().isFailing(peer)) { + buf.append("failing"); + } else { + buf.append("notFailing"); + } + + if (_context.profileOrganizer().isWellIntegrated(peer)) + buf.append("Integrated "); + else + buf.append(" "); + + buf.append("reliability: ").append(num(prof.getReliabilityValue())).append(" "); + buf.append("speed: ").append(num(prof.getSpeedValue())).append(" "); + buf.append("integration: ").append(num(prof.getIntegrationValue())); + + props.setProperty("profile." 
+ peer.toBase64().replace('=', '_'), buf.toString()); + } + return props; } private final static DecimalFormat _fmt = new DecimalFormat("##0.00", new DecimalFormatSymbols(Locale.UK)); private final static String num(double val) { - synchronized (_fmt) { return _fmt.format(val); } + synchronized (_fmt) { return _fmt.format(val); } } } diff --git a/router/java/src/net/i2p/router/peermanager/ProfileOrganizer.java b/router/java/src/net/i2p/router/peermanager/ProfileOrganizer.java index 534d16a6e..aa5253000 100644 --- a/router/java/src/net/i2p/router/peermanager/ProfileOrganizer.java +++ b/router/java/src/net/i2p/router/peermanager/ProfileOrganizer.java @@ -22,19 +22,17 @@ import net.i2p.data.DataHelper; import net.i2p.data.Hash; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** - * Keep the peer profiles organized according to the tiered model. This does not + * Keep the peer profiles organized according to the tiered model. This does not * actively update anything - the reorganize() method should be called periodically * to recalculate thresholds and move profiles into the appropriate tiers, and addProfile() * should be used to add new profiles (placing them into the appropriate groupings). */ public class ProfileOrganizer { - private final static Log _log = new Log(ProfileOrganizer.class); - private final static ProfileOrganizer _instance = new ProfileOrganizer(); - final static ProfileOrganizer getInstance() { return _instance; } - /** This data should not be exposed */ - public static final ProfileOrganizer _getInstance() { return _instance; } + private Log _log; + private RouterContext _context; /** H(routerIdentity) to PeerProfile for all peers that are fast and reliable */ private Map _fastAndReliablePeers; /** H(routerIdentity) to PeerProfile for all peers that are reliable */ @@ -47,6 +45,7 @@ public class ProfileOrganizer { private Map _failingPeers; /** who are we? 
*/ private Hash _us; + private ProfilePersistenceHelper _persistenceHelper; /** PeerProfile objects for all peers profiled, orderd by most reliable first */ private Set _strictReliabilityOrder; @@ -64,41 +63,44 @@ public class ProfileOrganizer { /** incredibly weak PRNG, just used for shuffling peers. no need to waste the real PRNG on this */ private Random _random = new Random(); - private ProfileOrganizer() { - _fastAndReliablePeers = new HashMap(64); - _reliablePeers = new HashMap(512); - _wellIntegratedPeers = new HashMap(256); - _notFailingPeers = new HashMap(1024); - _failingPeers = new HashMap(4096); - _strictReliabilityOrder = new TreeSet(new InverseReliabilityComparator()); - _thresholdSpeedValue = 0.0d; - _thresholdReliabilityValue = 0.0d; - _thresholdIntegrationValue = 0.0d; + public ProfileOrganizer(RouterContext context) { + _context = context; + _log = context.logManager().getLog(ProfileOrganizer.class); + _fastAndReliablePeers = new HashMap(64); + _reliablePeers = new HashMap(512); + _wellIntegratedPeers = new HashMap(256); + _notFailingPeers = new HashMap(1024); + _failingPeers = new HashMap(4096); + _strictReliabilityOrder = new TreeSet(new InverseReliabilityComparator()); + _thresholdSpeedValue = 0.0d; + _thresholdReliabilityValue = 0.0d; + _thresholdIntegrationValue = 0.0d; + _persistenceHelper = new ProfilePersistenceHelper(_context); } /** * Order profiles by their reliability, but backwards (most reliable / highest value first). 
- * + * */ private static final class InverseReliabilityComparator implements Comparator { - private static final Comparator _comparator = new InverseReliabilityComparator(); - public int compare(Object lhs, Object rhs) { - if ( (lhs == null) || (rhs == null) || (!(lhs instanceof PeerProfile)) || (!(rhs instanceof PeerProfile)) ) - throw new ClassCastException("Only profiles can be compared - lhs = " + lhs + " rhs = " + rhs); - PeerProfile left = (PeerProfile)lhs; - PeerProfile right= (PeerProfile)rhs; - // note below that yes, we are treating left and right backwards. see: classname - int diff = (int)(right.getReliabilityValue() - left.getReliabilityValue()); - // we can't just return that, since the set would b0rk on equal values (just because two profiles - // rank the same way doesn't mean they're the same peer!) So if they reliabilities are equal, we - // order them by the peer's hash - if (diff != 0) - return diff; - if (left.getPeer().equals(right.getPeer())) - return 0; - else - return DataHelper.compareTo(right.getPeer().getData(), left.getPeer().getData()); - } + private static final Comparator _comparator = new InverseReliabilityComparator(); + public int compare(Object lhs, Object rhs) { + if ( (lhs == null) || (rhs == null) || (!(lhs instanceof PeerProfile)) || (!(rhs instanceof PeerProfile)) ) + throw new ClassCastException("Only profiles can be compared - lhs = " + lhs + " rhs = " + rhs); + PeerProfile left = (PeerProfile)lhs; + PeerProfile right= (PeerProfile)rhs; + // note below that yes, we are treating left and right backwards. see: classname + int diff = (int)(right.getReliabilityValue() - left.getReliabilityValue()); + // we can't just return that, since the set would b0rk on equal values (just because two profiles + // rank the same way doesn't mean they're the same peer!) 
So if they reliabilities are equal, we + // order them by the peer's hash + if (diff != 0) + return diff; + if (left.getPeer().equals(right.getPeer())) + return 0; + else + return DataHelper.compareTo(right.getPeer().getData(), left.getPeer().getData()); + } } public void setUs(Hash us) { _us = us; } @@ -108,28 +110,28 @@ public class ProfileOrganizer { * */ public PeerProfile getProfile(Hash peer) { - synchronized (_reorganizeLock) { - return locked_getProfile(peer); - } + synchronized (_reorganizeLock) { + return locked_getProfile(peer); + } } - + /** * Add the new profile, returning the old value (or null if no profile existed) * */ public PeerProfile addProfile(PeerProfile profile) { - if ( (profile == null) || (profile.getPeer() == null) || (_us.equals(profile.getPeer())) ) return null; - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("New profile created for " + profile.getPeer().toBase64()); - - synchronized (_reorganizeLock) { - PeerProfile old = locked_getProfile(profile.getPeer()); - profile.coallesceStats(); - locked_placeProfile(profile); - _strictReliabilityOrder.add(profile); - return old; - } + if ( (profile == null) || (profile.getPeer() == null) || (_us.equals(profile.getPeer())) ) return null; + + if (_log.shouldLog(Log.DEBUG)) + _log.debug("New profile created for " + profile.getPeer().toBase64()); + + synchronized (_reorganizeLock) { + PeerProfile old = locked_getProfile(profile.getPeer()); + profile.coallesceStats(); + locked_placeProfile(profile); + _strictReliabilityOrder.add(profile); + return old; + } } public int countFastAndReliablePeers() { synchronized (_reorganizeLock) { return _fastAndReliablePeers.size(); } } @@ -155,12 +157,12 @@ public class ProfileOrganizer { * */ public void selectFastAndReliablePeers(int howMany, Set exclude, Set matches) { - synchronized (_reorganizeLock) { - locked_selectPeers(_fastAndReliablePeers, howMany, exclude, matches); - } - if (matches.size() < howMany) - selectReliablePeers(howMany, exclude, matches); 
- return; + synchronized (_reorganizeLock) { + locked_selectPeers(_fastAndReliablePeers, howMany, exclude, matches); + } + if (matches.size() < howMany) + selectReliablePeers(howMany, exclude, matches); + return; } /** @@ -168,34 +170,34 @@ public class ProfileOrganizer { * */ public void selectReliablePeers(int howMany, Set exclude, Set matches) { - synchronized (_reorganizeLock) { - locked_selectPeers(_reliablePeers, howMany, exclude, matches); - } - if (matches.size() < howMany) - selectNotFailingPeers(howMany, exclude, matches); - return; + synchronized (_reorganizeLock) { + locked_selectPeers(_reliablePeers, howMany, exclude, matches); + } + if (matches.size() < howMany) + selectNotFailingPeers(howMany, exclude, matches); + return; } /** * Return a set of Hashes for peers that are well integrated into the network. * */ public void selectWellIntegratedPeers(int howMany, Set exclude, Set matches) { - synchronized (_reorganizeLock) { - locked_selectPeers(_wellIntegratedPeers, howMany, exclude, matches); - } - if (matches.size() < howMany) - selectNotFailingPeers(howMany, exclude, matches); - return; - } + synchronized (_reorganizeLock) { + locked_selectPeers(_wellIntegratedPeers, howMany, exclude, matches); + } + if (matches.size() < howMany) + selectNotFailingPeers(howMany, exclude, matches); + return; + } /** * Return a set of Hashes for peers that are not failing, preferring ones that * we are already talking with * */ public void selectNotFailingPeers(int howMany, Set exclude, Set matches) { - if (matches.size() < howMany) - selectActiveNotFailingPeers(howMany, exclude, matches); - return; + if (matches.size() < howMany) + selectActiveNotFailingPeers(howMany, exclude, matches); + return; } /** * Return a set of Hashes for peers that are both not failing and we're actively @@ -203,67 +205,67 @@ public class ProfileOrganizer { * */ private void selectActiveNotFailingPeers(int howMany, Set exclude, Set matches) { - if (true) { - selectAllNotFailingPeers(howMany, 
exclude, matches); - return; - } - // pick out the not-failing peers that we're actively talking with - if (matches.size() < howMany) { - synchronized (_reorganizeLock) { - for (Iterator iter = _notFailingPeers.keySet().iterator(); iter.hasNext(); ) { - Hash peer = (Hash)iter.next(); - if ( (exclude != null) && exclude.contains(peer) ) continue; - if (matches.contains(peer)) continue; - PeerProfile prof = (PeerProfile)_notFailingPeers.get(peer); - if (prof.getIsActive()) - matches.add(peer); - if (matches.size() >= howMany) - return; - } - } - } - // ok, still not enough, pick out the not-failing peers that we aren't talking with - if (matches.size() < howMany) - selectAllNotFailingPeers(howMany, exclude, matches); - return; + if (true) { + selectAllNotFailingPeers(howMany, exclude, matches); + return; + } + // pick out the not-failing peers that we're actively talking with + if (matches.size() < howMany) { + synchronized (_reorganizeLock) { + for (Iterator iter = _notFailingPeers.keySet().iterator(); iter.hasNext(); ) { + Hash peer = (Hash)iter.next(); + if ( (exclude != null) && exclude.contains(peer) ) continue; + if (matches.contains(peer)) continue; + PeerProfile prof = (PeerProfile)_notFailingPeers.get(peer); + if (prof.getIsActive()) + matches.add(peer); + if (matches.size() >= howMany) + return; + } + } + } + // ok, still not enough, pick out the not-failing peers that we aren't talking with + if (matches.size() < howMany) + selectAllNotFailingPeers(howMany, exclude, matches); + return; } /** * Return a set of Hashes for peers that are not failing. 
* */ private void selectAllNotFailingPeers(int howMany, Set exclude, Set matches) { - if (matches.size() < howMany) { - int orig = matches.size(); - int needed = howMany - orig; - List selected = new ArrayList(needed); - synchronized (_reorganizeLock) { - for (Iterator iter = _strictReliabilityOrder.iterator(); selected.size() < needed && iter.hasNext(); ) { - PeerProfile prof = (PeerProfile)iter.next(); - if (matches.contains(prof.getPeer()) || - (exclude != null && exclude.contains(prof.getPeer())) || - _failingPeers.containsKey(prof.getPeer())) - continue; - else - selected.add(prof.getPeer()); - } - } - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Selecting all not failing found " + (matches.size()-orig) + " new peers: " + selected); - matches.addAll(selected); - } - if (matches.size() < howMany) - selectFailingPeers(howMany, exclude, matches); - return; + if (matches.size() < howMany) { + int orig = matches.size(); + int needed = howMany - orig; + List selected = new ArrayList(needed); + synchronized (_reorganizeLock) { + for (Iterator iter = _strictReliabilityOrder.iterator(); selected.size() < needed && iter.hasNext(); ) { + PeerProfile prof = (PeerProfile)iter.next(); + if (matches.contains(prof.getPeer()) || + (exclude != null && exclude.contains(prof.getPeer())) || + _failingPeers.containsKey(prof.getPeer())) + continue; + else + selected.add(prof.getPeer()); + } + } + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Selecting all not failing found " + (matches.size()-orig) + " new peers: " + selected); + matches.addAll(selected); + } + if (matches.size() < howMany) + selectFailingPeers(howMany, exclude, matches); + return; } /** * I'm not quite sure why you'd want this... 
(other than for failover from the better results) * */ public void selectFailingPeers(int howMany, Set exclude, Set matches) { - synchronized (_reorganizeLock) { - locked_selectPeers(_failingPeers, howMany, exclude, matches); - } - return; + synchronized (_reorganizeLock) { + locked_selectPeers(_failingPeers, howMany, exclude, matches); + } + return; } /** @@ -271,16 +273,16 @@ public class ProfileOrganizer { * */ public Set selectAllPeers() { - synchronized (_reorganizeLock) { - Set allPeers = new HashSet(_failingPeers.size() + _notFailingPeers.size() + _reliablePeers.size() + _fastAndReliablePeers.size()); - allPeers.addAll(_failingPeers.keySet()); - allPeers.addAll(_notFailingPeers.keySet()); - allPeers.addAll(_reliablePeers.keySet()); - allPeers.addAll(_fastAndReliablePeers.keySet()); - return allPeers; - } + synchronized (_reorganizeLock) { + Set allPeers = new HashSet(_failingPeers.size() + _notFailingPeers.size() + _reliablePeers.size() + _fastAndReliablePeers.size()); + allPeers.addAll(_failingPeers.keySet()); + allPeers.addAll(_notFailingPeers.keySet()); + allPeers.addAll(_reliablePeers.keySet()); + allPeers.addAll(_fastAndReliablePeers.keySet()); + return allPeers; + } } - + /** * Place peers into the correct tier, as well as expand/contract and even drop profiles * according to whatever limits are in place. 
Peer profiles are not coallesced during @@ -288,37 +290,37 @@ public class ProfileOrganizer { * */ public void reorganize() { - synchronized (_reorganizeLock) { - Set allPeers = new HashSet(_failingPeers.size() + _notFailingPeers.size() + _reliablePeers.size() + _fastAndReliablePeers.size()); - allPeers.addAll(_failingPeers.values()); - allPeers.addAll(_notFailingPeers.values()); - allPeers.addAll(_reliablePeers.values()); - allPeers.addAll(_fastAndReliablePeers.values()); - - _failingPeers.clear(); - _notFailingPeers.clear(); - _reliablePeers.clear(); - _fastAndReliablePeers.clear(); - - calculateThresholds(allPeers); - - for (Iterator iter = allPeers.iterator(); iter.hasNext(); ) { - PeerProfile profile = (PeerProfile)iter.next(); - locked_placeProfile(profile); - } - - Set reordered = new TreeSet(InverseReliabilityComparator._comparator); - reordered.addAll(_strictReliabilityOrder); - - _strictReliabilityOrder = reordered; - - locked_unfailAsNecessary(); - } - - if (_log.shouldLog(Log.DEBUG)) { - _log.debug("Profiles reorganized. 
averages: [integration: " + _thresholdIntegrationValue + ", reliability: " + _thresholdReliabilityValue + ", speed: " + _thresholdSpeedValue + "]"); - _log.debug("Strictly organized: " + _strictReliabilityOrder); - } + synchronized (_reorganizeLock) { + Set allPeers = new HashSet(_failingPeers.size() + _notFailingPeers.size() + _reliablePeers.size() + _fastAndReliablePeers.size()); + allPeers.addAll(_failingPeers.values()); + allPeers.addAll(_notFailingPeers.values()); + allPeers.addAll(_reliablePeers.values()); + allPeers.addAll(_fastAndReliablePeers.values()); + + _failingPeers.clear(); + _notFailingPeers.clear(); + _reliablePeers.clear(); + _fastAndReliablePeers.clear(); + + calculateThresholds(allPeers); + + for (Iterator iter = allPeers.iterator(); iter.hasNext(); ) { + PeerProfile profile = (PeerProfile)iter.next(); + locked_placeProfile(profile); + } + + Set reordered = new TreeSet(InverseReliabilityComparator._comparator); + reordered.addAll(_strictReliabilityOrder); + + _strictReliabilityOrder = reordered; + + locked_unfailAsNecessary(); + } + + if (_log.shouldLog(Log.DEBUG)) { + _log.debug("Profiles reorganized. averages: [integration: " + _thresholdIntegrationValue + ", reliability: " + _thresholdReliabilityValue + ", speed: " + _thresholdSpeedValue + "]"); + _log.debug("Strictly organized: " + _strictReliabilityOrder); + } } /** how many not failing/active peers must we have? 
*/ @@ -330,35 +332,35 @@ public class ProfileOrganizer { * */ private void locked_unfailAsNecessary() { - int notFailingActive = 0; - for (Iterator iter = _notFailingPeers.keySet().iterator(); iter.hasNext(); ) { - Hash key = (Hash)iter.next(); - PeerProfile peer = (PeerProfile)_notFailingPeers.get(key); - if (peer.getIsActive()) - notFailingActive++; - if (notFailingActive >= MIN_NOT_FAILING_ACTIVE) { - // we've got enough, no need to try further - return; - } - } - - // we dont have enough, lets unfail our best ones remaining - int needToUnfail = MIN_NOT_FAILING_ACTIVE - notFailingActive; - if (needToUnfail > 0) { - int unfailed = 0; - for (Iterator iter = _strictReliabilityOrder.iterator(); iter.hasNext(); ) { - PeerProfile best = (PeerProfile)iter.next(); - if ( (best.getIsActive()) && (best.getIsFailing()) ) { - if (_log.shouldLog(Log.WARN)) - _log.warn("All peers were failing, so we have overridden the failing flag for one of the most reliable active peers (" + best.getPeer().toBase64() + ")"); - best.setIsFailing(false); - locked_placeProfile(best); - unfailed++; - } - if (unfailed >= needToUnfail) - break; - } - } + int notFailingActive = 0; + for (Iterator iter = _notFailingPeers.keySet().iterator(); iter.hasNext(); ) { + Hash key = (Hash)iter.next(); + PeerProfile peer = (PeerProfile)_notFailingPeers.get(key); + if (peer.getIsActive()) + notFailingActive++; + if (notFailingActive >= MIN_NOT_FAILING_ACTIVE) { + // we've got enough, no need to try further + return; + } + } + + // we dont have enough, lets unfail our best ones remaining + int needToUnfail = MIN_NOT_FAILING_ACTIVE - notFailingActive; + if (needToUnfail > 0) { + int unfailed = 0; + for (Iterator iter = _strictReliabilityOrder.iterator(); iter.hasNext(); ) { + PeerProfile best = (PeerProfile)iter.next(); + if ( (best.getIsActive()) && (best.getIsFailing()) ) { + if (_log.shouldLog(Log.WARN)) + _log.warn("All peers were failing, so we have overridden the failing flag for one of the most reliable 
active peers (" + best.getPeer().toBase64() + ")"); + best.setIsFailing(false); + locked_placeProfile(best); + unfailed++; + } + if (unfailed >= needToUnfail) + break; + } + } } //////// @@ -366,258 +368,258 @@ public class ProfileOrganizer { //////// /** - * Update the thresholds based on the profiles in this set. currently + * Update the thresholds based on the profiles in this set. currently * implements the thresholds based on a simple average (ignoring failing values), - * with integration and speed being directly equal to the simple average as + * with integration and speed being directly equal to the simple average as * calculated over all reliable and active non-failing peers, while the reliability threshold - * is half the simple average of active non-failing peers. Lots of room to tune this. + * is half the simple average of active non-failing peers. Lots of room to tune this. * should this instead be top 10%? top 90%? top 50? etc * */ private void calculateThresholds(Set allPeers) { - double totalReliability = 0; - int numActive = 0; - - for (Iterator iter = allPeers.iterator(); iter.hasNext(); ) { - PeerProfile profile = (PeerProfile)iter.next(); - - if (_us.equals(profile.getPeer())) continue; - - // only take into account peers that we're talking to within the last - // few minutes - if ( (!profile.getIsActive()) || (profile.getIsFailing()) ) - continue; - - numActive++; - - if (profile.getReliabilityValue() > 0) - totalReliability += profile.getReliabilityValue(); - } - _thresholdReliabilityValue = 0.5d * avg(totalReliability, numActive); - - // now derive the integration and speed thresholds based ONLY on the reliable - // and active peers - numActive = 0; - double totalIntegration = 0; - double totalSpeed = 0; - - for (Iterator iter = allPeers.iterator(); iter.hasNext(); ) { - PeerProfile profile = (PeerProfile)iter.next(); - - if (_us.equals(profile.getPeer())) continue; - - // only take into account peers that we're talking to within the last - 
// few minutes, who are reliable, AND who are not failing - if ( (!profile.getIsActive()) || (profile.getReliabilityValue() < _thresholdReliabilityValue) || (profile.getIsFailing()) ) - continue; - - numActive++; - - if (profile.getIntegrationValue() > 0) - totalIntegration += profile.getIntegrationValue(); - if (profile.getSpeedValue() > 0) - totalSpeed += profile.getSpeedValue(); - } - - - _thresholdIntegrationValue = 1.0d * avg(totalIntegration, numActive); - _thresholdSpeedValue = 1.0d * avg(totalSpeed, numActive); + double totalReliability = 0; + int numActive = 0; + + for (Iterator iter = allPeers.iterator(); iter.hasNext(); ) { + PeerProfile profile = (PeerProfile)iter.next(); + + if (_us.equals(profile.getPeer())) continue; + + // only take into account peers that we're talking to within the last + // few minutes + if ( (!profile.getIsActive()) || (profile.getIsFailing()) ) + continue; + + numActive++; + + if (profile.getReliabilityValue() > 0) + totalReliability += profile.getReliabilityValue(); + } + _thresholdReliabilityValue = 0.5d * avg(totalReliability, numActive); + + // now derive the integration and speed thresholds based ONLY on the reliable + // and active peers + numActive = 0; + double totalIntegration = 0; + double totalSpeed = 0; + + for (Iterator iter = allPeers.iterator(); iter.hasNext(); ) { + PeerProfile profile = (PeerProfile)iter.next(); + + if (_us.equals(profile.getPeer())) continue; + + // only take into account peers that we're talking to within the last + // few minutes, who are reliable, AND who are not failing + if ( (!profile.getIsActive()) || (profile.getReliabilityValue() < _thresholdReliabilityValue) || (profile.getIsFailing()) ) + continue; + + numActive++; + + if (profile.getIntegrationValue() > 0) + totalIntegration += profile.getIntegrationValue(); + if (profile.getSpeedValue() > 0) + totalSpeed += profile.getSpeedValue(); + } + + + _thresholdIntegrationValue = 1.0d * avg(totalIntegration, numActive); + 
_thresholdSpeedValue = 1.0d * avg(totalSpeed, numActive); } /** simple average, or 0 if NaN */ private final static double avg(double total, double quantity) { - if ( (total > 0) && (quantity > 0) ) - return total/quantity; - else - return 0.0d; + if ( (total > 0) && (quantity > 0) ) + return total/quantity; + else + return 0.0d; } /** called after locking the reorganizeLock */ private PeerProfile locked_getProfile(Hash peer) { - if (_notFailingPeers.containsKey(peer)) - return (PeerProfile)_notFailingPeers.get(peer); - else if (_failingPeers.containsKey(peer)) - return (PeerProfile)_failingPeers.get(peer); - else - return null; + if (_notFailingPeers.containsKey(peer)) + return (PeerProfile)_notFailingPeers.get(peer); + else if (_failingPeers.containsKey(peer)) + return (PeerProfile)_failingPeers.get(peer); + else + return null; } /** - * Select peers from the peer mapping, excluding appropriately and increasing the + * Select peers from the peer mapping, excluding appropriately and increasing the * matches set until it has howMany elements in it. 
* */ - private void locked_selectPeers(Map peers, int howMany, Set toExclude, Set matches) { - List all = new ArrayList(peers.keySet()); - if (toExclude != null) - all.removeAll(toExclude); - all.removeAll(matches); - all.remove(_us); - howMany -= matches.size(); - Collections.shuffle(all, _random); - Set rv = new HashSet(howMany); - for (int i = 0; i < howMany && i < all.size(); i++) { - rv.add(all.get(i)); - } - matches.addAll(rv); + private void locked_selectPeers(Map peers, int howMany, Set toExclude, Set matches) { + List all = new ArrayList(peers.keySet()); + if (toExclude != null) + all.removeAll(toExclude); + all.removeAll(matches); + all.remove(_us); + howMany -= matches.size(); + Collections.shuffle(all, _random); + Set rv = new HashSet(howMany); + for (int i = 0; i < howMany && i < all.size(); i++) { + rv.add(all.get(i)); + } + matches.addAll(rv); } - /** + /** * called after locking the reorganizeLock, place the profile in the appropriate tier. * This is where we implement the (betterThanAverage ? 
goToPierX : goToPierY) algorithms * */ private void locked_placeProfile(PeerProfile profile) { - if (profile.getIsFailing()) { - if (!shouldDrop(profile)) - _failingPeers.put(profile.getPeer(), profile); - _fastAndReliablePeers.remove(profile.getPeer()); - _reliablePeers.remove(profile.getPeer()); - _wellIntegratedPeers.remove(profile.getPeer()); - _notFailingPeers.remove(profile.getPeer()); - } else { - _failingPeers.remove(profile.getPeer()); - _fastAndReliablePeers.remove(profile.getPeer()); - _reliablePeers.remove(profile.getPeer()); - _wellIntegratedPeers.remove(profile.getPeer()); - - _notFailingPeers.put(profile.getPeer(), profile); - if (_thresholdReliabilityValue <= profile.getReliabilityValue()) { - _reliablePeers.put(profile.getPeer(), profile); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Reliable: \t" + profile.getPeer().toBase64()); - if (_thresholdSpeedValue <= profile.getSpeedValue()) { - _fastAndReliablePeers.put(profile.getPeer(), profile); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Fast: \t" + profile.getPeer().toBase64()); - } - - if (_thresholdIntegrationValue <= profile.getIntegrationValue()) { - _wellIntegratedPeers.put(profile.getPeer(), profile); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Integrated: \t" + profile.getPeer().toBase64()); - } - } else { - // not reliable, but not failing (yet) - } - } + if (profile.getIsFailing()) { + if (!shouldDrop(profile)) + _failingPeers.put(profile.getPeer(), profile); + _fastAndReliablePeers.remove(profile.getPeer()); + _reliablePeers.remove(profile.getPeer()); + _wellIntegratedPeers.remove(profile.getPeer()); + _notFailingPeers.remove(profile.getPeer()); + } else { + _failingPeers.remove(profile.getPeer()); + _fastAndReliablePeers.remove(profile.getPeer()); + _reliablePeers.remove(profile.getPeer()); + _wellIntegratedPeers.remove(profile.getPeer()); + + _notFailingPeers.put(profile.getPeer(), profile); + if (_thresholdReliabilityValue <= profile.getReliabilityValue()) { + 
_reliablePeers.put(profile.getPeer(), profile); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Reliable: \t" + profile.getPeer().toBase64()); + if (_thresholdSpeedValue <= profile.getSpeedValue()) { + _fastAndReliablePeers.put(profile.getPeer(), profile); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Fast: \t" + profile.getPeer().toBase64()); + } + + if (_thresholdIntegrationValue <= profile.getIntegrationValue()) { + _wellIntegratedPeers.put(profile.getPeer(), profile); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Integrated: \t" + profile.getPeer().toBase64()); + } + } else { + // not reliable, but not failing (yet) + } + } } - /** + /** * This is where we determine whether a failing peer is so poor and we're so overloaded * that we just want to forget they exist. This algorithm won't need to be implemented until - * after I2P 1.0, most likely, since we should be able to handle thousands of peers profiled + * after I2P 1.0, most likely, since we should be able to handle thousands of peers profiled * without ejecting any of them, but anyway, this is how we'd do it. Most likely. 
* */ private boolean shouldDrop(PeerProfile profile) { return false; } public void exportProfile(Hash profile, OutputStream out) throws IOException { - PeerProfile prof = getProfile(profile); - if (prof != null) - ProfilePersistenceHelper.getInstance().writeProfile(prof, out); + PeerProfile prof = getProfile(profile); + if (prof != null) + _persistenceHelper.writeProfile(prof, out); } public String renderStatusHTML() { - Set peers = selectAllPeers(); - - long hideBefore = Clock.getInstance().now() - 6*60*60*1000; - - TreeMap order = new TreeMap(); - for (Iterator iter = peers.iterator(); iter.hasNext();) { - Hash peer = (Hash)iter.next(); - if (_us.equals(peer)) continue; - PeerProfile prof = getProfile(peer); - if (prof.getLastSendSuccessful() <= hideBefore) continue; - order.put(peer.toBase64(), prof); - } - - int fast = 0; - int reliable = 0; - int integrated = 0; - int failing = 0; - StringBuffer buf = new StringBuffer(8*1024); - buf.append("

    Peer Profiles

    \n"); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - for (Iterator iter = order.keySet().iterator(); iter.hasNext();) { - String name = (String)iter.next(); - PeerProfile prof = (PeerProfile)order.get(name); - Hash peer = prof.getPeer(); - - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - } - buf.append("
    Peer (").append(order.size()).append(", hiding ").append(peers.size()-order.size()).append(" inactive ones)GroupsSpeedReliabilityIntegrationFailing?Profile data
    "); - if (prof.getIsFailing()) { - buf.append("--").append(peer.toBase64()).append(""); - } else { - if (prof.getIsActive()) { - buf.append("++").append(peer.toBase64()).append(""); - } else { - buf.append("__").append(peer.toBase64()); - } - } - buf.append(""); - int tier = 0; - boolean isIntegrated = false; - synchronized (_reorganizeLock) { - if (_fastAndReliablePeers.containsKey(peer)) { - tier = 1; - fast++; - reliable++; - } else if (_reliablePeers.containsKey(peer)) { - tier = 2; - reliable++; - } else if (_notFailingPeers.containsKey(peer)) { - tier = 3; - } else { - failing++; - } - - if (_wellIntegratedPeers.containsKey(peer)) { - isIntegrated = true; - integrated++; - } - } - - switch (tier) { - case 1: buf.append("Fast+Reliable"); break; - case 2: buf.append("Reliable"); break; - case 3: buf.append("Not Failing"); break; - default: buf.append("Failing"); break; - } - if (isIntegrated) buf.append(", Well integrated"); - - buf.append("").append(num(prof.getSpeedValue())).append("").append(num(prof.getReliabilityValue())).append("").append(num(prof.getIntegrationValue())).append("").append(prof.getIsFailing()).append("profile.txt "); - buf.append(" netDb
    "); - buf.append("Note that the speed, reliability, and integration values are relative"); - buf.append(" - they do NOT correspond with any particular throughput, latency, uptime, "); - buf.append("or other metric. Higher numbers are better. "); - buf.append("Red peers prefixed with '--' means the peer is failing, and blue peers prefixed "); - buf.append("with '++' means we've sent or received a message from them "); - buf.append("in the last five minutes
    "); - buf.append("Thresholds:
    "); - buf.append("Speed: ").append(num(_thresholdSpeedValue)).append(" (").append(fast).append(" fast peers)
    "); - buf.append("Reliability: ").append(num(_thresholdReliabilityValue)).append(" (").append(reliable).append(" reliable peers)
    "); - buf.append("Integration: ").append(num(_thresholdIntegrationValue)).append(" (").append(integrated).append(" well integrated peers)
    "); - return buf.toString(); + Set peers = selectAllPeers(); + + long hideBefore = _context.clock().now() - 6*60*60*1000; + + TreeMap order = new TreeMap(); + for (Iterator iter = peers.iterator(); iter.hasNext();) { + Hash peer = (Hash)iter.next(); + if (_us.equals(peer)) continue; + PeerProfile prof = getProfile(peer); + if (prof.getLastSendSuccessful() <= hideBefore) continue; + order.put(peer.toBase64(), prof); + } + + int fast = 0; + int reliable = 0; + int integrated = 0; + int failing = 0; + StringBuffer buf = new StringBuffer(8*1024); + buf.append("

    Peer Profiles

    \n"); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + for (Iterator iter = order.keySet().iterator(); iter.hasNext();) { + String name = (String)iter.next(); + PeerProfile prof = (PeerProfile)order.get(name); + Hash peer = prof.getPeer(); + + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + buf.append(""); + } + buf.append("
    Peer (").append(order.size()).append(", hiding ").append(peers.size()-order.size()).append(" inactive ones)GroupsSpeedReliabilityIntegrationFailing?Profile data
    "); + if (prof.getIsFailing()) { + buf.append("--").append(peer.toBase64()).append(""); + } else { + if (prof.getIsActive()) { + buf.append("++").append(peer.toBase64()).append(""); + } else { + buf.append("__").append(peer.toBase64()); + } + } + buf.append(""); + int tier = 0; + boolean isIntegrated = false; + synchronized (_reorganizeLock) { + if (_fastAndReliablePeers.containsKey(peer)) { + tier = 1; + fast++; + reliable++; + } else if (_reliablePeers.containsKey(peer)) { + tier = 2; + reliable++; + } else if (_notFailingPeers.containsKey(peer)) { + tier = 3; + } else { + failing++; + } + + if (_wellIntegratedPeers.containsKey(peer)) { + isIntegrated = true; + integrated++; + } + } + + switch (tier) { + case 1: buf.append("Fast+Reliable"); break; + case 2: buf.append("Reliable"); break; + case 3: buf.append("Not Failing"); break; + default: buf.append("Failing"); break; + } + if (isIntegrated) buf.append(", Well integrated"); + + buf.append("").append(num(prof.getSpeedValue())).append("").append(num(prof.getReliabilityValue())).append("").append(num(prof.getIntegrationValue())).append("").append(prof.getIsFailing()).append("profile.txt "); + buf.append(" netDb
    "); + buf.append("Note that the speed, reliability, and integration values are relative"); + buf.append(" - they do NOT correspond with any particular throughput, latency, uptime, "); + buf.append("or other metric. Higher numbers are better. "); + buf.append("Red peers prefixed with '--' means the peer is failing, and blue peers prefixed "); + buf.append("with '++' means we've sent or received a message from them "); + buf.append("in the last five minutes
    "); + buf.append("Thresholds:
    "); + buf.append("Speed: ").append(num(_thresholdSpeedValue)).append(" (").append(fast).append(" fast peers)
    "); + buf.append("Reliability: ").append(num(_thresholdReliabilityValue)).append(" (").append(reliable).append(" reliable peers)
    "); + buf.append("Integration: ").append(num(_thresholdIntegrationValue)).append(" (").append(integrated).append(" well integrated peers)
    "); + return buf.toString(); } private final static DecimalFormat _fmt = new DecimalFormat("###,##0.00", new DecimalFormatSymbols(Locale.UK)); private final static String num(double num) { synchronized (_fmt) { return _fmt.format(num); } } -} +} diff --git a/router/java/src/net/i2p/router/peermanager/ProfilePersistenceHelper.java b/router/java/src/net/i2p/router/peermanager/ProfilePersistenceHelper.java index c1e93988c..21170d8cc 100644 --- a/router/java/src/net/i2p/router/peermanager/ProfilePersistenceHelper.java +++ b/router/java/src/net/i2p/router/peermanager/ProfilePersistenceHelper.java @@ -19,11 +19,11 @@ import net.i2p.data.Hash; import net.i2p.router.Router; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; class ProfilePersistenceHelper { - private final static Log _log = new Log(ProfilePersistenceHelper.class); - private final static ProfilePersistenceHelper _instance = new ProfilePersistenceHelper(); - public final static ProfilePersistenceHelper getInstance() { return _instance; } + private Log _log; + private RouterContext _context; public final static String PROP_PEER_PROFILE_DIR = "router.profileDir"; public final static String DEFAULT_PEER_PROFILE_DIR = "peerProfiles"; @@ -32,245 +32,247 @@ class ProfilePersistenceHelper { private File _profileDir = null; private Hash _us; - private ProfilePersistenceHelper() { - File profileDir = getProfileDir(); - _us = null; - if (!profileDir.exists()) { - profileDir.mkdirs(); - _log.info("Profile directory " + profileDir.getAbsolutePath() + " created"); - } + public ProfilePersistenceHelper(RouterContext ctx) { + _context = ctx; + _log = ctx.logManager().getLog(ProfilePersistenceHelper.class); + File profileDir = getProfileDir(); + _us = null; + if (!profileDir.exists()) { + profileDir.mkdirs(); + _log.info("Profile directory " + profileDir.getAbsolutePath() + " created"); + } } public void setUs(Hash routerIdentHash) { _us = routerIdentHash; } /** write out the data 
from the profile to the stream */ public void writeProfile(PeerProfile profile) { - File f = pickFile(profile); - long before = Clock.getInstance().now(); - OutputStream fos = null; - try { - fos = new BufferedOutputStream(new FileOutputStream(f)); - writeProfile(profile, fos); - } catch (IOException ioe) { - _log.error("Error writing profile to " + f); - } finally { - if (fos != null) try { fos.close(); } catch (IOException ioe) {} - } - long delay = Clock.getInstance().now() - before; - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Writing the profile to " + f.getName() + " took " + delay + "ms"); + File f = pickFile(profile); + long before = _context.clock().now(); + OutputStream fos = null; + try { + fos = new BufferedOutputStream(new FileOutputStream(f)); + writeProfile(profile, fos); + } catch (IOException ioe) { + _log.error("Error writing profile to " + f); + } finally { + if (fos != null) try { fos.close(); } catch (IOException ioe) {} + } + long delay = _context.clock().now() - before; + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Writing the profile to " + f.getName() + " took " + delay + "ms"); } /** write out the data from the profile to the stream */ public void writeProfile(PeerProfile profile, OutputStream out) throws IOException { - String groups = null; - if (ProfileOrganizer.getInstance().isFailing(profile.getPeer())) { - groups = "failing"; - } else if (!ProfileOrganizer.getInstance().isReliable(profile.getPeer())) { - groups = "not failing"; - } else { - if (ProfileOrganizer.getInstance().isFastAndReliable(profile.getPeer())) - groups = "fast and reliable"; - else - groups = "reliable"; - - if (ProfileOrganizer.getInstance().isWellIntegrated(profile.getPeer())) - groups = groups + ", well integrated"; - } - - StringBuffer buf = new StringBuffer(512); - buf.append("########################################################################").append(NL); - buf.append("# profile for ").append(profile.getPeer().toBase64()).append(NL); - if (_us != 
null) - buf.append("# as calculated by ").append(_us.toBase64()).append(NL); - buf.append("#").append(NL); - buf.append("# reliability: ").append(profile.getReliabilityValue()).append(NL); - buf.append("# integration: ").append(profile.getIntegrationValue()).append(NL); - buf.append("# speedValue: ").append(profile.getSpeedValue()).append(NL); - buf.append("#").append(NL); - buf.append("# Groups: ").append(groups).append(NL); - buf.append("########################################################################").append(NL); - buf.append("##").append(NL); - buf.append("# Reliability bonus: used to affect the reliability score after all other calculations are done").append(NL); - buf.append("reliabilityBonus=").append(profile.getReliabilityBonus()).append(NL); - buf.append("# Integration bonus: used to affect the integration score after all other calculations are done").append(NL); - buf.append("integrationBonus=").append(profile.getIntegrationBonus()).append(NL); - buf.append("# Speed bonus: used to affect the speed score after all other calculations are done").append(NL); - buf.append("speedBonus=").append(profile.getSpeedBonus()).append(NL); - buf.append(NL).append(NL); - buf.append("# Last heard about: when did we last get a reference to this peer? (milliseconds since the epoch)").append(NL); - buf.append("lastHeardAbout=").append(profile.getLastHeardAbout()).append(NL); - buf.append("# First heard about: when did we first get a reference to this peer? (milliseconds since the epoch)").append(NL); - buf.append("firstHeardAbout=").append(profile.getFirstHeardAbout()).append(NL); - buf.append("# Last sent to successfully: when did we last send the peer a message successfully? (milliseconds from the epoch)").append(NL); - buf.append("lastSentToSuccessfully=").append(profile.getLastSendSuccessful()).append(NL); - buf.append("# Last failed send: when did we last fail to send a message to the peer? 
(milliseconds from the epoch)").append(NL); - buf.append("lastFailedSend=").append(profile.getLastSendFailed()).append(NL); - buf.append("# Last heard from: when did we last get a message from the peer? (milliseconds from the epoch)").append(NL); - buf.append("lastHeardFrom=").append(profile.getLastHeardFrom()).append(NL); - buf.append(NL); - - out.write(buf.toString().getBytes()); - - profile.getTunnelHistory().store(out); - profile.getDBHistory().store(out); - - if (profile.getIsExpanded()) { - // only write out expanded data if, uh, we've got it - profile.getCommError().store(out, "commError"); - profile.getDbIntroduction().store(out, "dbIntroduction"); - profile.getDbResponseTime().store(out, "dbResponseTime"); - profile.getReceiveSize().store(out, "receiveSize"); - profile.getSendFailureSize().store(out, "sendFailureSize"); - profile.getSendSuccessSize().store(out, "tunnelCreateResponseTime"); - } + String groups = null; + if (_context.profileOrganizer().isFailing(profile.getPeer())) { + groups = "failing"; + } else if (!_context.profileOrganizer().isReliable(profile.getPeer())) { + groups = "not failing"; + } else { + if (_context.profileOrganizer().isFastAndReliable(profile.getPeer())) + groups = "fast and reliable"; + else + groups = "reliable"; + + if (_context.profileOrganizer().isWellIntegrated(profile.getPeer())) + groups = groups + ", well integrated"; + } + + StringBuffer buf = new StringBuffer(512); + buf.append("########################################################################").append(NL); + buf.append("# profile for ").append(profile.getPeer().toBase64()).append(NL); + if (_us != null) + buf.append("# as calculated by ").append(_us.toBase64()).append(NL); + buf.append("#").append(NL); + buf.append("# reliability: ").append(profile.getReliabilityValue()).append(NL); + buf.append("# integration: ").append(profile.getIntegrationValue()).append(NL); + buf.append("# speedValue: ").append(profile.getSpeedValue()).append(NL); + 
buf.append("#").append(NL); + buf.append("# Groups: ").append(groups).append(NL); + buf.append("########################################################################").append(NL); + buf.append("##").append(NL); + buf.append("# Reliability bonus: used to affect the reliability score after all other calculations are done").append(NL); + buf.append("reliabilityBonus=").append(profile.getReliabilityBonus()).append(NL); + buf.append("# Integration bonus: used to affect the integration score after all other calculations are done").append(NL); + buf.append("integrationBonus=").append(profile.getIntegrationBonus()).append(NL); + buf.append("# Speed bonus: used to affect the speed score after all other calculations are done").append(NL); + buf.append("speedBonus=").append(profile.getSpeedBonus()).append(NL); + buf.append(NL).append(NL); + buf.append("# Last heard about: when did we last get a reference to this peer? (milliseconds since the epoch)").append(NL); + buf.append("lastHeardAbout=").append(profile.getLastHeardAbout()).append(NL); + buf.append("# First heard about: when did we first get a reference to this peer? (milliseconds since the epoch)").append(NL); + buf.append("firstHeardAbout=").append(profile.getFirstHeardAbout()).append(NL); + buf.append("# Last sent to successfully: when did we last send the peer a message successfully? (milliseconds from the epoch)").append(NL); + buf.append("lastSentToSuccessfully=").append(profile.getLastSendSuccessful()).append(NL); + buf.append("# Last failed send: when did we last fail to send a message to the peer? (milliseconds from the epoch)").append(NL); + buf.append("lastFailedSend=").append(profile.getLastSendFailed()).append(NL); + buf.append("# Last heard from: when did we last get a message from the peer? 
(milliseconds from the epoch)").append(NL); + buf.append("lastHeardFrom=").append(profile.getLastHeardFrom()).append(NL); + buf.append(NL); + + out.write(buf.toString().getBytes()); + + profile.getTunnelHistory().store(out); + profile.getDBHistory().store(out); + + if (profile.getIsExpanded()) { + // only write out expanded data if, uh, we've got it + profile.getCommError().store(out, "commError"); + profile.getDbIntroduction().store(out, "dbIntroduction"); + profile.getDbResponseTime().store(out, "dbResponseTime"); + profile.getReceiveSize().store(out, "receiveSize"); + profile.getSendFailureSize().store(out, "sendFailureSize"); + profile.getSendSuccessSize().store(out, "tunnelCreateResponseTime"); + } } public Set readProfiles() { - long start = Clock.getInstance().now(); - Set files = selectFiles(); - Set profiles = new HashSet(files.size()); - for (Iterator iter = files.iterator(); iter.hasNext();) { - File f = (File)iter.next(); - PeerProfile profile = readProfile(f); - if (profile != null) - profiles.add(profile); - } - long duration = Clock.getInstance().now() - start; - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Loading " + profiles.size() + " took " + duration + "ms"); - return profiles; + long start = _context.clock().now(); + Set files = selectFiles(); + Set profiles = new HashSet(files.size()); + for (Iterator iter = files.iterator(); iter.hasNext();) { + File f = (File)iter.next(); + PeerProfile profile = readProfile(f); + if (profile != null) + profiles.add(profile); + } + long duration = _context.clock().now() - start; + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Loading " + profiles.size() + " took " + duration + "ms"); + return profiles; } private Set selectFiles() { - File files[] = getProfileDir().listFiles(new FilenameFilter() { - public boolean accept(File dir, String filename) { - return (filename.startsWith("profile-") && filename.endsWith(".dat")); - } - }); - Set rv = new HashSet(files.length); - for (int i = 0; i < files.length; i++) - 
rv.add(files[i]); - return rv; + File files[] = getProfileDir().listFiles(new FilenameFilter() { + public boolean accept(File dir, String filename) { + return (filename.startsWith("profile-") && filename.endsWith(".dat")); + } + }); + Set rv = new HashSet(files.length); + for (int i = 0; i < files.length; i++) + rv.add(files[i]); + return rv; } private PeerProfile readProfile(File file) { - Hash peer = getHash(file.getName()); - try { - if (peer == null) return null; - PeerProfile profile = new PeerProfile(peer); - Properties props = new Properties(); - - loadProps(props, file); - - profile.setReliabilityBonus(getLong(props, "reliabilityBonus")); - profile.setIntegrationBonus(getLong(props, "integrationBonus")); - profile.setSpeedBonus(getLong(props, "speedBonus")); - - profile.setLastHeardAbout(getLong(props, "lastHeardAbout")); - profile.setFirstHeardAbout(getLong(props, "firstHeardAbout")); - profile.setLastSendSuccessful(getLong(props, "lastSentToSuccessfully")); - profile.setLastSendFailed(getLong(props, "lastFailedSend")); - profile.setLastHeardFrom(getLong(props, "lastHeardFrom")); - - profile.getTunnelHistory().load(props); - profile.getDBHistory().load(props); - - profile.getCommError().load(props, "commError", true); - profile.getDbIntroduction().load(props, "dbIntroduction", true); - profile.getDbResponseTime().load(props, "dbResponseTime", true); - profile.getReceiveSize().load(props, "receiveSize", true); - profile.getSendFailureSize().load(props, "sendFailureSize", true); - profile.getSendSuccessSize().load(props, "tunnelCreateResponseTime", true); - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Loaded the profile for " + peer.toBase64() + " from " + file.getName()); - - return profile; - } catch (IllegalArgumentException iae) { - _log.error("Error loading profile from " +file.getName(), iae); - file.delete(); - return null; - } + Hash peer = getHash(file.getName()); + try { + if (peer == null) return null; + PeerProfile profile = new 
PeerProfile(_context, peer); + Properties props = new Properties(); + + loadProps(props, file); + + profile.setReliabilityBonus(getLong(props, "reliabilityBonus")); + profile.setIntegrationBonus(getLong(props, "integrationBonus")); + profile.setSpeedBonus(getLong(props, "speedBonus")); + + profile.setLastHeardAbout(getLong(props, "lastHeardAbout")); + profile.setFirstHeardAbout(getLong(props, "firstHeardAbout")); + profile.setLastSendSuccessful(getLong(props, "lastSentToSuccessfully")); + profile.setLastSendFailed(getLong(props, "lastFailedSend")); + profile.setLastHeardFrom(getLong(props, "lastHeardFrom")); + + profile.getTunnelHistory().load(props); + profile.getDBHistory().load(props); + + profile.getCommError().load(props, "commError", true); + profile.getDbIntroduction().load(props, "dbIntroduction", true); + profile.getDbResponseTime().load(props, "dbResponseTime", true); + profile.getReceiveSize().load(props, "receiveSize", true); + profile.getSendFailureSize().load(props, "sendFailureSize", true); + profile.getSendSuccessSize().load(props, "tunnelCreateResponseTime", true); + + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Loaded the profile for " + peer.toBase64() + " from " + file.getName()); + + return profile; + } catch (IllegalArgumentException iae) { + _log.error("Error loading profile from " +file.getName(), iae); + file.delete(); + return null; + } } private final static long getLong(Properties props, String key) { - String val = props.getProperty(key); - if (val != null) { - try { - return Long.parseLong(val); - } catch (NumberFormatException nfe) { - return 0; - } - } - return 0; + String val = props.getProperty(key); + if (val != null) { + try { + return Long.parseLong(val); + } catch (NumberFormatException nfe) { + return 0; + } + } + return 0; } private void loadProps(Properties props, File file) { - BufferedReader in = null; - try { - in = new BufferedReader(new InputStreamReader(new FileInputStream(file)), 16*1024); - String line = null; - 
while ( (line = in.readLine()) != null) { - if (line.trim().length() <= 0) continue; - if (line.charAt(0) == '#') continue; - int split = line.indexOf('='); - if (split <= 0) continue; - String key = line.substring(0, split); - String val = line.substring(split+1); - if ( (key.length() > 0) && (val.length() > 0) ) - props.setProperty(key, val); - } - } catch (IOException ioe) { - _log.error("Error loading properties from " + file.getName(), ioe); - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - } - + BufferedReader in = null; + try { + in = new BufferedReader(new InputStreamReader(new FileInputStream(file)), 16*1024); + String line = null; + while ( (line = in.readLine()) != null) { + if (line.trim().length() <= 0) continue; + if (line.charAt(0) == '#') continue; + int split = line.indexOf('='); + if (split <= 0) continue; + String key = line.substring(0, split); + String val = line.substring(split+1); + if ( (key.length() > 0) && (val.length() > 0) ) + props.setProperty(key, val); + } + } catch (IOException ioe) { + _log.error("Error loading properties from " + file.getName(), ioe); + } finally { + if (in != null) try { in.close(); } catch (IOException ioe) {} + } + } private Hash getHash(String name) { - String key = name.substring("profile-".length()); - key = key.substring(0, key.length() - ".dat".length()); - Hash h = new Hash(); - try { - h.fromBase64(key); - return h; - } catch (DataFormatException dfe) { - return null; - } + String key = name.substring("profile-".length()); + key = key.substring(0, key.length() - ".dat".length()); + Hash h = new Hash(); + try { + h.fromBase64(key); + return h; + } catch (DataFormatException dfe) { + return null; + } } private File pickFile(PeerProfile profile) { - return new File(getProfileDir(), "profile-" + profile.getPeer().toBase64() + ".dat"); + return new File(getProfileDir(), "profile-" + profile.getPeer().toBase64() + ".dat"); } private File getProfileDir() { - if (_profileDir == 
null) { - String dir = Router.getInstance().getConfigSetting(PROP_PEER_PROFILE_DIR); - if (dir == null) { - _log.info("No peer profile dir specified [" + PROP_PEER_PROFILE_DIR + "], using [" + DEFAULT_PEER_PROFILE_DIR + "]"); - dir = DEFAULT_PEER_PROFILE_DIR; - } - _profileDir = new File(dir); - } - return _profileDir; + if (_profileDir == null) { + String dir = _context.router().getConfigSetting(PROP_PEER_PROFILE_DIR); + if (dir == null) { + _log.info("No peer profile dir specified [" + PROP_PEER_PROFILE_DIR + "], using [" + DEFAULT_PEER_PROFILE_DIR + "]"); + dir = DEFAULT_PEER_PROFILE_DIR; + } + _profileDir = new File(dir); + } + return _profileDir; } /** generate 1000 profiles */ public static void main(String args[]) { - System.out.println("Generating 1000 profiles"); - File dir = new File("profiles"); - dir.mkdirs(); - byte data[] = new byte[32]; - java.util.Random rnd = new java.util.Random(); - for (int i = 0; i < 1000; i++) { - rnd.nextBytes(data); - Hash peer = new Hash(data); - try { - File f = new File(dir, "profile-" + peer.toBase64() + ".dat"); - f.createNewFile(); - System.out.println("Created " + peer.toBase64()); - } catch (IOException ioe) {} - } - System.out.println("1000 peers created in " + dir.getAbsolutePath()); + System.out.println("Generating 1000 profiles"); + File dir = new File("profiles"); + dir.mkdirs(); + byte data[] = new byte[32]; + java.util.Random rnd = new java.util.Random(); + for (int i = 0; i < 1000; i++) { + rnd.nextBytes(data); + Hash peer = new Hash(data); + try { + File f = new File(dir, "profile-" + peer.toBase64() + ".dat"); + f.createNewFile(); + System.out.println("Created " + peer.toBase64()); + } catch (IOException ioe) {} + } + System.out.println("1000 peers created in " + dir.getAbsolutePath()); } } diff --git a/router/java/src/net/i2p/router/peermanager/ReliabilityCalculator.java b/router/java/src/net/i2p/router/peermanager/ReliabilityCalculator.java index 3fb7aa0af..bf1eb8ebe 100644 --- 
a/router/java/src/net/i2p/router/peermanager/ReliabilityCalculator.java +++ b/router/java/src/net/i2p/router/peermanager/ReliabilityCalculator.java @@ -2,70 +2,77 @@ package net.i2p.router.peermanager; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Determine how reliable the peer is - how likely they'll be able to respond or * otherwise carry out whatever we ask them to (or even merely be reachable) * */ -class ReliabilityCalculator extends Calculator { - private final static Log _log = new Log(ReliabilityCalculator.class); +public class ReliabilityCalculator extends Calculator { + private Log _log; + private RouterContext _context; + + public ReliabilityCalculator(RouterContext context) { + _context = context; + _log = context.logManager().getLog(ReliabilityCalculator.class); + } public double calc(PeerProfile profile) { - // if we've never succeeded (even if we've never tried), the reliability is zip - if (profile.getSendSuccessSize().getRate(60*60*1000).getLifetimeEventCount() < 0) - return profile.getReliabilityBonus(); - - long val = 0; - val += profile.getSendSuccessSize().getRate(60*1000).getCurrentEventCount() * 5; - val += profile.getSendSuccessSize().getRate(60*1000).getLastEventCount() * 2; - val += profile.getSendSuccessSize().getRate(60*60*1000).getLastEventCount(); - val += profile.getSendSuccessSize().getRate(60*60*1000).getCurrentEventCount(); - - val += profile.getTunnelCreateResponseTime().getRate(60*1000).getCurrentEventCount() * 10; - val += profile.getTunnelCreateResponseTime().getRate(60*1000).getLastEventCount() * 5; - val += profile.getTunnelCreateResponseTime().getRate(60*60*1000).getCurrentEventCount(); - val += profile.getTunnelCreateResponseTime().getRate(60*60*1000).getLastEventCount(); - - val -= profile.getSendFailureSize().getRate(60*1000).getLastEventCount() * 5; - val -= profile.getSendFailureSize().getRate(60*60*1000).getCurrentEventCount()*2; - val -= 
profile.getSendFailureSize().getRate(60*60*1000).getLastEventCount()*2; - - // penalize them heavily for dropping netDb requests - val -= profile.getDBHistory().getFailedLookupRate().getRate(60*1000).getCurrentEventCount() * 10; - val -= profile.getDBHistory().getFailedLookupRate().getRate(60*1000).getLastEventCount() * 5; - //val -= profile.getDBHistory().getFailedLookupRate().getRate(60*60*1000).getCurrentEventCount(); - //val -= profile.getDBHistory().getFailedLookupRate().getRate(60*60*1000).getLastEventCount(); - //val -= profile.getDBHistory().getFailedLookupRate().getRate(24*60*60*1000).getCurrentEventCount() * 50; - //val -= profile.getDBHistory().getFailedLookupRate().getRate(24*60*60*1000).getLastEventCount() * 20; - - val -= profile.getCommError().getRate(60*1000).getCurrentEventCount() * 200; - val -= profile.getCommError().getRate(60*1000).getLastEventCount() * 200; - - val -= profile.getCommError().getRate(60*60*1000).getCurrentEventCount() * 50; - val -= profile.getCommError().getRate(60*60*1000).getLastEventCount() * 50; - - val -= profile.getCommError().getRate(24*60*60*1000).getCurrentEventCount() * 10; - - long now = Clock.getInstance().now(); - - long timeSinceRejection = now - profile.getTunnelHistory().getLastRejected(); - if (timeSinceRejection > 60*60*1000) { - // noop. rejection was over 60 minutes ago - } else if (timeSinceRejection > 10*60*1000) { - val -= 10; // 10-60 minutes ago we got a rejection - } else if (timeSinceRejection > 60*1000) { - val -= 50; // 1-10 minutes ago we got a rejection - } else { - val -= 100; // we got a rejection within the last minute - } - - if ( (profile.getLastSendSuccessful() > 0) && (now - 24*60*60*1000 > profile.getLastSendSuccessful()) ) { - // we know they're real, but we havent sent them a message successfully in over a day. 
- val -= 1000; - } - - val += profile.getReliabilityBonus(); - return val; + // if we've never succeeded (even if we've never tried), the reliability is zip + if (profile.getSendSuccessSize().getRate(60*60*1000).getLifetimeEventCount() < 0) + return profile.getReliabilityBonus(); + + long val = 0; + val += profile.getSendSuccessSize().getRate(60*1000).getCurrentEventCount() * 5; + val += profile.getSendSuccessSize().getRate(60*1000).getLastEventCount() * 2; + val += profile.getSendSuccessSize().getRate(60*60*1000).getLastEventCount(); + val += profile.getSendSuccessSize().getRate(60*60*1000).getCurrentEventCount(); + + val += profile.getTunnelCreateResponseTime().getRate(60*1000).getCurrentEventCount() * 10; + val += profile.getTunnelCreateResponseTime().getRate(60*1000).getLastEventCount() * 5; + val += profile.getTunnelCreateResponseTime().getRate(60*60*1000).getCurrentEventCount(); + val += profile.getTunnelCreateResponseTime().getRate(60*60*1000).getLastEventCount(); + + val -= profile.getSendFailureSize().getRate(60*1000).getLastEventCount() * 5; + val -= profile.getSendFailureSize().getRate(60*60*1000).getCurrentEventCount()*2; + val -= profile.getSendFailureSize().getRate(60*60*1000).getLastEventCount()*2; + + // penalize them heavily for dropping netDb requests + val -= profile.getDBHistory().getFailedLookupRate().getRate(60*1000).getCurrentEventCount() * 10; + val -= profile.getDBHistory().getFailedLookupRate().getRate(60*1000).getLastEventCount() * 5; + //val -= profile.getDBHistory().getFailedLookupRate().getRate(60*60*1000).getCurrentEventCount(); + //val -= profile.getDBHistory().getFailedLookupRate().getRate(60*60*1000).getLastEventCount(); + //val -= profile.getDBHistory().getFailedLookupRate().getRate(24*60*60*1000).getCurrentEventCount() * 50; + //val -= profile.getDBHistory().getFailedLookupRate().getRate(24*60*60*1000).getLastEventCount() * 20; + + val -= profile.getCommError().getRate(60*1000).getCurrentEventCount() * 200; + val -= 
profile.getCommError().getRate(60*1000).getLastEventCount() * 200; + + val -= profile.getCommError().getRate(60*60*1000).getCurrentEventCount() * 50; + val -= profile.getCommError().getRate(60*60*1000).getLastEventCount() * 50; + + val -= profile.getCommError().getRate(24*60*60*1000).getCurrentEventCount() * 10; + + long now = _context.clock().now(); + + long timeSinceRejection = now - profile.getTunnelHistory().getLastRejected(); + if (timeSinceRejection > 60*60*1000) { + // noop. rejection was over 60 minutes ago + } else if (timeSinceRejection > 10*60*1000) { + val -= 10; // 10-60 minutes ago we got a rejection + } else if (timeSinceRejection > 60*1000) { + val -= 50; // 1-10 minutes ago we got a rejection + } else { + val -= 100; // we got a rejection within the last minute + } + + if ( (profile.getLastSendSuccessful() > 0) && (now - 24*60*60*1000 > profile.getLastSendSuccessful()) ) { + // we know they're real, but we havent sent them a message successfully in over a day. + val -= 1000; + } + + val += profile.getReliabilityBonus(); + return val; } } diff --git a/router/java/src/net/i2p/router/peermanager/SpeedCalculator.java b/router/java/src/net/i2p/router/peermanager/SpeedCalculator.java index c4da6c408..2aac1b4e0 100644 --- a/router/java/src/net/i2p/router/peermanager/SpeedCalculator.java +++ b/router/java/src/net/i2p/router/peermanager/SpeedCalculator.java @@ -3,6 +3,7 @@ package net.i2p.router.peermanager; import net.i2p.stat.Rate; import net.i2p.stat.RateStat; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Quantify how fast the peer is - how fast they respond to our requests, how fast @@ -10,51 +11,57 @@ import net.i2p.util.Log; * as well as their load. 
* */ -class SpeedCalculator extends Calculator { - private final static Log _log = new Log(SpeedCalculator.class); +public class SpeedCalculator extends Calculator { + private Log _log; + private RouterContext _context; + + public SpeedCalculator(RouterContext context) { + _context = context; + _log = context.logManager().getLog(SpeedCalculator.class); + } public double calc(PeerProfile profile) { - double dbResponseTime = profile.getDbResponseTime().getRate(60*1000).getLifetimeAverageValue(); - double tunnelResponseTime = profile.getTunnelCreateResponseTime().getRate(60*1000).getLifetimeAverageValue(); - double roundTripRate = Math.max(dbResponseTime, tunnelResponseTime); - - // send and receive rates are the (period rate) * (saturation %) - double sendRate = calcSendRate(profile); - double receiveRate = calcReceiveRate(profile); - - - double val = 60000.0d - 0.1*roundTripRate + sendRate + receiveRate; - // if we don't have any data, the rate is 0 - if ( (roundTripRate == 0.0d) && (sendRate == 0.0d) ) - val = 0.0; - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("roundTripRate: " + roundTripRate + "ms sendRate: " + sendRate + "bytes/second, receiveRate: " + receiveRate + "bytes/second, val: " + val + " for " + profile.getPeer().toBase64()); - - val += profile.getSpeedBonus(); - return val; + double dbResponseTime = profile.getDbResponseTime().getRate(60*1000).getLifetimeAverageValue(); + double tunnelResponseTime = profile.getTunnelCreateResponseTime().getRate(60*1000).getLifetimeAverageValue(); + double roundTripRate = Math.max(dbResponseTime, tunnelResponseTime); + + // send and receive rates are the (period rate) * (saturation %) + double sendRate = calcSendRate(profile); + double receiveRate = calcReceiveRate(profile); + + + double val = 60000.0d - 0.1*roundTripRate + sendRate + receiveRate; + // if we don't have any data, the rate is 0 + if ( (roundTripRate == 0.0d) && (sendRate == 0.0d) ) + val = 0.0; + + if (_log.shouldLog(Log.DEBUG)) + 
_log.debug("roundTripRate: " + roundTripRate + "ms sendRate: " + sendRate + "bytes/second, receiveRate: " + receiveRate + "bytes/second, val: " + val + " for " + profile.getPeer().toBase64()); + + val += profile.getSpeedBonus(); + return val; } private double calcSendRate(PeerProfile profile) { return calcRate(profile.getSendSuccessSize()); } private double calcReceiveRate(PeerProfile profile) { return calcRate(profile.getReceiveSize()); } private double calcRate(RateStat stat) { - double rate = 0.0d; - Rate hourRate = stat.getRate(60*60*1000); - rate = calcRate(hourRate); - return rate; + double rate = 0.0d; + Rate hourRate = stat.getRate(60*60*1000); + rate = calcRate(hourRate); + return rate; } private double calcRate(Rate rate) { - long events = rate.getLastEventCount() + rate.getCurrentEventCount(); - if (events >= 1) { - double ms = rate.getLastTotalEventTime() + rate.getCurrentTotalEventTime(); - double bytes = rate.getLastTotalValue() + rate.getCurrentTotalValue(); - if ( (bytes > 0) && (ms > 0) ) { - return (bytes * 1000.0d) / ms; - } - } - return 0.0d; + long events = rate.getLastEventCount() + rate.getCurrentEventCount(); + if (events >= 1) { + double ms = rate.getLastTotalEventTime() + rate.getCurrentTotalEventTime(); + double bytes = rate.getLastTotalValue() + rate.getCurrentTotalValue(); + if ( (bytes > 0) && (ms > 0) ) { + return (bytes * 1000.0d) / ms; + } + } + return 0.0d; } } diff --git a/router/java/src/net/i2p/router/peermanager/TunnelHistory.java b/router/java/src/net/i2p/router/peermanager/TunnelHistory.java index 124afec74..95a4a3a6a 100644 --- a/router/java/src/net/i2p/router/peermanager/TunnelHistory.java +++ b/router/java/src/net/i2p/router/peermanager/TunnelHistory.java @@ -4,29 +4,31 @@ import java.io.IOException; import java.io.OutputStream; import java.util.Properties; -import net.i2p.util.Clock; +import net.i2p.router.RouterContext; /** * Tunnel related history information * */ public class TunnelHistory { + private RouterContext 
_context; private volatile long _lifetimeAgreedTo; private volatile long _lifetimeRejected; private volatile long _lastAgreedTo; private volatile long _lastRejected; private volatile long _lifetimeFailed; private volatile long _lastFailed; - - public TunnelHistory() { - _lifetimeAgreedTo = 0; - _lifetimeFailed = 0; - _lifetimeRejected = 0; - _lastAgreedTo = 0; - _lastFailed = 0; - _lastRejected = 0; + + public TunnelHistory(RouterContext context) { + _context = context; + _lifetimeAgreedTo = 0; + _lifetimeFailed = 0; + _lifetimeRejected = 0; + _lastAgreedTo = 0; + _lastFailed = 0; + _lastRejected = 0; } - + /** total tunnels the peer has agreed to participate in */ public long getLifetimeAgreedTo() { return _lifetimeAgreedTo; } /** total tunnels the peer has refused to participate in */ @@ -39,20 +41,20 @@ public class TunnelHistory { public long getLastRejected() { return _lastRejected; } /** when the last tunnel the peer participated in failed */ public long getLastFailed() { return _lastFailed; } - + public void incrementAgreedTo() { - _lifetimeAgreedTo++; - _lastAgreedTo = Clock.getInstance().now(); + _lifetimeAgreedTo++; + _lastAgreedTo = _context.clock().now(); } - public void incrementRejected() { - _lifetimeRejected++; - _lastRejected = Clock.getInstance().now(); + public void incrementRejected() { + _lifetimeRejected++; + _lastRejected = _context.clock().now(); } - public void incrementFailed() { - _lifetimeFailed++; - _lastFailed = Clock.getInstance().now(); + public void incrementFailed() { + _lifetimeFailed++; + _lastFailed = _context.clock().now(); } - + public void setLifetimeAgreedTo(long num) { _lifetimeAgreedTo = num; } public void setLifetimeRejected(long num) { _lifetimeRejected = num; } public void setLifetimeFailed(long num) { _lifetimeFailed = num; } @@ -61,45 +63,45 @@ public class TunnelHistory { public void setLastFailed(long when) { _lastFailed = when; } private final static String NL = System.getProperty("line.separator"); - + public void 
store(OutputStream out) throws IOException { - StringBuffer buf = new StringBuffer(512); - buf.append(NL); - buf.append("#################").append(NL); - buf.append("# Tunnel history").append(NL); - buf.append("###").append(NL); - add(buf, "lastAgreedTo", _lastAgreedTo, "When did the peer last agree to participate in a tunnel? (milliseconds since the epoch)"); - add(buf, "lastFailed", _lastFailed, "When was the last time a tunnel that the peer agreed to participate failed? (milliseconds since the epoch)"); - add(buf, "lastRejected", _lastRejected, "When was the last time the peer refused to participate in a tunnel? (milliseconds since the epoch)"); - add(buf, "lifetimeAgreedTo", _lifetimeAgreedTo, "How many tunnels has the peer ever agreed to participate in?"); - add(buf, "lifetimeFailed", _lifetimeFailed, "How many tunnels has the peer ever agreed to participate in that failed prematurely?"); - add(buf, "lifetimeRejected", _lifetimeRejected, "How many tunnels has the peer ever refused to participate in?"); - out.write(buf.toString().getBytes()); + StringBuffer buf = new StringBuffer(512); + buf.append(NL); + buf.append("#################").append(NL); + buf.append("# Tunnel history").append(NL); + buf.append("###").append(NL); + add(buf, "lastAgreedTo", _lastAgreedTo, "When did the peer last agree to participate in a tunnel? (milliseconds since the epoch)"); + add(buf, "lastFailed", _lastFailed, "When was the last time a tunnel that the peer agreed to participate failed? (milliseconds since the epoch)"); + add(buf, "lastRejected", _lastRejected, "When was the last time the peer refused to participate in a tunnel? 
(milliseconds since the epoch)"); + add(buf, "lifetimeAgreedTo", _lifetimeAgreedTo, "How many tunnels has the peer ever agreed to participate in?"); + add(buf, "lifetimeFailed", _lifetimeFailed, "How many tunnels has the peer ever agreed to participate in that failed prematurely?"); + add(buf, "lifetimeRejected", _lifetimeRejected, "How many tunnels has the peer ever refused to participate in?"); + out.write(buf.toString().getBytes()); } private void add(StringBuffer buf, String name, long val, String description) { - buf.append("# ").append(name.toUpperCase()).append(NL).append("# ").append(description).append(NL); - buf.append("tunnels.").append(name).append('=').append(val).append(NL).append(NL); + buf.append("# ").append(name.toUpperCase()).append(NL).append("# ").append(description).append(NL); + buf.append("tunnels.").append(name).append('=').append(val).append(NL).append(NL); } public void load(Properties props) { - _lastAgreedTo = getLong(props, "tunnels.lastAgreedTo"); - _lastFailed = getLong(props, "tunnels.lastFailed"); - _lastRejected = getLong(props, "tunnels.lastRejected"); - _lifetimeAgreedTo = getLong(props, "tunnels.lifetimeAgreedTo"); - _lifetimeFailed = getLong(props, "tunnels.lifetimeFailed"); - _lifetimeRejected = getLong(props, "tunnels.lifetimeRejected"); + _lastAgreedTo = getLong(props, "tunnels.lastAgreedTo"); + _lastFailed = getLong(props, "tunnels.lastFailed"); + _lastRejected = getLong(props, "tunnels.lastRejected"); + _lifetimeAgreedTo = getLong(props, "tunnels.lifetimeAgreedTo"); + _lifetimeFailed = getLong(props, "tunnels.lifetimeFailed"); + _lifetimeRejected = getLong(props, "tunnels.lifetimeRejected"); } private final static long getLong(Properties props, String key) { - String val = props.getProperty(key); - if (val != null) { - try { - return Long.parseLong(val); - } catch (NumberFormatException nfe) { - return 0; - } - } - return 0; + String val = props.getProperty(key); + if (val != null) { + try { + return Long.parseLong(val); 
+ } catch (NumberFormatException nfe) { + return 0; + } + } + return 0; } } diff --git a/router/java/src/net/i2p/router/startup/BootCommSystemJob.java b/router/java/src/net/i2p/router/startup/BootCommSystemJob.java index 71937e36e..69700549a 100644 --- a/router/java/src/net/i2p/router/startup/BootCommSystemJob.java +++ b/router/java/src/net/i2p/router/startup/BootCommSystemJob.java @@ -1,9 +1,9 @@ package net.i2p.router.startup; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -16,36 +16,40 @@ import net.i2p.router.PeerManagerFacade; import net.i2p.router.Router; import net.i2p.router.TunnelManagerFacade; import net.i2p.util.Log; +import net.i2p.router.RouterContext; public class BootCommSystemJob extends JobImpl { - private static Log _log = new Log(BootCommSystemJob.class); + private Log _log; public static final String PROP_USE_TRUSTED_LINKS = "router.trustedLinks"; - public BootCommSystemJob() { } + public BootCommSystemJob(RouterContext context) { + super(context); + _log = context.logManager().getLog(BootCommSystemJob.class); + } public String getName() { return "Boot Communication System"; } public void runJob() { - // start up the network comm system - - CommSystemFacade.getInstance().startup(); - TunnelManagerFacade.getInstance().startup(); - PeerManagerFacade.getInstance().startup(); - - Job bootDb = new BootNetworkDbJob(); - boolean useTrusted = false; - String useTrustedStr = Router.getInstance().getConfigSetting(PROP_USE_TRUSTED_LINKS); - if (useTrustedStr != null) { - useTrusted = 
Boolean.TRUE.toString().equalsIgnoreCase(useTrustedStr); - } - if (useTrusted) { - _log.debug("Using trusted links..."); - JobQueue.getInstance().addJob(new BuildTrustedLinksJob(bootDb)); - return; - } else { - _log.debug("Not using trusted links - boot db"); - JobQueue.getInstance().addJob(bootDb); - } + // start up the network comm system + + _context.commSystem().startup(); + _context.tunnelManager().startup(); + _context.peerManager().startup(); + + Job bootDb = new BootNetworkDbJob(_context); + boolean useTrusted = false; + String useTrustedStr = _context.router().getConfigSetting(PROP_USE_TRUSTED_LINKS); + if (useTrustedStr != null) { + useTrusted = Boolean.TRUE.toString().equalsIgnoreCase(useTrustedStr); + } + if (useTrusted) { + _log.debug("Using trusted links..."); + _context.jobQueue().addJob(new BuildTrustedLinksJob(_context, bootDb)); + return; + } else { + _log.debug("Not using trusted links - boot db"); + _context.jobQueue().addJob(bootDb); + } } } diff --git a/router/java/src/net/i2p/router/startup/BootNetworkDbJob.java b/router/java/src/net/i2p/router/startup/BootNetworkDbJob.java index 2b5f9642a..a15163235 100644 --- a/router/java/src/net/i2p/router/startup/BootNetworkDbJob.java +++ b/router/java/src/net/i2p/router/startup/BootNetworkDbJob.java @@ -1,9 +1,9 @@ package net.i2p.router.startup; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -12,19 +12,22 @@ import net.i2p.router.JobImpl; import net.i2p.router.JobQueue; import net.i2p.router.NetworkDatabaseFacade; import net.i2p.util.Log; +import net.i2p.router.RouterContext; public class BootNetworkDbJob extends JobImpl { private static Log _log = new Log(BootNetworkDbJob.class); - public BootNetworkDbJob() { } + public BootNetworkDbJob(RouterContext ctx) { + super(ctx); + } public String getName() { return "Boot Network Database"; } public void runJob() { - // start up the network database - - NetworkDatabaseFacade.getInstance().startup(); - - JobQueue.getInstance().addJob(new StartAcceptingClientsJob()); + // start up the network database + + _context.netDb().startup(); + + _context.jobQueue().addJob(new StartAcceptingClientsJob(_context)); } } diff --git a/router/java/src/net/i2p/router/startup/BuildTrustedLinksJob.java b/router/java/src/net/i2p/router/startup/BuildTrustedLinksJob.java index 95fb811cf..35298d69a 100644 --- a/router/java/src/net/i2p/router/startup/BuildTrustedLinksJob.java +++ b/router/java/src/net/i2p/router/startup/BuildTrustedLinksJob.java @@ -1,9 +1,9 @@ package net.i2p.router.startup; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -12,22 +12,25 @@ import net.i2p.router.Job; import net.i2p.router.JobImpl; import net.i2p.router.JobQueue; import net.i2p.util.Log; +import net.i2p.router.RouterContext; public class BuildTrustedLinksJob extends JobImpl { - private static Log _log = new Log(BuildTrustedLinksJob.class); + private Log _log; private Job _next; - public BuildTrustedLinksJob(Job next) { - _next = next; + public BuildTrustedLinksJob(RouterContext context, Job next) { + super(context); + _log = _context.logManager().getLog(BuildTrustedLinksJob.class); + _next = next; } public String getName() { return "Build Trusted Links"; } public void runJob() { - // create trusted links with peers - - try { Thread.sleep(5000); } catch (InterruptedException ie) {} - - JobQueue.getInstance().addJob(_next); + // create trusted links with peers + + //try { Thread.sleep(5000); } catch (InterruptedException ie) {} + + _context.jobQueue().addJob(_next); } } diff --git a/router/java/src/net/i2p/router/startup/CreateRouterInfoJob.java b/router/java/src/net/i2p/router/startup/CreateRouterInfoJob.java index 1b7d85f45..0c2821a53 100644 --- a/router/java/src/net/i2p/router/startup/CreateRouterInfoJob.java +++ b/router/java/src/net/i2p/router/startup/CreateRouterInfoJob.java @@ -1,9 +1,9 @@ package net.i2p.router.startup; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -30,94 +30,96 @@ import net.i2p.router.Router; import net.i2p.router.StatisticsManager; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; public class CreateRouterInfoJob extends JobImpl { private static Log _log = new Log(CreateRouterInfoJob.class); private Job _next; - public CreateRouterInfoJob(Job next) { - _next = next; + public CreateRouterInfoJob(RouterContext ctx, Job next) { + super(ctx); + _next = next; } public String getName() { return "Create New Router Info"; } public void runJob() { - _log.debug("Creating the new router info"); - // create a new router info and store it where LoadRouterInfoJob looks - RouterInfo info = createRouterInfo(); - JobQueue.getInstance().addJob(_next); + _log.debug("Creating the new router info"); + // create a new router info and store it where LoadRouterInfoJob looks + RouterInfo info = createRouterInfo(); + _context.jobQueue().addJob(_next); } - static RouterInfo createRouterInfo() { - RouterInfo info = new RouterInfo(); - FileOutputStream fos1 = null; - FileOutputStream fos2 = null; - try { - info.setAddresses(CommSystemFacade.getInstance().createAddresses()); - info.setOptions(StatisticsManager.getInstance().publishStatistics()); - info.setPeers(new HashSet()); - info.setPublished(getCurrentPublishDate()); - RouterIdentity ident = new RouterIdentity(); - Certificate cert = new Certificate(); - cert.setCertificateType(Certificate.CERTIFICATE_TYPE_NULL); - cert.setPayload(null); - ident.setCertificate(cert); - PublicKey pubkey = null; - PrivateKey privkey = null; - SigningPublicKey signingPubKey = null; - SigningPrivateKey signingPrivKey = null; - Object keypair[] = KeyGenerator.getInstance().generatePKIKeypair(); - pubkey = (PublicKey)keypair[0]; - privkey = (PrivateKey)keypair[1]; - Object signingKeypair[] = KeyGenerator.getInstance().generateSigningKeypair(); - signingPubKey = (SigningPublicKey)signingKeypair[0]; - signingPrivKey = (SigningPrivateKey)signingKeypair[1]; - 
ident.setPublicKey(pubkey); - ident.setSigningPublicKey(signingPubKey); - info.setIdentity(ident); - - info.sign(signingPrivKey); - - String infoFilename = Router.getInstance().getConfigSetting(Router.PROP_INFO_FILENAME); - if (infoFilename == null) - infoFilename = Router.PROP_INFO_FILENAME_DEFAULT; - fos1 = new FileOutputStream(infoFilename); - info.writeBytes(fos1); - - String keyFilename = Router.getInstance().getConfigSetting(Router.PROP_KEYS_FILENAME); - if (keyFilename == null) - keyFilename = Router.PROP_KEYS_FILENAME_DEFAULT; - fos2 = new FileOutputStream(keyFilename); - privkey.writeBytes(fos2); - signingPrivKey.writeBytes(fos2); - pubkey.writeBytes(fos2); - signingPubKey.writeBytes(fos2); - - KeyManager.getInstance().setSigningPrivateKey(signingPrivKey); - KeyManager.getInstance().setSigningPublicKey(signingPubKey); - KeyManager.getInstance().setPrivateKey(privkey); - KeyManager.getInstance().setPublicKey(pubkey); - - _log.info("Router info created and stored at " + infoFilename + " with private keys stored at " + keyFilename + " [" + info + "]"); - } catch (DataFormatException dfe) { - _log.error("Error building the new router information", dfe); - } catch (IOException ioe) { - _log.error("Error writing out the new router information", ioe); - } finally { - if (fos1 != null) try { fos1.close(); } catch (IOException ioe) {} - if (fos2 != null) try { fos2.close(); } catch (IOException ioe) {} - } - return info; + RouterInfo createRouterInfo() { + RouterInfo info = new RouterInfo(); + FileOutputStream fos1 = null; + FileOutputStream fos2 = null; + try { + info.setAddresses(_context.commSystem().createAddresses()); + info.setOptions(_context.statPublisher().publishStatistics()); + info.setPeers(new HashSet()); + info.setPublished(getCurrentPublishDate(_context)); + RouterIdentity ident = new RouterIdentity(); + Certificate cert = new Certificate(); + cert.setCertificateType(Certificate.CERTIFICATE_TYPE_NULL); + cert.setPayload(null); + 
ident.setCertificate(cert); + PublicKey pubkey = null; + PrivateKey privkey = null; + SigningPublicKey signingPubKey = null; + SigningPrivateKey signingPrivKey = null; + Object keypair[] = _context.keyGenerator().generatePKIKeypair(); + pubkey = (PublicKey)keypair[0]; + privkey = (PrivateKey)keypair[1]; + Object signingKeypair[] = _context.keyGenerator().generateSigningKeypair(); + signingPubKey = (SigningPublicKey)signingKeypair[0]; + signingPrivKey = (SigningPrivateKey)signingKeypair[1]; + ident.setPublicKey(pubkey); + ident.setSigningPublicKey(signingPubKey); + info.setIdentity(ident); + + info.sign(signingPrivKey); + + String infoFilename = _context.router().getConfigSetting(Router.PROP_INFO_FILENAME); + if (infoFilename == null) + infoFilename = Router.PROP_INFO_FILENAME_DEFAULT; + fos1 = new FileOutputStream(infoFilename); + info.writeBytes(fos1); + + String keyFilename = _context.router().getConfigSetting(Router.PROP_KEYS_FILENAME); + if (keyFilename == null) + keyFilename = Router.PROP_KEYS_FILENAME_DEFAULT; + fos2 = new FileOutputStream(keyFilename); + privkey.writeBytes(fos2); + signingPrivKey.writeBytes(fos2); + pubkey.writeBytes(fos2); + signingPubKey.writeBytes(fos2); + + _context.keyManager().setSigningPrivateKey(signingPrivKey); + _context.keyManager().setSigningPublicKey(signingPubKey); + _context.keyManager().setPrivateKey(privkey); + _context.keyManager().setPublicKey(pubkey); + + _log.info("Router info created and stored at " + infoFilename + " with private keys stored at " + keyFilename + " [" + info + "]"); + } catch (DataFormatException dfe) { + _log.error("Error building the new router information", dfe); + } catch (IOException ioe) { + _log.error("Error writing out the new router information", ioe); + } finally { + if (fos1 != null) try { fos1.close(); } catch (IOException ioe) {} + if (fos2 != null) try { fos2.close(); } catch (IOException ioe) {} + } + return info; } /** - * We probably don't want to expose the exact time at which a router 
published its info. + * We probably don't want to expose the exact time at which a router published its info. * perhaps round down to the nearest minute? 10 minutes? 30 minutes? day? * */ - static long getCurrentPublishDate() { - _log.info("Setting published date to /now/"); - return Clock.getInstance().now(); + static long getCurrentPublishDate(RouterContext context) { + _log.info("Setting published date to /now/"); + return context.clock().now(); } } diff --git a/router/java/src/net/i2p/router/startup/LoadRouterInfoJob.java b/router/java/src/net/i2p/router/startup/LoadRouterInfoJob.java index 002529df2..2e3e78330 100644 --- a/router/java/src/net/i2p/router/startup/LoadRouterInfoJob.java +++ b/router/java/src/net/i2p/router/startup/LoadRouterInfoJob.java @@ -1,9 +1,9 @@ package net.i2p.router.startup; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -24,92 +24,99 @@ import net.i2p.router.KeyManager; import net.i2p.router.MessageHistory; import net.i2p.router.Router; import net.i2p.util.Log; +import net.i2p.router.RouterContext; public class LoadRouterInfoJob extends JobImpl { - private static Log _log = new Log(LoadRouterInfoJob.class); + private Log _log; private boolean _keysExist; private boolean _infoExists; private RouterInfo _us; + public LoadRouterInfoJob(RouterContext ctx) { + super(ctx); + _log = ctx.logManager().getLog(LoadRouterInfoJob.class); + } + public String getName() { return "Load Router Info"; } public void runJob() { - loadRouterInfo(); - if (_us == null) { - RebuildRouterInfoJob.rebuildRouterInfo(false); - JobQueue.getInstance().addJob(this); - return; - } else { - Router.getInstance().setRouterInfo(_us); - MessageHistory.initialize(); - JobQueue.getInstance().addJob(new BootCommSystemJob()); - } + loadRouterInfo(); + if (_us == null) { + RebuildRouterInfoJob r = new RebuildRouterInfoJob(_context); + r.rebuildRouterInfo(false); + _context.jobQueue().addJob(this); + return; + } else { + _context.router().setRouterInfo(_us); + _context.messageHistory().initialize(true); + _context.jobQueue().addJob(new BootCommSystemJob(_context)); + } } - + private void loadRouterInfo() { - String routerInfoFile = Router.getInstance().getConfigSetting(Router.PROP_INFO_FILENAME); - if (routerInfoFile == null) - routerInfoFile = Router.PROP_INFO_FILENAME_DEFAULT; - RouterInfo info = null; - boolean failedRead = false; - - - String keyFilename = Router.getInstance().getConfigSetting(Router.PROP_KEYS_FILENAME); - if (keyFilename == null) - keyFilename = Router.PROP_KEYS_FILENAME_DEFAULT; - - File rif = new File(routerInfoFile); - if (rif.exists()) - _infoExists = true; - File rkf = new File(keyFilename); - if (rkf.exists()) - _keysExist = true; - - FileInputStream fis1 = null; - FileInputStream fis2 = null; - try { - if (_infoExists) { - fis1 = new FileInputStream(rif); - info = new RouterInfo(); - 
info.readBytes(fis1); - _log.debug("Reading in routerInfo from " + rif.getAbsolutePath() + " and it has " + info.getAddresses().size() + " addresses"); - } - - if (_keysExist) { - fis2 = new FileInputStream(rkf); - PrivateKey privkey = new PrivateKey(); - privkey.readBytes(fis2); - SigningPrivateKey signingPrivKey = new SigningPrivateKey(); - signingPrivKey.readBytes(fis2); - PublicKey pubkey = new PublicKey(); - pubkey.readBytes(fis2); - SigningPublicKey signingPubKey = new SigningPublicKey(); - signingPubKey.readBytes(fis2); - - KeyManager.getInstance().setPrivateKey(privkey); - KeyManager.getInstance().setSigningPrivateKey(signingPrivKey); - KeyManager.getInstance().setPublicKey(pubkey); //info.getIdentity().getPublicKey()); - KeyManager.getInstance().setSigningPublicKey(signingPubKey); // info.getIdentity().getSigningPublicKey()); - } - - _us = info; - } catch (IOException ioe) { - _log.error("Error reading the router info from " + routerInfoFile + " and the keys from " + keyFilename, ioe); - _us = null; - rif.delete(); - rkf.delete(); - _infoExists = false; - _keysExist = false; - } catch (DataFormatException dfe) { - _log.error("Corrupt router info or keys at " + routerInfoFile + " / " + keyFilename, dfe); - _us = null; - rif.delete(); - rkf.delete(); - _infoExists = false; - _keysExist = false; - } finally { - if (fis1 != null) try { fis1.close(); } catch (IOException ioe) {} - if (fis2 != null) try { fis2.close(); } catch (IOException ioe) {} - } + String routerInfoFile = _context.router().getConfigSetting(Router.PROP_INFO_FILENAME); + if (routerInfoFile == null) + routerInfoFile = Router.PROP_INFO_FILENAME_DEFAULT; + RouterInfo info = null; + boolean failedRead = false; + + + String keyFilename = _context.router().getConfigSetting(Router.PROP_KEYS_FILENAME); + if (keyFilename == null) + keyFilename = Router.PROP_KEYS_FILENAME_DEFAULT; + + File rif = new File(routerInfoFile); + if (rif.exists()) + _infoExists = true; + File rkf = new File(keyFilename); + if 
(rkf.exists()) + _keysExist = true; + + FileInputStream fis1 = null; + FileInputStream fis2 = null; + try { + if (_infoExists) { + fis1 = new FileInputStream(rif); + info = new RouterInfo(); + info.readBytes(fis1); + _log.debug("Reading in routerInfo from " + rif.getAbsolutePath() + " and it has " + info.getAddresses().size() + " addresses"); + } + + if (_keysExist) { + fis2 = new FileInputStream(rkf); + PrivateKey privkey = new PrivateKey(); + privkey.readBytes(fis2); + SigningPrivateKey signingPrivKey = new SigningPrivateKey(); + signingPrivKey.readBytes(fis2); + PublicKey pubkey = new PublicKey(); + pubkey.readBytes(fis2); + SigningPublicKey signingPubKey = new SigningPublicKey(); + signingPubKey.readBytes(fis2); + + _context.keyManager().setPrivateKey(privkey); + _context.keyManager().setSigningPrivateKey(signingPrivKey); + _context.keyManager().setPublicKey(pubkey); //info.getIdentity().getPublicKey()); + _context.keyManager().setSigningPublicKey(signingPubKey); // info.getIdentity().getSigningPublicKey()); + } + + _us = info; + } catch (IOException ioe) { + _log.error("Error reading the router info from " + routerInfoFile + " and the keys from " + keyFilename, ioe); + _us = null; + rif.delete(); + rkf.delete(); + _infoExists = false; + _keysExist = false; + } catch (DataFormatException dfe) { + _log.error("Corrupt router info or keys at " + routerInfoFile + " / " + keyFilename, dfe); + _us = null; + rif.delete(); + rkf.delete(); + _infoExists = false; + _keysExist = false; + } finally { + if (fis1 != null) try { fis1.close(); } catch (IOException ioe) {} + if (fis2 != null) try { fis2.close(); } catch (IOException ioe) {} + } } } diff --git a/router/java/src/net/i2p/router/startup/ProcessInboundNetMessageJob.java b/router/java/src/net/i2p/router/startup/ProcessInboundNetMessageJob.java deleted file mode 100644 index f31d1f2c0..000000000 --- a/router/java/src/net/i2p/router/startup/ProcessInboundNetMessageJob.java +++ /dev/null @@ -1,47 +0,0 @@ -package 
net.i2p.router.startup; -/* - * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat - * your children, but it might. Use at your own risk. - * - */ - -import net.i2p.router.InNetMessage; -import net.i2p.router.InNetMessagePool; -import net.i2p.router.JobImpl; -import net.i2p.router.JobQueue; -import net.i2p.util.Clock; -import net.i2p.util.Log; - -/** - * Pull a message off the inbound net message pool and begin its processing. - * This job requeues itself on completion - * - */ -public class ProcessInboundNetMessageJob extends JobImpl { - private static Log _log = new Log(ProcessInboundNetMessageJob.class); - - public ProcessInboundNetMessageJob() { } - - public String getName() { return "Check For Inbound Network Message"; } - - public void runJob() { - // start up the network comm system - - if (InNetMessagePool.getInstance().getCount() > 0) { - InNetMessage inMessage = InNetMessagePool.getInstance().getNext(); - processMessage(inMessage); - // there are messages, no need to delay as there's real work to do - } else { - getTiming().setStartAfter(Clock.getInstance().now()+1000); - } - - JobQueue.getInstance().addJob(this); - } - - private void processMessage(InNetMessage message) { - _log.debug("Received message from " + message.getFromRouter() + "/" + message.getFromRouterHash() + " containing : " + message.getMessage()); - } -} diff --git a/router/java/src/net/i2p/router/startup/ReadConfigJob.java b/router/java/src/net/i2p/router/startup/ReadConfigJob.java index ae7d8454f..e1ccb6949 100644 --- a/router/java/src/net/i2p/router/startup/ReadConfigJob.java +++ b/router/java/src/net/i2p/router/startup/ReadConfigJob.java @@ -19,51 +19,54 @@ import net.i2p.router.JobQueue; import net.i2p.router.Router; import net.i2p.util.Clock; import net.i2p.util.Log; +import 
net.i2p.router.RouterContext; /** * Simply read the router config */ public class ReadConfigJob extends JobImpl { - private static Log _log = new Log(ReadConfigJob.class); - private final static long DELAY = 30*1000; // reread every 30 seconds + + public ReadConfigJob(RouterContext ctx) { + super(ctx); + } public String getName() { return "Read Router Configuration"; } public void runJob() { - doRead(); - getTiming().setStartAfter(Clock.getInstance().now() + DELAY); - JobQueue.getInstance().addJob(this); + doRead(_context); + getTiming().setStartAfter(_context.clock().now() + DELAY); + _context.jobQueue().addJob(this); } - public static void doRead() { - Router r = Router.getInstance(); - String f = r.getConfigFilename(); - Properties config = getConfig(f); - for (Iterator iter = config.keySet().iterator(); iter.hasNext(); ) { - String name = (String)iter.next(); - String val = config.getProperty(name); - _log.debug("Setting config prop [" + name + "] = [" + val + "]"); - Router.getInstance().setConfigSetting(name, val); - } + public static void doRead(RouterContext ctx) { + Router r = ctx.router(); + String f = r.getConfigFilename(); + Properties config = getConfig(ctx, f); + for (Iterator iter = config.keySet().iterator(); iter.hasNext(); ) { + String name = (String)iter.next(); + String val = config.getProperty(name); + r.setConfigSetting(name, val); + } } - private static Properties getConfig(String filename) { - _log.debug("Config file: " + filename); - Properties props = new Properties(); - FileInputStream fis = null; - try { - File f = new File(filename); - if (f.canRead()) { - fis = new FileInputStream(f); - props.load(fis); - } else { - _log.error("Configuration file " + filename + " does not exist"); - } - } catch (Exception ioe) { - _log.error("Error loading the router configuration from " + filename, ioe); - } finally { - if (fis != null) try { fis.close(); } catch (IOException ioe) {} - } - return props; + private static Properties 
getConfig(RouterContext ctx, String filename) { + Log log = ctx.logManager().getLog(ReadConfigJob.class); + log.debug("Config file: " + filename); + Properties props = new Properties(); + FileInputStream fis = null; + try { + File f = new File(filename); + if (f.canRead()) { + fis = new FileInputStream(f); + props.load(fis); + } else { + log.error("Configuration file " + filename + " does not exist"); + } + } catch (Exception ioe) { + log.error("Error loading the router configuration from " + filename, ioe); + } finally { + if (fis != null) try { fis.close(); } catch (IOException ioe) {} + } + return props; } } diff --git a/router/java/src/net/i2p/router/startup/RebuildRouterInfoJob.java b/router/java/src/net/i2p/router/startup/RebuildRouterInfoJob.java index da4b100bb..c6fc935c3 100644 --- a/router/java/src/net/i2p/router/startup/RebuildRouterInfoJob.java +++ b/router/java/src/net/i2p/router/startup/RebuildRouterInfoJob.java @@ -1,9 +1,9 @@ package net.i2p.router.startup; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -29,6 +29,7 @@ import net.i2p.router.Router; import net.i2p.router.StatisticsManager; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * If the file router.info.rebuild exists, rebuild the router info and republish. 
@@ -40,157 +41,125 @@ import net.i2p.util.Log; * */ public class RebuildRouterInfoJob extends JobImpl { - private static Log _log = new Log(RebuildRouterInfoJob.class); + private Log _log; private final static long REBUILD_DELAY = 45*1000; // every 30 seconds - public RebuildRouterInfoJob() { - super(); + public RebuildRouterInfoJob(RouterContext context) { + super(context); + _log = context.logManager().getLog(RebuildRouterInfoJob.class); } public String getName() { return "Rebuild Router Info"; } public void runJob() { - _log.debug("Testing to rebuild router info"); - String infoFile = Router.getInstance().getConfigSetting(Router.PROP_INFO_FILENAME); - if (infoFile == null) { - _log.debug("Info filename not configured, defaulting to " + Router.PROP_INFO_FILENAME_DEFAULT); - infoFile = Router.PROP_INFO_FILENAME_DEFAULT; - } - - String keyFilename = Router.getInstance().getConfigSetting(Router.PROP_KEYS_FILENAME); - if (keyFilename == null) - keyFilename = Router.PROP_KEYS_FILENAME_DEFAULT; - File keyFile = new File(keyFilename); - - File info = new File(infoFile); - if (!info.exists() || !keyFile.exists()) { - _log.info("Router info file [" + info.getAbsolutePath() + "] or private key file [" + keyFile.getAbsolutePath() + "] deleted, rebuilding"); - rebuildRouterInfo(); - } else { - _log.debug("Router info file [" + info.getAbsolutePath() + "] exists, not rebuilding"); - } - getTiming().setStartAfter(Clock.getInstance().now() + REBUILD_DELAY); - JobQueue.getInstance().addJob(this); + _log.debug("Testing to rebuild router info"); + String infoFile = _context.router().getConfigSetting(Router.PROP_INFO_FILENAME); + if (infoFile == null) { + _log.debug("Info filename not configured, defaulting to " + Router.PROP_INFO_FILENAME_DEFAULT); + infoFile = Router.PROP_INFO_FILENAME_DEFAULT; + } + + String keyFilename = _context.router().getConfigSetting(Router.PROP_KEYS_FILENAME); + if (keyFilename == null) + keyFilename = Router.PROP_KEYS_FILENAME_DEFAULT; + File keyFile = 
new File(keyFilename); + + File info = new File(infoFile); + if (!info.exists() || !keyFile.exists()) { + _log.info("Router info file [" + info.getAbsolutePath() + "] or private key file [" + keyFile.getAbsolutePath() + "] deleted, rebuilding"); + rebuildRouterInfo(); + } else { + _log.debug("Router info file [" + info.getAbsolutePath() + "] exists, not rebuilding"); + } + getTiming().setStartAfter(_context.clock().now() + REBUILD_DELAY); + _context.jobQueue().addJob(this); } - static void rebuildRouterInfo() { - rebuildRouterInfo(true); + void rebuildRouterInfo() { + rebuildRouterInfo(true); } - static void rebuildRouterInfo(boolean alreadyRunning) { - _log.debug("Rebuilding the new router info"); - boolean fullRebuild = false; - RouterInfo info = null; - String infoFilename = Router.getInstance().getConfigSetting(Router.PROP_INFO_FILENAME); - if (infoFilename == null) - infoFilename = Router.PROP_INFO_FILENAME_DEFAULT; - - String keyFilename = Router.getInstance().getConfigSetting(Router.PROP_KEYS_FILENAME); - if (keyFilename == null) - keyFilename = Router.PROP_KEYS_FILENAME_DEFAULT; - File keyFile = new File(keyFilename); - - if (keyFile.exists()) { - // ok, no need to rebuild a brand new identity, just update what we can - info = Router.getInstance().getRouterInfo(); - if (info == null) { - info = new RouterInfo(); - FileInputStream fis = null; - try { - fis = new FileInputStream(keyFile); - PrivateKey privkey = new PrivateKey(); - privkey.readBytes(fis); - SigningPrivateKey signingPrivKey = new SigningPrivateKey(); - signingPrivKey.readBytes(fis); - PublicKey pubkey = new PublicKey(); - pubkey.readBytes(fis); - SigningPublicKey signingPubKey = new SigningPublicKey(); - signingPubKey.readBytes(fis); - RouterIdentity ident = new RouterIdentity(); - ident.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); - ident.setPublicKey(pubkey); - ident.setSigningPublicKey(signingPubKey); - info.setIdentity(ident); - } catch (Exception e) { - 
_log.error("Error reading in the key data from " + keyFile.getAbsolutePath(), e); - if (fis != null) try { fis.close(); } catch (IOException ioe) {} - fis = null; - keyFile.delete(); - rebuildRouterInfo(alreadyRunning); - return; - } finally { - if (fis != null) try { fis.close(); } catch (IOException ioe) {} - } - } - - try { - info.setAddresses(CommSystemFacade.getInstance().createAddresses()); - info.setOptions(StatisticsManager.getInstance().publishStatistics()); - // info.setPeers(new HashSet()); // this would have the trusted peers - info.setPublished(CreateRouterInfoJob.getCurrentPublishDate()); - - info.sign(KeyManager.getInstance().getSigningPrivateKey()); - } catch (DataFormatException dfe) { - _log.error("Error rebuilding the new router info", dfe); - return; - } - - FileOutputStream fos = null; - try { - fos = new FileOutputStream(infoFilename); - info.writeBytes(fos); - } catch (DataFormatException dfe) { - _log.error("Error rebuilding the router information", dfe); - } catch (IOException ioe) { - _log.error("Error writing out the rebuilt router information", ioe); - } finally { - if (fos != null) try { fos.close(); } catch (IOException ioe) {} - } - - } else { - _log.warn("Private key file " + keyFile.getAbsolutePath() + " deleted! 
Rebuilding a brand new router identity!"); - // this proc writes the keys and info to the file as well as builds the latest and greatest info - info = CreateRouterInfoJob.createRouterInfo(); - fullRebuild = true; - } - - //MessageHistory.initialize(); - Router.getInstance().setRouterInfo(info); - - ////// - // the following is commented out because its dangerous, extremely rarely used, - // and not necessary for a fundamental feature (rebuilding new identities without - // restarting) - ///// - - /* - _log.warn("Restarting the router identity, pausing activity"); - try { - JobQueue.getInstance().pauseQueue(); - try { Thread.sleep(1000); } catch (InterruptedException ie) {} - - if (alreadyRunning) { - if (fullRebuild) { - // if we changed our ident, then we need to drop our tunnels - TunnelManagerFacade.getInstance().shutdown(); - TunnelManagerFacade.getInstance().startup(); - } - - ClientManagerFacade.getInstance().shutdown(); - CommSystemFacade.getInstance().shutdown(); - // sleep to free up sockets - try { Thread.sleep(5000); } catch (InterruptedException ie) {} - CommSystemFacade.getInstance().startup(); - ClientManagerFacade.getInstance().startup(); - } - } catch (Throwable t) { - _log.error("Error during comm rebuilding", t); - } finally { - JobQueue.getInstance().unpauseQueue(); - } - NetworkDatabaseFacade.getInstance().publish(info); - */ - _log.info("Router info rebuilt and stored at " + infoFilename + " [" + info + "]"); + void rebuildRouterInfo(boolean alreadyRunning) { + _log.debug("Rebuilding the new router info"); + boolean fullRebuild = false; + RouterInfo info = null; + String infoFilename = _context.router().getConfigSetting(Router.PROP_INFO_FILENAME); + if (infoFilename == null) + infoFilename = Router.PROP_INFO_FILENAME_DEFAULT; + + String keyFilename = _context.router().getConfigSetting(Router.PROP_KEYS_FILENAME); + if (keyFilename == null) + keyFilename = Router.PROP_KEYS_FILENAME_DEFAULT; + File keyFile = new File(keyFilename); + + if 
(keyFile.exists()) { + // ok, no need to rebuild a brand new identity, just update what we can + info = _context.router().getRouterInfo(); + if (info == null) { + info = new RouterInfo(); + FileInputStream fis = null; + try { + fis = new FileInputStream(keyFile); + PrivateKey privkey = new PrivateKey(); + privkey.readBytes(fis); + SigningPrivateKey signingPrivKey = new SigningPrivateKey(); + signingPrivKey.readBytes(fis); + PublicKey pubkey = new PublicKey(); + pubkey.readBytes(fis); + SigningPublicKey signingPubKey = new SigningPublicKey(); + signingPubKey.readBytes(fis); + RouterIdentity ident = new RouterIdentity(); + ident.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); + ident.setPublicKey(pubkey); + ident.setSigningPublicKey(signingPubKey); + info.setIdentity(ident); + } catch (Exception e) { + _log.error("Error reading in the key data from " + keyFile.getAbsolutePath(), e); + if (fis != null) try { fis.close(); } catch (IOException ioe) {} + fis = null; + keyFile.delete(); + rebuildRouterInfo(alreadyRunning); + return; + } finally { + if (fis != null) try { fis.close(); } catch (IOException ioe) {} + } + } + + try { + info.setAddresses(_context.commSystem().createAddresses()); + info.setOptions(_context.statPublisher().publishStatistics()); + // info.setPeers(new HashSet()); // this would have the trusted peers + info.setPublished(CreateRouterInfoJob.getCurrentPublishDate(_context)); + + info.sign(_context.keyManager().getSigningPrivateKey()); + } catch (DataFormatException dfe) { + _log.error("Error rebuilding the new router info", dfe); + return; + } + + FileOutputStream fos = null; + try { + fos = new FileOutputStream(infoFilename); + info.writeBytes(fos); + } catch (DataFormatException dfe) { + _log.error("Error rebuilding the router information", dfe); + } catch (IOException ioe) { + _log.error("Error writing out the rebuilt router information", ioe); + } finally { + if (fos != null) try { fos.close(); } catch (IOException ioe) 
{} + } + + } else { + _log.warn("Private key file " + keyFile.getAbsolutePath() + " deleted! Rebuilding a brand new router identity!"); + // this proc writes the keys and info to the file as well as builds the latest and greatest info + CreateRouterInfoJob j = new CreateRouterInfoJob(_context, null); + info = j.createRouterInfo(); + fullRebuild = true; + } + + //MessageHistory.initialize(); + _context.router().setRouterInfo(info); + _log.info("Router info rebuilt and stored at " + infoFilename + " [" + info + "]"); } } diff --git a/router/java/src/net/i2p/router/startup/StartAcceptingClientsJob.java b/router/java/src/net/i2p/router/startup/StartAcceptingClientsJob.java index e1b91b43c..86c51881c 100644 --- a/router/java/src/net/i2p/router/startup/StartAcceptingClientsJob.java +++ b/router/java/src/net/i2p/router/startup/StartAcceptingClientsJob.java @@ -20,24 +20,28 @@ import net.i2p.router.admin.AdminManager; import net.i2p.util.Clock; import net.i2p.util.I2PThread; import net.i2p.util.Log; +import net.i2p.router.RouterContext; public class StartAcceptingClientsJob extends JobImpl { - private static Log _log = new Log(StartAcceptingClientsJob.class); + private Log _log; - public StartAcceptingClientsJob() { } + public StartAcceptingClientsJob(RouterContext context) { + super(context); + _log = context.logManager().getLog(StartAcceptingClientsJob.class); + } public String getName() { return "Start Accepting Clients"; } public void runJob() { // start up the network database - ClientManagerFacade.getInstance().startup(); + _context.clientManager().startup(); - JobQueue.getInstance().addJob(new ReadConfigJob()); - JobQueue.getInstance().addJob(new RebuildRouterInfoJob()); - AdminManager.getInstance().startup(); - JobQueue.getInstance().allowParallelOperation(); - JobQueue.getInstance().addJob(new LoadClientAppsJob()); + _context.jobQueue().addJob(new ReadConfigJob(_context)); + _context.jobQueue().addJob(new RebuildRouterInfoJob(_context)); + new 
AdminManager(_context).startup(); + _context.jobQueue().allowParallelOperation(); + _context.jobQueue().addJob(new LoadClientAppsJob(_context)); } public static void main(String args[]) { @@ -59,19 +63,20 @@ public class StartAcceptingClientsJob extends JobImpl { } class LoadClientAppsJob extends JobImpl { - private final static Log _log = new Log(LoadClientAppsJob.class); + private Log _log; /** wait 2 minutes before starting up client apps */ private final static long STARTUP_DELAY = 2*60*1000; - public LoadClientAppsJob() { - super(); - getTiming().setStartAfter(STARTUP_DELAY + Clock.getInstance().now()); + public LoadClientAppsJob(RouterContext ctx) { + super(ctx); + _log = ctx.logManager().getLog(LoadClientAppsJob.class); + getTiming().setStartAfter(STARTUP_DELAY + _context.clock().now()); } public void runJob() { int i = 0; while (true) { - String className = Router.getInstance().getConfigSetting("clientApp."+i+".main"); - String clientName = Router.getInstance().getConfigSetting("clientApp."+i+".name"); - String args = Router.getInstance().getConfigSetting("clientApp."+i+".args"); + String className = _context.router().getConfigSetting("clientApp."+i+".main"); + String clientName = _context.router().getConfigSetting("clientApp."+i+".name"); + String args = _context.router().getConfigSetting("clientApp."+i+".args"); if (className == null) break; String argVal[] = parseArgs(args); @@ -137,7 +142,7 @@ class LoadClientAppsJob extends JobImpl { t.start(); } - private final static class RunApp implements Runnable { + private final class RunApp implements Runnable { private String _className; private String _appName; private String _args[]; diff --git a/router/java/src/net/i2p/router/startup/StartupJob.java b/router/java/src/net/i2p/router/startup/StartupJob.java index a59b59cef..09a10c634 100644 --- a/router/java/src/net/i2p/router/startup/StartupJob.java +++ b/router/java/src/net/i2p/router/startup/StartupJob.java @@ -13,6 +13,7 @@ import net.i2p.router.JobImpl; 
import net.i2p.router.JobQueue; import net.i2p.router.StatisticsManager; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * The StartupJob should be run once on router startup to initialize the system @@ -28,13 +29,15 @@ import net.i2p.util.Log; * */ public class StartupJob extends JobImpl { - private static Log _log = new Log(StartupJob.class); + + public StartupJob(RouterContext context) { + super(context); + } public String getName() { return "Startup Router"; } public void runJob() { - ReadConfigJob.doRead(); - StatisticsManager.getInstance().startup(); - - JobQueue.getInstance().addJob(new LoadRouterInfoJob()); + ReadConfigJob.doRead(_context); + _context.statPublisher().startup(); + _context.jobQueue().addJob(new LoadRouterInfoJob(_context)); } } diff --git a/router/java/src/net/i2p/router/transport/BandwidthLimitedInputStream.java b/router/java/src/net/i2p/router/transport/BandwidthLimitedInputStream.java index 453865af9..13b8b9f08 100644 --- a/router/java/src/net/i2p/router/transport/BandwidthLimitedInputStream.java +++ b/router/java/src/net/i2p/router/transport/BandwidthLimitedInputStream.java @@ -1,9 +1,9 @@ package net.i2p.router.transport; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -13,33 +13,36 @@ import java.io.IOException; import java.io.InputStream; import net.i2p.data.RouterIdentity; +import net.i2p.router.RouterContext; public class BandwidthLimitedInputStream extends FilterInputStream { private RouterIdentity _peer; - public BandwidthLimitedInputStream(InputStream source, RouterIdentity peer) { - super(source); - _peer = peer; + private RouterContext _context; + public BandwidthLimitedInputStream(RouterContext context, InputStream source, RouterIdentity peer) { + super(source); + _context = context; + _peer = peer; } - public int read() throws IOException { - BandwidthLimiter.getInstance().delayInbound(_peer, 1); - return in.read(); + public int read() throws IOException { + _context.bandwidthLimiter().delayInbound(_peer, 1); + return in.read(); } public int read(byte dest[]) throws IOException { - int read = in.read(dest); - BandwidthLimiter.getInstance().delayInbound(_peer, read); - return read; + int read = in.read(dest); + _context.bandwidthLimiter().delayInbound(_peer, read); + return read; } - public int read(byte dest[], int off, int len) throws IOException { - int read = in.read(dest, off, len); - BandwidthLimiter.getInstance().delayInbound(_peer, read); - return read; + public int read(byte dest[], int off, int len) throws IOException { + int read = in.read(dest, off, len); + _context.bandwidthLimiter().delayInbound(_peer, read); + return read; } - public long skip(long numBytes) throws IOException { - long skip = in.skip(numBytes); - BandwidthLimiter.getInstance().delayInbound(_peer, (int)skip); - return skip; + public long skip(long numBytes) throws IOException { + long skip = in.skip(numBytes); + _context.bandwidthLimiter().delayInbound(_peer, (int)skip); + return skip; } } diff --git a/router/java/src/net/i2p/router/transport/BandwidthLimitedOutputStream.java b/router/java/src/net/i2p/router/transport/BandwidthLimitedOutputStream.java index f3d93389e..8b9bb96e6 100644 --- 
a/router/java/src/net/i2p/router/transport/BandwidthLimitedOutputStream.java +++ b/router/java/src/net/i2p/router/transport/BandwidthLimitedOutputStream.java @@ -1,9 +1,9 @@ package net.i2p.router.transport; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -13,50 +13,53 @@ import java.io.IOException; import java.io.OutputStream; import net.i2p.data.RouterIdentity; +import net.i2p.router.RouterContext; public class BandwidthLimitedOutputStream extends FilterOutputStream { private RouterIdentity _peer; + private RouterContext _context; - public BandwidthLimitedOutputStream(OutputStream source, RouterIdentity peer) { - super(source); - _peer = peer; + public BandwidthLimitedOutputStream(RouterContext context, OutputStream source, RouterIdentity peer) { + super(source); + _context = context; + _peer = peer; } private final static int CHUNK_SIZE = 64; - public void write(int val) throws IOException { - BandwidthLimiter.getInstance().delayOutbound(_peer, 1); - out.write(val); + public void write(int val) throws IOException { + _context.bandwidthLimiter().delayOutbound(_peer, 1); + out.write(val); } - public void write(byte src[]) throws IOException { - if (src == null) return; - if (src.length > CHUNK_SIZE) { - for (int i = 0; i < src.length; ) { - write(src, i*CHUNK_SIZE, CHUNK_SIZE); - i += CHUNK_SIZE; - } - } else { - write(src, 0, src.length); - } + public void write(byte src[]) throws IOException { + if (src == null) return; + if (src.length > CHUNK_SIZE) { + for (int i = 0; i < src.length; ) { + write(src, 
i*CHUNK_SIZE, CHUNK_SIZE); + i += CHUNK_SIZE; + } + } else { + write(src, 0, src.length); + } } - public void write(byte src[], int off, int len) throws IOException { - if (src == null) return; - if (len <= 0) return; - if (len <= CHUNK_SIZE) { - BandwidthLimiter.getInstance().delayOutbound(_peer, len); - out.write(src, off, len); - } else { - int i = 0; - while (i+CHUNK_SIZE < len) { - BandwidthLimiter.getInstance().delayOutbound(_peer, CHUNK_SIZE); - out.write(src, off+i*CHUNK_SIZE, CHUNK_SIZE); - i++; - } - int remainder = len % CHUNK_SIZE; - if (remainder != 0) { - BandwidthLimiter.getInstance().delayOutbound(_peer, remainder); - out.write(src, off+len-(remainder), remainder); - } - } + public void write(byte src[], int off, int len) throws IOException { + if (src == null) return; + if (len <= 0) return; + if (len <= CHUNK_SIZE) { + _context.bandwidthLimiter().delayOutbound(_peer, len); + out.write(src, off, len); + } else { + int i = 0; + while (i+CHUNK_SIZE < len) { + _context.bandwidthLimiter().delayOutbound(_peer, CHUNK_SIZE); + out.write(src, off+i*CHUNK_SIZE, CHUNK_SIZE); + i++; + } + int remainder = len % CHUNK_SIZE; + if (remainder != 0) { + _context.bandwidthLimiter().delayOutbound(_peer, remainder); + out.write(src, off+len-(remainder), remainder); + } + } } } diff --git a/router/java/src/net/i2p/router/transport/BandwidthLimiter.java b/router/java/src/net/i2p/router/transport/BandwidthLimiter.java index 81daa3c5f..d7325ad39 100644 --- a/router/java/src/net/i2p/router/transport/BandwidthLimiter.java +++ b/router/java/src/net/i2p/router/transport/BandwidthLimiter.java @@ -1,28 +1,31 @@ package net.i2p.router.transport; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. 
- * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ import net.i2p.data.RouterIdentity; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** - * Coordinate the bandwidth limiting across all classes of peers. Currently + * Coordinate the bandwidth limiting across all classes of peers. Currently * treats everything as open (aka doesn't limit) * */ public class BandwidthLimiter { - private final static Log _log = new Log(BandwidthLimiter.class); - private final static BandwidthLimiter _instance = new TrivialBandwidthLimiter(); - public static BandwidthLimiter getInstance() { return _instance; } + private Log _log; + protected RouterContext _context; + + protected BandwidthLimiter(RouterContext context) { + _context = context; + _log = context.logManager().getLog(BandwidthLimiter.class); + } - protected BandwidthLimiter() {} - public long getTotalSendBytes() { return 0; } public long getTotalReceiveBytes() { return 0; } @@ -30,14 +33,14 @@ public class BandwidthLimiter { * Return how many milliseconds to wait before receiving/processing numBytes from the peer */ public long calculateDelayInbound(RouterIdentity peer, int numBytes) { - return 0; + return 0; } - + /** * Return how many milliseconds to wait before sending numBytes to the peer */ public long calculateDelayOutbound(RouterIdentity peer, int numBytes) { - return 0; + return 0; } /** @@ -54,24 +57,24 @@ public class BandwidthLimiter { * from the peer will not violate the bandwidth limits */ public void delayInbound(RouterIdentity peer, int numBytes) { - long ms = calculateDelayInbound(peer, numBytes); - if (ms > 0) { - _log.debug("Delaying inbound " + ms +"ms for " + numBytes +" bytes"); - try { Thread.sleep(ms); } catch 
(InterruptedException ie) {} - } - consumeInbound(peer, numBytes); + long ms = calculateDelayInbound(peer, numBytes); + if (ms > 0) { + _log.debug("Delaying inbound " + ms +"ms for " + numBytes +" bytes"); + try { Thread.sleep(ms); } catch (InterruptedException ie) {} + } + consumeInbound(peer, numBytes); } /** * Delay the required amount of time before returning so that sending numBytes * to the peer will not violate the bandwidth limits */ public void delayOutbound(RouterIdentity peer, int numBytes) { - long ms = calculateDelayOutbound(peer, numBytes); - if (ms > 0) { - _log.debug("Delaying outbound " + ms + "ms for " + numBytes + " bytes"); - try { Thread.sleep(ms); } catch (InterruptedException ie) {} - } - - consumeOutbound(peer, numBytes); + long ms = calculateDelayOutbound(peer, numBytes); + if (ms > 0) { + _log.debug("Delaying outbound " + ms + "ms for " + numBytes + " bytes"); + try { Thread.sleep(ms); } catch (InterruptedException ie) {} + } + + consumeOutbound(peer, numBytes); } } diff --git a/router/java/src/net/i2p/router/transport/CommSystemFacadeImpl.java b/router/java/src/net/i2p/router/transport/CommSystemFacadeImpl.java index 10b5abfaa..b1c9dbd7c 100644 --- a/router/java/src/net/i2p/router/transport/CommSystemFacadeImpl.java +++ b/router/java/src/net/i2p/router/transport/CommSystemFacadeImpl.java @@ -22,48 +22,51 @@ import net.i2p.router.Router; import net.i2p.router.transport.phttp.PHTTPTransport; import net.i2p.router.transport.tcp.TCPTransport; import net.i2p.util.Log; +import net.i2p.router.RouterContext; public class CommSystemFacadeImpl extends CommSystemFacade { - private final static Log _log = new Log(CommSystemFacadeImpl.class); + private Log _log; + private RouterContext _context; private TransportManager _manager; - public CommSystemFacadeImpl() { - _manager = null; + public CommSystemFacadeImpl(RouterContext context) { + _context = context; + _log = _context.logManager().getLog(CommSystemFacadeImpl.class); + _manager = null; } public 
void startup() { - _log.info("Starting up the comm system"); - _manager = new TransportManager(Router.getInstance().getRouterInfo(), KeyManager.getInstance().getSigningPrivateKey()); - _manager.startListening(); - //JobQueue.getInstance().addJob(new FetchOutNetMessageJob(this)); + _log.info("Starting up the comm system"); + _manager = new TransportManager(_context); + _manager.startListening(); } public void shutdown() { - if (_manager != null) - _manager.stopListening(); + if (_manager != null) + _manager.stopListening(); } public List getBids(OutNetMessage msg) { - return _manager.getBids(msg); + return _manager.getBids(msg); } public void processMessage(OutNetMessage msg) { - JobQueue.getInstance().addJob(new GetBidsJob(this, msg)); + _context.jobQueue().addJob(new GetBidsJob(_context, this, msg)); } public String renderStatusHTML() { return _manager.renderStatusHTML(); } - public Set createAddresses() { - Set addresses = new HashSet(); - RouterAddress addr = createTCPAddress(); - if (addr != null) - addresses.add(addr); - addr = createPHTTPAddress(); - if (addr != null) - addresses.add(addr); - _log.info("Creating addresses: " + addresses); - return addresses; + Set addresses = new HashSet(); + RouterAddress addr = createTCPAddress(); + if (addr != null) + addresses.add(addr); + addr = createPHTTPAddress(); + if (addr != null) + addresses.add(addr); + if (_log.shouldLog(Log.INFO)) + _log.info("Creating addresses: " + addresses); + return addresses; } private final static String PROP_I2NP_TCP_HOSTNAME = "i2np.tcp.hostname"; @@ -71,42 +74,42 @@ public class CommSystemFacadeImpl extends CommSystemFacade { private final static String PROP_I2NP_PHTTP_SEND_URL = "i2np.phttp.sendURL"; private final static String PROP_I2NP_PHTTP_REGISTER_URL = "i2np.phttp.registerURL"; - private static RouterAddress createTCPAddress() { - RouterAddress addr = new RouterAddress(); - addr.setCost(10); - addr.setExpiration(null); - Properties props = new Properties(); - String name = 
Router.getInstance().getConfigSetting(PROP_I2NP_TCP_HOSTNAME); - String port = Router.getInstance().getConfigSetting(PROP_I2NP_TCP_PORT); - if ( (name == null) || (port == null) ) { - _log.info("TCP Host/Port not specified in config file - skipping TCP transport"); - return null; - } else { - _log.info("Creating TCP address on " + name + ":" + port); - } - props.setProperty("host", name); - props.setProperty("port", port); - addr.setOptions(props); - addr.setTransportStyle(TCPTransport.STYLE); - return addr; + private RouterAddress createTCPAddress() { + RouterAddress addr = new RouterAddress(); + addr.setCost(10); + addr.setExpiration(null); + Properties props = new Properties(); + String name = _context.router().getConfigSetting(PROP_I2NP_TCP_HOSTNAME); + String port = _context.router().getConfigSetting(PROP_I2NP_TCP_PORT); + if ( (name == null) || (port == null) ) { + _log.info("TCP Host/Port not specified in config file - skipping TCP transport"); + return null; + } else { + _log.info("Creating TCP address on " + name + ":" + port); + } + props.setProperty("host", name); + props.setProperty("port", port); + addr.setOptions(props); + addr.setTransportStyle(TCPTransport.STYLE); + return addr; } - private static RouterAddress createPHTTPAddress() { - RouterAddress addr = new RouterAddress(); - addr.setCost(50); - addr.setExpiration(null); - Properties props = new Properties(); - String regURL = Router.getInstance().getConfigSetting(PROP_I2NP_PHTTP_REGISTER_URL); - String sendURL = Router.getInstance().getConfigSetting(PROP_I2NP_PHTTP_SEND_URL); - if ( (regURL == null) || (sendURL == null) ) { - _log.info("Polling HTTP registration/send URLs not specified in config file - skipping PHTTP transport"); - return null; - } else { - _log.info("Creating Polling HTTP address on " + regURL + " / " + sendURL); - } - props.setProperty(PHTTPTransport.PROP_TO_REGISTER_URL, regURL); - props.setProperty(PHTTPTransport.PROP_TO_SEND_URL, sendURL); - addr.setOptions(props); - 
addr.setTransportStyle(PHTTPTransport.STYLE); - return addr; + private RouterAddress createPHTTPAddress() { + RouterAddress addr = new RouterAddress(); + addr.setCost(50); + addr.setExpiration(null); + Properties props = new Properties(); + String regURL = _context.router().getConfigSetting(PROP_I2NP_PHTTP_REGISTER_URL); + String sendURL = _context.router().getConfigSetting(PROP_I2NP_PHTTP_SEND_URL); + if ( (regURL == null) || (sendURL == null) ) { + _log.info("Polling HTTP registration/send URLs not specified in config file - skipping PHTTP transport"); + return null; + } else { + _log.info("Creating Polling HTTP address on " + regURL + " / " + sendURL); + } + props.setProperty(PHTTPTransport.PROP_TO_REGISTER_URL, regURL); + props.setProperty(PHTTPTransport.PROP_TO_SEND_URL, sendURL); + addr.setOptions(props); + addr.setTransportStyle(PHTTPTransport.STYLE); + return addr; } } diff --git a/router/java/src/net/i2p/router/transport/FetchOutNetMessageJob.java b/router/java/src/net/i2p/router/transport/FetchOutNetMessageJob.java deleted file mode 100644 index 1e4154ff9..000000000 --- a/router/java/src/net/i2p/router/transport/FetchOutNetMessageJob.java +++ /dev/null @@ -1,49 +0,0 @@ -package net.i2p.router.transport; -/* - * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat - * your children, but it might. Use at your own risk. 
- * - */ - -import net.i2p.router.JobImpl; -import net.i2p.router.JobQueue; -import net.i2p.router.OutNetMessage; -import net.i2p.router.OutNetMessagePool; -import net.i2p.util.Clock; -import net.i2p.util.Log; - -/** - * Fetch an outbound message from the outbound pool, check its validity, get a bid - * from transports, and queue it for delivery on the "winning" transport - * - */ -public class FetchOutNetMessageJob extends JobImpl { - private static Log _log = new Log(FetchOutNetMessageJob.class); - private CommSystemFacadeImpl _facade; - - public FetchOutNetMessageJob(CommSystemFacadeImpl facade) { - super(); - _facade = facade; - } - - public String getName() { return "Check For Pending Outbound Network Message"; } - public void runJob() { - OutNetMessage msg = OutNetMessagePool.getInstance().getNext(); - if (msg != null) { - processMessage(msg); - } else { - _log.debug("No new outbound messages"); - getTiming().setStartAfter(Clock.getInstance().now()+1000); - } - - JobQueue.getInstance().addJob(this); - //JobQueue.getInstance().addJob(new FetchOutNetMessageJob(_facade)); - } - - private void processMessage(OutNetMessage msg) { - JobQueue.getInstance().addJob(new GetBidsJob(_facade, msg)); - } -} diff --git a/router/java/src/net/i2p/router/transport/GetBidsJob.java b/router/java/src/net/i2p/router/transport/GetBidsJob.java index af7285b1a..df543d4da 100644 --- a/router/java/src/net/i2p/router/transport/GetBidsJob.java +++ b/router/java/src/net/i2p/router/transport/GetBidsJob.java @@ -1,9 +1,9 @@ package net.i2p.router.transport; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. 
+ * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -19,63 +19,65 @@ import net.i2p.router.ProfileManager; import net.i2p.router.Router; import net.i2p.router.Shitlist; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Retrieve a set of bids for a particular outbound message, and if any are found - * that meet the message's requirements, register the message as in process and + * that meet the message's requirements, register the message as in process and * pass it on to the transport for processing * */ -public class GetBidsJob extends JobImpl { - private static Log _log = new Log(GetBidsJob.class); +public class GetBidsJob extends JobImpl { + private Log _log; private CommSystemFacadeImpl _facade; private OutNetMessage _msg; - public GetBidsJob(CommSystemFacadeImpl facade, OutNetMessage msg) { - super(); - _facade = facade; - _msg = msg; + public GetBidsJob(RouterContext ctx, CommSystemFacadeImpl facade, OutNetMessage msg) { + super(ctx); + _log = ctx.logManager().getLog(GetBidsJob.class); + _facade = facade; + _msg = msg; } public String getName() { return "Fetch bids for a message to be delivered"; } public void runJob() { - Hash to = _msg.getTarget().getIdentity().getHash(); - if (Shitlist.getInstance().isShitlisted(to)) { - _log.warn("Attempt to send a message to a shitlisted peer - " + to); - fail(); - return; - } - - Hash us = Router.getInstance().getRouterInfo().getIdentity().getHash(); - if (_msg.getTarget().getIdentity().getHash().equals(us)) { - _log.error("wtf, send a message to ourselves? nuh uh. 
msg = " + _msg, getAddedBy()); - fail(); - return; - } - - List bids = _facade.getBids(_msg); - if (bids.size() <= 0) { - _log.warn("No bids available for the message " + _msg); - fail(); - } else { - TransportBid bid = (TransportBid)bids.get(0); - bid.getTransport().send(_msg); - } + Hash to = _msg.getTarget().getIdentity().getHash(); + if (_context.shitlist().isShitlisted(to)) { + _log.warn("Attempt to send a message to a shitlisted peer - " + to); + fail(); + return; + } + + Hash us = _context.routerHash(); + if (_msg.getTarget().getIdentity().getHash().equals(us)) { + _log.error("wtf, send a message to ourselves? nuh uh. msg = " + _msg, getAddedBy()); + fail(); + return; + } + + List bids = _facade.getBids(_msg); + if (bids.size() <= 0) { + _log.warn("No bids available for the message " + _msg); + fail(); + } else { + TransportBid bid = (TransportBid)bids.get(0); + bid.getTransport().send(_msg); + } } private void fail() { - if (_msg.getOnFailedSendJob() != null) { - JobQueue.getInstance().addJob(_msg.getOnFailedSendJob()); - } - if (_msg.getOnFailedReplyJob() != null) { - JobQueue.getInstance().addJob(_msg.getOnFailedReplyJob()); - } - MessageSelector selector = _msg.getReplySelector(); - if (selector != null) { - OutboundMessageRegistry.getInstance().unregisterPending(_msg); - } - - ProfileManager.getInstance().messageFailed(_msg.getTarget().getIdentity().getHash()); + if (_msg.getOnFailedSendJob() != null) { + _context.jobQueue().addJob(_msg.getOnFailedSendJob()); + } + if (_msg.getOnFailedReplyJob() != null) { + _context.jobQueue().addJob(_msg.getOnFailedReplyJob()); + } + MessageSelector selector = _msg.getReplySelector(); + if (selector != null) { + _context.messageRegistry().unregisterPending(_msg); + } + + _context.profileManager().messageFailed(_msg.getTarget().getIdentity().getHash()); } } diff --git a/router/java/src/net/i2p/router/transport/OutboundMessageRegistry.java b/router/java/src/net/i2p/router/transport/OutboundMessageRegistry.java index 
8b2384885..50bcf4b11 100644 --- a/router/java/src/net/i2p/router/transport/OutboundMessageRegistry.java +++ b/router/java/src/net/i2p/router/transport/OutboundMessageRegistry.java @@ -25,212 +25,232 @@ import net.i2p.router.MessageSelector; import net.i2p.router.OutNetMessage; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; public class OutboundMessageRegistry { - private final static Log _log = new Log(OutboundMessageRegistry.class); - private static final OutboundMessageRegistry _instance = new OutboundMessageRegistry(); - public static OutboundMessageRegistry getInstance() { return _instance; } + private Log _log; private TreeMap _pendingMessages; + private RouterContext _context; private final static long CLEANUP_DELAY = 1000*5; // how often to expire pending unreplied messages - private OutboundMessageRegistry() { - _pendingMessages = new TreeMap(); - JobQueue.getInstance().addJob(new CleanupPendingMessagesJob()); + public OutboundMessageRegistry(RouterContext context) { + _context = context; + _log = _context.logManager().getLog(OutboundMessageRegistry.class); + _pendingMessages = new TreeMap(); + _context.jobQueue().addJob(new CleanupPendingMessagesJob()); } public List getOriginalMessages(I2NPMessage message) { - - HashSet matches = new HashSet(4); - long beforeSync = Clock.getInstance().now(); - - Map messages = null; - synchronized (_pendingMessages) { - messages = (Map)_pendingMessages.clone(); - } - - long matchTime = 0; - long continueTime = 0; - int numMessages = messages.size(); - - long afterSync1 = Clock.getInstance().now(); - - ArrayList matchedRemove = new ArrayList(32); - for (Iterator iter = messages.keySet().iterator(); iter.hasNext(); ) { - Long exp = (Long)iter.next(); - OutNetMessage msg = (OutNetMessage)messages.get(exp); - MessageSelector selector = msg.getReplySelector(); - if (selector != null) { - long before = Clock.getInstance().now(); - boolean isMatch = selector.isMatch(message); - long 
after = Clock.getInstance().now(); - long diff = after-before; - if (diff > 100) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Matching with selector took too long (" + diff + "ms) : " + selector.getClass().getName()); - } - matchTime += diff; - - if (isMatch) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Selector matches [" + selector); - matches.add(msg); - long beforeCon = Clock.getInstance().now(); - boolean continueMatching = selector.continueMatching(); - long afterCon = Clock.getInstance().now(); - long diffCon = afterCon - beforeCon; - if (diffCon > 100) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Error continueMatching on a match took too long (" + diffCon + "ms) : " + selector.getClass().getName()); - } - continueTime += diffCon; - - if (continueMatching) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Continue matching"); - // noop - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Stop matching selector " + selector + " for message " + msg.getMessage().getClass().getName()); - matchedRemove.add(exp); - } - } else { - //_log.debug("Selector does not match [" + selector + "]"); - } - } - } - long afterSearch = Clock.getInstance().now(); - - for (Iterator iter = matchedRemove.iterator(); iter.hasNext(); ) { - Long expiration = (Long)iter.next(); - OutNetMessage m = null; - long before = Clock.getInstance().now(); - synchronized (_pendingMessages) { - m = (OutNetMessage)_pendingMessages.remove(expiration); - } - long diff = Clock.getInstance().now() - before; - if (diff > 500) - _log.error("Took too long syncing on remove (" + diff + "ms"); - - if (m != null) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Removing message with selector " + m.getReplySelector().getClass().getName() + " :" + m.getReplySelector().toString()); - } - } - - long delay = Clock.getInstance().now() - beforeSync; - long search = afterSearch - afterSync1; - long sync = afterSync1 - beforeSync; - - int level = Log.DEBUG; - if (delay > 1000) - level = Log.ERROR; - if 
(_log.shouldLog(level)) { - _log.log(level, "getMessages took " + delay + "ms with search time of " + search + "ms (match: " + matchTime + "ms, continue: " + continueTime + "ms, #: " + numMessages + ") and sync time of " + sync + "ms for " + matchedRemove.size() + " removed, " + matches.size() + " matches"); - } - - return new ArrayList(matches); + HashSet matches = new HashSet(4); + long beforeSync = _context.clock().now(); + + Map messages = null; + synchronized (_pendingMessages) { + messages = (Map)_pendingMessages.clone(); + } + + long matchTime = 0; + long continueTime = 0; + int numMessages = messages.size(); + + long afterSync1 = _context.clock().now(); + + ArrayList matchedRemove = new ArrayList(32); + for (Iterator iter = messages.keySet().iterator(); iter.hasNext(); ) { + Long exp = (Long)iter.next(); + OutNetMessage msg = (OutNetMessage)messages.get(exp); + MessageSelector selector = msg.getReplySelector(); + if (selector != null) { + long before = _context.clock().now(); + boolean isMatch = selector.isMatch(message); + long after = _context.clock().now(); + long diff = after-before; + if (diff > 100) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Matching with selector took too long (" + diff + "ms) : " + + selector.getClass().getName()); + } + matchTime += diff; + + if (isMatch) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Selector matches [" + selector); + matches.add(msg); + long beforeCon = _context.clock().now(); + boolean continueMatching = selector.continueMatching(); + long afterCon = _context.clock().now(); + long diffCon = afterCon - beforeCon; + if (diffCon > 100) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Error continueMatching on a match took too long (" + + diffCon + "ms) : " + selector.getClass().getName()); + } + continueTime += diffCon; + + if (continueMatching) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Continue matching"); + // noop + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Stop matching selector " + 
selector + " for message " + + msg.getMessage().getClass().getName()); + matchedRemove.add(exp); + } + } else { + //_log.debug("Selector does not match [" + selector + "]"); + } + } + } + long afterSearch = _context.clock().now(); + + for (Iterator iter = matchedRemove.iterator(); iter.hasNext(); ) { + Long expiration = (Long)iter.next(); + OutNetMessage m = null; + long before = _context.clock().now(); + synchronized (_pendingMessages) { + m = (OutNetMessage)_pendingMessages.remove(expiration); + } + long diff = _context.clock().now() - before; + if (diff > 500) + _log.error("Took too long syncing on remove (" + diff + "ms"); + + if (m != null) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Removing message with selector " + + m.getReplySelector().getClass().getName() + + " :" + m.getReplySelector().toString()); + } + } + + long delay = _context.clock().now() - beforeSync; + long search = afterSearch - afterSync1; + long sync = afterSync1 - beforeSync; + + int level = Log.DEBUG; + if (delay > 1000) + level = Log.ERROR; + if (_log.shouldLog(level)) { + _log.log(level, "getMessages took " + delay + "ms with search time of " + + search + "ms (match: " + matchTime + "ms, continue: " + + continueTime + "ms, #: " + numMessages + ") and sync time of " + + sync + "ms for " + matchedRemove.size() + " removed, " + + matches.size() + " matches"); + } + + return new ArrayList(matches); } public void registerPending(OutNetMessage msg) { - if (msg == null) { - throw new IllegalArgumentException("Null OutNetMessage specified? wtf"); - } else if (msg.getMessage() == null) { - throw new IllegalArgumentException("OutNetMessage doesn't contain an I2NPMessage? wtf"); - } + if (msg == null) { + throw new IllegalArgumentException("Null OutNetMessage specified? wtf"); + } else if (msg.getMessage() == null) { + throw new IllegalArgumentException("OutNetMessage doesn't contain an I2NPMessage? 
wtf"); + } - long beforeSync = Clock.getInstance().now(); - long afterSync1 = 0; - long afterDone = 0; - try { - OutNetMessage oldMsg = null; - synchronized (_pendingMessages) { - if (_pendingMessages.containsValue(msg)) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Not adding an already pending message: " + msg.getMessage().getUniqueId() + "\n: " + msg, new Exception("Duplicate message registration")); - return; - } - - long l = msg.getExpiration(); - while (_pendingMessages.containsKey(new Long(l))) - l++; - _pendingMessages.put(new Long(l), msg); - } - afterSync1 = Clock.getInstance().now(); - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Register pending: " + msg.getReplySelector().getClass().getName() + " for " + msg.getMessage().getClass().getName() + ": " + msg.getReplySelector().toString(), new Exception("Register pending")); - afterDone = Clock.getInstance().now(); - } finally { - long delay = Clock.getInstance().now() - beforeSync; - long sync1 = afterSync1 - beforeSync; - long done = afterDone - afterSync1; - String warn = delay + "ms (sync = " + sync1 + "ms, done = " + done + "ms)"; - if (delay > 1000) { - _log.error("Synchronizing in the registry.register took too long! 
" + warn); - MessageHistory.getInstance().messageProcessingError(msg.getMessage().getUniqueId(), msg.getMessage().getClass().getName(), "RegisterPending took too long: " + warn); - } else { - _log.debug("Synchronizing in the registry.register was quick: " + warn); - } - } - //_log.debug("* Register called of " + msg + "\n\nNow pending are: " + renderStatusHTML(), new Exception("who registered a new one?")); + long beforeSync = _context.clock().now(); + long afterSync1 = 0; + long afterDone = 0; + try { + OutNetMessage oldMsg = null; + synchronized (_pendingMessages) { + if (_pendingMessages.containsValue(msg)) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Not adding an already pending message: " + + msg.getMessage().getUniqueId() + "\n: " + msg, + new Exception("Duplicate message registration")); + return; + } + + long l = msg.getExpiration(); + while (_pendingMessages.containsKey(new Long(l))) + l++; + _pendingMessages.put(new Long(l), msg); + } + afterSync1 = _context.clock().now(); + + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Register pending: " + msg.getReplySelector().getClass().getName() + + " for " + msg.getMessage().getClass().getName() + ": " + + msg.getReplySelector().toString(), new Exception("Register pending")); + afterDone = _context.clock().now(); + } finally { + long delay = _context.clock().now() - beforeSync; + long sync1 = afterSync1 - beforeSync; + long done = afterDone - afterSync1; + String warn = delay + "ms (sync = " + sync1 + "ms, done = " + done + "ms)"; + if (delay > 1000) { + _log.error("Synchronizing in the registry.register took too long! 
" + warn); + _context.messageHistory().messageProcessingError(msg.getMessage().getUniqueId(), + msg.getMessage().getClass().getName(), + "RegisterPending took too long: " + warn); + } else { + _log.debug("Synchronizing in the registry.register was quick: " + warn); + } + } + //_log.debug("* Register called of " + msg + "\n\nNow pending are: " + renderStatusHTML(), new Exception("who registered a new one?")); } public void unregisterPending(OutNetMessage msg) { - long beforeSync = Clock.getInstance().now(); - try { - synchronized (_pendingMessages) { - if (_pendingMessages.containsValue(msg)) { - Long found = null; - for (Iterator iter = _pendingMessages.keySet().iterator(); iter.hasNext();) { - Long exp = (Long)iter.next(); - Object val = _pendingMessages.get(exp); - if (val.equals(msg)) { - found = exp; - break; - } - } - if (found != null) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Unregistered message " + msg.getReplySelector() + ": " + msg, new Exception("Who unregistered?")); - _pendingMessages.remove(found); - } else { - _log.error("Arg, couldn't find the message that we... thought we could find?", new Exception("WTF")); - } - } - } - } finally { - long delay = Clock.getInstance().now() - beforeSync; - String warn = delay + "ms"; - if (delay > 1000) { - _log.error("Synchronizing in the registry.unRegister took too long! 
" + warn); - MessageHistory.getInstance().messageProcessingError(msg.getMessage().getUniqueId(), msg.getMessage().getClass().getName(), "Unregister took too long: " + warn); - } else { - _log.debug("Synchronizing in the registry.unRegister was quick: " + warn); - } - } + long beforeSync = _context.clock().now(); + try { + synchronized (_pendingMessages) { + if (_pendingMessages.containsValue(msg)) { + Long found = null; + for (Iterator iter = _pendingMessages.keySet().iterator(); iter.hasNext();) { + Long exp = (Long)iter.next(); + Object val = _pendingMessages.get(exp); + if (val.equals(msg)) { + found = exp; + break; + } + } + + if (found != null) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Unregistered message " + msg.getReplySelector() + + ": " + msg, new Exception("Who unregistered?")); + _pendingMessages.remove(found); + } else { + _log.error("Arg, couldn't find the message that we... thought we could find?", + new Exception("WTF")); + } + } + } + } finally { + long delay = _context.clock().now() - beforeSync; + String warn = delay + "ms"; + if (delay > 1000) { + _log.error("Synchronizing in the registry.unRegister took too long! " + warn); + _context.messageHistory().messageProcessingError(msg.getMessage().getUniqueId(), msg.getMessage().getClass().getName(), "Unregister took too long: " + warn); + } else { + _log.debug("Synchronizing in the registry.unRegister was quick: " + warn); + } + } } public String renderStatusHTML() { - StringBuffer buf = new StringBuffer(8192); - buf.append("

    Pending messages

    \n"); - Map msgs = null; - synchronized (_pendingMessages) { - msgs = (Map)_pendingMessages.clone(); - } - buf.append("
      "); - for (Iterator iter = msgs.keySet().iterator(); iter.hasNext();) { - Long exp = (Long)iter.next(); - OutNetMessage msg = (OutNetMessage)msgs.get(exp); - buf.append("
    • ").append(msg.getMessage().getClass().getName()).append(": expiring on ").append(new Date(exp.longValue())); - if (msg.getReplySelector() != null) - buf.append(" with reply selector ").append(msg.getReplySelector().toString()); - else - buf.append(" with NO reply selector? WTF!"); - buf.append("
    • \n"); - } - buf.append("
    "); - return buf.toString(); + StringBuffer buf = new StringBuffer(8192); + buf.append("

    Pending messages

    \n"); + Map msgs = null; + synchronized (_pendingMessages) { + msgs = (Map)_pendingMessages.clone(); + } + buf.append("
      "); + for (Iterator iter = msgs.keySet().iterator(); iter.hasNext();) { + Long exp = (Long)iter.next(); + OutNetMessage msg = (OutNetMessage)msgs.get(exp); + buf.append("
    • ").append(msg.getMessage().getClass().getName()); + buf.append(": expiring on ").append(new Date(exp.longValue())); + if (msg.getReplySelector() != null) + buf.append(" with reply selector ").append(msg.getReplySelector().toString()); + else + buf.append(" with NO reply selector? WTF!"); + buf.append("
    • \n"); + } + buf.append("
    "); + return buf.toString(); } /** @@ -238,78 +258,86 @@ public class OutboundMessageRegistry { * */ private class CleanupPendingMessagesJob extends JobImpl { - public CleanupPendingMessagesJob() { - super(); - } + public CleanupPendingMessagesJob() { + super(OutboundMessageRegistry.this._context); + } - public String getName() { return "Cleanup any messages that timed out"; } - public void runJob() { - List toRemove = new ArrayList(); - long now = Clock.getInstance().now(); - Map messages = null; - synchronized (_pendingMessages) { - messages = (Map)_pendingMessages.clone(); - } - long afterCreate = Clock.getInstance().now(); + public String getName() { return "Cleanup any messages that timed out"; } + public void runJob() { + List toRemove = new ArrayList(); + long now = CleanupPendingMessagesJob.this._context.clock().now(); + Map messages = null; + synchronized (_pendingMessages) { + messages = (Map)_pendingMessages.clone(); + } + long afterCreate = CleanupPendingMessagesJob.this._context.clock().now(); + + for (Iterator iter = messages.keySet().iterator(); iter.hasNext(); ) { + Long exp = (Long)iter.next(); + OutNetMessage msg = (OutNetMessage)messages.get(exp); + if (msg.getExpiration() < now) { + toRemove.add(exp); + } + } + long findRemove = CleanupPendingMessagesJob.this._context.clock().now(); - for (Iterator iter = messages.keySet().iterator(); iter.hasNext(); ) { - Long exp = (Long)iter.next(); - OutNetMessage msg = (OutNetMessage)messages.get(exp); - if (msg.getExpiration() < now) { - toRemove.add(exp); - } - } - long findRemove = Clock.getInstance().now(); + long removeTime = 0; + long loopTime = 0; + + RouterContext ctx = OutboundMessageRegistry.this._context; + + for (Iterator iter = toRemove.iterator(); iter.hasNext(); ) { + long beforeRemove = ctx.clock().now(); + Long exp = (Long)iter.next(); + OutNetMessage msg = null; + synchronized (_pendingMessages) { + msg = (OutNetMessage)_pendingMessages.remove(exp); + } + long afterRemove = 
ctx.clock().now(); + long diff = afterRemove - beforeRemove; - long removeTime = 0; - long loopTime = 0; - - for (Iterator iter = toRemove.iterator(); iter.hasNext(); ) { - long beforeRemove = Clock.getInstance().now(); - Long exp = (Long)iter.next(); - OutNetMessage msg = null; - synchronized (_pendingMessages) { - msg = (OutNetMessage)_pendingMessages.remove(exp); - } - long afterRemove = Clock.getInstance().now(); - long diff = afterRemove - beforeRemove; - - if (diff > 500) - _log.error("Synchronize during remove took too long " + diff + "ms"); - removeTime += diff; - - if (msg != null) { - MessageHistory.getInstance().replyTimedOut(msg); - Job fail = msg.getOnFailedReplyJob(); - if (fail != null) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Removing message with selector " + msg.getReplySelector() + ": " + msg.getMessage().getClass().getName() + " and firing fail job: " + fail.getClass().getName()); - JobQueue.getInstance().addJob(fail); - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Removing message with selector " + msg.getReplySelector() + " and not firing any job"); - } - } - long doneLoop = Clock.getInstance().now(); - long ldiff = doneLoop - beforeRemove; - if (ldiff > 500) - _log.error("Loop took too long [" + ldiff + "ms]"); - loopTime += ldiff; - } + if (diff > 500) + _log.error("Synchronize during remove took too long " + diff + "ms"); + removeTime += diff; - long cleanupDelay = Clock.getInstance().now() - now; - long findTime = findRemove - afterCreate; - long syncTime = afterCreate - now; - String warn = cleanupDelay + "ms (syncTime = " + syncTime + "ms, findTime =" + findTime + "ms, removeTime = " + removeTime + "ms, loopTime = " + loopTime + ")"; - if (cleanupDelay > 1000) { - _log.error("Cleanup took too long! 
" + warn); - // yes, the following is a kludge, as its not specific to a particular message but to a whole series of messages - MessageHistory.getInstance().messageProcessingError(-1, OutboundMessageRegistry.CleanupPendingMessagesJob.class.getName(), "Cleanup took too long: " + warn); - } else { - _log.debug("Cleanup was quick: " + warn); - } - - requeue(CLEANUP_DELAY); - } + if (msg != null) { + ctx.messageHistory().replyTimedOut(msg); + Job fail = msg.getOnFailedReplyJob(); + if (fail != null) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Removing message with selector " + msg.getReplySelector() + + ": " + msg.getMessage().getClass().getName() + + " and firing fail job: " + fail.getClass().getName()); + ctx.jobQueue().addJob(fail); + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Removing message with selector " + msg.getReplySelector() + + " and not firing any job"); + } + } + long doneLoop = ctx.clock().now(); + long ldiff = doneLoop - beforeRemove; + if (ldiff > 500) + _log.error("Loop took too long [" + ldiff + "ms]"); + loopTime += ldiff; + } + + long cleanupDelay = ctx.clock().now() - now; + long findTime = findRemove - afterCreate; + long syncTime = afterCreate - now; + String warn = cleanupDelay + "ms (syncTime = " + syncTime + "ms, findTime =" + + findTime + "ms, removeTime = " + removeTime + "ms, loopTime = " + + loopTime + ")"; + if (cleanupDelay > 1000) { + _log.error("Cleanup took too long! 
" + warn); + // yes, the following is a kludge, as its not specific to a particular message but to a whole series of messages + ctx.messageHistory().messageProcessingError(-1, OutboundMessageRegistry.CleanupPendingMessagesJob.class.getName(), + "Cleanup took too long: " + warn); + } else { + _log.debug("Cleanup was quick: " + warn); + } + + requeue(CLEANUP_DELAY); + } } } diff --git a/router/java/src/net/i2p/router/transport/TransportImpl.java b/router/java/src/net/i2p/router/transport/TransportImpl.java index 512b6d509..c5742ece7 100644 --- a/router/java/src/net/i2p/router/transport/TransportImpl.java +++ b/router/java/src/net/i2p/router/transport/TransportImpl.java @@ -30,110 +30,122 @@ import net.i2p.router.ProfileManager; import net.i2p.stat.StatManager; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Defines a way to send a message to another peer and start listening for messages * */ public abstract class TransportImpl implements Transport { - private final static Log _log = new Log(TransportImpl.class); + private Log _log; private TransportEventListener _listener; private Set _currentAddresses; private List _sendPool; + protected RouterContext _context; - static { - StatManager.getInstance().createFrequencyStat("transport.sendMessageFailureFrequency", "How often do we fail to send messages?", "Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createRateStat("transport.sendMessageSize", "How large are the messages sent?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createRateStat("transport.receiveMessageSize", "How large are the messages received?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createRateStat("transport.sendProcessingTime", "How long does it take from noticing that we want to send the message to having it completely sent 
(successfully or failed)?", "Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - } - - - public TransportImpl() { - _sendPool = new LinkedList(); - _currentAddresses = new HashSet(); + public TransportImpl(RouterContext context) { + _context = context; + _log = _context.logManager().getLog(TransportImpl.class); + + _context.statManager().createFrequencyStat("transport.sendMessageFailureFrequency", "How often do we fail to send messages?", "Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createRateStat("transport.sendMessageSize", "How large are the messages sent?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createRateStat("transport.receiveMessageSize", "How large are the messages received?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createRateStat("transport.sendProcessingTime", "How long does it take from noticing that we want to send the message to having it completely sent (successfully or failed)?", "Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + _sendPool = new LinkedList(); + _currentAddresses = new HashSet(); } public OutNetMessage getNextMessage() { - OutNetMessage msg = null; - synchronized (_sendPool) { - if (_sendPool.size() <= 0) return null; - msg = (OutNetMessage)_sendPool.remove(0); // use priority queues later - } - msg.beginSend(); - return msg; + OutNetMessage msg = null; + synchronized (_sendPool) { + if (_sendPool.size() <= 0) return null; + msg = (OutNetMessage)_sendPool.remove(0); // use priority queues later + } + msg.beginSend(); + return msg; } public void afterSend(OutNetMessage msg, boolean sendSuccessful) { - boolean log = false; - msg.timestamp("afterSend(" + sendSuccessful + ")"); - if (!sendSuccessful) - msg.transportFailed(getStyle()); - - long lifetime = msg.getLifetime(); - if (lifetime > 5000) { - if 
(_log.shouldLog(Log.WARN)) - _log.warn("afterSend: [success=" + sendSuccessful + "]\n" + msg.toString()); - } else { - if (_log.shouldLog(Log.INFO)) - _log.info("afterSend: [success=" + sendSuccessful + "]\n" + msg.toString()); - } - - if (sendSuccessful) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Send message " + msg.getMessage().getClass().getName() + " to " + msg.getTarget().getIdentity().getHash().toBase64() + " with transport " + getStyle() + " successfully"); - Job j = msg.getOnSendJob(); - if (j != null) JobQueue.getInstance().addJob(j); - log = true; - //NetworkDatabaseFacade.getInstance().peerReachable(msg.getTarget().getIdentity().getHash()); - } else { - if (_log.shouldLog(Log.INFO)) - _log.info("Failed to send message " + msg.getMessage().getClass().getName() + " to " + msg.getTarget().getIdentity().getHash().toBase64() + " with transport " + getStyle() + " (details: " + msg + ")"); - if ( (msg.getExpiration() <= 0) || (msg.getExpiration() > Clock.getInstance().now()) ) { - // this may not be the last transport available - keep going - OutNetMessagePool.getInstance().add(msg); - } else { - if (_log.shouldLog(Log.INFO)) - _log.info("No more time left (" + new Date(msg.getExpiration()) + ", expiring without sending successfully the " + msg.getMessage().getClass().getName()); - if (msg.getOnFailedSendJob() != null) - JobQueue.getInstance().addJob(msg.getOnFailedSendJob()); - MessageSelector selector = msg.getReplySelector(); - if (selector != null) { - OutboundMessageRegistry.getInstance().unregisterPending(msg); - } - log = true; - } - } - - if (log) { - I2NPMessage dmsg = msg.getMessage(); - String type = dmsg.getClass().getName(); - MessageHistory.getInstance().sendMessage(type, dmsg.getUniqueId(), dmsg.getMessageExpiration(), msg.getTarget().getIdentity().getHash(), sendSuccessful); - } - - long now = Clock.getInstance().now(); - long sendTime = now - msg.getSendBegin(); - long allTime = now - msg.getCreated(); - if (allTime > 5*1000) { - if 
(_log.shouldLog(Log.INFO)) - _log.info("Took too long from preperation to afterSend(ok? " + sendSuccessful + "): " + allTime + "ms " + " after failing on: " + msg.getFailedTransports() + " and succeeding on " + getStyle()); - if (allTime > 60*1000) { - // WTF!!@# - if (_log.shouldLog(Log.WARN)) - _log.warn("WTF, more than a minute slow? " + msg.getMessage().getClass().getName() + " of id " + msg.getMessage().getUniqueId() + " (send begin on " + new Date(msg.getSendBegin()) + " / created on " + new Date(msg.getCreated()) + "): " + msg, msg.getCreatedBy()); - MessageHistory.getInstance().messageProcessingError(msg.getMessage().getUniqueId(), msg.getMessage().getClass().getName(), "Took too long to send [" + allTime + "ms]"); - } - } - - StatManager.getInstance().addRateData("transport.sendProcessingTime", msg.getLifetime(), msg.getLifetime()); - - if (sendSuccessful) { - ProfileManager.getInstance().messageSent(msg.getTarget().getIdentity().getHash(), getStyle(), sendTime, msg.getMessageSize()); - StatManager.getInstance().addRateData("transport.sendMessageSize", msg.getMessageSize(), sendTime); - } else { - ProfileManager.getInstance().messageFailed(msg.getTarget().getIdentity().getHash(), getStyle()); - StatManager.getInstance().updateFrequency("transport.sendMessageFailureFrequency"); - } + boolean log = false; + msg.timestamp("afterSend(" + sendSuccessful + ")"); + if (!sendSuccessful) + msg.transportFailed(getStyle()); + + long lifetime = msg.getLifetime(); + if (lifetime > 5000) { + if (_log.shouldLog(Log.WARN)) + _log.warn("afterSend: [success=" + sendSuccessful + "]\n" + msg.toString()); + } else { + if (_log.shouldLog(Log.INFO)) + _log.info("afterSend: [success=" + sendSuccessful + "]\n" + msg.toString()); + } + + if (sendSuccessful) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Send message " + msg.getMessage().getClass().getName() + " to " + + msg.getTarget().getIdentity().getHash().toBase64() + " with transport " + + getStyle() + " successfully"); + 
Job j = msg.getOnSendJob(); + if (j != null) + _context.jobQueue().addJob(j); + log = true; + } else { + if (_log.shouldLog(Log.INFO)) + _log.info("Failed to send message " + msg.getMessage().getClass().getName() + + " to " + msg.getTarget().getIdentity().getHash().toBase64() + + " with transport " + getStyle() + " (details: " + msg + ")"); + if ( (msg.getExpiration() <= 0) || (msg.getExpiration() > _context.clock().now()) ) { + // this may not be the last transport available - keep going + _context.outNetMessagePool().add(msg); + } else { + if (_log.shouldLog(Log.INFO)) + _log.info("No more time left (" + new Date(msg.getExpiration()) + + ", expiring without sending successfully the " + + msg.getMessage().getClass().getName()); + if (msg.getOnFailedSendJob() != null) + _context.jobQueue().addJob(msg.getOnFailedSendJob()); + MessageSelector selector = msg.getReplySelector(); + if (selector != null) { + _context.messageRegistry().unregisterPending(msg); + } + log = true; + } + } + + if (log) { + I2NPMessage dmsg = msg.getMessage(); + String type = dmsg.getClass().getName(); + _context.messageHistory().sendMessage(type, dmsg.getUniqueId(), dmsg.getMessageExpiration(), msg.getTarget().getIdentity().getHash(), sendSuccessful); + } + + long now = _context.clock().now(); + long sendTime = now - msg.getSendBegin(); + long allTime = now - msg.getCreated(); + if (allTime > 5*1000) { + if (_log.shouldLog(Log.INFO)) + _log.info("Took too long from preperation to afterSend(ok? " + sendSuccessful + + "): " + allTime + "ms " + " after failing on: " + + msg.getFailedTransports() + " and succeeding on " + getStyle()); + if (allTime > 60*1000) { + // WTF!!@# + if (_log.shouldLog(Log.WARN)) + _log.warn("WTF, more than a minute slow? 
" + msg.getMessage().getClass().getName() + + " of id " + msg.getMessage().getUniqueId() + " (send begin on " + + new Date(msg.getSendBegin()) + " / created on " + + new Date(msg.getCreated()) + "): " + msg, msg.getCreatedBy()); + _context.messageHistory().messageProcessingError(msg.getMessage().getUniqueId(), msg.getMessage().getClass().getName(), "Took too long to send [" + allTime + "ms]"); + } + } + + _context.statManager().addRateData("transport.sendProcessingTime", msg.getLifetime(), msg.getLifetime()); + + if (sendSuccessful) { + _context.profileManager().messageSent(msg.getTarget().getIdentity().getHash(), getStyle(), sendTime, msg.getMessageSize()); + _context.statManager().addRateData("transport.sendMessageSize", msg.getMessageSize(), sendTime); + } else { + _context.profileManager().messageFailed(msg.getTarget().getIdentity().getHash(), getStyle()); + _context.statManager().updateFrequency("transport.sendMessageFailureFrequency"); + } } /** @@ -144,23 +156,24 @@ public abstract class TransportImpl implements Transport { * */ public void send(OutNetMessage msg) { - boolean duplicate = false; - synchronized (_sendPool) { - if (_sendPool.contains(msg)) - duplicate = true; - else - _sendPool.add(msg); - } - if (duplicate) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Message already is in the queue? wtf. msg = " + msg, new Exception("wtf, requeued?")); - } - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Message added to send pool"); - outboundMessageReady(); - if (_log.shouldLog(Log.INFO)) - _log.debug("OutboundMessageReady called"); + boolean duplicate = false; + synchronized (_sendPool) { + if (_sendPool.contains(msg)) + duplicate = true; + else + _sendPool.add(msg); + } + if (duplicate) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Message already is in the queue? wtf. 
msg = " + msg, + new Exception("wtf, requeued?")); + } + + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Message added to send pool"); + outboundMessageReady(); + if (_log.shouldLog(Log.INFO)) + _log.debug("OutboundMessageReady called"); } /** * This message is called whenever a new message is added to the send pool, @@ -169,42 +182,43 @@ public abstract class TransportImpl implements Transport { protected abstract void outboundMessageReady(); public void messageReceived(I2NPMessage inMsg, RouterIdentity remoteIdent, Hash remoteIdentHash, long msToReceive, int bytesReceived) { - if (_log.shouldLog(Log.INFO)) { - StringBuffer buf = new StringBuffer(128); - buf.append("Message received: ").append(inMsg.getClass().getName()); - buf.append(" in ").append(msToReceive).append("ms containing ").append(bytesReceived).append(" bytes "); - buf.append(" from "); - if (remoteIdentHash != null) { - buf.append(remoteIdentHash.toBase64()); - } else if (remoteIdent != null) { - buf.append(remoteIdent.getHash().toBase64()); - } else { - buf.append("[unknown]"); - } - buf.append(" and forwarding to listener: "); - if (_listener != null) - buf.append(_listener); + if (_log.shouldLog(Log.INFO)) { + StringBuffer buf = new StringBuffer(128); + buf.append("Message received: ").append(inMsg.getClass().getName()); + buf.append(" in ").append(msToReceive).append("ms containing "); + buf.append(bytesReceived).append(" bytes "); + buf.append(" from "); + if (remoteIdentHash != null) { + buf.append(remoteIdentHash.toBase64()); + } else if (remoteIdent != null) { + buf.append(remoteIdent.getHash().toBase64()); + } else { + buf.append("[unknown]"); + } + buf.append(" and forwarding to listener: "); + if (_listener != null) + buf.append(_listener); - _log.info(buf.toString()); - } - - if (remoteIdent != null) - remoteIdentHash = remoteIdent.getHash(); - if (remoteIdentHash != null) { - ProfileManager.getInstance().messageReceived(remoteIdentHash, getStyle(), msToReceive, bytesReceived); - 
StatManager.getInstance().addRateData("transport.receiveMessageSize", bytesReceived, msToReceive); - } - - //// this functionality is built into the InNetMessagePool - //String type = inMsg.getClass().getName(); - //MessageHistory.getInstance().receiveMessage(type, inMsg.getUniqueId(), inMsg.getMessageExpiration(), remoteIdentHash, true); - - if (_listener != null) { - _listener.messageReceived(inMsg, remoteIdent, remoteIdentHash); - } else { - if (_log.shouldLog(Log.ERROR)) - _log.error("WTF! Null listener! this = " + toString(), new Exception("Null listener")); - } + _log.info(buf.toString()); + } + + if (remoteIdent != null) + remoteIdentHash = remoteIdent.getHash(); + if (remoteIdentHash != null) { + _context.profileManager().messageReceived(remoteIdentHash, getStyle(), msToReceive, bytesReceived); + _context.statManager().addRateData("transport.receiveMessageSize", bytesReceived, msToReceive); + } + + //// this functionality is built into the InNetMessagePool + //String type = inMsg.getClass().getName(); + //MessageHistory.getInstance().receiveMessage(type, inMsg.getUniqueId(), inMsg.getMessageExpiration(), remoteIdentHash, true); + + if (_listener != null) { + _listener.messageReceived(inMsg, remoteIdent, remoteIdentHash); + } else { + if (_log.shouldLog(Log.ERROR)) + _log.error("WTF! Null listener! 
this = " + toString(), new Exception("Null listener")); + } } /** @@ -212,13 +226,13 @@ public abstract class TransportImpl implements Transport { * */ protected RouterAddress getTargetAddress(RouterInfo address) { - if (address == null) return null; - for (Iterator iter = address.getAddresses().iterator(); iter.hasNext(); ) { - RouterAddress addr = (RouterAddress)iter.next(); - if (getStyle().equals(addr.getTransportStyle())) - return addr; - } - return null; + if (address == null) return null; + for (Iterator iter = address.getAddresses().iterator(); iter.hasNext(); ) { + RouterAddress addr = (RouterAddress)iter.next(); + if (getStyle().equals(addr.getTransportStyle())) + return addr; + } + return null; } public Set getCurrentAddresses() { return _currentAddresses; } diff --git a/router/java/src/net/i2p/router/transport/TransportManager.java b/router/java/src/net/i2p/router/transport/TransportManager.java index 73fedb471..7dedabb21 100644 --- a/router/java/src/net/i2p/router/transport/TransportManager.java +++ b/router/java/src/net/i2p/router/transport/TransportManager.java @@ -21,7 +21,6 @@ import net.i2p.data.RouterAddress; import net.i2p.data.RouterIdentity; import net.i2p.data.RouterInfo; import net.i2p.data.SigningPrivateKey; -import net.i2p.data.i2np.DatabaseFindNearestMessage; import net.i2p.data.i2np.DatabaseLookupMessage; import net.i2p.data.i2np.DatabaseSearchReplyMessage; import net.i2p.data.i2np.DatabaseStoreMessage; @@ -35,227 +34,243 @@ import net.i2p.router.transport.phttp.PHTTPTransport; import net.i2p.router.transport.tcp.TCPTransport; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; public class TransportManager implements TransportEventListener { - private static final Log _log = new Log(TransportManager.class); + private Log _log; private List _transports; private List _addresses; - private SigningPrivateKey _myIdentitySigningKey; + private RouterContext _context; private final static String 
PROP_DISABLE_TCP = "i2np.tcp.disable"; - public TransportManager(RouterInfo routerInfo, SigningPrivateKey routerSigningKey) { - _myIdentitySigningKey = routerSigningKey; - _transports = new ArrayList(); - _addresses = new ArrayList(); + public TransportManager(RouterContext context) { + _context = context; + _log = _context.logManager().getLog(TransportManager.class); + _transports = new ArrayList(); + _addresses = new ArrayList(); } public void addTransport(Transport transport) { - if (transport == null) return; - _transports.add(transport); - transport.setListener(this); + if (transport == null) return; + _transports.add(transport); + transport.setListener(this); } public void removeTransport(Transport transport) { - if (transport == null) return; - _transports.remove(transport); - transport.setListener(null); + if (transport == null) return; + _transports.remove(transport); + transport.setListener(null); } private void configTransports() { - RouterIdentity ident = Router.getInstance().getRouterInfo().getIdentity(); - Set addresses = CommSystemFacade.getInstance().createAddresses(); - RouterAddress tcpAddr = null; - RouterAddress phttpAddr = null; - for (Iterator iter = addresses.iterator(); iter.hasNext();) { - RouterAddress addr = (RouterAddress)iter.next(); - if (TCPTransport.STYLE.equals(addr.getTransportStyle())) { - tcpAddr = addr; - } - if (PHTTPTransport.STYLE.equals(addr.getTransportStyle())) { - phttpAddr = addr; - } - } - - String disableTCP = Router.getInstance().getConfigSetting(PROP_DISABLE_TCP); - if ( (disableTCP != null) && (Boolean.TRUE.toString().equalsIgnoreCase(disableTCP)) ) { - _log.info("Explicitly disabling the TCP transport!"); - } else { - Transport t = new TCPTransport(ident, _myIdentitySigningKey, tcpAddr); - t.setListener(this); - _transports.add(t); - } - Transport t = new PHTTPTransport(ident, _myIdentitySigningKey, phttpAddr); - t.setListener(this); - _transports.add(t); + RouterIdentity ident = 
_context.router().getRouterInfo().getIdentity(); + Set addresses = _context.commSystem().createAddresses(); + RouterAddress tcpAddr = null; + RouterAddress phttpAddr = null; + for (Iterator iter = addresses.iterator(); iter.hasNext();) { + RouterAddress addr = (RouterAddress)iter.next(); + if (TCPTransport.STYLE.equals(addr.getTransportStyle())) { + tcpAddr = addr; + } + if (PHTTPTransport.STYLE.equals(addr.getTransportStyle())) { + phttpAddr = addr; + } + } + + String disableTCP = _context.router().getConfigSetting(PROP_DISABLE_TCP); + if ( (disableTCP != null) && (Boolean.TRUE.toString().equalsIgnoreCase(disableTCP)) ) { + _log.info("Explicitly disabling the TCP transport!"); + } else { + Transport t = new TCPTransport(_context, tcpAddr); + t.setListener(this); + _transports.add(t); + } + Transport t = new PHTTPTransport(_context, phttpAddr); + t.setListener(this); + _transports.add(t); } public void startListening() { - configTransports(); - _log.debug("Starting up the transport manager"); - for (int i = 0; i < _transports.size(); i++) { - Transport t = (Transport)_transports.get(i); - RouterAddress addr = t.startListening(); - if (addr != null) _addresses.add(addr); - _log.debug("Transport " + i + " (" + t.getStyle() + ") started"); - } - _log.debug("Done start listening on transports"); + configTransports(); + _log.debug("Starting up the transport manager"); + for (int i = 0; i < _transports.size(); i++) { + Transport t = (Transport)_transports.get(i); + RouterAddress addr = t.startListening(); + if (addr != null) _addresses.add(addr); + _log.debug("Transport " + i + " (" + t.getStyle() + ") started"); + } + _log.debug("Done start listening on transports"); } public void stopListening() { - for (int i = 0; i < _transports.size(); i++) { - ((Transport)_transports.get(i)).stopListening(); - } - _transports.clear(); + for (int i = 0; i < _transports.size(); i++) { + ((Transport)_transports.get(i)).stopListening(); + } + _transports.clear(); } private boolean 
isSupported(Set addresses, Transport t) { - for (Iterator iter = addresses.iterator(); iter.hasNext(); ) { - RouterAddress addr = (RouterAddress)iter.next(); - if (addr.getTransportStyle().equals(t.getStyle())) - return true; - } - return false; + for (Iterator iter = addresses.iterator(); iter.hasNext(); ) { + RouterAddress addr = (RouterAddress)iter.next(); + if (addr.getTransportStyle().equals(t.getStyle())) + return true; + } + return false; } public List getBids(OutNetMessage msg) { - if (msg == null) - throw new IllegalArgumentException("Null message? no bidding on a null outNetMessage!"); - if (Router.getInstance().getRouterInfo().equals(msg.getTarget())) - throw new IllegalArgumentException("WTF, bids for a message bound to ourselves?"); - - HashSet bids = new HashSet(); - - Set addrs = msg.getTarget().getAddresses(); - Set failedTransports = msg.getFailedTransports(); - for (int i = 0; i < _transports.size(); i++) { - Transport t = (Transport)_transports.get(i); - if (failedTransports.contains(t.getStyle())) { - _log.debug("Skipping transport " + t.getStyle() + " as it already failed"); - continue; - } - // we always want to try all transports, in case there is a faster bidirectional one - // already connected (e.g. 
peer only has a public PHTTP address, but they've connected - // to us via TCP, send via TCP) - if (true || isSupported(addrs, t)) { - TransportBid bid = t.bid(msg.getTarget(), msg.getMessageSize()); - if (bid != null) { - bids.add(bid); - _log.debug("Transport " + t.getStyle() + " bid: " + bid); - } else { - _log.debug("Transport " + t.getStyle() + " did not produce a bid"); - } - } - } - List ordered = orderBids(bids, msg); - long delay = Clock.getInstance().now() - msg.getCreated(); - if (ordered.size() > 0) { - _log.debug("Winning bid: " + ((TransportBid)ordered.get(0)).getTransport().getStyle()); - if (delay > 5*1000) { - _log.info("Took too long to find this bid (" + delay + "ms)"); - } else { - _log.debug("Took a while to find this bid (" + delay + "ms)"); - } - } else { - _log.info("NO WINNING BIDS! peer: " + msg.getTarget()); - if (delay > 5*1000) { - _log.info("Took too long to fail (" + delay + "ms)"); - } else { - _log.debug("Took a while to fail (" + delay + "ms)"); - } - } - return ordered; + if (msg == null) + throw new IllegalArgumentException("Null message? no bidding on a null outNetMessage!"); + if (_context.router().getRouterInfo().equals(msg.getTarget())) + throw new IllegalArgumentException("WTF, bids for a message bound to ourselves?"); + + HashSet bids = new HashSet(); + + Set addrs = msg.getTarget().getAddresses(); + Set failedTransports = msg.getFailedTransports(); + for (int i = 0; i < _transports.size(); i++) { + Transport t = (Transport)_transports.get(i); + if (failedTransports.contains(t.getStyle())) { + _log.debug("Skipping transport " + t.getStyle() + " as it already failed"); + continue; + } + // we always want to try all transports, in case there is a faster bidirectional one + // already connected (e.g. 
peer only has a public PHTTP address, but they've connected + // to us via TCP, send via TCP) + if (true || isSupported(addrs, t)) { + TransportBid bid = t.bid(msg.getTarget(), msg.getMessageSize()); + if (bid != null) { + bids.add(bid); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Transport " + t.getStyle() + " bid: " + bid); + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Transport " + t.getStyle() + " did not produce a bid"); + } + } + } + List ordered = orderBids(bids, msg); + long delay = _context.clock().now() - msg.getCreated(); + if (ordered.size() > 0) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Winning bid: " + ((TransportBid)ordered.get(0)).getTransport().getStyle()); + if (delay > 5*1000) { + if (_log.shouldLog(Log.INFO)) + _log.info("Took too long to find this bid (" + delay + "ms)"); + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Took a while to find this bid (" + delay + "ms)"); + } + } else { + if (_log.shouldLog(Log.INFO)) + _log.info("NO WINNING BIDS! peer: " + msg.getTarget()); + if (delay > 5*1000) { + if (_log.shouldLog(Log.INFO)) + _log.info("Took too long to fail (" + delay + "ms)"); + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Took a while to fail (" + delay + "ms)"); + } + } + return ordered; } private List orderBids(HashSet bids, OutNetMessage msg) { - // db messages should go as fast as possible, while the others - // should use as little bandwidth as possible. - switch (msg.getMessage().getType()) { - case DatabaseFindNearestMessage.MESSAGE_TYPE: - case DatabaseLookupMessage.MESSAGE_TYPE: - case DatabaseSearchReplyMessage.MESSAGE_TYPE: - case DatabaseStoreMessage.MESSAGE_TYPE: - _log.debug("Ordering by fastest"); - return orderByFastest(bids, msg); - default: - _log.debug("Ordering by bandwidth"); - return orderByBandwidth(bids, msg); - } + // db messages should go as fast as possible, while the others + // should use as little bandwidth as possible. 
+ switch (msg.getMessage().getType()) { + case DatabaseLookupMessage.MESSAGE_TYPE: + case DatabaseSearchReplyMessage.MESSAGE_TYPE: + case DatabaseStoreMessage.MESSAGE_TYPE: + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Ordering by fastest"); + return orderByFastest(bids, msg); + default: + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Ordering by bandwidth"); + return orderByBandwidth(bids, msg); + } } private int getCost(RouterInfo target, String transportStyle) { - for (Iterator iter = target.getAddresses().iterator(); iter.hasNext();) { - RouterAddress addr = (RouterAddress)iter.next(); - if (addr.getTransportStyle().equals(transportStyle)) - return addr.getCost(); - } - return 1; + for (Iterator iter = target.getAddresses().iterator(); iter.hasNext();) { + RouterAddress addr = (RouterAddress)iter.next(); + if (addr.getTransportStyle().equals(transportStyle)) + return addr.getCost(); + } + return 1; } private List orderByFastest(HashSet bids, OutNetMessage msg) { - Map ordered = new TreeMap(); - for (Iterator iter = bids.iterator(); iter.hasNext(); ) { - TransportBid bid = (TransportBid)iter.next(); - int cur = bid.getLatencyMs(); - int cost = getCost(msg.getTarget(), bid.getTransport().getStyle()); - _log.debug("Bid latency: " + (cur*cost) + " for transport " + bid.getTransport().getStyle()); - while (ordered.containsKey(new Integer(cur*cost))) - cur++; - ordered.put(new Integer(cur*cost), bid); - } - List bidList = new ArrayList(ordered.size()); - for (Iterator iter = ordered.keySet().iterator(); iter.hasNext(); ) { - Object k = iter.next(); - bidList.add(ordered.get(k)); - } - return bidList; + Map ordered = new TreeMap(); + for (Iterator iter = bids.iterator(); iter.hasNext(); ) { + TransportBid bid = (TransportBid)iter.next(); + int cur = bid.getLatencyMs(); + int cost = getCost(msg.getTarget(), bid.getTransport().getStyle()); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Bid latency: " + (cur*cost) + " for transport " + + bid.getTransport().getStyle()); + 
while (ordered.containsKey(new Integer(cur*cost))) + cur++; + ordered.put(new Integer(cur*cost), bid); + } + List bidList = new ArrayList(ordered.size()); + for (Iterator iter = ordered.keySet().iterator(); iter.hasNext(); ) { + Object k = iter.next(); + bidList.add(ordered.get(k)); + } + return bidList; } private List orderByBandwidth(HashSet bids, OutNetMessage msg) { - Map ordered = new TreeMap(); - for (Iterator iter = bids.iterator(); iter.hasNext(); ) { - TransportBid bid = (TransportBid)iter.next(); - int cur = bid.getBandwidthBytes(); - int cost = getCost(msg.getTarget(), bid.getTransport().getStyle()); - _log.debug("Bid size: " + (cur*cost) + " for transport " + bid.getTransport().getStyle()); - while (ordered.containsKey(new Integer(cur*cost))) - cur++; - ordered.put(new Integer(cur*cost), bid); - } - List bidList = new ArrayList(ordered.size()); - for (Iterator iter = ordered.keySet().iterator(); iter.hasNext(); ) { - Object k = iter.next(); - bidList.add(ordered.get(k)); - } - return bidList; + Map ordered = new TreeMap(); + for (Iterator iter = bids.iterator(); iter.hasNext(); ) { + TransportBid bid = (TransportBid)iter.next(); + int cur = bid.getBandwidthBytes(); + int cost = getCost(msg.getTarget(), bid.getTransport().getStyle()); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Bid size: " + (cur*cost) + " for transport " + bid.getTransport().getStyle()); + while (ordered.containsKey(new Integer(cur*cost))) + cur++; + ordered.put(new Integer(cur*cost), bid); + } + List bidList = new ArrayList(ordered.size()); + for (Iterator iter = ordered.keySet().iterator(); iter.hasNext(); ) { + Object k = iter.next(); + bidList.add(ordered.get(k)); + } + return bidList; } public void messageReceived(I2NPMessage message, RouterIdentity fromRouter, Hash fromRouterHash) { - _log.debug("I2NPMessage received: " + message.getClass().getName(), new Exception("Where did I come from again?")); - InNetMessage msg = new InNetMessage(); - msg.setFromRouter(fromRouter); - 
msg.setFromRouterHash(fromRouterHash); - msg.setMessage(message); - int num = InNetMessagePool.getInstance().add(msg); - _log.debug("Added to in pool: "+ num); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("I2NPMessage received: " + message.getClass().getName(), new Exception("Where did I come from again?")); + InNetMessage msg = new InNetMessage(); + msg.setFromRouter(fromRouter); + msg.setFromRouterHash(fromRouterHash); + msg.setMessage(message); + int num = _context.inNetMessagePool().add(msg); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Added to in pool: "+ num); } public String renderStatusHTML() { - StringBuffer buf = new StringBuffer(); - buf.append("

    Transport Manager

    \n"); - buf.append("Listening on:
    \n");
    -	for (Iterator iter = _addresses.iterator(); iter.hasNext(); ) {
    -	    RouterAddress addr = (RouterAddress)iter.next();
    -	    buf.append(addr.toString()).append("\n\n");
    -	}
    -	buf.append("
    \n"); - buf.append("
      \n"); - for (Iterator iter = _transports.iterator(); iter.hasNext(); ) { - Transport t = (Transport)iter.next(); - String str = t.renderStatusHTML(); - if (str != null) - buf.append("
    • ").append(str).append("
    • \n"); - } - buf.append("
    \n"); - return buf.toString(); + StringBuffer buf = new StringBuffer(); + buf.append("

    Transport Manager

    \n"); + buf.append("Listening on:
    \n");
    +        for (Iterator iter = _addresses.iterator(); iter.hasNext(); ) {
    +            RouterAddress addr = (RouterAddress)iter.next();
    +            buf.append(addr.toString()).append("\n\n");
    +        }
    +        buf.append("
    \n"); + buf.append("
      \n"); + for (Iterator iter = _transports.iterator(); iter.hasNext(); ) { + Transport t = (Transport)iter.next(); + String str = t.renderStatusHTML(); + if (str != null) + buf.append("
    • ").append(str).append("
    • \n"); + } + buf.append("
    \n"); + return buf.toString(); } } diff --git a/router/java/src/net/i2p/router/transport/TrivialBandwidthLimiter.java b/router/java/src/net/i2p/router/transport/TrivialBandwidthLimiter.java index fe7c308e9..0858f7299 100644 --- a/router/java/src/net/i2p/router/transport/TrivialBandwidthLimiter.java +++ b/router/java/src/net/i2p/router/transport/TrivialBandwidthLimiter.java @@ -1,9 +1,9 @@ package net.i2p.router.transport; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -14,14 +14,15 @@ import net.i2p.router.JobQueue; import net.i2p.router.Router; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** - * Coordinate the bandwidth limiting across all classes of peers. Currently + * Coordinate the bandwidth limiting across all classes of peers. 
Currently * treats everything as open (aka doesn't limit) * */ public class TrivialBandwidthLimiter extends BandwidthLimiter { - private final static Log _log = new Log(TrivialBandwidthLimiter.class); + private Log _log; private volatile long _maxReceiveBytesPerMinute; private volatile long _maxSendBytesPerMinute; private volatile long _lastResync; @@ -30,36 +31,38 @@ public class TrivialBandwidthLimiter extends BandwidthLimiter { private volatile long _totalSendBytes; private volatile long _availableSend; private volatile long _availableReceive; - + private final static String PROP_INBOUND_BANDWIDTH = "i2np.bandwidth.inboundBytesPerMinute"; private final static String PROP_OUTBOUND_BANDWIDTH = "i2np.bandwidth.outboundBytesPerMinute"; private final static long MINUTE = 60*1000; private final static long READ_CONFIG_DELAY = MINUTE; - + // max # bytes to store in the pool, in case we have lots of traffic we don't want to // spike too hard private static long MAX_IN_POOL = 10*1024; private static long MAX_OUT_POOL = 10*1024; - TrivialBandwidthLimiter() { - this(-1, -1); + public TrivialBandwidthLimiter(RouterContext ctx) { + this(ctx, -1, -1); } - TrivialBandwidthLimiter(long sendPerMinute, long receivePerMinute) { - _maxReceiveBytesPerMinute = receivePerMinute; - _maxSendBytesPerMinute = sendPerMinute; - _lastResync = Clock.getInstance().now(); - _lastReadConfig = _lastResync; - _totalReceiveBytes = 0; - _totalSendBytes = 0; - _availableReceive = receivePerMinute; - _availableSend = sendPerMinute; - MAX_IN_POOL = 10*_availableReceive; - MAX_OUT_POOL = 10*_availableSend; - - JobQueue.getInstance().addJob(new UpdateBWJob()); - updateLimits(); - _log.info("Initializing the limiter with maximum inbound [" + MAX_IN_POOL + "] outbound [" + MAX_OUT_POOL + "]"); + TrivialBandwidthLimiter(RouterContext ctx, long sendPerMinute, long receivePerMinute) { + super(ctx); + _log = ctx.logManager().getLog(TrivialBandwidthLimiter.class); + _maxReceiveBytesPerMinute = receivePerMinute; 
+ _maxSendBytesPerMinute = sendPerMinute; + _lastResync = ctx.clock().now(); + _lastReadConfig = _lastResync; + _totalReceiveBytes = 0; + _totalSendBytes = 0; + _availableReceive = receivePerMinute; + _availableSend = sendPerMinute; + MAX_IN_POOL = 10*_availableReceive; + MAX_OUT_POOL = 10*_availableSend; + + _context.jobQueue().addJob(new UpdateBWJob()); + updateLimits(); + _log.info("Initializing the limiter with maximum inbound [" + MAX_IN_POOL + "] outbound [" + MAX_OUT_POOL + "]"); } public long getTotalSendBytes() { return _totalSendBytes; } @@ -69,133 +72,134 @@ public class TrivialBandwidthLimiter extends BandwidthLimiter { * Return how many milliseconds to wait before receiving/processing numBytes from the peer */ public long calculateDelayInbound(RouterIdentity peer, int numBytes) { - if (_maxReceiveBytesPerMinute <= 0) return 0; - if (_availableReceive - numBytes > 0) { - // we have bytes available - return 0; - } else { - // we don't have sufficient bytes. - // the delay = (needed/numPerMinute) - long val = MINUTE*(numBytes-_availableReceive)/_maxReceiveBytesPerMinute; - _log.debug("DelayInbound: " + val + " for " + numBytes + " (avail=" + _availableReceive + ", max=" + _maxReceiveBytesPerMinute + ")"); - return val; - } + if (_maxReceiveBytesPerMinute <= 0) return 0; + if (_availableReceive - numBytes > 0) { + // we have bytes available + return 0; + } else { + // we don't have sufficient bytes. 
+ // the delay = (needed/numPerMinute) + long val = MINUTE*(numBytes-_availableReceive)/_maxReceiveBytesPerMinute; + _log.debug("DelayInbound: " + val + " for " + numBytes + " (avail=" + _availableReceive + ", max=" + _maxReceiveBytesPerMinute + ")"); + return val; + } } - + /** * Return how many milliseconds to wait before sending numBytes to the peer */ public long calculateDelayOutbound(RouterIdentity peer, int numBytes) { - if (_maxSendBytesPerMinute <= 0) return 0; - if (_availableSend - numBytes > 0) { - // we have bytes available - return 0; - } else { - // we don't have sufficient bytes. - // the delay = (needed/numPerMinute) - long val = MINUTE*(numBytes-_availableSend)/_maxSendBytesPerMinute; - _log.debug("DelayOutbound: " + val + " for " + numBytes + " (avail=" + _availableSend + ", max=" + _maxSendBytesPerMinute + ")"); - return val; - } + if (_maxSendBytesPerMinute <= 0) return 0; + if (_availableSend - numBytes > 0) { + // we have bytes available + return 0; + } else { + // we don't have sufficient bytes. 
+ // the delay = (needed/numPerMinute) + long val = MINUTE*(numBytes-_availableSend)/_maxSendBytesPerMinute; + _log.debug("DelayOutbound: " + val + " for " + numBytes + " (avail=" + _availableSend + ", max=" + _maxSendBytesPerMinute + ")"); + return val; + } } /** * Note that numBytes have been read from the peer */ public void consumeInbound(RouterIdentity peer, int numBytes) { - _totalReceiveBytes += numBytes; - _availableReceive -= numBytes; + _totalReceiveBytes += numBytes; + _availableReceive -= numBytes; } /** * Note that numBytes have been sent to the peer */ public void consumeOutbound(RouterIdentity peer, int numBytes) { - _totalSendBytes += numBytes; - _availableSend -= numBytes; + _totalSendBytes += numBytes; + _availableSend -= numBytes; } private void updateLimits() { - String inBwStr = Router.getInstance().getConfigSetting(PROP_INBOUND_BANDWIDTH); - String outBwStr = Router.getInstance().getConfigSetting(PROP_OUTBOUND_BANDWIDTH); - if (true) { - // DISABLED UNTIL THIS STUFF GETS A REVAMP - inBwStr = "-60"; - outBwStr = "-60"; - } - long oldReceive = _maxReceiveBytesPerMinute; - long oldSend = _maxSendBytesPerMinute; - - _log.debug("Read limits ["+inBwStr+" in, " + outBwStr + " out] vs current [" + oldReceive + " in, " + oldSend + " out]"); - - if ( (inBwStr != null) && (inBwStr.trim().length() > 0) ) { - try { - long in = Long.parseLong(inBwStr); - if (in >= 0) { - _maxReceiveBytesPerMinute = in; - MAX_IN_POOL = 10*_maxReceiveBytesPerMinute; - } - } catch (NumberFormatException nfe) { - _log.warn("Invalid inbound bandwidth limit [" + inBwStr + "], keeping as " + _maxReceiveBytesPerMinute); - } - } else { - _log.warn("Inbound bandwidth limits not specified in the config via " + PROP_INBOUND_BANDWIDTH); - } - if ( (outBwStr != null) && (outBwStr.trim().length() > 0) ) { - try { - long out = Long.parseLong(outBwStr); - if (out >= 0) { - _maxSendBytesPerMinute = out; - MAX_OUT_POOL = 10*_maxSendBytesPerMinute; - } - } catch (NumberFormatException nfe) { - 
_log.warn("Invalid outbound bandwidth limit [" + outBwStr + "], keeping as " + _maxSendBytesPerMinute); - } - } else { - _log.warn("Outbound bandwidth limits not specified in the config via " + PROP_OUTBOUND_BANDWIDTH); - } - - if ( (oldReceive != _maxReceiveBytesPerMinute) || (oldSend != _maxSendBytesPerMinute) ) { - _log.info("Max receive bytes per minute: " + _maxReceiveBytesPerMinute + ", max send per minute: " + _maxSendBytesPerMinute); - _availableReceive = _maxReceiveBytesPerMinute; - _availableSend = _maxSendBytesPerMinute; - } + String inBwStr = _context.router().getConfigSetting(PROP_INBOUND_BANDWIDTH); + String outBwStr = _context.router().getConfigSetting(PROP_OUTBOUND_BANDWIDTH); + if (true) { + // DISABLED UNTIL THIS STUFF GETS A REVAMP + inBwStr = "-60"; + outBwStr = "-60"; + } + long oldReceive = _maxReceiveBytesPerMinute; + long oldSend = _maxSendBytesPerMinute; + + _log.debug("Read limits ["+inBwStr+" in, " + outBwStr + " out] vs current [" + oldReceive + " in, " + oldSend + " out]"); + + if ( (inBwStr != null) && (inBwStr.trim().length() > 0) ) { + try { + long in = Long.parseLong(inBwStr); + if (in >= 0) { + _maxReceiveBytesPerMinute = in; + MAX_IN_POOL = 10*_maxReceiveBytesPerMinute; + } + } catch (NumberFormatException nfe) { + _log.warn("Invalid inbound bandwidth limit [" + inBwStr + "], keeping as " + _maxReceiveBytesPerMinute); + } + } else { + _log.warn("Inbound bandwidth limits not specified in the config via " + PROP_INBOUND_BANDWIDTH); + } + if ( (outBwStr != null) && (outBwStr.trim().length() > 0) ) { + try { + long out = Long.parseLong(outBwStr); + if (out >= 0) { + _maxSendBytesPerMinute = out; + MAX_OUT_POOL = 10*_maxSendBytesPerMinute; + } + } catch (NumberFormatException nfe) { + _log.warn("Invalid outbound bandwidth limit [" + outBwStr + "], keeping as " + _maxSendBytesPerMinute); + } + } else { + _log.warn("Outbound bandwidth limits not specified in the config via " + PROP_OUTBOUND_BANDWIDTH); + } + + if ( (oldReceive != 
_maxReceiveBytesPerMinute) || (oldSend != _maxSendBytesPerMinute) ) { + _log.info("Max receive bytes per minute: " + _maxReceiveBytesPerMinute + ", max send per minute: " + _maxSendBytesPerMinute); + _availableReceive = _maxReceiveBytesPerMinute; + _availableSend = _maxSendBytesPerMinute; + } } private class UpdateBWJob extends JobImpl { - public UpdateBWJob() { - getTiming().setStartAfter(Clock.getInstance().now() + MINUTE); - } - public String getName() { return "Update bandwidth available"; } - - public void runJob() { - long now = Clock.getInstance().now(); - long numMinutes = ((now - _lastResync)/MINUTE) + 1; - _availableReceive += numMinutes * _maxReceiveBytesPerMinute; - _availableSend += numMinutes * _maxSendBytesPerMinute; - _lastResync = now; - - _log.debug("Adding " + (numMinutes*_maxReceiveBytesPerMinute) + " bytes to availableReceive"); - _log.debug("Adding " + (numMinutes*_maxSendBytesPerMinute) + " bytes to availableSend"); - - // if we're huge, trim - if (_availableReceive > MAX_IN_POOL) { - _log.debug("Trimming available receive to " + MAX_IN_POOL); - _availableReceive = MAX_IN_POOL; - } - if (_availableSend > MAX_OUT_POOL) { - _log.debug("Trimming available send to " + MAX_OUT_POOL); - _availableSend = MAX_OUT_POOL; - } - - getTiming().setStartAfter(now + MINUTE); - JobQueue.getInstance().addJob(UpdateBWJob.this); - - // now update the bandwidth limits, in case they've changed - if (now > _lastReadConfig + READ_CONFIG_DELAY) { - updateLimits(); - _lastReadConfig = now; - } - } + public UpdateBWJob() { + super(TrivialBandwidthLimiter.this._context); + getTiming().setStartAfter(TrivialBandwidthLimiter.this._context.clock().now() + MINUTE); + } + public String getName() { return "Update bandwidth available"; } + + public void runJob() { + long now = TrivialBandwidthLimiter.this._context.clock().now(); + long numMinutes = ((now - _lastResync)/MINUTE) + 1; + _availableReceive += numMinutes * _maxReceiveBytesPerMinute; + _availableSend += numMinutes * 
_maxSendBytesPerMinute; + _lastResync = now; + + _log.debug("Adding " + (numMinutes*_maxReceiveBytesPerMinute) + " bytes to availableReceive"); + _log.debug("Adding " + (numMinutes*_maxSendBytesPerMinute) + " bytes to availableSend"); + + // if we're huge, trim + if (_availableReceive > MAX_IN_POOL) { + _log.debug("Trimming available receive to " + MAX_IN_POOL); + _availableReceive = MAX_IN_POOL; + } + if (_availableSend > MAX_OUT_POOL) { + _log.debug("Trimming available send to " + MAX_OUT_POOL); + _availableSend = MAX_OUT_POOL; + } + + getTiming().setStartAfter(now + MINUTE); + UpdateBWJob.this._context.jobQueue().addJob(UpdateBWJob.this); + + // now update the bandwidth limits, in case they've changed + if (now > _lastReadConfig + READ_CONFIG_DELAY) { + updateLimits(); + _lastReadConfig = now; + } + } } } diff --git a/router/java/src/net/i2p/router/transport/phttp/PHTTPPoller.java b/router/java/src/net/i2p/router/transport/phttp/PHTTPPoller.java index d4cd9c1b1..8ef9dcf73 100644 --- a/router/java/src/net/i2p/router/transport/phttp/PHTTPPoller.java +++ b/router/java/src/net/i2p/router/transport/phttp/PHTTPPoller.java @@ -1,9 +1,9 @@ package net.i2p.router.transport.phttp; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -30,207 +30,216 @@ import net.i2p.util.Clock; import net.i2p.util.I2PThread; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.router.RouterContext; class PHTTPPoller { - private final static Log _log = new Log(PHTTPPoller.class); + private Log _log; private PHTTPTransport _transport; private URL _pollURL; private Poller _poller; + private RouterContext _context; + private boolean _polling; - public PHTTPPoller(PHTTPTransport transport) { - _transport = transport; - _pollURL = null; - _poller = new Poller(); + public PHTTPPoller(RouterContext context, PHTTPTransport transport) { + _context = context; + _log = context.logManager().getLog(PHTTPPoller.class); + _transport = transport; + _pollURL = null; + _poller = new Poller(); + _polling = false; } - public void startPolling() { - try { - _pollURL = new URL(_transport.getMyPollURL()); - } catch (MalformedURLException mue) { - _log.error("Invalid polling URL [" + _transport.getMyPollURL() + "]", mue); - return; - } - Thread t = new I2PThread(_poller); - t.setName("HTTP Poller"); - t.setDaemon(true); - t.setPriority(I2PThread.MIN_PRIORITY); - t.start(); + public synchronized void startPolling() { + if (_polling) return; + _polling = true; + + try { + _pollURL = new URL(_transport.getMyPollURL()); + } catch (MalformedURLException mue) { + _log.error("Invalid polling URL [" + _transport.getMyPollURL() + "]", mue); + return; + } + Thread t = new I2PThread(_poller); + t.setName("HTTP Poller"); + t.setDaemon(true); + t.setPriority(I2PThread.MIN_PRIORITY); + t.start(); } public void stopPolling() { - _poller.stopPolling(); + _poller.stopPolling(); } private byte[] getAuthData() { - try { - ByteArrayOutputStream baos = new ByteArrayOutputStream(4); - long nonce = RandomSource.getInstance().nextInt(Integer.MAX_VALUE); - _log.debug("Creating nonce with value [" + nonce + "]"); - DataHelper.writeLong(baos, 4, nonce); - byte nonceData[] = baos.toByteArray(); - Signature sig = 
DSAEngine.getInstance().sign(nonceData, _transport.getMySigningKey()); - baos = new ByteArrayOutputStream(512); - DataHelper.writeLong(baos, 4, nonce); - sig.writeBytes(baos); - byte data[] = baos.toByteArray(); - return data; - } catch (NumberFormatException nfe) { - _log.error("Error writing the authentication data", nfe); - return null; - } catch (DataFormatException dfe) { - _log.error("Error formatting the authentication data", dfe); - return null; - } catch (IOException ioe) { - _log.error("Error writing the authentication data", ioe); - return null; - } + try { + ByteArrayOutputStream baos = new ByteArrayOutputStream(4); + long nonce = _context.random().nextInt(Integer.MAX_VALUE); + _log.debug("Creating nonce with value [" + nonce + "]"); + DataHelper.writeLong(baos, 4, nonce); + byte nonceData[] = baos.toByteArray(); + Signature sig = _context.dsa().sign(nonceData, _transport.getMySigningKey()); + baos = new ByteArrayOutputStream(512); + DataHelper.writeLong(baos, 4, nonce); + sig.writeBytes(baos); + byte data[] = baos.toByteArray(); + return data; + } catch (NumberFormatException nfe) { + _log.error("Error writing the authentication data", nfe); + return null; + } catch (DataFormatException dfe) { + _log.error("Error formatting the authentication data", dfe); + return null; + } catch (IOException ioe) { + _log.error("Error writing the authentication data", ioe); + return null; + } } public final static String CONFIG_POLL = "i2np.phttp.shouldPoll"; public final static boolean DEFAULT_POLL = false; - static boolean shouldRejectMessages() { - String val = Router.getInstance().getConfigSetting(CONFIG_POLL); - if (null == val) { - return !DEFAULT_POLL; - } else { - return !("true".equals(val)); - } + boolean shouldRejectMessages() { + String val = _context.router().getConfigSetting(CONFIG_POLL); + if (null == val) { + return !DEFAULT_POLL; + } else { + return !("true".equals(val)); + } } class Poller implements Runnable { - private boolean _running; - private 
I2NPMessageHandler _handler = new I2NPMessageHandler(); - public void run() { - _running = true; - // wait 5 seconds before starting to poll so we don't drop too many messages - try { Thread.sleep(10*1000); } catch (InterruptedException ie) {} - - _log.debug("Poller running with delay [" + _transport.getPollFrequencyMs() + "]"); - try { - while (_running) { - int numRead = getMessages(); - if (numRead > 0) - _log.info("# messages found: " + numRead); - try { Thread.sleep(_transport.getPollFrequencyMs()); } catch (InterruptedException ie) {} - } - } catch (Throwable t) { - _log.info("Error while polling", t); - } - } - - private int getMessages() { - // open the _pollURL, authenticate ourselves, and get any messages available - byte authData[] = getAuthData(); - if (authData == null) return 0; - - BandwidthLimiter.getInstance().delayOutbound(null, authData.length + 512); // HTTP overhead - - try { - _log.debug("Before opening " + _pollURL.toExternalForm()); - HttpURLConnection con = (HttpURLConnection)_pollURL.openConnection(); - // send the info - con.setRequestMethod("POST"); - con.setUseCaches(false); - con.setDoOutput(true); - con.setDoInput(true); - - ByteArrayOutputStream baos = new ByteArrayOutputStream(authData.length + 64); - String target = _transport.getMyIdentity().getHash().toBase64(); - baos.write("target=".getBytes()); - baos.write(target.getBytes()); - baos.write("&".getBytes()); - baos.write(authData); - byte data[] = baos.toByteArray(); - //_log.debug("Data to be sent: " + Base64.encode(data)); - - con.setRequestProperty("Content-length", ""+data.length); - con.getOutputStream().write(data); - _log.debug("Data sent, before reading results of poll for [" + target + "]"); - - con.connect(); - - // fetch the results - int rc = con.getResponseCode(); - _log.debug("Response code: " + rc); - switch (rc) { - case 200: // ok - _log.debug("Polling can progress"); - break; - case 401: // signature failed - _log.error("Signature failed during polling???"); - 
return 0; - case 404: // not yet registered - _log.error("Not registered with the relay - reregistering (in case they failed)"); - _transport.registerWithRelay(); - return 0; - default: // unknown - _log.error("Invalid error code returned: " + rc); - return 0; - } - - InputStream in = con.getInputStream(); - Date peerTime = DataHelper.readDate(in); - long offset = peerTime.getTime() - System.currentTimeMillis(); - if (_transport.getTrustTime()) { - _log.info("Updating time offset to " + offset + " (old offset: " + Clock.getInstance().getOffset() + ")"); - Clock.getInstance().setOffset(offset); - } - - boolean shouldReject = shouldRejectMessages(); - if (shouldReject) { - _log.debug("Rejecting any messages [we just checked in so we could get the time]"); - return 0; - } - - int numMessages = (int)DataHelper.readLong(in, 2); - if ( (numMessages > 100) || (numMessages < 0) ) { - _log.error("Invalid # messages specified [" + numMessages + "], skipping"); - return 0; - } - - int bytesRead = 512; // HTTP overhead - - int numSuccessful = 0; - for (int i = 0; i < numMessages; i++) { - _log.debug("Receiving message " + (i+1) + " of "+ numMessages + " pending"); - long len = DataHelper.readLong(in, 4); - byte msgBuf[] = new byte[(int)len]; - int read = DataHelper.read(in, msgBuf); - if (read == -1) { - _log.error("Unable to read the message as we encountered an EOF"); - return i - 1; - } else if (read != len) { - _log.error("Unable to read the message fully [" + read + " read, " + len + " expected]"); - return i - 1; - } else { - bytesRead += 4 + read; - try { - I2NPMessage msg = _handler.readMessage(new ByteArrayInputStream(msgBuf)); - if (msg == null) { - _log.warn("PHTTP couldn't read a message from the peer out of a " + len + " byte buffer"); - } else { - _log.info("Receive message " + (i+1) + " of " + numMessages + ": " + msg.getClass().getName()); - _transport.messageReceived(msg, null, null, _handler.getLastReadTime(), (int)len); - numSuccessful++; - } - } catch 
(IOException ioe) { - _log.warn("Unable to read the message fully", ioe); - } catch (I2NPMessageException ime) { - _log.warn("Poorly formatted message", ime); - } - } - } - - BandwidthLimiter.getInstance().delayInbound(null, bytesRead); - - return numSuccessful; - } catch (Throwable t) { - _log.debug("Error polling", t); - return 0; - } - } - - public void stopPolling() { _running = false; } + private boolean _running; + private I2NPMessageHandler _handler = new I2NPMessageHandler(_context); + public void run() { + _running = true; + // wait 5 seconds before starting to poll so we don't drop too many messages + try { Thread.sleep(10*1000); } catch (InterruptedException ie) {} + + _log.debug("Poller running with delay [" + _transport.getPollFrequencyMs() + "]"); + try { + while (_running) { + int numRead = getMessages(); + if (numRead > 0) + _log.info("# messages found: " + numRead); + try { Thread.sleep(_transport.getPollFrequencyMs()); } catch (InterruptedException ie) {} + } + } catch (Throwable t) { + _log.info("Error while polling", t); + } + } + + private int getMessages() { + // open the _pollURL, authenticate ourselves, and get any messages available + byte authData[] = getAuthData(); + if (authData == null) return 0; + + _context.bandwidthLimiter().delayOutbound(null, authData.length + 512); // HTTP overhead + + try { + _log.debug("Before opening " + _pollURL.toExternalForm()); + HttpURLConnection con = (HttpURLConnection)_pollURL.openConnection(); + // send the info + con.setRequestMethod("POST"); + con.setUseCaches(false); + con.setDoOutput(true); + con.setDoInput(true); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(authData.length + 64); + String target = _transport.getMyIdentity().getHash().toBase64(); + baos.write("target=".getBytes()); + baos.write(target.getBytes()); + baos.write("&".getBytes()); + baos.write(authData); + byte data[] = baos.toByteArray(); + //_log.debug("Data to be sent: " + Base64.encode(data)); + + 
con.setRequestProperty("Content-length", ""+data.length); + con.getOutputStream().write(data); + _log.debug("Data sent, before reading results of poll for [" + target + "]"); + + con.connect(); + + // fetch the results + int rc = con.getResponseCode(); + _log.debug("Response code: " + rc); + switch (rc) { + case 200: // ok + _log.debug("Polling can progress"); + break; + case 401: // signature failed + _log.error("Signature failed during polling???"); + return 0; + case 404: // not yet registered + _log.error("Not registered with the relay - reregistering (in case they failed)"); + _transport.registerWithRelay(); + return 0; + default: // unknown + _log.error("Invalid error code returned: " + rc); + return 0; + } + + InputStream in = con.getInputStream(); + Date peerTime = DataHelper.readDate(in); + long offset = peerTime.getTime() - System.currentTimeMillis(); + if (_transport.getTrustTime()) { + _log.info("Updating time offset to " + offset + " (old offset: " + _context.clock().getOffset() + ")"); + _context.clock().setOffset(offset); + } + + boolean shouldReject = shouldRejectMessages(); + if (shouldReject) { + _log.debug("Rejecting any messages [we just checked in so we could get the time]"); + return 0; + } + + int numMessages = (int)DataHelper.readLong(in, 2); + if ( (numMessages > 100) || (numMessages < 0) ) { + _log.error("Invalid # messages specified [" + numMessages + "], skipping"); + return 0; + } + + int bytesRead = 512; // HTTP overhead + + int numSuccessful = 0; + for (int i = 0; i < numMessages; i++) { + _log.debug("Receiving message " + (i+1) + " of "+ numMessages + " pending"); + long len = DataHelper.readLong(in, 4); + byte msgBuf[] = new byte[(int)len]; + int read = DataHelper.read(in, msgBuf); + if (read == -1) { + _log.error("Unable to read the message as we encountered an EOF"); + return i - 1; + } else if (read != len) { + _log.error("Unable to read the message fully [" + read + " read, " + len + " expected]"); + return i - 1; + } else { + 
bytesRead += 4 + read; + try { + I2NPMessage msg = _handler.readMessage(new ByteArrayInputStream(msgBuf)); + if (msg == null) { + _log.warn("PHTTP couldn't read a message from the peer out of a " + len + " byte buffer"); + } else { + _log.info("Receive message " + (i+1) + " of " + numMessages + ": " + msg.getClass().getName()); + _transport.messageReceived(msg, null, null, _handler.getLastReadTime(), (int)len); + numSuccessful++; + } + } catch (IOException ioe) { + _log.warn("Unable to read the message fully", ioe); + } catch (I2NPMessageException ime) { + _log.warn("Poorly formatted message", ime); + } + } + } + + _context.bandwidthLimiter().delayInbound(null, bytesRead); + + return numSuccessful; + } catch (Throwable t) { + _log.debug("Error polling", t); + return 0; + } + } + + public void stopPolling() { _running = false; } } } diff --git a/router/java/src/net/i2p/router/transport/phttp/PHTTPSender.java b/router/java/src/net/i2p/router/transport/phttp/PHTTPSender.java index eb890f757..b32626eba 100644 --- a/router/java/src/net/i2p/router/transport/phttp/PHTTPSender.java +++ b/router/java/src/net/i2p/router/transport/phttp/PHTTPSender.java @@ -1,9 +1,9 @@ package net.i2p.router.transport.phttp; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -25,9 +25,11 @@ import net.i2p.router.transport.BandwidthLimiter; import net.i2p.util.Clock; import net.i2p.util.I2PThread; import net.i2p.util.Log; +import net.i2p.router.RouterContext; class PHTTPSender { - private final static Log _log = new Log(PHTTPSender.class); + private Log _log; + private RouterContext _context; private PHTTPTransport _transport; private volatile long _sendId = 0; @@ -35,7 +37,7 @@ class PHTTPSender { public final static long HARD_TIMEOUT = 30*1000; // no timeouts > 30 seconds /** H(routerIdent).toBase64() of the target to receive the message */ - public final static String PARAM_SEND_TARGET = "target"; + public final static String PARAM_SEND_TARGET = "target"; /** # ms to wait for the message to be delivered before failing it */ public final static String PARAM_SEND_TIMEOUTMS = "timeoutMs"; /** # bytes to be sent in the message */ @@ -55,229 +57,231 @@ class PHTTPSender { /* the URL to check to see when the message is delivered */ public final static String PROP_CHECK_URL = "statusCheckURL"; - + /** HTTP error code if the message was sent completely */ public final static int CODE_NOT_PENDING = 410; // gone /** HTTP error code if the message is still pending */ public final static int CODE_PENDING = 204; // ok, but no content - - public PHTTPSender(PHTTPTransport transport) { - _transport = transport; + + public PHTTPSender(RouterContext context, PHTTPTransport transport) { + _context = context; + _log = context.logManager().getLog(PHTTPSender.class); + _transport = transport; } public void send(OutNetMessage msg) { - _log.debug("Sending message " + msg.getMessage().getClass().getName() + " to " + msg.getTarget().getIdentity().getHash().toBase64()); - Thread t = new I2PThread(new Send(msg)); - t.setName("PHTTP Sender " + (_sendId++)); - t.setDaemon(true); - t.start(); + _log.debug("Sending message " + msg.getMessage().getClass().getName() + " to " + msg.getTarget().getIdentity().getHash().toBase64()); + Thread t = new 
I2PThread(new Send(msg)); + t.setName("PHTTP Sender " + (_sendId++)); + t.setDaemon(true); + t.start(); } class Send implements Runnable { - private OutNetMessage _msg; - public Send(OutNetMessage msg) { - _msg = msg; - } - public void run() { - boolean ok = false; - try { - ok = doSend(_msg); - } catch (IOException ioe) { - _log.error("Error sending the message", ioe); - } - _transport.afterSend(_msg, ok); - } + private OutNetMessage _msg; + public Send(OutNetMessage msg) { + _msg = msg; + } + public void run() { + boolean ok = false; + try { + ok = doSend(_msg); + } catch (IOException ioe) { + _log.error("Error sending the message", ioe); + } + _transport.afterSend(_msg, ok); + } } private boolean doSend(OutNetMessage msg) throws IOException { - long delay = BandwidthLimiter.getInstance().calculateDelayOutbound(msg.getTarget().getIdentity(), (int)msg.getMessageSize()); - _log.debug("Delaying [" + delay + "ms]"); - try { Thread.sleep(delay); } catch (InterruptedException ie) {} - _log.debug("Continuing with sending"); - // now send - URL sendURL = getURL(msg); - if (sendURL == null) { - _log.debug("No URL to send"); - return false; - } else { - _log.debug("Sending to " + sendURL.toExternalForm()); - HttpURLConnection con = (HttpURLConnection)sendURL.openConnection(); - // send the info - con.setRequestMethod("POST"); - con.setUseCaches(false); - con.setDoOutput(true); - con.setDoInput(true); - - byte data[] = getData(msg); - if (data == null) return false; - - BandwidthLimiter.getInstance().delayOutbound(msg.getTarget().getIdentity(), data.length+512); // HTTP overhead - - con.setRequestProperty("Content-length", ""+data.length); - OutputStream out = con.getOutputStream(); - out.write(data); - out.flush(); - _log.debug("Data sent, before reading"); - - // fetch the results - String checkURL = getCheckURL(con); - if (checkURL != null) { - _log.debug("Message sent"); - return checkDelivery(checkURL, msg); - } else { - _log.warn("Target not known or unable to send to 
" + msg.getTarget().getIdentity().getHash().toBase64()); - return false; - } - } + long delay = _context.bandwidthLimiter().calculateDelayOutbound(msg.getTarget().getIdentity(), (int)msg.getMessageSize()); + _log.debug("Delaying [" + delay + "ms]"); + try { Thread.sleep(delay); } catch (InterruptedException ie) {} + _log.debug("Continuing with sending"); + // now send + URL sendURL = getURL(msg); + if (sendURL == null) { + _log.debug("No URL to send"); + return false; + } else { + _log.debug("Sending to " + sendURL.toExternalForm()); + HttpURLConnection con = (HttpURLConnection)sendURL.openConnection(); + // send the info + con.setRequestMethod("POST"); + con.setUseCaches(false); + con.setDoOutput(true); + con.setDoInput(true); + + byte data[] = getData(msg); + if (data == null) return false; + + _context.bandwidthLimiter().delayOutbound(msg.getTarget().getIdentity(), data.length+512); // HTTP overhead + + con.setRequestProperty("Content-length", ""+data.length); + OutputStream out = con.getOutputStream(); + out.write(data); + out.flush(); + _log.debug("Data sent, before reading"); + + // fetch the results + String checkURL = getCheckURL(con); + if (checkURL != null) { + _log.debug("Message sent"); + return checkDelivery(checkURL, msg); + } else { + _log.warn("Target not known or unable to send to " + msg.getTarget().getIdentity().getHash().toBase64()); + return false; + } + } } private String getCheckURL(HttpURLConnection con) throws IOException { - BufferedReader reader = new BufferedReader(new InputStreamReader(con.getInputStream())); - String statusLine = reader.readLine(); - if (statusLine == null) { - _log.error("Null response line when checking URL"); - return null; - } - boolean statusOk = false; - if (!statusLine.startsWith(PROP_STATUS)) { - _log.warn("Response does not begin with status [" + statusLine + "]"); - return null; - } else { - String statVal = statusLine.substring(PROP_STATUS.length() + 1); - statusOk = STATUS_OK.equals(statVal); - - if 
(!statusOk) { - _log.info("Status was not ok for sending [" + statVal + "]"); - return null; - } - } - - String checkURL = reader.readLine(); - if (!checkURL.startsWith(PROP_CHECK_URL)) { - _log.warn("Incorrect OK response: " + checkURL); - return null; - } else { - String checkURLStr = checkURL.substring(PROP_CHECK_URL.length()+1); - _log.debug("Check URL = [" + checkURLStr + "]"); - return checkURLStr; - } + BufferedReader reader = new BufferedReader(new InputStreamReader(con.getInputStream())); + String statusLine = reader.readLine(); + if (statusLine == null) { + _log.error("Null response line when checking URL"); + return null; + } + boolean statusOk = false; + if (!statusLine.startsWith(PROP_STATUS)) { + _log.warn("Response does not begin with status [" + statusLine + "]"); + return null; + } else { + String statVal = statusLine.substring(PROP_STATUS.length() + 1); + statusOk = STATUS_OK.equals(statVal); + + if (!statusOk) { + _log.info("Status was not ok for sending [" + statVal + "]"); + return null; + } + } + + String checkURL = reader.readLine(); + if (!checkURL.startsWith(PROP_CHECK_URL)) { + _log.warn("Incorrect OK response: " + checkURL); + return null; + } else { + String checkURLStr = checkURL.substring(PROP_CHECK_URL.length()+1); + _log.debug("Check URL = [" + checkURLStr + "]"); + return checkURLStr; + } } private boolean checkDelivery(String checkURLStr, OutNetMessage msg) { - long now = Clock.getInstance().now(); - long expiration = msg.getExpiration(); - if (expiration <= now) - expiration = now + HARD_TIMEOUT; - - _log.debug("Check delivery [expiration = " + new Date(expiration) + "]"); - try { - URL checkStatusURL = new URL(checkURLStr); - long delay = RECHECK_DELAY; - do { - BandwidthLimiter.getInstance().delayOutbound(msg.getTarget().getIdentity(), 512); // HTTP overhead - BandwidthLimiter.getInstance().delayInbound(msg.getTarget().getIdentity(), 512); // HTTP overhead - - _log.debug("Checking delivery at " + checkURLStr); - 
HttpURLConnection con = (HttpURLConnection)checkStatusURL.openConnection(); - con.setRequestMethod("GET"); - //con.setInstanceFollowRedirects(false); // kaffe doesn't support this (yet) - con.setDoInput(true); - con.setDoOutput(false); - con.setUseCaches(false); - con.connect(); - - boolean isPending = getIsPending(con); - if (!isPending) { - _log.info("Check delivery successful for message " + msg.getMessage().getClass().getName()); - return true; - } - - if (now + delay > expiration) - delay = expiration - now - 30; // 30 = kludgy # for the next 4 statements - _log.debug("Still pending (wait " + delay + "ms)"); - Thread.sleep(delay); - //delay += RECHECK_DELAY; - - now = Clock.getInstance().now(); - } while (now < expiration); - _log.warn("Timeout for checking delivery to " + checkURLStr + " for message " + msg.getMessage().getClass().getName()); - } catch (Throwable t) { - _log.debug("Error checking for delivery", t); - } - return false; + long now = _context.clock().now(); + long expiration = msg.getExpiration(); + if (expiration <= now) + expiration = now + HARD_TIMEOUT; + + _log.debug("Check delivery [expiration = " + new Date(expiration) + "]"); + try { + URL checkStatusURL = new URL(checkURLStr); + long delay = RECHECK_DELAY; + do { + _context.bandwidthLimiter().delayOutbound(msg.getTarget().getIdentity(), 512); // HTTP overhead + _context.bandwidthLimiter().delayInbound(msg.getTarget().getIdentity(), 512); // HTTP overhead + + _log.debug("Checking delivery at " + checkURLStr); + HttpURLConnection con = (HttpURLConnection)checkStatusURL.openConnection(); + con.setRequestMethod("GET"); + //con.setInstanceFollowRedirects(false); // kaffe doesn't support this (yet) + con.setDoInput(true); + con.setDoOutput(false); + con.setUseCaches(false); + con.connect(); + + boolean isPending = getIsPending(con); + if (!isPending) { + _log.info("Check delivery successful for message " + msg.getMessage().getClass().getName()); + return true; + } + + if (now + delay > 
expiration) + delay = expiration - now - 30; // 30 = kludgy # for the next 4 statements + _log.debug("Still pending (wait " + delay + "ms)"); + Thread.sleep(delay); + //delay += RECHECK_DELAY; + + now = _context.clock().now(); + } while (now < expiration); + _log.warn("Timeout for checking delivery to " + checkURLStr + " for message " + msg.getMessage().getClass().getName()); + } catch (Throwable t) { + _log.debug("Error checking for delivery", t); + } + return false; } private boolean getIsPending(HttpURLConnection con) throws IOException { - int len = con.getContentLength(); - int rc = con.getResponseCode(); - BufferedReader reader = new BufferedReader(new InputStreamReader(con.getInputStream())); - String statusLine = reader.readLine(); - if (statusLine == null) { - _log.warn("Server didn't send back a status line [len = " + len + ", rc = " + rc + "]"); - return false; - } - boolean statusPending = false; - if (!statusLine.startsWith(PROP_STATUS)) { - _log.warn("Response does not begin with status [" + statusLine + "]"); - return false; - } else { - String statVal = statusLine.substring(PROP_STATUS.length() + 1); - statusPending = STATUS_PENDING.equals(statVal); - if (statVal.startsWith(STATUS_CLOCKSKEW)) { - long skew = Long.MAX_VALUE; - String skewStr = statVal.substring(STATUS_CLOCKSKEW.length()+1); - try { - skew = Long.parseLong(skewStr); - } catch (Throwable t) { - _log.error("Unable to decode the clock skew [" + skewStr + "]"); - skew = Long.MAX_VALUE; - } - _log.error("Clock skew talking with phttp relay: " + skew + "ms (remote-local)"); - } - return statusPending; - } + int len = con.getContentLength(); + int rc = con.getResponseCode(); + BufferedReader reader = new BufferedReader(new InputStreamReader(con.getInputStream())); + String statusLine = reader.readLine(); + if (statusLine == null) { + _log.warn("Server didn't send back a status line [len = " + len + ", rc = " + rc + "]"); + return false; + } + boolean statusPending = false; + if 
(!statusLine.startsWith(PROP_STATUS)) { + _log.warn("Response does not begin with status [" + statusLine + "]"); + return false; + } else { + String statVal = statusLine.substring(PROP_STATUS.length() + 1); + statusPending = STATUS_PENDING.equals(statVal); + if (statVal.startsWith(STATUS_CLOCKSKEW)) { + long skew = Long.MAX_VALUE; + String skewStr = statVal.substring(STATUS_CLOCKSKEW.length()+1); + try { + skew = Long.parseLong(skewStr); + } catch (Throwable t) { + _log.error("Unable to decode the clock skew [" + skewStr + "]"); + skew = Long.MAX_VALUE; + } + _log.error("Clock skew talking with phttp relay: " + skew + "ms (remote-local)"); + } + return statusPending; + } } private byte[] getData(OutNetMessage msg) { - try { - ByteArrayOutputStream baos = new ByteArrayOutputStream((int)(msg.getMessageSize() + 64)); - String target = msg.getTarget().getIdentity().getHash().toBase64(); - StringBuffer buf = new StringBuffer(); - buf.append(PARAM_SEND_TARGET).append('=').append(target).append('&'); - buf.append(PARAM_SEND_TIMEOUTMS).append('=').append(msg.getExpiration() - Clock.getInstance().now()).append('&'); - buf.append(PARAM_SEND_DATA_LENGTH).append('=').append(msg.getMessageSize()).append('&'); - buf.append(PARAM_SEND_TIME).append('=').append(Clock.getInstance().now()).append('&').append('\n'); - baos.write(buf.toString().getBytes()); - baos.write(msg.getMessageData()); - byte data[] = baos.toByteArray(); - _log.debug("Data to be sent: " + data.length); - return data; - } catch (Throwable t) { - _log.error("Error preparing the data", t); - return null; - } + try { + ByteArrayOutputStream baos = new ByteArrayOutputStream((int)(msg.getMessageSize() + 64)); + String target = msg.getTarget().getIdentity().getHash().toBase64(); + StringBuffer buf = new StringBuffer(); + buf.append(PARAM_SEND_TARGET).append('=').append(target).append('&'); + buf.append(PARAM_SEND_TIMEOUTMS).append('=').append(msg.getExpiration() - _context.clock().now()).append('&'); + 
buf.append(PARAM_SEND_DATA_LENGTH).append('=').append(msg.getMessageSize()).append('&'); + buf.append(PARAM_SEND_TIME).append('=').append(_context.clock().now()).append('&').append('\n'); + baos.write(buf.toString().getBytes()); + baos.write(msg.getMessageData()); + byte data[] = baos.toByteArray(); + _log.debug("Data to be sent: " + data.length); + return data; + } catch (Throwable t) { + _log.error("Error preparing the data", t); + return null; + } } private URL getURL(OutNetMessage msg) { - for (Iterator iter = msg.getTarget().getAddresses().iterator(); iter.hasNext(); ) { - RouterAddress addr = (RouterAddress)iter.next(); - URL url = getURL(addr); - if (url != null) return url; - } - _log.warn("No URLs could be constructed to send to " + msg.getTarget().getIdentity().getHash().toBase64()); - return null; + for (Iterator iter = msg.getTarget().getAddresses().iterator(); iter.hasNext(); ) { + RouterAddress addr = (RouterAddress)iter.next(); + URL url = getURL(addr); + if (url != null) return url; + } + _log.warn("No URLs could be constructed to send to " + msg.getTarget().getIdentity().getHash().toBase64()); + return null; } private URL getURL(RouterAddress addr) { - if (PHTTPTransport.STYLE.equals(addr.getTransportStyle())) { - String url = addr.getOptions().getProperty(PHTTPTransport.PROP_TO_SEND_URL); - if (url == null) return null; - try { - return new URL(url); - } catch (MalformedURLException mue) { - _log.info("Address has a bad url [" + url + "]", mue); - } - } - return null; + if (PHTTPTransport.STYLE.equals(addr.getTransportStyle())) { + String url = addr.getOptions().getProperty(PHTTPTransport.PROP_TO_SEND_URL); + if (url == null) return null; + try { + return new URL(url); + } catch (MalformedURLException mue) { + _log.info("Address has a bad url [" + url + "]", mue); + } + } + return null; } } diff --git a/router/java/src/net/i2p/router/transport/phttp/PHTTPTransport.java b/router/java/src/net/i2p/router/transport/phttp/PHTTPTransport.java index 
3f79e61c0..e09e298d5 100644 --- a/router/java/src/net/i2p/router/transport/phttp/PHTTPTransport.java +++ b/router/java/src/net/i2p/router/transport/phttp/PHTTPTransport.java @@ -1,9 +1,9 @@ package net.i2p.router.transport.phttp; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -30,16 +30,15 @@ import net.i2p.router.transport.TransportBid; import net.i2p.router.transport.TransportImpl; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * * */ public class PHTTPTransport extends TransportImpl { - private final static Log _log = new Log(PHTTPTransport.class); + private Log _log; public final static String STYLE = "PHTTP"; - private RouterIdentity _myIdentity; - private SigningPrivateKey _signingKey; private RouterAddress _myAddress; private String _mySendURL; private String _myPollURL; @@ -69,7 +68,7 @@ public class PHTTPTransport extends TransportImpl { public final static String PROP_POLL_URL = "pollURL"; public final static String PROP_SEND_URL = "sendURL"; public final static String PROP_TIME_OFFSET = "timeOffset"; // ms (remote-local) - + /* values for the PROP_STATUS */ public final static String STATUS_FAILED = "failed"; public final static String STATUS_REGISTERED = "registered"; @@ -77,205 +76,205 @@ public class PHTTPTransport extends TransportImpl { public final static String CONFIG_POLL_FREQUENCY = "i2np.phttp.pollFrequencySeconds"; public final static long DEFAULT_POLL_FREQUENCY = 60*1000; // every 60 seconds - /** - * do we want to assume that the relay's 
clock is sync'ed with NTP and update + /** + * do we want to assume that the relay's clock is sync'ed with NTP and update * our offset according to what they say? */ public final static String CONFIG_TRUST_TIME = "i2np.phttp.trustRelayTime"; public final static boolean DEFAULT_TRUST_TIME = true; - public PHTTPTransport(RouterIdentity myIdentity, SigningPrivateKey signingKey, RouterAddress myAddress) { - super(); - _myIdentity = myIdentity; - _signingKey = signingKey; - _myAddress = myAddress; - - if (myAddress != null) { - Properties opts = myAddress.getOptions(); - _myRegisterURL = opts.getProperty(PROP_TO_REGISTER_URL); - _mySendURL = opts.getProperty(PROP_TO_SEND_URL); - _pollFrequencyMs = DEFAULT_POLL_FREQUENCY; - String pollFreq = Router.getInstance().getConfigSetting(CONFIG_POLL_FREQUENCY); - if (pollFreq != null) { - try { - long val = Long.parseLong(pollFreq); - _pollFrequencyMs = val*1000; - _log.info("PHTTP Polling Frequency specified as once every " + val + " seconds"); - } catch (NumberFormatException nfe) { - _log.error("Poll frequency is not valid (" + pollFreq + ")", nfe); - } - } else { - _log.info("PHTTP Polling Frequency not specified via (" + CONFIG_POLL_FREQUENCY + "), defaulting to once every " + (DEFAULT_POLL_FREQUENCY/1000) + " seconds"); - } - - String trustTime = Router.getInstance().getConfigSetting(CONFIG_TRUST_TIME); - if (trustTime != null) { - _trustTime = Boolean.TRUE.toString().equalsIgnoreCase(trustTime); - } else { - _trustTime = DEFAULT_TRUST_TIME; - } - - JobQueue.getInstance().addJob(new RegisterJob()); - } - _sender = new PHTTPSender(this); + public PHTTPTransport(RouterContext ctx, RouterAddress myAddress) { + super(ctx); + _log = ctx.logManager().getLog(PHTTPTransport.class); + _myAddress = myAddress; + + if (myAddress != null) { + Properties opts = myAddress.getOptions(); + _myRegisterURL = opts.getProperty(PROP_TO_REGISTER_URL); + _mySendURL = opts.getProperty(PROP_TO_SEND_URL); + _pollFrequencyMs = DEFAULT_POLL_FREQUENCY; 
+ String pollFreq = _context.router().getConfigSetting(CONFIG_POLL_FREQUENCY); + if (pollFreq != null) { + try { + long val = Long.parseLong(pollFreq); + _pollFrequencyMs = val*1000; + _log.info("PHTTP Polling Frequency specified as once every " + val + " seconds"); + } catch (NumberFormatException nfe) { + _log.error("Poll frequency is not valid (" + pollFreq + ")", nfe); + } + } else { + _log.info("PHTTP Polling Frequency not specified via (" + CONFIG_POLL_FREQUENCY + "), defaulting to once every " + (DEFAULT_POLL_FREQUENCY/1000) + " seconds"); + } + + String trustTime = _context.router().getConfigSetting(CONFIG_TRUST_TIME); + if (trustTime != null) { + _trustTime = Boolean.TRUE.toString().equalsIgnoreCase(trustTime); + } else { + _trustTime = DEFAULT_TRUST_TIME; + } + + _context.jobQueue().addJob(new RegisterJob()); + } + _sender = new PHTTPSender(_context, this); + _poller = new PHTTPPoller(_context, this); } public String getMySendURL() { return _mySendURL; } - SigningPrivateKey getMySigningKey() { return _signingKey; } - RouterIdentity getMyIdentity() { return _myIdentity; } + SigningPrivateKey getMySigningKey() { return _context.keyManager().getSigningPrivateKey(); } + RouterIdentity getMyIdentity() { return _context.router().getRouterInfo().getIdentity(); } String getMyPollURL() { return _myPollURL; } long getPollFrequencyMs() { return _pollFrequencyMs; } private class RegisterJob extends JobImpl { - public String getName() { return "Register with PHTTP relay"; } - public void runJob() { - boolean ok = doRegisterWithRelay(); - if (ok) { - _log.debug("Registration successful with the last registration delay of " + _nextRegisterDelay + "ms"); - if (_poller == null) { - _poller = new PHTTPPoller(PHTTPTransport.this); - _poller.startPolling(); - } - } else { - _nextRegisterDelay = _nextRegisterDelay * 2; - if (_nextRegisterDelay > MAX_REGISTER_DELAY) - _nextRegisterDelay = MAX_REGISTER_DELAY; - long nextRegister = Clock.getInstance().now() + _nextRegisterDelay; 
- _log.debug("Registration failed, next registration attempt in " + _nextRegisterDelay + "ms"); - requeue(nextRegister); - } - } + public RegisterJob() { + super(PHTTPTransport.this._context); + } + public String getName() { return "Register with PHTTP relay"; } + public void runJob() { + boolean ok = doRegisterWithRelay(); + if (ok) { + _log.debug("Registration successful with the last registration delay of " + _nextRegisterDelay + "ms"); + _poller.startPolling(); + } else { + _nextRegisterDelay = _nextRegisterDelay * 2; + if (_nextRegisterDelay > MAX_REGISTER_DELAY) + _nextRegisterDelay = MAX_REGISTER_DELAY; + long nextRegister = _context.clock().now() + _nextRegisterDelay; + _log.debug("Registration failed, next registration attempt in " + _nextRegisterDelay + "ms"); + requeue(nextRegister); + } + } } boolean registerWithRelay() { - boolean ok = doRegisterWithRelay(); - if (ok) { - _log.info("Registered with PHTTP relay"); - return ok; - } - _log.error("Unable to register with relay"); - return false; + boolean ok = doRegisterWithRelay(); + if (ok) { + _log.info("Registered with PHTTP relay"); + return ok; + } + _log.error("Unable to register with relay"); + return false; } synchronized boolean doRegisterWithRelay() { - _log.debug("Beginning registration"); - ByteArrayOutputStream baos = new ByteArrayOutputStream(512); - try { - DataHelper.writeDate(baos, new Date(Clock.getInstance().now())); - _myIdentity.writeBytes(baos); - int postLength = baos.size(); - - BandwidthLimiter.getInstance().delayOutbound(null, postLength+512); // HTTP overhead - BandwidthLimiter.getInstance().delayInbound(null, 2048+512); // HTTP overhead - - long now = Clock.getInstance().now(); - _log.debug("Before opening " + _myRegisterURL); - URL url = new URL(_myRegisterURL); - HttpURLConnection con = (HttpURLConnection)url.openConnection(); - // send the info - con.setRequestMethod("POST"); - con.setUseCaches(false); - con.setDoOutput(true); - con.setDoInput(true); - 
con.setRequestProperty("Content-length", ""+postLength); - baos.writeTo(con.getOutputStream()); - _log.debug("Data sent, before reading"); - con.connect(); - // fetch the results - BufferedReader reader = new BufferedReader(new InputStreamReader(con.getInputStream())); - String line = null; - String stat = null; - boolean ok = false; - while ( (line = reader.readLine()) != null) { - if (line.startsWith(PROP_SEND_URL)) { - _mySendURL = line.substring(PROP_SEND_URL.length()+1).trim(); - } else if (line.startsWith(PROP_POLL_URL)) { - _myPollURL = line.substring(PROP_POLL_URL.length()+1).trim(); - } else if (line.startsWith(PROP_STATUS)) { - stat = line.substring(PROP_STATUS.length()+1).trim(); - if (STATUS_REGISTERED.equals(stat.toLowerCase())) - ok = true; - } else if (line.startsWith(PROP_TIME_OFFSET)) { - String offset = line.substring(PROP_TIME_OFFSET.length()+1).trim(); - try { - _timeOffset = Long.parseLong(offset); - } catch (Throwable t) { - _log.warn("Unable to parse time offset [" + offset + "] - treating as MAX"); - _timeOffset = Long.MAX_VALUE; - } - } - if ( (_myPollURL != null) && (_mySendURL != null) && (stat != null) ) - break; - } - - if (_trustTime) { - _log.info("Setting time offset to " + _timeOffset + " (old offset: " + Clock.getInstance().getOffset() + ")"); - Clock.getInstance().setOffset(_timeOffset); - } - //if ( (_timeOffset > Router.CLOCK_FUDGE_FACTOR) || (_timeOffset < 0 - Router.CLOCK_FUDGE_FACTOR) ) { - // _log.error("Unable to register with PHTTP relay, as there is too much clock skew! 
" + _timeOffset + "ms difference (them-us)", new Exception("Too much clock skew with phttp relay!")); - // return false; - //} - - if (ok) { - _log.info("Registered with the PHTTP relay [" + _myRegisterURL + "]"); - _log.info("Registered sending url: [" + _mySendURL + "]"); - _log.info("Registered polling url: [" + _myPollURL + "]"); - return true; - } else { - _log.warn("PHTTP relay [" + _myRegisterURL + "] rejected registration"); - } - } catch (Throwable t) { - _log.warn("Error registering", t); - } - - return false; + _log.debug("Beginning registration"); + ByteArrayOutputStream baos = new ByteArrayOutputStream(512); + try { + DataHelper.writeDate(baos, new Date(_context.clock().now())); + _context.router().getRouterInfo().getIdentity().writeBytes(baos); + int postLength = baos.size(); + + _context.bandwidthLimiter().delayOutbound(null, postLength+512); // HTTP overhead + _context.bandwidthLimiter().delayInbound(null, 2048+512); // HTTP overhead + + long now = _context.clock().now(); + _log.debug("Before opening " + _myRegisterURL); + URL url = new URL(_myRegisterURL); + HttpURLConnection con = (HttpURLConnection)url.openConnection(); + // send the info + con.setRequestMethod("POST"); + con.setUseCaches(false); + con.setDoOutput(true); + con.setDoInput(true); + con.setRequestProperty("Content-length", ""+postLength); + baos.writeTo(con.getOutputStream()); + _log.debug("Data sent, before reading"); + con.connect(); + // fetch the results + BufferedReader reader = new BufferedReader(new InputStreamReader(con.getInputStream())); + String line = null; + String stat = null; + boolean ok = false; + while ( (line = reader.readLine()) != null) { + if (line.startsWith(PROP_SEND_URL)) { + _mySendURL = line.substring(PROP_SEND_URL.length()+1).trim(); + } else if (line.startsWith(PROP_POLL_URL)) { + _myPollURL = line.substring(PROP_POLL_URL.length()+1).trim(); + } else if (line.startsWith(PROP_STATUS)) { + stat = line.substring(PROP_STATUS.length()+1).trim(); + if 
(STATUS_REGISTERED.equals(stat.toLowerCase())) + ok = true; + } else if (line.startsWith(PROP_TIME_OFFSET)) { + String offset = line.substring(PROP_TIME_OFFSET.length()+1).trim(); + try { + _timeOffset = Long.parseLong(offset); + } catch (Throwable t) { + _log.warn("Unable to parse time offset [" + offset + "] - treating as MAX"); + _timeOffset = Long.MAX_VALUE; + } + } + if ( (_myPollURL != null) && (_mySendURL != null) && (stat != null) ) + break; + } + + if (_trustTime) { + _log.info("Setting time offset to " + _timeOffset + " (old offset: " + _context.clock().getOffset() + ")"); + _context.clock().setOffset(_timeOffset); + } + //if ( (_timeOffset > Router.CLOCK_FUDGE_FACTOR) || (_timeOffset < 0 - Router.CLOCK_FUDGE_FACTOR) ) { + // _log.error("Unable to register with PHTTP relay, as there is too much clock skew! " + _timeOffset + "ms difference (them-us)", new Exception("Too much clock skew with phttp relay!")); + // return false; + //} + + if (ok) { + _log.info("Registered with the PHTTP relay [" + _myRegisterURL + "]"); + _log.info("Registered sending url: [" + _mySendURL + "]"); + _log.info("Registered polling url: [" + _myPollURL + "]"); + return true; + } else { + _log.warn("PHTTP relay [" + _myRegisterURL + "] rejected registration"); + } + } catch (Throwable t) { + _log.warn("Error registering", t); + } + + return false; } protected void outboundMessageReady() { - OutNetMessage msg = getNextMessage(); - if (msg != null) { - JobQueue.getInstance().addJob(new PushNewMessageJob(msg)); - } else { - _log.debug("OutboundMessageReady called, but none were available"); - } - } - - public TransportBid bid(RouterInfo toAddress, long dataSize) { - if (PHTTPPoller.shouldRejectMessages()) - return null; // we're not using phttp - - long latencyStartup = BandwidthLimiter.getInstance().calculateDelayOutbound(toAddress.getIdentity(), (int)dataSize); - latencyStartup += _pollFrequencyMs / 2; // average distance until the next poll - long sendTime = 
(int)((dataSize)/(16*1024)); // 16K/sec ARBITRARY - int bytes = (int)dataSize+1024; - - // lets seriously penalize phttp to heavily prefer TCP - bytes += 1024*100; - latencyStartup += 1000*600; - - TransportBid bid = new TransportBid(); - bid.setBandwidthBytes(bytes); - bid.setExpiration(new Date(Clock.getInstance().now()+1000*60)); // 1 minute, since the bwlimiter goes per minute - bid.setLatencyMs((int) (latencyStartup + sendTime)); - bid.setMessageSize((int)dataSize); - bid.setRouter(toAddress); - bid.setTransport(this); - - RouterAddress addr = getTargetAddress(toAddress); - if (addr == null) - return null; - - return bid; + OutNetMessage msg = getNextMessage(); + if (msg != null) { + _context.jobQueue().addJob(new PushNewMessageJob(msg)); + } else { + _log.debug("OutboundMessageReady called, but none were available"); + } } - public RouterAddress startListening() { - _log.debug("Start listening"); - return _myAddress; + public TransportBid bid(RouterInfo toAddress, long dataSize) { + if (_poller.shouldRejectMessages()) + return null; // we're not using phttp + + long latencyStartup = _context.bandwidthLimiter().calculateDelayOutbound(toAddress.getIdentity(), (int)dataSize); + latencyStartup += _pollFrequencyMs / 2; // average distance until the next poll + long sendTime = (int)((dataSize)/(16*1024)); // 16K/sec ARBITRARY + int bytes = (int)dataSize+1024; + + // lets seriously penalize phttp to heavily prefer TCP + bytes += 1024*100; + latencyStartup += 1000*600; + + TransportBid bid = new TransportBid(); + bid.setBandwidthBytes(bytes); + bid.setExpiration(new Date(_context.clock().now()+1000*60)); // 1 minute, since the bwlimiter goes per minute + bid.setLatencyMs((int) (latencyStartup + sendTime)); + bid.setMessageSize((int)dataSize); + bid.setRouter(toAddress); + bid.setTransport(this); + + RouterAddress addr = getTargetAddress(toAddress); + if (addr == null) + return null; + + return bid; + } + + public RouterAddress startListening() { + _log.debug("Start 
listening"); + return _myAddress; } public void stopListening() { - if (_poller != null) - _poller.stopPolling(); + if (_poller != null) + _poller.stopPolling(); } @@ -286,17 +285,20 @@ public class PHTTPTransport extends TransportImpl { boolean getTrustTime() { return _trustTime; } private class PushNewMessageJob extends JobImpl { - private OutNetMessage _msg; - public PushNewMessageJob(OutNetMessage msg) { _msg = msg; } - public String getName() { return "Push New PHTTP Message"; } - public void runJob() { - long delay = BandwidthLimiter.getInstance().calculateDelayOutbound(_msg.getTarget().getIdentity(), (int)_msg.getMessageSize()); - if (delay > 0) { - getTiming().setStartAfter(delay + Clock.getInstance().now()); - JobQueue.getInstance().addJob(this); - } else { - _sender.send(_msg); - } - } + private OutNetMessage _msg; + public PushNewMessageJob(OutNetMessage msg) { + super(PHTTPTransport.this._context); + _msg = msg; + } + public String getName() { return "Push New PHTTP Message"; } + public void runJob() { + long delay = _context.bandwidthLimiter().calculateDelayOutbound(_msg.getTarget().getIdentity(), (int)_msg.getMessageSize()); + if (delay > 0) { + getTiming().setStartAfter(delay + _context.clock().now()); + PHTTPTransport.this._context.jobQueue().addJob(this); + } else { + _sender.send(_msg); + } + } } } diff --git a/router/java/src/net/i2p/router/transport/tcp/RestrictiveTCPConnection.java b/router/java/src/net/i2p/router/transport/tcp/RestrictiveTCPConnection.java index 688214177..c4fce3dfd 100644 --- a/router/java/src/net/i2p/router/transport/tcp/RestrictiveTCPConnection.java +++ b/router/java/src/net/i2p/router/transport/tcp/RestrictiveTCPConnection.java @@ -1,9 +1,9 @@ package net.i2p.router.transport.tcp; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. 
- * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -31,6 +31,7 @@ import net.i2p.stat.StatManager; import net.i2p.util.Clock; import net.i2p.util.I2PThread; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * TCPConnection that validates the time and protocol version, dropping connection if @@ -38,153 +39,151 @@ import net.i2p.util.Log; * */ class RestrictiveTCPConnection extends TCPConnection { - private final static Log _log = new Log(RestrictiveTCPConnection.class); + private Log _log; - public RestrictiveTCPConnection(Socket s, RouterIdentity myIdent, SigningPrivateKey signingKey, boolean locallyInitiated) { - super(s, myIdent, signingKey, locallyInitiated); + public RestrictiveTCPConnection(RouterContext context, Socket s, boolean locallyInitiated) { + super(context, s, locallyInitiated); + _log = context.logManager().getLog(RestrictiveTCPConnection.class); + _context.statManager().createRateStat("tcp.establishConnectionTime", "How long does it take for us to successfully establish a connection (either locally or remotely initiated)?", "TCP Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); } - + /** passed in the handshake process for the connection, and only equivilant protocols will be accepted */ private final static long PROTO_ID = 12; - - static { - StatManager.getInstance().createRateStat("tcp.establishConnectionTime", "How long does it take for us to successfully establish a connection (either locally or remotely initiated)?", "TCP Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - } - + private boolean validateVersion() throws DataFormatException, IOException { - if (_log.shouldLog(Log.DEBUG)) _log.debug("Before validating version"); - 
ByteArrayOutputStream baos = new ByteArrayOutputStream(8); - DataHelper.writeLong(baos, 4, PROTO_ID); - byte encr[] = AESEngine.getInstance().safeEncrypt(baos.toByteArray(), _key, _iv, 16); - DataHelper.writeLong(_out, 2, encr.length); - _out.write(encr); - - if (_log.shouldLog(Log.DEBUG)) _log.debug("Version sent"); - // we've sent our version, now read what theirs is - - int rlen = (int)DataHelper.readLong(_in, 2); - byte pencr[] = new byte[rlen]; - int read = DataHelper.read(_in, pencr); - if (read != rlen) - throw new DataFormatException("Not enough data in peer version"); - byte decr[] = AESEngine.getInstance().safeDecrypt(pencr, _key, _iv); - if (decr == null) - throw new DataFormatException("Unable to decrypt - failed version?"); - - ByteArrayInputStream bais = new ByteArrayInputStream(decr); - long peerProtoId = DataHelper.readLong(bais, 4); - - - if (_log.shouldLog(Log.DEBUG)) _log.debug("Version received [" + peerProtoId + "]"); - - return validateVersion(PROTO_ID, peerProtoId); + if (_log.shouldLog(Log.DEBUG)) _log.debug("Before validating version"); + ByteArrayOutputStream baos = new ByteArrayOutputStream(8); + DataHelper.writeLong(baos, 4, PROTO_ID); + byte encr[] = _context.AESEngine().safeEncrypt(baos.toByteArray(), _key, _iv, 16); + DataHelper.writeLong(_out, 2, encr.length); + _out.write(encr); + + if (_log.shouldLog(Log.DEBUG)) _log.debug("Version sent"); + // we've sent our version, now read what theirs is + + int rlen = (int)DataHelper.readLong(_in, 2); + byte pencr[] = new byte[rlen]; + int read = DataHelper.read(_in, pencr); + if (read != rlen) + throw new DataFormatException("Not enough data in peer version"); + byte decr[] = _context.AESEngine().safeDecrypt(pencr, _key, _iv); + if (decr == null) + throw new DataFormatException("Unable to decrypt - failed version?"); + + ByteArrayInputStream bais = new ByteArrayInputStream(decr); + long peerProtoId = DataHelper.readLong(bais, 4); + + + if (_log.shouldLog(Log.DEBUG)) _log.debug("Version 
received [" + peerProtoId + "]"); + + return validateVersion(PROTO_ID, peerProtoId); } private boolean validateVersion(long us, long them) throws DataFormatException, IOException { - if (us != them) { - if (_log.shouldLog(Log.ERROR)) - _log.error("INVALID PROTOCOL VERSIONS! us = " + us + " them = " + them + ": " + _remoteIdentity.getHash()); - if (them > us) - Router.getInstance().setHigherVersionSeen(true); - return false; - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Valid protocol version: us = " + us + " them = " + them + ": " + _remoteIdentity.getHash()); - return true; - } + if (us != them) { + if (_log.shouldLog(Log.ERROR)) + _log.error("INVALID PROTOCOL VERSIONS! us = " + us + " them = " + them + ": " + _remoteIdentity.getHash()); + if (them > us) + _context.router().setHigherVersionSeen(true); + return false; + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Valid protocol version: us = " + us + " them = " + them + ": " + _remoteIdentity.getHash()); + return true; + } } private boolean validateTime() throws DataFormatException, IOException { - Date now = new Date(Clock.getInstance().now()); - ByteArrayOutputStream baos = new ByteArrayOutputStream(8); - DataHelper.writeDate(baos, now); - - byte encr[] = AESEngine.getInstance().safeEncrypt(baos.toByteArray(), _key, _iv, 16); - DataHelper.writeLong(_out, 2, encr.length); - _out.write(encr); - - // we've sent our date, now read what theirs is - - int rlen = (int)DataHelper.readLong(_in, 2); - byte pencr[] = new byte[rlen]; - int read = DataHelper.read(_in, pencr); - if (read != rlen) - throw new DataFormatException("Not enough data in peer date"); - byte decr[] = AESEngine.getInstance().safeDecrypt(pencr, _key, _iv); - if (decr == null) - throw new DataFormatException("Unable to decrypt - failed date?"); - - ByteArrayInputStream bais = new ByteArrayInputStream(decr); - Date theirNow = DataHelper.readDate(bais); - - long diff = now.getTime() - theirNow.getTime(); - if ( (diff > 
Router.CLOCK_FUDGE_FACTOR) || (diff < (0-Router.CLOCK_FUDGE_FACTOR)) ) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Peer is out of time sync! They think it is " + theirNow + ": " + _remoteIdentity.getHash(), new Exception("Time sync error - please make sure your clock is correct!")); - return false; - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Peer sync difference: " + diff + "ms: " + _remoteIdentity.getHash()); - return true; - } + Date now = new Date(_context.clock().now()); + ByteArrayOutputStream baos = new ByteArrayOutputStream(8); + DataHelper.writeDate(baos, now); + + byte encr[] = _context.AESEngine().safeEncrypt(baos.toByteArray(), _key, _iv, 16); + DataHelper.writeLong(_out, 2, encr.length); + _out.write(encr); + + // we've sent our date, now read what theirs is + + int rlen = (int)DataHelper.readLong(_in, 2); + byte pencr[] = new byte[rlen]; + int read = DataHelper.read(_in, pencr); + if (read != rlen) + throw new DataFormatException("Not enough data in peer date"); + byte decr[] = _context.AESEngine().safeDecrypt(pencr, _key, _iv); + if (decr == null) + throw new DataFormatException("Unable to decrypt - failed date?"); + + ByteArrayInputStream bais = new ByteArrayInputStream(decr); + Date theirNow = DataHelper.readDate(bais); + + long diff = now.getTime() - theirNow.getTime(); + if ( (diff > Router.CLOCK_FUDGE_FACTOR) || (diff < (0-Router.CLOCK_FUDGE_FACTOR)) ) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Peer is out of time sync! They think it is " + theirNow + ": " + _remoteIdentity.getHash(), new Exception("Time sync error - please make sure your clock is correct!")); + return false; + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Peer sync difference: " + diff + "ms: " + _remoteIdentity.getHash()); + return true; + } } - /** + /** * Exchange TCP addresses, and if we're didn't establish this connection, validate * the peer with validatePeerAddresses(TCPAddress[]). 
* * @return true if the peer is valid (and reachable) */ private boolean validatePeerAddress() throws DataFormatException, IOException { - if (_log.shouldLog(Log.DEBUG)) _log.debug("Before sending my addresses"); - TCPAddress me[] = _transport.getMyAddresses(); - ByteArrayOutputStream baos = new ByteArrayOutputStream(256); - if (_log.shouldLog(Log.DEBUG)) _log.debug("Sending " + me.length + " addresses"); - DataHelper.writeLong(baos, 1, me.length); - for (int i = 0; i < me.length; i++) { - DataHelper.writeString(baos, me[i].getHost()); - DataHelper.writeLong(baos, 2, me[i].getPort()); - if (_log.shouldLog(Log.DEBUG)) _log.debug("Sent my address [" + me[i].getHost() + ":" + me[i].getPort() + "]"); - } - if (_log.shouldLog(Log.DEBUG)) _log.debug("Sent my " + me.length + " addresses"); - - byte encr[] = AESEngine.getInstance().safeEncrypt(baos.toByteArray(), _key, _iv, 256); - DataHelper.writeLong(_out, 2, encr.length); - _out.write(encr); - - // we've sent our addresses, now read their addresses - - int rlen = (int)DataHelper.readLong(_in, 2); - byte pencr[] = new byte[rlen]; - int read = DataHelper.read(_in, pencr); - if (read != rlen) - throw new DataFormatException("Not enough data in peer addresses"); - byte decr[] = AESEngine.getInstance().safeDecrypt(pencr, _key, _iv); - if (decr == null) - throw new DataFormatException("Unable to decrypt - invalid addresses?"); - - ByteArrayInputStream bais = new ByteArrayInputStream(decr); - long numAddresses = DataHelper.readLong(bais, 1); - if (_log.shouldLog(Log.DEBUG)) _log.debug("Peer will send us " + numAddresses + " addresses"); - TCPAddress peer[] = new TCPAddress[(int)numAddresses]; - for (int i = 0; i < peer.length; i++) { - String host = DataHelper.readString(bais); - int port = (int)DataHelper.readLong(bais, 2); - peer[i] = new TCPAddress(host, port); - if (_log.shouldLog(Log.DEBUG)) _log.debug("Received peer address [" + peer[i].getHost() + ":" + peer[i].getPort() + "]"); - } - - // ok, we've received their 
addresses, now we determine whether we need to - // validate them or not - if (weInitiatedConnection()) { - if (_log.shouldLog(Log.DEBUG)) _log.debug("We initiated the connection, so no need to validate"); - return true; // we connected to them, so we know we can, um, connect to them - } else { - if (_log.shouldLog(Log.DEBUG)) _log.debug("We received the connection, so validate"); - boolean valid = validatePeerAddresses(peer); - if (_log.shouldLog(Log.DEBUG)) _log.debug("We received the connection, validated? " + valid); - return valid; - } + if (_log.shouldLog(Log.DEBUG)) _log.debug("Before sending my addresses"); + TCPAddress me[] = _transport.getMyAddresses(); + ByteArrayOutputStream baos = new ByteArrayOutputStream(256); + if (_log.shouldLog(Log.DEBUG)) _log.debug("Sending " + me.length + " addresses"); + DataHelper.writeLong(baos, 1, me.length); + for (int i = 0; i < me.length; i++) { + DataHelper.writeString(baos, me[i].getHost()); + DataHelper.writeLong(baos, 2, me[i].getPort()); + if (_log.shouldLog(Log.DEBUG)) _log.debug("Sent my address [" + me[i].getHost() + ":" + me[i].getPort() + "]"); + } + if (_log.shouldLog(Log.DEBUG)) _log.debug("Sent my " + me.length + " addresses"); + + byte encr[] = _context.AESEngine().safeEncrypt(baos.toByteArray(), _key, _iv, 256); + DataHelper.writeLong(_out, 2, encr.length); + _out.write(encr); + + // we've sent our addresses, now read their addresses + + int rlen = (int)DataHelper.readLong(_in, 2); + byte pencr[] = new byte[rlen]; + int read = DataHelper.read(_in, pencr); + if (read != rlen) + throw new DataFormatException("Not enough data in peer addresses"); + byte decr[] = _context.AESEngine().safeDecrypt(pencr, _key, _iv); + if (decr == null) + throw new DataFormatException("Unable to decrypt - invalid addresses?"); + + ByteArrayInputStream bais = new ByteArrayInputStream(decr); + long numAddresses = DataHelper.readLong(bais, 1); + if (_log.shouldLog(Log.DEBUG)) _log.debug("Peer will send us " + numAddresses + " 
addresses"); + TCPAddress peer[] = new TCPAddress[(int)numAddresses]; + for (int i = 0; i < peer.length; i++) { + String host = DataHelper.readString(bais); + int port = (int)DataHelper.readLong(bais, 2); + peer[i] = new TCPAddress(host, port); + if (_log.shouldLog(Log.DEBUG)) _log.debug("Received peer address [" + peer[i].getHost() + ":" + peer[i].getPort() + "]"); + } + + // ok, we've received their addresses, now we determine whether we need to + // validate them or not + if (weInitiatedConnection()) { + if (_log.shouldLog(Log.DEBUG)) _log.debug("We initiated the connection, so no need to validate"); + return true; // we connected to them, so we know we can, um, connect to them + } else { + if (_log.shouldLog(Log.DEBUG)) _log.debug("We received the connection, so validate"); + boolean valid = validatePeerAddresses(peer); + if (_log.shouldLog(Log.DEBUG)) _log.debug("We received the connection, validated? " + valid); + return valid; + } } /** @@ -202,120 +201,120 @@ class RestrictiveTCPConnection extends TCPConnection { * */ private boolean validatePeerAddresses(TCPAddress addresses[]) throws DataFormatException, IOException { - if (_log.shouldLog(Log.DEBUG)) _log.debug("Before validating peer addresses [" + addresses.length + "]..."); - for (int i = 0; i < addresses.length; i++) { - if (_log.shouldLog(Log.DEBUG)) _log.debug("Before validating peer address (" + addresses[i].getHost() + ":"+ addresses[i].getPort() + ")..."); - boolean ok = sendsUsData(addresses[i]); - if (_log.shouldLog(Log.DEBUG)) _log.debug("Before validating peer address (" + addresses[i].getHost() + ":"+ addresses[i].getPort() + ") [" + ok + "]..."); - if (ok) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Peer address " + addresses[i].getHost() + ":" + addresses[i].getPort() + " validated!"); - return true; - } else { - if (_log.shouldLog(Log.WARN)) - _log.warn("Peer address " + addresses[i].getHost() + ":" + addresses[i].getPort() + " could NOT be validated"); - } - } - if 
(_log.shouldLog(Log.WARN)) - _log.warn("None of the peer addresses could be validated!"); - return false; + if (_log.shouldLog(Log.DEBUG)) _log.debug("Before validating peer addresses [" + addresses.length + "]..."); + for (int i = 0; i < addresses.length; i++) { + if (_log.shouldLog(Log.DEBUG)) _log.debug("Before validating peer address (" + addresses[i].getHost() + ":"+ addresses[i].getPort() + ")..."); + boolean ok = sendsUsData(addresses[i]); + if (_log.shouldLog(Log.DEBUG)) _log.debug("Before validating peer address (" + addresses[i].getHost() + ":"+ addresses[i].getPort() + ") [" + ok + "]..."); + if (ok) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Peer address " + addresses[i].getHost() + ":" + addresses[i].getPort() + " validated!"); + return true; + } else { + if (_log.shouldLog(Log.WARN)) + _log.warn("Peer address " + addresses[i].getHost() + ":" + addresses[i].getPort() + " could NOT be validated"); + } + } + if (_log.shouldLog(Log.WARN)) + _log.warn("None of the peer addresses could be validated!"); + return false; } private boolean sendsUsData(TCPAddress peer) { - SocketCreator creator = new SocketCreator(peer.getHost(), peer.getPort(), false); - I2PThread sockCreator = new I2PThread(creator); - sockCreator.setDaemon(true); - sockCreator.setName("PeerCallback"); - sockCreator.setPriority(I2PThread.MIN_PRIORITY); - sockCreator.start(); - - if (_log.shouldLog(Log.DEBUG)) _log.debug("Before joining socket creator via peer callback..."); - try { - synchronized (creator) { - creator.wait(TCPTransport.SOCKET_CREATE_TIMEOUT); - } - } catch (InterruptedException ie) {} - - boolean established = creator.couldEstablish(); - // returns a socket if and only if the connection was established and the I2P handshake byte sent and received - if (_log.shouldLog(Log.DEBUG)) - _log.debug("After joining socket creator via peer callback [could establish? 
" + established + "]"); - return established; + SocketCreator creator = new SocketCreator(peer.getHost(), peer.getPort(), false); + I2PThread sockCreator = new I2PThread(creator); + sockCreator.setDaemon(true); + sockCreator.setName("PeerCallback"); + sockCreator.setPriority(I2PThread.MIN_PRIORITY); + sockCreator.start(); + + if (_log.shouldLog(Log.DEBUG)) _log.debug("Before joining socket creator via peer callback..."); + try { + synchronized (creator) { + creator.wait(TCPTransport.SOCKET_CREATE_TIMEOUT); + } + } catch (InterruptedException ie) {} + + boolean established = creator.couldEstablish(); + // returns a socket if and only if the connection was established and the I2P handshake byte sent and received + if (_log.shouldLog(Log.DEBUG)) + _log.debug("After joining socket creator via peer callback [could establish? " + established + "]"); + return established; } public RouterIdentity establishConnection() { - long start = Clock.getInstance().now(); - long success = 0; - if (_log.shouldLog(Log.DEBUG)) _log.debug("Establishing connection..."); - BigInteger myPub = _builder.getMyPublicValue(); - try { - _socket.setSoTimeout(ESTABLISHMENT_TIMEOUT); - if (_log.shouldLog(Log.DEBUG)) _log.debug("Before key exchange..."); - exchangeKey(); - if (_log.shouldLog(Log.DEBUG)) _log.debug("Key exchanged..."); - // key exchanged. now say who we are and prove it - boolean ok = identifyStationToStation(); - if (_log.shouldLog(Log.DEBUG)) _log.debug("After station to station [" + ok + "]..."); - - if (!ok) - throw new DataFormatException("Station to station identification failed! 
MITM?"); - - - if (_log.shouldLog(Log.DEBUG)) _log.debug("before validateVersion..."); - boolean versionOk = validateVersion(); - if (_log.shouldLog(Log.DEBUG)) _log.debug("after validateVersion [" + versionOk + "]..."); - - if (!versionOk) { - // not only do we remove the reference to the invalid peer - NetworkDatabaseFacade.getInstance().fail(_remoteIdentity.getHash()); - // but we make sure that we don't try to talk to them soon even if we get a new ref - Shitlist.getInstance().shitlistRouter(_remoteIdentity.getHash()); - throw new DataFormatException("Peer uses an invalid version! dropping"); - } - - if (_log.shouldLog(Log.DEBUG)) _log.debug("before validateTime..."); - boolean timeOk = validateTime(); - if (_log.shouldLog(Log.DEBUG)) _log.debug("after validateTime [" + timeOk + "]..."); - if (!timeOk) { - Shitlist.getInstance().shitlistRouter(_remoteIdentity.getHash()); - throw new DataFormatException("Peer is too far out of sync with the current router's clock! dropping"); - } - - if (_log.shouldLog(Log.DEBUG)) _log.debug("before validate peer address..."); - boolean peerReachable = validatePeerAddress(); - if (_log.shouldLog(Log.DEBUG)) _log.debug("after validatePeerAddress [" + peerReachable + "]..."); - if (!peerReachable) { - Shitlist.getInstance().shitlistRouter(_remoteIdentity.getHash()); - throw new DataFormatException("Peer provided us with an unreachable router address, and we can't handle restricted routes yet! 
dropping"); - } - - if (_log.shouldLog(Log.INFO)) - _log.info("TCP connection " + _id + " established with " + _remoteIdentity.getHash().toBase64()); - _in = new AESInputStream(new BandwidthLimitedInputStream(_in, _remoteIdentity), _key, _iv); - _out = new AESOutputStream(new BandwidthLimitedOutputStream(_out, _remoteIdentity), _key, _iv); - _socket.setSoTimeout(0); - success = Clock.getInstance().now(); - established(); - return _remoteIdentity; - - } catch (IOException ioe) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Error establishing connection with " + _socket.getInetAddress().getHostAddress() + ":" + _socket.getPort(), ioe); - closeConnection(); - return null; - } catch (DataFormatException dfe) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Error establishing connection with " + _socket.getInetAddress().getHostAddress() + ":" + _socket.getPort(), dfe); - closeConnection(); - return null; - } catch (Throwable t) { - if (_log.shouldLog(Log.ERROR)) - _log.error("jrandom is paranoid so we're catching it all during establishConnection " + _socket.getInetAddress().getHostAddress() + ":" + _socket.getPort(), t); - closeConnection(); - return null; - } finally { - if (success > 0) - StatManager.getInstance().addRateData("tcp.establishConnectionTime", success-start, success-start); - } + long start = _context.clock().now(); + long success = 0; + if (_log.shouldLog(Log.DEBUG)) _log.debug("Establishing connection..."); + BigInteger myPub = _builder.getMyPublicValue(); + try { + _socket.setSoTimeout(ESTABLISHMENT_TIMEOUT); + if (_log.shouldLog(Log.DEBUG)) _log.debug("Before key exchange..."); + exchangeKey(); + if (_log.shouldLog(Log.DEBUG)) _log.debug("Key exchanged..."); + // key exchanged. now say who we are and prove it + boolean ok = identifyStationToStation(); + if (_log.shouldLog(Log.DEBUG)) _log.debug("After station to station [" + ok + "]..."); + + if (!ok) + throw new DataFormatException("Station to station identification failed! 
MITM?"); + + + if (_log.shouldLog(Log.DEBUG)) _log.debug("before validateVersion..."); + boolean versionOk = validateVersion(); + if (_log.shouldLog(Log.DEBUG)) _log.debug("after validateVersion [" + versionOk + "]..."); + + if (!versionOk) { + // not only do we remove the reference to the invalid peer + _context.netDb().fail(_remoteIdentity.getHash()); + // but we make sure that we don't try to talk to them soon even if we get a new ref + _context.shitlist().shitlistRouter(_remoteIdentity.getHash()); + throw new DataFormatException("Peer uses an invalid version! dropping"); + } + + if (_log.shouldLog(Log.DEBUG)) _log.debug("before validateTime..."); + boolean timeOk = validateTime(); + if (_log.shouldLog(Log.DEBUG)) _log.debug("after validateTime [" + timeOk + "]..."); + if (!timeOk) { + _context.shitlist().shitlistRouter(_remoteIdentity.getHash()); + throw new DataFormatException("Peer is too far out of sync with the current router's clock! dropping"); + } + + if (_log.shouldLog(Log.DEBUG)) _log.debug("before validate peer address..."); + boolean peerReachable = validatePeerAddress(); + if (_log.shouldLog(Log.DEBUG)) _log.debug("after validatePeerAddress [" + peerReachable + "]..."); + if (!peerReachable) { + _context.shitlist().shitlistRouter(_remoteIdentity.getHash()); + throw new DataFormatException("Peer provided us with an unreachable router address, and we can't handle restricted routes yet! 
dropping"); + } + + if (_log.shouldLog(Log.INFO)) + _log.info("TCP connection " + _id + " established with " + _remoteIdentity.getHash().toBase64()); + _in = new AESInputStream(_context, new BandwidthLimitedInputStream(_context, _in, _remoteIdentity), _key, _iv); + _out = new AESOutputStream(_context, new BandwidthLimitedOutputStream(_context, _out, _remoteIdentity), _key, _iv); + _socket.setSoTimeout(0); + success = _context.clock().now(); + established(); + return _remoteIdentity; + + } catch (IOException ioe) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Error establishing connection with " + _socket.getInetAddress().getHostAddress() + ":" + _socket.getPort(), ioe); + closeConnection(); + return null; + } catch (DataFormatException dfe) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Error establishing connection with " + _socket.getInetAddress().getHostAddress() + ":" + _socket.getPort(), dfe); + closeConnection(); + return null; + } catch (Throwable t) { + if (_log.shouldLog(Log.ERROR)) + _log.error("jrandom is paranoid so we're catching it all during establishConnection " + _socket.getInetAddress().getHostAddress() + ":" + _socket.getPort(), t); + closeConnection(); + return null; + } finally { + if (success > 0) + _context.statManager().addRateData("tcp.establishConnectionTime", success-start, success-start); + } } } diff --git a/router/java/src/net/i2p/router/transport/tcp/TCPConnection.java b/router/java/src/net/i2p/router/transport/tcp/TCPConnection.java index 4100b66a3..413984679 100644 --- a/router/java/src/net/i2p/router/transport/tcp/TCPConnection.java +++ b/router/java/src/net/i2p/router/transport/tcp/TCPConnection.java @@ -44,6 +44,7 @@ import net.i2p.stat.StatManager; import net.i2p.util.Clock; import net.i2p.util.I2PThread; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Wraps a connection - this contains a reader thread (via I2NPMessageReader) and @@ -52,7 +53,7 @@ import net.i2p.util.Log; * */ class TCPConnection implements 
I2NPMessageReader.I2NPMessageEventListener { - private final static Log _log = new Log(TCPConnection.class); + private Log _log; protected static int _idCounter = 0; protected int _id; protected DHSessionKeyBuilder _builder; @@ -60,7 +61,6 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { protected I2NPMessageReader _reader; protected InputStream _in; protected OutputStream _out; - protected RouterIdentity _myIdentity; protected RouterIdentity _remoteIdentity; protected TCPTransport _transport; protected ConnectionRunner _runner; @@ -68,28 +68,25 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { protected SessionKey _key; protected ByteArray _extraBytes; protected byte[] _iv; - protected SigningPrivateKey _signingKey; protected int _maxQueuedMessages; private long _lastSliceRun; private boolean _closed; private boolean _weInitiated; private long _created; + protected RouterContext _context; public final static String PARAM_MAX_QUEUED_MESSAGES = "i2np.tcp.maxQueuedMessages"; private final static int DEFAULT_MAX_QUEUED_MESSAGES = 20; - - static { - StatManager.getInstance().createRateStat("tcp.queueSize", "How many messages were already in the queue when a new message was added?", - "TCP Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - } - public TCPConnection(Socket s, RouterIdentity myIdent, SigningPrivateKey signingKey, boolean locallyInitiated) { + public TCPConnection(RouterContext context, Socket s, boolean locallyInitiated) { + _context = context; + _log = context.logManager().getLog(TCPConnection.class); + _context.statManager().createRateStat("tcp.queueSize", "How many messages were already in the queue when a new message was added?", + "TCP Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); _id = ++_idCounter; _weInitiated = locallyInitiated; _closed = false; _socket = s; - _myIdentity = myIdent; - _signingKey = signingKey; _created = -1; _toBeSent = new 
ArrayList(); try { @@ -110,7 +107,7 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { /** how long has this connection been around for, or -1 if it isn't established yet */ public long getLifetime() { if (_created > 0) - return Clock.getInstance().now() - _created; + return _context.clock().now() - _created; else return -1; } @@ -118,7 +115,7 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { protected boolean weInitiatedConnection() { return _weInitiated; } private void updateMaxQueuedMessages() { - String str = Router.getInstance().getConfigSetting(PARAM_MAX_QUEUED_MESSAGES); + String str = _context.router().getConfigSetting(PARAM_MAX_QUEUED_MESSAGES); if ( (str != null) && (str.trim().length() > 0) ) { try { int max = Integer.parseInt(str); @@ -166,13 +163,13 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { protected boolean identifyStationToStation() throws IOException, DataFormatException { ByteArrayOutputStream baos = new ByteArrayOutputStream(512); - _myIdentity.writeBytes(baos); - Hash keyHash = SHA256Generator.getInstance().calculateHash(_key.getData()); + _context.router().getRouterInfo().getIdentity().writeBytes(baos); + Hash keyHash = _context.sha().calculateHash(_key.getData()); keyHash.writeBytes(baos); - Signature sig = DSAEngine.getInstance().sign(baos.toByteArray(), _signingKey); + Signature sig = _context.dsa().sign(baos.toByteArray(), _context.keyManager().getSigningPrivateKey()); sig.writeBytes(baos); - byte encr[] = AESEngine.getInstance().safeEncrypt(baos.toByteArray(), _key, _iv, 1024); + byte encr[] = _context.AESEngine().safeEncrypt(baos.toByteArray(), _key, _iv, 1024); DataHelper.writeLong(_out, 2, encr.length); _out.write(encr); @@ -182,7 +179,7 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { int read = DataHelper.read(_in, pencr); if (read != rlen) throw new DataFormatException("Not enough data in peer ident"); - byte 
decr[] = AESEngine.getInstance().safeDecrypt(pencr, _key, _iv); + byte decr[] = _context.AESEngine().safeDecrypt(pencr, _key, _iv); if (decr == null) throw new DataFormatException("Unable to decrypt - failed exchange?"); @@ -202,7 +199,7 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { rsig.readBytes(bais); byte signedData[] = new byte[decr.length - rsig.getData().length]; System.arraycopy(decr, 0, signedData, 0, signedData.length); - return DSAEngine.getInstance().verifySignature(rsig, signedData, _remoteIdentity.getSigningPublicKey()); + return _context.dsa().verifySignature(rsig, signedData, _remoteIdentity.getSigningPublicKey()); } protected final static int ESTABLISHMENT_TIMEOUT = 10*1000; // 10 second lag (not necessarily for the entire establish) @@ -221,8 +218,8 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { if (_log.shouldLog(Log.INFO)) _log.info("TCP connection " + _id + " established with " + _remoteIdentity.getHash().toBase64()); - _in = new AESInputStream(new BandwidthLimitedInputStream(_in, _remoteIdentity), _key, _iv); - _out = new AESOutputStream(new BandwidthLimitedOutputStream(_out, _remoteIdentity), _key, _iv); + _in = new AESInputStream(_context, new BandwidthLimitedInputStream(_context, _in, _remoteIdentity), _key, _iv); + _out = new AESOutputStream(_context, new BandwidthLimitedOutputStream(_context, _out, _remoteIdentity), _key, _iv); _socket.setSoTimeout(0); established(); return _remoteIdentity; @@ -245,7 +242,7 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { } } - protected void established() { _created = Clock.getInstance().now(); } + protected void established() { _created = _context.clock().now(); } public void runConnection() { if (_log.shouldLog(Log.DEBUG)) @@ -255,7 +252,7 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { t.setName("Run Conn [" + _id + "]"); t.setDaemon(true); t.start(); - _reader = new 
I2NPMessageReader(_in, this, "TCP Read [" + _id + "]"); + _reader = new I2NPMessageReader(_context, _in, this, "TCP Read [" + _id + "]"); _reader.startReading(); } @@ -265,7 +262,7 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { msg.timestamp("TCPConnection.addMessage"); int totalPending = 0; boolean fail = false; - long beforeAdd = Clock.getInstance().now(); + long beforeAdd = _context.clock().now(); synchronized (_toBeSent) { if ( (_maxQueuedMessages > 0) && (_toBeSent.size() >= _maxQueuedMessages) ) { fail = true; @@ -276,9 +273,9 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { } _toBeSent.notifyAll(); } - long afterAdd = Clock.getInstance().now(); + long afterAdd = _context.clock().now(); - StatManager.getInstance().addRateData("tcp.queueSize", totalPending-1, 0); + _context.statManager().addRateData("tcp.queueSize", totalPending-1, 0); if (fail) { if (_log.shouldLog(Log.ERROR)) @@ -306,7 +303,7 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { if (slicesTooLong()) { if (_log.shouldLog(Log.ERROR)) { - long sliceTime = Clock.getInstance().now()-_lastSliceRun; + long sliceTime = _context.clock().now()-_lastSliceRun; _log.error("onAdd: Slices are taking too long (" + sliceTime + "ms) - perhaps the remote side is disconnected or hung? 
remote=" + _remoteIdentity.getHash().toBase64()); @@ -339,7 +336,7 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { if (_out != null) try { _out.close(); } catch (IOException ioe) {} if (_socket != null) try { _socket.close(); } catch (IOException ioe) {} if (_toBeSent != null) { - long now = Clock.getInstance().now(); + long now = _context.clock().now(); synchronized (_toBeSent) { for (Iterator iter = _toBeSent.iterator(); iter.hasNext(); ) { OutNetMessage msg = (OutNetMessage)iter.next(); @@ -384,7 +381,7 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { if (slicesTooLong()) { if (_log.shouldLog(Log.ERROR)) { - long sliceTime = Clock.getInstance().now()-_lastSliceRun; + long sliceTime = _context.clock().now()-_lastSliceRun; _log.error("onReceive: Slices are taking too long (" + sliceTime + "ms) - perhaps the remote side is disconnected or hung? peer = " + _remoteIdentity.getHash().toBase64()); @@ -417,7 +414,7 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { */ private boolean slicesTooLong() { if (_lastSliceRun <= 0) return false; - long diff = Clock.getInstance().now() - _lastSliceRun; + long diff = _context.clock().now() - _lastSliceRun; return (diff > MAX_SLICE_DURATION); } @@ -427,10 +424,10 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { _running = true; try { while (_running) { - long startSlice = Clock.getInstance().now(); + long startSlice = _context.clock().now(); _lastSliceRun = startSlice; processSlice(); - long endSlice = Clock.getInstance().now(); + long endSlice = _context.clock().now(); } } catch (IOException ioe) { if (_log.shouldLog(Log.ERROR)) @@ -445,7 +442,7 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { } private void processSlice() throws IOException { - long start = Clock.getInstance().now(); + long start = _context.clock().now(); OutNetMessage msg = null; int remaining = 0; @@ -488,7 
+485,7 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { " messages queued up for sending to " + _remoteIdentity.getHash().toBase64()); } - long afterExpire = Clock.getInstance().now(); + long afterExpire = _context.clock().now(); if (msg != null) { msg.timestamp("TCPConnection.runner.processSlice fetched"); @@ -508,13 +505,13 @@ class TCPConnection implements I2NPMessageReader.I2NPMessageEventListener { } msg.timestamp("TCPConnection.runner.processSlice sent and flushed"); - long end = Clock.getInstance().now(); + long end = _context.clock().now(); long timeLeft = msg.getMessage().getMessageExpiration().getTime() - end; if (_log.shouldLog(Log.INFO)) _log.info("Message " + msg.getMessage().getClass().getName() + " (expiring in " + timeLeft + "ms) sent to " + _remoteIdentity.getHash().toBase64() + " from " - + _myIdentity.getHash().toBase64() + + _context.routerHash().toBase64() + " over connection " + _id + " with " + data.length + " bytes in " + (end - start) + "ms"); if (timeLeft < 10*1000) { diff --git a/router/java/src/net/i2p/router/transport/tcp/TCPListener.java b/router/java/src/net/i2p/router/transport/tcp/TCPListener.java index 60c164bcb..e7b40b1b5 100644 --- a/router/java/src/net/i2p/router/transport/tcp/TCPListener.java +++ b/router/java/src/net/i2p/router/transport/tcp/TCPListener.java @@ -1,9 +1,9 @@ package net.i2p.router.transport.tcp; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -17,137 +17,141 @@ import java.net.UnknownHostException; import net.i2p.util.I2PThread; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Listen for TCP connections with a listener thread * */ class TCPListener { - private final static Log _log = new Log(TCPListener.class); + private Log _log; private TCPTransport _transport; private TCPAddress _myAddress; private ServerSocket _socket; private ListenerRunner _listener; + private RouterContext _context; - public TCPListener(TCPTransport transport) { - _myAddress = null; - _transport = transport; + public TCPListener(RouterContext context, TCPTransport transport) { + _context = context; + _log = context.logManager().getLog(TCPListener.class); + _myAddress = null; + _transport = transport; } public void setAddress(TCPAddress address) { _myAddress = address; } public TCPAddress getAddress() { return _myAddress; } public void startListening() { - _listener = new ListenerRunner(); - Thread t = new I2PThread(_listener); - t.setName("Listener [" + _myAddress.getPort()+"]"); - t.setDaemon(true); - t.start(); + _listener = new ListenerRunner(); + Thread t = new I2PThread(_listener); + t.setName("Listener [" + _myAddress.getPort()+"]"); + t.setDaemon(true); + t.start(); } public void stopListening() { - _listener.stopListening(); - if (_socket != null) - try { - _socket.close(); - _socket = null; - } catch (IOException ioe) {} + _listener.stopListening(); + if (_socket != null) + try { + _socket.close(); + _socket = null; + } catch (IOException ioe) {} } private InetAddress getInetAddress(String host) { - try { - return InetAddress.getByName(host); - } catch (UnknownHostException uhe) { - _log.warn("Listen host " + host + " unknown", uhe); - try { - return InetAddress.getLocalHost(); - } catch (UnknownHostException uhe2) { - _log.error("Local host is not reachable", uhe2); - return null; - } - } + try { + return InetAddress.getByName(host); + } catch (UnknownHostException uhe) { + _log.warn("Listen 
host " + host + " unknown", uhe); + try { + return InetAddress.getLocalHost(); + } catch (UnknownHostException uhe2) { + _log.error("Local host is not reachable", uhe2); + return null; + } + } } - + private final static int MAX_FAIL_DELAY = 5*60*1000; class ListenerRunner implements Runnable { - private boolean _isRunning; - private int _nextFailDelay = 1000; - public ListenerRunner() { - _isRunning = true; - } - public void stopListening() { _isRunning = false; } - - public void run() { - _log.info("Beginning TCP listener"); - - int curDelay = 0; - while ( (_isRunning) && (curDelay < MAX_FAIL_DELAY) ) { - try { - if (_transport.getListenAddressIsValid()) { - _socket = new ServerSocket(_myAddress.getPort(), 5, getInetAddress(_myAddress.getHost())); - } else { - _socket = new ServerSocket(_myAddress.getPort()); - } - _log.info("Begin looping for host " + _myAddress.getHost() + ":" + _myAddress.getPort()); - curDelay = 0; - loop(); - } catch (IOException ioe) { - _log.error("Error listening to tcp connection " + _myAddress.getHost() + ":" + _myAddress.getPort(), ioe); - } - - if (_socket != null) { - stopListening(); - try { _socket.close(); } catch (IOException ioe) {} - _socket = null; - } - - _log.error("Error listening, waiting " + _nextFailDelay + "ms before we try again"); - try { Thread.sleep(_nextFailDelay); } catch (InterruptedException ie) {} - curDelay += _nextFailDelay; - _nextFailDelay *= 5; - } - _log.error("CANCELING TCP LISTEN. 
delay = " + curDelay, new Exception("TCP Listen cancelled!!!")); - _isRunning = false; - } - private void loop() { - while (_isRunning) { - try { - if (_log.shouldLog(Log.INFO)) - _log.info("Waiting for a connection on " + _myAddress.getHost() + ":" + _myAddress.getPort()); - - Socket s = _socket.accept(); - if (_log.shouldLog(Log.INFO)) - _log.info("Connection handled on " + _myAddress.getHost() + ":" + _myAddress.getPort() + " with " + s.getInetAddress().toString() + ":" + s.getPort()); - - TimedHandler h = new TimedHandler(s); - I2PThread t = new I2PThread(h); - t.setDaemon(true); - t.start(); - synchronized (h) { - h.wait(HANDLE_TIMEOUT); - } - if (h.wasSuccessful()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Handle successful"); - } else { - if (h.receivedIdentByte()) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Unable to handle in the time allotted"); - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Peer didn't send the ident byte, so either they were testing us, or portscanning"); - } - try { s.close(); } catch (IOException ioe) {} - } - } catch (SocketException se) { - _log.error("Error handling a connection - closed?", se); - return; - } catch (Throwable t) { - _log.error("Error handling a connection", t); - } - } - } + private boolean _isRunning; + private int _nextFailDelay = 1000; + public ListenerRunner() { + _isRunning = true; + } + public void stopListening() { _isRunning = false; } + + public void run() { + _log.info("Beginning TCP listener"); + + int curDelay = 0; + while ( (_isRunning) && (curDelay < MAX_FAIL_DELAY) ) { + try { + if (_transport.getListenAddressIsValid()) { + _socket = new ServerSocket(_myAddress.getPort(), 5, getInetAddress(_myAddress.getHost())); + } else { + _socket = new ServerSocket(_myAddress.getPort()); + } + _log.info("Begin looping for host " + _myAddress.getHost() + ":" + _myAddress.getPort()); + curDelay = 0; + loop(); + } catch (IOException ioe) { + _log.error("Error listening to tcp connection " + 
_myAddress.getHost() + ":" + _myAddress.getPort(), ioe); + } + + if (_socket != null) { + stopListening(); + try { _socket.close(); } catch (IOException ioe) {} + _socket = null; + } + + _log.error("Error listening, waiting " + _nextFailDelay + "ms before we try again"); + try { Thread.sleep(_nextFailDelay); } catch (InterruptedException ie) {} + curDelay += _nextFailDelay; + _nextFailDelay *= 5; + } + _log.error("CANCELING TCP LISTEN. delay = " + curDelay, new Exception("TCP Listen cancelled!!!")); + _isRunning = false; + } + private void loop() { + while (_isRunning) { + try { + if (_log.shouldLog(Log.INFO)) + _log.info("Waiting for a connection on " + _myAddress.getHost() + ":" + _myAddress.getPort()); + + Socket s = _socket.accept(); + if (_log.shouldLog(Log.INFO)) + _log.info("Connection handled on " + _myAddress.getHost() + ":" + _myAddress.getPort() + " with " + s.getInetAddress().toString() + ":" + s.getPort()); + + TimedHandler h = new TimedHandler(s); + I2PThread t = new I2PThread(h); + t.setDaemon(true); + t.start(); + synchronized (h) { + h.wait(HANDLE_TIMEOUT); + } + if (h.wasSuccessful()) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Handle successful"); + } else { + if (h.receivedIdentByte()) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Unable to handle in the time allotted"); + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Peer didn't send the ident byte, so either they were testing us, or portscanning"); + } + try { s.close(); } catch (IOException ioe) {} + } + } catch (SocketException se) { + _log.error("Error handling a connection - closed?", se); + return; + } catch (Throwable t) { + _log.error("Error handling a connection", t); + } + } + } } /** if we're not making progress in 30s, drop 'em */ @@ -155,54 +159,54 @@ class TCPListener { private static volatile int __handlerId = 0; private class TimedHandler implements Runnable { - private int _handlerId; - private Socket _socket; - private boolean _wasSuccessful; - private 
boolean _receivedIdentByte; - public TimedHandler(Socket socket) { - _socket = socket; - _wasSuccessful = false; - _handlerId = ++__handlerId; - _receivedIdentByte = false; - } - public void run() { - Thread.currentThread().setName("TimedHandler"+_handlerId); - try { - _socket.getOutputStream().write(SocketCreator.I2P_FLAG); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("listener: I2P flag sent"); - int val = _socket.getInputStream().read(); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("listener: Value read: [" + val + "] == flag? [" + SocketCreator.I2P_FLAG + "]"); - if (val == -1) - throw new UnsupportedOperationException ("Peer disconnected while we were looking for the I2P flag"); - if (val != SocketCreator.I2P_FLAG) { - throw new UnsupportedOperationException ("Peer connecting to us didn't send the right I2P byte [" + val + "]"); - } - - _receivedIdentByte = true; - - TCPConnection c = new RestrictiveTCPConnection(_socket, _transport.getMyIdentity(), _transport.getMySigningKey(), false); - _transport.handleConnection(c, null); - _wasSuccessful = true; - } catch (UnsupportedOperationException uoe) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Failed to state they wanted to connect as I2P", uoe); - _wasSuccessful = false; - } catch (IOException ioe) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Error listening to the peer", ioe); - _wasSuccessful = false; - } catch (Throwable t) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Error handling", t); - _wasSuccessful = false; - } - synchronized (TimedHandler.this) { - TimedHandler.this.notifyAll(); - } - } - public boolean wasSuccessful() { return _wasSuccessful; } - public boolean receivedIdentByte() { return _receivedIdentByte; } + private int _handlerId; + private Socket _socket; + private boolean _wasSuccessful; + private boolean _receivedIdentByte; + public TimedHandler(Socket socket) { + _socket = socket; + _wasSuccessful = false; + _handlerId = ++__handlerId; + _receivedIdentByte = false; + } + public 
void run() { + Thread.currentThread().setName("TimedHandler"+_handlerId); + try { + _socket.getOutputStream().write(SocketCreator.I2P_FLAG); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("listener: I2P flag sent"); + int val = _socket.getInputStream().read(); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("listener: Value read: [" + val + "] == flag? [" + SocketCreator.I2P_FLAG + "]"); + if (val == -1) + throw new UnsupportedOperationException("Peer disconnected while we were looking for the I2P flag"); + if (val != SocketCreator.I2P_FLAG) { + throw new UnsupportedOperationException("Peer connecting to us didn't send the right I2P byte [" + val + "]"); + } + + _receivedIdentByte = true; + + TCPConnection c = new RestrictiveTCPConnection(_context, _socket, false); + _transport.handleConnection(c, null); + _wasSuccessful = true; + } catch (UnsupportedOperationException uoe) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Failed to state they wanted to connect as I2P", uoe); + _wasSuccessful = false; + } catch (IOException ioe) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Error listening to the peer", ioe); + _wasSuccessful = false; + } catch (Throwable t) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Error handling", t); + _wasSuccessful = false; + } + synchronized (TimedHandler.this) { + TimedHandler.this.notifyAll(); + } + } + public boolean wasSuccessful() { return _wasSuccessful; } + public boolean receivedIdentByte() { return _receivedIdentByte; } } } diff --git a/router/java/src/net/i2p/router/transport/tcp/TCPTransport.java b/router/java/src/net/i2p/router/transport/tcp/TCPTransport.java index 13eb43389..f735630ff 100644 --- a/router/java/src/net/i2p/router/transport/tcp/TCPTransport.java +++ b/router/java/src/net/i2p/router/transport/tcp/TCPTransport.java @@ -1,9 +1,9 @@ package net.i2p.router.transport.tcp; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no 
warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -39,87 +39,85 @@ import net.i2p.stat.StatManager; import net.i2p.util.Clock; import net.i2p.util.I2PThread; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Defines a way to send a message to another peer and start listening for messages * */ public class TCPTransport extends TransportImpl { - private final static Log _log = new Log(TCPTransport.class); + private Log _log; public final static String STYLE = "TCP"; - private List _listeners; private Map _connections; // routerIdentity --> List of TCPConnection - private RouterIdentity _myIdentity; private String _listenHost; private int _listenPort; private RouterAddress _address; - private SigningPrivateKey _signingKey; private boolean _listenAddressIsValid; private Map _msgs; // H(ident) --> PendingMessages for unestablished connections private boolean _running; - + private int _numConnectionEstablishers; private final static String PROP_ESTABLISHERS = "i2np.tcp.concurrentEstablishers"; private final static int DEFAULT_ESTABLISHERS = 3; public static String PROP_LISTEN_IS_VALID = "i2np.tcp.listenAddressIsValid"; - static { - StatManager.getInstance().createFrequencyStat("tcp.attemptFailureFrequency", "How often do we attempt to contact someone, and fail?", "TCP Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createFrequencyStat("tcp.attemptSuccessFrequency", "How often do we attempt to contact someone, and succeed?", "TCP Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createFrequencyStat("tcp.acceptFailureFrequency", "How often do we reject 
someone who contacts us?", "TCP Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createFrequencyStat("tcp.acceptSuccessFrequency", "How often do we accept someone who contacts us?", "TCP Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createRateStat("tcp.connectionLifetime", "How long do connections last (measured when they close)?", "TCP Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - } - - /** + /** * pre 1.4 java doesn't have a way to timeout the creation of sockets (which - * can take up to 3 minutes), so we do it on a seperate thread and wait for + * can take up to 3 minutes), so we do it on a seperate thread and wait for * either that thread to complete, or for this timeout to be reached. */ final static long SOCKET_CREATE_TIMEOUT = 10*1000; + + public TCPTransport(RouterContext context, RouterAddress address) { + super(context); + _log = context.logManager().getLog(TCPTransport.class); + if (_context == null) throw new RuntimeException("Context is null"); + if (_context.statManager() == null) throw new RuntimeException("Stat manager is null"); + _context.statManager().createFrequencyStat("tcp.attemptFailureFrequency", "How often do we attempt to contact someone, and fail?", "TCP Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createFrequencyStat("tcp.attemptSuccessFrequency", "How often do we attempt to contact someone, and succeed?", "TCP Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createFrequencyStat("tcp.acceptFailureFrequency", "How often do we reject someone who contacts us?", "TCP Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createFrequencyStat("tcp.acceptSuccessFrequency", "How often do we accept someone who contacts us?", "TCP Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + 
_context.statManager().createRateStat("tcp.connectionLifetime", "How long do connections last (measured when they close)?", "TCP Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - public TCPTransport(RouterIdentity myIdentity, SigningPrivateKey signingKey, RouterAddress address) { - super(); - _listeners = new ArrayList(); - _connections = new HashMap(); - _msgs = new HashMap(); - _myIdentity = myIdentity; - _address = address; - _signingKey = signingKey; - if (address != null) { - _listenHost = address.getOptions().getProperty(TCPAddress.PROP_HOST); - String portStr = address.getOptions().getProperty(TCPAddress.PROP_PORT); - try { - _listenPort = Integer.parseInt(portStr); - } catch (NumberFormatException nfe) { - _log.error("Invalid port: " + portStr + " Address: \n" + address, nfe); - } - } - _listenAddressIsValid = false; - try { - _listenAddressIsValid = Boolean.TRUE.toString().equalsIgnoreCase(Router.getInstance().getConfigSetting(PROP_LISTEN_IS_VALID)); - } catch (Throwable t) { - _listenAddressIsValid = false; - if (_log.shouldLog(Log.WARN)) - _log.warn("Unable to determine whether TCP listening address is valid, so we're assuming it isn't. 
Set " + PROP_LISTEN_IS_VALID + " otherwise"); - } - _running = false; + _listeners = new ArrayList(); + _connections = new HashMap(); + _msgs = new HashMap(); + _address = address; + if (address != null) { + _listenHost = address.getOptions().getProperty(TCPAddress.PROP_HOST); + String portStr = address.getOptions().getProperty(TCPAddress.PROP_PORT); + try { + _listenPort = Integer.parseInt(portStr); + } catch (NumberFormatException nfe) { + _log.error("Invalid port: " + portStr + " Address: \n" + address, nfe); + } + } + _listenAddressIsValid = false; + try { + String setting = _context.router().getConfigSetting(PROP_LISTEN_IS_VALID); + _listenAddressIsValid = Boolean.TRUE.toString().equalsIgnoreCase(setting); + } catch (Throwable t) { + _listenAddressIsValid = false; + if (_log.shouldLog(Log.WARN)) + _log.warn("Unable to determine whether TCP listening address is valid, so we're assuming it isn't. Set " + PROP_LISTEN_IS_VALID + " otherwise"); + } + _running = false; } boolean getListenAddressIsValid() { return _listenAddressIsValid; } - SigningPrivateKey getMySigningKey() { return _signingKey; } + SigningPrivateKey getMySigningKey() { return _context.keyManager().getSigningPrivateKey(); } /** fetch all of our TCP listening addresses */ TCPAddress[] getMyAddresses() { - if (_address != null) { - TCPAddress rv[] = new TCPAddress[1]; - rv[0] = new TCPAddress(_listenHost, _listenPort); - return rv; - } else { - return new TCPAddress[0]; - } + if (_address != null) { + TCPAddress rv[] = new TCPAddress[1]; + rv[0] = new TCPAddress(_listenHost, _listenPort); + return rv; + } else { + return new TCPAddress[0]; + } } /** @@ -127,18 +125,23 @@ public class TCPTransport extends TransportImpl { * and it should not block */ protected void outboundMessageReady() { - JobQueue.getInstance().addJob(new JobImpl() { - public void runJob() { - OutNetMessage msg = getNextMessage(); - if (msg != null) { - handleOutbound(msg); // this just adds to either the establish thread's queue or 
the conn's queue - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("OutboundMessageReady called, but none were available"); - } - } - public String getName() { return "TCP Message Ready to send"; } - }); + _context.jobQueue().addJob(new NextJob()); + } + + private class NextJob extends JobImpl { + public NextJob() { + super(TCPTransport.this._context); + } + public void runJob() { + OutNetMessage msg = getNextMessage(); + if (msg != null) { + handleOutbound(msg); // this just adds to either the establish thread's queue or the conn's queue + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("OutboundMessageReady called, but none were available"); + } + } + public String getName() { return "TCP Message Ready to send"; } } /** @@ -146,521 +149,518 @@ public class TCPTransport extends TransportImpl { * */ private TCPConnection getConnection(RouterIdentity peer) { - synchronized (_connections) { - if (!_connections.containsKey(peer)) - return null; - List cons = (List)_connections.get(peer); - if (cons.size() <= 0) - return null; - TCPConnection first = (TCPConnection)cons.get(0); - return first; - } + synchronized (_connections) { + if (!_connections.containsKey(peer)) + return null; + List cons = (List)_connections.get(peer); + if (cons.size() <= 0) + return null; + TCPConnection first = (TCPConnection)cons.get(0); + return first; + } } protected void handleOutbound(OutNetMessage msg) { - msg.timestamp("TCPTransport.handleOutbound before handleConnection"); - TCPConnection con = getConnection(msg.getTarget().getIdentity()); - if (con == null) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Handling outbound message to an unestablished peer"); - msg.timestamp("TCPTransport.handleOutbound to addPending"); - addPending(msg); - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Toss the message onto an established peer's connection"); - msg.timestamp("TCPTransport.handleOutbound to con.addMessage"); - con.addMessage(msg); - } + 
msg.timestamp("TCPTransport.handleOutbound before handleConnection"); + TCPConnection con = getConnection(msg.getTarget().getIdentity()); + if (con == null) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Handling outbound message to an unestablished peer"); + msg.timestamp("TCPTransport.handleOutbound to addPending"); + addPending(msg); + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Toss the message onto an established peer's connection"); + msg.timestamp("TCPTransport.handleOutbound to con.addMessage"); + con.addMessage(msg); + } } protected boolean establishConnection(RouterInfo target) { - long startEstablish = 0; - long socketCreated = 0; - long conCreated = 0; - long conEstablished = 0; - try { - for (Iterator iter = target.getAddresses().iterator(); iter.hasNext(); ) { - RouterAddress addr = (RouterAddress)iter.next(); - startEstablish = Clock.getInstance().now(); - if (getStyle().equals(addr.getTransportStyle())) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Establishing a connection with address " + addr); - Socket s = createSocket(addr); - socketCreated = Clock.getInstance().now(); - if (s == null) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Unable to establish a socket in time to " + addr); - ProfileManager.getInstance().commErrorOccurred(target.getIdentity().getHash()); - return false; - } - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Socket created"); - if (s != null) { - TCPConnection con = new RestrictiveTCPConnection(s, _myIdentity, _signingKey, true); - conCreated = Clock.getInstance().now(); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("TCPConnection created"); - boolean established = handleConnection(con, target); - conEstablished = Clock.getInstance().now(); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("connection handled"); - return established; - } - } - } - } catch (Throwable t) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Unexpected error establishing the connection", t); - } finally { - long diff = conEstablished - 
startEstablish; - if ( ( (diff > 6000) || (conEstablished == 0) ) && (_log.shouldLog(Log.WARN)) ) { - _log.warn("establishConnection took too long: socketCreate: " + - (socketCreated-startEstablish) + "ms conCreated: " + - (conCreated-socketCreated) + "ms conEstablished: " + - (conEstablished - conCreated) + "ms overall: " + diff); - } - } - return false; + long startEstablish = 0; + long socketCreated = 0; + long conCreated = 0; + long conEstablished = 0; + try { + for (Iterator iter = target.getAddresses().iterator(); iter.hasNext(); ) { + RouterAddress addr = (RouterAddress)iter.next(); + startEstablish = _context.clock().now(); + if (getStyle().equals(addr.getTransportStyle())) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Establishing a connection with address " + addr); + Socket s = createSocket(addr); + socketCreated = _context.clock().now(); + if (s == null) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Unable to establish a socket in time to " + addr); + _context.profileManager().commErrorOccurred(target.getIdentity().getHash()); + return false; + } + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Socket created"); + if (s != null) { + TCPConnection con = new RestrictiveTCPConnection(_context, s, true); + conCreated = _context.clock().now(); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("TCPConnection created"); + boolean established = handleConnection(con, target); + conEstablished = _context.clock().now(); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("connection handled"); + return established; + } + } + } + } catch (Throwable t) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Unexpected error establishing the connection", t); + } finally { + long diff = conEstablished - startEstablish; + if ( ( (diff > 6000) || (conEstablished == 0) ) && (_log.shouldLog(Log.WARN)) ) { + _log.warn("establishConnection took too long: socketCreate: " + + (socketCreated-startEstablish) + "ms conCreated: " + + (conCreated-socketCreated) + "ms conEstablished: " + + 
(conEstablished - conCreated) + "ms overall: " + diff); + } + } + return false; } - + protected Socket createSocket(RouterAddress addr) { - String host = addr.getOptions().getProperty(TCPAddress.PROP_HOST); - String portStr = addr.getOptions().getProperty(TCPAddress.PROP_PORT); - int port = -1; - try { - port = Integer.parseInt(portStr); - } catch (NumberFormatException nfe) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Invalid port number in router address: " + portStr, nfe); - return null; - } - - long start = Clock.getInstance().now(); - SocketCreator creator = new SocketCreator(host, port); - I2PThread sockCreator = new I2PThread(creator); - sockCreator.setDaemon(true); - sockCreator.setName("SocketCreator"); - sockCreator.setPriority(I2PThread.MIN_PRIORITY); - sockCreator.start(); - - try { - synchronized (creator) { - creator.wait(SOCKET_CREATE_TIMEOUT); - } - } catch (InterruptedException ie) {} - - long finish = Clock.getInstance().now(); - long diff = finish - start; - if (diff > 6000) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Creating a new socket took too long? wtf?! 
" + diff + "ms for " + host + ':' + port); - } - return creator.getSocket(); + String host = addr.getOptions().getProperty(TCPAddress.PROP_HOST); + String portStr = addr.getOptions().getProperty(TCPAddress.PROP_PORT); + int port = -1; + try { + port = Integer.parseInt(portStr); + } catch (NumberFormatException nfe) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Invalid port number in router address: " + portStr, nfe); + return null; + } + + long start = _context.clock().now(); + SocketCreator creator = new SocketCreator(host, port); + I2PThread sockCreator = new I2PThread(creator); + sockCreator.setDaemon(true); + sockCreator.setName("SocketCreator"); + sockCreator.setPriority(I2PThread.MIN_PRIORITY); + sockCreator.start(); + + try { + synchronized (creator) { + creator.wait(SOCKET_CREATE_TIMEOUT); + } + } catch (InterruptedException ie) {} + + long finish = _context.clock().now(); + long diff = finish - start; + if (diff > 6000) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Creating a new socket took too long? wtf?! 
" + diff + "ms for " + host + ':' + port); + } + return creator.getSocket(); } private boolean isConnected(RouterInfo info) { - return (null != getConnection(info.getIdentity())); + return (null != getConnection(info.getIdentity())); } public TransportBid bid(RouterInfo toAddress, long dataSize) { - TCPConnection con = getConnection(toAddress.getIdentity()); - int latencyStartup = 0; - if (con == null) - latencyStartup = 2000; - else - latencyStartup = 0; - - int sendTime = (int)((dataSize)/(16*1024)); // 16K/sec - int bytes = (int)dataSize+8; - - if (con != null) - sendTime += 50000 * con.getPendingMessageCount(); // try to avoid backed up (throttled) connections - - TransportBid bid = new TransportBid(); - bid.setBandwidthBytes(bytes); - bid.setExpiration(new Date(Clock.getInstance().now()+1000*60)); // 1 minute - bid.setLatencyMs(latencyStartup + sendTime); - bid.setMessageSize((int)dataSize); - bid.setRouter(toAddress); - bid.setTransport(this); - - RouterAddress addr = getTargetAddress(toAddress); - if (addr == null) { - if (con == null) { - if (_log.shouldLog(Log.INFO)) - _log.info("No address or connection to " + toAddress.getIdentity().getHash().toBase64()); - // don't bid if we can't send them a message - return null; - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("No address, but we're connected to " + toAddress.getIdentity().getHash().toBase64()); - } - } - - return bid; + TCPConnection con = getConnection(toAddress.getIdentity()); + int latencyStartup = 0; + if (con == null) + latencyStartup = 2000; + else + latencyStartup = 0; + + int sendTime = (int)((dataSize)/(16*1024)); // 16K/sec + int bytes = (int)dataSize+8; + + if (con != null) + sendTime += 50000 * con.getPendingMessageCount(); // try to avoid backed up (throttled) connections + + TransportBid bid = new TransportBid(); + bid.setBandwidthBytes(bytes); + bid.setExpiration(new Date(_context.clock().now()+1000*60)); // 1 minute + bid.setLatencyMs(latencyStartup + sendTime); + 
bid.setMessageSize((int)dataSize); + bid.setRouter(toAddress); + bid.setTransport(this); + + RouterAddress addr = getTargetAddress(toAddress); + if (addr == null) { + if (con == null) { + if (_log.shouldLog(Log.INFO)) + _log.info("No address or connection to " + toAddress.getIdentity().getHash().toBase64()); + // don't bid if we can't send them a message + return null; + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("No address, but we're connected to " + toAddress.getIdentity().getHash().toBase64()); + } + } + + return bid; } public void rotateAddresses() { - // noop + // noop } public void addAddressInfo(Properties infoForNewAddress) { - // noop + // noop } public RouterAddress startListening() { - RouterAddress address = new RouterAddress(); - - address.setTransportStyle(getStyle()); - address.setCost(10); - address.setExpiration(null); - Properties options = new Properties(); - if (_address != null) { - options.setProperty(TCPAddress.PROP_HOST, _listenHost); - options.setProperty(TCPAddress.PROP_PORT, _listenPort+""); - } - address.setOptions(options); - - if (_address != null) { - try { - TCPAddress addr = new TCPAddress(); - addr.setHost(_listenHost); - addr.setPort(_listenPort); - TCPListener listener = new TCPListener(this); - listener.setAddress(addr); - _listeners.add(listener); - listener.startListening(); - } catch (NumberFormatException nfe) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Error parsing port number", nfe); - } - - addCurrentAddress(address); - } - - String str = Router.getInstance().getConfigSetting(PROP_ESTABLISHERS); - if (str != null) { - try { - _numConnectionEstablishers = Integer.parseInt(str); - } catch (NumberFormatException nfe) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Invalid number of connection establishers [" + str + "]"); - _numConnectionEstablishers = DEFAULT_ESTABLISHERS; - } - } else { - _numConnectionEstablishers = DEFAULT_ESTABLISHERS; - } - - _running = true; - for (int i = 0; i < 
_numConnectionEstablishers; i++) { - Thread t = new I2PThread(new ConnEstablisher(i)); - t.setDaemon(true); - t.start(); - } - - return address; + RouterAddress address = new RouterAddress(); + + address.setTransportStyle(getStyle()); + address.setCost(10); + address.setExpiration(null); + Properties options = new Properties(); + if (_address != null) { + options.setProperty(TCPAddress.PROP_HOST, _listenHost); + options.setProperty(TCPAddress.PROP_PORT, _listenPort+""); + } + address.setOptions(options); + + if (_address != null) { + try { + TCPAddress addr = new TCPAddress(); + addr.setHost(_listenHost); + addr.setPort(_listenPort); + TCPListener listener = new TCPListener(_context, this); + listener.setAddress(addr); + _listeners.add(listener); + listener.startListening(); + } catch (NumberFormatException nfe) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Error parsing port number", nfe); + } + + addCurrentAddress(address); + } + + String str = _context.router().getConfigSetting(PROP_ESTABLISHERS); + if (str != null) { + try { + _numConnectionEstablishers = Integer.parseInt(str); + } catch (NumberFormatException nfe) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Invalid number of connection establishers [" + str + "]"); + _numConnectionEstablishers = DEFAULT_ESTABLISHERS; + } + } else { + _numConnectionEstablishers = DEFAULT_ESTABLISHERS; + } + + _running = true; + for (int i = 0; i < _numConnectionEstablishers; i++) { + Thread t = new I2PThread(new ConnEstablisher(i)); + t.setDaemon(true); + t.start(); + } + + return address; } public void stopListening() { - if (_log.shouldLog(Log.ERROR)) - _log.error("Stop listening called! 
No more TCP", new Exception("Die tcp, die")); - _running = false; - - for (int i = 0; i < _listeners.size(); i++) { - TCPListener lsnr = (TCPListener)_listeners.get(i); - lsnr.stopListening(); - } - Set allCons = new HashSet(); - synchronized (_connections) { - for (Iterator iter = _connections.values().iterator(); iter.hasNext(); ) { - List cons = (List)iter.next(); - for (Iterator citer = cons.iterator(); citer.hasNext(); ) { - TCPConnection con = (TCPConnection)citer.next(); - allCons.add(con); - } - } - } - for (Iterator iter = allCons.iterator(); iter.hasNext(); ) { - TCPConnection con = (TCPConnection)iter.next(); - con.closeConnection(); - } + if (_log.shouldLog(Log.ERROR)) + _log.error("Stop listening called! No more TCP", new Exception("Die tcp, die")); + _running = false; + + for (int i = 0; i < _listeners.size(); i++) { + TCPListener lsnr = (TCPListener)_listeners.get(i); + lsnr.stopListening(); + } + Set allCons = new HashSet(); + synchronized (_connections) { + for (Iterator iter = _connections.values().iterator(); iter.hasNext(); ) { + List cons = (List)iter.next(); + for (Iterator citer = cons.iterator(); citer.hasNext(); ) { + TCPConnection con = (TCPConnection)citer.next(); + allCons.add(con); + } + } + } + for (Iterator iter = allCons.iterator(); iter.hasNext(); ) { + TCPConnection con = (TCPConnection)iter.next(); + con.closeConnection(); + } } - public RouterIdentity getMyIdentity() { return _myIdentity; } + public RouterIdentity getMyIdentity() { return _context.router().getRouterInfo().getIdentity(); } void connectionClosed(TCPConnection con) { - if (_log.shouldLog(Log.INFO)) - _log.info("Connection closed with " + con.getRemoteRouterIdentity()); - StringBuffer buf = new StringBuffer(256); - buf.append("Still connected to: "); - synchronized (_connections) { - List cons = (List)_connections.get(con.getRemoteRouterIdentity()); - if ( (cons != null) && (cons.size() > 0) ) { - cons.remove(con); - long lifetime = con.getLifetime(); - if 
(_log.shouldLog(Log.INFO)) - _log.info("Connection closed (with remaining) after lifetime " + lifetime); - StatManager.getInstance().addRateData("tcp.connectionLifetime", lifetime, 0); - } - Set toRemove = new HashSet(); - for (Iterator iter = _connections.keySet().iterator(); iter.hasNext();) { - RouterIdentity ident = (RouterIdentity)iter.next(); - List all = (List)_connections.get(ident); - if (all.size() > 0) - buf.append(ident.getHash().toBase64()).append(" "); - else - toRemove.add(ident); - } - for (Iterator iter = toRemove.iterator(); iter.hasNext(); ) { - _connections.remove(iter.next()); - } - } - if (_log.shouldLog(Log.INFO)) - _log.info(buf.toString()); - //if (con.getRemoteRouterIdentity() != null) - // ProfileManager.getInstance().commErrorOccurred(con.getRemoteRouterIdentity().getHash()); + if (_log.shouldLog(Log.INFO)) + _log.info("Connection closed with " + con.getRemoteRouterIdentity()); + StringBuffer buf = new StringBuffer(256); + buf.append("Still connected to: "); + synchronized (_connections) { + List cons = (List)_connections.get(con.getRemoteRouterIdentity()); + if ( (cons != null) && (cons.size() > 0) ) { + cons.remove(con); + long lifetime = con.getLifetime(); + if (_log.shouldLog(Log.INFO)) + _log.info("Connection closed (with remaining) after lifetime " + lifetime); + _context.statManager().addRateData("tcp.connectionLifetime", lifetime, 0); + } + Set toRemove = new HashSet(); + for (Iterator iter = _connections.keySet().iterator(); iter.hasNext();) { + RouterIdentity ident = (RouterIdentity)iter.next(); + List all = (List)_connections.get(ident); + if (all.size() > 0) + buf.append(ident.getHash().toBase64()).append(" "); + else + toRemove.add(ident); + } + for (Iterator iter = toRemove.iterator(); iter.hasNext(); ) { + _connections.remove(iter.next()); + } + } + if (_log.shouldLog(Log.INFO)) + _log.info(buf.toString()); + //if (con.getRemoteRouterIdentity() != null) } boolean handleConnection(TCPConnection con, RouterInfo target) { - 
con.setTransport(this); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Before establishing connection"); - long start = Clock.getInstance().now(); - RouterIdentity ident = con.establishConnection(); - long afterEstablish = Clock.getInstance().now(); - long startRunning = 0; - - if (ident == null) { - StatManager.getInstance().updateFrequency("tcp.acceptFailureFrequency"); - con.closeConnection(); - //if ( (target != null) && (target.getIdentity() != null) ) - // ProfileManager.getInstance().commErrorOccurred(target.getIdentity().getHash()); - return false; - } - - if (_log.shouldLog(Log.INFO)) - _log.info("Connection established with " + ident); - if (target != null) { - if (!target.getIdentity().equals(ident)) { - StatManager.getInstance().updateFrequency("tcp.acceptFailureFrequency"); - if (_log.shouldLog(Log.ERROR)) - _log.error("Target changed identities!!! was " + target.getIdentity().getHash().toBase64() + ", now is " + ident.getHash().toBase64() + "! DROPPING CONNECTION"); - con.closeConnection(); - // remove the old ref, since they likely just created a new identity - NetworkDatabaseFacade.getInstance().fail(target.getIdentity().getHash()); - return false; - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Target is the same as who we connected with"); - } - } - if (ident != null) { - Set toClose = new HashSet(4); - List toAdd = new LinkedList(); - synchronized (_connections) { - if (!_connections.containsKey(ident)) - _connections.put(ident, new ArrayList(2)); - List cons = (List)_connections.get(ident); - if (cons.size() > 0) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Attempted to open additional connections with " + ident.getHash() + ": closing older connections", new Exception("multiple cons")); - while (cons.size() > 0) { - TCPConnection oldCon = (TCPConnection)cons.remove(0); - toAdd.addAll(oldCon.getPendingMessages()); - toClose.add(oldCon); - } - } - cons.add(con); - - Set toRemove = new HashSet(); - for (Iterator iter = 
_connections.keySet().iterator(); iter.hasNext();) { - RouterIdentity cur = (RouterIdentity)iter.next(); - List all = (List)_connections.get(cur); - if (all.size() <= 0) - toRemove.add(ident); - } - for (Iterator iter = toRemove.iterator(); iter.hasNext(); ) { - _connections.remove(iter.next()); - } - } - - if (toAdd.size() > 0) { - for (Iterator iter = toAdd.iterator(); iter.hasNext(); ) { - OutNetMessage msg = (OutNetMessage)iter.next(); - con.addMessage(msg); - } - if (_log.shouldLog(Log.INFO)) - _log.info("Transferring " + toAdd.size() + " messages from old cons to the newly established con"); - } - - Shitlist.getInstance().unshitlistRouter(ident.getHash()); - con.runConnection(); - startRunning = Clock.getInstance().now(); - - if (toClose.size() > 0) { - for (Iterator iter = toClose.iterator(); iter.hasNext(); ) { - TCPConnection oldCon = (TCPConnection)iter.next(); - if (_log.shouldLog(Log.INFO)) - _log.info("Closing old duplicate connection " + oldCon.toString(), new Exception("Closing old con")); - oldCon.closeConnection(); - StatManager.getInstance().addRateData("tcp.connectionLifetime", oldCon.getLifetime(), 0); - } - } - long done = Clock.getInstance().now(); - - long diff = done - start; - if ( (diff > 3*1000) && (_log.shouldLog(Log.WARN)) ) { - _log.warn("handleConnection took too long: " + diff + "ms with " + - (afterEstablish-start) + "ms to establish " + - (startRunning-afterEstablish) + "ms to start running " + - (done-startRunning) + "ms to cleanup"); - } - if (_log.shouldLog(Log.DEBUG)) - _log.debug("runConnection called on the con"); - } - - StatManager.getInstance().updateFrequency("tcp.acceptSuccessFrequency"); - return true; + con.setTransport(this); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Before establishing connection"); + long start = _context.clock().now(); + RouterIdentity ident = con.establishConnection(); + long afterEstablish = _context.clock().now(); + long startRunning = 0; + + if (ident == null) { + 
_context.statManager().updateFrequency("tcp.acceptFailureFrequency"); + con.closeConnection(); + return false; + } + + if (_log.shouldLog(Log.INFO)) + _log.info("Connection established with " + ident); + if (target != null) { + if (!target.getIdentity().equals(ident)) { + _context.statManager().updateFrequency("tcp.acceptFailureFrequency"); + if (_log.shouldLog(Log.ERROR)) + _log.error("Target changed identities!!! was " + target.getIdentity().getHash().toBase64() + ", now is " + ident.getHash().toBase64() + "! DROPPING CONNECTION"); + con.closeConnection(); + // remove the old ref, since they likely just created a new identity + _context.netDb().fail(target.getIdentity().getHash()); + return false; + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Target is the same as who we connected with"); + } + } + if (ident != null) { + Set toClose = new HashSet(4); + List toAdd = new LinkedList(); + synchronized (_connections) { + if (!_connections.containsKey(ident)) + _connections.put(ident, new ArrayList(2)); + List cons = (List)_connections.get(ident); + if (cons.size() > 0) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Attempted to open additional connections with " + ident.getHash() + ": closing older connections", new Exception("multiple cons")); + while (cons.size() > 0) { + TCPConnection oldCon = (TCPConnection)cons.remove(0); + toAdd.addAll(oldCon.getPendingMessages()); + toClose.add(oldCon); + } + } + cons.add(con); + + Set toRemove = new HashSet(); + for (Iterator iter = _connections.keySet().iterator(); iter.hasNext();) { + RouterIdentity cur = (RouterIdentity)iter.next(); + List all = (List)_connections.get(cur); + if (all.size() <= 0) + toRemove.add(ident); + } + for (Iterator iter = toRemove.iterator(); iter.hasNext(); ) { + _connections.remove(iter.next()); + } + } + + if (toAdd.size() > 0) { + for (Iterator iter = toAdd.iterator(); iter.hasNext(); ) { + OutNetMessage msg = (OutNetMessage)iter.next(); + con.addMessage(msg); + } + if 
(_log.shouldLog(Log.INFO)) + _log.info("Transferring " + toAdd.size() + " messages from old cons to the newly established con"); + } + + _context.shitlist().unshitlistRouter(ident.getHash()); + con.runConnection(); + startRunning = _context.clock().now(); + + if (toClose.size() > 0) { + for (Iterator iter = toClose.iterator(); iter.hasNext(); ) { + TCPConnection oldCon = (TCPConnection)iter.next(); + if (_log.shouldLog(Log.INFO)) + _log.info("Closing old duplicate connection " + oldCon.toString(), new Exception("Closing old con")); + oldCon.closeConnection(); + _context.statManager().addRateData("tcp.connectionLifetime", oldCon.getLifetime(), 0); + } + } + long done = _context.clock().now(); + + long diff = done - start; + if ( (diff > 3*1000) && (_log.shouldLog(Log.WARN)) ) { + _log.warn("handleConnection took too long: " + diff + "ms with " + + (afterEstablish-start) + "ms to establish " + + (startRunning-afterEstablish) + "ms to start running " + + (done-startRunning) + "ms to cleanup"); + } + if (_log.shouldLog(Log.DEBUG)) + _log.debug("runConnection called on the con"); + } + + _context.statManager().updateFrequency("tcp.acceptSuccessFrequency"); + return true; } - + public String getStyle() { return STYLE; } - public String renderStatusHTML() { - StringBuffer buf = new StringBuffer(); - Map cons = new HashMap(); - synchronized (_connections) { - cons.putAll(_connections); - } - int established = 0; - buf.append("TCP Transport (").append(cons.size()).append(" connections)
    \n"); - buf.append("
      "); - for (Iterator iter = cons.keySet().iterator(); iter.hasNext(); ) { - buf.append("
    • "); - RouterIdentity ident = (RouterIdentity)iter.next(); - List curCons = (List)cons.get(ident); - buf.append("Connections to ").append(ident.getHash().toBase64()).append(": ").append(curCons.size()).append("
        \n"); - for (int i = 0; i < curCons.size(); i++) { - TCPConnection con = (TCPConnection)curCons.get(i); - if (con.getLifetime() > 0) { - established++; - buf.append("
      • Connection ").append(con.getId()).append(": pending # messages to be sent: ").append(con.getPendingMessageCount()).append(" lifetime: ").append(DataHelper.formatDuration(con.getLifetime())).append("
      • \n"); - } else { - buf.append("
      • Connection ").append(con.getId()).append(": [connection in progress]
      • \n"); - } - } - buf.append("
      \n"); - buf.append("
    • \n"); - } - buf.append("
    \n"); - - if (established == 0) { - buf.append("No TCP connections
      "); - buf.append("
    • Is your publicly reachable IP address / hostname ").append(_listenHost).append("?
    • \n"); - buf.append("
    • Is your firewall / NAT open to receive connections on port ").append(_listenPort).append("?
    • \n"); - buf.append("
    • Do you have any reachable peer references (see down below for \"Routers\", "); - buf.append(" or check your netDb directory - you want at least two routers, since one of them is your own)
    • \n"); - buf.append("
    \n"); - } - return buf.toString(); + public String renderStatusHTML() { + StringBuffer buf = new StringBuffer(); + Map cons = new HashMap(); + synchronized (_connections) { + cons.putAll(_connections); + } + int established = 0; + buf.append("TCP Transport (").append(cons.size()).append(" connections)
    \n"); + buf.append("
      "); + for (Iterator iter = cons.keySet().iterator(); iter.hasNext(); ) { + buf.append("
    • "); + RouterIdentity ident = (RouterIdentity)iter.next(); + List curCons = (List)cons.get(ident); + buf.append("Connections to ").append(ident.getHash().toBase64()).append(": ").append(curCons.size()).append("
        \n"); + for (int i = 0; i < curCons.size(); i++) { + TCPConnection con = (TCPConnection)curCons.get(i); + if (con.getLifetime() > 0) { + established++; + buf.append("
      • Connection ").append(con.getId()).append(": pending # messages to be sent: ").append(con.getPendingMessageCount()).append(" lifetime: ").append(DataHelper.formatDuration(con.getLifetime())).append("
      • \n"); + } else { + buf.append("
      • Connection ").append(con.getId()).append(": [connection in progress]
      • \n"); + } + } + buf.append("
      \n"); + buf.append("
    • \n"); + } + buf.append("
    \n"); + + if (established == 0) { + buf.append("No TCP connections
      "); + buf.append("
    • Is your publicly reachable IP address / hostname ").append(_listenHost).append("?
    • \n"); + buf.append("
    • Is your firewall / NAT open to receive connections on port ").append(_listenPort).append("?
    • \n"); + buf.append("
    • Do you have any reachable peer references (see down below for \"Routers\", "); + buf.append(" or check your netDb directory - you want at least two routers, since one of them is your own)
    • \n"); + buf.append("
    \n"); + } + return buf.toString(); } - /** + /** * only establish one connection at a time, and if multiple requests are pooled * for the same one, once one is established send all the messages through * */ private class ConnEstablisher implements Runnable { - private int _id; - - public ConnEstablisher(int id) { - _id = id; - } - - public int getId() { return _id; } - - public void run() { - Thread.currentThread().setName("Conn Establisher" + _id); - - while (_running) { - try { - PendingMessages pending = nextPeer(this); - - long start = Clock.getInstance().now(); - - if (_log.shouldLog(Log.INFO)) - _log.info("Beginning establishment with " + pending.getPeer().toBase64() + " [not error]"); - - TCPConnection con = getConnection(pending.getPeerInfo().getIdentity()); - long conFetched = Clock.getInstance().now(); - long sentPending = 0; - long establishedCon = 0; - long refetchedCon = 0; - long sentRefetched = 0; - long failedPending = 0; - - if (con != null) { - sendPending(con, pending); - sentPending = Clock.getInstance().now(); - } else { - boolean established = establishConnection(pending.getPeerInfo()); - establishedCon = Clock.getInstance().now(); - if (established) { - StatManager.getInstance().updateFrequency("tcp.attemptSuccessFrequency"); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Connection established"); - con = getConnection(pending.getPeerInfo().getIdentity()); - refetchedCon = Clock.getInstance().now(); - if (con == null) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Connection established but we can't find the connection? wtf! 
peer = " + pending.getPeer()); - } else { - Shitlist.getInstance().unshitlistRouter(pending.getPeer()); - sendPending(con, pending); - sentRefetched = Clock.getInstance().now(); - } - } else { - StatManager.getInstance().updateFrequency("tcp.attemptFailureFrequency"); - if (_log.shouldLog(Log.INFO)) - _log.info("Unable to establish a connection to " + pending.getPeer()); - failPending(pending); - Shitlist.getInstance().shitlistRouter(pending.getPeer()); - //ProfileManager.getInstance().commErrorOccurred(pending.getPeer()); - failedPending = Clock.getInstance().now(); - } - } - - long end = Clock.getInstance().now(); - long diff = end - start; - - StringBuffer buf = new StringBuffer(128); - buf.append("Time to establish with ").append(pending.getPeer().toBase64()).append(": ").append(diff).append("ms"); - buf.append(" fetched: ").append(conFetched-start).append(" ms"); - if (sentPending != 0) - buf.append(" sendPending: ").append(sentPending - conFetched).append("ms"); - if (establishedCon != 0) { - buf.append(" established: ").append(establishedCon - conFetched).append("ms"); - if (refetchedCon != 0) { - buf.append(" refetched: ").append(refetchedCon - establishedCon).append("ms"); - if (sentRefetched != 0) { - buf.append(" sentRefetched: ").append(sentRefetched - refetchedCon).append("ms"); - } - } else { - buf.append(" failedPending: ").append(failedPending - establishedCon).append("ms"); - } - } - if (diff > 6000) { - if (_log.shouldLog(Log.WARN)) - _log.warn(buf.toString()); - } else { - if (_log.shouldLog(Log.INFO)) - _log.info(buf.toString()); - } - } catch (Throwable t) { - if (_log.shouldLog(Log.CRIT)) - _log.log(Log.CRIT, "Error in connection establisher thread - NO MORE CONNECTIONS", t); - } - } - } + private int _id; + + public ConnEstablisher(int id) { + _id = id; + } + + public int getId() { return _id; } + + public void run() { + Thread.currentThread().setName("Conn Establisher" + _id); + + while (_running) { + try { + PendingMessages pending = 
nextPeer(this); + + long start = _context.clock().now(); + + if (_log.shouldLog(Log.INFO)) + _log.info("Beginning establishment with " + pending.getPeer().toBase64() + " [not error]"); + + TCPConnection con = getConnection(pending.getPeerInfo().getIdentity()); + long conFetched = _context.clock().now(); + long sentPending = 0; + long establishedCon = 0; + long refetchedCon = 0; + long sentRefetched = 0; + long failedPending = 0; + + if (con != null) { + sendPending(con, pending); + sentPending = _context.clock().now(); + } else { + boolean established = establishConnection(pending.getPeerInfo()); + establishedCon = _context.clock().now(); + if (established) { + _context.statManager().updateFrequency("tcp.attemptSuccessFrequency"); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Connection established"); + con = getConnection(pending.getPeerInfo().getIdentity()); + refetchedCon = _context.clock().now(); + if (con == null) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Connection established but we can't find the connection? wtf! 
peer = " + pending.getPeer()); + } else { + _context.shitlist().unshitlistRouter(pending.getPeer()); + sendPending(con, pending); + sentRefetched = _context.clock().now(); + } + } else { + _context.statManager().updateFrequency("tcp.attemptFailureFrequency"); + if (_log.shouldLog(Log.INFO)) + _log.info("Unable to establish a connection to " + pending.getPeer()); + failPending(pending); + _context.shitlist().shitlistRouter(pending.getPeer()); + //ProfileManager.getInstance().commErrorOccurred(pending.getPeer()); + failedPending = _context.clock().now(); + } + } + + long end = _context.clock().now(); + long diff = end - start; + + StringBuffer buf = new StringBuffer(128); + buf.append("Time to establish with ").append(pending.getPeer().toBase64()).append(": ").append(diff).append("ms"); + buf.append(" fetched: ").append(conFetched-start).append(" ms"); + if (sentPending != 0) + buf.append(" sendPending: ").append(sentPending - conFetched).append("ms"); + if (establishedCon != 0) { + buf.append(" established: ").append(establishedCon - conFetched).append("ms"); + if (refetchedCon != 0) { + buf.append(" refetched: ").append(refetchedCon - establishedCon).append("ms"); + if (sentRefetched != 0) { + buf.append(" sentRefetched: ").append(sentRefetched - refetchedCon).append("ms"); + } + } else { + buf.append(" failedPending: ").append(failedPending - establishedCon).append("ms"); + } + } + if (diff > 6000) { + if (_log.shouldLog(Log.WARN)) + _log.warn(buf.toString()); + } else { + if (_log.shouldLog(Log.INFO)) + _log.info(buf.toString()); + } + } catch (Throwable t) { + if (_log.shouldLog(Log.CRIT)) + _log.log(Log.CRIT, "Error in connection establisher thread - NO MORE CONNECTIONS", t); + } + } + } } /** @@ -669,177 +669,177 @@ public class TCPTransport extends TransportImpl { * */ public void addPending(OutNetMessage msg) { - synchronized (_msgs) { - Hash target = msg.getTarget().getIdentity().getHash(); - PendingMessages msgs = (PendingMessages)_msgs.get(target); - if 
(msgs == null) { - msgs = new PendingMessages(msg.getTarget()); - msgs.addPending(msg); - _msgs.put(target, msgs); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Adding a pending to new " + target.toBase64()); - } else { - msgs.addPending(msg); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Adding a pending to existing " + target.toBase64()); - } - int level = Log.INFO; - if (msgs.getMessageCount() > 1) - level = Log.WARN; - if (_log.shouldLog(level)) - _log.log(level, "Add message to " + target.toBase64() + ", making a total of " + msgs.getMessageCount() + " for them, with another " + (_msgs.size() -1) + " peers pending establishment"); - _msgs.notifyAll(); - } - msg.timestamp("TCPTransport.addPending finished and notified"); + synchronized (_msgs) { + Hash target = msg.getTarget().getIdentity().getHash(); + PendingMessages msgs = (PendingMessages)_msgs.get(target); + if (msgs == null) { + msgs = new PendingMessages(msg.getTarget()); + msgs.addPending(msg); + _msgs.put(target, msgs); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Adding a pending to new " + target.toBase64()); + } else { + msgs.addPending(msg); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Adding a pending to existing " + target.toBase64()); + } + int level = Log.INFO; + if (msgs.getMessageCount() > 1) + level = Log.WARN; + if (_log.shouldLog(level)) + _log.log(level, "Add message to " + target.toBase64() + ", making a total of " + msgs.getMessageCount() + " for them, with another " + (_msgs.size() -1) + " peers pending establishment"); + _msgs.notifyAll(); + } + msg.timestamp("TCPTransport.addPending finished and notified"); } - + /** - * blocking call to claim the next available targeted peer. does a wait on + * blocking call to claim the next available targeted peer. does a wait on * the _msgs pool which should be notified from addPending. 
* */ private PendingMessages nextPeer(ConnEstablisher establisher) { - PendingMessages rv = null; - while (true) { - synchronized (_msgs) { - if (_msgs.size() > 0) { - for (Iterator iter = _msgs.keySet().iterator(); iter.hasNext(); ) { - Object key = iter.next(); - rv = (PendingMessages)_msgs.get(key); - if (!rv.setEstablisher(establisher)) { - // unable to claim this peer - if (_log.shouldLog(Log.INFO)) - _log.info("Peer is still in process: " + rv.getPeer() + " on establisher " + rv.getEstablisher().getId()); - rv = null; - } else { - if (_log.shouldLog(Log.INFO)) - _log.info("Returning next peer " + rv.getPeer().toBase64()); - return rv; - } - } - } - try { _msgs.wait(1000); } catch (InterruptedException ie) {} - } - } - + PendingMessages rv = null; + while (true) { + synchronized (_msgs) { + if (_msgs.size() > 0) { + for (Iterator iter = _msgs.keySet().iterator(); iter.hasNext(); ) { + Object key = iter.next(); + rv = (PendingMessages)_msgs.get(key); + if (!rv.setEstablisher(establisher)) { + // unable to claim this peer + if (_log.shouldLog(Log.INFO)) + _log.info("Peer is still in process: " + rv.getPeer() + " on establisher " + rv.getEstablisher().getId()); + rv = null; + } else { + if (_log.shouldLog(Log.INFO)) + _log.info("Returning next peer " + rv.getPeer().toBase64()); + return rv; + } + } + } + try { _msgs.wait(1000); } catch (InterruptedException ie) {} + } + } + } - + /** * Send all the messages targetting the given location * over the established connection * */ private void sendPending(TCPConnection con, PendingMessages pending) { - if (con == null) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Send pending to null con?", new Exception("Hmm")); - return; - } - if (pending == null) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Null pending, 'eh?", new Exception("Hmm..")); - return; - } - if (_log.shouldLog(Log.INFO)) - _log.info("Connection established, now queueing up " + pending.getMessageCount() + " messages to be sent"); - synchronized 
(_msgs) { - _msgs.remove(pending.getPeer()); - - OutNetMessage msg = null; - while ( (msg = pending.getNextMessage()) != null) { - msg.timestamp("TCPTransport.sendPending to con.addMessage"); - con.addMessage(msg); - } - } + if (con == null) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Send pending to null con?", new Exception("Hmm")); + return; + } + if (pending == null) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Null pending, 'eh?", new Exception("Hmm..")); + return; + } + if (_log.shouldLog(Log.INFO)) + _log.info("Connection established, now queueing up " + pending.getMessageCount() + " messages to be sent"); + synchronized (_msgs) { + _msgs.remove(pending.getPeer()); + + OutNetMessage msg = null; + while ( (msg = pending.getNextMessage()) != null) { + msg.timestamp("TCPTransport.sendPending to con.addMessage"); + con.addMessage(msg); + } + } } - + /** * Fail out all messages pending to the specified peer */ private void failPending(PendingMessages pending) { - if (pending != null) { - synchronized (_msgs) { - _msgs.remove(pending.getPeer()); - } - - OutNetMessage msg = null; - while ( (msg = pending.getNextMessage()) != null) { - afterSend(msg, false); - } - } + if (pending != null) { + synchronized (_msgs) { + _msgs.remove(pending.getPeer()); + } + + OutNetMessage msg = null; + while ( (msg = pending.getNextMessage()) != null) { + afterSend(msg, false); + } + } } - + /** * Coordinate messages for a particular peer that hasn't been established yet * */ private static class PendingMessages { - private List _messages; - private Hash _peer; - private RouterInfo _peerInfo; - private ConnEstablisher _establisher; - - public PendingMessages(RouterInfo peer) { - _messages = new LinkedList(); - _peerInfo = peer; - _peer = peer.getIdentity().getHash(); - _establisher = null; - } - - /** - * Claim a peer for a specific establisher - * - * @return true if the claim was successful, false if someone beat us to it - */ - public boolean 
setEstablisher(ConnEstablisher establisher) { - synchronized (PendingMessages.this) { - if (_establisher == null) { - _establisher = establisher; - return true; - } else { - return false; - } - } - } - public ConnEstablisher getEstablisher() { - return _establisher; - } - - /** - * Add a new message to this to-be-established connection - */ - public void addPending(OutNetMessage msg) { - synchronized (_messages) { - _messages.add(msg); - } - } - - /** - * Get the next message queued up for delivery on this connection being established - * - */ - public OutNetMessage getNextMessage() { - synchronized (_messages) { - if (_messages.size() <= 0) - return null; - else - return (OutNetMessage)_messages.remove(0); - } - } - - /** - * Get the number of messages queued up for this to be established connection - * - */ - public int getMessageCount() { - synchronized (_messages) { - return _messages.size(); - } - } - - /** who are we going to establish with? */ - public Hash getPeer() { return _peer; } - /** who are we going to establish with? 
*/ - public RouterInfo getPeerInfo() { return _peerInfo; } + private List _messages; + private Hash _peer; + private RouterInfo _peerInfo; + private ConnEstablisher _establisher; + + public PendingMessages(RouterInfo peer) { + _messages = new LinkedList(); + _peerInfo = peer; + _peer = peer.getIdentity().getHash(); + _establisher = null; + } + + /** + * Claim a peer for a specific establisher + * + * @return true if the claim was successful, false if someone beat us to it + */ + public boolean setEstablisher(ConnEstablisher establisher) { + synchronized (PendingMessages.this) { + if (_establisher == null) { + _establisher = establisher; + return true; + } else { + return false; + } + } + } + public ConnEstablisher getEstablisher() { + return _establisher; + } + + /** + * Add a new message to this to-be-established connection + */ + public void addPending(OutNetMessage msg) { + synchronized (_messages) { + _messages.add(msg); + } + } + + /** + * Get the next message queued up for delivery on this connection being established + * + */ + public OutNetMessage getNextMessage() { + synchronized (_messages) { + if (_messages.size() <= 0) + return null; + else + return (OutNetMessage)_messages.remove(0); + } + } + + /** + * Get the number of messages queued up for this to be established connection + * + */ + public int getMessageCount() { + synchronized (_messages) { + return _messages.size(); + } + } + + /** who are we going to establish with? */ + public Hash getPeer() { return _peer; } + /** who are we going to establish with? 
*/ + public RouterInfo getPeerInfo() { return _peerInfo; } } } diff --git a/router/java/src/net/i2p/router/tunnelmanager/ClientLeaseSetManagerJob.java b/router/java/src/net/i2p/router/tunnelmanager/ClientLeaseSetManagerJob.java index 6736e3e92..15bd8b81f 100644 --- a/router/java/src/net/i2p/router/tunnelmanager/ClientLeaseSetManagerJob.java +++ b/router/java/src/net/i2p/router/tunnelmanager/ClientLeaseSetManagerJob.java @@ -16,6 +16,7 @@ import net.i2p.router.NetworkDatabaseFacade; import net.i2p.router.TunnelInfo; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Manage the process of requesting a lease set as necessary for a client based @@ -30,7 +31,7 @@ import net.i2p.util.Log; * */ class ClientLeaseSetManagerJob extends JobImpl { - private final static Log _log = new Log(ClientLeaseSetManagerJob.class); + private Log _log; private ClientTunnelPool _pool; private LeaseSet _currentLeaseSet; private long _lastCreated; @@ -47,54 +48,66 @@ class ClientLeaseSetManagerJob extends JobImpl { */ private final static long REQUEST_LEASE_TIMEOUT = 30*1000; - public ClientLeaseSetManagerJob(ClientTunnelPool pool) { - super(); - _pool = pool; - _currentLeaseSet = null; - _lastCreated = -1; + public ClientLeaseSetManagerJob(RouterContext context, ClientTunnelPool pool) { + super(context); + _log = context.logManager().getLog(ClientLeaseSetManagerJob.class); + _pool = pool; + _currentLeaseSet = null; + _lastCreated = -1; } public void forceRequestLease() { _forceRequestLease = true; } public String getName() { return "Manage Client Lease Set"; } public void runJob() { - - if (_pool.isStopped()) { - if ( (_pool.getInactiveInboundTunnelIds().size() <= 0) && - (_pool.getInboundTunnelIds().size() <= 0) ) { - if (_log.shouldLog(Log.INFO)) - _log.info("No more tunnels and the client has stopped, so no need to manage the leaseSet any more for " + _pool.getDestination().calculateHash()); - return; - } else { - if (_log.shouldLog(Log.INFO)) 
- _log.info("Client " + _pool.getDestination().calculateHash() + " is stopped, but they still have some tunnels, so don't stop maintaining the leaseSet"); - requeue(RECHECK_DELAY); - return; - } - } - - int available = _pool.getSafePoolSize(); - if (available >= _pool.getClientSettings().getNumInboundTunnels()) { - if (_forceRequestLease) { - _log.info("Forced to request a new lease (reconnected client perhaps?)"); - _forceRequestLease = false; - requestNewLeaseSet(); - } else if (_currentLeaseSet == null) { - _log.info("No leaseSet is known - request a new one"); - requestNewLeaseSet(); - } else if (tunnelsChanged()) { - _log.info("Tunnels changed from the old leaseSet - request a new one: [pool = " + _pool.getInboundTunnelIds() + " old leaseSet: " + _currentLeaseSet); - requestNewLeaseSet(); - } else if (Clock.getInstance().now() > _lastCreated + _pool.getClientSettings().getInboundDuration()) { - _log.info("We've exceeded the client's requested duration (limit = " + new Date(_lastCreated + _pool.getClientSettings().getInboundDuration()) + " / " + _pool.getClientSettings().getInboundDuration() + ") - request a new leaseSet"); - requestNewLeaseSet(); - } else { - _log.debug("The current LeaseSet is fine, noop"); - } - } else { - _log.warn("Insufficient safe inbound tunnels exist for the client (" + available + " available, " + _pool.getClientSettings().getNumInboundTunnels() + " required) - no leaseSet requested"); - } - requeue(RECHECK_DELAY); + if (_pool.isStopped()) { + if ( (_pool.getInactiveInboundTunnelIds().size() <= 0) && + (_pool.getInboundTunnelIds().size() <= 0) ) { + if (_log.shouldLog(Log.INFO)) + _log.info("No more tunnels and the client has stopped, so no need to manage the leaseSet any more for " + + _pool.getDestination().calculateHash()); + return; + } else { + if (_log.shouldLog(Log.INFO)) + _log.info("Client " + _pool.getDestination().calculateHash() + + " is stopped, but they still have some tunnels, so don't stop maintaining the leaseSet"); + 
requeue(RECHECK_DELAY); + return; + } + } + + int available = _pool.getSafePoolSize(); + if (available >= _pool.getClientSettings().getNumInboundTunnels()) { + if (_forceRequestLease) { + if (_log.shouldLog(Log.INFO)) + _log.info("Forced to request a new lease (reconnected client perhaps?)"); + _forceRequestLease = false; + requestNewLeaseSet(); + } else if (_currentLeaseSet == null) { + if (_log.shouldLog(Log.INFO)) + _log.info("No leaseSet is known - request a new one"); + requestNewLeaseSet(); + } else if (tunnelsChanged()) { + if (_log.shouldLog(Log.INFO)) + _log.info("Tunnels changed from the old leaseSet - request a new one: [pool = " + + _pool.getInboundTunnelIds() + " old leaseSet: " + _currentLeaseSet); + requestNewLeaseSet(); + } else if (_context.clock().now() > _lastCreated + _pool.getClientSettings().getInboundDuration()) { + if (_log.shouldLog(Log.INFO)) + _log.info("We've exceeded the client's requested duration (limit = " + + new Date(_lastCreated + _pool.getClientSettings().getInboundDuration()) + + " / " + _pool.getClientSettings().getInboundDuration() + + ") - request a new leaseSet"); + requestNewLeaseSet(); + } else { + _log.debug("The current LeaseSet is fine, noop"); + } + } else { + _log.warn("Insufficient safe inbound tunnels exist for the client (" + available + + " available, " + _pool.getClientSettings().getNumInboundTunnels() + + " required) - no leaseSet requested"); + } + requeue(RECHECK_DELAY); } /** * Determine if the tunnels in the current leaseSet are the same as the @@ -103,101 +116,108 @@ class ClientLeaseSetManagerJob extends JobImpl { * @return true if the tunnels are /not/ the same, else true if they are */ private boolean tunnelsChanged() { - long furthestInFuture = 0; - Set currentIds = new HashSet(_currentLeaseSet.getLeaseCount()); - for (int i = 0; i < _currentLeaseSet.getLeaseCount(); i++) { - Lease lease = (Lease)_currentLeaseSet.getLease(i); - currentIds.add(lease.getTunnelId()); - if (lease.getEndDate().getTime() > 
furthestInFuture) - furthestInFuture = lease.getEndDate().getTime(); - } - Set avail = _pool.getInboundTunnelIds(); - avail.removeAll(currentIds); - // check to see if newer ones exist in the available pool - for (Iterator iter = avail.iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = _pool.getInboundTunnel(id); - // we need to check this in case the tunnel was deleted since 6 lines up - if ( (id != null) && (info != null) && (info.getSettings() != null) ) { - // if something available but not in the currently published lease will be - // around longer than any of the published leases, we want that tunnel to - // be added to our published lease - if (info.getSettings().getExpiration() > furthestInFuture) { - _log.debug("Tunnel " + id.getTunnelId() + " expires " + (info.getSettings().getExpiration()-furthestInFuture) + "ms after any of the existing ones do"); - return true; - } - } - } - _log.debug("None of the available tunnels expire after the existing lease set's tunnels"); - return false; + long furthestInFuture = 0; + Set currentIds = new HashSet(_currentLeaseSet.getLeaseCount()); + for (int i = 0; i < _currentLeaseSet.getLeaseCount(); i++) { + Lease lease = (Lease)_currentLeaseSet.getLease(i); + currentIds.add(lease.getTunnelId()); + if (lease.getEndDate().getTime() > furthestInFuture) + furthestInFuture = lease.getEndDate().getTime(); + } + Set avail = _pool.getInboundTunnelIds(); + avail.removeAll(currentIds); + // check to see if newer ones exist in the available pool + for (Iterator iter = avail.iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = _pool.getInboundTunnel(id); + // we need to check this in case the tunnel was deleted since 6 lines up + if ( (id != null) && (info != null) && (info.getSettings() != null) ) { + // if something available but not in the currently published lease will be + // around longer than any of the published leases, we want that tunnel to + // be 
added to our published lease + if (info.getSettings().getExpiration() > furthestInFuture) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Tunnel " + id.getTunnelId() + " expires " + + (info.getSettings().getExpiration()-furthestInFuture) + + "ms after any of the existing ones do"); + return true; + } + } + } + if (_log.shouldLog(Log.DEBUG)) + _log.debug("None of the available tunnels expire after the existing lease set's tunnels"); + return false; } /** * Request a new leaseSet based off the currently available safe tunnels */ private void requestNewLeaseSet() { - LeaseSet proposed = buildNewLeaseSet(); - ClientManagerFacade.getInstance().requestLeaseSet(_pool.getDestination(), proposed, REQUEST_LEASE_TIMEOUT, new LeaseSetCreatedJob(), null); + LeaseSet proposed = buildNewLeaseSet(); + _context.clientManager().requestLeaseSet(_pool.getDestination(), proposed, + REQUEST_LEASE_TIMEOUT, new LeaseSetCreatedJob(), + null); } /** * Create a new proposed leaseSet with all inbound tunnels */ private LeaseSet buildNewLeaseSet() { - LeaseSet ls = new LeaseSet(); - TreeMap tunnels = new TreeMap(); - long now = Clock.getInstance().now(); - for (Iterator iter = _pool.getInboundTunnelIds().iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = _pool.getInboundTunnel(id); - - if (!info.getIsReady()) - continue; - long exp = info.getSettings().getExpiration(); - if (now + RECHECK_DELAY + REQUEST_LEASE_TIMEOUT > exp) - continue; - RouterInfo ri = NetworkDatabaseFacade.getInstance().lookupRouterInfoLocally(info.getThisHop()); - if (ri == null) - continue; - - Lease lease = new Lease(); - lease.setEndDate(new Date(exp)); - lease.setRouterIdentity(ri.getIdentity()); - lease.setTunnelId(id); - tunnels.put(new Long(0-exp), lease); - } - - // now pick the N tunnels with the longest time remaining (n = # tunnels the client requested) - // place tunnels.size() - N into the inactive pool - int selected = 0; - int wanted = 
_pool.getClientSettings().getNumInboundTunnels(); - for (Iterator iter = tunnels.values().iterator(); iter.hasNext(); ) { - Lease lease = (Lease)iter.next(); - if (selected < wanted) { - ls.addLease(lease); - selected++; - } else { - _pool.moveToInactive(lease.getTunnelId()); - } - } - ls.setDestination(_pool.getDestination()); - return ls; + LeaseSet ls = new LeaseSet(); + TreeMap tunnels = new TreeMap(); + long now = _context.clock().now(); + for (Iterator iter = _pool.getInboundTunnelIds().iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = _pool.getInboundTunnel(id); + + if (!info.getIsReady()) + continue; + long exp = info.getSettings().getExpiration(); + if (now + RECHECK_DELAY + REQUEST_LEASE_TIMEOUT > exp) + continue; + RouterInfo ri = _context.netDb().lookupRouterInfoLocally(info.getThisHop()); + if (ri == null) + continue; + + Lease lease = new Lease(); + lease.setEndDate(new Date(exp)); + lease.setRouterIdentity(ri.getIdentity()); + lease.setTunnelId(id); + tunnels.put(new Long(0-exp), lease); + } + + // now pick the N tunnels with the longest time remaining (n = # tunnels the client requested) + // place tunnels.size() - N into the inactive pool + int selected = 0; + int wanted = _pool.getClientSettings().getNumInboundTunnels(); + for (Iterator iter = tunnels.values().iterator(); iter.hasNext(); ) { + Lease lease = (Lease)iter.next(); + if (selected < wanted) { + ls.addLease(lease); + selected++; + } else { + _pool.moveToInactive(lease.getTunnelId()); + } + } + ls.setDestination(_pool.getDestination()); + return ls; } private class LeaseSetCreatedJob extends JobImpl { - public LeaseSetCreatedJob() { - super(); - } - public String getName() { return "LeaseSet created"; } - public void runJob() { - LeaseSet ls = NetworkDatabaseFacade.getInstance().lookupLeaseSetLocally(_pool.getDestination().calculateHash()); - if (ls != null) { - _log.info("New leaseSet completely created"); - _lastCreated = 
Clock.getInstance().now(); - _currentLeaseSet = ls; - } else { - _log.error("New lease set created, but not found locally? wtf?!"); - } - } + public LeaseSetCreatedJob() { + super(ClientLeaseSetManagerJob.this._context); + } + public String getName() { return "LeaseSet created"; } + public void runJob() { + RouterContext ctx = ClientLeaseSetManagerJob.this._context; + LeaseSet ls = ctx.netDb().lookupLeaseSetLocally(_pool.getDestination().calculateHash()); + if (ls != null) { + _log.info("New leaseSet completely created"); + _lastCreated = ctx.clock().now(); + _currentLeaseSet = ls; + } else { + _log.error("New lease set created, but not found locally? wtf?!"); + } + } } } diff --git a/router/java/src/net/i2p/router/tunnelmanager/ClientTunnelPool.java b/router/java/src/net/i2p/router/tunnelmanager/ClientTunnelPool.java index fcf284ebf..ead99700d 100644 --- a/router/java/src/net/i2p/router/tunnelmanager/ClientTunnelPool.java +++ b/router/java/src/net/i2p/router/tunnelmanager/ClientTunnelPool.java @@ -15,9 +15,10 @@ import net.i2p.router.Router; import net.i2p.router.TunnelInfo; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; class ClientTunnelPool { - private final static Log _log = new Log(ClientTunnelPool.class); + private Log _log; private Destination _dest; private ClientTunnelSettings _settings; private TunnelPool _pool; @@ -29,87 +30,91 @@ class ClientTunnelPool { private boolean _isStopped; private static int __poolId; private int _poolId; + private RouterContext _context; - public ClientTunnelPool(Destination dest, ClientTunnelSettings settings, TunnelPool pool) { - _dest = dest; - _settings = settings; - _pool = pool; - _inboundTunnels = new HashMap(); - _inactiveInboundTunnels = new HashMap(); - _isStopped = true; - _poolId = ++__poolId; + public ClientTunnelPool(RouterContext ctx, Destination dest, ClientTunnelSettings settings, + TunnelPool pool) { + _context = ctx; + _log = 
ctx.logManager().getLog(ClientTunnelPool.class); + _dest = dest; + _settings = settings; + _pool = pool; + _inboundTunnels = new HashMap(); + _inactiveInboundTunnels = new HashMap(); + _isStopped = true; + _poolId = ++__poolId; } public void startPool() { - if (!_isStopped) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Pool " + _poolId +": Not starting the pool /again/ (its already running)"); - return; - } else { - if (_log.shouldLog(Log.INFO)) - _log.info("Pool " + _poolId +": Starting up the pool "); - } - _isStopped = false; - if (_mgrJob == null) { - _mgrJob = new ClientTunnelPoolManagerJob(_pool, this); - JobQueue.getInstance().addJob(_mgrJob); - } - if (_leaseMgrJob == null) { - _leaseMgrJob = new ClientLeaseSetManagerJob(this); - JobQueue.getInstance().addJob(_leaseMgrJob); - } else { - // we just restarted, so make sure we ask for a new leaseSet ASAP - _leaseMgrJob.forceRequestLease(); - _leaseMgrJob.getTiming().setStartAfter(Clock.getInstance().now()); - JobQueue.getInstance().addJob(_leaseMgrJob); - } - if (_tunnelExpirationJob == null) { - _tunnelExpirationJob = new ClientTunnelPoolExpirationJob(this, _pool); - JobQueue.getInstance().addJob(_tunnelExpirationJob); - } + if (!_isStopped) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Pool " + _poolId +": Not starting the pool /again/ (its already running)"); + return; + } else { + if (_log.shouldLog(Log.INFO)) + _log.info("Pool " + _poolId +": Starting up the pool "); + } + _isStopped = false; + if (_mgrJob == null) { + _mgrJob = new ClientTunnelPoolManagerJob(_context, _pool, this); + _context.jobQueue().addJob(_mgrJob); + } + if (_leaseMgrJob == null) { + _leaseMgrJob = new ClientLeaseSetManagerJob(_context, this); + _context.jobQueue().addJob(_leaseMgrJob); + } else { + // we just restarted, so make sure we ask for a new leaseSet ASAP + _leaseMgrJob.forceRequestLease(); + _leaseMgrJob.getTiming().setStartAfter(_context.clock().now()); + _context.jobQueue().addJob(_leaseMgrJob); + } + if 
(_tunnelExpirationJob == null) { + _tunnelExpirationJob = new ClientTunnelPoolExpirationJob(_context, this, _pool); + _context.jobQueue().addJob(_tunnelExpirationJob); + } } public void stopPool() { _isStopped = true; } public boolean isStopped() { return _isStopped; } public void setClientSettings(ClientTunnelSettings settings) { - _settings = settings; - if (settings != null) { - _log.info("Client settings specified - the client may have reconnected, so restart the pool"); - startPool(); - } + _settings = settings; + if (settings != null) { + _log.info("Client settings specified - the client may have reconnected, so restart the pool"); + startPool(); + } } public ClientTunnelSettings getClientSettings() { return _settings; } public Destination getDestination() { return _dest; } public void moveToInactive(TunnelId id) { - TunnelInfo info = removeInboundTunnel(id); - if (info != null) { - MessageHistory.getInstance().tunnelJoined("inactive inbound", info); - synchronized (_inactiveInboundTunnels) { - _inactiveInboundTunnels.put(id, info); - } - _log.info("Marking tunnel " + id + " as inactive"); - } + TunnelInfo info = removeInboundTunnel(id); + if (info != null) { + _context.messageHistory().tunnelJoined("inactive inbound", info); + synchronized (_inactiveInboundTunnels) { + _inactiveInboundTunnels.put(id, info); + } + _log.info("Marking tunnel " + id + " as inactive"); + } } void setActiveTunnels(Set activeTunnels) { - for (Iterator iter = activeTunnels.iterator(); iter.hasNext(); ) { - TunnelInfo info = (TunnelInfo)iter.next(); - MessageHistory.getInstance().tunnelJoined("active inbound", info); - synchronized (_inboundTunnels) { - _inboundTunnels.put(info.getTunnelId(), info); - } - } + for (Iterator iter = activeTunnels.iterator(); iter.hasNext(); ) { + TunnelInfo info = (TunnelInfo)iter.next(); + _context.messageHistory().tunnelJoined("active inbound", info); + synchronized (_inboundTunnels) { + _inboundTunnels.put(info.getTunnelId(), info); + } + } } void 
setInactiveTunnels(Set inactiveTunnels) { - for (Iterator iter = inactiveTunnels.iterator(); iter.hasNext(); ) { - TunnelInfo info = (TunnelInfo)iter.next(); - MessageHistory.getInstance().tunnelJoined("inactive inbound", info); - synchronized (_inactiveInboundTunnels) { - _inactiveInboundTunnels.put(info.getTunnelId(), info); - } - } + for (Iterator iter = inactiveTunnels.iterator(); iter.hasNext(); ) { + TunnelInfo info = (TunnelInfo)iter.next(); + _context.messageHistory().tunnelJoined("inactive inbound", info); + synchronized (_inactiveInboundTunnels) { + _inactiveInboundTunnels.put(info.getTunnelId(), info); + } + } } /** @@ -118,7 +123,7 @@ class ClientTunnelPool { * */ public int getSafePoolSize() { - return getSafePoolSize(0); + return getSafePoolSize(0); } /** * Get the safe # pools at some point in the future @@ -126,15 +131,15 @@ class ClientTunnelPool { * @param futureMs number of milliseconds in the future that we want to check safety for */ public int getSafePoolSize(long futureMs) { - int numSafe = 0; - long expireAfter = Clock.getInstance().now() + Router.CLOCK_FUDGE_FACTOR + futureMs; - for (Iterator iter = getInboundTunnelIds().iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = getInboundTunnel(id); - if ( (info != null) && (info.getIsReady()) && (info.getSettings().getExpiration() > expireAfter) ) - numSafe++; - } - return numSafe; + int numSafe = 0; + long expireAfter = _context.clock().now() + Router.CLOCK_FUDGE_FACTOR + futureMs; + for (Iterator iter = getInboundTunnelIds().iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = getInboundTunnel(id); + if ( (info != null) && (info.getIsReady()) && (info.getSettings().getExpiration() > expireAfter) ) + numSafe++; + } + return numSafe; } /** @@ -142,50 +147,50 @@ class ClientTunnelPool { * */ public Set getInboundTunnelIds() { - synchronized (_inboundTunnels) { - return new HashSet(_inboundTunnels.keySet()); - } + 
synchronized (_inboundTunnels) { + return new HashSet(_inboundTunnels.keySet()); + } } public boolean isInboundTunnel(TunnelId id) { - synchronized (_inboundTunnels) { - return _inboundTunnels.containsKey(id); - } + synchronized (_inboundTunnels) { + return _inboundTunnels.containsKey(id); + } } public TunnelInfo getInboundTunnel(TunnelId id) { - synchronized (_inboundTunnels) { - return (TunnelInfo)_inboundTunnels.get(id); - } + synchronized (_inboundTunnels) { + return (TunnelInfo)_inboundTunnels.get(id); + } } public void addInboundTunnel(TunnelInfo tunnel) { - MessageHistory.getInstance().tunnelJoined("active inbound", tunnel); - synchronized (_inboundTunnels) { - _inboundTunnels.put(tunnel.getTunnelId(), tunnel); - } + _context.messageHistory().tunnelJoined("active inbound", tunnel); + synchronized (_inboundTunnels) { + _inboundTunnels.put(tunnel.getTunnelId(), tunnel); + } } public TunnelInfo removeInboundTunnel(TunnelId id) { - synchronized (_inboundTunnels) { - return (TunnelInfo)_inboundTunnels.remove(id); - } + synchronized (_inboundTunnels) { + return (TunnelInfo)_inboundTunnels.remove(id); + } } public Set getInactiveInboundTunnelIds() { - synchronized (_inactiveInboundTunnels) { - return new HashSet(_inactiveInboundTunnels.keySet()); - } + synchronized (_inactiveInboundTunnels) { + return new HashSet(_inactiveInboundTunnels.keySet()); + } } public boolean isInactiveInboundTunnel(TunnelId id) { - synchronized (_inactiveInboundTunnels) { - return _inactiveInboundTunnels.containsKey(id); - } + synchronized (_inactiveInboundTunnels) { + return _inactiveInboundTunnels.containsKey(id); + } } public TunnelInfo getInactiveInboundTunnel(TunnelId id) { - synchronized (_inactiveInboundTunnels) { - return (TunnelInfo)_inactiveInboundTunnels.get(id); - } + synchronized (_inactiveInboundTunnels) { + return (TunnelInfo)_inactiveInboundTunnels.get(id); + } } public TunnelInfo removeInactiveInboundTunnel(TunnelId id) { - synchronized (_inactiveInboundTunnels) { - 
return (TunnelInfo)_inactiveInboundTunnels.remove(id); - } + synchronized (_inactiveInboundTunnels) { + return (TunnelInfo)_inactiveInboundTunnels.remove(id); + } } } diff --git a/router/java/src/net/i2p/router/tunnelmanager/ClientTunnelPoolExpirationJob.java b/router/java/src/net/i2p/router/tunnelmanager/ClientTunnelPoolExpirationJob.java index 0c9026fd4..01b8ed5bf 100644 --- a/router/java/src/net/i2p/router/tunnelmanager/ClientTunnelPoolExpirationJob.java +++ b/router/java/src/net/i2p/router/tunnelmanager/ClientTunnelPoolExpirationJob.java @@ -9,6 +9,7 @@ import net.i2p.router.Router; import net.i2p.router.TunnelInfo; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Periodically go through all of the tunnels assigned to this client and mark @@ -16,7 +17,7 @@ import net.i2p.util.Log; * */ class ClientTunnelPoolExpirationJob extends JobImpl { - private final static Log _log = new Log(ClientTunnelPoolExpirationJob.class); + private Log _log; private ClientTunnelPool _pool; private TunnelPool _tunnelPool; @@ -28,31 +29,35 @@ class ClientTunnelPoolExpirationJob extends JobImpl { */ private final static long EXPIRE_BUFFER = 30*1000; - public ClientTunnelPoolExpirationJob(ClientTunnelPool pool, TunnelPool tunnelPool) { - super(); - _pool = pool; - _tunnelPool = tunnelPool; - getTiming().setStartAfter(Clock.getInstance().now() + EXPIRE_POOL_DELAY); + public ClientTunnelPoolExpirationJob(RouterContext context, ClientTunnelPool pool, TunnelPool tunnelPool) { + super(context); + _log = context.logManager().getLog(ClientTunnelPoolExpirationJob.class); + _pool = pool; + _tunnelPool = tunnelPool; + getTiming().setStartAfter(_context.clock().now() + EXPIRE_POOL_DELAY); } public String getName() { return "Expire Pooled Client Tunnels"; } public void runJob() { - if (_pool.isStopped()) { - if ( (_pool.getInactiveInboundTunnelIds().size() <= 0) && - (_pool.getInboundTunnelIds().size() <= 0) ) { - // this may get called twice - once 
here, and once by the ClientTunnelPoolManagerJob - // but its safe to do, and passing around messages would be overkill. - _tunnelPool.removeClientPool(_pool.getDestination()); - _log.info("No more tunnels to expire in the client tunnel pool for the stopped client " + _pool.getDestination().calculateHash()); - return; - } else { - _log.info("Client " + _pool.getDestination().calculateHash() + " is stopped, but they still have some tunnels, so don't stop expiring"); - } - } - - expireInactiveTunnels(); - expireActiveTunnels(); - - requeue(EXPIRE_POOL_DELAY); + if (_pool.isStopped()) { + if ( (_pool.getInactiveInboundTunnelIds().size() <= 0) && + (_pool.getInboundTunnelIds().size() <= 0) ) { + // this may get called twice - once here, and once by the ClientTunnelPoolManagerJob + // but its safe to do, and passing around messages would be overkill. + _tunnelPool.removeClientPool(_pool.getDestination()); + if (_log.shouldLog(Log.INFO)) + _log.info("No more tunnels to expire in the client tunnel pool for the stopped client " + _pool.getDestination().calculateHash()); + return; + } else { + if (_log.shouldLog(Log.INFO)) + _log.info("Client " + _pool.getDestination().calculateHash() + + " is stopped, but they still have some tunnels, so don't stop expiring"); + } + } + + expireInactiveTunnels(); + expireActiveTunnels(); + + requeue(EXPIRE_POOL_DELAY); } /** @@ -61,22 +66,25 @@ class ClientTunnelPoolExpirationJob extends JobImpl { * */ public void expireInactiveTunnels() { - long now = Clock.getInstance().now(); - long expire = now - EXPIRE_BUFFER - 2*Router.CLOCK_FUDGE_FACTOR; - - for (Iterator iter = _pool.getInactiveInboundTunnelIds().iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = _pool.getInactiveInboundTunnel(id); - if ( (info != null) && (info.getSettings() != null) ) { - if (info.getSettings().getExpiration() < expire) { - _log.info("Expiring inactive tunnel " + id + " [" + new Date(info.getSettings().getExpiration()) + 
"]"); - _pool.removeInactiveInboundTunnel(id); - } else if (info.getSettings().getExpiration() < now) { - _log.info("It is past the expiration for inactive tunnel " + id + " but not yet the buffer, mark it as no longer ready"); - info.setIsReady(false); - } - } - } + long now = _context.clock().now(); + long expire = now - EXPIRE_BUFFER - 2*Router.CLOCK_FUDGE_FACTOR; + + for (Iterator iter = _pool.getInactiveInboundTunnelIds().iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = _pool.getInactiveInboundTunnel(id); + if ( (info != null) && (info.getSettings() != null) ) { + if (info.getSettings().getExpiration() < expire) { + if (_log.shouldLog(Log.INFO)) + _log.info("Expiring inactive tunnel " + id + " [" + + new Date(info.getSettings().getExpiration()) + "]"); + _pool.removeInactiveInboundTunnel(id); + } else if (info.getSettings().getExpiration() < now) { + _log.info("It is past the expiration for inactive tunnel " + id + + " but not yet the buffer, mark it as no longer ready"); + info.setIsReady(false); + } + } + } } /** @@ -85,22 +93,25 @@ class ClientTunnelPoolExpirationJob extends JobImpl { * */ public void expireActiveTunnels() { - long now = Clock.getInstance().now(); - long expire = now - EXPIRE_BUFFER - 2*Router.CLOCK_FUDGE_FACTOR; - - for (Iterator iter = _pool.getInboundTunnelIds().iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = _pool.getInboundTunnel(id); - if ( (info != null) && (info.getSettings() != null) ) { - if (info.getSettings().getExpiration() < expire) { - _log.info("Expiring active tunnel " + id + " [" + new Date(info.getSettings().getExpiration()) + "]"); - _pool.removeInboundTunnel(id); - } else if (info.getSettings().getExpiration() < now) { - _log.info("It is past the expiration for active tunnel " + id + " but not yet the buffer, mark it as no longer ready"); - info.setIsReady(false); - } - } - } + long now = _context.clock().now(); + long expire = now - 
EXPIRE_BUFFER - 2*Router.CLOCK_FUDGE_FACTOR; + + for (Iterator iter = _pool.getInboundTunnelIds().iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = _pool.getInboundTunnel(id); + if ( (info != null) && (info.getSettings() != null) ) { + if (info.getSettings().getExpiration() < expire) { + if (_log.shouldLog(Log.INFO)) + _log.info("Expiring active tunnel " + id + " [" + + new Date(info.getSettings().getExpiration()) + "]"); + _pool.removeInboundTunnel(id); + } else if (info.getSettings().getExpiration() < now) { + if (_log.shouldLog(Log.INFO)) + _log.info("It is past the expiration for active tunnel " + id + + " but not yet the buffer, mark it as no longer ready"); + info.setIsReady(false); + } + } + } } - } diff --git a/router/java/src/net/i2p/router/tunnelmanager/ClientTunnelPoolManagerJob.java b/router/java/src/net/i2p/router/tunnelmanager/ClientTunnelPoolManagerJob.java index 358d852ef..54a86f322 100644 --- a/router/java/src/net/i2p/router/tunnelmanager/ClientTunnelPoolManagerJob.java +++ b/router/java/src/net/i2p/router/tunnelmanager/ClientTunnelPoolManagerJob.java @@ -12,6 +12,7 @@ import net.i2p.router.JobQueue; import net.i2p.router.TunnelInfo; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * refill the client tunnel pool as necessary, either from the TunnelPool's free @@ -19,17 +20,20 @@ import net.i2p.util.Log; * */ class ClientTunnelPoolManagerJob extends JobImpl { - private final static Log _log = new Log(ClientTunnelPoolManagerJob.class); + private Log _log; private ClientTunnelPool _clientPool; private TunnelPool _tunnelPool; + private TunnelBuilder _tunnelBuilder; /** check the pool every 30 seconds to make sure it has enough tunnels */ private final static long POOL_CHECK_DELAY = 30*1000; - public ClientTunnelPoolManagerJob(TunnelPool pool, ClientTunnelPool clientPool) { - super(); + public ClientTunnelPoolManagerJob(RouterContext ctx, TunnelPool pool, 
ClientTunnelPool clientPool) { + super(ctx); + _log = ctx.logManager().getLog(ClientTunnelPoolManagerJob.class); _clientPool = clientPool; _tunnelPool = pool; + _tunnelBuilder = new TunnelBuilder(ctx); } public String getName() { return "Manage Client Tunnel Pool"; } public void runJob() { @@ -39,7 +43,7 @@ class ClientTunnelPoolManagerJob extends JobImpl { return; } - if (!ClientManagerFacade.getInstance().isLocal(_clientPool.getDestination())) { + if (!_context.clientManager().isLocal(_clientPool.getDestination())) { if (_log.shouldLog(Log.INFO)) _log.info("Client " + _clientPool.getDestination().calculateHash() + " is no longer connected, stop the pool"); @@ -62,7 +66,7 @@ class ClientTunnelPoolManagerJob extends JobImpl { * The pool is stopped, so lets see if we should keep doing anything */ private void handleStopped() { - if (ClientManagerFacade.getInstance().isLocal(_clientPool.getDestination())) { + if (_context.clientManager().isLocal(_clientPool.getDestination())) { // it was stopped, but they've reconnected, so boot 'er up again if (_log.shouldLog(Log.INFO)) _log.info("Client " + _clientPool.getDestination().calculateHash().toBase64() @@ -168,7 +172,7 @@ class ClientTunnelPoolManagerJob extends JobImpl { return false; } - long expireAfter = Clock.getInstance().now() + POOL_CHECK_DELAY + _tunnelPool.getTunnelCreationTimeout()*2; + long expireAfter = _context.clock().now() + POOL_CHECK_DELAY + _tunnelPool.getTunnelCreationTimeout()*2; if (info.getSettings().getExpiration() <= expireAfter) { if (_log.shouldLog(Log.DEBUG)) _log.debug("Refusing tunnel " + info.getTunnelId() + " because it is going to expire soon"); @@ -229,7 +233,7 @@ class ClientTunnelPoolManagerJob extends JobImpl { */ private void requestCustomTunnels(int numTunnels) { for (int i = 0; i < numTunnels; i++) { - JobQueue.getInstance().addJob(new RequestCustomTunnelJob()); + _context.jobQueue().addJob(new RequestCustomTunnelJob()); } } @@ -239,11 +243,14 @@ class ClientTunnelPoolManagerJob 
extends JobImpl { * */ private class RequestCustomTunnelJob extends JobImpl { + public RequestCustomTunnelJob() { + super(ClientTunnelPoolManagerJob.this._context); + } public String getName() { return "Request Custom Client Tunnel"; } public void runJob() { - TunnelInfo tunnelGateway = TunnelBuilder.getInstance().configureInboundTunnel(_clientPool.getDestination(), _clientPool.getClientSettings()); - RequestTunnelJob reqJob = new RequestTunnelJob(_tunnelPool, tunnelGateway, true, _tunnelPool.getTunnelCreationTimeout()); - JobQueue.getInstance().addJob(reqJob); + TunnelInfo tunnelGateway = _tunnelBuilder.configureInboundTunnel(_clientPool.getDestination(), _clientPool.getClientSettings()); + RequestTunnelJob reqJob = new RequestTunnelJob(RequestCustomTunnelJob.this._context, _tunnelPool, tunnelGateway, true, _tunnelPool.getTunnelCreationTimeout()); + RequestCustomTunnelJob.this._context.jobQueue().addJob(reqJob); } } } diff --git a/router/java/src/net/i2p/router/tunnelmanager/HandleTunnelCreateMessageJob.java b/router/java/src/net/i2p/router/tunnelmanager/HandleTunnelCreateMessageJob.java index 27a314bf2..e5b767d7b 100644 --- a/router/java/src/net/i2p/router/tunnelmanager/HandleTunnelCreateMessageJob.java +++ b/router/java/src/net/i2p/router/tunnelmanager/HandleTunnelCreateMessageJob.java @@ -29,9 +29,10 @@ import net.i2p.router.message.BuildTestMessageJob; import net.i2p.router.message.SendReplyMessageJob; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; public class HandleTunnelCreateMessageJob extends JobImpl { - private final static Log _log = new Log(HandleTunnelCreateMessageJob.class); + private Log _log; private TunnelCreateMessage _message; private RouterIdentity _from; private Hash _fromHash; @@ -40,7 +41,10 @@ public class HandleTunnelCreateMessageJob extends JobImpl { private final static long TIMEOUT = 30*1000; // 30 secs to contact a peer that will be our next hop private final static int PRIORITY = 123; - 
HandleTunnelCreateMessageJob(TunnelCreateMessage receivedMessage, RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock) { + HandleTunnelCreateMessageJob(RouterContext ctx, TunnelCreateMessage receivedMessage, + RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock) { + super(ctx); + _log = ctx.logManager().getLog(HandleTunnelCreateMessageJob.class); _message = receivedMessage; _from = from; _fromHash = fromHash; @@ -49,23 +53,23 @@ public class HandleTunnelCreateMessageJob extends JobImpl { public void runJob() { if (_log.shouldLog(Log.DEBUG)) _log.debug("Handling tunnel create"); - TunnelInfo info = new TunnelInfo(); + TunnelInfo info = new TunnelInfo(_context); info.setConfigurationKey(_message.getConfigurationKey()); info.setEncryptionKey(_message.getTunnelKey()); info.setNextHop(_message.getNextRouter()); - TunnelSettings settings = new TunnelSettings(); + TunnelSettings settings = new TunnelSettings(_context); settings.setBytesPerMinuteAverage(_message.getMaxAvgBytesPerMin()); settings.setBytesPerMinutePeak(_message.getMaxPeakBytesPerMin()); settings.setMessagesPerMinuteAverage(_message.getMaxAvgMessagesPerMin()); settings.setMessagesPerMinutePeak(_message.getMaxPeakMessagesPerMin()); - settings.setExpiration(_message.getTunnelDurationSeconds()*1000+Clock.getInstance().now()); + settings.setExpiration(_message.getTunnelDurationSeconds()*1000+_context.clock().now()); settings.setIncludeDummy(_message.getIncludeDummyTraffic()); settings.setReorder(_message.getReorderMessages()); info.setSettings(settings); info.setSigningKey(_message.getVerificationPrivateKey()); - info.setThisHop(Router.getInstance().getRouterInfo().getIdentity().getHash()); + info.setThisHop(_context.routerHash()); info.setTunnelId(_message.getTunnelId()); info.setVerificationKey(_message.getVerificationPublicKey()); @@ -73,22 +77,23 @@ public class HandleTunnelCreateMessageJob extends JobImpl { if (_message.getNextRouter() == null) { if (_log.shouldLog(Log.DEBUG)) 
_log.debug("We're the endpoint, don't test the \"next\" peer [duh]"); - boolean ok = TunnelManagerFacade.getInstance().joinTunnel(info); + boolean ok = _context.tunnelManager().joinTunnel(info); sendReply(ok); } else { - NetworkDatabaseFacade.getInstance().lookupRouterInfo(info.getNextHop(), new TestJob(info), new JoinJob(info, false), TIMEOUT); + _context.netDb().lookupRouterInfo(info.getNextHop(), new TestJob(info), new JoinJob(info, false), TIMEOUT); } } private class TestJob extends JobImpl { private TunnelInfo _target; public TestJob(TunnelInfo target) { + super(HandleTunnelCreateMessageJob.this._context); _target = target; } public String getName() { return "Run a test for peer reachability"; } public void runJob() { - RouterInfo info = NetworkDatabaseFacade.getInstance().lookupRouterInfoLocally(_target.getNextHop()); + RouterInfo info = TestJob.this._context.netDb().lookupRouterInfoLocally(_target.getNextHop()); if (info == null) { if (_log.shouldLog(Log.ERROR)) _log.error("Error - unable to look up peer " + _target.toBase64() + ", even though we were queued up via onSuccess??"); @@ -96,11 +101,11 @@ public class HandleTunnelCreateMessageJob extends JobImpl { } else { if (_log.shouldLog(Log.INFO)) _log.info("Lookup successful for tested peer " + _target.toBase64() + ", now continue with the test"); - Hash peer = Router.getInstance().getRouterInfo().getIdentity().getHash(); + Hash peer = TestJob.this._context.routerHash(); JoinJob success = new JoinJob(_target, true); JoinJob failure = new JoinJob(_target, false); - BuildTestMessageJob test = new BuildTestMessageJob(info, peer, success, failure, TIMEOUT, PRIORITY); - JobQueue.getInstance().addJob(test); + BuildTestMessageJob test = new BuildTestMessageJob(TestJob.this._context, info, peer, success, failure, TIMEOUT, PRIORITY); + TestJob.this._context.jobQueue().addJob(test); } } } @@ -111,12 +116,12 @@ public class HandleTunnelCreateMessageJob extends JobImpl { _log.debug("Sending reply to a tunnel create of 
id " + _message.getTunnelId() + " with ok (" + ok + ") to router " + _message.getReplyBlock().getRouter().toBase64()); - MessageHistory.getInstance().receiveTunnelCreate(_message.getTunnelId(), _message.getNextRouter(), - new Date(Clock.getInstance().now() + 1000*_message.getTunnelDurationSeconds()), - ok, _message.getReplyBlock().getRouter()); + _context.messageHistory().receiveTunnelCreate(_message.getTunnelId(), _message.getNextRouter(), + new Date(_context.clock().now() + 1000*_message.getTunnelDurationSeconds()), + ok, _message.getReplyBlock().getRouter()); - TunnelCreateStatusMessage msg = new TunnelCreateStatusMessage(); - msg.setFromHash(Router.getInstance().getRouterInfo().getIdentity().getHash()); + TunnelCreateStatusMessage msg = new TunnelCreateStatusMessage(_context); + msg.setFromHash(_context.routerHash()); msg.setTunnelId(_message.getTunnelId()); if (ok) { msg.setStatus(TunnelCreateStatusMessage.STATUS_SUCCESS); @@ -124,9 +129,9 @@ public class HandleTunnelCreateMessageJob extends JobImpl { // since we don't actually check anything, this is a catch all msg.setStatus(TunnelCreateStatusMessage.STATUS_FAILED_OVERLOADED); } - msg.setMessageExpiration(new Date(Clock.getInstance().now()+60*1000)); - SendReplyMessageJob job = new SendReplyMessageJob(_message.getReplyBlock(), msg, PRIORITY); - JobQueue.getInstance().addJob(job); + msg.setMessageExpiration(new Date(_context.clock().now()+60*1000)); + SendReplyMessageJob job = new SendReplyMessageJob(_context, _message.getReplyBlock(), msg, PRIORITY); + _context.jobQueue().addJob(job); } public String getName() { return "Handle Tunnel Create Message"; } @@ -135,23 +140,24 @@ public class HandleTunnelCreateMessageJob extends JobImpl { private TunnelInfo _info; private boolean _isReachable; public JoinJob(TunnelInfo info, boolean isReachable) { + super(HandleTunnelCreateMessageJob.this._context); _info = info; _isReachable = isReachable; } public void runJob() { if (!_isReachable) { - long before = 
Clock.getInstance().now(); + long before = JoinJob.this._context.clock().now(); sendReply(false); - long after = Clock.getInstance().now(); + long after = JoinJob.this._context.clock().now(); if (_log.shouldLog(Log.DEBUG)) _log.debug("JoinJob .refuse took " + (after-before) + "ms to refuse " + _info); } else { - long before = Clock.getInstance().now(); - boolean ok = TunnelManagerFacade.getInstance().joinTunnel(_info); - long afterJoin = Clock.getInstance().now(); + long before = JoinJob.this._context.clock().now(); + boolean ok = JoinJob.this._context.tunnelManager().joinTunnel(_info); + long afterJoin = JoinJob.this._context.clock().now(); sendReply(ok); - long after = Clock.getInstance().now(); + long after = JoinJob.this._context.clock().now(); if (_log.shouldLog(Log.DEBUG)) _log.debug("JoinJob .joinTunnel took " + (afterJoin-before) + "ms and sendReply took " + (after-afterJoin) + "ms"); } @@ -160,6 +166,8 @@ public class HandleTunnelCreateMessageJob extends JobImpl { } public void dropped() { - MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Dropped due to overload"); + _context.messageHistory().messageProcessingError(_message.getUniqueId(), + _message.getClass().getName(), + "Dropped due to overload"); } } diff --git a/router/java/src/net/i2p/router/tunnelmanager/PoolingTunnelManagerFacade.java b/router/java/src/net/i2p/router/tunnelmanager/PoolingTunnelManagerFacade.java index 357c38fd5..561a5d301 100644 --- a/router/java/src/net/i2p/router/tunnelmanager/PoolingTunnelManagerFacade.java +++ b/router/java/src/net/i2p/router/tunnelmanager/PoolingTunnelManagerFacade.java @@ -17,32 +17,35 @@ import net.i2p.router.TunnelSelectionCriteria; import net.i2p.stat.StatManager; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Main interface to the pool * */ -public class PoolingTunnelManagerFacade extends TunnelManagerFacade { - private final static Log _log = new 
Log(PoolingTunnelManagerFacade.class); +public class PoolingTunnelManagerFacade implements TunnelManagerFacade { + private Log _log; private TunnelPool _pool; private TunnelTestManager _testManager; + private RouterContext _context; + private PoolingTunnelSelector _selector; - static { - StatManager.getInstance().createFrequencyStat("tunnel.acceptRequestFrequency", "How often do we accept requests to join a tunnel?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createFrequencyStat("tunnel.rejectRequestFrequency", "How often do we reject requests to join a tunnel?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createRateStat("tunnel.participatingTunnels", "How many tunnels are we participating in?", "Tunnels", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - } - - public PoolingTunnelManagerFacade() { - super(); - InNetMessagePool.getInstance().registerHandlerJobBuilder(TunnelCreateMessage.MESSAGE_TYPE, new TunnelCreateMessageHandler()); + public PoolingTunnelManagerFacade(RouterContext context) { + if (context == null) throw new IllegalArgumentException("Null routerContext is not supported"); + _context = context; + _log = context.logManager().getLog(PoolingTunnelManagerFacade.class); + _context.statManager().createFrequencyStat("tunnel.acceptRequestFrequency", "How often do we accept requests to join a tunnel?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createFrequencyStat("tunnel.rejectRequestFrequency", "How often do we reject requests to join a tunnel?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createRateStat("tunnel.participatingTunnels", "How many tunnels are we participating in?", "Tunnels", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.inNetMessagePool().registerHandlerJobBuilder(TunnelCreateMessage.MESSAGE_TYPE, new 
TunnelCreateMessageHandler(_context)); + _selector = new PoolingTunnelSelector(context); } public void startup() { if (_pool == null) - _pool = new TunnelPool(); + _pool = new TunnelPool(_context); _pool.startup(); - _testManager = new TunnelTestManager(_pool); + _testManager = new TunnelTestManager(_context, _pool); } public void shutdown() { @@ -60,27 +63,27 @@ public class PoolingTunnelManagerFacade extends TunnelManagerFacade { if (info == null) { if (_log.shouldLog(Log.ERROR)) _log.error("Null tunnel", new Exception("Null tunnel")); - StatManager.getInstance().updateFrequency("tunnel.rejectRequestFrequency"); + _context.statManager().updateFrequency("tunnel.rejectRequestFrequency"); return false; } if (info.getSettings() == null) { if (_log.shouldLog(Log.ERROR)) _log.error("Null settings!", new Exception("settings are null")); - StatManager.getInstance().updateFrequency("tunnel.rejectRequestFrequency"); + _context.statManager().updateFrequency("tunnel.rejectRequestFrequency"); return false; } if (info.getSettings().getExpiration() == 0) { if (_log.shouldLog(Log.INFO)) _log.info("No expiration for tunnel " + info.getTunnelId().getTunnelId(), new Exception("No expiration")); - StatManager.getInstance().updateFrequency("tunnel.rejectRequestFrequency"); + _context.statManager().updateFrequency("tunnel.rejectRequestFrequency"); return false; } else { - if (info.getSettings().getExpiration() < Clock.getInstance().now()) { + if (info.getSettings().getExpiration() < _context.clock().now()) { if (_log.shouldLog(Log.WARN)) _log.warn("Already expired - " + new Date(info.getSettings().getExpiration()), new Exception("Already expired")); - StatManager.getInstance().updateFrequency("tunnel.rejectRequestFrequency"); + _context.statManager().updateFrequency("tunnel.rejectRequestFrequency"); return false; } } @@ -89,10 +92,10 @@ public class PoolingTunnelManagerFacade extends TunnelManagerFacade { _log.debug("Joining tunnel: " + info); boolean ok = 
_pool.addParticipatingTunnel(info); if (!ok) - StatManager.getInstance().updateFrequency("tunnel.rejectRequestFrequency"); + _context.statManager().updateFrequency("tunnel.rejectRequestFrequency"); else - StatManager.getInstance().updateFrequency("tunnel.acceptRequestFrequency"); - StatManager.getInstance().addRateData("tunnel.participatingTunnels", _pool.getParticipatingTunnelCount(), 0); + _context.statManager().updateFrequency("tunnel.acceptRequestFrequency"); + _context.statManager().addRateData("tunnel.participatingTunnels", _pool.getParticipatingTunnelCount(), 0); return ok; } /** @@ -106,13 +109,13 @@ public class PoolingTunnelManagerFacade extends TunnelManagerFacade { * Retrieve a set of tunnels from the existing ones for various purposes */ public List selectOutboundTunnelIds(TunnelSelectionCriteria criteria) { - return PoolingTunnelSelector.selectOutboundTunnelIds(_pool, criteria); + return _selector.selectOutboundTunnelIds(_pool, criteria); } /** * Retrieve a set of tunnels from the existing ones for various purposes */ public List selectInboundTunnelIds(TunnelSelectionCriteria criteria) { - return PoolingTunnelSelector.selectInboundTunnelIds(_pool, criteria); + return _selector.selectInboundTunnelIds(_pool, criteria); } /** @@ -146,8 +149,8 @@ public class PoolingTunnelManagerFacade extends TunnelManagerFacade { info.setIsReady(false); numFailed++; - long lifetime = Clock.getInstance().now() - info.getCreated(); - StatManager.getInstance().addRateData("tunnel.failAfterTime", lifetime, lifetime); + long lifetime = _context.clock().now() - info.getCreated(); + _context.statManager().addRateData("tunnel.failAfterTime", lifetime, lifetime); } } diff --git a/router/java/src/net/i2p/router/tunnelmanager/PoolingTunnelSelector.java b/router/java/src/net/i2p/router/tunnelmanager/PoolingTunnelSelector.java index c64630928..1434386df 100644 --- a/router/java/src/net/i2p/router/tunnelmanager/PoolingTunnelSelector.java +++ 
b/router/java/src/net/i2p/router/tunnelmanager/PoolingTunnelSelector.java @@ -14,77 +14,84 @@ import net.i2p.router.TunnelSelectionCriteria; import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.router.RouterContext; /** * Implement the tunnel selection algorithms * */ class PoolingTunnelSelector { - private final static Log _log = new Log(PoolingTunnelSelector.class); + private Log _log; + private RouterContext _context; /** don't use a tunnel thats about to expire */ private static long POOL_USE_SAFETY_MARGIN = 10*1000; - public static List selectOutboundTunnelIds(TunnelPool pool, TunnelSelectionCriteria criteria) { - List tunnelIds = new LinkedList(); - - for (int i = pool.getOutboundTunnelCount(); i < criteria.getMinimumTunnelsRequired(); i++) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Building fake tunnels because the outbound tunnels weren't sufficient"); - pool.buildFakeTunnels(); - } - - Set outIds = pool.getOutboundTunnels(); - for (Iterator iter = outIds.iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = pool.getOutboundTunnel(id); - if ( (info != null) && (info.getIsReady()) ) { - tunnelIds.add(id); - } else { - if (info == null) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Outbound tunnel " + id + " was not found?! expire race perhaps?"); - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Outbound tunnel " + id + " was not ready?! 
" + new Date(info.getSettings().getExpiration())); - } - } - } - List ordered = randomize(pool, tunnelIds); - List rv = new ArrayList(criteria.getMinimumTunnelsRequired()); - for (Iterator iter = ordered.iterator(); iter.hasNext() && (rv.size() < criteria.getMinimumTunnelsRequired()); ) { - rv.add(iter.next()); - } - _log.info("Selecting outbound tunnelIds [all outbound tunnels: " + outIds.size() + ", tunnelIds ready: " + ordered.size() + ", rv: " + rv + "]"); - return rv; + public PoolingTunnelSelector(RouterContext context) { + _context = context; + _log = context.logManager().getLog(PoolingTunnelSelector.class); } - public static List selectInboundTunnelIds(TunnelPool pool, TunnelSelectionCriteria criteria) { - List tunnels = new LinkedList(); - - for (int i = pool.getFreeTunnelCount(); i < criteria.getMinimumTunnelsRequired(); i++) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Building fake tunnels because the inbound tunnels weren't sufficient"); - pool.buildFakeTunnels(); - } - - for (Iterator iter = pool.getFreeTunnels().iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = pool.getFreeTunnel(id); - if (info == null) continue; - if (info.getIsReady()) { - tunnels.add(id); - } else { - _log.debug("Inbound tunnel " + id + " is not ready?! 
" + new Date(info.getSettings().getExpiration())); - } - } - - List ordered = randomize(pool, tunnels); - List rv = new ArrayList(criteria.getMinimumTunnelsRequired()); - for (Iterator iter = ordered.iterator(); iter.hasNext() && (rv.size() < criteria.getMinimumTunnelsRequired()); ) { - rv.add(iter.next()); - } - _log.info("Selecting inbound tunnelIds [tunnelIds ready: " + tunnels.size() + ", rv: " + rv + "]"); - return rv; + public List selectOutboundTunnelIds(TunnelPool pool, TunnelSelectionCriteria criteria) { + List tunnelIds = new LinkedList(); + + for (int i = pool.getOutboundTunnelCount(); i < criteria.getMinimumTunnelsRequired(); i++) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Building fake tunnels because the outbound tunnels weren't sufficient"); + pool.buildFakeTunnels(); + } + + Set outIds = pool.getOutboundTunnels(); + for (Iterator iter = outIds.iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = pool.getOutboundTunnel(id); + if ( (info != null) && (info.getIsReady()) ) { + tunnelIds.add(id); + } else { + if (info == null) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Outbound tunnel " + id + " was not found?! expire race perhaps?"); + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Outbound tunnel " + id + " was not ready?! 
" + new Date(info.getSettings().getExpiration())); + } + } + } + List ordered = randomize(pool, tunnelIds); + List rv = new ArrayList(criteria.getMinimumTunnelsRequired()); + for (Iterator iter = ordered.iterator(); iter.hasNext() && (rv.size() < criteria.getMinimumTunnelsRequired()); ) { + rv.add(iter.next()); + } + _log.info("Selecting outbound tunnelIds [all outbound tunnels: " + outIds.size() + ", tunnelIds ready: " + ordered.size() + ", rv: " + rv + "]"); + return rv; + } + + public List selectInboundTunnelIds(TunnelPool pool, TunnelSelectionCriteria criteria) { + List tunnels = new LinkedList(); + + for (int i = pool.getFreeTunnelCount(); i < criteria.getMinimumTunnelsRequired(); i++) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Building fake tunnels because the inbound tunnels weren't sufficient"); + pool.buildFakeTunnels(); + } + + for (Iterator iter = pool.getFreeTunnels().iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = pool.getFreeTunnel(id); + if (info == null) continue; + if (info.getIsReady()) { + tunnels.add(id); + } else { + _log.debug("Inbound tunnel " + id + " is not ready?! 
" + new Date(info.getSettings().getExpiration())); + } + } + + List ordered = randomize(pool, tunnels); + List rv = new ArrayList(criteria.getMinimumTunnelsRequired()); + for (Iterator iter = ordered.iterator(); iter.hasNext() && (rv.size() < criteria.getMinimumTunnelsRequired()); ) { + rv.add(iter.next()); + } + _log.info("Selecting inbound tunnelIds [tunnelIds ready: " + tunnels.size() + ", rv: " + rv + "]"); + return rv; } //// @@ -92,28 +99,28 @@ class PoolingTunnelSelector { //// - private final static List randomize(TunnelPool pool, List tunnelIds) { - List rv = new ArrayList(tunnelIds.size()); - for (Iterator iter = tunnelIds.iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - if (isAlmostExpired(pool, id, POOL_USE_SAFETY_MARGIN)) - continue; - rv.add(id); - } - Collections.shuffle(rv, RandomSource.getInstance()); - return rv; + private List randomize(TunnelPool pool, List tunnelIds) { + List rv = new ArrayList(tunnelIds.size()); + for (Iterator iter = tunnelIds.iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + if (isAlmostExpired(pool, id, POOL_USE_SAFETY_MARGIN)) + continue; + rv.add(id); + } + Collections.shuffle(rv, _context.random()); + return rv; } - private final static boolean isAlmostExpired(TunnelPool pool, TunnelId id, long safetyMargin) { - TunnelInfo info = pool.getTunnelInfo(id); - if (info == null) return true; - if (info.getSettings() == null) return true; - if (info.getSettings().getExpiration() <= 0) return true; - if (info.getSettings().getExpiration() - safetyMargin <= Clock.getInstance().now()) { - _log.debug("Expiration of tunnel " + id.getTunnelId() + " has almost been reached [" + new Date(info.getSettings().getExpiration()) + "]"); - return true; - } else { - return false; - } + private boolean isAlmostExpired(TunnelPool pool, TunnelId id, long safetyMargin) { + TunnelInfo info = pool.getTunnelInfo(id); + if (info == null) return true; + if (info.getSettings() == null) return true; + if 
(info.getSettings().getExpiration() <= 0) return true; + if (info.getSettings().getExpiration() - safetyMargin <= _context.clock().now()) { + _log.debug("Expiration of tunnel " + id.getTunnelId() + " has almost been reached [" + new Date(info.getSettings().getExpiration()) + "]"); + return true; + } else { + return false; + } } } diff --git a/router/java/src/net/i2p/router/tunnelmanager/RequestInboundTunnelJob.java b/router/java/src/net/i2p/router/tunnelmanager/RequestInboundTunnelJob.java index a78031020..3f4c9c9c8 100644 --- a/router/java/src/net/i2p/router/tunnelmanager/RequestInboundTunnelJob.java +++ b/router/java/src/net/i2p/router/tunnelmanager/RequestInboundTunnelJob.java @@ -4,26 +4,30 @@ import net.i2p.router.JobImpl; import net.i2p.router.JobQueue; import net.i2p.router.TunnelInfo; import net.i2p.util.Log; +import net.i2p.router.RouterContext; class RequestInboundTunnelJob extends JobImpl { - private final static Log _log = new Log(RequestInboundTunnelJob.class); + private Log _log; private TunnelPool _pool; private boolean _useFake; + private TunnelBuilder _builder; - public RequestInboundTunnelJob(TunnelPool pool) { - this(pool, false); + public RequestInboundTunnelJob(RouterContext context, TunnelPool pool) { + this(context, pool, false); } - public RequestInboundTunnelJob(TunnelPool pool, boolean useFake) { - super(); - _pool = pool; - _useFake = useFake; + public RequestInboundTunnelJob(RouterContext context, TunnelPool pool, boolean useFake) { + super(context); + _log = context.logManager().getLog(RequestInboundTunnelJob.class); + _pool = pool; + _useFake = useFake; + _builder = new TunnelBuilder(context); } public String getName() { return "Request Inbound Tunnel"; } public void runJob() { - _log.debug("Client pool settings: " + _pool.getPoolSettings().toString()); - TunnelInfo tunnelGateway = TunnelBuilder.getInstance().configureInboundTunnel(null, _pool.getPoolSettings(), _useFake); - RequestTunnelJob reqJob = new RequestTunnelJob(_pool, 
tunnelGateway, true, _pool.getTunnelCreationTimeout()); - JobQueue.getInstance().addJob(reqJob); + _log.debug("Client pool settings: " + _pool.getPoolSettings().toString()); + TunnelInfo tunnelGateway = _builder.configureInboundTunnel(null, _pool.getPoolSettings(), _useFake); + RequestTunnelJob reqJob = new RequestTunnelJob(_context, _pool, tunnelGateway, true, _pool.getTunnelCreationTimeout()); + _context.jobQueue().addJob(reqJob); } } diff --git a/router/java/src/net/i2p/router/tunnelmanager/RequestOutboundTunnelJob.java b/router/java/src/net/i2p/router/tunnelmanager/RequestOutboundTunnelJob.java index 0094e0e85..c1e05dff5 100644 --- a/router/java/src/net/i2p/router/tunnelmanager/RequestOutboundTunnelJob.java +++ b/router/java/src/net/i2p/router/tunnelmanager/RequestOutboundTunnelJob.java @@ -3,21 +3,24 @@ package net.i2p.router.tunnelmanager; import net.i2p.router.JobImpl; import net.i2p.router.JobQueue; import net.i2p.router.TunnelInfo; +import net.i2p.router.RouterContext; class RequestOutboundTunnelJob extends JobImpl { private TunnelPool _pool; private boolean _useFake; + private TunnelBuilder _builder; - public RequestOutboundTunnelJob(TunnelPool pool, boolean useFake) { - super(); - _pool = pool; - _useFake = useFake; + public RequestOutboundTunnelJob(RouterContext context, TunnelPool pool, boolean useFake) { + super(context); + _pool = pool; + _useFake = useFake; + _builder = new TunnelBuilder(context); } public String getName() { return "Request Outbound Tunnel"; } public void runJob() { - TunnelInfo tunnelGateway = TunnelBuilder.getInstance().configureOutboundTunnel(_pool.getPoolSettings(), _useFake); - RequestTunnelJob reqJob = new RequestTunnelJob(_pool, tunnelGateway, false, _pool.getTunnelCreationTimeout()); - JobQueue.getInstance().addJob(reqJob); + TunnelInfo tunnelGateway = _builder.configureOutboundTunnel(_pool.getPoolSettings(), _useFake); + RequestTunnelJob reqJob = new RequestTunnelJob(_context, _pool, tunnelGateway, false, 
_pool.getTunnelCreationTimeout()); + _context.jobQueue().addJob(reqJob); } } diff --git a/router/java/src/net/i2p/router/tunnelmanager/RequestTunnelJob.java b/router/java/src/net/i2p/router/tunnelmanager/RequestTunnelJob.java index 9e4fa9131..dd685cb83 100644 --- a/router/java/src/net/i2p/router/tunnelmanager/RequestTunnelJob.java +++ b/router/java/src/net/i2p/router/tunnelmanager/RequestTunnelJob.java @@ -55,13 +55,14 @@ import net.i2p.stat.StatManager; import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.router.RouterContext; /** * Request the creation of a new tunnel * */ public class RequestTunnelJob extends JobImpl { - private final static Log _log = new Log(RequestTunnelJob.class); + private Log _log; private TunnelPool _pool; private boolean _complete; private long _timeoutMs; @@ -73,125 +74,125 @@ public class RequestTunnelJob extends JobImpl { private final static int PRIORITY = 300; // high since we are creating tunnels for a client - static { - StatManager.getInstance().createFrequencyStat("tunnel.buildFrequency", "How often does the router build a tunnel?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createFrequencyStat("tunnel.buildFailFrequency", "How often does a peer in the tunnel fail to join??", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - } + RequestTunnelJob(RouterContext context, TunnelPool pool, TunnelInfo tunnelGateway, boolean isInbound, long timeoutMs) { + super(context); + _log = context.logManager().getLog(RequestTunnelJob.class); + context.statManager().createFrequencyStat("tunnel.buildFrequency", "How often does the router build a tunnel?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + context.statManager().createFrequencyStat("tunnel.buildFailFrequency", "How often does a peer in the tunnel fail to join??", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - - 
RequestTunnelJob(TunnelPool pool, TunnelInfo tunnelGateway, boolean isInbound, long timeoutMs) { - _pool = pool; - _tunnelGateway = tunnelGateway; - _toBeRequested = new ArrayList(); - _timeoutMs = timeoutMs; - _expiration = -1; - _isInbound = isInbound; - _failedTunnelParticipants = new HashSet(); - _complete = false; - - List participants = new ArrayList(); - TunnelInfo cur = _tunnelGateway; - while (cur != null) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Tunnel " + cur.getTunnelId() + " includes " + cur.getThisHop().toBase64()); - participants.add(cur); - cur = cur.getNextHopInfo(); - } - if (isInbound) { - if (_log.shouldLog(Log.INFO)) - _log.info("Requesting inbound tunnel " + _tunnelGateway.getTunnelId() + " with " + participants.size() + " participants in it"); - } else { - if (_log.shouldLog(Log.INFO)) - _log.info("Requesting outbound tunnel " + _tunnelGateway.getTunnelId() + " with " + participants.size() + " participants in it"); - } - - // since we request serially, we need to up the timeout serially - // change this once we go parallel - _timeoutMs *= participants.size()+1; - - // work backwards (end point, then the router pointing at the endpoint, then the router pointing at that, etc, until the gateway - _toBeRequested = new ArrayList(participants.size()); - for (int i = participants.size()-1; i >= 0; i--) - _toBeRequested.add(participants.get(i)); + _pool = pool; + _tunnelGateway = tunnelGateway; + _toBeRequested = new ArrayList(); + _timeoutMs = timeoutMs; + _expiration = -1; + _isInbound = isInbound; + _failedTunnelParticipants = new HashSet(); + _complete = false; + + List participants = new ArrayList(); + TunnelInfo cur = _tunnelGateway; + while (cur != null) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Tunnel " + cur.getTunnelId() + " includes " + cur.getThisHop().toBase64()); + participants.add(cur); + cur = cur.getNextHopInfo(); + } + if (isInbound) { + if (_log.shouldLog(Log.INFO)) + _log.info("Requesting inbound tunnel " + 
_tunnelGateway.getTunnelId() + " with " + + participants.size() + " participants in it"); + } else { + if (_log.shouldLog(Log.INFO)) + _log.info("Requesting outbound tunnel " + _tunnelGateway.getTunnelId() + " with " + participants.size() + " participants in it"); + } + + // since we request serially, we need to up the timeout serially + // change this once we go parallel + _timeoutMs *= participants.size()+1; + + // work backwards (end point, then the router pointing at the endpoint, then the router pointing at that, etc, until the gateway + _toBeRequested = new ArrayList(participants.size()); + for (int i = participants.size()-1; i >= 0; i--) + _toBeRequested.add(participants.get(i)); } public String getName() { return "Request Tunnel"; } public void runJob() { - if (_expiration < 0) _expiration = _timeoutMs + Clock.getInstance().now(); - if (Clock.getInstance().now() > _expiration) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Timeout reached building tunnel (timeout = " + _timeoutMs + " expiration = " + new Date(_expiration) + ")"); - fail(); - return; - } - - TunnelInfo peer = null; - synchronized (_toBeRequested) { - if (_toBeRequested.size() > 0) { - _pool.addPendingTunnel(_tunnelGateway); + if (_expiration < 0) _expiration = _timeoutMs + _context.clock().now(); + if (_context.clock().now() > _expiration) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Timeout reached building tunnel (timeout = " + _timeoutMs + " expiration = " + new Date(_expiration) + ")"); + fail(); + return; + } - peer = (TunnelInfo)_toBeRequested.remove(0); - if ( (peer == null) || (peer.getThisHop() == null) ) { - return; - } else { - // jump out of the synchronized block to request - } - } - } - if (peer != null) - requestParticipation(peer); + TunnelInfo peer = null; + synchronized (_toBeRequested) { + if (_toBeRequested.size() > 0) { + _pool.addPendingTunnel(_tunnelGateway); + + peer = (TunnelInfo)_toBeRequested.remove(0); + if ( (peer == null) || (peer.getThisHop() == null) ) { + 
return; + } else { + // jump out of the synchronized block to request + } + } + } + if (peer != null) + requestParticipation(peer); } private void requestParticipation(TunnelInfo participant) { - // find the info about who we're looking for - RouterInfo target = NetworkDatabaseFacade.getInstance().lookupRouterInfoLocally(participant.getThisHop()); - if (target == null) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Error - no db info known for participant " + participant.getThisHop()); - fail(); - return; - } - - if (target.getIdentity().getHash().equals(Router.getInstance().getRouterInfo().getIdentity().getHash())) { - // short circuit the ok - okLocalParticipation(participant); - return; - } - - // select send method [outbound tunnel or garlic through peers] - TunnelId outboundTunnel = selectOutboundTunnel(); - if (outboundTunnel == null) { - if (_log.shouldLog(Log.WARN)) - _log.warn("No outbound tunnels! unable to request a new tunnel!"); - fail(); - return; - } - - // select reply peer [peer to which SourceRouteReply should be sent, and from which the reply will be forwarded to an inbound tunnel] - RouterInfo replyPeer = selectReplyPeer(participant); - if (replyPeer == null) { - if (_log.shouldLog(Log.WARN)) - _log.warn("No reply peers available! 
unable to request a new tunnel!"); - fail(); - return; - } - - // select inbound tunnel gateway - TunnelGateway inboundGateway = selectInboundGateway(participant, replyPeer); - if (inboundGateway == null) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Unable to find an inbound gateway"); - fail(); - return; - } - - SessionKey wrappedKey = new SessionKey(); - Set wrappedTags = new HashSet(64); - PublicKey wrappedTo = new PublicKey(); - - RequestState state = new RequestState(wrappedKey, wrappedTags, wrappedTo, participant, inboundGateway, replyPeer, outboundTunnel, target); - Request r = new Request(state); - JobQueue.getInstance().addJob(r); + // find the info about who we're looking for + RouterInfo target = _context.netDb().lookupRouterInfoLocally(participant.getThisHop()); + if (target == null) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Error - no db info known for participant " + participant.getThisHop()); + fail(); + return; + } + + if (target.getIdentity().getHash().equals(_context.routerHash())) { + // short circuit the ok + okLocalParticipation(participant); + return; + } + + // select send method [outbound tunnel or garlic through peers] + TunnelId outboundTunnel = selectOutboundTunnel(); + if (outboundTunnel == null) { + if (_log.shouldLog(Log.WARN)) + _log.warn("No outbound tunnels! unable to request a new tunnel!"); + fail(); + return; + } + + // select reply peer [peer to which SourceRouteReply should be sent, and from which the reply will be forwarded to an inbound tunnel] + RouterInfo replyPeer = selectReplyPeer(participant); + if (replyPeer == null) { + if (_log.shouldLog(Log.WARN)) + _log.warn("No reply peers available! 
unable to request a new tunnel!"); + fail(); + return; + } + + // select inbound tunnel gateway + TunnelGateway inboundGateway = selectInboundGateway(participant, replyPeer); + if (inboundGateway == null) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Unable to find an inbound gateway"); + fail(); + return; + } + + SessionKey wrappedKey = new SessionKey(); + Set wrappedTags = new HashSet(64); + PublicKey wrappedTo = new PublicKey(); + + RequestState state = new RequestState(wrappedKey, wrappedTags, wrappedTo, participant, inboundGateway, replyPeer, outboundTunnel, target); + Request r = new Request(state); + _context.jobQueue().addJob(r); } /** @@ -202,27 +203,27 @@ public class RequestTunnelJob extends JobImpl { * blocks, the garlic, etc). */ public class Request extends JobImpl { - private RequestState _state; - Request(RequestState state) { - super(); - _state = state; - } - - public void runJob() { - boolean needsMore = _state.doNext(); - if (needsMore) - requeue(0); - else - MessageHistory.getInstance().requestTunnelCreate(_tunnelGateway.getTunnelId(), - _state.getOutboundTunnel(), - _state.getParticipant().getThisHop(), - _state.getParticipant().getNextHop(), - _state.getReplyPeer().getIdentity().getHash(), - _state.getInboundGateway().getTunnelId(), - _state.getInboundGateway().getGateway()); - } - - public String getName() { return "Request Tunnel (partial)"; } + private RequestState _state; + Request(RequestState state) { + super(RequestTunnelJob.this._context); + _state = state; + } + + public void runJob() { + boolean needsMore = _state.doNext(); + if (needsMore) + requeue(0); + else + Request.this._context.messageHistory().requestTunnelCreate(_tunnelGateway.getTunnelId(), + _state.getOutboundTunnel(), + _state.getParticipant().getThisHop(), + _state.getParticipant().getNextHop(), + _state.getReplyPeer().getIdentity().getHash(), + _state.getInboundGateway().getTunnelId(), + _state.getInboundGateway().getGateway()); + } + + public String getName() { 
return "Request Tunnel (partial)"; } } /** @@ -234,60 +235,60 @@ public class RequestTunnelJob extends JobImpl { * */ private class RequestState { - private SessionKey _wrappedKey; - private Set _wrappedTags; - private PublicKey _wrappedTo; - private TunnelCreateMessage _createMsg; - private DeliveryStatusMessage _statusMsg; - private GarlicMessage _garlicMessage; - private TunnelInfo _participant; - private TunnelGateway _inboundGateway; - private RouterInfo _replyPeer; - private TunnelId _outboundTunnel; - private RouterInfo _target; - - public RequestState(SessionKey wrappedKey, Set wrappedTags, PublicKey wrappedTo, TunnelInfo participant, TunnelGateway inboundGateway, RouterInfo replyPeer, TunnelId outboundTunnel, RouterInfo target) { - _wrappedKey = wrappedKey; - _wrappedTags = wrappedTags; - _wrappedTo = wrappedTo; - _participant = participant; - _inboundGateway = inboundGateway; - _replyPeer = replyPeer; - _outboundTunnel = outboundTunnel; - _target = target; - } - - public TunnelId getOutboundTunnel() { return _outboundTunnel; } - public TunnelInfo getParticipant() { return _participant; } - public RouterInfo getReplyPeer() { return _replyPeer; } - public TunnelGateway getInboundGateway() { return _inboundGateway; } - - public boolean doNext() { - if (_createMsg == null) { - _createMsg = buildTunnelCreate(_participant, _inboundGateway, _replyPeer); - return true; - } else if (_statusMsg == null) { - _statusMsg = buildDeliveryStatusMessage(); - return true; - } else if (_garlicMessage == null) { - _garlicMessage = buildGarlicMessage(_createMsg, _statusMsg, _replyPeer, _inboundGateway, _target, _wrappedKey, _wrappedTags, _wrappedTo); - return true; - } else { - // send the GarlicMessage - if (_log.shouldLog(Log.INFO)) - _log.info("Sending tunnel create to " + _target.getIdentity().getHash().toBase64() + - " with replies through " + _replyPeer.getIdentity().getHash().toBase64() + - " to inbound gateway " + _inboundGateway.getGateway().toBase64() + - " : " + 
_inboundGateway.getTunnelId().getTunnelId()); - ReplyJob onReply = new Success(_participant, _wrappedKey, _wrappedTags, _wrappedTo); - Job onFail = new Failure(_participant, _replyPeer.getIdentity().getHash()); - MessageSelector selector = new Selector(_participant, _statusMsg.getMessageId()); - SendTunnelMessageJob j = new SendTunnelMessageJob(_garlicMessage, _outboundTunnel, _target.getIdentity().getHash(), - null, null, onReply, onFail, selector, _timeoutMs, PRIORITY); - JobQueue.getInstance().addJob(j); - return false; - } - } + private SessionKey _wrappedKey; + private Set _wrappedTags; + private PublicKey _wrappedTo; + private TunnelCreateMessage _createMsg; + private DeliveryStatusMessage _statusMsg; + private GarlicMessage _garlicMessage; + private TunnelInfo _participant; + private TunnelGateway _inboundGateway; + private RouterInfo _replyPeer; + private TunnelId _outboundTunnel; + private RouterInfo _target; + + public RequestState(SessionKey wrappedKey, Set wrappedTags, PublicKey wrappedTo, TunnelInfo participant, TunnelGateway inboundGateway, RouterInfo replyPeer, TunnelId outboundTunnel, RouterInfo target) { + _wrappedKey = wrappedKey; + _wrappedTags = wrappedTags; + _wrappedTo = wrappedTo; + _participant = participant; + _inboundGateway = inboundGateway; + _replyPeer = replyPeer; + _outboundTunnel = outboundTunnel; + _target = target; + } + + public TunnelId getOutboundTunnel() { return _outboundTunnel; } + public TunnelInfo getParticipant() { return _participant; } + public RouterInfo getReplyPeer() { return _replyPeer; } + public TunnelGateway getInboundGateway() { return _inboundGateway; } + + public boolean doNext() { + if (_createMsg == null) { + _createMsg = buildTunnelCreate(_participant, _inboundGateway, _replyPeer); + return true; + } else if (_statusMsg == null) { + _statusMsg = buildDeliveryStatusMessage(); + return true; + } else if (_garlicMessage == null) { + _garlicMessage = buildGarlicMessage(_createMsg, _statusMsg, _replyPeer, 
_inboundGateway, _target, _wrappedKey, _wrappedTags, _wrappedTo); + return true; + } else { + // send the GarlicMessage + if (_log.shouldLog(Log.INFO)) + _log.info("Sending tunnel create to " + _target.getIdentity().getHash().toBase64() + + " with replies through " + _replyPeer.getIdentity().getHash().toBase64() + + " to inbound gateway " + _inboundGateway.getGateway().toBase64() + + " : " + _inboundGateway.getTunnelId().getTunnelId()); + ReplyJob onReply = new Success(_participant, _wrappedKey, _wrappedTags, _wrappedTo); + Job onFail = new Failure(_participant, _replyPeer.getIdentity().getHash()); + MessageSelector selector = new Selector(_participant, _statusMsg.getMessageId()); + SendTunnelMessageJob j = new SendTunnelMessageJob(_context, _garlicMessage, _outboundTunnel, _target.getIdentity().getHash(), + null, null, onReply, onFail, selector, _timeoutMs, PRIORITY); + _context.jobQueue().addJob(j); + return false; + } + } } /** @@ -296,9 +297,9 @@ public class RequestTunnelJob extends JobImpl { * */ private void okLocalParticipation(TunnelInfo info) { - if (_log.shouldLog(Log.INFO)) - _log.info("Short circuiting the local join to tunnel " + info.getTunnelId()); - peerSuccess(info); + if (_log.shouldLog(Log.INFO)) + _log.info("Short circuiting the local join to tunnel " + info.getTunnelId()); + peerSuccess(info); } /** @@ -306,20 +307,20 @@ public class RequestTunnelJob extends JobImpl { * */ private TunnelId selectOutboundTunnel() { - TunnelSelectionCriteria crit = new TunnelSelectionCriteria(); - crit.setMaximumTunnelsRequired(1); - crit.setMinimumTunnelsRequired(1); - crit.setAnonymityPriority(50); // arbitrary - crit.setLatencyPriority(50); // arbitrary - crit.setReliabilityPriority(50); // arbitrary - - List tunnelIds = TunnelManagerFacade.getInstance().selectOutboundTunnelIds(crit); - TunnelId id = null; - if (tunnelIds.size() > 0) - id = (TunnelId)tunnelIds.get(0); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Outbound tunnel selected: " + id); - return 
id; + TunnelSelectionCriteria crit = new TunnelSelectionCriteria(); + crit.setMaximumTunnelsRequired(1); + crit.setMinimumTunnelsRequired(1); + crit.setAnonymityPriority(50); // arbitrary + crit.setLatencyPriority(50); // arbitrary + crit.setReliabilityPriority(50); // arbitrary + + List tunnelIds = _context.tunnelManager().selectOutboundTunnelIds(crit); + TunnelId id = null; + if (tunnelIds.size() > 0) + id = (TunnelId)tunnelIds.get(0); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Outbound tunnel selected: " + id); + return id; } /** @@ -329,33 +330,33 @@ public class RequestTunnelJob extends JobImpl { * Currently just a random peer */ private RouterInfo selectReplyPeer(TunnelInfo tunnelParticipant) { - PeerSelectionCriteria criteria = new PeerSelectionCriteria(); - criteria.setMaximumRequired(1); - criteria.setMinimumRequired(1); - criteria.setPurpose(PeerSelectionCriteria.PURPOSE_SOURCE_ROUTE); - List peerHashes = PeerManagerFacade.getInstance().selectPeers(criteria); - - RouterInfo peerInfo = null; - for (int i = 0; (i < peerHashes.size()) && (peerInfo == null); i++) { - Hash peerHash = (Hash)peerHashes.get(i); - peerInfo = NetworkDatabaseFacade.getInstance().lookupRouterInfoLocally(peerHash); - if (peerInfo == null) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Selected a peer [" + peerHash + "] we don't have info on locally... 
trying another"); - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Peer [" + peerHash.toBase64() + "] is known locally, keep it in the list of replyPeers"); - break; - } - } - - if (peerInfo == null) { - if (_log.shouldLog(Log.WARN)) - _log.warn("No peers know for a reply (out of " + peerHashes.size() + ") - using ourself"); - return Router.getInstance().getRouterInfo(); - } else { - return peerInfo; - } + PeerSelectionCriteria criteria = new PeerSelectionCriteria(); + criteria.setMaximumRequired(1); + criteria.setMinimumRequired(1); + criteria.setPurpose(PeerSelectionCriteria.PURPOSE_SOURCE_ROUTE); + List peerHashes = _context.peerManager().selectPeers(criteria); + + RouterInfo peerInfo = null; + for (int i = 0; (i < peerHashes.size()) && (peerInfo == null); i++) { + Hash peerHash = (Hash)peerHashes.get(i); + peerInfo = _context.netDb().lookupRouterInfoLocally(peerHash); + if (peerInfo == null) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Selected a peer [" + peerHash + "] we don't have info on locally... 
trying another"); + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Peer [" + peerHash.toBase64() + "] is known locally, keep it in the list of replyPeers"); + break; + } + } + + if (peerInfo == null) { + if (_log.shouldLog(Log.WARN)) + _log.warn("No peers know for a reply (out of " + peerHashes.size() + ") - using ourself"); + return _context.router().getRouterInfo(); + } else { + return peerInfo; + } } /** @@ -364,153 +365,153 @@ public class RequestTunnelJob extends JobImpl { * */ private TunnelGateway selectInboundGateway(TunnelInfo participant, RouterInfo replyPeer) { - TunnelSelectionCriteria criteria = new TunnelSelectionCriteria(); - criteria.setAnonymityPriority(66); - criteria.setReliabilityPriority(66); - criteria.setLatencyPriority(33); - criteria.setMaximumTunnelsRequired(1); - criteria.setMinimumTunnelsRequired(1); - List ids = TunnelManagerFacade.getInstance().selectInboundTunnelIds(criteria); - if (ids.size() <= 0) { - if (_log.shouldLog(Log.ERROR)) - _log.error("No inbound tunnels to receive the tunnel create messages. Argh", new Exception("Tunnels suck. 
whats up?")); - return null; - } else { - TunnelInfo gateway = null; - TunnelId id = null; - for (int i = 0; i < ids.size(); i++) { - id = (TunnelId)ids.get(i); - gateway = TunnelManagerFacade.getInstance().getTunnelInfo(id); - if (gateway != null) - break; - } - if (gateway != null) { - TunnelGateway gw = new TunnelGateway(id, gateway.getThisHop()); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Inbound tunnel gateway: " + id + " on router " + gateway.getThisHop()); - return gw; - } else { - if (_log.shouldLog(Log.ERROR)) - _log.error("No gateway found?!", new Exception("No gateway")); - return null; - } - } + TunnelSelectionCriteria criteria = new TunnelSelectionCriteria(); + criteria.setAnonymityPriority(66); + criteria.setReliabilityPriority(66); + criteria.setLatencyPriority(33); + criteria.setMaximumTunnelsRequired(1); + criteria.setMinimumTunnelsRequired(1); + List ids = _context.tunnelManager().selectInboundTunnelIds(criteria); + if (ids.size() <= 0) { + if (_log.shouldLog(Log.ERROR)) + _log.error("No inbound tunnels to receive the tunnel create messages. Argh", new Exception("Tunnels suck. 
whats up?")); + return null; + } else { + TunnelInfo gateway = null; + TunnelId id = null; + for (int i = 0; i < ids.size(); i++) { + id = (TunnelId)ids.get(i); + gateway = _context.tunnelManager().getTunnelInfo(id); + if (gateway != null) + break; + } + if (gateway != null) { + TunnelGateway gw = new TunnelGateway(id, gateway.getThisHop()); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Inbound tunnel gateway: " + id + " on router " + gateway.getThisHop()); + return gw; + } else { + if (_log.shouldLog(Log.ERROR)) + _log.error("No gateway found?!", new Exception("No gateway")); + return null; + } + } } /** * Build a TunnelCreateMessage to the participant */ private TunnelCreateMessage buildTunnelCreate(TunnelInfo participant, TunnelGateway replyGateway, RouterInfo replyPeer) { - TunnelCreateMessage msg = new TunnelCreateMessage(); - msg.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); - msg.setConfigurationKey(participant.getConfigurationKey()); - msg.setIncludeDummyTraffic(participant.getSettings().getIncludeDummy()); - msg.setMaxAvgBytesPerMin(participant.getSettings().getBytesPerMinuteAverage()); - msg.setMaxAvgMessagesPerMin(participant.getSettings().getMessagesPerMinuteAverage()); - msg.setMaxPeakBytesPerMin(participant.getSettings().getBytesPerMinutePeak()); - msg.setMaxPeakMessagesPerMin(participant.getSettings().getMessagesPerMinutePeak()); - msg.setNextRouter(participant.getNextHop()); - if (participant.getNextHop() == null) - msg.setParticipantType(TunnelCreateMessage.PARTICIPANT_TYPE_ENDPOINT); - else if (participant.getSigningKey() != null) - msg.setParticipantType(TunnelCreateMessage.PARTICIPANT_TYPE_GATEWAY); - else - msg.setParticipantType(TunnelCreateMessage.PARTICIPANT_TYPE_OTHER); - msg.setReorderMessages(participant.getSettings().getReorder()); - - SourceRouteBlock replyBlock = buildReplyBlock(replyGateway, replyPeer); - if (replyBlock == null) - return null; - - msg.setReplyBlock(replyBlock); - long duration = 
participant.getSettings().getExpiration() - Clock.getInstance().now(); - if (duration == 0) duration = 1; - msg.setTunnelDurationSeconds(duration/1000); - msg.setTunnelId(participant.getTunnelId()); - msg.setTunnelKey(participant.getEncryptionKey()); - msg.setVerificationPrivateKey(participant.getSigningKey()); - msg.setVerificationPublicKey(participant.getVerificationKey()); - - return msg; + TunnelCreateMessage msg = new TunnelCreateMessage(_context); + msg.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); + msg.setConfigurationKey(participant.getConfigurationKey()); + msg.setIncludeDummyTraffic(participant.getSettings().getIncludeDummy()); + msg.setMaxAvgBytesPerMin(participant.getSettings().getBytesPerMinuteAverage()); + msg.setMaxAvgMessagesPerMin(participant.getSettings().getMessagesPerMinuteAverage()); + msg.setMaxPeakBytesPerMin(participant.getSettings().getBytesPerMinutePeak()); + msg.setMaxPeakMessagesPerMin(participant.getSettings().getMessagesPerMinutePeak()); + msg.setNextRouter(participant.getNextHop()); + if (participant.getNextHop() == null) + msg.setParticipantType(TunnelCreateMessage.PARTICIPANT_TYPE_ENDPOINT); + else if (participant.getSigningKey() != null) + msg.setParticipantType(TunnelCreateMessage.PARTICIPANT_TYPE_GATEWAY); + else + msg.setParticipantType(TunnelCreateMessage.PARTICIPANT_TYPE_OTHER); + msg.setReorderMessages(participant.getSettings().getReorder()); + + SourceRouteBlock replyBlock = buildReplyBlock(replyGateway, replyPeer); + if (replyBlock == null) + return null; + + msg.setReplyBlock(replyBlock); + long duration = participant.getSettings().getExpiration() - _context.clock().now(); + if (duration == 0) duration = 1; + msg.setTunnelDurationSeconds(duration/1000); + msg.setTunnelId(participant.getTunnelId()); + msg.setTunnelKey(participant.getEncryptionKey()); + msg.setVerificationPrivateKey(participant.getSigningKey()); + msg.setVerificationPublicKey(participant.getVerificationKey()); + + return msg; } 
/** - * Build a source route block directing the reply through the gateway by means of the + * Build a source route block directing the reply through the gateway by means of the * replyPeer * */ private SourceRouteBlock buildReplyBlock(TunnelGateway gateway, RouterInfo replyPeer) { - if (replyPeer == null) { - if (_log.shouldLog(Log.ERROR)) - _log.error("No peer specified for reply!"); - return null; - } - - SessionKey replySessionKey = KeyGenerator.getInstance().generateSessionKey(); - SessionTag tag = new SessionTag(true); - Set tags = new HashSet(); - tags.add(tag); - // make it so we'll read the session tag correctly and use the right session key - SessionKeyManager.getInstance().tagsReceived(replySessionKey, tags); - - PublicKey pk = replyPeer.getIdentity().getPublicKey(); - - DeliveryInstructions instructions = new DeliveryInstructions(); - instructions.setDelayRequested(false); - instructions.setDelaySeconds(0); - instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_TUNNEL); - instructions.setDestination(null); - instructions.setEncrypted(false); - instructions.setEncryptionKey(null); - instructions.setRouter(gateway.getGateway()); - instructions.setTunnelId(gateway.getTunnelId()); - - long replyId = RandomSource.getInstance().nextInt(Integer.MAX_VALUE); - - Certificate replyCert = new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null); - - long expiration = _expiration; - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Setting the expiration on the reply block to " + (new Date(expiration))); - SourceRouteBlock block = new SourceRouteBlock(); - try { - long begin = Clock.getInstance().now(); - block.setData(instructions, replyId, replyCert, expiration, pk); - long end = Clock.getInstance().now(); - if ( (end - begin) > 1000) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Took too long (" + (end-begin) + "ms) to build source route block"); - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("did NOT take long (" + (end-begin) + "ms) to build 
source route block!"); - } - } catch (DataFormatException dfe) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Error building the reply block", dfe); - return null; - } - - block.setRouter(replyPeer.getIdentity().getHash()); - block.setKey(replySessionKey); - block.setTag(tag); - - return block; + if (replyPeer == null) { + if (_log.shouldLog(Log.ERROR)) + _log.error("No peer specified for reply!"); + return null; + } + + SessionKey replySessionKey = _context.keyGenerator().generateSessionKey(); + SessionTag tag = new SessionTag(true); + Set tags = new HashSet(); + tags.add(tag); + // make it so we'll read the session tag correctly and use the right session key + _context.sessionKeyManager().tagsReceived(replySessionKey, tags); + + PublicKey pk = replyPeer.getIdentity().getPublicKey(); + + DeliveryInstructions instructions = new DeliveryInstructions(); + instructions.setDelayRequested(false); + instructions.setDelaySeconds(0); + instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_TUNNEL); + instructions.setDestination(null); + instructions.setEncrypted(false); + instructions.setEncryptionKey(null); + instructions.setRouter(gateway.getGateway()); + instructions.setTunnelId(gateway.getTunnelId()); + + long replyId = _context.random().nextInt(Integer.MAX_VALUE); + + Certificate replyCert = new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null); + + long expiration = _expiration; + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Setting the expiration on the reply block to " + (new Date(expiration))); + SourceRouteBlock block = new SourceRouteBlock(); + try { + long begin = _context.clock().now(); + block.setData(_context, instructions, replyId, replyCert, expiration, pk); + long end = _context.clock().now(); + if ( (end - begin) > 1000) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Took too long (" + (end-begin) + "ms) to build source route block"); + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("did NOT take long (" + (end-begin) + "ms) to 
build source route block!"); + } + } catch (DataFormatException dfe) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Error building the reply block", dfe); + return null; + } + + block.setRouter(replyPeer.getIdentity().getHash()); + block.setKey(replySessionKey); + block.setTag(tag); + + return block; } /** - * Create a message containing a random id to check for after garlic routing + * Create a message containing a random id to check for after garlic routing * it out so that we know the other message in the garlic has been received * */ private DeliveryStatusMessage buildDeliveryStatusMessage() { - DeliveryStatusMessage msg = new DeliveryStatusMessage(); - msg.setArrival(new Date(Clock.getInstance().now())); - msg.setMessageId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE)); - Date exp = new Date(_expiration); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Setting the expiration on the delivery status message to " + exp); - msg.setMessageExpiration(exp); - return msg; + DeliveryStatusMessage msg = new DeliveryStatusMessage(_context); + msg.setArrival(new Date(_context.clock().now())); + msg.setMessageId(_context.random().nextInt(Integer.MAX_VALUE)); + Date exp = new Date(_expiration); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Setting the expiration on the delivery status message to " + exp); + msg.setMessageExpiration(exp); + return msg; } @@ -521,342 +522,344 @@ public class RequestTunnelJob extends JobImpl { * */ private GarlicMessage buildGarlicMessage(I2NPMessage data, I2NPMessage status, RouterInfo replyPeer, TunnelGateway replyTunnel, RouterInfo target, SessionKey wrappedKey, Set wrappedTags, PublicKey wrappedTo) { - GarlicConfig config = buildGarlicConfig(data, status, replyPeer, replyTunnel, target); - - PublicKey rcptKey = config.getRecipientPublicKey(); - if (rcptKey == null) { - if (config.getRecipient() == null) { - throw new IllegalArgumentException("Null recipient specified"); - } else if (config.getRecipient().getIdentity() == null) { 
- throw new IllegalArgumentException("Null recipient.identity specified"); - } else if (config.getRecipient().getIdentity().getPublicKey() == null) { - throw new IllegalArgumentException("Null recipient.identity.publicKey specified"); - } else - rcptKey = config.getRecipient().getIdentity().getPublicKey(); - } - - if (wrappedTo != null) - wrappedTo.setData(rcptKey.getData()); - - long start = Clock.getInstance().now(); - GarlicMessage message = GarlicMessageBuilder.buildMessage(config, wrappedKey, wrappedTags); - long end = Clock.getInstance().now(); - if ( (end - start) > 1000) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Took more than a second (" + (end-start) + "ms) to create the garlic for the tunnel"); - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Took LESS than a second (" + (end-start) + "ms) to create the garlic for the tunnel!"); - } - return message; + GarlicConfig config = buildGarlicConfig(data, status, replyPeer, replyTunnel, target); + + PublicKey rcptKey = config.getRecipientPublicKey(); + if (rcptKey == null) { + if (config.getRecipient() == null) { + throw new IllegalArgumentException("Null recipient specified"); + } else if (config.getRecipient().getIdentity() == null) { + throw new IllegalArgumentException("Null recipient.identity specified"); + } else if (config.getRecipient().getIdentity().getPublicKey() == null) { + throw new IllegalArgumentException("Null recipient.identity.publicKey specified"); + } else + rcptKey = config.getRecipient().getIdentity().getPublicKey(); + } + + if (wrappedTo != null) + wrappedTo.setData(rcptKey.getData()); + + long start = _context.clock().now(); + GarlicMessage message = GarlicMessageBuilder.buildMessage(_context, config, wrappedKey, wrappedTags); + long end = _context.clock().now(); + if ( (end - start) > 1000) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Took more than a second (" + (end-start) + "ms) to create the garlic for the tunnel"); + } else { + if (_log.shouldLog(Log.DEBUG)) + 
_log.debug("Took LESS than a second (" + (end-start) + "ms) to create the garlic for the tunnel!"); + } + return message; } - private GarlicConfig buildGarlicConfig(I2NPMessage data, I2NPMessage status, RouterInfo replyPeer, TunnelGateway replyTunnel, RouterInfo target) { - GarlicConfig config = new GarlicConfig(); - - PayloadGarlicConfig dataClove = buildDataClove(data, target, _expiration); - config.addClove(dataClove); - PayloadGarlicConfig ackClove = buildAckClove(status, replyPeer, replyTunnel, _expiration); - config.addClove(ackClove); - - DeliveryInstructions instructions = new DeliveryInstructions(); - instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_ROUTER); - instructions.setDelayRequested(false); - instructions.setDelaySeconds(0); - instructions.setEncrypted(false); - instructions.setEncryptionKey(null); - instructions.setRouter(target.getIdentity().getHash()); - instructions.setTunnelId(null); - - _log.info("Setting the expiration on the garlic config to " + (new Date(_expiration))); - - config.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); - config.setDeliveryInstructions(instructions); - config.setId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE)); - config.setExpiration(_expiration); - config.setRecipientPublicKey(target.getIdentity().getPublicKey()); - config.setRequestAck(false); - - return config; + private GarlicConfig buildGarlicConfig(I2NPMessage data, I2NPMessage status, RouterInfo replyPeer, TunnelGateway replyTunnel, RouterInfo target) { + GarlicConfig config = new GarlicConfig(); + + PayloadGarlicConfig dataClove = buildDataClove(data, target, _expiration); + config.addClove(dataClove); + PayloadGarlicConfig ackClove = buildAckClove(status, replyPeer, replyTunnel, _expiration); + config.addClove(ackClove); + + DeliveryInstructions instructions = new DeliveryInstructions(); + instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_ROUTER); + instructions.setDelayRequested(false); + 
instructions.setDelaySeconds(0); + instructions.setEncrypted(false); + instructions.setEncryptionKey(null); + instructions.setRouter(target.getIdentity().getHash()); + instructions.setTunnelId(null); + + _log.info("Setting the expiration on the garlic config to " + (new Date(_expiration))); + + config.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); + config.setDeliveryInstructions(instructions); + config.setId(_context.random().nextInt(Integer.MAX_VALUE)); + config.setExpiration(_expiration); + config.setRecipientPublicKey(target.getIdentity().getPublicKey()); + config.setRequestAck(false); + + return config; } /** * Build a clove that sends a DeliveryStatusMessage to us */ private PayloadGarlicConfig buildAckClove(I2NPMessage ackMsg, RouterInfo replyPeer, TunnelGateway replyTunnel, long expiration) { - PayloadGarlicConfig ackClove = new PayloadGarlicConfig(); - - Hash replyToTunnelRouter = replyTunnel.getGateway(); // inbound tunnel gateway - TunnelId replyToTunnelId = replyTunnel.getTunnelId(); // tunnel id on that gateway - - DeliveryInstructions ackInstructions = new DeliveryInstructions(); - ackInstructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_TUNNEL); - ackInstructions.setRouter(replyToTunnelRouter); - ackInstructions.setTunnelId(replyToTunnelId); - ackInstructions.setDelayRequested(false); - ackInstructions.setDelaySeconds(0); - ackInstructions.setEncrypted(false); - - ackClove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); - ackClove.setDeliveryInstructions(ackInstructions); - ackClove.setExpiration(expiration); - ackClove.setId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE)); - ackClove.setPayload(ackMsg); - ackClove.setRecipient(replyPeer); - ackClove.setRequestAck(false); - - return ackClove; - } + PayloadGarlicConfig ackClove = new PayloadGarlicConfig(); + Hash replyToTunnelRouter = replyTunnel.getGateway(); // inbound tunnel gateway + TunnelId replyToTunnelId = 
replyTunnel.getTunnelId(); // tunnel id on that gateway + + DeliveryInstructions ackInstructions = new DeliveryInstructions(); + ackInstructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_TUNNEL); + ackInstructions.setRouter(replyToTunnelRouter); + ackInstructions.setTunnelId(replyToTunnelId); + ackInstructions.setDelayRequested(false); + ackInstructions.setDelaySeconds(0); + ackInstructions.setEncrypted(false); + + ackClove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); + ackClove.setDeliveryInstructions(ackInstructions); + ackClove.setExpiration(expiration); + ackClove.setId(_context.random().nextInt(Integer.MAX_VALUE)); + ackClove.setPayload(ackMsg); + ackClove.setRecipient(replyPeer); + ackClove.setRequestAck(false); + + return ackClove; + } + /** * Build a clove that sends the data to the target (which is local) */ private PayloadGarlicConfig buildDataClove(I2NPMessage data, RouterInfo target, long expiration) { - PayloadGarlicConfig clove = new PayloadGarlicConfig(); - - DeliveryInstructions instructions = new DeliveryInstructions(); - instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL); - instructions.setRouter(target.getIdentity().getHash()); - instructions.setTunnelId(null); - instructions.setDelayRequested(false); - instructions.setDelaySeconds(0); - instructions.setEncrypted(false); - - clove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); - clove.setDeliveryInstructions(instructions); - clove.setExpiration(expiration); - clove.setId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE)); - clove.setPayload(data); - clove.setRecipientPublicKey(null); - clove.setRequestAck(false); - - return clove; + PayloadGarlicConfig clove = new PayloadGarlicConfig(); + + DeliveryInstructions instructions = new DeliveryInstructions(); + instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL); + instructions.setRouter(target.getIdentity().getHash()); + 
instructions.setTunnelId(null); + instructions.setDelayRequested(false); + instructions.setDelaySeconds(0); + instructions.setEncrypted(false); + + clove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); + clove.setDeliveryInstructions(instructions); + clove.setExpiration(expiration); + clove.setId(_context.random().nextInt(Integer.MAX_VALUE)); + clove.setPayload(data); + clove.setRecipientPublicKey(null); + clove.setRequestAck(false); + + return clove; } private void fail() { - if (_complete) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Build tunnel failed via " + _tunnelGateway.getThisHop().toBase64() + ", but we've already completed, so fuck off: " + _tunnelGateway, new Exception("Fail aborted")); - } else { - if (_log.shouldLog(Log.WARN)) - _log.warn("Build tunnel " + _tunnelGateway.getTunnelId().getTunnelId() + " with gateway " + _tunnelGateway.getThisHop().toBase64() + " FAILED: " + _failedTunnelParticipants + " - " + _tunnelGateway, new Exception("Why did we fail building?")); - synchronized (_toBeRequested) { - _toBeRequested.clear(); - } - synchronized (_failedTunnelParticipants) { - _failedTunnelParticipants.clear(); - } - _complete = true; - } - } + if (_complete) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Build tunnel failed via " + _tunnelGateway.getThisHop().toBase64() + ", but we've already completed, so fuck off: " + _tunnelGateway, new Exception("Fail aborted")); + } else { + if (_log.shouldLog(Log.WARN)) + _log.warn("Build tunnel " + _tunnelGateway.getTunnelId().getTunnelId() + " with gateway " + _tunnelGateway.getThisHop().toBase64() + " FAILED: " + _failedTunnelParticipants + " - " + _tunnelGateway, new Exception("Why did we fail building?")); + synchronized (_toBeRequested) { + _toBeRequested.clear(); + } + synchronized (_failedTunnelParticipants) { + _failedTunnelParticipants.clear(); + } + _complete = true; + } + } private void peerSuccess(TunnelInfo peer) { - int numLeft = 0; - synchronized (_toBeRequested) { - 
numLeft = _toBeRequested.size(); - } - if (numLeft <= 0) { - if (_log.shouldLog(Log.INFO)) - _log.info("Peer (" + peer.getThisHop().toBase64() + ") successful: mark the tunnel as completely ready [inbound? " + _isInbound + "]"); - _complete = true; - if (_isInbound) - _pool.addFreeTunnel(_tunnelGateway); - else - _pool.addOutboundTunnel(_tunnelGateway); - _tunnelGateway.setIsReady(true); - StatManager.getInstance().updateFrequency("tunnel.buildFrequency"); - } else { - if (_log.shouldLog(Log.DEBUG)) { - StringBuffer buf = new StringBuffer(128); - buf.append("Hop to ").append(peer.getThisHop().toBase64()).append(" successful for tunnel ").append(peer.getTunnelId().getTunnelId()); - buf.append(", but ").append(numLeft).append(" are pending"); - _log.debug(buf.toString()); - } - JobQueue.getInstance().addJob(this); - } + int numLeft = 0; + synchronized (_toBeRequested) { + numLeft = _toBeRequested.size(); + } + if (numLeft <= 0) { + if (_log.shouldLog(Log.INFO)) + _log.info("Peer (" + peer.getThisHop().toBase64() + ") successful: mark the tunnel as completely ready [inbound? 
" + _isInbound + "]"); + _complete = true; + if (_isInbound) + _pool.addFreeTunnel(_tunnelGateway); + else + _pool.addOutboundTunnel(_tunnelGateway); + _tunnelGateway.setIsReady(true); + _context.statManager().updateFrequency("tunnel.buildFrequency"); + } else { + if (_log.shouldLog(Log.DEBUG)) { + StringBuffer buf = new StringBuffer(128); + buf.append("Hop to ").append(peer.getThisHop().toBase64()).append(" successful for tunnel ").append(peer.getTunnelId().getTunnelId()); + buf.append(", but ").append(numLeft).append(" are pending"); + _log.debug(buf.toString()); + } + _context.jobQueue().addJob(this); + } } public void dropped() { - _pool.buildFakeTunnels(); - if (_log.shouldLog(Log.WARN)) - _log.warn("Dropping request to create a new tunnel, so we may have manually created a new fake inbound and a new fake outbound, just in case we needed that..."); + _pool.buildFakeTunnels(); + if (_log.shouldLog(Log.WARN)) + _log.warn("Dropping request to create a new tunnel, so we may have manually created a new fake inbound and a new fake outbound, just in case we needed that..."); } - - + + private class Success extends JobImpl implements ReplyJob { - private TunnelInfo _tunnel; - private List _messages; - private boolean _successCompleted; - private SessionKey _wrappedKey; - private Set _wrappedTags; - private PublicKey _wrappedTo; - private long _started; - - public Success(TunnelInfo tunnel, SessionKey wrappedKey, Set wrappedTags, PublicKey wrappedTo) { - _tunnel = tunnel; - _messages = new LinkedList(); - _successCompleted = false; - _wrappedKey = wrappedKey; - _wrappedTags = wrappedTags; - _wrappedTo = wrappedTo; - _started = Clock.getInstance().now(); - } - - public String getName() { return "Create Tunnel Status Received"; } - public void runJob() { - List toProc = null; - synchronized (_messages) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("# messages received for successs: " + _messages.size()); - toProc = new ArrayList(_messages); - _messages.clear(); - } - - 
long responseTime = Clock.getInstance().now() - _started; - for (Iterator iter = toProc.iterator(); iter.hasNext(); ) { - I2NPMessage msg = (I2NPMessage)iter.next(); - process(msg, responseTime); - } - } - - private void process(I2NPMessage message, long responseTime) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Running success status job (tunnel = " + _tunnel + " msg = " + message + ")"); - if (message.getType() == DeliveryStatusMessage.MESSAGE_TYPE) { - if (_log.shouldLog(Log.INFO)) - _log.info("Tunnel creation message acknowledged for tunnel " + _tunnel.getTunnelId() + " at router " + _tunnel.getThisHop().toBase64()); - } else { - TunnelCreateStatusMessage msg = (TunnelCreateStatusMessage)message; - if (_successCompleted) { - _log.info("Already completed in the Success task [skipping " + msg.getStatus() + "]"); - return; - } - switch (msg.getStatus()) { - case TunnelCreateStatusMessage.STATUS_FAILED_CERTIFICATE: - case TunnelCreateStatusMessage.STATUS_FAILED_DELETED: - case TunnelCreateStatusMessage.STATUS_FAILED_DUPLICATE_ID: - case TunnelCreateStatusMessage.STATUS_FAILED_OVERLOADED: - if (_log.shouldLog(Log.WARN)) - _log.warn("Tunnel creation failed for tunnel " + _tunnel.getTunnelId() + " at router " + _tunnel.getThisHop().toBase64() + " with status " + msg.getStatus()); - ProfileManager.getInstance().tunnelRejected(_tunnel.getThisHop(), responseTime); - MessageHistory.getInstance().tunnelRejected(_tunnel.getThisHop(), _tunnel.getTunnelId(), null, "refused"); - fail(); - _successCompleted = true; - break; - case TunnelCreateStatusMessage.STATUS_SUCCESS: - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Tunnel creation succeeded for tunnel " + _tunnel.getTunnelId() + " at router " + _tunnel.getThisHop().toBase64()); - - if ( (_wrappedKey != null) && (_wrappedKey.getData() != null) && (_wrappedTags != null) && (_wrappedTags.size() > 0) && (_wrappedTo != null) ) { - SessionKeyManager.getInstance().tagsDelivered(_wrappedTo, _wrappedKey, _wrappedTags); - if 
(_log.shouldLog(Log.INFO)) - _log.info("Delivered tags successfully to " + _tunnel.getThisHop().toBase64() + "! # tags: " + _wrappedTags.size()); - } - - _tunnel.setIsReady(true); - ProfileManager.getInstance().tunnelJoined(_tunnel.getThisHop(), responseTime); - peerSuccess(_tunnel); - _successCompleted = true; - break; - } - } - } - - public void setMessage(I2NPMessage message) { - synchronized (_messages) { - _messages.add(message); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Reply message " + _messages.size() + " received " + message.getClass().getName(), new Exception("Received from")); - } - } + private TunnelInfo _tunnel; + private List _messages; + private boolean _successCompleted; + private SessionKey _wrappedKey; + private Set _wrappedTags; + private PublicKey _wrappedTo; + private long _started; + + public Success(TunnelInfo tunnel, SessionKey wrappedKey, Set wrappedTags, PublicKey wrappedTo) { + super(RequestTunnelJob.this._context); + _tunnel = tunnel; + _messages = new LinkedList(); + _successCompleted = false; + _wrappedKey = wrappedKey; + _wrappedTags = wrappedTags; + _wrappedTo = wrappedTo; + _started = _context.clock().now(); + } + + public String getName() { return "Create Tunnel Status Received"; } + public void runJob() { + List toProc = null; + synchronized (_messages) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("# messages received for successs: " + _messages.size()); + toProc = new ArrayList(_messages); + _messages.clear(); + } + + long responseTime = _context.clock().now() - _started; + for (Iterator iter = toProc.iterator(); iter.hasNext(); ) { + I2NPMessage msg = (I2NPMessage)iter.next(); + process(msg, responseTime); + } + } + + private void process(I2NPMessage message, long responseTime) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Running success status job (tunnel = " + _tunnel + " msg = " + message + ")"); + if (message.getType() == DeliveryStatusMessage.MESSAGE_TYPE) { + if (_log.shouldLog(Log.INFO)) + _log.info("Tunnel 
creation message acknowledged for tunnel " + _tunnel.getTunnelId() + " at router " + _tunnel.getThisHop().toBase64()); + } else { + TunnelCreateStatusMessage msg = (TunnelCreateStatusMessage)message; + if (_successCompleted) { + _log.info("Already completed in the Success task [skipping " + msg.getStatus() + "]"); + return; + } + switch (msg.getStatus()) { + case TunnelCreateStatusMessage.STATUS_FAILED_CERTIFICATE: + case TunnelCreateStatusMessage.STATUS_FAILED_DELETED: + case TunnelCreateStatusMessage.STATUS_FAILED_DUPLICATE_ID: + case TunnelCreateStatusMessage.STATUS_FAILED_OVERLOADED: + if (_log.shouldLog(Log.WARN)) + _log.warn("Tunnel creation failed for tunnel " + _tunnel.getTunnelId() + " at router " + _tunnel.getThisHop().toBase64() + " with status " + msg.getStatus()); + _context.profileManager().tunnelRejected(_tunnel.getThisHop(), responseTime); + Success.this._context.messageHistory().tunnelRejected(_tunnel.getThisHop(), _tunnel.getTunnelId(), null, "refused"); + fail(); + _successCompleted = true; + break; + case TunnelCreateStatusMessage.STATUS_SUCCESS: + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Tunnel creation succeeded for tunnel " + _tunnel.getTunnelId() + " at router " + _tunnel.getThisHop().toBase64()); + + if ( (_wrappedKey != null) && (_wrappedKey.getData() != null) && (_wrappedTags != null) && (_wrappedTags.size() > 0) && (_wrappedTo != null) ) { + Success.this._context.sessionKeyManager().tagsDelivered(_wrappedTo, _wrappedKey, _wrappedTags); + if (_log.shouldLog(Log.INFO)) + _log.info("Delivered tags successfully to " + _tunnel.getThisHop().toBase64() + "! 
# tags: " + _wrappedTags.size()); + } + + _tunnel.setIsReady(true); + _context.profileManager().tunnelJoined(_tunnel.getThisHop(), responseTime); + peerSuccess(_tunnel); + _successCompleted = true; + break; + } + } + } + + public void setMessage(I2NPMessage message) { + synchronized (_messages) { + _messages.add(message); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Reply message " + _messages.size() + " received " + message.getClass().getName(), new Exception("Received from")); + } + } } private class Failure extends JobImpl { - private TunnelInfo _tunnel; - private Hash _replyThrough; - private long _started; - public Failure(TunnelInfo tunnel, Hash replyThrough) { - _tunnel = tunnel; - _replyThrough = replyThrough; - _started = Clock.getInstance().now(); - } - - public String getName() { return "Create Tunnel Failed"; } - public void runJob() { - // update the tunnel so its known to be not working - if (_log.shouldLog(Log.WARN)) - _log.warn("Tunnel creation timed out for tunnel " + _tunnel.getTunnelId() + " at router " + _tunnel.getThisHop().toBase64() + " with expiration " + new Date(_expiration)); - synchronized (_failedTunnelParticipants) { - _failedTunnelParticipants.add(_tunnel.getThisHop()); - _failedTunnelParticipants.add(_replyThrough); - } - MessageHistory.getInstance().tunnelRequestTimedOut(_tunnel.getThisHop(), _tunnel.getTunnelId(), _replyThrough); - // perhaps not an explicit reject, but an implicit one (due to overload & dropped messages, etc) - ProfileManager.getInstance().tunnelRejected(_tunnel.getThisHop(), Clock.getInstance().now() - _started); - ProfileManager.getInstance().messageFailed(_tunnel.getThisHop()); - StatManager.getInstance().updateFrequency("tunnel.buildFailFrequency"); - fail(); - } + private TunnelInfo _tunnel; + private Hash _replyThrough; + private long _started; + public Failure(TunnelInfo tunnel, Hash replyThrough) { + super(RequestTunnelJob.this._context); + _tunnel = tunnel; + _replyThrough = replyThrough; + _started = 
_context.clock().now(); + } + + public String getName() { return "Create Tunnel Failed"; } + public void runJob() { + // update the tunnel so its known to be not working + if (_log.shouldLog(Log.WARN)) + _log.warn("Tunnel creation timed out for tunnel " + _tunnel.getTunnelId() + " at router " + _tunnel.getThisHop().toBase64() + " with expiration " + new Date(_expiration)); + synchronized (_failedTunnelParticipants) { + _failedTunnelParticipants.add(_tunnel.getThisHop()); + _failedTunnelParticipants.add(_replyThrough); + } + Failure.this._context.messageHistory().tunnelRequestTimedOut(_tunnel.getThisHop(), _tunnel.getTunnelId(), _replyThrough); + // perhaps not an explicit reject, but an implicit one (due to overload & dropped messages, etc) + _context.profileManager().tunnelRejected(_tunnel.getThisHop(), _context.clock().now() - _started); + _context.profileManager().messageFailed(_tunnel.getThisHop()); + Failure.this._context.statManager().updateFrequency("tunnel.buildFailFrequency"); + fail(); + } } private class Selector implements MessageSelector { - private TunnelInfo _tunnel; - private long _ackId; - private boolean _statusFound; - private boolean _ackFound; - - public Selector(TunnelInfo tunnel, long ackId) { - _tunnel = tunnel; - _ackId = ackId; - _statusFound = false; - _ackFound = false; - } - - public boolean continueMatching() { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("ContinueMatching looking for tunnel " + _tunnel.getTunnelId().getTunnelId() + " from " + _tunnel.getThisHop().toBase64() + ": found? " + _statusFound + " ackFound? " + _ackFound); - return !_statusFound || !_ackFound; - //return !_statusFound; // who cares about the ack if we get the status OK? 
- } - public long getExpiration() { return _expiration; } - public boolean isMatch(I2NPMessage message) { - if (message.getType() == TunnelCreateStatusMessage.MESSAGE_TYPE) { - TunnelCreateStatusMessage msg = (TunnelCreateStatusMessage)message; - if (_tunnel.getThisHop().equals(msg.getFromHash())) { - if (_tunnel.getTunnelId().equals(msg.getTunnelId())) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Matches the tunnel create status message"); - _statusFound = true; - return true; - } else { - // hmm another tunnel through the peer... - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Status message from peer [" + msg.getFromHash().toBase64() + "], with wrong tunnelId [" + msg.getTunnelId() + "] not [" + _tunnel.getTunnelId().getTunnelId() + "]"); - return false; - } - } else { - // status message but from the wrong peer - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Status message from the wrong peer [" + msg.getFromHash().toBase64() + "], not [" + _tunnel.getThisHop().toBase64() + "]"); - return false; - } - } else if (message.getType() == DeliveryStatusMessage.MESSAGE_TYPE) { - if (((DeliveryStatusMessage)message).getMessageId() == _ackId) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Matches the ping message tied to the tunnel create status message"); - _ackFound = true; - return true; - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Message is a delivery status message, but with the wrong id"); - return false; - } - } else { - //_log.debug("Message " + message.getClass().getName() + " is not a delivery status or tunnel create status message [waiting for ok for tunnel " + _tunnel.getTunnelId() + " so we can fire " + _onCreated + "]"); - return false; - } - } - - public String toString() { return "Build Tunnel Job Selector for tunnel " + _tunnel.getTunnelId().getTunnelId() + " at " + _tunnel.getThisHop().toBase64() + " [found=" + _statusFound + ", ack=" + _ackFound + "] (@" + (new Date(getExpiration())) + ")"; } + private TunnelInfo _tunnel; + private 
long _ackId; + private boolean _statusFound; + private boolean _ackFound; + + public Selector(TunnelInfo tunnel, long ackId) { + _tunnel = tunnel; + _ackId = ackId; + _statusFound = false; + _ackFound = false; + } + + public boolean continueMatching() { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("ContinueMatching looking for tunnel " + _tunnel.getTunnelId().getTunnelId() + " from " + _tunnel.getThisHop().toBase64() + ": found? " + _statusFound + " ackFound? " + _ackFound); + return !_statusFound || !_ackFound; + //return !_statusFound; // who cares about the ack if we get the status OK? + } + public long getExpiration() { return _expiration; } + public boolean isMatch(I2NPMessage message) { + if (message.getType() == TunnelCreateStatusMessage.MESSAGE_TYPE) { + TunnelCreateStatusMessage msg = (TunnelCreateStatusMessage)message; + if (_tunnel.getThisHop().equals(msg.getFromHash())) { + if (_tunnel.getTunnelId().equals(msg.getTunnelId())) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Matches the tunnel create status message"); + _statusFound = true; + return true; + } else { + // hmm another tunnel through the peer... 
+ if (_log.shouldLog(Log.DEBUG)) + _log.debug("Status message from peer [" + msg.getFromHash().toBase64() + "], with wrong tunnelId [" + msg.getTunnelId() + "] not [" + _tunnel.getTunnelId().getTunnelId() + "]"); + return false; + } + } else { + // status message but from the wrong peer + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Status message from the wrong peer [" + msg.getFromHash().toBase64() + "], not [" + _tunnel.getThisHop().toBase64() + "]"); + return false; + } + } else if (message.getType() == DeliveryStatusMessage.MESSAGE_TYPE) { + if (((DeliveryStatusMessage)message).getMessageId() == _ackId) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Matches the ping message tied to the tunnel create status message"); + _ackFound = true; + return true; + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Message is a delivery status message, but with the wrong id"); + return false; + } + } else { + //_log.debug("Message " + message.getClass().getName() + " is not a delivery status or tunnel create status message [waiting for ok for tunnel " + _tunnel.getTunnelId() + " so we can fire " + _onCreated + "]"); + return false; + } + } + + public String toString() { return "Build Tunnel Job Selector for tunnel " + _tunnel.getTunnelId().getTunnelId() + " at " + _tunnel.getThisHop().toBase64() + " [found=" + _statusFound + ", ack=" + _ackFound + "] (@" + (new Date(getExpiration())) + ")"; } } } diff --git a/router/java/src/net/i2p/router/tunnelmanager/TestTunnelJob.java b/router/java/src/net/i2p/router/tunnelmanager/TestTunnelJob.java index d59954bf4..c4429550d 100644 --- a/router/java/src/net/i2p/router/tunnelmanager/TestTunnelJob.java +++ b/router/java/src/net/i2p/router/tunnelmanager/TestTunnelJob.java @@ -1,9 +1,9 @@ package net.i2p.router.tunnelmanager; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. 
- * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. * */ @@ -33,46 +33,48 @@ import net.i2p.router.message.SendTunnelMessageJob; import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.router.RouterContext; class TestTunnelJob extends JobImpl { - private final static Log _log = new Log(TestTunnelJob.class); + private Log _log; private TunnelId _id; private TunnelPool _pool; private long _nonce; - public TestTunnelJob(TunnelId id, TunnelPool pool) { - super(); - _id = id; - _pool = pool; - _nonce = RandomSource.getInstance().nextInt(Integer.MAX_VALUE); + public TestTunnelJob(RouterContext ctx, TunnelId id, TunnelPool pool) { + super(ctx); + _log = ctx.logManager().getLog(TestTunnelJob.class); + _id = id; + _pool = pool; + _nonce = ctx.random().nextInt(Integer.MAX_VALUE); } public String getName() { return "Test Tunnel"; } public void runJob() { - if (_log.shouldLog(Log.INFO)) - _log.info("Testing tunnel " + _id.getTunnelId()); - TunnelInfo info = _pool.getTunnelInfo(_id); - if (info == null) { - _log.error("wtf, why are we testing a tunnel that we do not know about? [" + _id.getTunnelId() + "]", getAddedBy()); - return; - } - if (isOutbound(info)) { - testOutbound(info); - } else { - testInbound(info); - } + if (_log.shouldLog(Log.INFO)) + _log.info("Testing tunnel " + _id.getTunnelId()); + TunnelInfo info = _pool.getTunnelInfo(_id); + if (info == null) { + _log.error("wtf, why are we testing a tunnel that we do not know about? 
[" + _id.getTunnelId() + "]", getAddedBy()); + return; + } + if (isOutbound(info)) { + testOutbound(info); + } else { + testInbound(info); + } } private boolean isOutbound(TunnelInfo info) { - if (info == null) { - _log.error("wtf, null info?", new Exception("Who checked a null tunnel info?")); - return false; - } - if (Router.getInstance().getRouterInfo().getIdentity().getHash().equals(info.getThisHop())) - return true; - else - return false; + if (info == null) { + _log.error("wtf, null info?", new Exception("Who checked a null tunnel info?")); + return false; + } + if (_context.routerHash().equals(info.getThisHop())) + return true; + else + return false; } - + private final static long TEST_TIMEOUT = 60*1000; // 60 seconds for a test to succeed private final static int TEST_PRIORITY = 100; @@ -81,21 +83,21 @@ class TestTunnelJob extends JobImpl { * to ourselves and wait for it to arrive. */ private void testOutbound(TunnelInfo info) { - if (_log.shouldLog(Log.INFO)) - _log.info("Testing outbound tunnel " + info); - DeliveryStatusMessage msg = new DeliveryStatusMessage(); - msg.setArrival(new Date(Clock.getInstance().now())); - msg.setMessageId(_nonce); - Hash us = Router.getInstance().getRouterInfo().getIdentity().getHash(); - TunnelId inboundTunnelId = getReplyTunnel(); - if (inboundTunnelId == null) { - return; - } - - TestFailedJob failureJob = new TestFailedJob(); - MessageSelector selector = new TestMessageSelector(msg.getMessageId(), info.getTunnelId().getTunnelId()); - SendTunnelMessageJob testJob = new SendTunnelMessageJob(msg, info.getTunnelId(), us, inboundTunnelId, null, new TestSuccessfulJob(), failureJob, selector, TEST_TIMEOUT, TEST_PRIORITY); - JobQueue.getInstance().addJob(testJob); + if (_log.shouldLog(Log.INFO)) + _log.info("Testing outbound tunnel " + info); + DeliveryStatusMessage msg = new DeliveryStatusMessage(_context); + msg.setArrival(new Date(_context.clock().now())); + msg.setMessageId(_nonce); + Hash us = _context.routerHash(); + 
TunnelId inboundTunnelId = getReplyTunnel(); + if (inboundTunnelId == null) { + return; + } + + TestFailedJob failureJob = new TestFailedJob(); + MessageSelector selector = new TestMessageSelector(msg.getMessageId(), info.getTunnelId().getTunnelId()); + SendTunnelMessageJob testJob = new SendTunnelMessageJob(_context, msg, info.getTunnelId(), us, inboundTunnelId, null, new TestSuccessfulJob(), failureJob, selector, TEST_TIMEOUT, TEST_PRIORITY); + _context.jobQueue().addJob(testJob); } /** @@ -103,148 +105,148 @@ class TestTunnelJob extends JobImpl { * */ private TunnelId getReplyTunnel() { - TunnelSelectionCriteria crit = new TunnelSelectionCriteria(); - crit.setMinimumTunnelsRequired(2); - crit.setMaximumTunnelsRequired(2); - // arbitrary priorities - crit.setAnonymityPriority(50); - crit.setLatencyPriority(50); - crit.setReliabilityPriority(50); - List tunnelIds = TunnelManagerFacade.getInstance().selectInboundTunnelIds(crit); - - for (int i = 0; i < tunnelIds.size(); i++) { - TunnelId id = (TunnelId)tunnelIds.get(i); - if (id.equals(_id)) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Not testing a tunnel with itself [duh]"); - } else { - return id; - } - } - - _log.error("Unable to test tunnel " + _id + ", since there are NO OTHER INBOUND TUNNELS to receive the ack through"); - return null; + TunnelSelectionCriteria crit = new TunnelSelectionCriteria(); + crit.setMinimumTunnelsRequired(2); + crit.setMaximumTunnelsRequired(2); + // arbitrary priorities + crit.setAnonymityPriority(50); + crit.setLatencyPriority(50); + crit.setReliabilityPriority(50); + List tunnelIds = _context.tunnelManager().selectInboundTunnelIds(crit); + + for (int i = 0; i < tunnelIds.size(); i++) { + TunnelId id = (TunnelId)tunnelIds.get(i); + if (id.equals(_id)) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Not testing a tunnel with itself [duh]"); + } else { + return id; + } + } + + _log.error("Unable to test tunnel " + _id + ", since there are NO OTHER INBOUND TUNNELS to receive the 
ack through"); + return null; } /** * Send a message to the gateway and wait for it to arrive. - * todo: send the message to the gateway via an outbound tunnel or garlic, NOT DIRECT. + * todo: send the message to the gateway via an outbound tunnel or garlic, NOT DIRECT. */ private void testInbound(TunnelInfo info) { - if (_log.shouldLog(Log.INFO)) - _log.info("Testing inbound tunnel " + info); - DeliveryStatusMessage msg = new DeliveryStatusMessage(); - msg.setArrival(new Date(Clock.getInstance().now())); - msg.setMessageId(_nonce); - TestFailedJob failureJob = new TestFailedJob(); - MessageSelector selector = new TestMessageSelector(msg.getMessageId(), info.getTunnelId().getTunnelId()); - TunnelMessage tmsg = new TunnelMessage(); - try { - ByteArrayOutputStream baos = new ByteArrayOutputStream(1024); - msg.writeBytes(baos); - tmsg.setData(baos.toByteArray()); - tmsg.setTunnelId(info.getTunnelId()); - JobQueue.getInstance().addJob(new SendMessageDirectJob(tmsg, info.getThisHop(), new TestSuccessfulJob(), failureJob, selector, Clock.getInstance().now() + TEST_TIMEOUT, TEST_PRIORITY)); - - String bodyType = msg.getClass().getName(); - MessageHistory.getInstance().wrap(bodyType, msg.getUniqueId(), TunnelMessage.class.getName(), tmsg.getUniqueId()); - } catch (IOException ioe) { - _log.error("Error writing out the tunnel message to send to the tunnel", ioe); - _pool.tunnelFailed(_id); - } catch (DataFormatException dfe) { - _log.error("Error writing out the tunnel message to send to the tunnel", dfe); - _pool.tunnelFailed(_id); - } + if (_log.shouldLog(Log.INFO)) + _log.info("Testing inbound tunnel " + info); + DeliveryStatusMessage msg = new DeliveryStatusMessage(_context); + msg.setArrival(new Date(_context.clock().now())); + msg.setMessageId(_nonce); + TestFailedJob failureJob = new TestFailedJob(); + MessageSelector selector = new TestMessageSelector(msg.getMessageId(), info.getTunnelId().getTunnelId()); + TunnelMessage tmsg = new TunnelMessage(_context); + try { + 
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024); + msg.writeBytes(baos); + tmsg.setData(baos.toByteArray()); + tmsg.setTunnelId(info.getTunnelId()); + _context.jobQueue().addJob(new SendMessageDirectJob(_context, tmsg, info.getThisHop(), new TestSuccessfulJob(), failureJob, selector, _context.clock().now() + TEST_TIMEOUT, TEST_PRIORITY)); + + String bodyType = msg.getClass().getName(); + _context.messageHistory().wrap(bodyType, msg.getUniqueId(), TunnelMessage.class.getName(), tmsg.getUniqueId()); + } catch (IOException ioe) { + _log.error("Error writing out the tunnel message to send to the tunnel", ioe); + _pool.tunnelFailed(_id); + } catch (DataFormatException dfe) { + _log.error("Error writing out the tunnel message to send to the tunnel", dfe); + _pool.tunnelFailed(_id); + } } private class TestFailedJob extends JobImpl { - public TestFailedJob() { - super(); - } - - public String getName() { return "Tunnel Test Failed"; } - public void runJob() { - if (_log.shouldLog(Log.WARN)) - _log.warn("Test of tunnel " + _id.getTunnelId() + " failed while waiting for nonce " + _nonce, getAddedBy()); - _pool.tunnelFailed(_id); - } + public TestFailedJob() { + super(TestTunnelJob.this._context); + } + + public String getName() { return "Tunnel Test Failed"; } + public void runJob() { + if (_log.shouldLog(Log.WARN)) + _log.warn("Test of tunnel " + _id.getTunnelId() + " failed while waiting for nonce " + _nonce, getAddedBy()); + _pool.tunnelFailed(_id); + } } private class TestSuccessfulJob extends JobImpl implements ReplyJob { - private DeliveryStatusMessage _msg; - public TestSuccessfulJob() { - super(); - _msg = null; - } - - public String getName() { return "Tunnel Test Successful"; } - public void runJob() { - long time = (Clock.getInstance().now() - _msg.getArrival().getTime()); - if (_log.shouldLog(Log.INFO)) - _log.info("Test of tunnel " + _id+ " successfull after " + time + "ms waiting for " + _nonce); - TunnelInfo info = _pool.getTunnelInfo(_id); - if 
(info != null) - MessageHistory.getInstance().tunnelValid(info, time); - } - - public void setMessage(I2NPMessage message) { - _msg = (DeliveryStatusMessage)message; - } + private DeliveryStatusMessage _msg; + public TestSuccessfulJob() { + super(TestTunnelJob.this._context); + _msg = null; + } + + public String getName() { return "Tunnel Test Successful"; } + public void runJob() { + long time = (_context.clock().now() - _msg.getArrival().getTime()); + if (_log.shouldLog(Log.INFO)) + _log.info("Test of tunnel " + _id+ " successfull after " + time + "ms waiting for " + _nonce); + TunnelInfo info = _pool.getTunnelInfo(_id); + if (info != null) + TestTunnelJob.this._context.messageHistory().tunnelValid(info, time); + } + + public void setMessage(I2NPMessage message) { + _msg = (DeliveryStatusMessage)message; + } } - private static class TestMessageSelector implements MessageSelector { - private long _id; - private long _tunnelId; - private boolean _found; - private long _expiration; - public TestMessageSelector(long id, long tunnelId) { - _id = id; - _tunnelId = tunnelId; - _found = false; - _expiration = Clock.getInstance().now() + TEST_TIMEOUT; - if (_log.shouldLog(Log.DEBUG)) - _log.debug("the expiration while testing tunnel " + tunnelId + " waiting for nonce " + id + ": " + new Date(_expiration)); - } - public boolean continueMatching() { - if (!_found) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Continue matching while looking for nonce for tunnel " + _tunnelId); - } else { - if (_log.shouldLog(Log.INFO)) - _log.info("Don't continue matching for tunnel " + _tunnelId + " / " + _id); - } - return !_found; - } - public long getExpiration() { - if (_expiration < Clock.getInstance().now()) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("EXPIRED while looking for nonce " + _id + " for tunnel " + _tunnelId); - } - return _expiration; - } - public boolean isMatch(I2NPMessage message) { - if ( (message != null) && (message instanceof DeliveryStatusMessage) ) { - 
DeliveryStatusMessage msg = (DeliveryStatusMessage)message; - if (msg.getMessageId() == _id) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Found successful test of tunnel " + _tunnelId + " after " + (Clock.getInstance().now() - msg.getArrival().getTime()) + "ms waiting for " + _id); - _found = true; - return true; - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Found a delivery status message, but it contains nonce " + msg.getMessageId() + " and not " + _id); - } - } else { - //_log.debug("Not a match while looking to test tunnel " + _tunnelId + " with nonce " + _id + " (" + message + ")"); - } - return false; - } - public String toString() { - StringBuffer buf = new StringBuffer(256); - buf.append(super.toString()); - buf.append(": TestMessageSelector: tunnel ").append(_tunnelId).append(" looking for ").append(_id).append(" expiring on "); - buf.append(new Date(_expiration)); - return buf.toString(); - } + private class TestMessageSelector implements MessageSelector { + private long _id; + private long _tunnelId; + private boolean _found; + private long _expiration; + public TestMessageSelector(long id, long tunnelId) { + _id = id; + _tunnelId = tunnelId; + _found = false; + _expiration = _context.clock().now() + TEST_TIMEOUT; + if (_log.shouldLog(Log.DEBUG)) + _log.debug("the expiration while testing tunnel " + tunnelId + " waiting for nonce " + id + ": " + new Date(_expiration)); + } + public boolean continueMatching() { + if (!_found) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Continue matching while looking for nonce for tunnel " + _tunnelId); + } else { + if (_log.shouldLog(Log.INFO)) + _log.info("Don't continue matching for tunnel " + _tunnelId + " / " + _id); + } + return !_found; + } + public long getExpiration() { + if (_expiration < _context.clock().now()) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("EXPIRED while looking for nonce " + _id + " for tunnel " + _tunnelId); + } + return _expiration; + } + public boolean 
isMatch(I2NPMessage message) { + if ( (message != null) && (message instanceof DeliveryStatusMessage) ) { + DeliveryStatusMessage msg = (DeliveryStatusMessage)message; + if (msg.getMessageId() == _id) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Found successful test of tunnel " + _tunnelId + " after " + (_context.clock().now() - msg.getArrival().getTime()) + "ms waiting for " + _id); + _found = true; + return true; + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Found a delivery status message, but it contains nonce " + msg.getMessageId() + " and not " + _id); + } + } else { + //_log.debug("Not a match while looking to test tunnel " + _tunnelId + " with nonce " + _id + " (" + message + ")"); + } + return false; + } + public String toString() { + StringBuffer buf = new StringBuffer(256); + buf.append(super.toString()); + buf.append(": TestMessageSelector: tunnel ").append(_tunnelId).append(" looking for ").append(_id).append(" expiring on "); + buf.append(new Date(_expiration)); + return buf.toString(); + } } } diff --git a/router/java/src/net/i2p/router/tunnelmanager/TunnelBuilder.java b/router/java/src/net/i2p/router/tunnelmanager/TunnelBuilder.java index c939b023d..e2437793b 100644 --- a/router/java/src/net/i2p/router/tunnelmanager/TunnelBuilder.java +++ b/router/java/src/net/i2p/router/tunnelmanager/TunnelBuilder.java @@ -1,9 +1,9 @@ package net.i2p.router.tunnelmanager; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -31,11 +31,11 @@ import net.i2p.router.TunnelSettings; import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.router.RouterContext; class TunnelBuilder { - private final static Log _log = new Log(TunnelBuilder.class); - private final static TunnelBuilder _instance = new TunnelBuilder(); - public final static TunnelBuilder getInstance() { return _instance; } + private Log _log; + private RouterContext _context; private final static long DEFAULT_TUNNEL_DURATION = 10*60*1000; // 10 minutes /** @@ -44,52 +44,57 @@ class TunnelBuilder { private final static int PROBABILITY_LOCAL = -1; private final static int PROBABILITY_LOCAL_SCALE = 10; + public TunnelBuilder(RouterContext context) { + _context = context; + _log = context.logManager().getLog(TunnelBuilder.class); + } + public TunnelInfo configureInboundTunnel(Destination dest, ClientTunnelSettings settings) { - return configureInboundTunnel(dest, settings, false); + return configureInboundTunnel(dest, settings, false); } public TunnelInfo configureInboundTunnel(Destination dest, ClientTunnelSettings settings, boolean useFake) { - boolean randFake = (RandomSource.getInstance().nextInt(PROBABILITY_LOCAL_SCALE) <= PROBABILITY_LOCAL); - List peerLists = null; - if (useFake || randFake) { - peerLists = new ArrayList(0); - } else { - List peerHashes = selectInboundPeers(1, settings.getDepthInbound()); - peerLists = randomizeLists(peerHashes, 1, settings.getDepthInbound()); - } - if (peerLists.size() <= 0) { - _log.info("Configuring local inbound tunnel"); - return configureInboundTunnel(dest, settings, new ArrayList()); - } else { - List peerHashList = (List)peerLists.get(0); - return configureInboundTunnel(dest, settings, peerHashList); - } + boolean randFake = (_context.random().nextInt(PROBABILITY_LOCAL_SCALE) <= PROBABILITY_LOCAL); + List peerLists = null; + if (useFake || randFake) { + peerLists = new ArrayList(0); + } else { + List peerHashes = 
selectInboundPeers(1, settings.getDepthInbound()); + peerLists = randomizeLists(peerHashes, 1, settings.getDepthInbound()); + } + if (peerLists.size() <= 0) { + _log.info("Configuring local inbound tunnel"); + return configureInboundTunnel(dest, settings, new ArrayList()); + } else { + List peerHashList = (List)peerLists.get(0); + return configureInboundTunnel(dest, settings, peerHashList); + } } public TunnelInfo configureOutboundTunnel(ClientTunnelSettings settings) { - return configureOutboundTunnel(settings, false); + return configureOutboundTunnel(settings, false); } public TunnelInfo configureOutboundTunnel(ClientTunnelSettings settings, boolean useFake) { - boolean randFake = (RandomSource.getInstance().nextInt(PROBABILITY_LOCAL_SCALE) <= PROBABILITY_LOCAL); - List peerLists = null; - if (useFake || randFake) { - peerLists = new ArrayList(0); - } else { - List peerHashes = selectOutboundPeers(1, settings.getDepthOutbound()); - peerLists = randomizeLists(peerHashes, 1, settings.getDepthOutbound()); - } - if (peerLists.size() <= 0) { - _log.info("Configuring local outbound tunnel"); - return configureOutboundTunnel(settings, new ArrayList()); - } else { - List peerHashList = (List)peerLists.get(0); - return configureOutboundTunnel(settings, peerHashList); - } + boolean randFake = (_context.random().nextInt(PROBABILITY_LOCAL_SCALE) <= PROBABILITY_LOCAL); + List peerLists = null; + if (useFake || randFake) { + peerLists = new ArrayList(0); + } else { + List peerHashes = selectOutboundPeers(1, settings.getDepthOutbound()); + peerLists = randomizeLists(peerHashes, 1, settings.getDepthOutbound()); + } + if (peerLists.size() <= 0) { + _log.info("Configuring local outbound tunnel"); + return configureOutboundTunnel(settings, new ArrayList()); + } else { + List peerHashList = (List)peerLists.get(0); + return configureOutboundTunnel(settings, peerHashList); + } } /** - * Select a series of participants for the inbound tunnel, define each of - * their operating 
characteristics, and return them as a chain of TunnelInfo - * structures. The first TunnelInfo in each chain is the inbound gateway + * Select a series of participants for the inbound tunnel, define each of + * their operating characteristics, and return them as a chain of TunnelInfo + * structures. The first TunnelInfo in each chain is the inbound gateway * to which the lease should be attached, and the last is the local router. * * @return set of TunnelInfo structures, where each value is the gateway of @@ -97,86 +102,86 @@ class TunnelBuilder { * via getNextHopInfo()) */ public Set configureInboundTunnels(Destination dest, ClientTunnelSettings settings) { - return configureInboundTunnels(dest, settings, false); + return configureInboundTunnels(dest, settings, false); } /** * @param useFake if true, make this tunnel include no remote peers (so it'll always succeed) * */ public Set configureInboundTunnels(Destination dest, ClientTunnelSettings settings, boolean useFake) { - Set tunnels = new HashSet(); - int numIn = settings.getNumInboundTunnels(); - if (numIn <= 0) { - _log.info("No inbound tunnels requested, but we're creating one anyway"); - numIn = 1; - } - List peerLists = null; - if (!useFake) { - List peerHashes = selectInboundPeers(numIn, settings.getDepthInbound()); - _log.debug("Peer hashes selected: " + peerHashes.size()); - peerLists = randomizeLists(peerHashes, settings.getNumInboundTunnels(), settings.getDepthInbound()); - } else { - peerLists = new ArrayList(0); - } - if (peerLists.size() <= 0) { - for (int i = 0; i < numIn; i++) { - TunnelInfo tunnel = configureInboundTunnel(dest, settings, new ArrayList()); - tunnels.add(tunnel); - _log.info("Dummy inbound tunnel " + tunnel.getTunnelId() + " configured (" + tunnel + ")"); - } - } else { - for (Iterator iter = peerLists.iterator(); iter.hasNext();) { - List peerList = (List)iter.next(); - TunnelInfo tunnel = configureInboundTunnel(dest, settings, peerList); - tunnels.add(tunnel); - _log.info("Real 
inbound tunnel " + tunnel.getTunnelId() + " configured (" + tunnel + ")"); - } - } - - return tunnels; + Set tunnels = new HashSet(); + int numIn = settings.getNumInboundTunnels(); + if (numIn <= 0) { + _log.info("No inbound tunnels requested, but we're creating one anyway"); + numIn = 1; + } + List peerLists = null; + if (!useFake) { + List peerHashes = selectInboundPeers(numIn, settings.getDepthInbound()); + _log.debug("Peer hashes selected: " + peerHashes.size()); + peerLists = randomizeLists(peerHashes, settings.getNumInboundTunnels(), settings.getDepthInbound()); + } else { + peerLists = new ArrayList(0); + } + if (peerLists.size() <= 0) { + for (int i = 0; i < numIn; i++) { + TunnelInfo tunnel = configureInboundTunnel(dest, settings, new ArrayList()); + tunnels.add(tunnel); + _log.info("Dummy inbound tunnel " + tunnel.getTunnelId() + " configured (" + tunnel + ")"); + } + } else { + for (Iterator iter = peerLists.iterator(); iter.hasNext();) { + List peerList = (List)iter.next(); + TunnelInfo tunnel = configureInboundTunnel(dest, settings, peerList); + tunnels.add(tunnel); + _log.info("Real inbound tunnel " + tunnel.getTunnelId() + " configured (" + tunnel + ")"); + } + } + + return tunnels; } public Set configureOutboundTunnels(ClientTunnelSettings settings) { - return configureOutboundTunnels(settings, false); + return configureOutboundTunnels(settings, false); } /** * @param useFake if true, make this tunnel include no remote peers (so it'll always succeed) * */ public Set configureOutboundTunnels(ClientTunnelSettings settings, boolean useFake) { - Set tunnels = new HashSet(); - - List peerLists = null; - if (!useFake) { - List peerHashes = selectOutboundPeers(settings.getNumOutboundTunnels(), settings.getDepthOutbound()); - _log.debug("Peer hashes selected: " + peerHashes.size()); - peerLists = randomizeLists(peerHashes, settings.getNumOutboundTunnels(), settings.getDepthOutbound()); - } else { - peerLists = new ArrayList(0); - } - if (peerLists.size() <= 
0) { - for (int i = 0; i < settings.getNumOutboundTunnels(); i++) { - TunnelInfo tunnel = configureOutboundTunnel(settings, new ArrayList()); - tunnels.add(tunnel); - _log.info("Dummy outbound tunnel " + tunnel.getTunnelId() + " configured (" + tunnel + ")"); - } - } else { - for (Iterator iter = peerLists.iterator(); iter.hasNext();) { - List peerList = (List)iter.next(); - TunnelInfo tunnel = configureOutboundTunnel(settings, peerList); - tunnels.add(tunnel); - _log.info("Real outbound tunnel " + tunnel.getTunnelId() + " configured (" + tunnel + ")"); - } - } - return tunnels; + Set tunnels = new HashSet(); + + List peerLists = null; + if (!useFake) { + List peerHashes = selectOutboundPeers(settings.getNumOutboundTunnels(), settings.getDepthOutbound()); + _log.debug("Peer hashes selected: " + peerHashes.size()); + peerLists = randomizeLists(peerHashes, settings.getNumOutboundTunnels(), settings.getDepthOutbound()); + } else { + peerLists = new ArrayList(0); + } + if (peerLists.size() <= 0) { + for (int i = 0; i < settings.getNumOutboundTunnels(); i++) { + TunnelInfo tunnel = configureOutboundTunnel(settings, new ArrayList()); + tunnels.add(tunnel); + _log.info("Dummy outbound tunnel " + tunnel.getTunnelId() + " configured (" + tunnel + ")"); + } + } else { + for (Iterator iter = peerLists.iterator(); iter.hasNext();) { + List peerList = (List)iter.next(); + TunnelInfo tunnel = configureOutboundTunnel(settings, peerList); + tunnels.add(tunnel); + _log.info("Real outbound tunnel " + tunnel.getTunnelId() + " configured (" + tunnel + ")"); + } + } + return tunnels; } private List selectInboundPeers(int numTunnels, int numPerTunnel) { - return selectPeers(numTunnels, numPerTunnel); + return selectPeers(numTunnels, numPerTunnel); } private List selectOutboundPeers(int numTunnels, int numPerTunnel) { - return selectPeers(numTunnels, numPerTunnel); + return selectPeers(numTunnels, numPerTunnel); } /** @@ -187,24 +192,24 @@ class TunnelBuilder { * */ private List 
selectPeers(int numTunnels, int numPerTunnel) { - PeerSelectionCriteria criteria = new PeerSelectionCriteria(); - int maxNeeded = numTunnels * numPerTunnel; - int minNeeded = numPerTunnel; - criteria.setMaximumRequired(maxNeeded); - criteria.setMinimumRequired(minNeeded); - criteria.setPurpose(PeerSelectionCriteria.PURPOSE_TUNNEL); - - List peers = PeerManagerFacade.getInstance().selectPeers(criteria); - List rv = new ArrayList(peers.size()); - for (Iterator iter = peers.iterator(); iter.hasNext(); ) { - Hash peer = (Hash)iter.next(); - if (null != NetworkDatabaseFacade.getInstance().lookupRouterInfoLocally(peer)) - rv.add(peer); - else { - _log.warn("peer manager selected a peer we don't know about - drop it"); - } - } - return rv; + PeerSelectionCriteria criteria = new PeerSelectionCriteria(); + int maxNeeded = numTunnels * numPerTunnel; + int minNeeded = numPerTunnel; + criteria.setMaximumRequired(maxNeeded); + criteria.setMinimumRequired(minNeeded); + criteria.setPurpose(PeerSelectionCriteria.PURPOSE_TUNNEL); + + List peers = _context.peerManager().selectPeers(criteria); + List rv = new ArrayList(peers.size()); + for (Iterator iter = peers.iterator(); iter.hasNext(); ) { + Hash peer = (Hash)iter.next(); + if (null != _context.netDb().lookupRouterInfoLocally(peer)) + rv.add(peer); + else { + _log.warn("peer manager selected a peer we don't know about - drop it"); + } + } + return rv; } /** @@ -217,31 +222,31 @@ class TunnelBuilder { * hence, an empty list) is a valid (albeit insecure) length */ private List randomizeLists(List peerHashes, int numTunnels, int numPerTunnel) { - List tunnels = new ArrayList(numTunnels); - - if (peerHashes.size() == 0) { - _log.info("No peer hashes provided"); - return tunnels; - } else { - _log.info("# peers randomizing: " + peerHashes + " into " + numTunnels + " tunnels"); - } - - for (int i = 0; i < numTunnels; i++) { - int startOn = RandomSource.getInstance().nextInt(peerHashes.size()); - List peers = new ArrayList(); - for (int 
j = 0; j < numPerTunnel; j++) { - int k = (j + startOn) % peerHashes.size(); - Hash peer = (Hash)peerHashes.get(k); - if (!peers.contains(peer)) - peers.add(peer); - } - _log.info("Tunnel " + i + " [" + numPerTunnel + "/(" + startOn+ ")]: " + peers); - tunnels.add(peers); - } - - _log.info("Tunnels: " + tunnels); - - return tunnels; + List tunnels = new ArrayList(numTunnels); + + if (peerHashes.size() == 0) { + _log.info("No peer hashes provided"); + return tunnels; + } else { + _log.info("# peers randomizing: " + peerHashes + " into " + numTunnels + " tunnels"); + } + + for (int i = 0; i < numTunnels; i++) { + int startOn = _context.random().nextInt(peerHashes.size()); + List peers = new ArrayList(); + for (int j = 0; j < numPerTunnel; j++) { + int k = (j + startOn) % peerHashes.size(); + Hash peer = (Hash)peerHashes.get(k); + if (!peers.contains(peer)) + peers.add(peer); + } + _log.info("Tunnel " + i + " [" + numPerTunnel + "/(" + startOn+ ")]: " + peers); + tunnels.add(peers); + } + + _log.info("Tunnels: " + tunnels); + + return tunnels; } /** @@ -250,77 +255,77 @@ class TunnelBuilder { * router */ private TunnelInfo configureInboundTunnel(Destination dest, ClientTunnelSettings settings, List peerHashList) { - SessionKey encryptionKey = KeyGenerator.getInstance().generateSessionKey(); - Object kp[] = KeyGenerator.getInstance().generateSigningKeypair(); - SigningPublicKey pubkey = (SigningPublicKey)kp[0]; - SigningPrivateKey privkey = (SigningPrivateKey)kp[1]; - - long duration = settings.getInboundDuration(); - if (duration <= 0) - duration = DEFAULT_TUNNEL_DURATION; - long expiration = Clock.getInstance().now() + duration; - - TunnelSettings tunnelSettings = new TunnelSettings(); - tunnelSettings.setBytesPerMinuteAverage(settings.getBytesPerMinuteInboundAverage()); - tunnelSettings.setBytesPerMinutePeak(settings.getBytesPerMinuteInboundPeak()); - tunnelSettings.setDepth(peerHashList.size()+1); - tunnelSettings.setExpiration(expiration); - 
tunnelSettings.setIncludeDummy(settings.getIncludeDummyInbound()); - tunnelSettings.setMessagesPerMinuteAverage(settings.getMessagesPerMinuteInboundAverage()); - tunnelSettings.setMessagesPerMinutePeak(settings.getMessagesPerMinuteInboundPeak()); - tunnelSettings.setReorder(settings.getReorderInbound()); - - TunnelId id = new TunnelId(); - id.setTunnelId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE)); - id.setType(TunnelId.TYPE_INBOUND); - - TunnelInfo first = null; - TunnelInfo prev = null; - for (int i = 0; i < peerHashList.size(); i++) { - Hash peer = (Hash)peerHashList.get(i); - TunnelInfo cur = new TunnelInfo(); - cur.setThisHop(peer); - cur.setConfigurationKey(KeyGenerator.getInstance().generateSessionKey()); - cur.setDestination(null); - if (i == 0) { - // gateway - cur.setEncryptionKey(encryptionKey); - cur.setSigningKey(privkey); - } - cur.setSettings(tunnelSettings); - cur.setTunnelId(id); - cur.setVerificationKey(pubkey); - - if (prev != null) { - prev.setNextHop(peer); - prev.setNextHopInfo(cur); - } else { - first = cur; - } - prev = cur; - } - - TunnelInfo last = new TunnelInfo(); - last.setThisHop(Router.getInstance().getRouterInfo().getIdentity().getHash()); - last.setDestination(dest); - last.setEncryptionKey(encryptionKey); - last.setSettings(tunnelSettings); - last.setTunnelId(id); - last.setVerificationKey(pubkey); - last.setSigningKey(privkey); - last.setConfigurationKey(KeyGenerator.getInstance().generateSessionKey()); - - TunnelInfo cur = first; - if (cur == null) { - first = last; - } else { - while (cur.getNextHopInfo() != null) - cur = cur.getNextHopInfo(); - cur.setNextHop(last.getThisHop()); - cur.setNextHopInfo(last); - } - - return first; + SessionKey encryptionKey = _context.keyGenerator().generateSessionKey(); + Object kp[] = _context.keyGenerator().generateSigningKeypair(); + SigningPublicKey pubkey = (SigningPublicKey)kp[0]; + SigningPrivateKey privkey = (SigningPrivateKey)kp[1]; + + long duration = 
settings.getInboundDuration(); + if (duration <= 0) + duration = DEFAULT_TUNNEL_DURATION; + long expiration = _context.clock().now() + duration; + + TunnelSettings tunnelSettings = new TunnelSettings(_context); + tunnelSettings.setBytesPerMinuteAverage(settings.getBytesPerMinuteInboundAverage()); + tunnelSettings.setBytesPerMinutePeak(settings.getBytesPerMinuteInboundPeak()); + tunnelSettings.setDepth(peerHashList.size()+1); + tunnelSettings.setExpiration(expiration); + tunnelSettings.setIncludeDummy(settings.getIncludeDummyInbound()); + tunnelSettings.setMessagesPerMinuteAverage(settings.getMessagesPerMinuteInboundAverage()); + tunnelSettings.setMessagesPerMinutePeak(settings.getMessagesPerMinuteInboundPeak()); + tunnelSettings.setReorder(settings.getReorderInbound()); + + TunnelId id = new TunnelId(); + id.setTunnelId(_context.random().nextInt(Integer.MAX_VALUE)); + id.setType(TunnelId.TYPE_INBOUND); + + TunnelInfo first = null; + TunnelInfo prev = null; + for (int i = 0; i < peerHashList.size(); i++) { + Hash peer = (Hash)peerHashList.get(i); + TunnelInfo cur = new TunnelInfo(_context); + cur.setThisHop(peer); + cur.setConfigurationKey(_context.keyGenerator().generateSessionKey()); + cur.setDestination(null); + if (i == 0) { + // gateway + cur.setEncryptionKey(encryptionKey); + cur.setSigningKey(privkey); + } + cur.setSettings(tunnelSettings); + cur.setTunnelId(id); + cur.setVerificationKey(pubkey); + + if (prev != null) { + prev.setNextHop(peer); + prev.setNextHopInfo(cur); + } else { + first = cur; + } + prev = cur; + } + + TunnelInfo last = new TunnelInfo(_context); + last.setThisHop(_context.routerHash()); + last.setDestination(dest); + last.setEncryptionKey(encryptionKey); + last.setSettings(tunnelSettings); + last.setTunnelId(id); + last.setVerificationKey(pubkey); + last.setSigningKey(privkey); + last.setConfigurationKey(_context.keyGenerator().generateSessionKey()); + + TunnelInfo cur = first; + if (cur == null) { + first = last; + } else { + while 
(cur.getNextHopInfo() != null) + cur = cur.getNextHopInfo(); + cur.setNextHop(last.getThisHop()); + cur.setNextHopInfo(last); + } + + return first; } @@ -329,60 +334,60 @@ class TunnelBuilder { * the supplied routers for each hop, starting with the current router */ private TunnelInfo configureOutboundTunnel(ClientTunnelSettings settings, List peerHashList) { - SessionKey encryptionKey = KeyGenerator.getInstance().generateSessionKey(); - Object kp[] = KeyGenerator.getInstance().generateSigningKeypair(); - SigningPublicKey pubkey = (SigningPublicKey)kp[0]; - SigningPrivateKey privkey = (SigningPrivateKey)kp[1]; - - long duration = settings.getInboundDuration(); // uses inbound duration for symmetry - if (duration <= 0) - duration = DEFAULT_TUNNEL_DURATION; - long expiration = Clock.getInstance().now() + duration; - - TunnelSettings tunnelSettings = new TunnelSettings(); - tunnelSettings.setBytesPerMinuteAverage(settings.getBytesPerMinuteInboundAverage()); - tunnelSettings.setBytesPerMinutePeak(settings.getBytesPerMinuteInboundPeak()); - tunnelSettings.setDepth(peerHashList.size()+1); - tunnelSettings.setExpiration(expiration); - tunnelSettings.setIncludeDummy(settings.getIncludeDummyInbound()); - tunnelSettings.setMessagesPerMinuteAverage(settings.getMessagesPerMinuteInboundAverage()); - tunnelSettings.setMessagesPerMinutePeak(settings.getMessagesPerMinuteInboundPeak()); - tunnelSettings.setReorder(settings.getReorderInbound()); - - TunnelId id = new TunnelId(); - id.setTunnelId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE)); - id.setType(TunnelId.TYPE_OUTBOUND); - - TunnelInfo first = new TunnelInfo(); - first.setThisHop(Router.getInstance().getRouterInfo().getIdentity().getHash()); - first.setDestination(null); - first.setEncryptionKey(encryptionKey); - first.setSettings(tunnelSettings); - first.setTunnelId(id); - first.setVerificationKey(pubkey); - first.setSigningKey(privkey); - first.setConfigurationKey(KeyGenerator.getInstance().generateSessionKey()); 
- - TunnelInfo prev = first; - for (int i = 0; i < peerHashList.size(); i++) { - Hash peer = (Hash)peerHashList.get(i); - TunnelInfo cur = new TunnelInfo(); - cur.setThisHop(peer); - cur.setConfigurationKey(KeyGenerator.getInstance().generateSessionKey()); - cur.setDestination(null); - if (i == peerHashList.size() -1) { - // endpoint - cur.setEncryptionKey(encryptionKey); - } - cur.setSettings(tunnelSettings); - cur.setTunnelId(id); - cur.setVerificationKey(pubkey); - - prev.setNextHop(peer); - prev.setNextHopInfo(cur); - prev = cur; - } - - return first; + SessionKey encryptionKey = _context.keyGenerator().generateSessionKey(); + Object kp[] = _context.keyGenerator().generateSigningKeypair(); + SigningPublicKey pubkey = (SigningPublicKey)kp[0]; + SigningPrivateKey privkey = (SigningPrivateKey)kp[1]; + + long duration = settings.getInboundDuration(); // uses inbound duration for symmetry + if (duration <= 0) + duration = DEFAULT_TUNNEL_DURATION; + long expiration = _context.clock().now() + duration; + + TunnelSettings tunnelSettings = new TunnelSettings(_context); + tunnelSettings.setBytesPerMinuteAverage(settings.getBytesPerMinuteInboundAverage()); + tunnelSettings.setBytesPerMinutePeak(settings.getBytesPerMinuteInboundPeak()); + tunnelSettings.setDepth(peerHashList.size()+1); + tunnelSettings.setExpiration(expiration); + tunnelSettings.setIncludeDummy(settings.getIncludeDummyInbound()); + tunnelSettings.setMessagesPerMinuteAverage(settings.getMessagesPerMinuteInboundAverage()); + tunnelSettings.setMessagesPerMinutePeak(settings.getMessagesPerMinuteInboundPeak()); + tunnelSettings.setReorder(settings.getReorderInbound()); + + TunnelId id = new TunnelId(); + id.setTunnelId(_context.random().nextInt(Integer.MAX_VALUE)); + id.setType(TunnelId.TYPE_OUTBOUND); + + TunnelInfo first = new TunnelInfo(_context); + first.setThisHop(_context.routerHash()); + first.setDestination(null); + first.setEncryptionKey(encryptionKey); + first.setSettings(tunnelSettings); + 
first.setTunnelId(id); + first.setVerificationKey(pubkey); + first.setSigningKey(privkey); + first.setConfigurationKey(_context.keyGenerator().generateSessionKey()); + + TunnelInfo prev = first; + for (int i = 0; i < peerHashList.size(); i++) { + Hash peer = (Hash)peerHashList.get(i); + TunnelInfo cur = new TunnelInfo(_context); + cur.setThisHop(peer); + cur.setConfigurationKey(_context.keyGenerator().generateSessionKey()); + cur.setDestination(null); + if (i == peerHashList.size() -1) { + // endpoint + cur.setEncryptionKey(encryptionKey); + } + cur.setSettings(tunnelSettings); + cur.setTunnelId(id); + cur.setVerificationKey(pubkey); + + prev.setNextHop(peer); + prev.setNextHopInfo(cur); + prev = cur; + } + + return first; } } diff --git a/router/java/src/net/i2p/router/tunnelmanager/TunnelCreateMessageHandler.java b/router/java/src/net/i2p/router/tunnelmanager/TunnelCreateMessageHandler.java index 5a11e3a94..371ad329e 100644 --- a/router/java/src/net/i2p/router/tunnelmanager/TunnelCreateMessageHandler.java +++ b/router/java/src/net/i2p/router/tunnelmanager/TunnelCreateMessageHandler.java @@ -1,9 +1,9 @@ package net.i2p.router.tunnelmanager; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -15,11 +15,15 @@ import net.i2p.data.i2np.SourceRouteBlock; import net.i2p.data.i2np.TunnelCreateMessage; import net.i2p.router.HandlerJobBuilder; import net.i2p.router.Job; +import net.i2p.router.RouterContext; class TunnelCreateMessageHandler implements HandlerJobBuilder { - + private RouterContext _context; + public TunnelCreateMessageHandler(RouterContext context) { + _context = context; + } public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock) { - return new HandleTunnelCreateMessageJob((TunnelCreateMessage)receivedMessage, from, fromHash, replyBlock); + return new HandleTunnelCreateMessageJob(_context, (TunnelCreateMessage)receivedMessage, from, fromHash, replyBlock); } } diff --git a/router/java/src/net/i2p/router/tunnelmanager/TunnelPool.java b/router/java/src/net/i2p/router/tunnelmanager/TunnelPool.java index 862db7dcb..7edc38eb2 100644 --- a/router/java/src/net/i2p/router/tunnelmanager/TunnelPool.java +++ b/router/java/src/net/i2p/router/tunnelmanager/TunnelPool.java @@ -20,6 +20,7 @@ import net.i2p.router.TunnelInfo; import net.i2p.stat.StatManager; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Store the data for free inbound, outbound, and client pooled tunnels, and serve @@ -27,7 +28,8 @@ import net.i2p.util.Log; * */ class TunnelPool { - private final static Log _log = new Log(TunnelPool.class); + private Log _log; + private RouterContext _context; /** TunnelId --> TunnelInfo of outbound tunnels */ private Map _outboundTunnels; /** TunnelId --> TunnelInfo of free inbound tunnels */ @@ -47,6 +49,7 @@ class TunnelPool { private int _targetClients; /** active or has it been shutdown? 
*/ private boolean _isLive; + private TunnelBuilder _tunnelBuilder; /** write out the current state every 60 seconds */ private final static long WRITE_POOL_DELAY = 60*1000; @@ -57,15 +60,17 @@ class TunnelPool { public final static String TARGET_CLIENTS_PARAM = "router.targetClients"; public final static int TARGET_CLIENTS_DEFAULT = 3; - - static { - StatManager.getInstance().createFrequencyStat("tunnel.failFrequency", "How often do tunnels prematurely fail (after being successfully built)?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - StatManager.getInstance().createRateStat("tunnel.failAfterTime", "How long do tunnels that fail prematurely last before failing?", "Tunnels", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - } - public TunnelPool() { + public TunnelPool(RouterContext ctx) { + _context = ctx; + _log = ctx.logManager().getLog(TunnelPool.class); + + _context.statManager().createFrequencyStat("tunnel.failFrequency", "How often do tunnels prematurely fail (after being successfully built)?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createRateStat("tunnel.failAfterTime", "How long do tunnels that fail prematurely last before failing?", "Tunnels", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + _isLive = true; - _persistenceHelper = new TunnelPoolPersistenceHelper(); + _persistenceHelper = new TunnelPoolPersistenceHelper(_context); + _tunnelBuilder = new TunnelBuilder(_context); } /** @@ -191,7 +196,7 @@ class TunnelPool { public void addOutboundTunnel(TunnelInfo tunnel) { if (!_isLive) return; if (_log.shouldLog(Log.DEBUG)) _log.debug("Add outbound tunnel " + tunnel.getTunnelId()); - MessageHistory.getInstance().tunnelJoined("outbound", tunnel); + _context.messageHistory().tunnelJoined("outbound", tunnel); synchronized (_outboundTunnels) { _outboundTunnels.put(tunnel.getTunnelId(), tunnel); } @@ -236,7 +241,7 @@ class TunnelPool { public void 
addFreeTunnel(TunnelInfo tunnel) { if (!_isLive) return; if (_log.shouldLog(Log.DEBUG)) _log.debug("Add free inbound tunnel " + tunnel.getTunnelId()); - MessageHistory.getInstance().tunnelJoined("free inbound", tunnel); + _context.messageHistory().tunnelJoined("free inbound", tunnel); synchronized (_freeInboundTunnels) { _freeInboundTunnels.put(tunnel.getTunnelId(), tunnel); } @@ -287,7 +292,7 @@ class TunnelPool { public boolean addParticipatingTunnel(TunnelInfo tunnel) { if (!_isLive) return false; if (_log.shouldLog(Log.DEBUG)) _log.debug("Add participating tunnel " + tunnel.getTunnelId()); - MessageHistory.getInstance().tunnelJoined("participant", tunnel); + _context.messageHistory().tunnelJoined("participant", tunnel); synchronized (_participatingTunnels) { if (_participatingTunnels.containsKey(tunnel.getTunnelId())) { return false; @@ -331,7 +336,7 @@ class TunnelPool { if (_log.shouldLog(Log.INFO)) _log.info("Reusing an existing client tunnel pool for " + dest.calculateHash()); } else { - pool = new ClientTunnelPool(dest, settings, this); + pool = new ClientTunnelPool(_context, dest, settings, this); if (_log.shouldLog(Log.INFO)) _log.info("New client tunnel pool created for " + dest.calculateHash()); _clientPools.put(dest, pool); @@ -384,7 +389,7 @@ class TunnelPool { } public void addPendingTunnel(TunnelInfo info) { if (!_isLive) return; - MessageHistory.getInstance().tunnelJoined("pending", info); + _context.messageHistory().tunnelJoined("pending", info); synchronized (_pendingTunnels) { _pendingTunnels.put(info.getTunnelId(), info); } @@ -433,15 +438,15 @@ class TunnelPool { if (getFreeValidTunnelCount() < 3) { if (_log.shouldLog(Log.WARN)) _log.warn("Running low on valid inbound tunnels, building another"); - TunnelInfo inTunnelGateway = TunnelBuilder.getInstance().configureInboundTunnel(null, getPoolSettings(), true); - RequestTunnelJob inReqJob = new RequestTunnelJob(this, inTunnelGateway, true, getTunnelCreationTimeout()); + TunnelInfo 
inTunnelGateway = _tunnelBuilder.configureInboundTunnel(null, getPoolSettings(), true); + RequestTunnelJob inReqJob = new RequestTunnelJob(_context, this, inTunnelGateway, true, getTunnelCreationTimeout()); inReqJob.runJob(); } if (getOutboundValidTunnelCount() < 3) { if (_log.shouldLog(Log.WARN)) _log.warn("Running low on valid outbound tunnels, building another"); - TunnelInfo outTunnelGateway = TunnelBuilder.getInstance().configureOutboundTunnel(getPoolSettings(), true); - RequestTunnelJob outReqJob = new RequestTunnelJob(this, outTunnelGateway, false, getTunnelCreationTimeout()); + TunnelInfo outTunnelGateway = _tunnelBuilder.configureOutboundTunnel(getPoolSettings(), true); + RequestTunnelJob outReqJob = new RequestTunnelJob(_context, this, outTunnelGateway, false, getTunnelCreationTimeout()); outReqJob.runJob(); } } @@ -449,7 +454,7 @@ class TunnelPool { private int getFreeValidTunnelCount() { int found = 0; Set ids = getFreeTunnels(); - long mustExpireAfter = Clock.getInstance().now(); + long mustExpireAfter = _context.clock().now(); for (Iterator iter = ids.iterator(); iter.hasNext(); ) { TunnelId id = (TunnelId)iter.next(); @@ -468,7 +473,7 @@ class TunnelPool { private int getOutboundValidTunnelCount() { int found = 0; Set ids = getOutboundTunnels(); - long mustExpireAfter = Clock.getInstance().now(); + long mustExpireAfter = _context.clock().now(); for (Iterator iter = ids.iterator(); iter.hasNext(); ) { TunnelId id = (TunnelId)iter.next(); @@ -489,18 +494,18 @@ class TunnelPool { TunnelInfo info = getTunnelInfo(id); if (info == null) return; - MessageHistory.getInstance().tunnelFailed(info.getTunnelId()); + _context.messageHistory().tunnelFailed(info.getTunnelId()); info.setIsReady(false); - Hash us = Router.getInstance().getRouterInfo().getIdentity().getHash(); - long lifetime = Clock.getInstance().now() - info.getCreated(); + Hash us = _context.routerHash(); + long lifetime = _context.clock().now() - info.getCreated(); while (info != null) { if 
(!info.getThisHop().equals(us)) { - ProfileManager.getInstance().tunnelFailed(info.getThisHop()); + _context.profileManager().tunnelFailed(info.getThisHop()); } info = info.getNextHopInfo(); } - StatManager.getInstance().addRateData("tunnel.failAfterTime", lifetime, lifetime); - StatManager.getInstance().updateFrequency("tunnel.failFrequency"); + _context.statManager().addRateData("tunnel.failAfterTime", lifetime, lifetime); + _context.statManager().updateFrequency("tunnel.failFrequency"); buildFakeTunnels(); } @@ -516,22 +521,22 @@ class TunnelPool { _persistenceHelper.loadPool(this); _tunnelCreationTimeout = -1; try { - String str = Router.getInstance().getConfigSetting(TUNNEL_CREATION_TIMEOUT_PARAM); + String str = _context.router().getConfigSetting(TUNNEL_CREATION_TIMEOUT_PARAM); _tunnelCreationTimeout = Long.parseLong(str); } catch (Throwable t) { _tunnelCreationTimeout = TUNNEL_CREATION_TIMEOUT_DEFAULT; } _targetClients = TARGET_CLIENTS_DEFAULT; try { - String str = Router.getInstance().getConfigSetting(TARGET_CLIENTS_PARAM); + String str = _context.router().getConfigSetting(TARGET_CLIENTS_PARAM); _targetClients = Integer.parseInt(str); } catch (Throwable t) { _targetClients = TARGET_CLIENTS_DEFAULT; } buildFakeTunnels(); - JobQueue.getInstance().addJob(new WritePoolJob()); - JobQueue.getInstance().addJob(new TunnelPoolManagerJob(this)); - JobQueue.getInstance().addJob(new TunnelPoolExpirationJob(this)); + _context.jobQueue().addJob(new WritePoolJob()); + _context.jobQueue().addJob(new TunnelPoolManagerJob(_context, this)); + _context.jobQueue().addJob(new TunnelPoolExpirationJob(_context, this)); } public void shutdown() { @@ -551,7 +556,7 @@ class TunnelPool { private ClientTunnelSettings createPoolSettings() { ClientTunnelSettings settings = new ClientTunnelSettings(); - settings.readFromProperties(Router.getInstance().getConfigMap()); + settings.readFromProperties(_context.router().getConfigMap()); return settings; } @@ -641,7 +646,8 @@ class TunnelPool { 
*/ private class WritePoolJob extends JobImpl { public WritePoolJob() { - getTiming().setStartAfter(Clock.getInstance().now() + WRITE_POOL_DELAY); + super(TunnelPool.this._context); + getTiming().setStartAfter(TunnelPool.this._context.clock().now() + WRITE_POOL_DELAY); } public String getName() { return "Write Out Tunnel Pool"; } public void runJob() { diff --git a/router/java/src/net/i2p/router/tunnelmanager/TunnelPoolExpirationJob.java b/router/java/src/net/i2p/router/tunnelmanager/TunnelPoolExpirationJob.java index be70bd2b9..beed09a1f 100644 --- a/router/java/src/net/i2p/router/tunnelmanager/TunnelPoolExpirationJob.java +++ b/router/java/src/net/i2p/router/tunnelmanager/TunnelPoolExpirationJob.java @@ -9,62 +9,64 @@ import net.i2p.router.Router; import net.i2p.router.TunnelInfo; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** - * Periodically go through all of the tunnels not assigned to a client and mark + * Periodically go through all of the tunnels not assigned to a client and mark * them as no longer ready and/or drop them (as appropriate) * */ class TunnelPoolExpirationJob extends JobImpl { - private final static Log _log = new Log(TunnelPoolExpirationJob.class); + private Log _log; private TunnelPool _pool; /** expire tunnels as necessary every 30 seconds */ - private final static long EXPIRE_POOL_DELAY = 30*1000; + private final static long EXPIRE_POOL_DELAY = 30*1000; /** - * don't hard expire a tunnel until its later than expiration + buffer - */ + * don't hard expire a tunnel until its later than expiration + buffer + */ private final static long EXPIRE_BUFFER = 30*1000; - public TunnelPoolExpirationJob(TunnelPool pool) { - super(); - _pool = pool; - getTiming().setStartAfter(Clock.getInstance().now() + EXPIRE_POOL_DELAY); + public TunnelPoolExpirationJob(RouterContext ctx, TunnelPool pool) { + super(ctx); + _log = ctx.logManager().getLog(TunnelPoolExpirationJob.class); + _pool = pool; + 
getTiming().setStartAfter(_context.clock().now() + EXPIRE_POOL_DELAY); } public String getName() { return "Expire Pooled Tunnels"; } public void runJob() { - if (!_pool.isLive()) - return; - expireFree(); - expireOutbound(); - expireParticipants(); - expirePending(); - requeue(EXPIRE_POOL_DELAY); + if (!_pool.isLive()) + return; + expireFree(); + expireOutbound(); + expireParticipants(); + expirePending(); + requeue(EXPIRE_POOL_DELAY); } /** - * Drop all pooled free tunnels that are expired or are close enough to + * Drop all pooled free tunnels that are expired or are close enough to * being expired that allocating them to a client would suck. * */ public void expireFree() { - long now = Clock.getInstance().now(); - long expire = now - EXPIRE_BUFFER - Router.CLOCK_FUDGE_FACTOR; - - for (Iterator iter = _pool.getFreeTunnels().iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = _pool.getFreeTunnel(id); - if ( (info != null) && (info.getSettings() != null) ) { - if (info.getSettings().getExpiration() < expire) { - _log.info("Expiring free inbound tunnel " + id + " [" + new Date(info.getSettings().getExpiration()) + "] (expire = " + new Date(expire) + ")"); - _pool.removeFreeTunnel(id); - } else if (info.getSettings().getExpiration() < now) { - _log.info("It is past the expiration for free inbound tunnel " + id + " but not yet the buffer, mark it as no longer ready"); - info.setIsReady(false); - } - } - } + long now = _context.clock().now(); + long expire = now - EXPIRE_BUFFER - Router.CLOCK_FUDGE_FACTOR; + + for (Iterator iter = _pool.getFreeTunnels().iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = _pool.getFreeTunnel(id); + if ( (info != null) && (info.getSettings() != null) ) { + if (info.getSettings().getExpiration() < expire) { + _log.info("Expiring free inbound tunnel " + id + " [" + new Date(info.getSettings().getExpiration()) + "] (expire = " + new Date(expire) + ")"); + 
_pool.removeFreeTunnel(id); + } else if (info.getSettings().getExpiration() < now) { + _log.info("It is past the expiration for free inbound tunnel " + id + " but not yet the buffer, mark it as no longer ready"); + info.setIsReady(false); + } + } + } } /** @@ -72,22 +74,22 @@ class TunnelPoolExpirationJob extends JobImpl { * */ public void expireOutbound() { - long now = Clock.getInstance().now(); - long expire = now - EXPIRE_BUFFER - Router.CLOCK_FUDGE_FACTOR; - - for (Iterator iter = _pool.getOutboundTunnels().iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = _pool.getOutboundTunnel(id); - if ( (info != null) && (info.getSettings() != null) ) { - if (info.getSettings().getExpiration() < expire) { - _log.info("Expiring outbound tunnel " + id + " [" + new Date(info.getSettings().getExpiration()) + "]"); - _pool.removeOutboundTunnel(id); - } else if (info.getSettings().getExpiration() < now) { - _log.info("It is past the expiration for outbound tunnel " + id + " but not yet the buffer, mark it as no longer ready"); - info.setIsReady(false); - } - } - } + long now = _context.clock().now(); + long expire = now - EXPIRE_BUFFER - Router.CLOCK_FUDGE_FACTOR; + + for (Iterator iter = _pool.getOutboundTunnels().iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = _pool.getOutboundTunnel(id); + if ( (info != null) && (info.getSettings() != null) ) { + if (info.getSettings().getExpiration() < expire) { + _log.info("Expiring outbound tunnel " + id + " [" + new Date(info.getSettings().getExpiration()) + "]"); + _pool.removeOutboundTunnel(id); + } else if (info.getSettings().getExpiration() < now) { + _log.info("It is past the expiration for outbound tunnel " + id + " but not yet the buffer, mark it as no longer ready"); + info.setIsReady(false); + } + } + } } /** @@ -95,19 +97,19 @@ class TunnelPoolExpirationJob extends JobImpl { * */ public void expireParticipants() { - long now = 
Clock.getInstance().now(); - long expire = now - EXPIRE_BUFFER - Router.CLOCK_FUDGE_FACTOR; - - for (Iterator iter = _pool.getParticipatingTunnels().iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = _pool.getParticipatingTunnel(id); - if ( (info != null) && (info.getSettings() != null) ) { - if (info.getSettings().getExpiration() < expire) { - _log.info("Expiring participation in tunnel " + id + " [" + new Date(info.getSettings().getExpiration()) + "]"); - _pool.removeParticipatingTunnel(id); - } - } - } + long now = _context.clock().now(); + long expire = now - EXPIRE_BUFFER - Router.CLOCK_FUDGE_FACTOR; + + for (Iterator iter = _pool.getParticipatingTunnels().iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = _pool.getParticipatingTunnel(id); + if ( (info != null) && (info.getSettings() != null) ) { + if (info.getSettings().getExpiration() < expire) { + _log.info("Expiring participation in tunnel " + id + " [" + new Date(info.getSettings().getExpiration()) + "]"); + _pool.removeParticipatingTunnel(id); + } + } + } } /** @@ -115,18 +117,18 @@ class TunnelPoolExpirationJob extends JobImpl { * */ public void expirePending() { - long now = Clock.getInstance().now(); - long expire = now - EXPIRE_BUFFER - Router.CLOCK_FUDGE_FACTOR; - - for (Iterator iter = _pool.getPendingTunnels().iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = _pool.getPendingTunnel(id); - if ( (info != null) && (info.getSettings() != null) ) { - if (info.getSettings().getExpiration() < expire) { - _log.info("Expiring pending tunnel " + id + " [" + new Date(info.getSettings().getExpiration()) + "]"); - _pool.removePendingTunnel(id); - } - } - } + long now = _context.clock().now(); + long expire = now - EXPIRE_BUFFER - Router.CLOCK_FUDGE_FACTOR; + + for (Iterator iter = _pool.getPendingTunnels().iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo 
info = _pool.getPendingTunnel(id); + if ( (info != null) && (info.getSettings() != null) ) { + if (info.getSettings().getExpiration() < expire) { + _log.info("Expiring pending tunnel " + id + " [" + new Date(info.getSettings().getExpiration()) + "]"); + _pool.removePendingTunnel(id); + } + } + } } } diff --git a/router/java/src/net/i2p/router/tunnelmanager/TunnelPoolManagerJob.java b/router/java/src/net/i2p/router/tunnelmanager/TunnelPoolManagerJob.java index 9e82da1f3..7b4fe0fb9 100644 --- a/router/java/src/net/i2p/router/tunnelmanager/TunnelPoolManagerJob.java +++ b/router/java/src/net/i2p/router/tunnelmanager/TunnelPoolManagerJob.java @@ -9,6 +9,7 @@ import net.i2p.router.JobQueue; import net.i2p.router.TunnelInfo; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Request new tunnels to be created if insufficient free inbound tunnels or @@ -16,7 +17,7 @@ import net.i2p.util.Log; * */ class TunnelPoolManagerJob extends JobImpl { - private final static Log _log = new Log(TunnelPoolManagerJob.class); + private Log _log; private TunnelPool _pool; /** whether we built tunnels on the last run */ @@ -34,64 +35,73 @@ class TunnelPoolManagerJob extends JobImpl { */ private final static long EXPIRE_FUDGE_PERIOD = 60*1000; - public TunnelPoolManagerJob(TunnelPool pool) { - super(); - _pool = pool; + public TunnelPoolManagerJob(RouterContext ctx, TunnelPool pool) { + super(ctx); + _log = ctx.logManager().getLog(TunnelPoolManagerJob.class); + _pool = pool; } public String getName() { return "Manage Tunnel Pool"; } public void runJob() { - try { - if (!_pool.isLive()) - return; + try { + if (!_pool.isLive()) + return; + + boolean built = false; + + int targetClients = _pool.getTargetClients(); + int targetInboundTunnels = targetClients*_pool.getPoolSettings().getNumInboundTunnels() + 3; + int targetOutboundTunnels = targetClients*_pool.getPoolSettings().getNumOutboundTunnels() + 3; + + int curFreeInboundTunnels = 
getFreeTunnelCount(); + if (curFreeInboundTunnels < targetInboundTunnels) { + if (_log.shouldLog(Log.INFO)) + _log.info("Insufficient free inbound tunnels (" + curFreeInboundTunnels + ", not " + + targetInboundTunnels + "), requesting more"); + requestInboundTunnels(targetInboundTunnels - curFreeInboundTunnels); + //requestFakeInboundTunnels(1); + built = true; + } else { + if (_builtOnLastRun) { + // all good, no need for more inbound tunnels + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Sufficient inbound tunnels (" + curFreeInboundTunnels + ")"); + } else { + if (_log.shouldLog(Log.INFO)) + _log.info("Building another inbound tunnel, cuz tunnels r k00l"); + requestInboundTunnels(1); + built = true; + } + } - boolean built = false; + int curOutboundTunnels = getOutboundTunnelCount(); + if (curOutboundTunnels < targetOutboundTunnels) { + if (_log.shouldLog(Log.INFO)) + _log.info("Insufficient outbound tunnels (" + curOutboundTunnels + ", not " + + targetOutboundTunnels + "), requesting more"); + requestOutboundTunnels(targetOutboundTunnels - curOutboundTunnels); + //requestFakeOutboundTunnels(1); + built = true; + } else { + if (_builtOnLastRun) { + // all good, no need for more outbound tunnels + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Sufficient outbound tunnels (" + curOutboundTunnels + ")"); + } else { + if (_log.shouldLog(Log.INFO)) + _log.info("Building another outbound tunnel, since gravity still works"); + requestOutboundTunnels(1); + built = true; + } + } - int targetClients = _pool.getTargetClients(); - int targetInboundTunnels = targetClients*_pool.getPoolSettings().getNumInboundTunnels() + 3; - int targetOutboundTunnels = targetClients*_pool.getPoolSettings().getNumOutboundTunnels() + 3; + _pool.buildFakeTunnels(); + _builtOnLastRun = built; + } catch (Throwable t) { + _log.log(Log.CRIT, "Unhandled exception managing the tunnel pool", t); + } - int curFreeInboundTunnels = getFreeTunnelCount(); - if (curFreeInboundTunnels < targetInboundTunnels) { 
- _log.info("Insufficient free inbound tunnels (" + curFreeInboundTunnels + ", not " + targetInboundTunnels + "), requesting more"); - requestInboundTunnels(targetInboundTunnels - curFreeInboundTunnels); - //requestFakeInboundTunnels(1); - built = true; - } else { - if (_builtOnLastRun) { - // all good, no need for more inbound tunnels - _log.debug("Sufficient inbound tunnels (" + curFreeInboundTunnels + ")"); - } else { - _log.info("Building another inbound tunnel, cuz tunnels r k00l"); - requestInboundTunnels(1); - built = true; - } - } - - int curOutboundTunnels = getOutboundTunnelCount(); - if (curOutboundTunnels < targetOutboundTunnels) { - _log.info("Insufficient outbound tunnels (" + curOutboundTunnels + ", not " + targetOutboundTunnels + "), requesting more"); - requestOutboundTunnels(targetOutboundTunnels - curOutboundTunnels); - //requestFakeOutboundTunnels(1); - built = true; - } else { - if (_builtOnLastRun) { - // all good, no need for more outbound tunnels - _log.debug("Sufficient outbound tunnels (" + curOutboundTunnels + ")"); - } else { - _log.info("Building another outbound tunnel, since gravity still works"); - requestOutboundTunnels(1); - built = true; - } - } - - _pool.buildFakeTunnels(); - _builtOnLastRun = built; - } catch (Throwable t) { - _log.log(Log.CRIT, "Unhandled exception managing the tunnel pool", t); - } - - requeue(POOL_CHECK_DELAY); + requeue(POOL_CHECK_DELAY); } /** @@ -99,36 +109,38 @@ class TunnelPoolManagerJob extends JobImpl { * */ private int getFreeTunnelCount() { - Set freeTunnels = _pool.getFreeTunnels(); - int free = 0; - int minLength = _pool.getPoolSettings().getDepthInbound(); - long mustExpireAfter = Clock.getInstance().now() + EXPIRE_FUDGE_PERIOD; - for (Iterator iter = freeTunnels.iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = _pool.getFreeTunnel(id); - if ( (info != null) && (info.getIsReady()) ) { - if (info.getSettings().getExpiration() > mustExpireAfter) { - if 
(info.getLength() >= minLength) { - if (info.getDestination() == null) { - free++; - } else { - // already alloc'ed - _log.error("Why is a free inbound tunnel allocated to a destination? [" + info.getTunnelId().getTunnelId() + " to " + info.getDestination().toBase64() + "]"); - } - } else { - // its valid, sure, but its not long enough *cough* - - // for the moment we'll keep these around so that we can use them - // for tunnel management and db messages, rather than force all - // tunnels to be the 2+ hop length as required for clients - free++; - } - } else { - _log.info("Inbound tunnel " + id + " is expiring in the upcoming period, consider it not-free"); - } - } - } - return free; + Set freeTunnels = _pool.getFreeTunnels(); + int free = 0; + int minLength = _pool.getPoolSettings().getDepthInbound(); + long mustExpireAfter = _context.clock().now() + EXPIRE_FUDGE_PERIOD; + for (Iterator iter = freeTunnels.iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = _pool.getFreeTunnel(id); + if ( (info != null) && (info.getIsReady()) ) { + if (info.getSettings().getExpiration() > mustExpireAfter) { + if (info.getLength() >= minLength) { + if (info.getDestination() == null) { + free++; + } else { + // already alloc'ed + _log.error("Why is a free inbound tunnel allocated to a destination? 
[" + + info.getTunnelId().getTunnelId() + " to " + + info.getDestination().toBase64() + "]"); + } + } else { + // its valid, sure, but its not long enough *cough* + + // for the moment we'll keep these around so that we can use them + // for tunnel management and db messages, rather than force all + // tunnels to be the 2+ hop length as required for clients + free++; + } + } else { + _log.info("Inbound tunnel " + id + " is expiring in the upcoming period, consider it not-free"); + } + } + } + return free; } /** @@ -136,32 +148,32 @@ class TunnelPoolManagerJob extends JobImpl { * */ private int getOutboundTunnelCount() { - Set outboundTunnels = _pool.getOutboundTunnels(); - int outbound = 0; - long mustExpireAfter = Clock.getInstance().now() + EXPIRE_FUDGE_PERIOD; - for (Iterator iter = outboundTunnels.iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = _pool.getOutboundTunnel(id); - if ( (info != null) && (info.getIsReady()) ) { - if (info.getSettings().getExpiration() > mustExpireAfter) { - outbound++; - } else { - _log.info("Outbound tunnel " + id + " is expiring in the upcoming period, consider it not-free"); - } - } - } - return outbound; + Set outboundTunnels = _pool.getOutboundTunnels(); + int outbound = 0; + long mustExpireAfter = _context.clock().now() + EXPIRE_FUDGE_PERIOD; + for (Iterator iter = outboundTunnels.iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = _pool.getOutboundTunnel(id); + if ( (info != null) && (info.getIsReady()) ) { + if (info.getSettings().getExpiration() > mustExpireAfter) { + outbound++; + } else { + _log.info("Outbound tunnel " + id + " is expiring in the upcoming period, consider it not-free"); + } + } + } + return outbound; } private void requestInboundTunnels(int numTunnelsToRequest) { - _log.info("Requesting " + numTunnelsToRequest + " inbound tunnels"); - for (int i = 0; i < numTunnelsToRequest; i++) - JobQueue.getInstance().addJob(new 
RequestInboundTunnelJob(_pool, false)); + _log.info("Requesting " + numTunnelsToRequest + " inbound tunnels"); + for (int i = 0; i < numTunnelsToRequest; i++) + _context.jobQueue().addJob(new RequestInboundTunnelJob(_context, _pool, false)); } private void requestOutboundTunnels(int numTunnelsToRequest) { - _log.info("Requesting " + numTunnelsToRequest + " outbound tunnels"); - for (int i = 0; i < numTunnelsToRequest; i++) - JobQueue.getInstance().addJob(new RequestOutboundTunnelJob(_pool, false)); + _log.info("Requesting " + numTunnelsToRequest + " outbound tunnels"); + for (int i = 0; i < numTunnelsToRequest; i++) + _context.jobQueue().addJob(new RequestOutboundTunnelJob(_context, _pool, false)); } } diff --git a/router/java/src/net/i2p/router/tunnelmanager/TunnelPoolPersistenceHelper.java b/router/java/src/net/i2p/router/tunnelmanager/TunnelPoolPersistenceHelper.java index 3ea7c45fd..2a5925bcf 100644 --- a/router/java/src/net/i2p/router/tunnelmanager/TunnelPoolPersistenceHelper.java +++ b/router/java/src/net/i2p/router/tunnelmanager/TunnelPoolPersistenceHelper.java @@ -16,6 +16,7 @@ import net.i2p.router.ClientTunnelSettings; import net.i2p.router.Router; import net.i2p.router.TunnelInfo; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Handle all of the load / store of the tunnel pool (including any contained @@ -23,163 +24,169 @@ import net.i2p.util.Log; * */ class TunnelPoolPersistenceHelper { - private final static Log _log = new Log(TunnelPoolPersistenceHelper.class); + private Log _log; + private RouterContext _context; public final static String PARAM_TUNNEL_POOL_FILE = "router.tunnelPoolFile"; public final static String DEFAULT_TUNNEL_POOL_FILE = "tunnelPool.dat"; + public TunnelPoolPersistenceHelper(RouterContext ctx) { + _context = ctx; + _log = ctx.logManager().getLog(TunnelPoolPersistenceHelper.class); + } + public void writePool(TunnelPool pool) { - File f = getTunnelPoolFile(); - writePool(pool, f); + File f = 
getTunnelPoolFile(); + writePool(pool, f); } public void writePool(TunnelPool pool, File f) { - FileOutputStream fos = null; - try { - fos = new FileOutputStream(f); - DataHelper.writeLong(fos, 2, pool.getFreeTunnelCount()); - for (Iterator iter = pool.getFreeTunnels().iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = pool.getFreeTunnel(id); - if (info != null) - info.writeBytes(fos); - } - DataHelper.writeLong(fos, 2, pool.getOutboundTunnelCount()); - for (Iterator iter = pool.getOutboundTunnels().iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = pool.getOutboundTunnel(id); - if (info != null) - info.writeBytes(fos); - } - DataHelper.writeLong(fos, 2, pool.getParticipatingTunnels().size()); - for (Iterator iter = pool.getParticipatingTunnels().iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = pool.getParticipatingTunnel(id); - if (info != null) - info.writeBytes(fos); - } - DataHelper.writeLong(fos, 2, pool.getPendingTunnels().size()); - for (Iterator iter = pool.getPendingTunnels().iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = pool.getPendingTunnel(id); - if (info != null) - info.writeBytes(fos); - } - DataHelper.writeLong(fos, 2, pool.getClientPools().size()); - for (Iterator iter = pool.getClientPools().iterator(); iter.hasNext(); ) { - Destination dest = (Destination)iter.next(); - ClientTunnelPool cpool = (ClientTunnelPool)pool.getClientPool(dest); - writeClientPool(fos, cpool); - } - fos.flush(); - } catch (IOException ioe) { - _log.error("Error writing tunnel pool at " + f.getName(), ioe); - } catch (DataFormatException dfe) { - _log.error("Error formatting tunnels at " + f.getName(), dfe); - } finally { - if (fos != null) try { fos.close(); } catch (IOException ioe) {} - _log.debug("Tunnel pool state written to " + f.getName()); - } + FileOutputStream fos = null; + try { + fos = new 
FileOutputStream(f); + DataHelper.writeLong(fos, 2, pool.getFreeTunnelCount()); + for (Iterator iter = pool.getFreeTunnels().iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = pool.getFreeTunnel(id); + if (info != null) + info.writeBytes(fos); + } + DataHelper.writeLong(fos, 2, pool.getOutboundTunnelCount()); + for (Iterator iter = pool.getOutboundTunnels().iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = pool.getOutboundTunnel(id); + if (info != null) + info.writeBytes(fos); + } + DataHelper.writeLong(fos, 2, pool.getParticipatingTunnels().size()); + for (Iterator iter = pool.getParticipatingTunnels().iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = pool.getParticipatingTunnel(id); + if (info != null) + info.writeBytes(fos); + } + DataHelper.writeLong(fos, 2, pool.getPendingTunnels().size()); + for (Iterator iter = pool.getPendingTunnels().iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = pool.getPendingTunnel(id); + if (info != null) + info.writeBytes(fos); + } + DataHelper.writeLong(fos, 2, pool.getClientPools().size()); + for (Iterator iter = pool.getClientPools().iterator(); iter.hasNext(); ) { + Destination dest = (Destination)iter.next(); + ClientTunnelPool cpool = (ClientTunnelPool)pool.getClientPool(dest); + writeClientPool(fos, cpool); + } + fos.flush(); + } catch (IOException ioe) { + _log.error("Error writing tunnel pool at " + f.getName(), ioe); + } catch (DataFormatException dfe) { + _log.error("Error formatting tunnels at " + f.getName(), dfe); + } finally { + if (fos != null) try { fos.close(); } catch (IOException ioe) {} + _log.debug("Tunnel pool state written to " + f.getName()); + } } private void writeClientPool(FileOutputStream fos, ClientTunnelPool pool) throws IOException, DataFormatException { - pool.getDestination().writeBytes(fos); - Properties props = new Properties(); - 
pool.getClientSettings().writeToProperties(props); - DataHelper.writeProperties(fos, props); - DataHelper.writeLong(fos, 2, pool.getInboundTunnelIds().size()); - for (Iterator iter = pool.getInboundTunnelIds().iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = pool.getInboundTunnel(id); - if (info != null) - info.writeBytes(fos); - } - DataHelper.writeLong(fos, 2, pool.getInactiveInboundTunnelIds().size()); - for (Iterator iter = pool.getInactiveInboundTunnelIds().iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = pool.getInactiveInboundTunnel(id); - if (info != null) - info.writeBytes(fos); - } + pool.getDestination().writeBytes(fos); + Properties props = new Properties(); + pool.getClientSettings().writeToProperties(props); + DataHelper.writeProperties(fos, props); + DataHelper.writeLong(fos, 2, pool.getInboundTunnelIds().size()); + for (Iterator iter = pool.getInboundTunnelIds().iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = pool.getInboundTunnel(id); + if (info != null) + info.writeBytes(fos); + } + DataHelper.writeLong(fos, 2, pool.getInactiveInboundTunnelIds().size()); + for (Iterator iter = pool.getInactiveInboundTunnelIds().iterator(); iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = pool.getInactiveInboundTunnel(id); + if (info != null) + info.writeBytes(fos); + } } /** * Load up the tunnels from disk, adding as appropriate to the TunnelPool */ public void loadPool(TunnelPool pool) { - File f = getTunnelPoolFile(); - loadPool(pool, f); + File f = getTunnelPoolFile(); + loadPool(pool, f); } public void loadPool(TunnelPool pool, File f) { - if (!f.exists()) return; - FileInputStream fin = null; - try { - fin = new FileInputStream(f); - int numFree = (int)DataHelper.readLong(fin, 2); - for (int i = 0; i < numFree; i++) { - TunnelInfo info = new TunnelInfo(); - info.readBytes(fin); - pool.addFreeTunnel(info); 
- } - int numOut = (int)DataHelper.readLong(fin, 2); - for (int i = 0; i < numOut; i++) { - TunnelInfo info = new TunnelInfo(); - info.readBytes(fin); - pool.addOutboundTunnel(info); - } - int numParticipating = (int)DataHelper.readLong(fin, 2); - for (int i = 0; i < numParticipating; i++) { - TunnelInfo info = new TunnelInfo(); - info.readBytes(fin); - pool.addParticipatingTunnel(info); - } - int numPending = (int)DataHelper.readLong(fin, 2); - for (int i = 0; i < numPending; i++) { - TunnelInfo info = new TunnelInfo(); - info.readBytes(fin); - pool.addPendingTunnel(info); - } - int numClients = (int)DataHelper.readLong(fin, 2); - for (int i = 0; i < numClients; i++) { - readClientPool(fin, pool); - } - } catch (IOException ioe) { - _log.error("Error reading tunnel pool from " + f.getName(), ioe); - } catch (DataFormatException dfe) { - _log.error("Error formatting tunnels from " + f.getName(), dfe); - } finally { - if (fin != null) try { fin.close(); } catch (IOException ioe) {} - _log.debug("Tunnel pool state written to " + f.getName()); - } + if (!f.exists()) return; + FileInputStream fin = null; + try { + fin = new FileInputStream(f); + int numFree = (int)DataHelper.readLong(fin, 2); + for (int i = 0; i < numFree; i++) { + TunnelInfo info = new TunnelInfo(_context); + info.readBytes(fin); + pool.addFreeTunnel(info); + } + int numOut = (int)DataHelper.readLong(fin, 2); + for (int i = 0; i < numOut; i++) { + TunnelInfo info = new TunnelInfo(_context); + info.readBytes(fin); + pool.addOutboundTunnel(info); + } + int numParticipating = (int)DataHelper.readLong(fin, 2); + for (int i = 0; i < numParticipating; i++) { + TunnelInfo info = new TunnelInfo(_context); + info.readBytes(fin); + pool.addParticipatingTunnel(info); + } + int numPending = (int)DataHelper.readLong(fin, 2); + for (int i = 0; i < numPending; i++) { + TunnelInfo info = new TunnelInfo(_context); + info.readBytes(fin); + pool.addPendingTunnel(info); + } + int numClients = 
(int)DataHelper.readLong(fin, 2); + for (int i = 0; i < numClients; i++) { + readClientPool(fin, pool); + } + } catch (IOException ioe) { + _log.error("Error reading tunnel pool from " + f.getName(), ioe); + } catch (DataFormatException dfe) { + _log.error("Error formatting tunnels from " + f.getName(), dfe); + } finally { + if (fin != null) try { fin.close(); } catch (IOException ioe) {} + _log.debug("Tunnel pool state written to " + f.getName()); + } } private void readClientPool(FileInputStream fin, TunnelPool pool) throws IOException, DataFormatException { - Destination dest = new Destination(); - dest.readBytes(fin); - ClientTunnelSettings settings = new ClientTunnelSettings(); - Properties props = DataHelper.readProperties(fin); - settings.readFromProperties(props); - HashSet activeTunnels = new HashSet(); - int numActiveTunnels = (int)DataHelper.readLong(fin, 2); - for (int i = 0; i < numActiveTunnels; i++) { - TunnelInfo info = new TunnelInfo(); - info.readBytes(fin); - activeTunnels.add(info); - } - HashSet inactiveTunnels = new HashSet(); - int numInactiveTunnels = (int)DataHelper.readLong(fin, 2); - for (int i = 0; i < numInactiveTunnels; i++) { - TunnelInfo info = new TunnelInfo(); - info.readBytes(fin); - inactiveTunnels.add(info); - } + Destination dest = new Destination(); + dest.readBytes(fin); + ClientTunnelSettings settings = new ClientTunnelSettings(); + Properties props = DataHelper.readProperties(fin); + settings.readFromProperties(props); + HashSet activeTunnels = new HashSet(); + int numActiveTunnels = (int)DataHelper.readLong(fin, 2); + for (int i = 0; i < numActiveTunnels; i++) { + TunnelInfo info = new TunnelInfo(_context); + info.readBytes(fin); + activeTunnels.add(info); + } + HashSet inactiveTunnels = new HashSet(); + int numInactiveTunnels = (int)DataHelper.readLong(fin, 2); + for (int i = 0; i < numInactiveTunnels; i++) { + TunnelInfo info = new TunnelInfo(_context); + info.readBytes(fin); + inactiveTunnels.add(info); + } - 
ClientTunnelPool cpool = new ClientTunnelPool(dest, settings, pool); - cpool.setActiveTunnels(activeTunnels); - cpool.setInactiveTunnels(inactiveTunnels); - pool.addClientPool(cpool); - cpool.startPool(); + ClientTunnelPool cpool = new ClientTunnelPool(_context, dest, settings, pool); + cpool.setActiveTunnels(activeTunnels); + cpool.setInactiveTunnels(inactiveTunnels); + pool.addClientPool(cpool); + cpool.startPool(); } @@ -188,14 +195,14 @@ class TunnelPoolPersistenceHelper { * */ private File getTunnelPoolFile() { - String filename = null; + String filename = null; - String str = Router.getInstance().getConfigSetting(PARAM_TUNNEL_POOL_FILE); - if ( (str != null) && (str.trim().length() > 0) ) - filename = str; - else - filename = DEFAULT_TUNNEL_POOL_FILE; - - return new File(filename); + String str = _context.router().getConfigSetting(PARAM_TUNNEL_POOL_FILE); + if ( (str != null) && (str.trim().length() > 0) ) + filename = str; + else + filename = DEFAULT_TUNNEL_POOL_FILE; + + return new File(filename); } } diff --git a/router/java/src/net/i2p/router/tunnelmanager/TunnelTestManager.java b/router/java/src/net/i2p/router/tunnelmanager/TunnelTestManager.java index 1ec202fd5..5160b0e8f 100644 --- a/router/java/src/net/i2p/router/tunnelmanager/TunnelTestManager.java +++ b/router/java/src/net/i2p/router/tunnelmanager/TunnelTestManager.java @@ -1,9 +1,9 @@ package net.i2p.router.tunnelmanager; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -20,13 +20,15 @@ import net.i2p.router.TunnelInfo; import net.i2p.util.Clock; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.router.RouterContext; /** * Manage the testing for free, outbound, and active inbound client tunnels * */ class TunnelTestManager { - private final static Log _log = new Log(TunnelTestManager.class); + private Log _log; + private RouterContext _context; private TunnelPool _pool; private boolean _stopTesting; @@ -37,83 +39,85 @@ class TunnelTestManager { /** how many times we'll be able to try the tests (this should take into consideration user prefs, but fsck it for now) */ private final static int CHANCES_PER_DURATION = 8; - public TunnelTestManager(TunnelPool pool) { - _pool = pool; - _stopTesting = false; - JobQueue.getInstance().addJob(new CoordinateTunnelTestingJob()); + public TunnelTestManager(RouterContext ctx, TunnelPool pool) { + _context = ctx; + _log = ctx.logManager().getLog(TunnelTestManager.class); + _pool = pool; + _stopTesting = false; + _context.jobQueue().addJob(new CoordinateTunnelTestingJob()); } private Set selectTunnelsToTest() { - Set allIds = getAllIds(); - Set toTest = new HashSet(allIds.size()); - long now = Clock.getInstance().now(); - for (Iterator iter = allIds.iterator(); iter.hasNext();) { - TunnelId id = (TunnelId)iter.next(); - TunnelInfo info = _pool.getTunnelInfo(id); - if ( (info != null) && (info.getSettings() != null) ) { - if (info.getSettings().getExpiration() <= 0) { - // skip local tunnels - } else if (!info.getIsReady()) { - // skip not ready tunnels - } else if (info.getSettings().getExpiration() < now + MINIMUM_RETEST_DELAY) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Tunnel " + id.getTunnelId() + " will be expiring within the current period (" + new Date(info.getSettings().getExpiration()) + "), so skip testing it"); - } else if (info.getSettings().getCreated() + MINIMUM_RETEST_DELAY < now) { - double probability = TESTS_PER_DURATION / (allIds.size() * 
CHANCES_PER_DURATION); - if (RandomSource.getInstance().nextInt(10) <= (probability*10d)) { - toTest.add(id); - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Tunnel " + id.getTunnelId() + " could be tested, but probabilistically isn't going to be"); - } - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Tunnel " + id.getTunnelId() + " was just created (" + new Date(info.getSettings().getCreated()) + "), wait until the next pass to test it"); - } - } else { - if (_log.shouldLog(Log.WARN)) - _log.warn("Hmm, a normally testable tunnel [" + id.getTunnelId() + "] didn't have info or settings: " + info); - } - } - return toTest; + Set allIds = getAllIds(); + Set toTest = new HashSet(allIds.size()); + long now = _context.clock().now(); + for (Iterator iter = allIds.iterator(); iter.hasNext();) { + TunnelId id = (TunnelId)iter.next(); + TunnelInfo info = _pool.getTunnelInfo(id); + if ( (info != null) && (info.getSettings() != null) ) { + if (info.getSettings().getExpiration() <= 0) { + // skip local tunnels + } else if (!info.getIsReady()) { + // skip not ready tunnels + } else if (info.getSettings().getExpiration() < now + MINIMUM_RETEST_DELAY) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Tunnel " + id.getTunnelId() + " will be expiring within the current period (" + new Date(info.getSettings().getExpiration()) + "), so skip testing it"); + } else if (info.getSettings().getCreated() + MINIMUM_RETEST_DELAY < now) { + double probability = TESTS_PER_DURATION / (allIds.size() * CHANCES_PER_DURATION); + if (_context.random().nextInt(10) <= (probability*10d)) { + toTest.add(id); + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Tunnel " + id.getTunnelId() + " could be tested, but probabilistically isn't going to be"); + } + } else { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Tunnel " + id.getTunnelId() + " was just created (" + new Date(info.getSettings().getCreated()) + "), wait until the next pass to test it"); + } + } else { + if 
(_log.shouldLog(Log.WARN)) + _log.warn("Hmm, a normally testable tunnel [" + id.getTunnelId() + "] didn't have info or settings: " + info); + } + } + return toTest; } private Set getAllIds() { - return _pool.getManagedTunnelIds(); + return _pool.getManagedTunnelIds(); } public void stopTesting() { _stopTesting = true; } private void runTest(TunnelId tunnel) { - JobQueue.getInstance().addJob(new TestTunnelJob(tunnel, _pool)); + _context.jobQueue().addJob(new TestTunnelJob(_context, tunnel, _pool)); } private class CoordinateTunnelTestingJob extends JobImpl { - public CoordinateTunnelTestingJob() { - super(); - getTiming().setStartAfter(Clock.getInstance().now() + MINIMUM_RETEST_DELAY); - } - public String getName() { return "Coordinate Tunnel Testing"; } - public void runJob() { - if (_stopTesting) return; - - Set toTestIds = selectTunnelsToTest(); - if (_log.shouldLog(Log.INFO)) - _log.info("Running tests on selected tunnels: " + toTestIds); - for (Iterator iter = toTestIds.iterator(); iter.hasNext(); ) { - TunnelId id = (TunnelId)iter.next(); - runTest(id); - } - reschedule(); - } - - private void reschedule() { - long minNext = Clock.getInstance().now() + MINIMUM_RETEST_DELAY; - long nxt = minNext + RandomSource.getInstance().nextInt(60*1000); // test tunnels once every 30-90 seconds - getTiming().setStartAfter(nxt); - if (_log.shouldLog(Log.INFO)) - _log.info("Rescheduling tunnel tests for " + new Date(nxt)); - JobQueue.getInstance().addJob(CoordinateTunnelTestingJob.this); - } + public CoordinateTunnelTestingJob() { + super(TunnelTestManager.this._context); + getTiming().setStartAfter(TunnelTestManager.this._context.clock().now() + MINIMUM_RETEST_DELAY); + } + public String getName() { return "Coordinate Tunnel Testing"; } + public void runJob() { + if (_stopTesting) return; + + Set toTestIds = selectTunnelsToTest(); + if (_log.shouldLog(Log.INFO)) + _log.info("Running tests on selected tunnels: " + toTestIds); + for (Iterator iter = toTestIds.iterator(); 
iter.hasNext(); ) { + TunnelId id = (TunnelId)iter.next(); + runTest(id); + } + reschedule(); + } + + private void reschedule() { + long minNext = TunnelTestManager.this._context.clock().now() + MINIMUM_RETEST_DELAY; + long nxt = minNext + TunnelTestManager.this._context.random().nextInt(60*1000); // test tunnels once every 30-90 seconds + getTiming().setStartAfter(nxt); + if (_log.shouldLog(Log.INFO)) + _log.info("Rescheduling tunnel tests for " + new Date(nxt)); + TunnelTestManager.this._context.jobQueue().addJob(CoordinateTunnelTestingJob.this); + } } } diff --git a/router/java/test/net/i2p/data/i2np/DatabaseStoreMessageTest.java b/router/java/test/net/i2p/data/i2np/DatabaseStoreMessageTest.java index d4b28c11c..f3bda79c8 100644 --- a/router/java/test/net/i2p/data/i2np/DatabaseStoreMessageTest.java +++ b/router/java/test/net/i2p/data/i2np/DatabaseStoreMessageTest.java @@ -1,9 +1,9 @@ package net.i2p.data.i2np; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -29,13 +29,13 @@ class DatabaseStoreMessageTest extends StructureTest { TestData.registerTest(new DatabaseStoreMessageTest(), "DatabaseStoreMessage"); } public DataStructure createDataStructure() throws DataFormatException { - DatabaseStoreMessage msg = new DatabaseStoreMessage(); - RouterInfo info = (RouterInfo)new RouterInfoTest().createDataStructure(); - msg.setKey(info.getIdentity().getHash()); - msg.setMessageExpiration(new Date(Clock.getInstance().now())); - msg.setUniqueId(42); - msg.setRouterInfo(info); - return msg; + DatabaseStoreMessage msg = new DatabaseStoreMessage(_context); + RouterInfo info = (RouterInfo)new RouterInfoTest().createDataStructure(); + msg.setKey(info.getIdentity().getHash()); + msg.setMessageExpiration(new Date(Clock.getInstance().now())); + msg.setUniqueId(42); + msg.setRouterInfo(info); + return msg; } - public DataStructure createStructureToRead() { return new DatabaseStoreMessage(); } + public DataStructure createStructureToRead() { return new DatabaseStoreMessage(_context); } } diff --git a/router/java/test/net/i2p/data/i2np/I2NPMessageReaderTest.java b/router/java/test/net/i2p/data/i2np/I2NPMessageReaderTest.java index 6ef407f85..7eda0e081 100644 --- a/router/java/test/net/i2p/data/i2np/I2NPMessageReaderTest.java +++ b/router/java/test/net/i2p/data/i2np/I2NPMessageReaderTest.java @@ -1,9 +1,9 @@ package net.i2p.data.i2np; /* * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat * your children, but it might. Use at your own risk. 
* */ @@ -18,6 +18,7 @@ import net.i2p.data.i2np.DatabaseStoreMessage; import net.i2p.data.i2np.I2NPMessage; import net.i2p.data.i2np.I2NPMessageReader; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Test harness for loading / storing I2NP DatabaseStore message objects @@ -26,53 +27,54 @@ import net.i2p.util.Log; */ class I2NPMessageReaderTest implements I2NPMessageReader.I2NPMessageEventListener { private final static Log _log = new Log(I2NPMessageReaderTest.class); + private static RouterContext _context = new RouterContext(null); public static void main(String args[]) { - I2NPMessageReaderTest test = new I2NPMessageReaderTest(); - test.runTest(); - try { Thread.sleep(30*1000); } catch (InterruptedException ie) {} + I2NPMessageReaderTest test = new I2NPMessageReaderTest(); + test.runTest(); + try { Thread.sleep(30*1000); } catch (InterruptedException ie) {} } public void runTest() { - InputStream data = getData(); - test(data); + InputStream data = getData(); + test(data); } private InputStream getData() { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - try { - DatabaseStoreMessage msg = (DatabaseStoreMessage)new DatabaseStoreMessageTest().createDataStructure(); - msg.writeBytes(baos); - msg.writeBytes(baos); - msg.writeBytes(baos); - _log.debug("DB Store message in tunnel contains: " + msg); - msg.writeBytes(baos); - } catch (DataFormatException dfe) { - _log.error("Error building data", dfe); - } catch (IOException ioe) { - _log.error("Error writing stream", ioe); - } - return new ByteArrayInputStream(baos.toByteArray()); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try { + DatabaseStoreMessage msg = (DatabaseStoreMessage)new DatabaseStoreMessageTest().createDataStructure(); + msg.writeBytes(baos); + msg.writeBytes(baos); + msg.writeBytes(baos); + _log.debug("DB Store message in tunnel contains: " + msg); + msg.writeBytes(baos); + } catch (DataFormatException dfe) { + _log.error("Error building data", dfe); + 
} catch (IOException ioe) { + _log.error("Error writing stream", ioe); + } + return new ByteArrayInputStream(baos.toByteArray()); } private void test(InputStream in) { - _log.debug("Testing the input stream"); - I2NPMessageReader reader = new I2NPMessageReader(in, this); - _log.debug("Created, beginning reading"); - reader.startReading(); - _log.debug("Reading commenced"); + _log.debug("Testing the input stream"); + I2NPMessageReader reader = new I2NPMessageReader(_context, in, this); + _log.debug("Created, beginning reading"); + reader.startReading(); + _log.debug("Reading commenced"); } public void disconnected(I2NPMessageReader reader) { - _log.debug("Disconnected"); + _log.debug("Disconnected"); } public void messageReceived(I2NPMessageReader reader, I2NPMessage message, long msToRead) { - _log.debug("Message received: " + message); + _log.debug("Message received: " + message); } public void readError(I2NPMessageReader reader, Exception error) { - _log.debug("Read error: " + error.getMessage(), error); + _log.debug("Read error: " + error.getMessage(), error); } }