\n"); - + out.write("If this is checked, the registration, admin, and remote passwords are unnecessary - anyone"); out.write("can register and administer Syndie, as well as use any remote functionality. This should not be checked if untrusted"); out.write("parties can access this web interface.
\n"); + */ out.write("Default user: \n"); diff --git a/apps/syndie/java/src/net/i2p/syndie/web/BaseServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/BaseServlet.java index f848b6af8..6e3dc7a9a 100644 --- a/apps/syndie/java/src/net/i2p/syndie/web/BaseServlet.java +++ b/apps/syndie/java/src/net/i2p/syndie/web/BaseServlet.java @@ -716,11 +716,13 @@ public abstract class BaseServlet extends HttpServlet { for (Iterator iter = names.iterator(); iter.hasNext(); ) { String name = (String) iter.next(); PetName pn = db.getByName(name); - if ("syndieblog".equals(pn.getProtocol()) && pn.isMember(FilteredThreadIndex.GROUP_FAVORITE)) { + String proto = pn.getProtocol(); + String loc = pn.getLocation(); + if (proto != null && loc != null && "syndieblog".equals(proto) && pn.isMember(FilteredThreadIndex.GROUP_FAVORITE)) { if ( (author != null) && (author.equals(pn.getLocation())) ) - out.write("\n"); + out.write("\n"); else - out.write("\n"); + out.write("\n"); } } out.write("\n"); diff --git a/apps/syndie/java/src/net/i2p/syndie/web/PostServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/PostServlet.java index 2e36dfb19..4cc2127b4 100644 --- a/apps/syndie/java/src/net/i2p/syndie/web/PostServlet.java +++ b/apps/syndie/java/src/net/i2p/syndie/web/PostServlet.java @@ -217,7 +217,7 @@ public class PostServlet extends BaseServlet { out.write("Post subject: "); out.write("
\n"); - out.write("Post content (in raw SML, no headers):
\n"); + out.write("Post content (in raw SML, no headers):
\n"); out.write("
\n"); out.write("SML post headers:
\n"); out.write("
\n"); @@ -273,7 +273,7 @@ public class PostServlet extends BaseServlet { out.write("Post subject: "); out.write("
\n"); - out.write("Post content (in raw SML, no headers):
\n"); + out.write("Post content (in raw SML, no headers):
\n"); out.write("
\n"); out.write("SML post headers:
\n"); out.write("
\n"); diff --git a/apps/syndie/java/src/net/i2p/syndie/web/RemoteArchiveBean.java b/apps/syndie/java/src/net/i2p/syndie/web/RemoteArchiveBean.java index d555172a9..1fc7b77b6 100644 --- a/apps/syndie/java/src/net/i2p/syndie/web/RemoteArchiveBean.java +++ b/apps/syndie/java/src/net/i2p/syndie/web/RemoteArchiveBean.java @@ -278,7 +278,7 @@ public class RemoteArchiveBean { scheduler.fetch(shouldBlock); } - public void fetchIndex(User user, String schema, String location, String proxyHost, String proxyPort) { + public void fetchIndex(User user, String schema, String location, String proxyHost, String proxyPort, boolean allowCaching) { _fetchIndexInProgress = true; _remoteIndex = null; _remoteLocation = location; @@ -330,9 +330,12 @@ public class RemoteArchiveBean { } catch (IOException ioe) { //ignore } - + + String tag = null; + if (allowCaching) + tag = etags.getProperty(location); EepGet eep = new EepGet(I2PAppContext.getGlobalContext(), ((_proxyHost != null) && (_proxyPort > 0)), - _proxyHost, _proxyPort, 0, archiveFile.getAbsolutePath(), location, true, etags.getProperty(location)); + _proxyHost, _proxyPort, 0, archiveFile.getAbsolutePath(), location, allowCaching, tag); eep.addStatusListener(new IndexFetcherStatusListener(archiveFile)); eep.fetch(); diff --git a/apps/syndie/java/src/net/i2p/syndie/web/SyndicateServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/SyndicateServlet.java index 0d6c17ebc..0705dc93f 100644 --- a/apps/syndie/java/src/net/i2p/syndie/web/SyndicateServlet.java +++ b/apps/syndie/java/src/net/i2p/syndie/web/SyndicateServlet.java @@ -49,9 +49,11 @@ public class SyndicateServlet extends BaseServlet { if (pnval != null) location = pnval.getLocation(); } + // dont allow caching if they explicit ask for a fetch + boolean allowCaching = false; remote.fetchIndex(user, req.getParameter(PARAM_SCHEMA), location, req.getParameter("proxyhost"), - req.getParameter("proxyport")); + req.getParameter("proxyport"), allowCaching); } else if ("Fetch metadata".equals(action)) { remote.fetchMetadata(user, req.getParameterMap()); } else if ("Fetch selected entries".equals(action)) { diff --git a/core/java/src/freenet/support/CPUInformation/CPUID.java b/core/java/src/freenet/support/CPUInformation/CPUID.java index f543c28c8..59a7f2cc4 100644 --- a/core/java/src/freenet/support/CPUInformation/CPUID.java +++ b/core/java/src/freenet/support/CPUInformation/CPUID.java @@ -135,6 +135,8 @@ public class CPUID { { if(!_nativeOk) throw new UnknownCPUException("Failed to read CPU information from the system. Please verify the existence of the jcpuid dll/so."); + if(getCPUVendorID().equals("CentaurHauls")) + return new VIAC3Impl(); if(!isX86) throw new UnknownCPUException("Failed to read CPU information from the system. 
The CPUID instruction exists on x86 CPU's only"); if(getCPUVendorID().equals("AuthenticAMD")) @@ -159,6 +161,11 @@ public class CPUID { public boolean hasSSE2(){ return (getCPUFlags() & 0x4000000) >0; //Bit 26 } + public boolean IsC3Compatible() { return false; } + } + protected static class VIAC3Impl extends CPUIDCPUInfo implements CPUInfo { + public boolean isC3Compatible() { return true; } + public String getCPUModelString() { return "VIA C3"; } } protected static class AMDInfoImpl extends CPUIDCPUInfo implements AMDCPUInfo { diff --git a/core/java/src/freenet/support/CPUInformation/CPUInfo.java b/core/java/src/freenet/support/CPUInformation/CPUInfo.java index 7d7691f53..e6b660927 100644 --- a/core/java/src/freenet/support/CPUInformation/CPUInfo.java +++ b/core/java/src/freenet/support/CPUInformation/CPUInfo.java @@ -41,5 +41,6 @@ public interface CPUInfo * @return true iff the CPU support the SSE2 instruction set. */ public boolean hasSSE2(); - + + public boolean IsC3Compatible(); } diff --git a/core/java/src/net/i2p/client/SessionStatusMessageHandler.java b/core/java/src/net/i2p/client/SessionStatusMessageHandler.java index abf7805c1..9fc688651 100644 --- a/core/java/src/net/i2p/client/SessionStatusMessageHandler.java +++ b/core/java/src/net/i2p/client/SessionStatusMessageHandler.java @@ -34,10 +34,11 @@ class SessionStatusMessageHandler extends HandlerImpl { break; case SessionStatusMessage.STATUS_DESTROYED: _log.info("Session destroyed"); - session.destroySession(); + //session.destroySession(); + session.reconnect(); // la la la break; case SessionStatusMessage.STATUS_INVALID: - session.destroySession(); + session.destroySession(); // ok, honor this destroy message, because we're b0rked break; case SessionStatusMessage.STATUS_UPDATED: _log.info("Session status updated"); diff --git a/core/java/src/net/i2p/crypto/DHSessionKeyBuilder.java b/core/java/src/net/i2p/crypto/DHSessionKeyBuilder.java index 82b9d9346..a1754a3ad 100644 --- a/core/java/src/net/i2p/crypto/DHSessionKeyBuilder.java +++ b/core/java/src/net/i2p/crypto/DHSessionKeyBuilder.java @@ -228,7 +228,7 @@ public class DHSessionKeyBuilder { */ public BigInteger generateMyValue() { long start = System.currentTimeMillis(); - _myPrivateValue = new NativeBigInteger(2048, RandomSource.getInstance()); + _myPrivateValue = new NativeBigInteger(KeyGenerator.PUBKEY_EXPONENT_SIZE, RandomSource.getInstance()); BigInteger myValue = CryptoConstants.elgg.modPow(_myPrivateValue, CryptoConstants.elgp); long end = System.currentTimeMillis(); long diff = end - start; diff --git a/core/java/src/net/i2p/crypto/HMACSHA256Generator.java b/core/java/src/net/i2p/crypto/HMACSHA256Generator.java index 6430b8d60..86bb80f9e 100644 --- a/core/java/src/net/i2p/crypto/HMACSHA256Generator.java +++ b/core/java/src/net/i2p/crypto/HMACSHA256Generator.java @@ -40,7 +40,7 @@ public class HMACSHA256Generator { _useMD5 = true; else _useMD5 = false; - if ("true".equals(context.getProperty("i2p.HMACBrokenSize", "true"))) + if ("true".equals(context.getProperty("i2p.HMACBrokenSize", "false"))) _macSize = 32; else _macSize = (_useMD5 ? 
16 : 32); diff --git a/core/java/src/net/i2p/crypto/KeyGenerator.java b/core/java/src/net/i2p/crypto/KeyGenerator.java index 54f47ca23..8e3af0a51 100644 --- a/core/java/src/net/i2p/crypto/KeyGenerator.java +++ b/core/java/src/net/i2p/crypto/KeyGenerator.java @@ -52,13 +52,27 @@ public class KeyGenerator { key.setData(data); return key; } + + /** standard exponent size */ + private static final int PUBKEY_EXPONENT_SIZE_FULL = 2048; + /** + * short exponent size, which should be safe for use with the Oakley primes, + * per "On Diffie-Hellman Key Agreement with Short Exponents" - van Oorschot, Weiner + * at EuroCrypt 96, and crypto++'s benchmarks at http://www.eskimo.com/~weidai/benchmarks.html + * Also, "Koshiba & Kurosawa: Short Exponent Diffie-Hellman Problems" (PKC 2004, LNCS 2947, pp. 173-186) + * aparently supports this, according to + * http://groups.google.com/group/sci.crypt/browse_thread/thread/1855a5efa7416677/339fa2f945cc9ba0#339fa2f945cc9ba0 + * (damn commercial access to http://www.springerlink.com/(xrkdvv45w0cmnur4aimsxx55)/app/home/contribution.asp?referrer=parent&backto=issue,13,31;journal,893,3280;linkingpublicationresults,1:105633,1 ) + */ + private static final int PUBKEY_EXPONENT_SIZE_SHORT = 226; + public static final int PUBKEY_EXPONENT_SIZE = PUBKEY_EXPONENT_SIZE_SHORT; /** Generate a pair of keys, where index 0 is a PublicKey, and * index 1 is a PrivateKey * @return pair of keys */ public Object[] generatePKIKeypair() { - BigInteger a = new NativeBigInteger(2048, _context.random()); + BigInteger a = new NativeBigInteger(PUBKEY_EXPONENT_SIZE, _context.random()); BigInteger aalpha = CryptoConstants.elgg.modPow(a, CryptoConstants.elgp); Object[] keys = new Object[2]; @@ -130,7 +144,7 @@ public class KeyGenerator { * Pad the buffer w/ leading 0s or trim off leading bits so the result is the * given length. 
*/ - private final static byte[] padBuffer(byte src[], int length) { + final static byte[] padBuffer(byte src[], int length) { byte buf[] = new byte[length]; if (src.length > buf.length) // extra bits, chop leading bits diff --git a/core/java/src/net/i2p/crypto/YKGenerator.java b/core/java/src/net/i2p/crypto/YKGenerator.java index cc9f42fb7..e10b917f6 100644 --- a/core/java/src/net/i2p/crypto/YKGenerator.java +++ b/core/java/src/net/i2p/crypto/YKGenerator.java @@ -129,7 +129,7 @@ class YKGenerator { long t1 = 0; while (k == null) { t0 = Clock.getInstance().now(); - k = new NativeBigInteger(2048, RandomSource.getInstance()); + k = new NativeBigInteger(KeyGenerator.PUBKEY_EXPONENT_SIZE, RandomSource.getInstance()); t1 = Clock.getInstance().now(); if (BigInteger.ZERO.compareTo(k) == 0) { k = null; diff --git a/core/java/src/net/i2p/data/Certificate.java b/core/java/src/net/i2p/data/Certificate.java index 9afc99ee4..89a5aca97 100644 --- a/core/java/src/net/i2p/data/Certificate.java +++ b/core/java/src/net/i2p/data/Certificate.java @@ -34,6 +34,8 @@ public class Certificate extends DataStructureImpl { public final static int CERTIFICATE_TYPE_NULL = 0; /** specifies a Hashcash style certificate */ public final static int CERTIFICATE_TYPE_HASHCASH = 1; + /** we should not be used for anything (don't use us in the netDb, in tunnels, or tell others about us) */ + public final static int CERTIFICATE_TYPE_HIDDEN = 2; public Certificate() { _type = 0; @@ -76,7 +78,7 @@ public class Certificate extends DataStructureImpl { public void writeBytes(OutputStream out) throws DataFormatException, IOException { if (_type < 0) throw new DataFormatException("Invalid certificate type: " + _type); - if ((_type != 0) && (_payload == null)) throw new DataFormatException("Payload is required for non null type"); + //if ((_type != 0) && (_payload == null)) throw new DataFormatException("Payload is required for non null type"); DataHelper.writeLong(out, 1, _type); if (_payload != null) { diff --git a/core/java/src/net/i2p/data/RouterIdentity.java b/core/java/src/net/i2p/data/RouterIdentity.java index 573fb9d08..bab441282 100644 --- a/core/java/src/net/i2p/data/RouterIdentity.java +++ b/core/java/src/net/i2p/data/RouterIdentity.java @@ -63,6 +63,16 @@ public class RouterIdentity extends DataStructureImpl { _signingKey = key; __calculatedHash = null; } + + /** + * This router specified that they should not be used as a part of a tunnel, + * nor queried for the netDb, and that disclosure of their contact information + * should be limited. 
+ * + */ + public boolean isHidden() { + return (_certificate != null) && (_certificate.getCertificateType() == Certificate.CERTIFICATE_TYPE_HIDDEN); + } public void readBytes(InputStream in) throws DataFormatException, IOException { _publicKey = new PublicKey(); diff --git a/core/java/src/net/i2p/util/DecayingBloomFilter.java b/core/java/src/net/i2p/util/DecayingBloomFilter.java index 074eefb5e..b76e99408 100644 --- a/core/java/src/net/i2p/util/DecayingBloomFilter.java +++ b/core/java/src/net/i2p/util/DecayingBloomFilter.java @@ -80,14 +80,17 @@ public class DecayingBloomFilter { * */ public boolean add(byte entry[]) { + return add(entry, 0, entry.length); + } + public boolean add(byte entry[], int off, int len) { if (ALWAYS_MISS) return false; if (entry == null) throw new IllegalArgumentException("Null entry"); - if (entry.length != _entryBytes) - throw new IllegalArgumentException("Bad entry [" + entry.length + ", expected " + if (len != _entryBytes) + throw new IllegalArgumentException("Bad entry [" + len + ", expected " + _entryBytes + "]"); synchronized (this) { - return locked_add(entry); + return locked_add(entry, off, len); } } @@ -101,14 +104,15 @@ public class DecayingBloomFilter { if (ALWAYS_MISS) return false; synchronized (this) { if (_entryBytes <= 7) - entry &= _longToEntryMask; + entry = ((entry ^ _longToEntryMask) & ((1 << 31)-1)) | (entry ^ _longToEntryMask); + //entry &= _longToEntryMask; if (entry < 0) { DataHelper.toLong(_longToEntry, 0, _entryBytes, 0-entry); _longToEntry[0] |= (1 << 7); } else { DataHelper.toLong(_longToEntry, 0, _entryBytes, entry); } - return locked_add(_longToEntry); + return locked_add(_longToEntry, 0, _longToEntry.length); } } @@ -121,26 +125,26 @@ public class DecayingBloomFilter { if (ALWAYS_MISS) return false; synchronized (this) { if (_entryBytes <= 7) - entry &= _longToEntryMask; + entry = ((entry ^ _longToEntryMask) & ((1 << 31)-1)) | (entry ^ _longToEntryMask); if (entry < 0) { DataHelper.toLong(_longToEntry, 0, _entryBytes, 0-entry); _longToEntry[0] |= (1 << 7); } else { DataHelper.toLong(_longToEntry, 0, _entryBytes, entry); } - return locked_add(_longToEntry, false); + return locked_add(_longToEntry, 0, _longToEntry.length, false); } } - private boolean locked_add(byte entry[]) { - return locked_add(entry, true); + private boolean locked_add(byte entry[], int offset, int len) { + return locked_add(entry, offset, len, true); } - private boolean locked_add(byte entry[], boolean addIfNew) { + private boolean locked_add(byte entry[], int offset, int len, boolean addIfNew) { if (_extended != null) { // extend the entry to 32 bytes - System.arraycopy(entry, 0, _extended, 0, entry.length); + System.arraycopy(entry, offset, _extended, 0, len); for (int i = 0; i < _extenders.length; i++) - DataHelper.xor(entry, 0, _extenders[i], 0, _extended, _entryBytes * (i+1), _entryBytes); + DataHelper.xor(entry, offset, _extenders[i], 0, _extended, _entryBytes * (i+1), _entryBytes); boolean seen = _current.member(_extended); seen = seen || _previous.member(_extended); @@ -155,15 +159,15 @@ public class DecayingBloomFilter { return false; } } else { - boolean seen = _current.locked_member(entry); - seen = seen || _previous.locked_member(entry); + boolean seen = _current.locked_member(entry, offset, len); + seen = seen || _previous.locked_member(entry, offset, len); if (seen) { _currentDuplicates++; return true; } else { if (addIfNew) { - _current.locked_insert(entry); - _previous.locked_insert(entry); + _current.locked_insert(entry, offset, len); + 
_previous.locked_insert(entry, offset, len); } return false; } diff --git a/core/java/src/net/i2p/util/NativeBigInteger.java b/core/java/src/net/i2p/util/NativeBigInteger.java index 8e5a45845..589abb733 100644 --- a/core/java/src/net/i2p/util/NativeBigInteger.java +++ b/core/java/src/net/i2p/util/NativeBigInteger.java @@ -103,6 +103,7 @@ public class NativeBigInteger extends BigInteger { private final static String JBIGI_OPTIMIZATION_PENTIUM2 = "pentium2"; private final static String JBIGI_OPTIMIZATION_PENTIUM3 = "pentium3"; private final static String JBIGI_OPTIMIZATION_PENTIUM4 = "pentium4"; + private final static String JBIGI_OPTIMIZATION_VIAC3 = "viac3"; private static final boolean _isWin = System.getProperty("os.name").startsWith("Win"); private static final boolean _isOS2 = System.getProperty("os.name").startsWith("OS/2"); @@ -134,6 +135,8 @@ public class NativeBigInteger extends BigInteger { try { CPUInfo c = CPUID.getInfo(); + if (c.IsC3Compatible()) + return JBIGI_OPTIMIZATION_VIAC3; if (c instanceof AMDCPUInfo) { AMDCPUInfo amdcpu = (AMDCPUInfo) c; if (amdcpu.IsAthlon64Compatible()) @@ -146,20 +149,18 @@ public class NativeBigInteger extends BigInteger { return JBIGI_OPTIMIZATION_K6_2; if (amdcpu.IsK6Compatible()) return JBIGI_OPTIMIZATION_K6; - } else { - if (c instanceof IntelCPUInfo) { - IntelCPUInfo intelcpu = (IntelCPUInfo) c; - if (intelcpu.IsPentium4Compatible()) - return JBIGI_OPTIMIZATION_PENTIUM4; - if (intelcpu.IsPentium3Compatible()) - return JBIGI_OPTIMIZATION_PENTIUM3; - if (intelcpu.IsPentium2Compatible()) - return JBIGI_OPTIMIZATION_PENTIUM2; - if (intelcpu.IsPentiumMMXCompatible()) - return JBIGI_OPTIMIZATION_PENTIUMMMX; - if (intelcpu.IsPentiumCompatible()) - return JBIGI_OPTIMIZATION_PENTIUM; - } + } else if (c instanceof IntelCPUInfo) { + IntelCPUInfo intelcpu = (IntelCPUInfo) c; + if (intelcpu.IsPentium4Compatible()) + return JBIGI_OPTIMIZATION_PENTIUM4; + if (intelcpu.IsPentium3Compatible()) + return JBIGI_OPTIMIZATION_PENTIUM3; + if (intelcpu.IsPentium2Compatible()) + return JBIGI_OPTIMIZATION_PENTIUM2; + if (intelcpu.IsPentiumMMXCompatible()) + return JBIGI_OPTIMIZATION_PENTIUMMMX; + if (intelcpu.IsPentiumCompatible()) + return JBIGI_OPTIMIZATION_PENTIUM; } return null; } catch (UnknownCPUException e) { @@ -287,7 +288,7 @@ public class NativeBigInteger extends BigInteger { int runsProcessed = 0; for (runsProcessed = 0; runsProcessed < numRuns; runsProcessed++) { - BigInteger bi = new BigInteger(2048, rand); + BigInteger bi = new BigInteger(226, rand); // 2048, rand); // NativeBigInteger g = new NativeBigInteger(_sampleGenerator); NativeBigInteger p = new NativeBigInteger(_samplePrime); NativeBigInteger k = new NativeBigInteger(1, bi.toByteArray()); diff --git a/core/java/src/org/xlattice/crypto/filters/BloomSHA1.java b/core/java/src/org/xlattice/crypto/filters/BloomSHA1.java index 7c020306c..05b515bd2 100644 --- a/core/java/src/org/xlattice/crypto/filters/BloomSHA1.java +++ b/core/java/src/org/xlattice/crypto/filters/BloomSHA1.java @@ -148,14 +148,16 @@ public class BloomSHA1 { * * @param b byte array representing a key (SHA1 digest) */ - public void insert (byte[]b) { + public void insert (byte[]b) { insert(b, 0, b.length); } + public void insert (byte[]b, int offset, int len) { synchronized(this) { locked_insert(b); } } - public final void locked_insert(byte[]b) { - ks.getOffsets(b); + public final void locked_insert(byte[]b) { locked_insert(b, 0, b.length); } + public final void locked_insert(byte[]b, int offset, int len) { + ks.getOffsets(b, offset, 
len); for (int i = 0; i < k; i++) { filter[wordOffset[i]] |= 1 << bitOffset[i]; } @@ -168,8 +170,9 @@ public class BloomSHA1 { * @param b byte array representing a key (SHA1 digest) * @return true if b is in the filter */ - protected final boolean isMember(byte[] b) { - ks.getOffsets(b); + protected final boolean isMember(byte[] b) { return isMember(b, 0, b.length); } + protected final boolean isMember(byte[] b, int offset, int len) { + ks.getOffsets(b, offset, len); for (int i = 0; i < k; i++) { if (! ((filter[wordOffset[i]] & (1 << bitOffset[i])) != 0) ) { return false; @@ -179,6 +182,7 @@ public class BloomSHA1 { } public final boolean locked_member(byte[]b) { return isMember(b); } + public final boolean locked_member(byte[]b, int offset, int len) { return isMember(b, offset, len); } /** * Is a key in the filter. External interface, internally synchronized. @@ -186,9 +190,10 @@ public class BloomSHA1 { * @param b byte array representing a key (SHA1 digest) * @return true if b is in the filter */ - public final boolean member(byte[]b) { + public final boolean member(byte[]b) { return member(b, 0, b.length); } + public final boolean member(byte[]b, int offset, int len) { synchronized (this) { - return isMember(b); + return isMember(b, offset, len); } } diff --git a/core/java/src/org/xlattice/crypto/filters/KeySelector.java b/core/java/src/org/xlattice/crypto/filters/KeySelector.java index 6f8dd417f..3a1528ed9 100644 --- a/core/java/src/org/xlattice/crypto/filters/KeySelector.java +++ b/core/java/src/org/xlattice/crypto/filters/KeySelector.java @@ -18,6 +18,8 @@ public class KeySelector { private int m; private int k; private byte[] b; + private int offset; // index into b to select + private int length; // length into b to select private int[] bitOffset; private int[] wordOffset; private BitSelector bitSel; @@ -70,7 +72,7 @@ public class KeySelector { public class GenericBitSelector implements BitSelector { /** Do the extraction */ public void getBitSelectors() { - int curBit = 0; + int curBit = 8 * offset; int curByte; for (int j = 0; j < k; j++) { curByte = curBit / 8; @@ -126,7 +128,7 @@ public class KeySelector { public void getWordSelectors() { int stride = m - 5; //assert true: stride<16; - int curBit = k * 5; + int curBit = (k * 5) + (offset * 8); int curByte; for (int j = 0; j < k; j++) { curByte = curBit / 8; @@ -216,15 +218,18 @@ public class KeySelector { * * @param key cryptographic key used in populating the arrays */ - public void getOffsets (byte[] key) { + public void getOffsets (byte[] key) { getOffsets(key, 0, key.length); } + public void getOffsets (byte[] key, int off, int len) { if (key == null) { throw new IllegalArgumentException("null key"); } - if (key.length < 20) { + if (len < 20) { throw new IllegalArgumentException( "key must be at least 20 bytes long"); } b = key; + offset = off; + length = len; // // DEBUG // System.out.println("KeySelector.getOffsets for " // + BloomSHA1.keyToString(b)); diff --git a/history.txt b/history.txt index 5c777d866..66c6ca387 100644 --- a/history.txt +++ b/history.txt @@ -1,4 +1,126 @@ -$Id: history.txt,v 1.395 2006/01/25 10:34:28 dust Exp $ +$Id: history.txt,v 1.396.2.19 2006/02/15 00:16:31 jrandom Exp $ + +2006-02-15 jrandom + * Merged in the i2p_0_6_1_10_PRE branch to the trunk, so CVS HEAD is no + longer backwards compatible (and should not be used until 0.6.1.1 is + out) + +2006-02-14 jrandom + * Syndie ui bugfixes (thanks all!) 
+ +2006-02-13 jrandom + * Use the current directory for some temporary I2PSnark files, rather than + the OS default temp dir (thanks anon!) + * Increase the base streaming lib window size (still shrinks to 1 on + retransmission though, of course) + * Fixed the I2PTunnel newlines to work with lighthttpd (thanks all!) + * Implement fast retransmit in the streaming lib (fires at most once per + packet), and increased the default ack delay to 2 seconds (from .5s) + * Don't ask for garlic level message acks for end to end messages unless + they're useful (e.g. to ack session tags) + +2006-02-12 cervantes + * Use a different santisation method for some SML attributes + * Make router console update config save button actually save. + * Fix console bandwidth limiter burst rate dropdowns, so the display + relates to what is saved in the config. + +2006-02-12 cervantes + * SML is now stricter in it's formatting (attributes should only use + double quotes instead of being allowed to mix with singles). + * Using apostrophes in SML attributes will no longer invalidate the tag. + * Some instances of [blog] tag description were not being displayed + correctly. + +2006-02-12 jrandom + * Further SSU peer test throttling + * Put the most common router console features on the main index page too + +2006-02-11 jrandom + * Be more careful about SSU peer test floods + +2006-02-09 jrandom + * Adjusted one of the SSU timeouts so we don't drop peers as easily (duh) + +2006-02-08 jrandom + * Added transparent support for VIA C3 CPUs to jbigi (thanks Nekow42), and + bundled a precompiled libjbigi.so in the jbigi.jar + * Cleaned up the synchronization for some SSU packet handling code + * Allow explicit rejection of more lagged tunnel build requests, rather + than dropping them outright + * Use lighter load testing + +2006-02-07 jrandom + * Handle HTTP headers without any values (thanks Sugadude!) + * Don't show the option to make Syndie multiuser, since very few people + need it, and multiuser mode is a lot more complex to use. Geeks can + enable it by adding "syndie.singleUser=false" to syndie/syndie.config + (or in the router's advanced config, for the embedded Syndie) + * When a peer rejects participation in a tunnel, they mean it (duh) + * Decrease tunnel test timeout period to 20s (a 40s lag is insane) + * Remove a throttle on the size of the SSU active outbound pool, since + it was essentially arbitrary + * Use a more appropriate SSU bloom filter size + * Don't "proactively" drop SSU connections if we have partially received + inbound messages (duh) + * Migrate most of the message state across SSU connection reestablishment + +2006-02-06 jrandom + * Reduce the SSU retransmit timeout range, and increase the number of ACKs + piggybacked + +2006-02-05 jrandom + * Experiment with short exponents for DH/ElGamal, using a 226bit x instead + of a 2048bit x, as reports suggest that size is sufficient for 2048bit + DH/ElGamal when using safe primes (see KeyGenerator.java for references) + * Enable the messageHistory.txt by default, for debugging + +2006-02-05 jrandom + * Substantial bugfix for the duplicate message detection in the transport + layer + * Handle tunnel build responses ASAP, rather than queueing them up to wait + in line (processing them is really fast - just a few AES loops) + * Don't bother handling build requests that we have queued up for a while + locally, as the requestor will have timed it out anyway (perhaps we + should reply regardless, but with a backoff instead?) 
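As an aside, the short-exponent change noted in the 2006-02-05 entry above (and implemented in KeyGenerator, DHSessionKeyBuilder, and YKGenerator in this patch) amounts to the following sketch. It is illustrative only and is not code added by the patch: it reuses names that appear elsewhere in this diff (KeyGenerator.PUBKEY_EXPONENT_SIZE, CryptoConstants.elgg / elgp, NativeBigInteger, RandomSource) and is placed in net.i2p.crypto so those constants are visible.

    package net.i2p.crypto;

    import java.math.BigInteger;

    import net.i2p.util.NativeBigInteger;
    import net.i2p.util.RandomSource;

    /** Illustrative sketch of the short-exponent DH/ElGamal change - not part of this patch. */
    public class ShortExponentSketch {
        public static void main(String args[]) {
            // pick a 226 bit private exponent x (previously 2048 bits)
            BigInteger x = new NativeBigInteger(KeyGenerator.PUBKEY_EXPONENT_SIZE, RandomSource.getInstance());
            // the public value is computed exactly as before: y = g^x mod p over the 2048 bit safe prime
            BigInteger y = CryptoConstants.elgg.modPow(x, CryptoConstants.elgp);
            System.out.println("exponent bits: " + x.bitLength() + ", public value bits: " + y.bitLength());
        }
    }

Only the exponent size changes; the modulus, generator, and the rest of the DH/ElGamal exchange are untouched, so the exchanged public values are unchanged in form.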
+ +2006-02-04 jrandom + * Further tunnel test cleanup and disabling of the old tunnel creation + code + +2006-02-04 jrandom + * Clean up and reenable the tunnel testing for the new tunnel system. + +2006-02-04 jrandom + * Don't cache the archive.txt in syndie when fetching it through the web + interface. + * Logging updates + +2006-02-03 jrandom + * Added further replay prevention on the tunnel build requests + * More aggressive streaming lib closing on reset + +2006-02-03 jrandom + * More aggressive refusal of peers from the wrong network (oops) + +2006-02-01 jrandom + * Instruct the router to reseed against a new URL, for migration purposes: + http://dev.i2p.net/i2pdb2/ + * Aggressive error handling during UDP packet creation (thanks cervantes) + +2006-02-01 jrandom + * Fix the new tunnel creation crypto, including the addition of a 4 byte + "next message ID" to the encrypted request structure in the spec. + * Backwards incompatible change, using the new tunnel creation crypto, the + fixed MD5 HMAC size, and a new network ID (to prevent cross pollination + with the old incompatible network). + * Reworked the leaseSet request process to handle a race condition + * Disable the TCP transport + * Run four separate threads on the job queue to cut down on job lag + +2006-01-28 jrandom + * Removed a race that could show up in leaseSet requesting with the new + tunnel building process 2006-01-25 jrandom * Run the peer profile coalescing/reorganization outside the job queue diff --git a/installer/lib/jbigi/README.txt b/installer/lib/jbigi/README.txt index 5065b2bc9..9a25b08ad 100644 --- a/installer/lib/jbigi/README.txt +++ b/installer/lib/jbigi/README.txt @@ -16,3 +16,6 @@ jbigi.jar after being mistakenly removed in the Sep 18 update (d'oh!) On Dec 30, 2005, the libjcpuid-x86-linux.so was updated to use the (year old) C version of jcpuid, rather than the C++ version. This removes the libg++.so.5 dependency that has been a problem for a few linux distros. + +On Feb 8, 2006, the libjbigi-linux-viac3.so was added to jbigi.jar after +being compiled by jrandom on linux/p4 (cross compiled to --host=viac3) diff --git a/installer/lib/jbigi/jbigi.jar b/installer/lib/jbigi/jbigi.jar index be80ee85b..1926c9669 100644 Binary files a/installer/lib/jbigi/jbigi.jar and b/installer/lib/jbigi/jbigi.jar differ diff --git a/installer/lib/jbigi/jbigi.jar.sig b/installer/lib/jbigi/jbigi.jar.sig index a4332e2c3..2eeb21015 100644 Binary files a/installer/lib/jbigi/jbigi.jar.sig and b/installer/lib/jbigi/jbigi.jar.sig differ diff --git a/router/doc/techintro.html b/router/doc/techintro.html index 56f766e9b..1574f5fa2 100644 --- a/router/doc/techintro.html +++ b/router/doc/techintro.html @@ -17,7 +17,7 @@ pre { font-size: 10; font-family: sans-serif }
a scalable framework for anonymous communication
-$Id: techintro.html,v 1.7 2005/10/04 20:45:21 jrandom Exp $ +$Id: techintro.html,v 1.8.2.1 2006/02/13 07:13:35 jrandom Exp $
@@ -56,15 +56,16 @@ pre { font-size: 10; font-family: sans-serif }
Introduction
-I2P is a scalable, self organizing, resilient message based anonymous network layer, +I2P is a scalable, self organizing, resilient packet switched anonymous network layer, upon which any number of different anonymity or security conscious applications can operate. Each of these applications may make their own anonymity, latency, and throughput tradeoffs without worrying about the proper implementation of a free route mixnet, allowing them to blend their activity with the larger anonymity set of users already running on top of I2P. Applications available already provide the full range of typical Internet activities - anonymous web browsing, anonymous web hosting, -anonymous blogging (with Syndie), anonymous chat (via IRC or -Jabber), anonymous swarming file transfers (with i2p-bt and +anonymous blogging and content syndication (with Syndie), +anonymous chat (via IRC or Jabber), anonymous swarming file transfers (with i2p-bt, I2PSnark, and Azureus), anonymous file sharing (with I2Phex), anonymous email (with I2Pmail and susimail), anonymous newsgroups, as well as several @@ -85,8 +86,8 @@ to allow I2P's anonymous best-effort messages to transfer as reliable, in-order transparently offering a TCP based congestion control algorithm tuned for the high bandwidth delay product of the network. While there have been several simple SOCKS proxies available to tie existing applications into the network, their value has been -limited as nearly every application routinely exposes what in an anonymity context is -sensitive information. The only safe way to go is to fully audit an application to +limited as nearly every application routinely exposes what, in an anonymous context, +is sensitive information. The only safe way to go is to fully audit an application to ensure proper operation, and to assist in that we provide a series of APIs in various languages which can be used to make the most out of the network.
@@ -113,14 +114,14 @@ level of anonymity to those who need it. It has been in active development sinc early 2003 with one full time developer and a dedicated group of part time contributors from all over the world. All of the work done on I2P is open source and freely available on the website, with the majority -of the code released outright into the public domain but making use of a few +of the code released outright into the public domain, though making use of a few cryptographic routines under BSD-style licenses. The people working on I2P do not control what people release client applications under, and there are several GPL'ed applications available (I2PTunnel, -susimail, Azureus, +susimail, I2PSnark, Azureus, I2Phex). Funding for I2P comes entirely from donations, and does not receive any tax breaks in any -jurisdiction, as many of the developers are themselves anonymous. +jurisdiction at this time, as many of the developers are themselves anonymous.Operation
@@ -165,16 +166,18 @@ inbound tunnels as well as when that tunnel will expire. The leaseSet also contains a pair of public keys which can be used for layered garlic encryption. +When Alice wants to send a message to Bob, she first does a lookup in the -netDb to find Bob's leaseSet, giving her his current inbound tunnel gateways -(3 and 4). She then picks one of her outbound tunnels and sends the message +netDb to find Bob's leaseSet, giving her his current inbound tunnel gateways. +She then picks one of her outbound tunnels and sends the message down it with instructions for the outbound tunnel's endpoint to forward the message on to one of Bob's inbound tunnel gateways. When the outbound tunnel endpoint receives those instructions, it forwards the message as @@ -263,7 +266,7 @@ by measuring their indirect behavior - for instance, when a peer responds to a netDb lookup in 1.3 seconds, that round trip latency is recorded in the profiles for all of the routers involved in the two tunnels (inbound and outbound) through which the request and response passed, as well as the queried -peer's profile. Direction measurement, such as transport layer latency or +peer's profile. Direct measurement, such as transport layer latency or congestion, is not used as part of the profile, as it can be manipulated and associated with the measuring router, exposing them to trivial attacks. While gathering these profiles, a series of calculations are run on each to summarize @@ -438,10 +441,10 @@ addressing network obstacles, like most NATs or firewalls. A bare minimum set of cryptographic primitives are combined together to provide I2P's layered defenses against a variety of adversaries. At the lowest level, interrouter communication is protected by the transport layer security - SSU -encrypts each packet with AES256/CBC with both an explicit IV and MAC (HMAC-SHA256-128) +encrypts each packet with AES256/CBC with both an explicit IV and MAC (HMAC-MD5-128) after agreeing upon an ephemeral session key through a 2048bit Diffie-Hellman exchange, station-to-station authentication with the other router's DSA key, plus each network -message has their own SHA256 hash for local integrity checking. +message has their own hash for local integrity checking. Tunnel messages passed over the transports have their own layered AES256/CBC encryption with an explicit IV and verified at the tunnel endpoint with an additional SHA256 hash. Various other messages are passed along inside @@ -686,14 +689,10 @@ outbound tunnel along the same routers.
Another anonymity issue comes up in Tor's use of telescopic tunnel creation, as simple packet counting and timing measurements as the cells in a circuit pass through an adversary's node exposes statistical information regarding where the -adversary is within the circuit. I2P's use of exploratory tunnels for delivering -and receiving the tunnel creation requests and responses effectively spreads the -messages randomly across the network, so that each of the peers who forwards the -individual tunnel creation messages only see the peer they transmit to or receive -from, and thanks to the garlic encryption, they are not aware of whether the message -is part of a tunnel creation process or not. The participant positional information -is useful to an adversary for mounting predecessor, intersection, and traffic -confirmation attacks. +adversary is within the circuit. I2P's unidirectional tunnel creation with a +single message so that this data is not exposed. Protecting the position in a +tunnel is important, as an adversary would otherwise be able to mounting a +series of powerful predecessor, intersection, and traffic confirmation attacks.@@ -754,13 +753,6 @@ has been said the anonymity and scalability claims seem highly dubious. In particular, the appropriateness for use in hostile regimes against state level adversaries has been tremendously overstated, and any analysis on the implications of resource scarcity upon the scalability of the network has seemingly been avoided. -Specifically, while publishing the "anonymous" topology in the darknet does not -necessarily immediately expose all identities, it is equivalent to publishing an -organizational chart for a covert group, which can in turn be used by an adversary -alongside existing knowledge of their target to narrow down or identify different -participants. In addition, by using only peers that are locally connected, the -network's mixnet layer is vulnerable to a class of -local view attacks. Further questions regarding susceptibility to traffic analysis, trust, and other topics do exist, but a more in-depth review of this "globally scalable darknet" will have to wait until the Freenet team makes more information available. @@ -941,6 +933,17 @@ application and to take into consideration the fact that IPs cannot be used for identifying peers.
+I2PSnark
+I2PSnark developed: jrandom, et al, ported from mjw's Snark client
+
+
+Azureus/azneti2p
Developed by: parg, et al
diff --git a/router/doc/tunnel-alt-creation.html b/router/doc/tunnel-alt-creation.html index 15f1bec62..0eb4a5d90 100644 --- a/router/doc/tunnel-alt-creation.html +++ b/router/doc/tunnel-alt-creation.html @@ -1,4 +1,4 @@ -$Id: tunnel-alt.html,v 1.9 2005/07/27 14:04:07 jrandom Exp $
+$Id: tunnel-alt-creation.html,v 1.1.2.1 2006/02/01 20:28:34 jrandom Exp $
1) Tunnel creation
1.1) Tunnel creation request record
@@ -35,12 +35,14 @@ the asymmetrically encrypted record only at the appropriate time.
 bytes 168-183: reply IV
 byte 184: flags
 bytes 185-188: request time (in hours since the epoch)
- bytes 189-222: uninterpreted / random padding
+ bytes 189-192: next message ID
+ bytes 193-222: uninterpreted / random padding
The next tunnel ID and next router identity hash fields are used to specify the next hop in the tunnel, though for an outbound tunnel endpoint, they specify where the rewritten tunnel creation reply
-message should be sent.
+message should be sent. In addition, the next message ID specifies the
+message ID that the message (or reply) should use.
The flags field currently has two bits defined:
bit 0: if set, allow messages from anyone diff --git a/router/doc/udp.html b/router/doc/udp.html index 7768c8e89..3220542c8 100644 --- a/router/doc/udp.html +++ b/router/doc/udp.html @@ -1,4 +1,4 @@ -* */ @@ -57,6 +58,7 @@ public class BuildRequestRecord { private static final int OFF_REPLY_IV = OFF_REPLY_KEY + SessionKey.KEYSIZE_BYTES; private static final int OFF_FLAG = OFF_REPLY_IV + IV_SIZE; private static final int OFF_REQ_TIME = OFF_FLAG + 1; + private static final int OFF_SEND_MSG_ID = OFF_REQ_TIME + 4; /** what tunnel ID should this receive messages on */ public long readReceiveTunnelId() { @@ -135,7 +137,14 @@ public class BuildRequestRecord { public long readRequestTime() { return DataHelper.fromLong(_data.getData(), _data.getOffset() + OFF_REQ_TIME, 4) * 60l * 60l * 1000l; } - + /** + * What message ID should we send the request to the next hop with. If this is the outbound tunnel endpoint, + * this specifies the message ID with which the reply should be sent. + */ + public long readReplyMessageId() { + return DataHelper.fromLong(_data.getData(), _data.getOffset() + OFF_SEND_MSG_ID, 4); + } + /** * Encrypt the record to the specified peer. The result is formatted as:$Id: udp.html,v 1.17 2005/09/09 23:30:37 jrandom Exp $
+$Id: udp.html,v 1.18.2.1 2006/02/15 00:16:29 jrandom Exp $
Secure Semireliable UDP (SSU)
DRAFT
@@ -44,10 +44,10 @@ capabilities, see below.
All UDP datagrams begin with a MAC and an IV, followed by a variable size payload encrypted with the appropriate key. The MAC used is
-HMAC-SHA256, truncated to 16 bytes, while the key is a full AES256
+HMAC-MD5, truncated to 16 bytes, while the key is a full AES256
key. The specific construct of the MAC is the first 16 bytes from:
- HMAC-SHA256(payload || IV || payloadLength, macKey)
+ HMAC-MD5(payload || IV || payloadLength, macKey)
The payload itself is AES256/CBC encrypted with the IV and the
diff --git a/router/java/src/net/i2p/data/i2np/BuildRequestRecord.java b/router/java/src/net/i2p/data/i2np/BuildRequestRecord.java
index 21b9aa79c..a374c1001 100644
--- a/router/java/src/net/i2p/data/i2np/BuildRequestRecord.java
+++ b/router/java/src/net/i2p/data/i2np/BuildRequestRecord.java
@@ -17,7 +17,8 @@ import net.i2p.data.*;
 * bytes 168-183: reply IV
 * byte 184: flags
 * bytes 185-188: request time (in hours since the epoch)
- * bytes 189-222: uninterpreted / random padding
+ * bytes 189-192: next message ID
+ * bytes 193-222: uninterpreted / random padding
 *
* bytes 0-15: SHA-256-128 of the current hop's identity (the toPeer parameter) @@ -144,7 +153,7 @@ public class BuildRequestRecord { */ public void encryptRecord(I2PAppContext ctx, PublicKey toKey, Hash toPeer, byte out[], int outOffset) { System.arraycopy(toPeer.getData(), 0, out, outOffset, PEER_SIZE); - byte preEncr[] = new byte[OFF_REQ_TIME + 4 + PADDING_SIZE]; + byte preEncr[] = new byte[OFF_SEND_MSG_ID + 4 + PADDING_SIZE]; System.arraycopy(_data.getData(), _data.getOffset(), preEncr, 0, preEncr.length); byte encrypted[] = ctx.elGamalEngine().encrypt(preEncr, toKey); // the elg engine formats it kind of weird, giving 257 bytes for each part rather than 256, so @@ -175,7 +184,7 @@ public class BuildRequestRecord { } } - private static final int PADDING_SIZE = 33; + private static final int PADDING_SIZE = 29; /** * Populate this instance with data. A new buffer is created to contain the data, with the @@ -185,6 +194,7 @@ public class BuildRequestRecord { * @param peer current hop's identity * @param nextTunnelId id for the next hop, or where we send the reply (if we are the outbound endpoint) * @param nextHop next hop's identity, or where we send the reply (if we are the outbound endpoint) + * @param nextMsgId message ID to use when sending on to the next hop (or for the reply) * @param layerKey tunnel layer key to be used by the peer * @param ivKey tunnel IV key to be used by the peer * @param replyKey key to be used when encrypting the reply to this build request @@ -192,12 +202,12 @@ public class BuildRequestRecord { * @param isInGateway are we the gateway of an inbound tunnel? * @param isOutEndpoint are we the endpoint of an outbound tunnel? */ - public void createRecord(I2PAppContext ctx, long receiveTunnelId, Hash peer, long nextTunnelId, Hash nextHop, + public void createRecord(I2PAppContext ctx, long receiveTunnelId, Hash peer, long nextTunnelId, Hash nextHop, long nextMsgId, SessionKey layerKey, SessionKey ivKey, SessionKey replyKey, byte iv[], boolean isInGateway, boolean isOutEndpoint) { if ( (_data == null) || (_data.getData() != null) ) _data = new ByteArray(); - byte buf[] = new byte[OFF_REQ_TIME+4+PADDING_SIZE]; + byte buf[] = new byte[OFF_SEND_MSG_ID+4+PADDING_SIZE]; _data.setData(buf); /* bytes 0-3: tunnel ID to receive messages as @@ -210,7 +220,8 @@ public class BuildRequestRecord { * bytes 168-183: reply IV * byte 184: flags * bytes 185-188: request time (in hours since the epoch) - * bytes 189-222: uninterpreted / random padding + * bytes 189-192: next message ID + * bytes 193-222: uninterpreted / random padding */ DataHelper.toLong(buf, OFF_RECV_TUNNEL, 4, receiveTunnelId); System.arraycopy(peer.getData(), 0, buf, OFF_OUR_IDENT, Hash.HASH_LENGTH); @@ -227,9 +238,10 @@ public class BuildRequestRecord { long truncatedHour = ctx.clock().now(); truncatedHour /= (60l*60l*1000l); DataHelper.toLong(buf, OFF_REQ_TIME, 4, truncatedHour); + DataHelper.toLong(buf, OFF_SEND_MSG_ID, 4, nextMsgId); byte rnd[] = new byte[PADDING_SIZE]; ctx.random().nextBytes(rnd); - System.arraycopy(rnd, 0, buf, OFF_REQ_TIME+4, rnd.length); + System.arraycopy(rnd, 0, buf, OFF_SEND_MSG_ID+4, rnd.length); byte wroteIV[] = readReplyIV(); if (!DataHelper.eq(iv, wroteIV)) diff --git a/router/java/src/net/i2p/data/i2np/BuildResponseRecord.java b/router/java/src/net/i2p/data/i2np/BuildResponseRecord.java index 1e3f7959d..f6f97b3cc 100644 --- a/router/java/src/net/i2p/data/i2np/BuildResponseRecord.java +++ b/router/java/src/net/i2p/data/i2np/BuildResponseRecord.java @@ -2,6 +2,7 @@ package 
net.i2p.data.i2np; import net.i2p.I2PAppContext; import net.i2p.data.*; +import net.i2p.util.Log; /** * Read and write the reply to a tunnel build message record. @@ -11,13 +12,18 @@ public class BuildResponseRecord { /** * Create a new encrypted response */ - public byte[] create(I2PAppContext ctx, int status, SessionKey replyKey, byte replyIV[]) { + public byte[] create(I2PAppContext ctx, int status, SessionKey replyKey, byte replyIV[], long responseMessageId) { + Log log = ctx.logManager().getLog(BuildResponseRecord.class); byte rv[] = new byte[TunnelBuildReplyMessage.RECORD_SIZE]; ctx.random().nextBytes(rv); DataHelper.toLong(rv, TunnelBuildMessage.RECORD_SIZE-1, 1, status); // rv = AES(SHA256(padding+status) + padding + status, replyKey, replyIV) ctx.sha().calculateHash(rv, Hash.HASH_LENGTH, rv.length - Hash.HASH_LENGTH, rv, 0); + if (log.shouldLog(Log.DEBUG)) + log.debug(responseMessageId + ": before encrypt: " + Base64.encode(rv, 0, 128) + " with " + replyKey.toBase64() + "/" + Base64.encode(replyIV)); ctx.aes().encrypt(rv, 0, rv, 0, replyKey, replyIV, rv.length); + if (log.shouldLog(Log.DEBUG)) + log.debug(responseMessageId + ": after encrypt: " + Base64.encode(rv, 0, 128)); return rv; } } diff --git a/router/java/src/net/i2p/data/i2np/TunnelBuildMessage.java b/router/java/src/net/i2p/data/i2np/TunnelBuildMessage.java index d3015304e..859251dff 100644 --- a/router/java/src/net/i2p/data/i2np/TunnelBuildMessage.java +++ b/router/java/src/net/i2p/data/i2np/TunnelBuildMessage.java @@ -33,14 +33,15 @@ public class TunnelBuildMessage extends I2NPMessageImpl { for (int i = 0; i < RECORD_COUNT; i++) { int off = offset + (i * RECORD_SIZE); - int len = RECORD_SIZE; - setRecord(i, new ByteArray(data, off, len)); + byte rec[] = new byte[RECORD_SIZE]; + System.arraycopy(data, off, rec, 0, RECORD_SIZE); + setRecord(i, new ByteArray(rec)); //new ByteArray(data, off, len)); } } protected int writeMessageBody(byte[] out, int curIndex) throws I2NPMessageException { int remaining = out.length - (curIndex + calculateWrittenLength()); - if (remaining <= 0) + if (remaining < 0) throw new I2NPMessageException("Not large enough (too short by " + remaining + ")"); for (int i = 0; i < RECORD_COUNT; i++) { System.arraycopy(_records[i].getData(), _records[i].getOffset(), out, curIndex, RECORD_SIZE); diff --git a/router/java/src/net/i2p/data/i2np/TunnelBuildReplyMessage.java b/router/java/src/net/i2p/data/i2np/TunnelBuildReplyMessage.java index 3549f3e15..277e425c2 100644 --- a/router/java/src/net/i2p/data/i2np/TunnelBuildReplyMessage.java +++ b/router/java/src/net/i2p/data/i2np/TunnelBuildReplyMessage.java @@ -35,13 +35,16 @@ public class TunnelBuildReplyMessage extends I2NPMessageImpl { for (int i = 0; i < RECORD_COUNT; i++) { int off = offset + (i * RECORD_SIZE); int len = RECORD_SIZE; - setRecord(i, new ByteArray(data, off, len)); + byte rec[] = new byte[RECORD_SIZE]; + System.arraycopy(data, off, rec, 0, RECORD_SIZE); + setRecord(i, new ByteArray(rec)); + //setRecord(i, new ByteArray(data, off, len)); } } protected int writeMessageBody(byte[] out, int curIndex) throws I2NPMessageException { int remaining = out.length - (curIndex + calculateWrittenLength()); - if (remaining <= 0) + if (remaining < 0) throw new I2NPMessageException("Not large enough (too short by " + remaining + ")"); for (int i = 0; i < RECORD_COUNT; i++) { System.arraycopy(_records[i].getData(), _records[i].getOffset(), out, curIndex, RECORD_SIZE); diff --git a/router/java/src/net/i2p/router/InNetMessagePool.java 
b/router/java/src/net/i2p/router/InNetMessagePool.java index 7f0e907be..018150f58 100644 --- a/router/java/src/net/i2p/router/InNetMessagePool.java +++ b/router/java/src/net/i2p/router/InNetMessagePool.java @@ -63,7 +63,7 @@ public class InNetMessagePool implements Service { public InNetMessagePool(RouterContext context) { _context = context; - _handlerJobBuilders = new HandlerJobBuilder[20]; + _handlerJobBuilders = new HandlerJobBuilder[32]; _pendingDataMessages = new ArrayList(16); _pendingDataMessagesFrom = new ArrayList(16); _pendingGatewayMessages = new ArrayList(16); @@ -133,7 +133,7 @@ public class InNetMessagePool implements Service { + ": " + messageBody); _context.statManager().addRateData("inNetPool.dropped", 1, 0); _context.statManager().addRateData("inNetPool.duplicate", 1, 0); - _context.messageHistory().droppedOtherMessage(messageBody); + _context.messageHistory().droppedOtherMessage(messageBody, (fromRouter != null ? fromRouter.calculateHash() : fromRouterHash)); _context.messageHistory().messageProcessingError(messageBody.getUniqueId(), messageBody.getClass().getName(), "Duplicate/expired"); @@ -184,7 +184,7 @@ public class InNetMessagePool implements Service { // not handled as a reply if (!jobFound) { // was not handled via HandlerJobBuilder - _context.messageHistory().droppedOtherMessage(messageBody); + _context.messageHistory().droppedOtherMessage(messageBody, (fromRouter != null ? fromRouter.calculateHash() : fromRouterHash)); if (type == DeliveryStatusMessage.MESSAGE_TYPE) { long timeSinceSent = _context.clock().now() - ((DeliveryStatusMessage)messageBody).getArrival(); diff --git a/router/java/src/net/i2p/router/JobQueue.java b/router/java/src/net/i2p/router/JobQueue.java index c2177a666..400c68dfe 100644 --- a/router/java/src/net/i2p/router/JobQueue.java +++ b/router/java/src/net/i2p/router/JobQueue.java @@ -228,6 +228,7 @@ public class JobQueue { public void allowParallelOperation() { _allowParallelOperation = true; + runQueue(4); } public void restart() { @@ -579,7 +580,8 @@ public class JobQueue { activeJobs.add(job); } else { job = runner.getLastJob(); - justFinishedJobs.add(job); + if (job != null) + justFinishedJobs.add(job); } } numRunners = _queueRunners.size(); diff --git a/router/java/src/net/i2p/router/LoadTestManager.java b/router/java/src/net/i2p/router/LoadTestManager.java index c164ff5c8..241245b21 100644 --- a/router/java/src/net/i2p/router/LoadTestManager.java +++ b/router/java/src/net/i2p/router/LoadTestManager.java @@ -70,38 +70,10 @@ public class LoadTestManager { public static final boolean TEST_LIVE_TUNNELS = true; - public Job getTestJob() { return new TestJob(_context); } - private class TestJob extends JobImpl { - public TestJob(RouterContext ctx) { - super(ctx); - // wait 5m to start up - getTiming().setStartAfter(3*60*1000 + getContext().clock().now()); - } - public String getName() { return "run load tests"; } - public void runJob() { - if (!TEST_LIVE_TUNNELS) { - runTest(); - getTiming().setStartAfter(10*60*1000 + getContext().clock().now()); - getContext().jobQueue().addJob(TestJob.this); - } - } - } - /** 1 peer at a time */ private static final int CONCURRENT_PEERS = 1; /** 4 messages per peer at a time */ - private static final int CONCURRENT_MESSAGES = 4; - - public void runTest() { - if ( (_untestedPeers == null) || (_untestedPeers.size() <= 0) ) { - UDPTransport t = UDPTransport._instance(); - if (t != null) - _untestedPeers = t._getActivePeers(); - } - int peers = getConcurrency(); - for (int i = 0; i < peers && 
_untestedPeers.size() > 0; i++) - buildTestTunnel((Hash)_untestedPeers.remove(0)); - } + private static final int CONCURRENT_MESSAGES = 1;//4; private int getConcurrency() { int rv = CONCURRENT_PEERS; @@ -118,11 +90,14 @@ public class LoadTestManager { } private int getPeerMessages() { + String msgsPerPeer = _context.getProperty("router.loadTestMessagesPerPeer"); int rv = CONCURRENT_MESSAGES; - try { - rv = Integer.parseInt(_context.getProperty("router.loadTestMessagesPerPeer", CONCURRENT_MESSAGES+"")); - } catch (NumberFormatException nfe) { - rv = CONCURRENT_MESSAGES; + if (msgsPerPeer != null) { + try { + rv = Integer.parseInt(msgsPerPeer); + } catch (NumberFormatException nfe) { + rv = CONCURRENT_MESSAGES; + } } if (rv < 1) rv = 1; @@ -449,121 +424,6 @@ public class LoadTestManager { } } - - private boolean getBuildOneHop() { - return Boolean.valueOf(_context.getProperty("router.loadTestOneHop", "false")).booleanValue(); - } - - private void buildTestTunnel(Hash peer) { - if (getBuildOneHop()) { - buildOneHop(peer); - } else { - buildLonger(peer); - } - } - private void buildOneHop(Hash peer) { - long expiration = _context.clock().now() + 10*60*1000; - - PooledTunnelCreatorConfig cfg = new PooledTunnelCreatorConfig(_context, 2, true); - // cfg.getPeer() is ordered gateway first - cfg.setPeer(0, peer); - HopConfig hop = cfg.getConfig(0); - hop.setExpiration(expiration); - hop.setIVKey(_context.keyGenerator().generateSessionKey()); - hop.setLayerKey(_context.keyGenerator().generateSessionKey()); - // now for ourselves - cfg.setPeer(1, _context.routerHash()); - hop = cfg.getConfig(1); - hop.setExpiration(expiration); - hop.setIVKey(_context.keyGenerator().generateSessionKey()); - hop.setLayerKey(_context.keyGenerator().generateSessionKey()); - - cfg.setExpiration(expiration); - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Config for " + peer.toBase64() + ": " + cfg); - - LoadTestTunnelConfig ltCfg = new LoadTestTunnelConfig(cfg); - - CreatedJob onCreated = new CreatedJob(_context, ltCfg); - FailedJob fail = new FailedJob(_context, ltCfg); - RequestTunnelJob req = new RequestTunnelJob(_context, cfg, onCreated, fail, cfg.getLength()-1, false, true); - _context.jobQueue().addJob(req); - } - - private Hash pickFastPeer(Hash skipPeer) { - String peers = _context.getProperty("router.loadTestFastPeers"); - if (peers != null) { - StringTokenizer tok = new StringTokenizer(peers.trim(), ", \t"); - List peerList = new ArrayList(); - while (tok.hasMoreTokens()) { - String str = tok.nextToken(); - try { - Hash h = new Hash(); - h.fromBase64(str); - peerList.add(h); - } catch (DataFormatException dfe) { - // ignore - } - } - Collections.shuffle(peerList); - while (peerList.size() > 0) { - Hash cur = (Hash)peerList.remove(0); - if (!cur.equals(skipPeer)) - return cur; - } - } - return null; - } - - private void buildLonger(Hash peer) { - long expiration = _context.clock().now() + 10*60*1000; - - PooledTunnelCreatorConfig cfg = new PooledTunnelCreatorConfig(_context, 3, true); - // cfg.getPeer() is ordered gateway first - cfg.setPeer(0, peer); - HopConfig hop = cfg.getConfig(0); - hop.setExpiration(expiration); - hop.setIVKey(_context.keyGenerator().generateSessionKey()); - hop.setLayerKey(_context.keyGenerator().generateSessionKey()); - // now lets put in a fast peer - Hash fastPeer = pickFastPeer(peer); - if (fastPeer == null) { - if (_log.shouldLog(Log.INFO)) - _log.info("Unable to pick a fast peer for the load test of " + peer.toBase64()); - buildOneHop(peer); - return; - } else if 
(fastPeer.equals(peer)) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Can't test the peer with themselves, going one hop for " + peer.toBase64()); - buildOneHop(peer); - return; - } - cfg.setPeer(1, fastPeer); - hop = cfg.getConfig(1); - hop.setExpiration(expiration); - hop.setIVKey(_context.keyGenerator().generateSessionKey()); - hop.setLayerKey(_context.keyGenerator().generateSessionKey()); - // now for ourselves - cfg.setPeer(2, _context.routerHash()); - hop = cfg.getConfig(2); - hop.setExpiration(expiration); - hop.setIVKey(_context.keyGenerator().generateSessionKey()); - hop.setLayerKey(_context.keyGenerator().generateSessionKey()); - - cfg.setExpiration(expiration); - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Config for " + peer.toBase64() + " with fastPeer: " + fastPeer.toBase64() + ": " + cfg); - - - LoadTestTunnelConfig ltCfg = new LoadTestTunnelConfig(cfg); - CreatedJob onCreated = new CreatedJob(_context, ltCfg); - FailedJob fail = new FailedJob(_context, ltCfg); - RequestTunnelJob req = new RequestTunnelJob(_context, cfg, onCreated, fail, cfg.getLength()-1, false, true); - _context.jobQueue().addJob(req); - } - /** * If we are testing live tunnels, see if we want to test the one that was just created * fully. @@ -647,8 +507,8 @@ public class LoadTestManager { runTest(_cfg); } } - private long TEST_PERIOD_MAX = 10*60*1000; - private long TEST_PERIOD_MIN = 90*1000; + private long TEST_PERIOD_MAX = 5*60*1000; + private long TEST_PERIOD_MIN = 1*60*1000; private class Expire extends JobImpl { private LoadTestTunnelConfig _cfg; diff --git a/router/java/src/net/i2p/router/MessageHistory.java b/router/java/src/net/i2p/router/MessageHistory.java index eed0a9d71..a48a94420 100644 --- a/router/java/src/net/i2p/router/MessageHistory.java +++ b/router/java/src/net/i2p/router/MessageHistory.java @@ -39,7 +39,7 @@ public class MessageHistory { /** config property determining whether we want to debug with the message history */ public final static String PROP_KEEP_MESSAGE_HISTORY = "router.keepHistory"; - public final static boolean DEFAULT_KEEP_MESSAGE_HISTORY = false; + public final static boolean DEFAULT_KEEP_MESSAGE_HISTORY = true; /** config property determining where we want to log the message history, if we're keeping one */ public final static String PROP_MESSAGE_HISTORY_FILENAME = "router.historyFilename"; public final static String DEFAULT_MESSAGE_HISTORY_FILENAME = "messageHistory.txt"; @@ -48,6 +48,7 @@ public class MessageHistory { public MessageHistory(RouterContext context) { _context = context; + _log = context.logManager().getLog(getClass()); _fmt = new SimpleDateFormat("yy/MM/dd.HH:mm:ss.SSS"); _fmt.setTimeZone(TimeZone.getTimeZone("GMT")); _reinitializeJob = new ReinitializeJob(); @@ -270,6 +271,16 @@ public class MessageHistory { addEntry(buf.toString()); } + public void tunnelParticipantRejected(Hash peer, String msg) { + if (!_doLog) return; + if (peer == null) return; + StringBuffer buf = new StringBuffer(128); + buf.append(getPrefix()); + buf.append("tunnel participation rejected by ["); + buf.append(getName(peer)).append("]: ").append(msg); + addEntry(buf.toString()); + } + /** * The peer did not accept the tunnel join for the given reason (this may be because * of a timeout or an explicit refusal). 
@@ -305,16 +316,37 @@ public class MessageHistory { /** * We received another message we weren't waiting for and don't know how to handle */ - public void droppedOtherMessage(I2NPMessage message) { + public void droppedOtherMessage(I2NPMessage message, Hash from) { if (!_doLog) return; if (message == null) return; StringBuffer buf = new StringBuffer(512); buf.append(getPrefix()); buf.append("dropped [").append(message.getClass().getName()).append("] ").append(message.getUniqueId()); - buf.append(" [").append(message.toString()).append("]"); + buf.append(" [").append(message.toString()).append("] from ["); + if (from != null) + buf.append(from.toBase64()); + else + buf.append("unknown"); + buf.append("] expiring in ").append(message.getMessageExpiration()-_context.clock().now()).append("ms"); addEntry(buf.toString()); } + public void droppedInboundMessage(long messageId, Hash from, String info) { + if (!_doLog) return; + StringBuffer buf = new StringBuffer(512); + buf.append(getPrefix()); + buf.append("dropped inbound message ").append(messageId); + buf.append(" from "); + if (from != null) + buf.append(from.toBase64()); + else + buf.append("unknown"); + buf.append(": ").append(info); + addEntry(buf.toString()); + //if (_log.shouldLog(Log.ERROR)) + // _log.error(buf.toString(), new Exception("source")); + } + /** * The message wanted a reply but no reply came in the time expected * @@ -348,6 +380,24 @@ public class MessageHistory { addEntry(buf.toString()); } + /** + * We shitlisted the peer + */ + public void shitlist(Hash peer, String reason) { + if (!_doLog) return; + if (peer == null) return; + addEntry("Shitlist " + peer.toBase64() + ": " + reason); + } + + /** + * We unshitlisted the peer + */ + public void unshitlist(Hash peer) { + if (!_doLog) return; + if (peer == null) return; + addEntry("Unshitlist " + peer.toBase64()); + } + /** * We just sent a message to the peer * @@ -358,7 +408,7 @@ public class MessageHistory { * @param peer router that the message was sent to * @param sentOk whether the message was sent successfully */ - public void sendMessage(String messageType, long messageId, long expiration, Hash peer, boolean sentOk) { + public void sendMessage(String messageType, long messageId, long expiration, Hash peer, boolean sentOk, String info) { if (!_doLog) return; if (false) return; StringBuffer buf = new StringBuffer(256); @@ -370,6 +420,8 @@ public class MessageHistory { buf.append("successfully"); else buf.append("failed"); + if (info != null) + buf.append(info); addEntry(buf.toString()); } @@ -469,22 +521,30 @@ public class MessageHistory { buf.append(" ").append(status); addEntry(buf.toString()); } - public void fragmentMessage(long messageId, int numFragments) { + public void fragmentMessage(long messageId, int numFragments, int totalLength, List messageIds, String msg) { if (!_doLog) return; - if (messageId == -1) throw new IllegalArgumentException("why are you -1?"); + //if (messageId == -1) throw new IllegalArgumentException("why are you -1?"); StringBuffer buf = new StringBuffer(48); buf.append(getPrefix()); buf.append("Break message ").append(messageId).append(" into fragments: ").append(numFragments); + buf.append(" total size ").append(totalLength); + buf.append(" contained in ").append(messageIds); + if (msg != null) + buf.append(": ").append(msg); addEntry(buf.toString()); } - public void fragmentMessage(long messageId, int numFragments, Object tunnel) { + public void fragmentMessage(long messageId, int numFragments, int totalLength, List messageIds, Object 
tunnel, String msg) { if (!_doLog) return; - if (messageId == -1) throw new IllegalArgumentException("why are you -1?"); + //if (messageId == -1) throw new IllegalArgumentException("why are you -1?"); StringBuffer buf = new StringBuffer(48); buf.append(getPrefix()); buf.append("Break message ").append(messageId).append(" into fragments: ").append(numFragments); + buf.append(" total size ").append(totalLength); + buf.append(" contained in ").append(messageIds); if (tunnel != null) buf.append(" on ").append(tunnel.toString()); + if (msg != null) + buf.append(": ").append(msg); addEntry(buf.toString()); } public void droppedTunnelDataMessageUnknown(long msgId, long tunnelId) { diff --git a/router/java/src/net/i2p/router/Router.java b/router/java/src/net/i2p/router/Router.java index fab4d2b85..78467aece 100644 --- a/router/java/src/net/i2p/router/Router.java +++ b/router/java/src/net/i2p/router/Router.java @@ -28,11 +28,7 @@ import java.util.TreeSet; import net.i2p.CoreVersion; import net.i2p.crypto.DHSessionKeyBuilder; -import net.i2p.data.DataFormatException; -import net.i2p.data.DataHelper; -import net.i2p.data.RouterAddress; -import net.i2p.data.RouterInfo; -import net.i2p.data.SigningPrivateKey; +import net.i2p.data.*; import net.i2p.data.i2np.GarlicMessage; //import net.i2p.data.i2np.TunnelMessage; import net.i2p.router.message.GarlicMessageHandler; @@ -73,7 +69,7 @@ public class Router { public final static long CLOCK_FUDGE_FACTOR = 1*60*1000; /** used to differentiate routerInfo files on different networks */ - public static final int NETWORK_ID = 1; + public static final int NETWORK_ID = 2; public final static String PROP_HIDDEN = "router.hiddenMode"; public final static String PROP_DYNAMIC_KEYS = "router.dynamicKeys"; @@ -389,6 +385,24 @@ public class Router { } } + public boolean isHidden() { + RouterInfo ri = _routerInfo; + if ( (ri != null) && (ri.isHidden()) ) + return true; + return Boolean.valueOf(_context.getProperty("router.isHidden", "false")).booleanValue(); + } + public Certificate createCertificate() { + Certificate cert = new Certificate(); + if (isHidden()) { + cert.setCertificateType(Certificate.CERTIFICATE_TYPE_HIDDEN); + cert.setPayload(null); + } else { + cert.setCertificateType(Certificate.CERTIFICATE_TYPE_NULL); + cert.setPayload(null); + } + return cert; + } + /** * Ugly list of files that we need to kill if we are building a new identity * diff --git a/router/java/src/net/i2p/router/RouterThrottle.java b/router/java/src/net/i2p/router/RouterThrottle.java index 4c91aa303..ae621cfb8 100644 --- a/router/java/src/net/i2p/router/RouterThrottle.java +++ b/router/java/src/net/i2p/router/RouterThrottle.java @@ -25,7 +25,7 @@ public interface RouterThrottle { * * @return 0 if it should be accepted, higher values for more severe rejection */ - public int acceptTunnelRequest(TunnelCreateMessage msg); + public int acceptTunnelRequest(); /** * Should we accept the netDb lookup message, replying either with the * value or some closer peers, or should we simply drop it due to overload? diff --git a/router/java/src/net/i2p/router/RouterThrottleImpl.java b/router/java/src/net/i2p/router/RouterThrottleImpl.java index 1f437e48a..7fe822b10 100644 --- a/router/java/src/net/i2p/router/RouterThrottleImpl.java +++ b/router/java/src/net/i2p/router/RouterThrottleImpl.java @@ -21,7 +21,7 @@ class RouterThrottleImpl implements RouterThrottle { * to a job, we're congested. 
* */ - private static int JOB_LAG_LIMIT = 10*1000; + private static int JOB_LAG_LIMIT = 2*1000; /** * Arbitrary hard limit - if we throttle our network connection this many * times in the previous 2 minute period, don't accept requests to @@ -80,7 +80,7 @@ class RouterThrottleImpl implements RouterThrottle { } } - public int acceptTunnelRequest(TunnelCreateMessage msg) { + public int acceptTunnelRequest() { if (_context.getProperty(Router.PROP_SHUTDOWN_IN_PROGRESS) != null) { if (_log.shouldLog(Log.WARN)) _log.warn("Refusing tunnel request since we are shutting down ASAP"); @@ -253,7 +253,7 @@ class RouterThrottleImpl implements RouterThrottle { _context.statManager().addRateData("router.throttleTunnelBandwidthExceeded", (long)bytesAllocated, 0); return TunnelHistory.TUNNEL_REJECT_BANDWIDTH; } - _context.statManager().addRateData("tunnel.bytesAllocatedAtAccept", (long)bytesAllocated, msg.getDurationSeconds()*1000); + _context.statManager().addRateData("tunnel.bytesAllocatedAtAccept", (long)bytesAllocated, 60*10*1000); if (_log.shouldLog(Log.DEBUG)) diff --git a/router/java/src/net/i2p/router/RouterVersion.java b/router/java/src/net/i2p/router/RouterVersion.java index 51bfac4eb..bc263f70d 100644 --- a/router/java/src/net/i2p/router/RouterVersion.java +++ b/router/java/src/net/i2p/router/RouterVersion.java @@ -15,9 +15,9 @@ import net.i2p.CoreVersion; * */ public class RouterVersion { - public final static String ID = "$Revision: 1.339 $ $Date: 2006/01/25 10:34:31 $"; + public final static String ID = "$Revision: 1.340.2.17 $ $Date: 2006/02/15 00:16:30 $"; public final static String VERSION = "0.6.1.9"; - public final static long BUILD = 8; + public final static long BUILD = 25; public static void main(String args[]) { System.out.println("I2P Router version: " + VERSION + "-" + BUILD); System.out.println("Router ID: " + RouterVersion.ID); diff --git a/router/java/src/net/i2p/router/Shitlist.java b/router/java/src/net/i2p/router/Shitlist.java index 7ccd906c1..54162eb6d 100644 --- a/router/java/src/net/i2p/router/Shitlist.java +++ b/router/java/src/net/i2p/router/Shitlist.java @@ -85,6 +85,8 @@ public class Shitlist { _context.netDb().fail(peer); //_context.tunnelManager().peerFailed(peer); _context.messageRegistry().peerFailed(peer); + if (!wasAlready) + _context.messageHistory().shitlist(peer, reason); return wasAlready; } @@ -93,7 +95,8 @@ public class Shitlist { } private void unshitlistRouter(Hash peer, boolean realUnshitlist) { if (peer == null) return; - _log.info("Unshitlisting router " + peer.toBase64()); + if (_log.shouldLog(Log.INFO)) + _log.info("Unshitlisting router " + peer.toBase64()); synchronized (_shitlist) { _shitlist.remove(peer); _shitlistCause.remove(peer); @@ -103,6 +106,7 @@ public class Shitlist { if (prof != null) prof.unshitlist(); } + _context.messageHistory().unshitlist(peer); } public boolean isShitlisted(Hash peer) { diff --git a/router/java/src/net/i2p/router/StatisticsManager.java b/router/java/src/net/i2p/router/StatisticsManager.java index de1d4cc7f..1f3eaffd2 100644 --- a/router/java/src/net/i2p/router/StatisticsManager.java +++ b/router/java/src/net/i2p/router/StatisticsManager.java @@ -110,7 +110,7 @@ public class StatisticsManager implements Service { includeRate("tunnel.fragmentedDropped", stats, new long[] { 10*60*1000, 3*60*60*1000 }); //includeRate("tunnel.fullFragments", stats, new long[] { 10*60*1000, 3*60*60*1000 }); //includeRate("tunnel.smallFragments", stats, new long[] { 10*60*1000, 3*60*60*1000 }); - includeRate("tunnel.testFailedTime", stats, 
new long[] { 60*60*1000 }); + includeRate("tunnel.testFailedTime", stats, new long[] { 10*60*1000 }); includeRate("tunnel.buildFailure", stats, new long[] { 60*60*1000 }); includeRate("tunnel.buildSuccess", stats, new long[] { 60*60*1000 }); @@ -129,7 +129,7 @@ public class StatisticsManager implements Service { includeRate("udp.statusDifferent", stats, new long[] { 20*60*1000 }); includeRate("udp.statusReject", stats, new long[] { 20*60*1000 }); includeRate("udp.statusUnknown", stats, new long[] { 20*60*1000 }); - includeRate("udp.statusKnownharlie", stats, new long[] { 1*60*1000, 10*60*1000 }); + includeRate("udp.statusKnownCharlie", stats, new long[] { 1*60*1000, 10*60*1000 }); includeRate("udp.addressUpdated", stats, new long[] { 1*60*1000 }); includeRate("udp.addressTestInsteadOfUpdate", stats, new long[] { 1*60*1000 }); @@ -137,19 +137,34 @@ public class StatisticsManager implements Service { //includeRate("transport.sendProcessingTime", stats, new long[] { 60*60*1000 }); //includeRate("jobQueue.jobRunSlow", stats, new long[] { 10*60*1000l, 60*60*1000l }); - includeRate("crypto.elGamal.encrypt", stats, new long[] { 60*60*1000 }); + includeRate("crypto.elGamal.encrypt", stats, new long[] { 60*1000, 60*60*1000 }); includeRate("tunnel.participatingTunnels", stats, new long[] { 5*60*1000, 60*60*1000 }); - includeRate("tunnel.testSuccessTime", stats, new long[] { 60*60*1000l, 24*60*60*1000l }); + includeRate("tunnel.testSuccessTime", stats, new long[] { 10*60*1000l }); includeRate("client.sendAckTime", stats, new long[] { 60*60*1000 }, true); includeRate("udp.sendConfirmTime", stats, new long[] { 10*60*1000 }); includeRate("udp.sendVolleyTime", stats, new long[] { 10*60*1000 }); - includeRate("udp.ignoreRecentDuplicate", stats, new long[] { 10*60*1000 }); + includeRate("udp.ignoreRecentDuplicate", stats, new long[] { 60*1000 }); includeRate("udp.congestionOccurred", stats, new long[] { 10*60*1000 }); //includeRate("stream.con.sendDuplicateSize", stats, new long[] { 60*60*1000 }); //includeRate("stream.con.receiveDuplicateSize", stats, new long[] { 60*60*1000 }); stats.setProperty("stat_uptime", DataHelper.formatDuration(_context.router().getUptime())); stats.setProperty("stat__rateKey", "avg;maxAvg;pctLifetime;[sat;satLim;maxSat;maxSatLim;][num;lifetimeFreq;maxFreq]"); + includeRate("tunnel.buildRequestTime", stats, new long[] { 60*1000, 10*60*1000 }); + includeRate("tunnel.decryptRequestTime", stats, new long[] { 60*1000, 10*60*1000 }); + includeRate("tunnel.buildClientExpire", stats, new long[] { 60*1000, 10*60*1000 }); + includeRate("tunnel.buildClientReject", stats, new long[] { 60*1000, 10*60*1000 }); + includeRate("tunnel.buildClientSuccess", stats, new long[] { 60*1000, 10*60*1000 }); + includeRate("tunnel.buildExploratoryExpire", stats, new long[] { 60*1000, 10*60*1000 }); + includeRate("tunnel.buildExploratoryReject", stats, new long[] { 60*1000, 10*60*1000 }); + includeRate("tunnel.buildExploratorySuccess", stats, new long[] { 60*1000, 10*60*1000 }); + includeRate("tunnel.rejectTimeout", stats, new long[] { 60*1000, 10*60*1000 }); + includeRate("udp.packetDequeueTime", stats, new long[] { 60*1000 }); + includeRate("udp.packetVerifyTime", stats, new long[] { 60*1000 }); + + includeRate("tunnel.rejectOverloaded", stats, new long[] { 60*1000, 10*60*1000 }); + includeRate("tunnel.acceptLoad", stats, new long[] { 60*1000, 10*60*1000 }); + if (FloodfillNetworkDatabaseFacade.isFloodfill(_context.router().getRouterInfo())) { stats.setProperty("netdb.knownRouters", 
""+_context.netDb().getKnownRouters()); stats.setProperty("netdb.knownLeaseSets", ""+_context.netDb().getKnownLeaseSets()); @@ -159,7 +174,7 @@ public class StatisticsManager implements Service { } else { _log.debug("Not publishing peer rankings"); } - + if (_log.shouldLog(Log.DEBUG)) _log.debug("Building status: " + stats); return stats; diff --git a/router/java/src/net/i2p/router/client/ClientConnectionRunner.java b/router/java/src/net/i2p/router/client/ClientConnectionRunner.java index 91ca2877a..6178f7ed7 100644 --- a/router/java/src/net/i2p/router/client/ClientConnectionRunner.java +++ b/router/java/src/net/i2p/router/client/ClientConnectionRunner.java @@ -36,6 +36,7 @@ import net.i2p.router.RouterContext; import net.i2p.util.I2PThread; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.util.SimpleTimer; /** * Bridge the router and the client - managing state for a client. @@ -149,7 +150,13 @@ public class ClientConnectionRunner { void setSessionId(SessionId id) { if (id != null) _sessionId = id; } /** data for the current leaseRequest, or null if there is no active leaseSet request */ LeaseRequestState getLeaseRequest() { return _leaseRequest; } - void setLeaseRequest(LeaseRequestState req) { _leaseRequest = req; } + void setLeaseRequest(LeaseRequestState req) { + synchronized (this) { + if ( (_leaseRequest != null) && (req != _leaseRequest) ) + _log.error("Changing leaseRequest from " + _leaseRequest + " to " + req); + _leaseRequest = req; + } + } /** already closed? */ boolean isDead() { return _dead; } /** message body */ @@ -214,16 +221,23 @@ public class ClientConnectionRunner { * updated. This takes care of all the LeaseRequestState stuff (including firing any jobs) */ void leaseSetCreated(LeaseSet ls) { - if (_leaseRequest == null) { - _log.error("LeaseRequest is null and we've received a new lease?! WTF"); - return; - } else { - _leaseRequest.setIsSuccessful(true); - if (_leaseRequest.getOnGranted() != null) - _context.jobQueue().addJob(_leaseRequest.getOnGranted()); - _leaseRequest = null; - _currentLeaseSet = ls; + LeaseRequestState state = null; + synchronized (this) { + state = _leaseRequest; + if (state == null) { + if (_log.shouldLog(Log.WARN)) + _log.warn("LeaseRequest is null and we've received a new lease?! perhaps this is odd... 
" + ls); + return; + } else { + state.setIsSuccessful(true); + _currentLeaseSet = ls; + if (_log.shouldLog(Log.DEBUG)) + _log.debug("LeaseSet created fully: " + state + " / " + ls); + _leaseRequest = null; + } } + if ( (state != null) && (state.getOnGranted() != null) ) + _context.jobQueue().addJob(state.getOnGranted()); } void disconnectClient(String reason) { @@ -236,7 +250,7 @@ public class ClientConnectionRunner { try { doSend(msg); } catch (I2CPMessageException ime) { - _log.error("Error writing out the disconnect message", ime); + _log.error("Error writing out the disconnect message: " + ime); } stopRunning(); } @@ -288,12 +302,14 @@ public class ClientConnectionRunner { * */ void ackSendMessage(MessageId id, long nonce) { + SessionId sid = _sessionId; + if (sid == null) return; if (_log.shouldLog(Log.DEBUG)) _log.debug("Acking message send [accepted]" + id + " / " + nonce + " for sessionId " - + _sessionId, new Exception("sendAccepted")); + + sid, new Exception("sendAccepted")); MessageStatusMessage status = new MessageStatusMessage(); status.setMessageId(id.getMessageId()); - status.setSessionId(_sessionId.getSessionId()); + status.setSessionId(sid.getSessionId()); status.setSize(0L); status.setNonce(nonce); status.setStatus(MessageStatusMessage.STATUS_SEND_ACCEPTED); @@ -312,7 +328,7 @@ public class ClientConnectionRunner { + " overall, synchronized took " + (inLock - beforeLock)); } } catch (I2CPMessageException ime) { - _log.error("Error writing out the message status message", ime); + _log.error("Error writing out the message status message: " + ime); } } @@ -323,7 +339,7 @@ public class ClientConnectionRunner { void receiveMessage(Destination toDest, Destination fromDest, Payload payload) { if (_dead) return; MessageReceivedJob j = new MessageReceivedJob(_context, this, toDest, fromDest, payload); - j.runJob(); + _context.jobQueue().addJob(j);//j.runJob(); } /** @@ -348,16 +364,65 @@ public class ClientConnectionRunner { * @param onFailedJob Job to run after the timeout passes without receiving authorization */ void requestLeaseSet(LeaseSet set, long expirationTime, Job onCreateJob, Job onFailedJob) { - if (_dead) return; - if ( (_currentLeaseSet != null) && (_currentLeaseSet.equals(set)) ) + if (_dead) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Requesting leaseSet from a dead client: " + set); + if (onFailedJob != null) + _context.jobQueue().addJob(onFailedJob); + return; + } + if ( (_currentLeaseSet != null) && (_currentLeaseSet.equals(set)) ) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Requested leaseSet hasn't changed"); + if (onCreateJob != null) + _context.jobQueue().addJob(onCreateJob); return; // no change - if (_leaseRequest != null) - return; // already requesting - _context.jobQueue().addJob(new RequestLeaseSetJob(_context, this, set, _context.clock().now() + expirationTime, onCreateJob, onFailedJob)); + } + LeaseRequestState state = null; + synchronized (this) { + state = _leaseRequest; + if (state != null) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Already requesting " + state); + LeaseSet requested = state.getRequested(); + LeaseSet granted = state.getGranted(); + long ours = set.getEarliestLeaseDate(); + if ( ( (requested != null) && (requested.getEarliestLeaseDate() > ours) ) || + ( (granted != null) && (granted.getEarliestLeaseDate() > ours) ) ) { + // theirs is newer + } else { + // ours is newer, so wait a few secs and retry + SimpleTimer.getInstance().addEvent(new Rerequest(set, expirationTime, onCreateJob, onFailedJob), 3*1000); + } + // 
fire onCreated? + return; // already requesting + } else { + _leaseRequest = state = new LeaseRequestState(onCreateJob, onFailedJob, _context.clock().now() + expirationTime, set); + _log.debug("Not already requesting, continue to request " + set); + } + } + _context.jobQueue().addJob(new RequestLeaseSetJob(_context, this, set, _context.clock().now() + expirationTime, onCreateJob, onFailedJob, state)); } + private class Rerequest implements SimpleTimer.TimedEvent { + private LeaseSet _ls; + private long _expirationTime; + private Job _onCreate; + private Job _onFailed; + public Rerequest(LeaseSet ls, long expirationTime, Job onCreate, Job onFailed) { + _ls = ls; + _expirationTime = expirationTime; + _onCreate = onCreate; + _onFailed = onFailed; + } + public void timeReached() { + requestLeaseSet(_ls, _expirationTime, _onCreate, _onFailed); + } + } + void disconnected() { - _log.error("Disconnected", new Exception("Disconnected?")); + if (_log.shouldLog(Log.WARN)) + _log.warn("Disconnected", new Exception("Disconnected?")); stopRunning(); } @@ -376,10 +441,10 @@ public class ClientConnectionRunner { _log.debug("after writeMessage("+ msg.getClass().getName() + "): " + (_context.clock().now()-before) + "ms");; } catch (I2CPMessageException ime) { - _log.error("Message exception sending I2CP message", ime); + _log.error("Message exception sending I2CP message: " + ime); stopRunning(); } catch (IOException ioe) { - _log.error("IO exception sending I2CP message", ioe); + _log.error("IO exception sending I2CP message: " + ioe); stopRunning(); } catch (Throwable t) { _log.log(Log.CRIT, "Unhandled exception sending I2CP message", t); diff --git a/router/java/src/net/i2p/router/client/LeaseRequestState.java b/router/java/src/net/i2p/router/client/LeaseRequestState.java index 7cb36bfd1..8a6a1b1b0 100644 --- a/router/java/src/net/i2p/router/client/LeaseRequestState.java +++ b/router/java/src/net/i2p/router/client/LeaseRequestState.java @@ -29,13 +29,13 @@ class LeaseRequestState { private boolean _successful; public LeaseRequestState(Job onGranted, Job onFailed, long expiration, LeaseSet requested) { - _onGranted = onGranted; - _onFailed = onFailed; - _expiration = expiration; - _requestedLeaseSet = requested; - _successful = false; + _onGranted = onGranted; + _onFailed = onFailed; + _expiration = expiration; + _requestedLeaseSet = requested; + _successful = false; } - + /** created lease set from client */ public LeaseSet getGranted() { return _grantedLeaseSet; } public void setGranted(LeaseSet ls) { _grantedLeaseSet = ls; } @@ -59,4 +59,11 @@ class LeaseRequestState { /** whether the request was successful in the time allotted */ public boolean getIsSuccessful() { return _successful; } public void setIsSuccessful(boolean is) { _successful = is; } + + public String toString() { + return "leaseSet request asking for " + _requestedLeaseSet + + " having received " + _grantedLeaseSet + + " succeeding? 
" + _successful + + " expiring on " + _expiration; + } } diff --git a/router/java/src/net/i2p/router/client/RequestLeaseSetJob.java b/router/java/src/net/i2p/router/client/RequestLeaseSetJob.java index 845fc96b8..9e6c34dbb 100644 --- a/router/java/src/net/i2p/router/client/RequestLeaseSetJob.java +++ b/router/java/src/net/i2p/router/client/RequestLeaseSetJob.java @@ -33,7 +33,9 @@ class RequestLeaseSetJob extends JobImpl { private long _expiration; private Job _onCreate; private Job _onFail; - public RequestLeaseSetJob(RouterContext ctx, ClientConnectionRunner runner, LeaseSet set, long expiration, Job onCreate, Job onFail) { + private LeaseRequestState _requestState; + + public RequestLeaseSetJob(RouterContext ctx, ClientConnectionRunner runner, LeaseSet set, long expiration, Job onCreate, Job onFail, LeaseRequestState state) { super(ctx); _log = ctx.logManager().getLog(RequestLeaseSetJob.class); _runner = runner; @@ -41,6 +43,7 @@ class RequestLeaseSetJob extends JobImpl { _expiration = expiration; _onCreate = onCreate; _onFail = onFail; + _requestState = state; ctx.statManager().createRateStat("client.requestLeaseSetSuccess", "How frequently the router requests successfully a new leaseSet?", "ClientMessages", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 }); ctx.statManager().createRateStat("client.requestLeaseSetTimeout", "How frequently the router requests a new leaseSet but gets no reply?", "ClientMessages", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 }); ctx.statManager().createRateStat("client.requestLeaseSetDropped", "How frequently the router requests a new leaseSet but the client drops?", "ClientMessages", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 }); @@ -49,43 +52,31 @@ class RequestLeaseSetJob extends JobImpl { public String getName() { return "Request Lease Set"; } public void runJob() { if (_runner.isDead()) return; - LeaseRequestState oldReq = _runner.getLeaseRequest(); - if (oldReq != null) { - if (oldReq.getExpiration() > getContext().clock().now()) { - _log.info("request of a leaseSet is still active, wait a little bit before asking again"); - } else { - if (_log.shouldLog(Log.WARN)) - _log.warn("Old *expired* leaseRequest exists! Why did the old request not get killed? 
(expiration = " + new Date(oldReq.getExpiration()) + ")", getAddedBy()); - } - return; - } - - LeaseRequestState state = new LeaseRequestState(_onCreate, _onFail, _expiration, _ls); RequestLeaseSetMessage msg = new RequestLeaseSetMessage(); Date end = null; // get the earliest end date - for (int i = 0; i < state.getRequested().getLeaseCount(); i++) { - if ( (end == null) || (end.getTime() > state.getRequested().getLease(i).getEndDate().getTime()) ) - end = state.getRequested().getLease(i).getEndDate(); + for (int i = 0; i < _requestState.getRequested().getLeaseCount(); i++) { + if ( (end == null) || (end.getTime() > _requestState.getRequested().getLease(i).getEndDate().getTime()) ) + end = _requestState.getRequested().getLease(i).getEndDate(); } msg.setEndDate(end); msg.setSessionId(_runner.getSessionId()); - for (int i = 0; i < state.getRequested().getLeaseCount(); i++) { - msg.addEndpoint(state.getRequested().getLease(i).getGateway(), state.getRequested().getLease(i).getTunnelId()); + for (int i = 0; i < _requestState.getRequested().getLeaseCount(); i++) { + msg.addEndpoint(_requestState.getRequested().getLease(i).getGateway(), _requestState.getRequested().getLease(i).getTunnelId()); } try { - _runner.setLeaseRequest(state); + //_runner.setLeaseRequest(state); _runner.doSend(msg); - getContext().jobQueue().addJob(new CheckLeaseRequestStatus(getContext(), state)); + getContext().jobQueue().addJob(new CheckLeaseRequestStatus(getContext(), _requestState)); return; } catch (I2CPMessageException ime) { getContext().statManager().addRateData("client.requestLeaseSetDropped", 1, 0); _log.error("Error sending I2CP message requesting the lease set", ime); - state.setIsSuccessful(false); + _requestState.setIsSuccessful(false); _runner.setLeaseRequest(null); _runner.disconnectClient("I2CP error requesting leaseSet"); return; @@ -100,24 +91,32 @@ class RequestLeaseSetJob extends JobImpl { */ private class CheckLeaseRequestStatus extends JobImpl { private LeaseRequestState _req; + private long _start; public CheckLeaseRequestStatus(RouterContext enclosingContext, LeaseRequestState state) { super(enclosingContext); _req = state; + _start = System.currentTimeMillis(); getTiming().setStartAfter(state.getExpiration()); } public void runJob() { - if (_runner.isDead()) return; + if (_runner.isDead()) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Already dead, dont try to expire the leaseSet lookup"); + return; + } if (_req.getIsSuccessful()) { // we didn't fail RequestLeaseSetJob.CheckLeaseRequestStatus.this.getContext().statManager().addRateData("client.requestLeaseSetSuccess", 1, 0); return; } else { RequestLeaseSetJob.CheckLeaseRequestStatus.this.getContext().statManager().addRateData("client.requestLeaseSetTimeout", 1, 0); - if (_log.shouldLog(Log.CRIT)) - _log.log(Log.CRIT, "Failed to receive a leaseSet in the time allotted (" + new Date(_req.getExpiration()) + ") for " + if (_log.shouldLog(Log.CRIT)) { + long waited = System.currentTimeMillis() - _start; + _log.log(Log.CRIT, "Failed to receive a leaseSet in the time allotted (" + waited + "): " + _req + " for " + _runner.getConfig().getDestination().calculateHash().toBase64()); + } _runner.disconnectClient("Took too long to request leaseSet"); if (_req.getOnFailed() != null) RequestLeaseSetJob.this.getContext().jobQueue().addJob(_req.getOnFailed()); diff --git a/router/java/src/net/i2p/router/message/GarlicMessageBuilder.java b/router/java/src/net/i2p/router/message/GarlicMessageBuilder.java index 74112481d..7f8279012 100644 --- 
a/router/java/src/net/i2p/router/message/GarlicMessageBuilder.java +++ b/router/java/src/net/i2p/router/message/GarlicMessageBuilder.java @@ -30,13 +30,23 @@ import net.i2p.util.Log; * */ public class GarlicMessageBuilder { + public static int estimateAvailableTags(RouterContext ctx, PublicKey key) { + SessionKey curKey = ctx.sessionKeyManager().getCurrentKey(key); + if (curKey == null) + return 0; + return ctx.sessionKeyManager().getAvailableTags(key, curKey); + } + public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config) { return buildMessage(ctx, config, new SessionKey(), new HashSet()); } public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set wrappedTags) { - return buildMessage(ctx, config, wrappedKey, wrappedTags, 50); + return buildMessage(ctx, config, wrappedKey, wrappedTags, 100); } public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set wrappedTags, int numTagsToDeliver) { + return buildMessage(ctx, config, wrappedKey, wrappedTags, numTagsToDeliver, false); + } + public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set wrappedTags, int numTagsToDeliver, boolean forceElGamal) { Log log = ctx.logManager().getLog(GarlicMessageBuilder.class); PublicKey key = config.getRecipientPublicKey(); if (key == null) { @@ -54,31 +64,33 @@ public class GarlicMessageBuilder { log.info("Encrypted with public key " + key + " to expire on " + new Date(config.getExpiration())); SessionKey curKey = ctx.sessionKeyManager().getCurrentKey(key); + SessionTag curTag = null; if (curKey == null) curKey = ctx.sessionKeyManager().createSession(key); - - SessionTag curTag = ctx.sessionKeyManager().consumeNextAvailableTag(key, curKey); + if (!forceElGamal) { + curTag = ctx.sessionKeyManager().consumeNextAvailableTag(key, curKey); + + int availTags = ctx.sessionKeyManager().getAvailableTags(key, curKey); + if (log.shouldLog(Log.DEBUG)) + log.debug("Available tags for encryption to " + key + ": " + availTags); - int availTags = ctx.sessionKeyManager().getAvailableTags(key, curKey); - if (log.shouldLog(Log.DEBUG)) - log.debug("Available tags for encryption to " + key + ": " + availTags); - - if (availTags < 10) { // arbitrary threshold - for (int i = 0; i < numTagsToDeliver; i++) - wrappedTags.add(new SessionTag(true)); - if (log.shouldLog(Log.INFO)) - log.info("Less than 10 tags are available (" + availTags + "), so we're including more"); - } else if (ctx.sessionKeyManager().getAvailableTimeLeft(key, curKey) < 60*1000) { - // if we have > 10 tags, but they expire in under 30 seconds, we want more - for (int i = 0; i < numTagsToDeliver; i++) - wrappedTags.add(new SessionTag(true)); - if (log.shouldLog(Log.INFO)) - log.info("Tags are almost expired, adding new ones"); - } else { - // always tack on at least one more - not necessary. 
- //wrappedTags.add(new SessionTag(true)); + if (availTags < 20) { // arbitrary threshold + for (int i = 0; i < numTagsToDeliver; i++) + wrappedTags.add(new SessionTag(true)); + if (log.shouldLog(Log.INFO)) + log.info("Less than 20 tags are available (" + availTags + "), so we're including more"); + } else if (ctx.sessionKeyManager().getAvailableTimeLeft(key, curKey) < 60*1000) { + // if we have > 20 tags, but they expire in under 30 seconds, we want more + for (int i = 0; i < numTagsToDeliver; i++) + wrappedTags.add(new SessionTag(true)); + if (log.shouldLog(Log.INFO)) + log.info("Tags are almost expired, adding new ones"); + } else { + // always tack on at least one more - not necessary. + //wrappedTags.add(new SessionTag(true)); + } } - + wrappedKey.setData(curKey.getData()); return buildMessage(ctx, config, wrappedKey, wrappedTags, key, curKey, curTag); diff --git a/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java b/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java index e94c60633..b13227d66 100644 --- a/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java +++ b/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java @@ -107,8 +107,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl { ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("client.sendAckTime", "How long does it take to get an ACK back from a message?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("client.sendAckTime", "How long does it take to get an ACK back from a message?", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); ctx.statManager().createRateStat("client.leaseSetFoundLocally", "How often we tried to look for a leaseSet and found it locally?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); @@ -118,6 +118,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { ctx.statManager().createRateStat("client.dispatchTime", "How long until we've dispatched the message (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); ctx.statManager().createRateStat("client.dispatchSendTime", "How long the actual dispatching takes?", "ClientMessages", new long[] { 
5*60*1000l, 60*60*1000l, 24*60*60*1000l }); ctx.statManager().createRateStat("client.dispatchNoTunnels", "How long after start do we run out of tunnels to send/receive with?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("client.dispatchNoACK", "How often we send a client message without asking for an ACK?", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l }); long timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT; _clientMessage = msg; _clientMessageId = msg.getMessageId(); @@ -312,7 +313,12 @@ public class OutboundClientMessageOneShotJob extends JobImpl { */ private void send() { if (_finished) return; - long token = getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE); + boolean wantACK = true; + int existingTags = GarlicMessageBuilder.estimateAvailableTags(getContext(), _leaseSet.getEncryptionKey()); + if (existingTags > 30) + wantACK = false; + + long token = (wantACK ? getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE) : -1); PublicKey key = _leaseSet.getEncryptionKey(); SessionKey sessKey = new SessionKey(); Set tags = new HashSet(); @@ -321,7 +327,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl { replyLeaseSet = getContext().netDb().lookupLeaseSetLocally(_from.calculateHash()); } - _inTunnel = selectInboundTunnel(); + if (wantACK) + _inTunnel = selectInboundTunnel(); buildClove(); if (_log.shouldLog(Log.DEBUG)) @@ -331,7 +338,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { _clove, _from.calculateHash(), _to, _inTunnel, sessKey, tags, - true, replyLeaseSet); + wantACK, replyLeaseSet); if (msg == null) { // set to null if there are no tunnels to ack the reply back through // (should we always fail for this? or should we send it anyway, even if @@ -346,9 +353,14 @@ public class OutboundClientMessageOneShotJob extends JobImpl { if (_log.shouldLog(Log.DEBUG)) _log.debug(getJobId() + ": send() - token expected " + token + " to " + _toString); - SendSuccessJob onReply = new SendSuccessJob(getContext(), sessKey, tags); - SendTimeoutJob onFail = new SendTimeoutJob(getContext()); - ReplySelector selector = new ReplySelector(token); + SendSuccessJob onReply = null; + SendTimeoutJob onFail = null; + ReplySelector selector = null; + if (wantACK) { + onReply = new SendSuccessJob(getContext(), sessKey, tags); + onFail = new SendTimeoutJob(getContext()); + selector = new ReplySelector(token); + } if (_log.shouldLog(Log.DEBUG)) _log.debug(getJobId() + ": Placing GarlicMessage into the new tunnel message bound for " @@ -378,6 +390,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl { _clientMessage = null; _clove = null; getContext().statManager().addRateData("client.dispatchPrepareTime", getContext().clock().now() - _start, 0); + if (!wantACK) + getContext().statManager().addRateData("client.dispatchNoACK", 1, 0); } private class DispatchJob extends JobImpl { @@ -396,7 +410,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl { } public String getName() { return "Dispatch outbound client message"; } public void runJob() { - getContext().messageRegistry().registerPending(_selector, _replyFound, _replyTimeout, _timeoutMs); + if (_selector != null) + getContext().messageRegistry().registerPending(_selector, _replyFound, _replyTimeout, _timeoutMs); if (_log.shouldLog(Log.INFO)) _log.info("Dispatching message to " + _toString + ": " + _msg); long before = getContext().clock().now(); diff --git 
a/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java b/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java index c2dd0bc56..f0e27d044 100644 --- a/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java +++ b/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java @@ -120,7 +120,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl { } else { RouterInfo info = getContext().netDb().lookupRouterInfoLocally(_message.getSearchKey()); if ( (info != null) && (info.isCurrent(EXPIRE_DELAY)) ) { - if (isUnreachable(info) && !publishUnreachable()) { + if ( (info.getIdentity().isHidden()) || (isUnreachable(info) && !publishUnreachable()) ) { if (_log.shouldLog(Log.DEBUG)) _log.debug("Not answering a query for a netDb peer who isn't reachable"); Set us = new HashSet(1); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillStoreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillStoreJob.java index e50a872ab..8f9cf35d3 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillStoreJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillStoreJob.java @@ -56,7 +56,8 @@ class FloodfillStoreJob extends StoreJob { */ protected void succeed() { super.succeed(); - getContext().jobQueue().addJob(new FloodfillVerifyStoreJob(getContext(), _state.getTarget(), _facade)); + if (_state != null) + getContext().jobQueue().addJob(new FloodfillVerifyStoreJob(getContext(), _state.getTarget(), _facade)); } public String getName() { return "Floodfill netDb store"; } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java index 5c8fc08af..1449b65fc 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java @@ -110,8 +110,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { public final static String PROP_DB_DIR = "router.networkDatabase.dbDir"; public final static String DEFAULT_DB_DIR = "netDb"; - /** if we have less than 20 routers left, don't drop any more, even if they're failing or doing bad shit */ - private final static int MIN_REMAINING_ROUTERS = 20; + /** if we have less than 5 routers left, don't drop any more, even if they're failing or doing bad shit */ + private final static int MIN_REMAINING_ROUTERS = 5; /** * dont accept any dbDtore of a router over 24 hours old (unless we dont @@ -644,6 +644,11 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { _log.warn("Invalid routerInfo signature! forged router structure! 
router = " + routerInfo); return "Invalid routerInfo signature on " + key.toBase64(); } else if (!routerInfo.isCurrent(ROUTER_INFO_EXPIRATION)) { + if (routerInfo.getNetworkId() != Router.NETWORK_ID) { + _context.shitlist().shitlistRouter(key, "Peer is not in our network"); + return "Peer is not in our network (" + routerInfo.getNetworkId() + ", wants " + + Router.NETWORK_ID + "): " + routerInfo.calculateHash().toBase64(); + } long age = _context.clock().now() - routerInfo.getPublished(); int existing = _kb.size(); if (existing >= MIN_REMAINING_ROUTERS) { @@ -713,18 +718,22 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { isRouterInfo = true; if (isRouterInfo) { - int remaining = _kb.size(); - if (remaining < MIN_REMAINING_ROUTERS) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Not removing " + dbEntry + " because we have so few routers left (" - + remaining + ") - perhaps a reseed is necessary?"); - return; - } - if (System.currentTimeMillis() < _started + DONT_FAIL_PERIOD) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Not failing the key " + dbEntry.toBase64() - + " since we've just started up and don't want to drop /everyone/"); - return; + if (((RouterInfo)o).getNetworkId() != Router.NETWORK_ID) { + // definitely drop them + } else { + int remaining = _kb.size(); + if (remaining < MIN_REMAINING_ROUTERS) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Not removing " + dbEntry + " because we have so few routers left (" + + remaining + ") - perhaps a reseed is necessary?"); + return; + } + if (System.currentTimeMillis() < _started + DONT_FAIL_PERIOD) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Not failing the key " + dbEntry.toBase64() + + " since we've just started up and don't want to drop /everyone/"); + return; + } } _context.peerManager().removeCapabilities(dbEntry); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java b/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java index f8273b7ff..8be6bfb8c 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java @@ -18,8 +18,7 @@ import java.util.Map; import java.util.Set; import java.util.TreeMap; -import net.i2p.data.DataHelper; -import net.i2p.data.Hash; +import net.i2p.data.*; import net.i2p.router.RouterContext; import net.i2p.router.peermanager.PeerProfile; import net.i2p.stat.Rate; @@ -120,7 +119,10 @@ class PeerSelector { return; if (_toIgnore.contains(entry)) return; - if (_context.netDb().lookupRouterInfoLocally(entry) == null) + RouterInfo info = _context.netDb().lookupRouterInfoLocally(entry); + if (info == null) + return; + if (info.getIdentity().isHidden()) return; BigInteger diff = getDistance(_key, entry); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java b/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java index b5e1c46e5..c39bd019b 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java @@ -22,6 +22,7 @@ import net.i2p.data.Hash; import net.i2p.data.LeaseSet; import net.i2p.data.RouterInfo; import net.i2p.router.JobImpl; +import net.i2p.router.Router; import net.i2p.router.RouterContext; import net.i2p.util.I2PThread; import net.i2p.util.Log; @@ -346,11 +347,18 @@ class PersistentDataStore extends TransientDataStore { fis = new FileInputStream(_routerFile); RouterInfo ri = new 
RouterInfo(); ri.readBytes(fis); - try { - _facade.store(ri.getIdentity().getHash(), ri); - } catch (IllegalArgumentException iae) { - _log.info("Refused locally loaded routerInfo - deleting"); + if (ri.getNetworkId() != Router.NETWORK_ID) { corrupt = true; + if (_log.shouldLog(Log.WARN)) + _log.warn("The router is from a different network: " + + ri.getIdentity().calculateHash().toBase64()); + } else { + try { + _facade.store(ri.getIdentity().getHash(), ri); + } catch (IllegalArgumentException iae) { + _log.info("Refused locally loaded routerInfo - deleting"); + corrupt = true; + } } } catch (DataFormatException dfe) { _log.warn("Error reading the routerInfo from " + _routerFile.getAbsolutePath(), dfe); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java index 6b0c19a30..ac6dbe76c 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java @@ -158,8 +158,10 @@ class StoreJob extends JobImpl { } else { int peerTimeout = _facade.getPeerTimeout(peer); PeerProfile prof = getContext().profileOrganizer().getProfile(peer); - RateStat failing = prof.getDBHistory().getFailedLookupRate(); - Rate failed = failing.getRate(60*60*1000); + if (prof != null) { + RateStat failing = prof.getDBHistory().getFailedLookupRate(); + Rate failed = failing.getRate(60*60*1000); + } //long failedCount = failed.getCurrentEventCount()+failed.getLastEventCount(); //if (failedCount > 10) { // _state.addSkipped(peer); diff --git a/router/java/src/net/i2p/router/peermanager/ProfileOrganizer.java b/router/java/src/net/i2p/router/peermanager/ProfileOrganizer.java index 76779b2dc..28d121d88 100644 --- a/router/java/src/net/i2p/router/peermanager/ProfileOrganizer.java +++ b/router/java/src/net/i2p/router/peermanager/ProfileOrganizer.java @@ -18,6 +18,7 @@ import java.util.Set; import java.util.TreeSet; import net.i2p.data.Hash; +import net.i2p.data.RouterInfo; import net.i2p.router.RouterContext; import net.i2p.router.NetworkDatabaseFacade; import net.i2p.stat.Rate; @@ -791,10 +792,17 @@ public class ProfileOrganizer { return false; // never select a shitlisted peer } - if (null != netDb.lookupRouterInfoLocally(peer)) { - if (_log.shouldLog(Log.INFO)) - _log.info("Peer " + peer.toBase64() + " is locally known, allowing its use"); - return true; + RouterInfo info = netDb.lookupRouterInfoLocally(peer); + if (null != info) { + if (info.getIdentity().isHidden()) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Peer " + peer.toBase64() + " is marked as hidden, disallowing its use"); + return false; + } else { + if (_log.shouldLog(Log.INFO)) + _log.info("Peer " + peer.toBase64() + " is locally known, allowing its use"); + return true; + } } else { if (_log.shouldLog(Log.WARN)) _log.warn("Peer " + peer.toBase64() + " is NOT locally known, disallowing its use"); diff --git a/router/java/src/net/i2p/router/startup/CreateRouterInfoJob.java b/router/java/src/net/i2p/router/startup/CreateRouterInfoJob.java index 09bbe3ffa..6fcbcaeaf 100644 --- a/router/java/src/net/i2p/router/startup/CreateRouterInfoJob.java +++ b/router/java/src/net/i2p/router/startup/CreateRouterInfoJob.java @@ -63,9 +63,7 @@ public class CreateRouterInfoJob extends JobImpl { info.setPeers(new HashSet()); info.setPublished(getCurrentPublishDate(getContext())); RouterIdentity ident = new RouterIdentity(); - Certificate cert = new Certificate(); - 
cert.setCertificateType(Certificate.CERTIFICATE_TYPE_NULL); - cert.setPayload(null); + Certificate cert = getContext().router().createCertificate(); ident.setCertificate(cert); PublicKey pubkey = null; PrivateKey privkey = null; diff --git a/router/java/src/net/i2p/router/startup/RebuildRouterInfoJob.java b/router/java/src/net/i2p/router/startup/RebuildRouterInfoJob.java index 0ecd51249..76403fe8c 100644 --- a/router/java/src/net/i2p/router/startup/RebuildRouterInfoJob.java +++ b/router/java/src/net/i2p/router/startup/RebuildRouterInfoJob.java @@ -106,7 +106,8 @@ public class RebuildRouterInfoJob extends JobImpl { SigningPublicKey signingPubKey = new SigningPublicKey(); signingPubKey.readBytes(fis); RouterIdentity ident = new RouterIdentity(); - ident.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); + Certificate cert = getContext().router().createCertificate(); + ident.setCertificate(cert); ident.setPublicKey(pubkey); ident.setSigningPublicKey(signingPubKey); info.setIdentity(ident); diff --git a/router/java/src/net/i2p/router/transport/CommSystemFacadeImpl.java b/router/java/src/net/i2p/router/transport/CommSystemFacadeImpl.java index 4c63f5ec3..97143f01f 100644 --- a/router/java/src/net/i2p/router/transport/CommSystemFacadeImpl.java +++ b/router/java/src/net/i2p/router/transport/CommSystemFacadeImpl.java @@ -77,6 +77,7 @@ public class CommSystemFacadeImpl extends CommSystemFacade { public short getReachabilityStatus() { if (_manager == null) return CommSystemFacade.STATUS_UNKNOWN; + if (_context.router().isHidden()) return CommSystemFacade.STATUS_OK; return _manager.getReachabilityStatus(); } public void recheckReachability() { _manager.recheckReachability(); } @@ -109,6 +110,7 @@ public class CommSystemFacadeImpl extends CommSystemFacade { private final static String PROP_I2NP_TCP_DISABLED = "i2np.tcp.disable"; private RouterAddress createTCPAddress() { + if (true) return null; RouterAddress addr = new RouterAddress(); addr.setCost(10); addr.setExpiration(null); @@ -117,7 +119,7 @@ public class CommSystemFacadeImpl extends CommSystemFacade { String port = _context.router().getConfigSetting(PROP_I2NP_TCP_PORT); String disabledStr = _context.router().getConfigSetting(PROP_I2NP_TCP_DISABLED); boolean disabled = false; - if ( (disabledStr != null) && ("true".equalsIgnoreCase(disabledStr)) ) + if ( (disabledStr == null) || ("true".equalsIgnoreCase(disabledStr)) ) return null; if ( (name == null) || (port == null) ) { //_log.info("TCP Host/Port not specified in config file - skipping TCP transport"); diff --git a/router/java/src/net/i2p/router/transport/TransportImpl.java b/router/java/src/net/i2p/router/transport/TransportImpl.java index a867b5a81..292a12c47 100644 --- a/router/java/src/net/i2p/router/transport/TransportImpl.java +++ b/router/java/src/net/i2p/router/transport/TransportImpl.java @@ -208,10 +208,13 @@ public abstract class TransportImpl implements Transport { if (log) { String type = msg.getMessageType(); + // the udp transport logs some further details + /* _context.messageHistory().sendMessage(type, msg.getMessageId(), msg.getExpiration(), msg.getTarget().getIdentity().getHash(), sendSuccessful); + */ } long now = _context.clock().now(); diff --git a/router/java/src/net/i2p/router/transport/TransportManager.java b/router/java/src/net/i2p/router/transport/TransportManager.java index 335efc5e2..9c35655a8 100644 --- a/router/java/src/net/i2p/router/transport/TransportManager.java +++ b/router/java/src/net/i2p/router/transport/TransportManager.java @@ 
-58,7 +58,7 @@ public class TransportManager implements TransportEventListener { private void configTransports() { String disableTCP = _context.router().getConfigSetting(PROP_DISABLE_TCP); - if ( (disableTCP != null) && (Boolean.TRUE.toString().equalsIgnoreCase(disableTCP)) ) { + if ( true || (disableTCP == null) || (Boolean.TRUE.toString().equalsIgnoreCase(disableTCP)) ) { _log.info("Explicitly disabling the TCP transport!"); } else { Transport t = new TCPTransport(_context); diff --git a/router/java/src/net/i2p/router/transport/VMCommSystem.java b/router/java/src/net/i2p/router/transport/VMCommSystem.java index a1f7b473e..ab16b8c79 100644 --- a/router/java/src/net/i2p/router/transport/VMCommSystem.java +++ b/router/java/src/net/i2p/router/transport/VMCommSystem.java @@ -84,7 +84,7 @@ public class VMCommSystem extends CommSystemFacade { if (true) { I2NPMessage dmsg = msg.getMessage(); String type = dmsg.getClass().getName(); - _context.messageHistory().sendMessage(type, dmsg.getUniqueId(), dmsg.getMessageExpiration(), msg.getTarget().getIdentity().getHash(), sendSuccessful); + _context.messageHistory().sendMessage(type, dmsg.getUniqueId(), dmsg.getMessageExpiration(), msg.getTarget().getIdentity().getHash(), sendSuccessful, null); } msg.discardData(); diff --git a/router/java/src/net/i2p/router/transport/udp/EstablishmentManager.java b/router/java/src/net/i2p/router/transport/udp/EstablishmentManager.java index 1f7505244..7a861693c 100644 --- a/router/java/src/net/i2p/router/transport/udp/EstablishmentManager.java +++ b/router/java/src/net/i2p/router/transport/udp/EstablishmentManager.java @@ -122,7 +122,12 @@ public class EstablishmentManager { public void establish(OutNetMessage msg) { RouterAddress ra = msg.getTarget().getTargetAddress(_transport.getStyle()); if (ra == null) { - _transport.failed(msg); + _transport.failed(msg, "Remote peer has no address, cannot establish"); + return; + } + if (msg.getTarget().getNetworkId() != Router.NETWORK_ID) { + _context.shitlist().shitlistRouter(msg.getTarget().getIdentity().calculateHash()); + _transport.failed(msg, "Remote peer is on the wrong network, cannot establish"); return; } UDPAddress addr = new UDPAddress(ra); @@ -133,7 +138,7 @@ public class EstablishmentManager { to = new RemoteHostId(remAddr.getAddress(), port); if (!_transport.isValid(to.getIP())) { - _transport.failed(msg); + _transport.failed(msg, "Remote peer's IP isn't valid"); _context.shitlist().shitlistRouter(msg.getTarget().getIdentity().calculateHash(), "Invalid SSU address"); return; } @@ -294,7 +299,7 @@ public class EstablishmentManager { // _log.log(Log.CRIT, "Admitted " + admitted + " with " + remaining + " remaining queued and " + active + " active"); if (_log.shouldLog(Log.INFO)) - _log.info("Outbound established completely! yay"); + _log.info("Outbound established completely! 
yay: " + state); PeerState peer = handleCompletelyEstablished(state); notifyActivity(); return peer; @@ -316,7 +321,7 @@ public class EstablishmentManager { RouterAddress ra = msg.getTarget().getTargetAddress(_transport.getStyle()); if (ra == null) { for (int i = 0; i < queued.size(); i++) - _transport.failed((OutNetMessage)queued.get(i)); + _transport.failed((OutNetMessage)queued.get(i), "Cannot admit to the queue, as it has no address"); continue; } UDPAddress addr = new UDPAddress(ra); @@ -354,8 +359,6 @@ public class EstablishmentManager { * */ private void handleCompletelyEstablished(InboundEstablishState state) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Handle completely established (inbound): " + state.getRemoteHostId().toString()); long now = _context.clock().now(); RouterIdentity remote = state.getConfirmedIdentity(); PeerState peer = new PeerState(_context); @@ -369,6 +372,11 @@ public class EstablishmentManager { peer.setRemotePeer(remote.calculateHash()); peer.setWeRelayToThemAs(state.getSentRelayTag()); peer.setTheyRelayToUsAs(0); + + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Handle completely established (inbound): " + state.getRemoteHostId().toString() + + " - " + peer.getRemotePeer().toBase64()); + //if (true) // for now, only support direct // peer.setRemoteRequiresIntroduction(false); @@ -377,7 +385,7 @@ public class EstablishmentManager { _transport.inboundConnectionReceived(); _context.statManager().addRateData("udp.inboundEstablishTime", state.getLifetime(), 0); - sendOurInfo(peer); + sendOurInfo(peer, true); } /** @@ -386,8 +394,6 @@ public class EstablishmentManager { * */ private PeerState handleCompletelyEstablished(OutboundEstablishState state) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Handle completely established (outbound): " + state.getRemoteHostId().toString()); long now = _context.clock().now(); RouterIdentity remote = state.getRemoteIdentity(); PeerState peer = new PeerState(_context); @@ -402,10 +408,15 @@ public class EstablishmentManager { peer.setTheyRelayToUsAs(state.getReceivedRelayTag()); peer.setWeRelayToThemAs(0); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Handle completely established (outbound): " + state.getRemoteHostId().toString() + + " - " + peer.getRemotePeer().toBase64()); + + _transport.addRemotePeerState(peer); _context.statManager().addRateData("udp.outboundEstablishTime", state.getLifetime(), 0); - sendOurInfo(peer); + sendOurInfo(peer, false); int i = 0; while (true) { @@ -414,7 +425,7 @@ public class EstablishmentManager { break; if (now - Router.CLOCK_FUDGE_FACTOR > msg.getExpiration()) { msg.timestamp("took too long but established..."); - _transport.failed(msg); + _transport.failed(msg, "Took too long to establish, but it was established"); } else { msg.timestamp("session fully established and sent " + i); _transport.send(msg); @@ -424,9 +435,10 @@ public class EstablishmentManager { return peer; } - private void sendOurInfo(PeerState peer) { + private void sendOurInfo(PeerState peer, boolean isInbound) { if (_log.shouldLog(Log.INFO)) - _log.info("Publishing to the peer after confirm: " + peer); + _log.info("Publishing to the peer after confirm: " + + (isInbound ? 
" inbound con from " + peer : "outbound con to " + peer)); DatabaseStoreMessage m = new DatabaseStoreMessage(_context); m.setKey(_context.routerHash()); @@ -765,7 +777,7 @@ public class EstablishmentManager { OutNetMessage msg = outboundState.getNextQueuedMessage(); if (msg == null) break; - _transport.failed(msg); + _transport.failed(msg, "Expired during failed establish"); } String err = null; switch (outboundState.getState()) { diff --git a/router/java/src/net/i2p/router/transport/udp/InboundMessageFragments.java b/router/java/src/net/i2p/router/transport/udp/InboundMessageFragments.java index 9d44aef53..6c9acb369 100644 --- a/router/java/src/net/i2p/router/transport/udp/InboundMessageFragments.java +++ b/router/java/src/net/i2p/router/transport/udp/InboundMessageFragments.java @@ -32,8 +32,8 @@ public class InboundMessageFragments /*implements UDPTransport.PartialACKSource private MessageReceiver _messageReceiver; private boolean _alive; - /** decay the recently completed every 2 minutes */ - private static final int DECAY_PERIOD = 120*1000; + /** decay the recently completed every 20 seconds */ + private static final int DECAY_PERIOD = 10*1000; public InboundMessageFragments(RouterContext ctx, OutboundMessageFragments outbound, UDPTransport transport) { _context = ctx; @@ -57,7 +57,7 @@ public class InboundMessageFragments /*implements UDPTransport.PartialACKSource // may want to extend the DecayingBloomFilter so we can use a smaller // array size (currently its tuned for 10 minute rates for the // messageValidator) - _recentlyCompletedMessages = new DecayingBloomFilter(_context, DECAY_PERIOD, 8); + _recentlyCompletedMessages = new DecayingBloomFilter(_context, DECAY_PERIOD, 4); _ackSender.startup(); _messageReceiver.startup(); } @@ -114,6 +114,7 @@ public class InboundMessageFragments /*implements UDPTransport.PartialACKSource _log.warn("Message received is a dup: " + mid + " dups: " + _recentlyCompletedMessages.getCurrentDuplicateCount() + " out of " + _recentlyCompletedMessages.getInsertedCount()); + _context.messageHistory().droppedInboundMessage(mid, from.getRemotePeer(), "dup"); continue; } @@ -162,6 +163,7 @@ public class InboundMessageFragments /*implements UDPTransport.PartialACKSource state.releaseResources(); if (_log.shouldLog(Log.WARN)) _log.warn("Message expired while only being partially read: " + state); + _context.messageHistory().droppedInboundMessage(state.getMessageId(), state.getFrom(), "expired hile partially read: " + state.toString()); } else if (partialACK) { // not expired but not yet complete... 
lets queue up a partial ACK if (_log.shouldLog(Log.DEBUG)) diff --git a/router/java/src/net/i2p/router/transport/udp/InboundMessageState.java b/router/java/src/net/i2p/router/transport/udp/InboundMessageState.java index be8d03cd0..a99929f40 100644 --- a/router/java/src/net/i2p/router/transport/udp/InboundMessageState.java +++ b/router/java/src/net/i2p/router/transport/udp/InboundMessageState.java @@ -165,10 +165,18 @@ public class InboundMessageState { public String toString() { StringBuffer buf = new StringBuffer(32); buf.append("Message: ").append(_messageId); - //if (isComplete()) { - // buf.append(" completely received with "); - // buf.append(getCompleteSize()).append(" bytes"); - //} + if (isComplete()) { + buf.append(" completely received with "); + buf.append(getCompleteSize()).append(" bytes"); + } else { + for (int i = 0; (_fragments != null) && (i < _fragments.length); i++) { + buf.append(" fragment ").append(i); + if (_fragments[i] != null) + buf.append(": known at size ").append(_fragments[i].getValid()); + else + buf.append(": unknown"); + } + } buf.append(" lifetime: ").append(getLifetime()); return buf.toString(); } diff --git a/router/java/src/net/i2p/router/transport/udp/IntroductionManager.java b/router/java/src/net/i2p/router/transport/udp/IntroductionManager.java index 6b47b7fb0..c869832e1 100644 --- a/router/java/src/net/i2p/router/transport/udp/IntroductionManager.java +++ b/router/java/src/net/i2p/router/transport/udp/IntroductionManager.java @@ -82,13 +82,23 @@ public class IntroductionManager { } public void receiveRelayIntro(RemoteHostId bob, UDPPacketReader reader) { + if (_context.router().isHidden()) + return; + if (_log.shouldLog(Log.INFO)) + _log.info("Receive relay intro from " + bob); _context.statManager().addRateData("udp.receiveRelayIntro", 1, 0); _transport.send(_builder.buildHolePunch(reader)); } public void receiveRelayRequest(RemoteHostId alice, UDPPacketReader reader) { + if (_context.router().isHidden()) + return; long tag = reader.getRelayRequestReader().readTag(); PeerState charlie = _transport.getPeerState(tag); + if (_log.shouldLog(Log.INFO)) + _log.info("Receive relay request from " + alice + + " for tag " + tag + + " and relaying with " + charlie); if (charlie == null) return; byte key[] = new byte[SessionKey.KEYSIZE_BYTES]; diff --git a/router/java/src/net/i2p/router/transport/udp/MessageReceiver.java b/router/java/src/net/i2p/router/transport/udp/MessageReceiver.java index e8501086c..3a29cd179 100644 --- a/router/java/src/net/i2p/router/transport/udp/MessageReceiver.java +++ b/router/java/src/net/i2p/router/transport/udp/MessageReceiver.java @@ -115,9 +115,11 @@ public class MessageReceiver implements Runnable { } catch (I2NPMessageException ime) { if (_log.shouldLog(Log.WARN)) _log.warn("Message invalid: " + state, ime); + _context.messageHistory().droppedInboundMessage(state.getMessageId(), state.getFrom(), "error: " + ime.toString() + ": " + state.toString()); return null; } catch (Exception e) { _log.log(Log.CRIT, "Error dealing with a message: " + state, e); + _context.messageHistory().droppedInboundMessage(state.getMessageId(), state.getFrom(), "error: " + e.toString() + ": " + state.toString()); return null; } finally { state.releaseResources(); diff --git a/router/java/src/net/i2p/router/transport/udp/OutboundMessageFragments.java b/router/java/src/net/i2p/router/transport/udp/OutboundMessageFragments.java index e4148ea58..05018b68b 100644 --- a/router/java/src/net/i2p/router/transport/udp/OutboundMessageFragments.java +++ 
b/router/java/src/net/i2p/router/transport/udp/OutboundMessageFragments.java @@ -75,7 +75,7 @@ public class OutboundMessageFragments { _context.statManager().createRateStat("udp.sendPiggyback", "How many acks were piggybacked on a data packet (time == message lifetime)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 }); _context.statManager().createRateStat("udp.sendPiggybackPartial", "How many partial acks were piggybacked on a data packet (time == message lifetime)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 }); _context.statManager().createRateStat("udp.activeDelay", "How often we wait blocking on the active queue", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 }); - _context.statManager().createRateStat("udp.packetsRetransmitted", "How many packets have been retransmitted (lifetime) when a burst of packets are retransmitted (period == packets transmitted, lifetime)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 }); + _context.statManager().createRateStat("udp.packetsRetransmitted", "Lifetime of packets during their retransmission (period == packets transmitted, lifetime)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 }); _context.statManager().createRateStat("udp.peerPacketsRetransmitted", "How many packets have been retransmitted to the peer (lifetime) when a burst of packets are retransmitted (period == packets transmitted, lifetime)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 }); _context.statManager().createRateStat("udp.blockedRetransmissions", "How packets have been transmitted to the peer when we blocked a retransmission to them?", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 }); } @@ -97,7 +97,7 @@ public class OutboundMessageFragments { public boolean waitForMoreAllowed() { // test without choking. // perhaps this should check the lifetime of the first activeMessage? 
- if (false) return true; + if (true) return true; long start = _context.clock().now(); int numActive = 0; @@ -173,7 +173,7 @@ public class OutboundMessageFragments { if (state.isComplete()) { _activeMessages.remove(i); locked_removeRetransmitter(state); - _transport.succeeded(state.getMessage()); + _transport.succeeded(state); if ( (peer != null) && (peer.getSendWindowBytesRemaining() > 0) ) _throttle.unchoke(peer.getRemotePeer()); state.releaseResources(); @@ -299,8 +299,8 @@ public class OutboundMessageFragments { if (state.getMessage() != null) state.getMessage().timestamp("peer disconnected"); _transport.failed(state); - if (_log.shouldLog(Log.WARN)) - _log.warn("Peer disconnected for " + state); + if (_log.shouldLog(Log.ERROR)) + _log.error("Peer disconnected for " + state); if ( (peer != null) && (peer.getSendWindowBytesRemaining() > 0) ) _throttle.unchoke(peer.getRemotePeer()); state.releaseResources(); @@ -403,7 +403,7 @@ public class OutboundMessageFragments { } int size = state.getUnackedSize(); - if (peer.allocateSendingBytes(size)) { + if (peer.allocateSendingBytes(size, state.getPushCount())) { if (_log.shouldLog(Log.INFO)) _log.info("Allocation of " + size + " allowed with " + peer.getSendWindowBytesRemaining() @@ -413,6 +413,7 @@ public class OutboundMessageFragments { if (state.getPushCount() > 0) { _retransmitters.put(peer, state); + /* int fragments = state.getFragmentCount(); int toSend = 0; @@ -428,6 +429,7 @@ public class OutboundMessageFragments { if (_log.shouldLog(Log.WARN)) _log.warn("Retransmitting " + state + " to " + peer); _context.statManager().addRateData("udp.sendVolleyTime", state.getLifetime(), toSend); + */ } state.push(); @@ -482,7 +484,14 @@ public class OutboundMessageFragments { UDPPacket rv[] = new UDPPacket[fragments]; //sparse for (int i = 0; i < fragments; i++) { if (state.needsSending(i)) { - rv[i] = _builder.buildPacket(state, i, peer, remaining, partialACKBitfields); + try { + rv[i] = _builder.buildPacket(state, i, peer, remaining, partialACKBitfields); + } catch (ArrayIndexOutOfBoundsException aioobe) { + _log.log(Log.CRIT, "Corrupt trying to build a packet - please tell jrandom: " + + partialACKBitfields + " / " + remaining + " / " + msgIds); + sparseCount++; + continue; + } if (rv[i] == null) { sparseCount++; continue; @@ -520,6 +529,16 @@ public class OutboundMessageFragments { if (_log.shouldLog(Log.INFO)) _log.info("Building packet for " + state + " to " + peer + " with sparse count: " + sparseCount); peer.packetsTransmitted(fragments - sparseCount); + if (state.getPushCount() > 1) { + int toSend = fragments-sparseCount; + peer.messageRetransmitted(toSend); + _packetsRetransmitted += toSend; // lifetime for the transport + _context.statManager().addRateData("udp.peerPacketsRetransmitted", peer.getPacketsRetransmitted(), peer.getPacketsTransmitted()); + _context.statManager().addRateData("udp.packetsRetransmitted", state.getLifetime(), peer.getPacketsTransmitted()); + if (_log.shouldLog(Log.WARN)) + _log.warn("Retransmitting " + state + " to " + peer); + _context.statManager().addRateData("udp.sendVolleyTime", state.getLifetime(), toSend); + } return rv; } else { // !alive @@ -595,7 +614,7 @@ public class OutboundMessageFragments { _context.statManager().addRateData("udp.sendConfirmFragments", state.getFragmentCount(), state.getLifetime()); if (numSends > 1) _context.statManager().addRateData("udp.sendConfirmVolley", numSends, state.getFragmentCount()); - _transport.succeeded(state.getMessage()); + _transport.succeeded(state); int 
numFragments = state.getFragmentCount(); PeerState peer = state.getPeer(); if (peer != null) { @@ -682,7 +701,7 @@ public class OutboundMessageFragments { _context.statManager().addRateData("udp.sendConfirmVolley", numSends, state.getFragmentCount()); if (state.getMessage() != null) state.getMessage().timestamp("partial ack to complete after " + numSends); - _transport.succeeded(state.getMessage()); + _transport.succeeded(state); if (state.getPeer() != null) { // this adjusts the rtt/rto/window/etc diff --git a/router/java/src/net/i2p/router/transport/udp/OutboundMessageState.java b/router/java/src/net/i2p/router/transport/udp/OutboundMessageState.java index 4ebcbcc0b..d6baef7a8 100644 --- a/router/java/src/net/i2p/router/transport/udp/OutboundMessageState.java +++ b/router/java/src/net/i2p/router/transport/udp/OutboundMessageState.java @@ -175,7 +175,7 @@ public class OutboundMessageState { // stupid brute force, but the cardinality should be trivial short sends[] = _fragmentSends; if (sends != null) - for (int i = 0; i < bitfield.fragmentCount(); i++) + for (int i = 0; i < bitfield.fragmentCount() && i < sends.length; i++) if (bitfield.received(i)) sends[i] = (short)-1; diff --git a/router/java/src/net/i2p/router/transport/udp/PacketBuilder.java b/router/java/src/net/i2p/router/transport/udp/PacketBuilder.java index a56a540ac..154ae930b 100644 --- a/router/java/src/net/i2p/router/transport/udp/PacketBuilder.java +++ b/router/java/src/net/i2p/router/transport/udp/PacketBuilder.java @@ -57,9 +57,12 @@ public class PacketBuilder { StringBuffer msg = null; boolean acksIncluded = false; - if (_log.shouldLog(Log.WARN)) { + if (_log.shouldLog(Log.INFO)) { msg = new StringBuffer(128); - msg.append("building data packet with acks to ").append(peer.getRemotePeer().toBase64().substring(0,6)); + msg.append("Send to ").append(peer.getRemotePeer().toBase64()); + msg.append(" msg ").append(state.getMessageId()).append(":").append(fragment); + if (fragment == state.getFragmentCount() - 1) + msg.append("*"); } byte data[] = packet.getPacket().getData(); @@ -136,7 +139,7 @@ public class PacketBuilder { } if ( (msg != null) && (acksIncluded) ) - _log.warn(msg.toString()); + _log.debug(msg.toString()); DataHelper.toLong(data, off, 1, 1); // only one fragment in this message off++; @@ -181,6 +184,11 @@ public class PacketBuilder { packet.getPacket().setLength(off); authenticate(packet, peer.getCurrentCipherKey(), peer.getCurrentMACKey()); setTo(packet, peer.getRemoteIPAddress(), peer.getRemotePort()); + + if (_log.shouldLog(Log.INFO)) { + _log.info(msg.toString()); + } + return packet; } @@ -193,7 +201,7 @@ public class PacketBuilder { UDPPacket packet = UDPPacket.acquire(_context); StringBuffer msg = null; - if (_log.shouldLog(Log.WARN)) { + if (_log.shouldLog(Log.DEBUG)) { msg = new StringBuffer(128); msg.append("building ACK packet to ").append(peer.getRemotePeer().toBase64().substring(0,6)); } @@ -270,7 +278,7 @@ public class PacketBuilder { off++; if (msg != null) - _log.warn(msg.toString()); + _log.debug(msg.toString()); // we can pad here if we want, maybe randomized? 
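Aside, not part of the patch: the PacketHandler hunk below derives its two new stats from the per-packet timestamps that the UDPPacket hunk further down records at enqueue, dequeue, and just before fragment handling. Each getter returns the time elapsed since its event, so subtracting two getters recovers the interval between the events themselves. A minimal sketch of that arithmetic, using only the getters the patch itself adds:

    // sketch only: queue wait and decrypt/verify cost derived from "time since" getters
    long timeToDequeue = packet.getTimeSinceEnqueue() - packet.getTimeSinceReceived();      // enqueue -> dequeue
    long beforeRecv    = packet.getTimeSinceReceiveFragments();                             // only > 0 for data packets
    long timeToVerify  = (beforeRecv > 0) ? packet.getTimeSinceReceived() - beforeRecv : 0; // dequeue -> fragment parse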
diff --git a/router/java/src/net/i2p/router/transport/udp/PacketHandler.java b/router/java/src/net/i2p/router/transport/udp/PacketHandler.java
index 35caf3192..07f47bc39 100644
--- a/router/java/src/net/i2p/router/transport/udp/PacketHandler.java
+++ b/router/java/src/net/i2p/router/transport/udp/PacketHandler.java
@@ -34,7 +34,7 @@ public class PacketHandler {
     private boolean _keepReading;
     private List _handlers;
 
-    private static final int NUM_HANDLERS = 3;
+    private static final int NUM_HANDLERS = 5;
     /** let packets be up to 30s slow */
     private static final long GRACE_PERIOD = Router.CLOCK_FUDGE_FACTOR + 30*1000;
 
@@ -60,6 +60,8 @@ public class PacketHandler {
         _context.statManager().createRateStat("udp.droppedInvalidEstablish", "How old the packet we dropped due to invalidity (establishment, bad key) was", "udp", new long[] { 10*60*1000, 60*60*1000 });
         _context.statManager().createRateStat("udp.droppedInvalidInboundEstablish", "How old the packet we dropped due to invalidity (inbound establishment, bad key) was", "udp", new long[] { 10*60*1000, 60*60*1000 });
         _context.statManager().createRateStat("udp.droppedInvalidSkew", "How skewed the packet we dropped due to invalidity (valid except bad skew) was", "udp", new long[] { 10*60*1000, 60*60*1000 });
+        _context.statManager().createRateStat("udp.packetDequeueTime", "How long it takes the UDPReader to pull a packet off the inbound packet queue (when it's slow)", "udp", new long[] { 10*60*1000, 60*60*1000 });
+        _context.statManager().createRateStat("udp.packetVerifyTime", "How long it takes the PacketHandler to verify a data packet after dequeueing (when it's slow)", "udp", new long[] { 10*60*1000, 60*60*1000 });
     }
 
     public void startup() {
@@ -101,8 +103,9 @@ public class PacketHandler {
                 UDPPacket packet = _endpoint.receive();
                 _state = 3;
                 if (packet == null) continue; // keepReading is probably false...
-                if (_log.shouldLog(Log.DEBUG))
-                    _log.debug("Received the packet " + packet);
+                packet.received();
+                if (_log.shouldLog(Log.INFO))
+                    _log.info("Received the packet " + packet);
                 _state = 4;
                 long queueTime = packet.getLifetime();
                 long handleStart = _context.clock().now();
@@ -116,16 +119,30 @@ public class PacketHandler {
                     _log.error("Crazy error handling a packet: " + packet, e);
                 }
                 long handleTime = _context.clock().now() - handleStart;
+                packet.afterHandling();
                 _context.statManager().addRateData("udp.handleTime", handleTime, packet.getLifetime());
                 _context.statManager().addRateData("udp.queueTime", queueTime, packet.getLifetime());
                 _state = 8;
 
+                if (_log.shouldLog(Log.INFO))
+                    _log.info("Done receiving the packet " + packet);
+
                 if (handleTime > 1000) {
                     if (_log.shouldLog(Log.WARN))
                         _log.warn("Took " + handleTime + " to process the packet "
                                   + packet + ": " + _reader);
                 }
-
+
+                long timeToDequeue = packet.getTimeSinceEnqueue() - packet.getTimeSinceReceived();
+                long timeToVerify = 0;
+                long beforeRecv = packet.getTimeSinceReceiveFragments();
+                if (beforeRecv > 0)
+                    timeToVerify = packet.getTimeSinceReceived() - beforeRecv;
+                if (timeToDequeue > 50)
+                    _context.statManager().addRateData("udp.packetDequeueTime", timeToDequeue, timeToDequeue);
+                if (timeToVerify > 50)
+                    _context.statManager().addRateData("udp.packetVerifyTime", timeToVerify, timeToVerify);
+
                // back to the cache with thee! 
packet.release(); _state = 9; @@ -396,7 +413,22 @@ public class PacketHandler { state = _establisher.receiveData(outState); if (_log.shouldLog(Log.DEBUG)) _log.debug("Received new DATA packet from " + state + ": " + packet); - _inbound.receiveData(state, reader.getDataReader()); + UDPPacketReader.DataReader dr = reader.getDataReader(); + if (_log.shouldLog(Log.INFO)) { + StringBuffer msg = new StringBuffer(); + msg.append("Receive ").append(System.identityHashCode(packet)); + msg.append(" from ").append(state.getRemotePeer().toBase64()).append(" ").append(state.getRemoteHostId()); + for (int i = 0; i < dr.readFragmentCount(); i++) { + msg.append(" msg ").append(dr.readMessageId(i)); + msg.append(":").append(dr.readMessageFragmentNum(i)); + if (dr.readMessageIsLast(i)) + msg.append("*"); + } + msg.append(": ").append(dr.toString()); + _log.info(msg.toString()); + } + packet.beforeReceiveFragments(); + _inbound.receiveData(state, dr); break; case UDPPacket.PAYLOAD_TYPE_TEST: _state = 51; diff --git a/router/java/src/net/i2p/router/transport/udp/PeerState.java b/router/java/src/net/i2p/router/transport/udp/PeerState.java index 9a248620f..2c8a00839 100644 --- a/router/java/src/net/i2p/router/transport/udp/PeerState.java +++ b/router/java/src/net/i2p/router/transport/udp/PeerState.java @@ -14,6 +14,7 @@ import net.i2p.I2PAppContext; import net.i2p.data.Hash; import net.i2p.data.SessionKey; import net.i2p.util.Log; +import net.i2p.router.RouterContext; /** * Contain all of the state about a UDP connection to a peer. @@ -168,6 +169,9 @@ public class PeerState { /** Message (Long) to InboundMessageState for active message */ private Map _inboundMessages; + /** have we migrated away from this peer to another newer one? */ + private volatile boolean _dead; + private static final int DEFAULT_SEND_WINDOW_BYTES = 8*1024; private static final int MINIMUM_WINDOW_BYTES = DEFAULT_SEND_WINDOW_BYTES; private static final int MAX_SEND_WINDOW_BYTES = 1024*1024; @@ -188,8 +192,8 @@ public class PeerState { */ private static final int LARGE_MTU = 1350; - private static final int MIN_RTO = 500 + ACKSender.ACK_FREQUENCY; - private static final int MAX_RTO = 2500; // 5000; + private static final int MIN_RTO = 100 + ACKSender.ACK_FREQUENCY; + private static final int MAX_RTO = 1200; // 5000; /** override the default MTU */ private static final String PROP_DEFAULT_MTU = "i2np.udp.mtu"; @@ -241,6 +245,7 @@ public class PeerState { _packetsReceived = 0; _packetsReceivedDuplicate = 0; _inboundMessages = new HashMap(8); + _dead = false; _context.statManager().createRateStat("udp.congestionOccurred", "How large the cwin was when congestion occurred (duration == sendBps)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 }); _context.statManager().createRateStat("udp.congestedRTO", "retransmission timeout after congestion (duration == rtt dev)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 }); _context.statManager().createRateStat("udp.sendACKPartial", "Number of partial ACKs sent (duration == number of full ACKs in that ack packet)", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 }); @@ -433,14 +438,21 @@ public class PeerState { * the previous second's ACKs be sent? */ public void remoteDoesNotWantPreviousACKs() { _remoteWantsPreviousACKs = false; } + + /** should we ignore the peer state's congestion window, and let anything through? 
*/ + private static final boolean IGNORE_CWIN = false; + /** should we ignore the congestion window on the first push of every message? */ + private static final boolean ALWAYS_ALLOW_FIRST_PUSH = false; + /** * Decrement the remaining bytes in the current period's window, * returning true if the full size can be decremented, false if it * cannot. If it is not decremented, the window size remaining is * not adjusted at all. */ - public boolean allocateSendingBytes(int size) { return allocateSendingBytes(size, false); } - public boolean allocateSendingBytes(int size, boolean isForACK) { + public boolean allocateSendingBytes(int size, int messagePushCount) { return allocateSendingBytes(size, false, messagePushCount); } + public boolean allocateSendingBytes(int size, boolean isForACK) { return allocateSendingBytes(size, isForACK, -1); } + public boolean allocateSendingBytes(int size, boolean isForACK, int messagePushCount) { long now = _context.clock().now(); long duration = now - _lastSendRefill; if (duration >= 1000) { @@ -456,7 +468,7 @@ public class PeerState { _lastSendRefill = now; } //if (true) return true; - if (size <= _sendWindowBytesRemaining) { + if (IGNORE_CWIN || size <= _sendWindowBytesRemaining || (ALWAYS_ALLOW_FIRST_PUSH && messagePushCount == 0)) { _sendWindowBytesRemaining -= size; _sendBytes += size; _lastSendTime = now; @@ -541,6 +553,32 @@ public class PeerState { * Access to this map must be synchronized explicitly! */ public Map getInboundMessages() { return _inboundMessages; } + /** + * Expire partially received inbound messages, returning how many are still pending. + * This should probably be fired periodically, in case a peer goes silent and we don't + * try to send them any messages (and don't receive any messages from them either) + * + */ + public int expireInboundMessages() { + int rv = 0; + + synchronized (_inboundMessages) { + for (Iterator iter = _inboundMessages.values().iterator(); iter.hasNext(); ) { + InboundMessageState state = (InboundMessageState)iter.next(); + if (state.isExpired()) { + iter.remove(); + } else { + if (state.isComplete()) { + _log.error("inbound message is complete, but wasn't handled inline? 
" + state + " with " + this); + iter.remove(); + } else { + rv++; + } + } + } + } + return rv; + } /** * either they told us to back off, or we had to resend to get @@ -593,7 +631,7 @@ public class PeerState { _lastACKSend = _context.clock().now(); } - private static final int MAX_RESEND_ACKS = 8; + private static final int MAX_RESEND_ACKS = 16; /** * grab a list of ACKBitfield instances, some of which may fully @@ -674,6 +712,8 @@ public class PeerState { for (Iterator iter = _inboundMessages.values().iterator(); iter.hasNext(); ) { InboundMessageState state = (InboundMessageState)iter.next(); if (state.isExpired()) { + //if (_context instanceof RouterContext) + // ((RouterContext)_context).messageHistory().droppedInboundMessage(state.getMessageId(), state.getFrom(), "expired partially received: " + state.toString()); iter.remove(); } else { if (!state.isComplete()) { @@ -877,6 +917,42 @@ public class PeerState { public RemoteHostId getRemoteHostId() { return _remoteHostId; } + /** + * Transfer the basic activity/state from the old peer to the current peer + * + */ + public void loadFrom(PeerState oldPeer) { + _rto = oldPeer._rto; + _rtt = oldPeer._rtt; + _rttDeviation = oldPeer._rttDeviation; + _slowStartThreshold = oldPeer._slowStartThreshold; + _sendWindowBytes = oldPeer._sendWindowBytes; + oldPeer._dead = true; + + List tmp = new ArrayList(); + synchronized (oldPeer._currentACKs) { + tmp.addAll(oldPeer._currentACKs); + oldPeer._currentACKs.clear(); + } + synchronized (_currentACKs) { _currentACKs.addAll(tmp); } + tmp.clear(); + + synchronized (oldPeer._currentACKsResend) { + tmp.addAll(oldPeer._currentACKsResend); + oldPeer._currentACKsResend.clear(); + } + synchronized (_currentACKsResend) { _currentACKsResend.addAll(tmp); } + tmp.clear(); + + Map msgs = new HashMap(); + synchronized (oldPeer._inboundMessages) { + msgs.putAll(oldPeer._inboundMessages); + oldPeer._inboundMessages.clear(); + } + synchronized (_inboundMessages) { _inboundMessages.putAll(msgs); } + + } + public int hashCode() { if (_remotePeer != null) return _remotePeer.hashCode(); @@ -901,6 +977,17 @@ public class PeerState { buf.append(_remoteHostId.toString()); if (_remotePeer != null) buf.append(" ").append(_remotePeer.toBase64().substring(0,6)); + + long now = _context.clock().now(); + buf.append(" recvAge: ").append(now-_lastReceiveTime); + buf.append(" sendAge: ").append(now-_lastSendFullyTime); + buf.append(" sendAttemptAge: ").append(now-_lastSendTime); + buf.append(" sendACKAge: ").append(now-_lastACKSend); + buf.append(" lifetime: ").append(now-_keyEstablishedTime); + buf.append(" cwin: ").append(_sendWindowBytes); + buf.append(" acwin: ").append(_sendWindowBytesRemaining); + buf.append(" recv OK/Dup: ").append(_packetsReceived).append('/').append(_packetsReceivedDuplicate); + buf.append(" send OK/Dup: ").append(_packetsTransmitted).append('/').append(_packetsRetransmitted); return buf.toString(); } } diff --git a/router/java/src/net/i2p/router/transport/udp/PeerTestManager.java b/router/java/src/net/i2p/router/transport/udp/PeerTestManager.java index cc030a619..8e53ea433 100644 --- a/router/java/src/net/i2p/router/transport/udp/PeerTestManager.java +++ b/router/java/src/net/i2p/router/transport/udp/PeerTestManager.java @@ -31,6 +31,7 @@ class PeerTestManager { private Map _activeTests; /** current test we are running, or null */ private PeerTestState _currentTest; + private boolean _currentTestComplete; private List _recentTests; /** longest we will keep track of a Charlie nonce for */ @@ -44,6 +45,7 @@ 
class PeerTestManager { _recentTests = Collections.synchronizedList(new ArrayList(16)); _packetBuilder = new PacketBuilder(context, transport); _currentTest = null; + _currentTestComplete = false; _context.statManager().createRateStat("udp.statusKnownCharlie", "How often the bob we pick passes us to a charlie we already have a session with?", "udp", new long[] { 60*1000, 20*60*1000, 60*60*1000 }); } @@ -62,6 +64,7 @@ class PeerTestManager { test.setLastSendTime(test.getBeginTime()); test.setOurRole(PeerTestState.ALICE); _currentTest = test; + _currentTestComplete = false; if (_log.shouldLog(Log.DEBUG)) _log.debug("Running test with bob = " + bobIP + ":" + bobPort + " " + test.getNonce()); @@ -81,7 +84,7 @@ class PeerTestManager { // already completed return; } else if (expired()) { - testComplete(); + testComplete(true); } else if (_context.clock().now() - state.getLastSendTime() >= RESEND_TIMEOUT) { if (state.getReceiveBobTime() <= 0) { // no message from Bob yet, send it again @@ -98,31 +101,36 @@ class PeerTestManager { SimpleTimer.getInstance().addEvent(ContinueTest.this, RESEND_TIMEOUT); } } - private boolean expired() { - PeerTestState state = _currentTest; - if (state != null) - return _currentTest.getBeginTime() + MAX_TEST_TIME < _context.clock().now(); - else - return true; - } } + private boolean expired() { + PeerTestState state = _currentTest; + if (state != null) + return state.getBeginTime() + MAX_TEST_TIME < _context.clock().now(); + else + return true; + } + private void sendTestToBob() { PeerTestState test = _currentTest; - if (test != null) { + if (!expired()) { if (_log.shouldLog(Log.DEBUG)) _log.debug("Sending test to bob: " + test.getBobIP() + ":" + test.getBobPort()); _transport.send(_packetBuilder.buildPeerTestFromAlice(test.getBobIP(), test.getBobPort(), test.getBobCipherKey(), test.getBobMACKey(), //_bobIntroKey, test.getNonce(), _transport.getIntroKey())); + } else { + _currentTest = null; } } private void sendTestToCharlie() { PeerTestState test = _currentTest; - if (test != null) { + if (!expired()) { if (_log.shouldLog(Log.DEBUG)) _log.debug("Sending test to charlie: " + test.getCharlieIP() + ":" + test.getCharliePort()); _transport.send(_packetBuilder.buildPeerTestFromAlice(test.getCharlieIP(), test.getCharliePort(), test.getCharlieIntroKey(), test.getNonce(), _transport.getIntroKey())); + } else { + _currentTest = null; } } @@ -139,7 +147,10 @@ class PeerTestManager { */ private void receiveTestReply(RemoteHostId from, UDPPacketReader.PeerTestReader testInfo) { PeerTestState test = _currentTest; - if (test == null) return; + if (expired()) + return; + if (_currentTestComplete) + return; if ( (DataHelper.eq(from.getIP(), test.getBobIP().getAddress())) && (from.getPort() == test.getBobPort()) ) { byte ip[] = new byte[testInfo.readIPSize()]; testInfo.readIP(ip, 0); @@ -152,7 +163,7 @@ class PeerTestManager { if (_log.shouldLog(Log.DEBUG)) _log.debug("Receive test reply from bob @ " + from.getIP() + " via our " + test.getAlicePort() + "/" + test.getAlicePortFromCharlie()); if (test.getAlicePortFromCharlie() > 0) - testComplete(); + testComplete(false); } catch (UnknownHostException uhe) { if (_log.shouldLog(Log.ERROR)) _log.error("Unable to get our IP from bob's reply: " + from + ", " + testInfo, uhe); @@ -166,12 +177,11 @@ class PeerTestManager { if (_log.shouldLog(Log.WARN)) _log.warn("Bob chose a charlie we already have a session to, cancelling the test and rerunning (bob: " + _currentTest + ", charlie: " + from + ")"); - _currentTest = null; 
_context.statManager().addRateData("udp.statusKnownCharlie", 1, 0); honorStatus(CommSystemFacade.STATUS_UNKNOWN); return; } - + if (test.getReceiveCharlieTime() > 0) { // this is our second charlie, yay! test.setAlicePortFromCharlie(testInfo.readPort()); @@ -184,12 +194,19 @@ class PeerTestManager { _log.debug("Receive test reply from charlie @ " + test.getCharlieIP() + " via our " + test.getAlicePort() + "/" + test.getAlicePortFromCharlie()); if (test.getReceiveBobTime() > 0) - testComplete(); + testComplete(false); } catch (UnknownHostException uhe) { if (_log.shouldLog(Log.ERROR)) _log.error("Charlie @ " + from + " said we were an invalid IP address: " + uhe.getMessage(), uhe); } } else { + if (test.getPacketsRelayed() > MAX_RELAYED_PER_TEST) { + testComplete(false); + if (_log.shouldLog(Log.WARN)) + _log.warn("Received too many packets on the test: " + test); + return; + } + // ok, first charlie. send 'em a packet test.setReceiveCharlieTime(_context.clock().now()); SessionKey charlieIntroKey = new SessionKey(new byte[SessionKey.KEYSIZE_BYTES]); @@ -214,10 +231,14 @@ class PeerTestManager { * we have successfully received the second PeerTest from a Charlie. * */ - private void testComplete() { + private void testComplete(boolean forgetTest) { + _currentTestComplete = true; short status = -1; PeerTestState test = _currentTest; - if (test == null) return; + if (expired()) { + _currentTest = null; + return; + } if (test.getAlicePortFromCharlie() > 0) { // we received a second message from charlie if ( (test.getAlicePort() == test.getAlicePortFromCharlie()) && @@ -243,7 +264,8 @@ class PeerTestManager { _log.info("Test complete: " + test); honorStatus(status); - _currentTest = null; + if (forgetTest) + _currentTest = null; } /** @@ -324,6 +346,8 @@ class PeerTestManager { } } + private static final int MAX_RELAYED_PER_TEST = 5; + /** * The packet's IP/port does not match the IP/port included in the message, * so we must be Charlie receiving a PeerTest from Bob. 
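Aside, not part of the patch: the PeerTestManager hunks that follow all apply the same cap, incrementing the test's relay counter and dropping the packet once MAX_RELAYED_PER_TEST is exceeded, so a single test nonce cannot be used to bounce unbounded traffic through Bob or Charlie. A minimal sketch of that guard as a standalone helper (the method name is hypothetical; the fields and accessors are the patch's own):

    // sketch only: the check repeated in each receiveFrom*() handler below
    private boolean relayBudgetExceeded(PeerTestState state) {
        state.incrementPacketsRelayed();
        if (state.getPacketsRelayed() > MAX_RELAYED_PER_TEST) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Already relayed too many packets for " + state + ", dropping");
            return true;
        }
        return false;
    }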
@@ -370,6 +394,14 @@ class PeerTestManager { state.setBobMACKey(bob.getCurrentMACKey()); } + state.incrementPacketsRelayed(); + if (state.getPacketsRelayed() > MAX_RELAYED_PER_TEST) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Receive from bob (" + from + ") as charlie with alice @ " + aliceIP + ":" + alicePort + + ", but we've already relayed too many packets to that test, so we're dropping it"); + return; + } + if (_log.shouldLog(Log.DEBUG)) _log.debug("Receive from bob (" + from + ") as charlie, sending back to bob and sending to alice @ " + aliceIP + ":" + alicePort); @@ -446,6 +478,14 @@ class PeerTestManager { state.setOurRole(PeerTestState.BOB); state.setReceiveAliceTime(_context.clock().now()); + state.incrementPacketsRelayed(); + if (state.getPacketsRelayed() > MAX_RELAYED_PER_TEST) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Receive from alice (" + aliceIP + ":" + from.getPort() + + ") as bob, but we've already relayed too many packets to that test, so we're dropping it"); + return; + } + if (isNew) { synchronized (_activeTests) { _activeTests.put(new Long(nonce), state); @@ -477,6 +517,14 @@ class PeerTestManager { */ private void receiveFromCharlieAsBob(RemoteHostId from, PeerTestState state) { state.setReceiveCharlieTime(_context.clock().now()); + + state.incrementPacketsRelayed(); + if (state.getPacketsRelayed() > MAX_RELAYED_PER_TEST) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Received from charlie (" + from + ") as bob (" + state + "), but we've already relayed too many, so drop it"); + return; + } + UDPPacket packet = _packetBuilder.buildPeerTestToAlice(state.getAliceIP(), state.getAlicePort(), state.getAliceIntroKey(), state.getCharlieIntroKey(), state.getNonce()); diff --git a/router/java/src/net/i2p/router/transport/udp/PeerTestState.java b/router/java/src/net/i2p/router/transport/udp/PeerTestState.java index 74e498428..a9870a15d 100644 --- a/router/java/src/net/i2p/router/transport/udp/PeerTestState.java +++ b/router/java/src/net/i2p/router/transport/udp/PeerTestState.java @@ -26,6 +26,7 @@ class PeerTestState { private long _receiveAliceTime; private long _receiveBobTime; private long _receiveCharlieTime; + private int _packetsRelayed; public static final short ALICE = 1; public static final short BOB = 2; @@ -91,6 +92,9 @@ class PeerTestState { public synchronized long getReceiveCharlieTime() { return _receiveCharlieTime; } public synchronized void setReceiveCharlieTime(long when) { _receiveCharlieTime = when; } + public int getPacketsRelayed() { return _packetsRelayed; } + public void incrementPacketsRelayed() { ++_packetsRelayed; } + public synchronized String toString() { StringBuffer buf = new StringBuffer(512); buf.append("Role: "); @@ -113,6 +117,7 @@ class PeerTestState { buf.append(" receive from bob after ").append(_receiveBobTime - _beginTime).append("ms"); if (_receiveCharlieTime > 0) buf.append(" receive from charlie after ").append(_receiveCharlieTime - _beginTime).append("ms"); + buf.append(" packets relayed: ").append(_packetsRelayed); return buf.toString(); } } diff --git a/router/java/src/net/i2p/router/transport/udp/TimedWeightedPriorityMessageQueue.java b/router/java/src/net/i2p/router/transport/udp/TimedWeightedPriorityMessageQueue.java index ed18a452a..4d0834853 100644 --- a/router/java/src/net/i2p/router/transport/udp/TimedWeightedPriorityMessageQueue.java +++ b/router/java/src/net/i2p/router/transport/udp/TimedWeightedPriorityMessageQueue.java @@ -224,7 +224,7 @@ public class TimedWeightedPriorityMessageQueue implements MessageQueue, 
Outbound } public interface FailedListener { - public void failed(OutNetMessage msg); + public void failed(OutNetMessage msg, String reason); } /** @@ -253,7 +253,7 @@ public class TimedWeightedPriorityMessageQueue implements MessageQueue, Outbound for (int i = 0; i < removed.size(); i++) { OutNetMessage m = (OutNetMessage)removed.get(i); m.timestamp("expirer killed it"); - _listener.failed(m); + _listener.failed(m, "expired before getting on the active pool"); } removed.clear(); diff --git a/router/java/src/net/i2p/router/transport/udp/UDPPacket.java b/router/java/src/net/i2p/router/transport/udp/UDPPacket.java index 1c9a050b1..a8948237b 100644 --- a/router/java/src/net/i2p/router/transport/udp/UDPPacket.java +++ b/router/java/src/net/i2p/router/transport/udp/UDPPacket.java @@ -36,6 +36,10 @@ public class UDPPacket { private volatile boolean _released; private volatile Exception _releasedBy; private volatile Exception _acquiredBy; + private long _enqueueTime; + private long _receivedTime; + private long _beforeReceiveFragments; + private long _afterHandlingTime; private static final List _packetCache; static { @@ -194,6 +198,24 @@ public class UDPPacket { _context.aes().decrypt(_data, _packet.getOffset() + MAC_SIZE + IV_SIZE, _data, _packet.getOffset() + MAC_SIZE + IV_SIZE, cipherKey, iv.getData(), len - MAC_SIZE - IV_SIZE); _ivCache.release(iv); } + + /** the UDPReceiver has tossed it onto the inbound queue */ + void enqueue() { _enqueueTime = _context.clock().now(); } + /** a packet handler has pulled it off the inbound queue */ + void received() { _receivedTime = _context.clock().now(); } + /** a packet handler has decrypted and verified the packet and is about to parse out the good bits */ + void beforeReceiveFragments() { _beforeReceiveFragments = _context.clock().now(); } + /** a packet handler has finished parsing out the good bits */ + void afterHandling() { _afterHandlingTime = _context.clock().now(); } + + /** the UDPReceiver has tossed it onto the inbound queue */ + long getTimeSinceEnqueue() { return (_enqueueTime > 0 ? _context.clock().now() - _enqueueTime : 0); } + /** a packet handler has pulled it off the inbound queue */ + long getTimeSinceReceived() { return (_receivedTime > 0 ? _context.clock().now() - _receivedTime : 0); } + /** a packet handler has decrypted and verified the packet and is about to parse out the good bits */ + long getTimeSinceReceiveFragments() { return (_beforeReceiveFragments > 0 ? _context.clock().now() - _beforeReceiveFragments : 0); } + /** a packet handler has finished parsing out the good bits */ + long getTimeSinceHandling() { return (_afterHandlingTime > 0 ? _context.clock().now() - _afterHandlingTime : 0); } public String toString() { verifyNotReleased(); @@ -203,7 +225,12 @@ public class UDPPacket { buf.append(_packet.getAddress().getHostAddress()).append(":"); buf.append(_packet.getPort()); buf.append(" id=").append(System.identityHashCode(this)); - buf.append("\ndata=").append(Base64.encode(_packet.getData(), _packet.getOffset(), _packet.getLength())); + + buf.append(" sinceEnqueued=").append((_enqueueTime > 0 ? _context.clock().now()-_enqueueTime : -1)); + buf.append(" sinceReceived=").append((_receivedTime > 0 ? _context.clock().now()-_receivedTime : -1)); + buf.append(" beforeReceiveFragments=").append((_beforeReceiveFragments > 0 ? _context.clock().now()-_beforeReceiveFragments : -1)); + buf.append(" sinceHandled=").append((_afterHandlingTime > 0 ? 
_context.clock().now()-_afterHandlingTime : -1));
+        //buf.append("\ndata=").append(Base64.encode(_packet.getData(), _packet.getOffset(), _packet.getLength()));
         return buf.toString();
     }
diff --git a/router/java/src/net/i2p/router/transport/udp/UDPReceiver.java b/router/java/src/net/i2p/router/transport/udp/UDPReceiver.java
index 8d0ada03e..4cde762c6 100644
--- a/router/java/src/net/i2p/router/transport/udp/UDPReceiver.java
+++ b/router/java/src/net/i2p/router/transport/udp/UDPReceiver.java
@@ -29,10 +29,13 @@ public class UDPReceiver {
     private boolean _keepRunning;
     private Runner _runner;
     private UDPTransport _transport;
+    private static int __id;
+    private int _id;
 
     public UDPReceiver(RouterContext ctx, UDPTransport transport, DatagramSocket socket, String name) {
         _context = ctx;
         _log = ctx.logManager().getLog(UDPReceiver.class);
+        _id = ++__id;
         _name = name;
         _inboundQueue = new ArrayList(128);
         _socket = socket;
@@ -48,7 +51,7 @@ public class UDPReceiver {
     public void startup() {
         adjustDropProbability();
         _keepRunning = true;
-        I2PThread t = new I2PThread(_runner, _name);
+        I2PThread t = new I2PThread(_runner, _name + "." + _id);
         t.setDaemon(true);
         t.start();
     }
@@ -65,11 +68,11 @@ public class UDPReceiver {
         String p = _context.getProperty("i2np.udp.dropProbability");
         if (p != null) {
             try {
-                ARTIFICIAL_DROP_PROBABILITY = Float.parseFloat(p);
+                ARTIFICIAL_DROP_PROBABILITY = Integer.parseInt(p);
             } catch (NumberFormatException nfe) {}
             if (ARTIFICIAL_DROP_PROBABILITY < 0) ARTIFICIAL_DROP_PROBABILITY = 0;
         } else {
-            ARTIFICIAL_DROP_PROBABILITY = 0;
+            //ARTIFICIAL_DROP_PROBABILITY = 0;
         }
     }
 
@@ -83,12 +86,12 @@ public class UDPReceiver {
     }
 
     /** if a packet been sitting in the queue for a full second (meaning the handlers are overwhelmed), drop subsequent packets */
-    private static final long MAX_QUEUE_PERIOD = 1*1000;
+    private static final long MAX_QUEUE_PERIOD = 2*1000;
 
-    private static float ARTIFICIAL_DROP_PROBABILITY = 0.0f; // 0.02f; // 0.0f;
+    private static int ARTIFICIAL_DROP_PROBABILITY = 0; // 4
 
-    private static final int ARTIFICIAL_DELAY = 0; // 100;
-    private static final int ARTIFICIAL_DELAY_BASE = 0; //100;
+    private static final int ARTIFICIAL_DELAY = 0; // 200;
+    private static final int ARTIFICIAL_DELAY_BASE = 0; //600;
 
     private int receive(UDPPacket packet) {
         //adjustDropProbability();
@@ -96,10 +99,10 @@ public class UDPReceiver {
         if (ARTIFICIAL_DROP_PROBABILITY > 0) {
             // the first check is to let the compiler optimize away this
             // random block on the live system when the probability is == 0
-            int v = _context.random().nextInt(1000);
-            if (v < ARTIFICIAL_DROP_PROBABILITY*1000) {
+            int v = _context.random().nextInt(100);
+            if (v <= ARTIFICIAL_DROP_PROBABILITY) {
                 if (_log.shouldLog(Log.ERROR))
-                    _log.error("Drop with v=" + v + " p=" + ARTIFICIAL_DROP_PROBABILITY + " packet size: " + packet.getPacket().getLength());
+                    _log.error("Drop with v=" + v + " p=" + ARTIFICIAL_DROP_PROBABILITY + " packet size: " + packet.getPacket().getLength() + ": " + packet);
                 _context.statManager().addRateData("udp.droppedInboundProbabalistically", 1, 0);
                 return -1;
             } else {
@@ -108,15 +111,20 @@ public class UDPReceiver {
         }
 
         if ( (ARTIFICIAL_DELAY > 0) || (ARTIFICIAL_DELAY_BASE > 0) ) {
-            SimpleTimer.getInstance().addEvent(new ArtificiallyDelayedReceive(packet), ARTIFICIAL_DELAY_BASE + _context.random().nextInt(ARTIFICIAL_DELAY));
+            long delay = ARTIFICIAL_DELAY_BASE + _context.random().nextInt(ARTIFICIAL_DELAY);
+            if (_log.shouldLog(Log.INFO))
+                _log.info("Delay packet " + packet + " for " + delay);
+            
SimpleTimer.getInstance().addEvent(new ArtificiallyDelayedReceive(packet), delay); + return -1; } return doReceive(packet); } private final int doReceive(UDPPacket packet) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Received: " + packet); + if (_log.shouldLog(Log.INFO)) + _log.info("Received: " + packet); + packet.enqueue(); boolean rejected = false; int queueSize = 0; long headPeriod = 0; @@ -164,17 +172,16 @@ public class UDPReceiver { */ public UDPPacket receiveNext() { while (_keepRunning) { - try { - synchronized (_inboundQueue) { - if (_inboundQueue.size() > 0) { - UDPPacket rv = (UDPPacket)_inboundQueue.remove(0); + synchronized (_inboundQueue) { + if (_inboundQueue.size() <= 0) + try { _inboundQueue.wait(); } catch (InterruptedException ie) {} + if (_inboundQueue.size() > 0) { + UDPPacket rv = (UDPPacket)_inboundQueue.remove(0); + if (_inboundQueue.size() > 0) _inboundQueue.notifyAll(); - return rv; - } else { - _inboundQueue.wait(500); - } + return rv; } - } catch (InterruptedException ie) {} + } } return null; } @@ -185,7 +192,7 @@ public class UDPReceiver { _socketChanged = false; while (_keepRunning) { if (_socketChanged) { - Thread.currentThread().setName(_name); + Thread.currentThread().setName(_name + "." + _id); _socketChanged = false; } UDPPacket packet = UDPPacket.acquire(_context); @@ -197,14 +204,14 @@ public class UDPReceiver { try { Thread.sleep(10); } catch (InterruptedException ie) {} try { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Before blocking socket.receive"); + if (_log.shouldLog(Log.INFO)) + _log.info("Before blocking socket.receive on " + System.identityHashCode(packet)); synchronized (Runner.this) { _socket.receive(packet.getPacket()); } int size = packet.getPacket().getLength(); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("After blocking socket.receive: packet is " + size + " bytes!"); + if (_log.shouldLog(Log.INFO)) + _log.info("After blocking socket.receive: packet is " + size + " bytes on " + System.identityHashCode(packet)); packet.resetBegin(); // and block after we know how much we read but before diff --git a/router/java/src/net/i2p/router/transport/udp/UDPSender.java b/router/java/src/net/i2p/router/transport/udp/UDPSender.java index 274c138d4..63860dcc9 100644 --- a/router/java/src/net/i2p/router/transport/udp/UDPSender.java +++ b/router/java/src/net/i2p/router/transport/udp/UDPSender.java @@ -185,6 +185,8 @@ public class UDPSender { } long sendTime = _context.clock().now() - before; _context.statManager().addRateData("udp.socketSendTime", sendTime, packet.getLifetime()); + if (_log.shouldLog(Log.INFO)) + _log.info("Sent the packet " + packet); long throttleTime = afterBW - acquireTime; if (throttleTime > 10) _context.statManager().addRateData("udp.sendBWThrottleTime", throttleTime, acquireTime - packet.getBegin()); diff --git a/router/java/src/net/i2p/router/transport/udp/UDPTransport.java b/router/java/src/net/i2p/router/transport/udp/UDPTransport.java index 1b09bfafe..66b0c0784 100644 --- a/router/java/src/net/i2p/router/transport/udp/UDPTransport.java +++ b/router/java/src/net/i2p/router/transport/udp/UDPTransport.java @@ -469,15 +469,15 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority boolean addRemotePeerState(PeerState peer) { if (_log.shouldLog(Log.INFO)) _log.info("Add remote peer state: " + peer); + Hash remotePeer = peer.getRemotePeer(); long oldEstablishedOn = -1; PeerState oldPeer = null; - if (peer.getRemotePeer() != null) { + if (remotePeer != null) { synchronized (_peersByIdent) { - 
oldPeer = (PeerState)_peersByIdent.put(peer.getRemotePeer(), peer); + oldPeer = (PeerState)_peersByIdent.put(remotePeer, peer); if ( (oldPeer != null) && (oldPeer != peer) ) { - // should we transfer the oldPeer's RTT/RTO/etc? nah - // or perhaps reject the new session? nah, - // using the new one allow easier reconnect + // transfer over the old state/inbound message fragments/etc + peer.loadFrom(oldPeer); oldEstablishedOn = oldPeer.getKeyEstablishedTime(); } } @@ -491,8 +491,8 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority synchronized (_peersByRemoteHost) { oldPeer = (PeerState)_peersByRemoteHost.put(remoteId, peer); if ( (oldPeer != null) && (oldPeer != peer) ) { - //_peersByRemoteHost.put(remoteString, oldPeer); - //return false; + // transfer over the old state/inbound message fragments/etc + peer.loadFrom(oldPeer); oldEstablishedOn = oldPeer.getKeyEstablishedTime(); } } @@ -531,13 +531,56 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority return super.getCurrentAddress(); } + public void messageReceived(I2NPMessage inMsg, RouterIdentity remoteIdent, Hash remoteIdentHash, long msToReceive, int bytesReceived) { + if (inMsg.getType() == DatabaseStoreMessage.MESSAGE_TYPE) { + DatabaseStoreMessage dsm = (DatabaseStoreMessage)inMsg; + if ( (dsm.getRouterInfo() != null) && + (dsm.getRouterInfo().getNetworkId() != Router.NETWORK_ID) ) { + /* + if (remoteIdentHash != null) { + _context.shitlist().shitlistRouter(remoteIdentHash, "Sent us a peer from the wrong network"); + dropPeer(remoteIdentHash); + if (_log.shouldLog(Log.ERROR)) + _log.error("Dropping the peer " + remoteIdentHash + + " because they are in the wrong net"); + } else if (remoteIdent != null) { + _context.shitlist().shitlistRouter(remoteIdent.calculateHash(), "Sent us a peer from the wrong network"); + dropPeer(remoteIdent.calculateHash()); + if (_log.shouldLog(Log.ERROR)) + _log.error("Dropping the peer " + remoteIdent.calculateHash() + + " because they are in the wrong net"); + } + */ + _context.shitlist().shitlistRouter(dsm.getRouterInfo().getIdentity().calculateHash(), "Part of the wrong network"); + dropPeer(dsm.getRouterInfo().getIdentity().calculateHash()); + if (_log.shouldLog(Log.WARN)) + _log.warn("Dropping the peer " + dsm.getRouterInfo().getIdentity().calculateHash().toBase64() + + " because they are in the wrong net"); + return; + } else { + if (dsm.getRouterInfo() != null) { + if (_log.shouldLog(Log.INFO)) + _log.info("Received an RI from the same net"); + } else { + if (_log.shouldLog(Log.INFO)) + _log.info("Received a leaseSet: " + dsm); + } + } + } else { + if (_log.shouldLog(Log.INFO)) + _log.info("Received another message: " + inMsg.getClass().getName()); + } + super.messageReceived(inMsg, remoteIdent, remoteIdentHash, msToReceive, bytesReceived); + } + + void dropPeer(Hash peer) { PeerState state = getPeerState(peer); if (state != null) dropPeer(state, false); } private void dropPeer(PeerState peer, boolean shouldShitlist) { - if (_log.shouldLog(Log.INFO)) { + if (_log.shouldLog(Log.WARN)) { long now = _context.clock().now(); StringBuffer buf = new StringBuffer(4096); long timeSinceSend = now - peer.getLastSendTime(); @@ -574,7 +617,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority buf.append("\n"); } } - _log.info(buf.toString(), new Exception("Dropped by")); + _log.warn(buf.toString(), new Exception("Dropped by")); } _introManager.remove(peer); @@ -684,10 +727,12 @@ public class UDPTransport extends 
TransportImpl implements TimedWeightedPriority long lastSend = peer.getLastSendFullyTime(); long lastRecv = peer.getLastReceiveTime(); long now = _context.clock().now(); + int inboundActive = peer.expireInboundMessages(); if ( (lastSend > 0) && (lastRecv > 0) ) { if ( (now - lastSend > MAX_IDLE_TIME) && (now - lastRecv > MAX_IDLE_TIME) && - (peer.getConsecutiveFailedSends() > 0) ) { + (peer.getConsecutiveFailedSends() > 0) && + (inboundActive <= 0)) { // peer is waaaay idle, drop the con and queue it up as a new con dropPeer(peer, false); msg.timestamp("peer is really idle, dropping con and reestablishing"); @@ -747,6 +792,9 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority void rebuildExternalAddress() { rebuildExternalAddress(true); } void rebuildExternalAddress(boolean allowRebuildRouterInfo) { + if (_context.router().isHidden()) + return; + // if the external port is specified, we want to use that to bind to even // if we don't know the external host. String port = _context.getProperty(PROP_EXTERNAL_PORT); @@ -822,6 +870,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority boolean wantsRebuild = false; if ( (_externalAddress == null) || !(_externalAddress.equals(addr)) ) wantsRebuild = true; + RouterAddress oldAddress = _externalAddress; _externalAddress = addr; if (_log.shouldLog(Log.INFO)) @@ -883,7 +932,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority return ""; } - private static final int DROP_INACTIVITY_TIME = 10*1000; + private static final int DROP_INACTIVITY_TIME = 60*1000; public void failed(OutboundMessageState msg) { if (msg == null) return; @@ -892,31 +941,76 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority ( (msg.getMaxSends() >= OutboundMessageFragments.MAX_VOLLEYS) || (msg.isExpired())) ) { OutNetMessage m = msg.getMessage(); + long recvDelay = _context.clock().now() - msg.getPeer().getLastReceiveTime(); + long sendDelay = _context.clock().now() - msg.getPeer().getLastSendFullyTime(); if (m != null) m.timestamp("message failure - volleys = " + msg.getMaxSends() - + " lastReceived: " + (_context.clock().now() - msg.getPeer().getLastReceiveTime()) - + " lastSentFully: " + (_context.clock().now() - msg.getPeer().getLastSendFullyTime()) + + " lastReceived: " + recvDelay + + " lastSentFully: " + sendDelay + " expired? " + msg.isExpired()); consecutive = msg.getPeer().incrementConsecutiveFailedSends(); if (_log.shouldLog(Log.WARN)) - _log.warn("Consecutive failure #" + consecutive + " sending to " + msg.getPeer()); + _log.warn("Consecutive failure #" + consecutive + + " on " + msg.toString() + + " to " + msg.getPeer()); if ( (consecutive > MAX_CONSECUTIVE_FAILED) && (msg.getPeer().getInactivityTime() > DROP_INACTIVITY_TIME)) dropPeer(msg.getPeer(), false); } - failed(msg.getMessage()); + noteSend(msg, false); + super.afterSend(msg.getMessage(), false); } - public void failed(OutNetMessage msg) { + private void noteSend(OutboundMessageState msg, boolean successful) { + int pushCount = msg.getPushCount(); + int sends = msg.getMaxSends(); + boolean expired = msg.isExpired(); + + OutNetMessage m = msg.getMessage(); + PeerState p = msg.getPeer(); + StringBuffer buf = new StringBuffer(64); + buf.append(" lifetime: ").append(msg.getLifetime()); + buf.append(" sends: ").append(sends); + buf.append(" pushes: ").append(pushCount); + buf.append(" expired? 
").append(expired); + buf.append(" unacked: ").append(msg.getUnackedSize()); + if (!successful) { + buf.append(" consec_failed: ").append(p.getConsecutiveFailedSends()); + long timeSinceSend = _context.clock().now() - p.getLastSendFullyTime(); + buf.append(" lastFullSend: ").append(timeSinceSend); + long timeSinceRecv = _context.clock().now() - p.getLastReceiveTime(); + buf.append(" lastRecv: ").append(timeSinceRecv); + buf.append(" xfer: ").append(p.getSendBps()).append("/").append(p.getReceiveBps()); + buf.append(" mtu: ").append(p.getMTU()); + buf.append(" rto: ").append(p.getRTO()); + buf.append(" sent: ").append(p.getMessagesSent()).append("/").append(p.getPacketsTransmitted()); + buf.append(" recv: ").append(p.getMessagesReceived()).append("/").append(p.getPacketsReceived()); + buf.append(" uptime: ").append(_context.clock().now()-p.getKeyEstablishedTime()); + } + if ( (m != null) && (p != null) ) { + _context.messageHistory().sendMessage(m.getMessageType(), msg.getMessageId(), m.getExpiration(), + p.getRemotePeer(), successful, buf.toString()); + } else { + _context.messageHistory().sendMessage("establish", msg.getMessageId(), -1, + (p != null ? p.getRemotePeer() : null), successful, buf.toString()); + } + } + + public void failed(OutNetMessage msg, String reason) { if (msg == null) return; if (_log.shouldLog(Log.WARN)) _log.warn("Sending message failed: " + msg, new Exception("failed from")); + + _context.messageHistory().sendMessage(msg.getMessageType(), msg.getMessageId(), msg.getExpiration(), + msg.getTarget().getIdentity().calculateHash(), false, reason); super.afterSend(msg, false); } - public void succeeded(OutNetMessage msg) { + public void succeeded(OutboundMessageState msg) { if (msg == null) return; if (_log.shouldLog(Log.DEBUG)) _log.debug("Sending message succeeded: " + msg); - super.afterSend(msg, true); + noteSend(msg, true); + if (msg.getMessage() != null) + super.afterSend(msg.getMessage(), true); } public int countActivePeers() { diff --git a/router/java/src/net/i2p/router/tunnel/BatchedPreprocessor.java b/router/java/src/net/i2p/router/tunnel/BatchedPreprocessor.java index 775f7eec5..30822c4a3 100644 --- a/router/java/src/net/i2p/router/tunnel/BatchedPreprocessor.java +++ b/router/java/src/net/i2p/router/tunnel/BatchedPreprocessor.java @@ -68,23 +68,6 @@ public class BatchedPreprocessor extends TrivialPreprocessor { if (_log.shouldLog(Log.DEBUG)) _log.debug("Preprocess queue with " + pending.size() + " to send"); - if (false) { - if (DISABLE_BATCHING || getSendDelay() <= 0) { - if (_log.shouldLog(Log.INFO)) - _log.info("No batching, send all messages immediately"); - while (pending.size() > 0) { - // loops because sends may be partial - TunnelGateway.Pending msg = (TunnelGateway.Pending)pending.get(0); - send(pending, 0, 0, sender, rec); - if (msg.getOffset() >= msg.getData().length) { - notePreprocessing(msg.getMessageId(), msg.getFragmentNumber()); - pending.remove(0); - } - } - return false; - } - } - int batchCount = 0; int beforeLooping = pending.size(); @@ -104,7 +87,7 @@ public class BatchedPreprocessor extends TrivialPreprocessor { msg = (TunnelGateway.Pending)pending.get(i); allocated -= curWanted; if (_log.shouldLog(Log.DEBUG)) - _log.debug("Pushback of " + curWanted + " (message " + (i+1) + ")"); + _log.debug("Pushback of " + curWanted + " (message " + (i+1) + " in " + pending + ")"); } if (_pendingSince > 0) { long waited = _context.clock().now() - _pendingSince; @@ -122,13 +105,13 @@ public class BatchedPreprocessor extends TrivialPreprocessor { if 
(cur.getOffset() < cur.getData().length) throw new IllegalArgumentException("i=" + i + " j=" + j + " off=" + cur.getOffset() + " len=" + cur.getData().length + " alloc=" + allocated); - notePreprocessing(cur.getMessageId(), cur.getFragmentNumber()); + notePreprocessing(cur.getMessageId(), cur.getFragmentNumber(), cur.getData().length, cur.getMessageIds(), "flushed allocated"); _context.statManager().addRateData("tunnel.writeDelay", cur.getLifetime(), cur.getData().length); } if (msg.getOffset() >= msg.getData().length) { // ok, this last message fit perfectly, remove it too TunnelGateway.Pending cur = (TunnelGateway.Pending)pending.remove(0); - notePreprocessing(cur.getMessageId(), cur.getFragmentNumber()); + notePreprocessing(cur.getMessageId(), cur.getFragmentNumber(), msg.getData().length, msg.getMessageIds(), "flushed tail, remaining: " + pending); _context.statManager().addRateData("tunnel.writeDelay", cur.getLifetime(), cur.getData().length); } if (i > 0) @@ -160,7 +143,7 @@ public class BatchedPreprocessor extends TrivialPreprocessor { TunnelGateway.Pending cur = (TunnelGateway.Pending)pending.get(i); if (cur.getOffset() >= cur.getData().length) { pending.remove(i); - notePreprocessing(cur.getMessageId(), cur.getFragmentNumber()); + notePreprocessing(cur.getMessageId(), cur.getFragmentNumber(), cur.getData().length, cur.getMessageIds(), "flushed remaining"); _context.statManager().addRateData("tunnel.writeDelay", cur.getLifetime(), cur.getData().length); i--; } @@ -234,7 +217,7 @@ public class BatchedPreprocessor extends TrivialPreprocessor { */ protected void send(List pending, int startAt, int sendThrough, TunnelGateway.Sender sender, TunnelGateway.Receiver rec) { if (_log.shouldLog(Log.DEBUG)) - _log.debug("Sending " + startAt + ":" + sendThrough + " out of " + pending.size()); + _log.debug("Sending " + startAt + ":" + sendThrough + " out of " + pending); byte preprocessed[] = _dataCache.acquire().getData(); int offset = 0; @@ -256,7 +239,11 @@ public class BatchedPreprocessor extends TrivialPreprocessor { preprocess(preprocessed, offset); - sender.sendPreprocessed(preprocessed, rec); + long msgId = sender.sendPreprocessed(preprocessed, rec); + for (int i = 0; i < pending.size(); i++) { + TunnelGateway.Pending cur = (TunnelGateway.Pending)pending.get(i); + cur.addMessageId(msgId); + } } /** diff --git a/router/java/src/net/i2p/router/tunnel/BatchedRouterPreprocessor.java b/router/java/src/net/i2p/router/tunnel/BatchedRouterPreprocessor.java index 64c48f3f8..1f72cf47b 100644 --- a/router/java/src/net/i2p/router/tunnel/BatchedRouterPreprocessor.java +++ b/router/java/src/net/i2p/router/tunnel/BatchedRouterPreprocessor.java @@ -1,6 +1,6 @@ package net.i2p.router.tunnel; -import java.util.Properties; +import java.util.*; import net.i2p.router.RouterContext; /** @@ -75,10 +75,10 @@ public class BatchedRouterPreprocessor extends BatchedPreprocessor { return DEFAULT_BATCH_FREQUENCY; } - protected void notePreprocessing(long messageId, int numFragments) { + protected void notePreprocessing(long messageId, int numFragments, int totalLength, List messageIds, String msg) { if (_config != null) - _routerContext.messageHistory().fragmentMessage(messageId, numFragments, _config); + _routerContext.messageHistory().fragmentMessage(messageId, numFragments, totalLength, messageIds, _config, msg); else - _routerContext.messageHistory().fragmentMessage(messageId, numFragments, _hopConfig); + _routerContext.messageHistory().fragmentMessage(messageId, numFragments, totalLength, messageIds, _hopConfig, 
msg); } } diff --git a/router/java/src/net/i2p/router/tunnel/BuildMessageGenerator.java b/router/java/src/net/i2p/router/tunnel/BuildMessageGenerator.java index a4c3c48eb..49a978eef 100644 --- a/router/java/src/net/i2p/router/tunnel/BuildMessageGenerator.java +++ b/router/java/src/net/i2p/router/tunnel/BuildMessageGenerator.java @@ -12,7 +12,7 @@ import net.i2p.util.Log; */ public class BuildMessageGenerator { // cached, rather than creating lots of temporary Integer objects whenever we build a tunnel - static final Integer ORDER[] = new Integer[TunnelBuildMessage.RECORD_COUNT]; + public static final Integer ORDER[] = new Integer[TunnelBuildMessage.RECORD_COUNT]; static { for (int i = 0; i < ORDER.length; i++) ORDER[i] = new Integer(i); } /** return null if it is unable to find a router's public key (etc) */ @@ -46,34 +46,40 @@ public class BuildMessageGenerator { * containing the hop's configuration (as well as the reply info, if it is an outbound endpoint) */ public void createRecord(int recordNum, int hop, TunnelBuildMessage msg, TunnelCreatorConfig cfg, Hash replyRouter, long replyTunnel, I2PAppContext ctx, PublicKey peerKey) { - Log log = ctx.logManager().getLog(getClass()); - BuildRequestRecord req = null; - if ( (!cfg.isInbound()) && (hop + 1 == cfg.getLength()) ) //outbound endpoint - req = createUnencryptedRecord(ctx, cfg, hop, replyRouter, replyTunnel); - else - req = createUnencryptedRecord(ctx, cfg, hop, null, -1); byte encrypted[] = new byte[TunnelBuildMessage.RECORD_SIZE]; - if (hop < cfg.getLength()) { + Log log = ctx.logManager().getLog(getClass()); + if (peerKey != null) { + BuildRequestRecord req = null; + if ( (!cfg.isInbound()) && (hop + 1 == cfg.getLength()) ) //outbound endpoint + req = createUnencryptedRecord(ctx, cfg, hop, replyRouter, replyTunnel); + else + req = createUnencryptedRecord(ctx, cfg, hop, null, -1); Hash peer = cfg.getPeer(hop); - if (peerKey == null) - throw new RuntimeException("hop = " + hop + " recordNum = " + recordNum + " len = " + cfg.getLength()); - //if (log.shouldLog(Log.DEBUG)) - // log.debug("Record " + recordNum + "/" + hop + ": unencrypted = " + Base64.encode(req.getData().getData())); + if (log.shouldLog(Log.DEBUG)) + log.debug("Record " + recordNum + "/" + hop + "/" + peer.toBase64() + + ": unencrypted = " + Base64.encode(req.getData().getData())); req.encryptRecord(ctx, peerKey, peer, encrypted, 0); //if (log.shouldLog(Log.DEBUG)) // log.debug("Record " + recordNum + "/" + hop + ": encrypted = " + Base64.encode(encrypted)); } else { + if (log.shouldLog(Log.DEBUG)) + log.debug("Record " + recordNum + "/" + hop + "/ is blank/random"); ctx.random().nextBytes(encrypted); } msg.setRecord(recordNum, new ByteArray(encrypted)); } private BuildRequestRecord createUnencryptedRecord(I2PAppContext ctx, TunnelCreatorConfig cfg, int hop, Hash replyRouter, long replyTunnel) { + Log log = ctx.logManager().getLog(BuildMessageGenerator.class); if (hop < cfg.getLength()) { // ok, now lets fill in some data HopConfig hopConfig = cfg.getConfig(hop); Hash peer = cfg.getPeer(hop); - long recvTunnelId = hopConfig.getReceiveTunnel().getTunnelId(); + long recvTunnelId = -1; + if (cfg.isInbound() || (hop > 0)) + recvTunnelId = hopConfig.getReceiveTunnel().getTunnelId(); + else + recvTunnelId = 0; long nextTunnelId = -1; Hash nextPeer = null; if (hop + 1 < cfg.getLength()) { @@ -101,10 +107,22 @@ public class BuildMessageGenerator { boolean isInGW = (cfg.isInbound() && (hop == 0)); boolean isOutEnd = (!cfg.isInbound() && (hop + 1 >= cfg.getLength())); + long 
nextMsgId = -1; + if (isOutEnd || (cfg.isInbound() && (hop + 2 >= cfg.getLength())) ) { + nextMsgId = cfg.getReplyMessageId(); + } else { + // dont care about these intermediary hops + nextMsgId = ctx.random().nextLong(I2NPMessage.MAX_ID_VALUE); + } + + if (log.shouldLog(Log.DEBUG)) + log.debug("Hop " + hop + " has the next message ID of " + nextMsgId + " for " + cfg + + " with replyKey " + replyKey.toBase64() + " and replyIV " + Base64.encode(iv)); + BuildRequestRecord rec= new BuildRequestRecord(); - rec.createRecord(ctx, recvTunnelId, peer, nextTunnelId, nextPeer, layerKey, ivKey, replyKey, + rec.createRecord(ctx, recvTunnelId, peer, nextTunnelId, nextPeer, nextMsgId, layerKey, ivKey, replyKey, iv, isInGW, isOutEnd); - + return rec; } else { return null; @@ -116,21 +134,49 @@ public class BuildMessageGenerator { * @param order list of hop #s as Integers. For instance, if (order.get(1) is 4), it is peer cfg.getPeer(4) */ public void layeredEncrypt(I2PAppContext ctx, TunnelBuildMessage msg, TunnelCreatorConfig cfg, List order) { + Log log = ctx.logManager().getLog(BuildMessageGenerator.class); // encrypt the records so that the right elements will be visible at the right time for (int i = 0; i < TunnelBuildMessage.RECORD_COUNT; i++) { ByteArray rec = msg.getRecord(i); Integer hopNum = (Integer)order.get(i); int hop = hopNum.intValue(); - if (hop >= cfg.getLength()) - continue; // no need to encrypt it, as its random + if ( (isBlank(cfg, hop)) || (!cfg.isInbound() && hop == 1) ) { + if (log.shouldLog(Log.DEBUG)) + log.debug(msg.getUniqueId() + ": not pre-decrypting record " + i + "/" + hop + " for " + cfg); + continue; + } + if (log.shouldLog(Log.DEBUG)) + log.debug(msg.getUniqueId() + ": pre-decrypting record " + i + "/" + hop + " for " + cfg); // ok, now decrypt the record with all of the reply keys from cfg.getConfig(0) through hop-1 - for (int j = hop-1; j >= 0; j--) { + int stop = (cfg.isInbound() ? 
0 : 1); + for (int j = hop-1; j >= stop; j--) { HopConfig hopConfig = cfg.getConfig(j); SessionKey key = hopConfig.getReplyKey(); byte iv[] = hopConfig.getReplyIV().getData(); int off = rec.getOffset(); + if (log.shouldLog(Log.DEBUG)) + log.debug(msg.getUniqueId() + ": pre-decrypting record " + i + "/" + hop + " for " + cfg + + " with " + key.toBase64() + "/" + Base64.encode(iv)); ctx.aes().decrypt(rec.getData(), off, rec.getData(), off, key, iv, TunnelBuildMessage.RECORD_SIZE); } } + if (log.shouldLog(Log.DEBUG)) + log.debug(msg.getUniqueId() + ": done pre-decrypting all records for " + cfg); + } + + public static boolean isBlank(TunnelCreatorConfig cfg, int hop) { + if (cfg.isInbound()) { + if (hop + 1 >= cfg.getLength()) + return true; + else + return false; + } else { + if (hop == 0) + return true; + else if (hop >= cfg.getLength()) + return true; + else + return false; + } } } diff --git a/router/java/src/net/i2p/router/tunnel/BuildMessageProcessor.java b/router/java/src/net/i2p/router/tunnel/BuildMessageProcessor.java index 984f34e6d..6eaa59e3b 100644 --- a/router/java/src/net/i2p/router/tunnel/BuildMessageProcessor.java +++ b/router/java/src/net/i2p/router/tunnel/BuildMessageProcessor.java @@ -4,6 +4,7 @@ import net.i2p.I2PAppContext; import net.i2p.data.*; import net.i2p.data.i2np.*; import net.i2p.router.RouterContext; +import net.i2p.util.DecayingBloomFilter; import net.i2p.util.Log; /** @@ -12,7 +13,13 @@ import net.i2p.util.Log; * encrypt the reply record, and return a TunnelBuildMessage to forward on to * the next hop */ -class BuildMessageProcessor { +public class BuildMessageProcessor { + private DecayingBloomFilter _filter; + + public BuildMessageProcessor(I2PAppContext ctx) { + _filter = new DecayingBloomFilter(ctx, 60*1000, 32); + ctx.statManager().createRateStat("tunnel.buildRequestDup", "How frequently we get dup build request messages", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + } /** * Decrypt the record targetting us, encrypting all of the other records with the included * reply key and IV. The original, encrypted record targetting us is removed from the request @@ -30,17 +37,31 @@ class BuildMessageProcessor { int off = rec.getOffset(); int len = BuildRequestRecord.PEER_SIZE; if (DataHelper.eq(ourHash.getData(), 0, rec.getData(), off, len)) { + boolean isDup = _filter.add(rec.getData(), off + len, 32); + if (isDup) { + if (log.shouldLog(Log.WARN)) + log.debug(msg.getUniqueId() + ": A record matching our hash was found, but it seems to be a duplicate"); + ctx.statManager().addRateData("tunnel.buildRequestDup", 1, 0); + return null; + } BuildRequestRecord req = new BuildRequestRecord(); boolean ok = req.decryptRecord(ctx, privKey, ourHash, rec); - if (ok) + if (ok) { + if (log.shouldLog(Log.DEBUG)) + log.debug(msg.getUniqueId() + ": A record matching our hash was found and decrypted"); rv = req; - else + } else { + if (log.shouldLog(Log.DEBUG)) + log.debug(msg.getUniqueId() + ": A record matching our hash was found, but could not be decrypted"); return null; // our hop is invalid? b0rkage + } ourHop = i; } } if (rv == null) { // none of the records matched, b0rk + if (log.shouldLog(Log.DEBUG)) + log.debug(msg.getUniqueId() + ": No records matching our hash was found"); return null; } SessionKey replyKey = rv.readReplyKey(); @@ -48,9 +69,9 @@ class BuildMessageProcessor { int ivOff = 0; for (int i = 0; i < TunnelBuildMessage.RECORD_COUNT; i++) { if (i != ourHop) { - if (log.shouldLog(Log.DEBUG)) - log.debug("Encrypting record " + i + "/? 
with replyKey " + replyKey.toBase64() + "/" + Base64.encode(iv, ivOff, 16)); ByteArray data = msg.getRecord(i); + if (log.shouldLog(Log.DEBUG)) + log.debug("Encrypting record " + i + "/?/" + data.getOffset() + "/" + data.getValid() + " with replyKey " + replyKey.toBase64() + "/" + Base64.encode(iv, ivOff, 16)); ctx.aes().encrypt(data.getData(), data.getOffset(), data.getData(), data.getOffset(), replyKey, iv, ivOff, data.getValid()); } diff --git a/router/java/src/net/i2p/router/tunnel/BuildMessageTest.java b/router/java/src/net/i2p/router/tunnel/BuildMessageTest.java index 60f26757a..ec8140554 100644 --- a/router/java/src/net/i2p/router/tunnel/BuildMessageTest.java +++ b/router/java/src/net/i2p/router/tunnel/BuildMessageTest.java @@ -57,8 +57,8 @@ public class BuildMessageTest { // now msg is fully encrypted, so lets go through the hops, decrypting and replying // as necessary + BuildMessageProcessor proc = new BuildMessageProcessor(ctx); for (int i = 0; i < cfg.getLength(); i++) { - BuildMessageProcessor proc = new BuildMessageProcessor(); // this not only decrypts the current hop's record, but encrypts the other records // with the reply key BuildRequestRecord req = proc.decrypt(ctx, msg, _peers[i], _privKeys[i]); @@ -77,7 +77,7 @@ public class BuildMessageTest { int ourSlot = -1; BuildResponseRecord resp = new BuildResponseRecord(); - byte reply[] = resp.create(ctx, 0, req.readReplyKey(), req.readReplyIV()); + byte reply[] = resp.create(ctx, 0, req.readReplyKey(), req.readReplyIV(), -1); for (int j = 0; j < TunnelBuildMessage.RECORD_COUNT; j++) { if (msg.getRecord(j) == null) { ourSlot = j; diff --git a/router/java/src/net/i2p/router/tunnel/BuildReplyHandler.java b/router/java/src/net/i2p/router/tunnel/BuildReplyHandler.java index 0e6c7bc4b..7627f8069 100644 --- a/router/java/src/net/i2p/router/tunnel/BuildReplyHandler.java +++ b/router/java/src/net/i2p/router/tunnel/BuildReplyHandler.java @@ -21,12 +21,27 @@ public class BuildReplyHandler { * always have 0 as their value */ public int[] decrypt(I2PAppContext ctx, TunnelBuildReplyMessage reply, TunnelCreatorConfig cfg, List recordOrder) { + Log log = ctx.logManager().getLog(getClass()); int rv[] = new int[TunnelBuildReplyMessage.RECORD_COUNT]; for (int i = 0; i < rv.length; i++) { int hop = ((Integer)recordOrder.get(i)).intValue(); - int ok = decryptRecord(ctx, reply, cfg, i, hop); - if (ok == -1) return null; - rv[i] = ok; + if (BuildMessageGenerator.isBlank(cfg, hop)) { + // self... 
+ if (log.shouldLog(Log.DEBUG)) + log.debug(reply.getUniqueId() + ": no need to decrypt record " + i + "/" + hop + ", as its out of range: " + cfg); + rv[i] = 0; + } else { + int ok = decryptRecord(ctx, reply, cfg, i, hop); + if (ok == -1) { + if (log.shouldLog(Log.WARN)) + log.warn(reply.getUniqueId() + ": decrypt record " + i + "/" + hop + " was not ok: " + cfg); + return null; + } else { + if (log.shouldLog(Log.DEBUG)) + log.debug(reply.getUniqueId() + ": decrypt record " + i + "/" + hop + " was ok: " + ok + " for " + cfg); + } + rv[i] = ok; + } } return rv; } @@ -38,35 +53,47 @@ public class BuildReplyHandler { */ private int decryptRecord(I2PAppContext ctx, TunnelBuildReplyMessage reply, TunnelCreatorConfig cfg, int recordNum, int hop) { Log log = ctx.logManager().getLog(getClass()); - if (hop >= cfg.getLength()) { + if (BuildMessageGenerator.isBlank(cfg, hop)) { if (log.shouldLog(Log.DEBUG)) - log.debug("Record " + recordNum + "/" + hop + " is fake, so consider it valid..."); + log.debug(reply.getUniqueId() + ": Record " + recordNum + "/" + hop + " is fake, so consider it valid..."); return 0; } ByteArray rec = reply.getRecord(recordNum); int off = rec.getOffset(); - for (int j = cfg.getLength() - 1; j >= hop; j--) { + int start = cfg.getLength() - 1; + if (cfg.isInbound()) + start--; // the last hop in an inbound tunnel response doesn't actually encrypt + // do we need to adjust this for the endpoint? + for (int j = start; j >= hop; j--) { HopConfig hopConfig = cfg.getConfig(j); SessionKey replyKey = hopConfig.getReplyKey(); byte replyIV[] = hopConfig.getReplyIV().getData(); int replyIVOff = hopConfig.getReplyIV().getOffset(); if (log.shouldLog(Log.DEBUG)) - log.debug("Decrypting record " + recordNum + "/" + hop + " with replyKey " + replyKey.toBase64() + "/" + Base64.encode(replyIV, replyIVOff, 16)); + log.debug(reply.getUniqueId() + ": Decrypting record " + recordNum + "/" + hop + "/" + j + " with replyKey " + + replyKey.toBase64() + "/" + Base64.encode(replyIV, replyIVOff, 16) + ": " + cfg); + if (log.shouldLog(Log.DEBUG)) + log.debug(reply.getUniqueId() + ": before decrypt("+ off + "-"+(off+rec.getValid())+"): " + Base64.encode(rec.getData(), off, rec.getValid())); + + if (log.shouldLog(Log.DEBUG)) + log.debug(reply.getUniqueId() + ": Full reply rec: offset=" + off + ", sz=" + rec.getData().length + "/" + rec.getValid() + ", data=" + Base64.encode(rec.getData(), off, TunnelBuildReplyMessage.RECORD_SIZE)); ctx.aes().decrypt(rec.getData(), off, rec.getData(), off, replyKey, replyIV, replyIVOff, TunnelBuildReplyMessage.RECORD_SIZE); + if (log.shouldLog(Log.DEBUG)) + log.debug(reply.getUniqueId() + ": after decrypt: " + Base64.encode(rec.getData(), off, rec.getValid())); } // ok, all of the layered encryption is stripped, so lets verify it // (formatted per BuildResponseRecord.create) Hash h = ctx.sha().calculateHash(rec.getData(), off + Hash.HASH_LENGTH, TunnelBuildReplyMessage.RECORD_SIZE-Hash.HASH_LENGTH); if (!DataHelper.eq(h.getData(), 0, rec.getData(), off, Hash.HASH_LENGTH)) { if (log.shouldLog(Log.DEBUG)) - log.debug("Failed verification on " + recordNum + "/" + hop + ": " + h.toBase64() + " calculated, " + + log.debug(reply.getUniqueId() + ": Failed verification on " + recordNum + "/" + hop + ": " + h.toBase64() + " calculated, " + Base64.encode(rec.getData(), off, Hash.HASH_LENGTH) + " expected\n" + - "Record: " + Base64.encode(rec.getData())); + "Record: " + Base64.encode(rec.getData(), off+Hash.HASH_LENGTH, TunnelBuildReplyMessage.RECORD_SIZE-Hash.HASH_LENGTH)); return 
-1; } else { int rv = (int)DataHelper.fromLong(rec.getData(), off + TunnelBuildReplyMessage.RECORD_SIZE - 1, 1); if (log.shouldLog(Log.DEBUG)) - log.debug("Verified: " + rv + " for record " + recordNum + "/" + hop); + log.debug(reply.getUniqueId() + ": Verified: " + rv + " for record " + recordNum + "/" + hop); return rv; } } diff --git a/router/java/src/net/i2p/router/tunnel/FragmentHandler.java b/router/java/src/net/i2p/router/tunnel/FragmentHandler.java index e265cf643..32128d059 100644 --- a/router/java/src/net/i2p/router/tunnel/FragmentHandler.java +++ b/router/java/src/net/i2p/router/tunnel/FragmentHandler.java @@ -360,6 +360,8 @@ public class FragmentHandler { private void receiveComplete(FragmentedMessage msg) { _completed++; try { + int fragmentCount = msg.getFragmentCount(); + // toByteArray destroys the contents of the message completely byte data[] = msg.toByteArray(); if (msg == null) return; @@ -367,6 +369,7 @@ public class FragmentHandler { _log.debug("RECV(" + data.length + "): " + Base64.encode(data) + " " + _context.sha().calculateHash(data).toBase64()); I2NPMessage m = new I2NPMessageHandler(_context).readMessage(data); + noteReception(m.getUniqueId(), fragmentCount-1, "complete: ");// + msg.toString()); noteCompletion(m.getUniqueId()); _receiver.receiveComplete(m, msg.getTargetRouter(), msg.getTargetTunnel()); } catch (IOException ioe) { diff --git a/router/java/src/net/i2p/router/tunnel/HopProcessor.java b/router/java/src/net/i2p/router/tunnel/HopProcessor.java index 6a3201e0c..f758b502d 100644 --- a/router/java/src/net/i2p/router/tunnel/HopProcessor.java +++ b/router/java/src/net/i2p/router/tunnel/HopProcessor.java @@ -74,8 +74,8 @@ public class HopProcessor { boolean okIV = _validator.receiveIV(orig, offset, orig, offset + IV_LENGTH); if (!okIV) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Invalid IV received on tunnel " + _config.getReceiveTunnelId()); + if (_log.shouldLog(Log.ERROR)) + _log.error("Invalid IV received on tunnel " + _config.getReceiveTunnelId()); return false; } diff --git a/router/java/src/net/i2p/router/tunnel/InboundGatewayReceiver.java b/router/java/src/net/i2p/router/tunnel/InboundGatewayReceiver.java index d0be132f4..8c56379d0 100644 --- a/router/java/src/net/i2p/router/tunnel/InboundGatewayReceiver.java +++ b/router/java/src/net/i2p/router/tunnel/InboundGatewayReceiver.java @@ -18,10 +18,10 @@ public class InboundGatewayReceiver implements TunnelGateway.Receiver { _context = ctx; _config = cfg; } - public void receiveEncrypted(byte[] encrypted) { - receiveEncrypted(encrypted, false); + public long receiveEncrypted(byte[] encrypted) { + return receiveEncrypted(encrypted, false); } - public void receiveEncrypted(byte[] encrypted, boolean alreadySearched) { + public long receiveEncrypted(byte[] encrypted, boolean alreadySearched) { if (_target == null) { _target = _context.netDb().lookupRouterInfoLocally(_config.getSendTo()); if (_target == null) { @@ -29,7 +29,7 @@ public class InboundGatewayReceiver implements TunnelGateway.Receiver { if (!alreadySearched) j = new ReceiveJob(_context, encrypted); _context.netDb().lookupRouterInfo(_config.getSendTo(), j, j, 5*1000); - return; + return -1; } } @@ -43,6 +43,7 @@ public class InboundGatewayReceiver implements TunnelGateway.Receiver { out.setExpiration(msg.getMessageExpiration()); out.setPriority(400); _context.outNetMessagePool().add(out); + return msg.getUniqueId(); } private class ReceiveJob extends JobImpl { diff --git a/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java 
b/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java index 8bfa60a11..45db9536b 100644 --- a/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java +++ b/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java @@ -10,6 +10,7 @@ import net.i2p.data.i2np.DeliveryInstructions; import net.i2p.data.i2np.I2NPMessage; import net.i2p.data.i2np.GarlicMessage; import net.i2p.data.i2np.TunnelGatewayMessage; +import net.i2p.data.i2np.TunnelBuildReplyMessage; import net.i2p.router.ClientMessage; import net.i2p.router.RouterContext; import net.i2p.router.TunnelInfo; @@ -54,7 +55,8 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec if ( (_client != null) && (msg.getType() != DeliveryStatusMessage.MESSAGE_TYPE) && - (msg.getType() != GarlicMessage.MESSAGE_TYPE) ) { + (msg.getType() != GarlicMessage.MESSAGE_TYPE) && + (msg.getType() != TunnelBuildReplyMessage.MESSAGE_TYPE)) { // drop it, since we should only get tunnel test messages and garlic messages down // client tunnels _context.statManager().addRateData("tunnel.dropDangerousClientTunnelMessage", 1, msg.getType()); @@ -156,7 +158,8 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec // a data message targetting the local router is how we send load tests (real // data messages target destinations) _context.statManager().addRateData("tunnel.handleLoadClove", 1, 0); - _context.inNetMessagePool().add(data, null, null); + data = null; + //_context.inNetMessagePool().add(data, null, null); } else { if ( (_client != null) && (data.getType() != DeliveryStatusMessage.MESSAGE_TYPE) ) { // drop it, since the data we receive shouldn't include other stuff, diff --git a/router/java/src/net/i2p/router/tunnel/InboundSender.java b/router/java/src/net/i2p/router/tunnel/InboundSender.java index 59dd39dee..1fd1b915d 100644 --- a/router/java/src/net/i2p/router/tunnel/InboundSender.java +++ b/router/java/src/net/i2p/router/tunnel/InboundSender.java @@ -21,9 +21,9 @@ public class InboundSender implements TunnelGateway.Sender { _processor = new InboundGatewayProcessor(_context, config); } - public void sendPreprocessed(byte[] preprocessed, TunnelGateway.Receiver receiver) { + public long sendPreprocessed(byte[] preprocessed, TunnelGateway.Receiver receiver) { if (USE_ENCRYPTION) _processor.process(preprocessed, 0, preprocessed.length); - receiver.receiveEncrypted(preprocessed); + return receiver.receiveEncrypted(preprocessed); } } diff --git a/router/java/src/net/i2p/router/tunnel/OutboundReceiver.java b/router/java/src/net/i2p/router/tunnel/OutboundReceiver.java index 08d8733ab..f3b53eb4b 100644 --- a/router/java/src/net/i2p/router/tunnel/OutboundReceiver.java +++ b/router/java/src/net/i2p/router/tunnel/OutboundReceiver.java @@ -25,7 +25,7 @@ class OutboundReceiver implements TunnelGateway.Receiver { _nextHopCache = _context.netDb().lookupRouterInfoLocally(_config.getPeer(1)); } - public void receiveEncrypted(byte encrypted[]) { + public long receiveEncrypted(byte encrypted[]) { TunnelDataMessage msg = new TunnelDataMessage(_context); msg.setData(encrypted); msg.setTunnelId(_config.getConfig(0).getSendTunnel()); @@ -38,11 +38,13 @@ class OutboundReceiver implements TunnelGateway.Receiver { if (ri != null) { _nextHopCache = ri; send(msg, ri); + return msg.getUniqueId(); } else { if (_log.shouldLog(Log.DEBUG)) _log.debug("lookup of " + _config.getPeer(1).toBase64().substring(0,4) + " required for " + msg); _context.netDb().lookupRouterInfo(_config.getPeer(1), new 
SendJob(_context, msg), new FailedJob(_context), 10*1000); + return -1; } } diff --git a/router/java/src/net/i2p/router/tunnel/OutboundSender.java b/router/java/src/net/i2p/router/tunnel/OutboundSender.java index 145df3656..e5e1bfb42 100644 --- a/router/java/src/net/i2p/router/tunnel/OutboundSender.java +++ b/router/java/src/net/i2p/router/tunnel/OutboundSender.java @@ -24,15 +24,16 @@ public class OutboundSender implements TunnelGateway.Sender { _processor = new OutboundGatewayProcessor(_context, config); } - public void sendPreprocessed(byte[] preprocessed, TunnelGateway.Receiver receiver) { + public long sendPreprocessed(byte[] preprocessed, TunnelGateway.Receiver receiver) { if (_log.shouldLog(Log.DEBUG)) _log.debug("preprocessed data going out " + _config + ": " + Base64.encode(preprocessed)); if (USE_ENCRYPTION) _processor.process(preprocessed, 0, preprocessed.length); if (_log.shouldLog(Log.DEBUG)) _log.debug("after wrapping up the preprocessed data on " + _config); - receiver.receiveEncrypted(preprocessed); + long rv = receiver.receiveEncrypted(preprocessed); if (_log.shouldLog(Log.DEBUG)) _log.debug("after receiving on " + _config + ": receiver = " + receiver); + return rv; } } diff --git a/router/java/src/net/i2p/router/tunnel/TrivialPreprocessor.java b/router/java/src/net/i2p/router/tunnel/TrivialPreprocessor.java index d1e3579e8..c5a3ecf01 100644 --- a/router/java/src/net/i2p/router/tunnel/TrivialPreprocessor.java +++ b/router/java/src/net/i2p/router/tunnel/TrivialPreprocessor.java @@ -49,14 +49,20 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor { if (_log.shouldLog(Log.DEBUG)) _log.debug("Preprocessed: fragment " + i + "/" + (preprocessed.length-1) + " in " + msg.getMessageId() + ": " + Base64.encode(preprocessed[i])); - sender.sendPreprocessed(preprocessed[i], rec); + long id = sender.sendPreprocessed(preprocessed[i], rec); + msg.addMessageId(id); + } + notePreprocessing(msg.getMessageId(), msg.getFragmentNumber(), preprocessed.length, msg.getMessageIds(), null); + if (preprocessed.length != msg.getFragmentNumber() + 1) { + throw new RuntimeException("wtf, preprocessed " + msg.getMessageId() + " into " + + msg.getFragmentNumber() + "/" + preprocessed.length + " fragments, size = " + + msg.getData().length); } - notePreprocessing(msg.getMessageId(), preprocessed.length); } return false; } - protected void notePreprocessing(long messageId, int numFragments) {} + protected void notePreprocessing(long messageId, int numFragments, int totalLength, List messageIds, String msg) {} private byte[][] preprocess(TunnelGateway.Pending msg) { List fragments = new ArrayList(1); diff --git a/router/java/src/net/i2p/router/tunnel/TrivialRouterPreprocessor.java b/router/java/src/net/i2p/router/tunnel/TrivialRouterPreprocessor.java index 5c8e76b40..62106b531 100644 --- a/router/java/src/net/i2p/router/tunnel/TrivialRouterPreprocessor.java +++ b/router/java/src/net/i2p/router/tunnel/TrivialRouterPreprocessor.java @@ -1,5 +1,6 @@ package net.i2p.router.tunnel; +import java.util.List; import net.i2p.router.RouterContext; /** @@ -14,7 +15,7 @@ public class TrivialRouterPreprocessor extends TrivialPreprocessor { _routerContext = ctx; } - protected void notePreprocessing(long messageId, int numFragments) { - _routerContext.messageHistory().fragmentMessage(messageId, numFragments); + protected void notePreprocessing(long messageId, int numFragments, int totalLength, List messageIds) { + _routerContext.messageHistory().fragmentMessage(messageId, numFragments, 
totalLength, messageIds, null); } } diff --git a/router/java/src/net/i2p/router/tunnel/TunnelCreatorConfig.java b/router/java/src/net/i2p/router/tunnel/TunnelCreatorConfig.java index 715653fb6..e7f47660c 100644 --- a/router/java/src/net/i2p/router/tunnel/TunnelCreatorConfig.java +++ b/router/java/src/net/i2p/router/tunnel/TunnelCreatorConfig.java @@ -1,8 +1,6 @@ package net.i2p.router.tunnel; -import java.util.Date; -import java.util.Locale; -import java.util.Properties; +import java.util.*; import java.text.SimpleDateFormat; import net.i2p.data.Base64; @@ -25,6 +23,8 @@ public class TunnelCreatorConfig implements TunnelInfo { /** gateway first */ private Hash _peers[]; private long _expiration; + private List _order; + private long _replyMessageId; private boolean _isInbound; private long _messagesProcessed; private volatile long _verifiedBytesTransferred; @@ -83,6 +83,13 @@ public class TunnelCreatorConfig implements TunnelInfo { public long getExpiration() { return _expiration; } public void setExpiration(long when) { _expiration = when; } + /** component ordering in the new style request */ + public List getReplyOrder() { return _order; } + public void setReplyOrder(List order) { _order = order; } + /** new style reply message id */ + public long getReplyMessageId() { return _replyMessageId; } + public void setReplyMessageId(long id) { _replyMessageId = id; } + public void testSuccessful(int ms) {} /** take note of a message being pumped through this tunnel */ @@ -158,6 +165,9 @@ public class TunnelCreatorConfig implements TunnelInfo { buf.append(" expiring on ").append(getExpirationString()); if (_destination != null) buf.append(" for ").append(Base64.encode(_destination.getData(), 0, 3)); + if (_replyMessageId > 0) + buf.append(" replyMessageId ").append(_replyMessageId); + buf.append(" with ").append(_messagesProcessed).append("/").append(_verifiedBytesTransferred).append(" msgs/bytes"); return buf.toString(); } diff --git a/router/java/src/net/i2p/router/tunnel/TunnelDispatcher.java b/router/java/src/net/i2p/router/tunnel/TunnelDispatcher.java index e123776ff..efa6dce21 100644 --- a/router/java/src/net/i2p/router/tunnel/TunnelDispatcher.java +++ b/router/java/src/net/i2p/router/tunnel/TunnelDispatcher.java @@ -427,7 +427,7 @@ public class TunnelDispatcher implements Service { + " messageId " + msg.getUniqueId() + "/" + msg.getMessage().getUniqueId() + " messageType: " + msg.getMessage().getClass().getName() - + " existing = " + _inboundGateways.size()); + + " existing = " + _inboundGateways.size(), new Exception("source")); } long dispatchTime = _context.clock().now() - before; @@ -494,7 +494,7 @@ public class TunnelDispatcher implements Service { int level = (_context.router().getUptime() > 10*60*1000 ? 
Log.ERROR : Log.WARN); if (_log.shouldLog(level)) _log.log(level, "no matching outbound tunnel for id=" + outboundTunnel - + ": existing = " + _outboundGateways.size()); + + ": existing = " + _outboundGateways.size(), new Exception("src")); } long dispatchTime = _context.clock().now() - before; diff --git a/router/java/src/net/i2p/router/tunnel/TunnelGateway.java b/router/java/src/net/i2p/router/tunnel/TunnelGateway.java index 6e343e5e8..0646ce870 100644 --- a/router/java/src/net/i2p/router/tunnel/TunnelGateway.java +++ b/router/java/src/net/i2p/router/tunnel/TunnelGateway.java @@ -98,6 +98,8 @@ public class TunnelGateway { synchronized (_queue) { _queue.add(cur); afterAdded = _context.clock().now(); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Added before direct flush preprocessing: " + _queue); delayedFlush = _preprocessor.preprocessQueue(_queue, _sender, _receiver); if (delayedFlush) delayAmount = _preprocessor.getDelayAmount(); @@ -107,13 +109,15 @@ public class TunnelGateway { for (int i = 0; i < _queue.size(); i++) { Pending m = (Pending)_queue.get(i); if (m.getExpiration() + Router.CLOCK_FUDGE_FACTOR < _lastFlush) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Expire on the queue (size=" + _queue.size() + "): " + m); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Expire on the queue (size=" + _queue.size() + "): " + m); _queue.remove(i); i--; } } remaining = _queue.size(); + if ( (remaining > 0) && (_log.shouldLog(Log.DEBUG)) ) + _log.debug("Remaining after preprocessing: " + _queue); } if (delayedFlush) { @@ -130,8 +134,9 @@ public class TunnelGateway { * it, and pass it on to the receiver * * @param preprocessed IV + (rand padding) + 0x0 + Hash[0:3] + {instruction+fragment}* + * @return message ID it was sent in, or -1 if it was deferred */ - public void sendPreprocessed(byte preprocessed[], Receiver receiver); + public long sendPreprocessed(byte preprocessed[], Receiver receiver); } public interface QueuePreprocessor { @@ -150,8 +155,9 @@ public class TunnelGateway { public interface Receiver { /** * Take the encrypted data and send it off to the next hop + * @return message ID it was sent in, or -1 if it had to be deferred */ - public void receiveEncrypted(byte encrypted[]); + public long receiveEncrypted(byte encrypted[]); } public static class Pending { @@ -163,6 +169,7 @@ public class TunnelGateway { protected int _offset; protected int _fragmentNumber; protected long _created; + private List _messageIds; public Pending(I2NPMessage message, Hash toRouter, TunnelId toTunnel) { this(message, toRouter, toTunnel, System.currentTimeMillis()); @@ -176,6 +183,7 @@ public class TunnelGateway { _offset = 0; _fragmentNumber = 0; _created = now; + _messageIds = null; } /** may be null */ public Hash getToRouter() { return _toRouter; } @@ -194,6 +202,21 @@ public class TunnelGateway { public int getFragmentNumber() { return _fragmentNumber; } /** ok, fragment sent, increment what the next will be */ public void incrementFragmentNumber() { _fragmentNumber++; } + public void addMessageId(long id) { + synchronized (Pending.this) { + if (_messageIds == null) + _messageIds = new ArrayList(); + _messageIds.add(new Long(id)); + } + } + public List getMessageIds() { + synchronized (Pending.this) { + if (_messageIds != null) + return new ArrayList(_messageIds); + else + return new ArrayList(); + } + } } private class PendingImpl extends Pending { public PendingImpl(I2NPMessage message, Hash toRouter, TunnelId toTunnel) { @@ -238,9 +261,13 @@ public class TunnelGateway { 
System.out.println("foo!"); afterChecked = _context.clock().now(); if (_queue.size() > 0) { + if ( (remaining > 0) && (_log.shouldLog(Log.DEBUG)) ) + _log.debug("Remaining before delayed flush preprocessing: " + _queue); wantRequeue = _preprocessor.preprocessQueue(_queue, _sender, _receiver); if (wantRequeue) delayAmount = _preprocessor.getDelayAmount(); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Remaining after delayed flush preprocessing (requeue? " + wantRequeue + "): " + _queue); } remaining = _queue.size(); } diff --git a/router/java/src/net/i2p/router/tunnel/TunnelParticipant.java b/router/java/src/net/i2p/router/tunnel/TunnelParticipant.java index 0dad70c92..6f27e561f 100644 --- a/router/java/src/net/i2p/router/tunnel/TunnelParticipant.java +++ b/router/java/src/net/i2p/router/tunnel/TunnelParticipant.java @@ -68,8 +68,8 @@ public class TunnelParticipant { ok = _inboundEndpointProcessor.retrievePreprocessedData(msg.getData(), 0, msg.getData().length, recvFrom); if (!ok) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Failed to dispatch " + msg + ": processor=" + _processor + if (_log.shouldLog(Log.ERROR)) + _log.error("Failed to dispatch " + msg + ": processor=" + _processor + " inboundEndpoint=" + _inboundEndpointProcessor); return; } @@ -147,7 +147,10 @@ public class TunnelParticipant { } private void send(HopConfig config, TunnelDataMessage msg, RouterInfo ri) { - msg.setUniqueId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE)); + long oldId = msg.getUniqueId(); + long newId = _context.random().nextLong(I2NPMessage.MAX_ID_VALUE); + _context.messageHistory().wrap("TunnelDataMessage", oldId, "TunnelDataMessage", newId); + msg.setUniqueId(newId); msg.setMessageExpiration(_context.clock().now() + 10*1000); OutNetMessage m = new OutNetMessage(_context); msg.setTunnelId(config.getSendTunnel()); diff --git a/router/java/src/net/i2p/router/tunnel/pool/BuildExecutor.java b/router/java/src/net/i2p/router/tunnel/pool/BuildExecutor.java index 876d2160e..79683925e 100644 --- a/router/java/src/net/i2p/router/tunnel/pool/BuildExecutor.java +++ b/router/java/src/net/i2p/router/tunnel/pool/BuildExecutor.java @@ -1,11 +1,11 @@ package net.i2p.router.tunnel.pool; import java.util.*; -import net.i2p.router.Job; -import net.i2p.router.JobImpl; -import net.i2p.router.RouterContext; -import net.i2p.router.TunnelManagerFacade; -import net.i2p.router.tunnel.TunnelCreatorConfig; +import net.i2p.data.*; +import net.i2p.data.i2np.*; +import net.i2p.router.*; +import net.i2p.router.tunnel.*; +import net.i2p.router.peermanager.TunnelHistory; import net.i2p.util.Log; /** @@ -23,7 +23,9 @@ class BuildExecutor implements Runnable { /** list of TunnelCreatorConfig elements of tunnels currently being built */ private List _currentlyBuilding; private boolean _isRunning; - + private BuildHandler _handler; + private boolean _repoll; + public BuildExecutor(RouterContext ctx, TunnelPoolManager mgr) { _context = ctx; _log = ctx.logManager().getLog(getClass()); @@ -31,6 +33,16 @@ class BuildExecutor implements Runnable { _currentlyBuilding = new ArrayList(10); _context.statManager().createRateStat("tunnel.concurrentBuilds", "How many builds are going at once", "Tunnels", new long[] { 60*1000, 5*60*1000, 60*60*1000 }); _context.statManager().createRateStat("tunnel.concurrentBuildsLagged", "How many builds are going at once when we reject further builds, due to job lag (period is lag)", "Tunnels", new long[] { 60*1000, 5*60*1000, 60*60*1000 }); + 
_context.statManager().createRateStat("tunnel.buildExploratoryExpire", "How often an exploratory tunnel times out during creation", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _context.statManager().createRateStat("tunnel.buildClientExpire", "How often a client tunnel times out during creation", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _context.statManager().createRateStat("tunnel.buildExploratorySuccess", "How often an exploratory tunnel is fully built", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _context.statManager().createRateStat("tunnel.buildClientSuccess", "How often a client tunnel is fully built", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _context.statManager().createRateStat("tunnel.buildExploratoryReject", "How often an exploratory tunnel is rejected", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _context.statManager().createRateStat("tunnel.buildClientReject", "How often a client tunnel is rejected", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _context.statManager().createRateStat("tunnel.buildRequestTime", "How long it takes to build a tunnel request", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _context.statManager().createRateStat("tunnel.buildRequestZeroHopTime", "How long it takes to build a zero hop tunnel", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _repoll = false; + _handler = new BuildHandler(ctx, this); } private int allowed() { @@ -44,13 +56,44 @@ class BuildExecutor implements Runnable { if (prop != null) try { allowed = Integer.valueOf(prop).intValue(); } catch (NumberFormatException nfe) {} + List expired = null; int concurrent = 0; + long expireBefore = _context.clock().now() + 10*60*1000 - BuildRequestor.REQUEST_TIMEOUT; synchronized (_currentlyBuilding) { + // expire any old requests + for (int i = 0; i < _currentlyBuilding.size(); i++) { + TunnelCreatorConfig cfg = (TunnelCreatorConfig)_currentlyBuilding.get(i); + if (cfg.getExpiration() <= expireBefore) { + _currentlyBuilding.remove(i); + i--; + if (expired == null) + expired = new ArrayList(); + expired.add(cfg); + } + } concurrent = _currentlyBuilding.size(); allowed -= concurrent; if (buf != null) buf.append(allowed).append(" ").append(_currentlyBuilding.toString()); } + + if (expired != null) { + for (int i = 0; i < expired.size(); i++) { + PooledTunnelCreatorConfig cfg = (PooledTunnelCreatorConfig)expired.get(i); + // note the fact that this tunnel request timed out in the peers' profiles. + // or... not. 
+ if (_log.shouldLog(Log.ERROR)) + _log.error("Timed out waiting for reply asking for " + cfg); + TunnelPool pool = cfg.getTunnelPool(); + if (pool != null) + pool.buildComplete(cfg); + if (cfg.getDestination() == null) + _context.statManager().addRateData("tunnel.buildExploratoryExpire", 1, 0); + else + _context.statManager().addRateData("tunnel.buildClientExpire", 1, 0); + } + } + if (buf != null) _log.debug(buf.toString()); @@ -76,6 +119,7 @@ class BuildExecutor implements Runnable { while (!_manager.isShutdown()){ try { + _repoll = false; _manager.listPools(pools); for (int i = 0; i < pools.size(); i++) { TunnelPool pool = (TunnelPool)pools.get(i); @@ -84,6 +128,9 @@ class BuildExecutor implements Runnable { wanted.add(pool); } + _handler.handleInboundReplies(); + + // allowed() also expires timed out requests (for new style requests) int allowed = allowed(); if (_log.shouldLog(Log.DEBUG)) @@ -100,11 +147,18 @@ class BuildExecutor implements Runnable { // we don't have either inbound or outbound tunnels, so don't bother trying to build // non-zero-hop tunnels synchronized (_currentlyBuilding) { - _currentlyBuilding.wait(5*1000+_context.random().nextInt(5*1000)); + if (!_repoll) + _currentlyBuilding.wait(5*1000+_context.random().nextInt(5*1000)); } } else { if ( (allowed > 0) && (wanted.size() > 0) ) { Collections.shuffle(wanted, _context.random()); + + // force the loops to be short, since 20 consecutive tunnel build requests can take + // a long, long time + if (allowed > 5) + allowed = 5; + for (int i = 0; (i < allowed) && (wanted.size() > 0); i++) { TunnelPool pool = (TunnelPool)wanted.remove(0); //if (pool.countWantedTunnels() <= 0) @@ -120,6 +174,9 @@ class BuildExecutor implements Runnable { // 0hops are taken care of above, these are nonstandard 0hops //if (cfg.getLength() <= 1) // i--; //0hop, we can keep going, as there's no worry about throttling + + // we want replies to go to the top of the queue + _handler.handleInboundReplies(); } else { i--; } @@ -129,16 +186,20 @@ class BuildExecutor implements Runnable { _log.debug("Nothin' doin, wait for a while"); try { synchronized (_currentlyBuilding) { - if (allowed <= 0) - _currentlyBuilding.wait(_context.random().nextInt(5*1000)); - else // wanted <= 0 - _currentlyBuilding.wait(_context.random().nextInt(30*1000)); + if (!_repoll) { + //if (allowed <= 0) + _currentlyBuilding.wait(_context.random().nextInt(5*1000)); + //else // wanted <= 0 + // _currentlyBuilding.wait(_context.random().nextInt(30*1000)); + } } } catch (InterruptedException ie) { // someone wanted to build something } } } + + _handler.handleInboundRequests(); wanted.clear(); pools.clear(); @@ -148,8 +209,8 @@ class BuildExecutor implements Runnable { } } - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Done building"); + if (_log.shouldLog(Log.WARN)) + _log.warn("Done building"); _isRunning = false; } @@ -186,15 +247,21 @@ class BuildExecutor implements Runnable { public boolean isRunning() { return _isRunning; } void buildTunnel(TunnelPool pool, PooledTunnelCreatorConfig cfg) { - // old style here, replace with the new crypto stuff later - CompleteJob onCreated = new CompleteJob(_context, cfg, new SuccessJob(_context, cfg, pool), pool); - CompleteJob onFailed = new CompleteJob(_context, cfg, null, pool); - RequestTunnelJob j = new RequestTunnelJob(_context, cfg, onCreated, onFailed, cfg.getLength()-1, false, cfg.getDestination()==null); - if (cfg.getLength() <= 1) // length == 1 ==> hops = 0, so do it inline (as its immediate) - j.runJob(); + long beforeBuild = 
System.currentTimeMillis(); + BuildRequestor.request(_context, pool, cfg, this); + long buildTime = System.currentTimeMillis() - beforeBuild; + if (cfg.getLength() <= 1) + _context.statManager().addRateData("tunnel.buildRequestZeroHopTime", buildTime, buildTime); else - j.runJob(); // always inline, as this is on its own thread so it can block - //_context.jobQueue().addJob(j); + _context.statManager().addRateData("tunnel.buildRequestTime", buildTime, buildTime); + long id = cfg.getReplyMessageId(); + if (id > 0) { + synchronized (_recentBuildIds) { + while (_recentBuildIds.size() > 64) + _recentBuildIds.remove(0); + _recentBuildIds.add(new Long(id)); + } + } } public void buildComplete(PooledTunnelCreatorConfig cfg, TunnelPool pool) { @@ -205,61 +272,30 @@ class BuildExecutor implements Runnable { _currentlyBuilding.remove(cfg); _currentlyBuilding.notifyAll(); } + long expireBefore = _context.clock().now() + 10*60*1000 - BuildRequestor.REQUEST_TIMEOUT; + if (cfg.getExpiration() <= expireBefore) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Build complete for expired tunnel: " + cfg); + } + } + + private List _recentBuildIds = new ArrayList(100); + public boolean wasRecentlyBuilding(long replyId) { + synchronized (_recentBuildIds) { + return _recentBuildIds.contains(new Long(replyId)); + } + } + + public void buildSuccessful(PooledTunnelCreatorConfig cfg) { + _manager.buildComplete(cfg); } public void repoll() { - synchronized (_currentlyBuilding) { _currentlyBuilding.notifyAll(); } + synchronized (_currentlyBuilding) { + _repoll = true; + _currentlyBuilding.notifyAll(); + } } - - private class CompleteJob extends JobImpl { - private PooledTunnelCreatorConfig _cfg; - private TunnelPool _pool; - private Job _onRun; - public CompleteJob(RouterContext ctx, PooledTunnelCreatorConfig cfg, Job onRun, TunnelPool pool) { - super(ctx); - _cfg = cfg; - _onRun = onRun; - _pool = pool; - } - public String getName() { return "Tunnel create complete"; } - public void runJob() { - if (_onRun != null) - _onRun.runJob(); - //getContext().jobQueue().addJob(_onRun); - buildComplete(_cfg, _pool); - } - } - private class SuccessJob extends JobImpl { - private PooledTunnelCreatorConfig _cfg; - private TunnelPool _pool; - public SuccessJob(RouterContext ctx, PooledTunnelCreatorConfig cfg, TunnelPool pool) { - super(ctx); - _cfg = cfg; - _pool = pool; - } - public String getName() { return "Tunnel create successful"; } - public void runJob() { - _log.debug("Created successfully: " + _cfg); - if (_cfg.isInbound()) { - getContext().tunnelDispatcher().joinInbound(_cfg); - } else { - getContext().tunnelDispatcher().joinOutbound(_cfg); - } - - _pool.addTunnel(_cfg); - _pool.getManager().buildComplete(_cfg); - TestJob testJob = (_cfg.getLength() > 1 ? 
new TestJob(getContext(), _cfg, _pool) : null); - //RebuildJob rebuildJob = new RebuildJob(getContext(), _cfg, _pool); - ExpireJob expireJob = new ExpireJob(getContext(), _cfg, _pool); - _cfg.setTunnelPool(_pool); - _cfg.setTestJob(testJob); - //_cfg.setRebuildJob(rebuildJob); - _cfg.setExpireJob(expireJob); - if (_cfg.getLength() > 1) // no need to test 0 hop tunnels - getContext().jobQueue().addJob(testJob); - //getContext().jobQueue().addJob(rebuildJob); // always try to rebuild (ignored if too many) - getContext().jobQueue().addJob(expireJob); - } - } + List locked_getCurrentlyBuilding() { return _currentlyBuilding; } } diff --git a/router/java/src/net/i2p/router/tunnel/pool/BuildHandler.java b/router/java/src/net/i2p/router/tunnel/pool/BuildHandler.java new file mode 100644 index 000000000..306ea9968 --- /dev/null +++ b/router/java/src/net/i2p/router/tunnel/pool/BuildHandler.java @@ -0,0 +1,586 @@ +package net.i2p.router.tunnel.pool; + +import java.util.*; +import net.i2p.data.*; +import net.i2p.data.i2np.*; +import net.i2p.router.*; +import net.i2p.router.tunnel.*; +import net.i2p.router.peermanager.TunnelHistory; +import net.i2p.util.Log; + +/** + * + */ +class BuildHandler { + private RouterContext _context; + private Log _log; + private BuildExecutor _exec; + private Job _buildMessageHandlerJob; + private Job _buildReplyMessageHandlerJob; + /** list of BuildMessageState, oldest first */ + private List _inboundBuildMessages; + /** list of BuildReplyMessageState, oldest first */ + private List _inboundBuildReplyMessages; + /** list of BuildEndMessageState, oldest first */ + private List _inboundBuildEndMessages; + private BuildMessageProcessor _processor; + + public BuildHandler(RouterContext ctx, BuildExecutor exec) { + _context = ctx; + _log = ctx.logManager().getLog(getClass()); + _exec = exec; + _inboundBuildMessages = new ArrayList(16); + _inboundBuildReplyMessages = new ArrayList(16); + _inboundBuildEndMessages = new ArrayList(16); + + _context.statManager().createRateStat("tunnel.reject.10", "How often we reject a tunnel probabalistically", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _context.statManager().createRateStat("tunnel.reject.20", "How often we reject a tunnel because of transient overload", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _context.statManager().createRateStat("tunnel.reject.30", "How often we reject a tunnel because of bandwidth overload", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _context.statManager().createRateStat("tunnel.reject.50", "How often we reject a tunnel because of a critical issue (shutdown, etc)", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + + _context.statManager().createRateStat("tunnel.decryptRequestTime", "How long it takes to decrypt a new tunnel build request", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _context.statManager().createRateStat("tunnel.rejectTimeout", "How often we reject a tunnel because we can't find the next hop", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + + _context.statManager().createRateStat("tunnel.rejectOverloaded", "How long we had to wait before processing the request (when it was rejected)", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _context.statManager().createRateStat("tunnel.acceptLoad", "How long we had to wait before processing the request (when it was accepted)", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + + _context.statManager().createRateStat("tunnel.receiveRejectionProbabalistic", "How often we are rejected probabalistically?", "Tunnels", new 
long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createRateStat("tunnel.receiveRejectionTransient", "How often we are rejected due to transient overload?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createRateStat("tunnel.receiveRejectionBandwidth", "How often we are rejected due to bandwidth overload?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); + _context.statManager().createRateStat("tunnel.receiveRejectionCritical", "How often we are rejected due to critical failure?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); + + _processor = new BuildMessageProcessor(ctx); + _buildMessageHandlerJob = new TunnelBuildMessageHandlerJob(ctx); + _buildReplyMessageHandlerJob = new TunnelBuildReplyMessageHandlerJob(ctx); + ctx.inNetMessagePool().registerHandlerJobBuilder(TunnelBuildMessage.MESSAGE_TYPE, new TunnelBuildMessageHandlerJobBuilder()); + ctx.inNetMessagePool().registerHandlerJobBuilder(TunnelBuildReplyMessage.MESSAGE_TYPE, new TunnelBuildReplyMessageHandlerJobBuilder()); + } + + private static final int MAX_HANDLE_AT_ONCE = 5; + private static final int NEXT_HOP_LOOKUP_TIMEOUT = 5*1000; + + void handleInboundRequests() { + List handled = null; + synchronized (_inboundBuildMessages) { + int toHandle = _inboundBuildMessages.size(); + if (toHandle > 0) { + if (toHandle > MAX_HANDLE_AT_ONCE) + toHandle = MAX_HANDLE_AT_ONCE; + handled = new ArrayList(toHandle); + for (int i = 0; i < toHandle; i++) + handled.add(_inboundBuildMessages.remove(0)); + } + } + if (handled != null) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Handling " + handled.size() + " requests"); + + for (int i = 0; i < handled.size(); i++) { + BuildMessageState state = (BuildMessageState)handled.get(i); + handleRequest(state); + } + handled.clear(); + } + synchronized (_inboundBuildEndMessages) { + int toHandle = _inboundBuildEndMessages.size(); + if (toHandle > 0) { + if (handled == null) + handled = new ArrayList(_inboundBuildEndMessages); + else + handled.addAll(_inboundBuildEndMessages); + _inboundBuildEndMessages.clear(); + } + } + if (handled != null) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Handling " + handled.size() + " requests that are actually replies"); + // these are inbound build messages that actually contain the full replies, since + // they are for inbound tunnels we have created + for (int i = 0; i < handled.size(); i++) { + BuildEndMessageState state = (BuildEndMessageState)handled.get(i); + handleRequestAsInboundEndpoint(state); + } + } + } + + void handleInboundReplies() { + List handled = null; + synchronized (_inboundBuildReplyMessages) { + int toHandle = _inboundBuildReplyMessages.size(); + if (toHandle > 0) { + // always handle all of them - they're replies that we were waiting for! 
+ handled = new ArrayList(_inboundBuildReplyMessages); + _inboundBuildReplyMessages.clear(); + } + } + if (handled != null) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Handling " + handled.size() + " replies"); + + for (int i = 0; i < handled.size(); i++) { + BuildReplyMessageState state = (BuildReplyMessageState)handled.get(i); + handleReply(state); + } + } + } + + private void handleReply(BuildReplyMessageState state) { + // search through the tunnels for a reply + long replyMessageId = state.msg.getUniqueId(); + PooledTunnelCreatorConfig cfg = null; + List building = _exec.locked_getCurrentlyBuilding(); + StringBuffer buf = null; + synchronized (building) { + for (int i = 0; i < building.size(); i++) { + PooledTunnelCreatorConfig cur = (PooledTunnelCreatorConfig)building.get(i); + if (cur.getReplyMessageId() == replyMessageId) { + building.remove(i); + cfg = cur; + break; + } + } + if ( (cfg == null) && (_log.shouldLog(Log.DEBUG)) ) + buf = new StringBuffer(building.toString()); + } + + if (cfg == null) { + // cannot handle - not pending... took too long? + if (_log.shouldLog(Log.WARN)) + _log.warn("The reply " + replyMessageId + " did not match any pending tunnels"); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Pending tunnels: " + buf.toString()); + } else { + handleReply(state.msg, cfg, System.currentTimeMillis()-state.recvTime); + } + } + + private void handleReply(TunnelBuildReplyMessage msg, PooledTunnelCreatorConfig cfg, long delay) { + long requestedOn = cfg.getExpiration() - 10*60*1000; + long rtt = _context.clock().now() - requestedOn; + if (_log.shouldLog(Log.DEBUG)) + _log.debug(msg.getUniqueId() + ": Handling the reply after " + rtt + ", delayed " + delay + " waiting for " + cfg); + + BuildReplyHandler handler = new BuildReplyHandler(); + List order = cfg.getReplyOrder(); + int statuses[] = handler.decrypt(_context, msg, cfg, order); + if (statuses != null) { + boolean allAgree = true; + for (int i = 0; i < cfg.getLength(); i++) { + Hash peer = cfg.getPeer(i); + int record = order.indexOf(new Integer(i)); + int howBad = statuses[record]; + if (_log.shouldLog(Log.DEBUG)) + _log.debug(msg.getUniqueId() + ": Peer " + peer.toBase64() + " replied with status " + howBad); + + if (howBad == 0) { + // w3wt + } else { + allAgree = false; + switch (howBad) { + case TunnelHistory.TUNNEL_REJECT_BANDWIDTH: + _context.statManager().addRateData("tunnel.receiveRejectionBandwidth", 1, 0); + break; + case TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD: + _context.statManager().addRateData("tunnel.receiveRejectionTransient", 1, 0); + break; + case TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT: + _context.statManager().addRateData("tunnel.receiveRejectionProbabalistic", 1, 0); + break; + case TunnelHistory.TUNNEL_REJECT_CRIT: + default: + _context.statManager().addRateData("tunnel.receiveRejectionCritical", 1, 0); + } + // penalize peer based on their bitchiness level + _context.profileManager().tunnelRejected(peer, rtt, howBad); + _context.messageHistory().tunnelParticipantRejected(peer, "peer rejected after " + rtt + " with " + howBad + ": " + cfg.toString()); + } + } + if (allAgree) { + // wikked, completely build + _exec.buildComplete(cfg, cfg.getTunnelPool()); + if (cfg.isInbound()) + _context.tunnelDispatcher().joinInbound(cfg); + else + _context.tunnelDispatcher().joinOutbound(cfg); + cfg.getTunnelPool().addTunnel(cfg); // self.self.self.foo! 
+                _exec.buildSuccessful(cfg);
+
+                ExpireJob expireJob = new ExpireJob(_context, cfg, cfg.getTunnelPool());
+                cfg.setExpireJob(expireJob);
+                _context.jobQueue().addJob(expireJob);
+                if (cfg.getDestination() == null)
+                    _context.statManager().addRateData("tunnel.buildExploratorySuccess", rtt, rtt);
+                else
+                    _context.statManager().addRateData("tunnel.buildClientSuccess", rtt, rtt);
+            } else {
+                // someone is no fun
+                _exec.buildComplete(cfg, cfg.getTunnelPool());
+                if (cfg.getDestination() == null)
+                    _context.statManager().addRateData("tunnel.buildExploratoryReject", rtt, rtt);
+                else
+                    _context.statManager().addRateData("tunnel.buildClientReject", rtt, rtt);
+            }
+        } else {
+            if (_log.shouldLog(Log.ERROR))
+                _log.error(msg.getUniqueId() + ": Tunnel reply could not be decrypted for tunnel " + cfg);
+        }
+    }
+
+    private void handleRequest(BuildMessageState state) {
+        long timeSinceReceived = System.currentTimeMillis()-state.recvTime;
+        if (_log.shouldLog(Log.DEBUG))
+            _log.debug(state.msg.getUniqueId() + ": handling request after " + timeSinceReceived);
+
+        if (timeSinceReceived > BuildRequestor.REQUEST_TIMEOUT*2) {
+            // don't even bother, since we are so overloaded locally
+            if (_log.shouldLog(Log.ERROR))
+                _log.error("Not even trying to handle/decrypt the request " + state.msg.getUniqueId()
+                           + ", since we received it a long time ago: " + timeSinceReceived);
+            return;
+        }
+        // ok, this is not our own tunnel, so we need to do some heavy lifting
+        // this not only decrypts the current hop's record, but encrypts the other records
+        // with the enclosed reply key
+        long beforeDecrypt = System.currentTimeMillis();
+        BuildRequestRecord req = _processor.decrypt(_context, state.msg, _context.routerHash(), _context.keyManager().getPrivateKey());
+        long decryptTime = System.currentTimeMillis() - beforeDecrypt;
+        _context.statManager().addRateData("tunnel.decryptRequestTime", decryptTime, decryptTime);
+        if (req == null) {
+            // no records matched, or the decryption failed.  bah
+            if (_log.shouldLog(Log.ERROR))
+                _log.error("The request " + state.msg.getUniqueId() + " could not be decrypted");
+            return;
+        }
+
+        Hash nextPeer = req.readNextIdentity();
+        RouterInfo nextPeerInfo = _context.netDb().lookupRouterInfoLocally(nextPeer);
+        if (nextPeerInfo == null) {
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("Request " + state.msg.getUniqueId() + "/" + req.readReceiveTunnelId() + "/" + req.readNextTunnelId()
+                           + " handled, looking for the next peer " + nextPeer.toBase64());
+            _context.netDb().lookupRouterInfo(nextPeer, new HandleReq(_context, state, req, nextPeer), new TimeoutReq(_context, state, req, nextPeer), NEXT_HOP_LOOKUP_TIMEOUT);
+        } else {
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("Request " + state.msg.getUniqueId() + " handled and we know the next peer " + nextPeer.toBase64());
+            handleReq(nextPeerInfo, state, req, nextPeer);
+        }
+    }
+
+    /**
+     * This request is actually a reply, process it as such
+     */
+    private void handleRequestAsInboundEndpoint(BuildEndMessageState state) {
+        TunnelBuildReplyMessage msg = new TunnelBuildReplyMessage(_context);
+        for (int i = 0; i < TunnelBuildMessage.RECORD_COUNT; i++)
+            msg.setRecord(i, state.msg.getRecord(i));
+        msg.setUniqueId(state.msg.getUniqueId());
+        handleReply(msg, state.cfg, System.currentTimeMillis() - state.recvTime);
+    }
+
+    private class HandleReq extends JobImpl {
+        private BuildMessageState _state;
+        private BuildRequestRecord _req;
+        private Hash _nextPeer;
+        HandleReq(RouterContext ctx, BuildMessageState state, BuildRequestRecord req, Hash nextPeer) {
+            super(ctx);
+            _state = state;
+            _req = req;
+            _nextPeer = nextPeer;
+        }
+        public String getName() { return "Deferred tunnel join processing"; }
+        public void runJob() {
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("Request " + _state.msg.getUniqueId() + " handled with a successful deferred lookup for the next peer " + _nextPeer.toBase64());
+
+            handleReq(getContext().netDb().lookupRouterInfoLocally(_nextPeer), _state, _req, _nextPeer);
+        }
+    }
+
+    private class TimeoutReq extends JobImpl {
+        private BuildMessageState _state;
+        private BuildRequestRecord _req;
+        private Hash _nextPeer;
+        TimeoutReq(RouterContext ctx, BuildMessageState state, BuildRequestRecord req, Hash nextPeer) {
+            super(ctx);
+            _state = state;
+            _req = req;
+            _nextPeer = nextPeer;
+        }
+        public String getName() { return "Timeout looking for next peer for tunnel join"; }
+        public void runJob() {
+            getContext().statManager().addRateData("tunnel.rejectTimeout", 1, 1);
+            if (_log.shouldLog(Log.ERROR))
+                _log.error("Request " + _state.msg.getUniqueId()
+                           + " could not be satisfied, as the next peer could not be found: " + _nextPeer.toBase64());
+            getContext().messageHistory().tunnelRejected(_state.fromHash, new TunnelId(_req.readReceiveTunnelId()), _nextPeer,
+                                                         "rejected because we couldn't find " + _nextPeer.toBase64() + ": "
+                                                         + _state.msg.getUniqueId() + "/" + _req.readNextTunnelId());
+        }
+    }
+
+    private void handleReq(RouterInfo nextPeerInfo, BuildMessageState state, BuildRequestRecord req, Hash nextPeer) {
+        long ourId = req.readReceiveTunnelId();
+        long nextId = req.readNextTunnelId();
+        boolean isInGW = req.readIsInboundGateway();
+        boolean isOutEnd = req.readIsOutboundEndpoint();
+        long time = req.readRequestTime();
+        long now = (_context.clock().now() / (60l*60l*1000l)) * (60*60*1000);
+        int ourSlot = -1;
+
+        int response = _context.throttle().acceptTunnelRequest();
+        if (_context.tunnelManager().getTunnelInfo(new TunnelId(ourId)) != null) {
+            if (_log.shouldLog(Log.ERROR))
+ _log.error("Already participating in a tunnel with the given Id (" + ourId + "), so gotta reject"); + if (response == 0) + response = TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT; + } + + //if ( (response == 0) && (_context.random().nextInt(50) <= 1) ) + // response = TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT; + + long recvDelay = System.currentTimeMillis()-state.recvTime; + if ( (response == 0) && (recvDelay > BuildRequestor.REQUEST_TIMEOUT) ) { + _context.statManager().addRateData("tunnel.rejectOverloaded", recvDelay, recvDelay); + response = TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD; + } else if (response == 0) { + _context.statManager().addRateData("tunnel.acceptLoad", recvDelay, recvDelay); + } + + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Responding to " + state.msg.getUniqueId() + "/" + ourId + + " after " + recvDelay + " with " + response + + " from " + (state.fromHash != null ? state.fromHash.toBase64() : + state.from != null ? state.from.calculateHash().toBase64() : "tunnel")); + + if (response == 0) { + HopConfig cfg = new HopConfig(); + cfg.setExpiration(_context.clock().now() + 10*60*1000); + cfg.setIVKey(req.readIVKey()); + cfg.setLayerKey(req.readLayerKey()); + if (isInGW) { + cfg.setReceiveFrom(null); + } else { + if (state.fromHash != null) { + cfg.setReceiveFrom(state.fromHash); + } else if (state.from != null) { + cfg.setReceiveFrom(state.from.calculateHash()); + } else { + // b0rk + return; + } + } + cfg.setReceiveTunnelId(DataHelper.toLong(4, ourId)); + if (isOutEnd) { + cfg.setSendTo(null); + cfg.setSendTunnelId(null); + } else { + cfg.setSendTo(req.readNextIdentity()); + cfg.setSendTunnelId(DataHelper.toLong(4, nextId)); + } + + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Joining " + state.msg.getUniqueId() + "/" + cfg.getReceiveTunnel() + "/" + recvDelay + " as " + (isOutEnd ? "outbound endpoint" : isInGW ? "inbound gw" : "participant")); + + // now "actually" join + if (isOutEnd) + _context.tunnelDispatcher().joinOutboundEndpoint(cfg); + else if (isInGW) + _context.tunnelDispatcher().joinInboundGateway(cfg); + else + _context.tunnelDispatcher().joinParticipant(cfg); + } else { + _context.statManager().addRateData("tunnel.reject." + response, 1, 1); + _context.messageHistory().tunnelRejected(state.fromHash, new TunnelId(ourId), req.readNextIdentity(), + "rejecting for " + response + ": " + + state.msg.getUniqueId() + "/" + ourId + "/" + req.readNextTunnelId() + " delay " + + recvDelay + " as " + + (isOutEnd ? "outbound endpoint" : isInGW ? "inbound gw" : "participant")); + } + + BuildResponseRecord resp = new BuildResponseRecord(); + byte reply[] = resp.create(_context, response, req.readReplyKey(), req.readReplyIV(), state.msg.getUniqueId()); + for (int j = 0; j < TunnelBuildMessage.RECORD_COUNT; j++) { + if (state.msg.getRecord(j) == null) { + ourSlot = j; + state.msg.setRecord(j, new ByteArray(reply)); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Full reply record for slot " + ourSlot + "/" + ourId + "/" + nextId + "/" + req.readReplyMessageId() + + ": " + Base64.encode(reply)); + break; + } + } + + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Read slot " + ourSlot + " containing our hop @ " + _context.routerHash().toBase64() + + " accepted? " + response + " receiving on " + ourId + + " sending to " + nextId + + " on " + nextPeer.toBase64() + + " inGW? " + isInGW + " outEnd? 
" + isOutEnd + " time difference " + (now-time) + + " recvDelay " + recvDelay + " replyMessage " + req.readReplyMessageId() + + " replyKey " + req.readReplyKey().toBase64() + " replyIV " + Base64.encode(req.readReplyIV())); + + // now actually send the response + if (!isOutEnd) { + state.msg.setUniqueId(req.readReplyMessageId()); + state.msg.setMessageExpiration(_context.clock().now() + 10*1000); + OutNetMessage msg = new OutNetMessage(_context); + msg.setMessage(state.msg); + msg.setExpiration(state.msg.getMessageExpiration()); + msg.setPriority(300); + msg.setTarget(nextPeerInfo); + _context.outNetMessagePool().add(msg); + } else { + // send it to the reply tunnel on the reply peer within a new TunnelBuildReplyMessage + // (enough layers jrandom?) + TunnelBuildReplyMessage replyMsg = new TunnelBuildReplyMessage(_context); + for (int i = 0; i < state.msg.RECORD_COUNT; i++) + replyMsg.setRecord(i, state.msg.getRecord(i)); + replyMsg.setUniqueId(req.readReplyMessageId()); + replyMsg.setMessageExpiration(_context.clock().now() + 10*1000); + TunnelGatewayMessage m = new TunnelGatewayMessage(_context); + m.setMessage(replyMsg); + m.setMessageExpiration(replyMsg.getMessageExpiration()); + m.setTunnelId(new TunnelId(nextId)); + if (_context.routerHash().equals(nextPeer)) { + // ok, we are the gateway, so inject it + if (_log.shouldLog(Log.DEBUG)) + _log.debug("We are the reply gateway for " + nextId + + " when replying to replyMessage " + req.readReplyMessageId()); + _context.tunnelDispatcher().dispatch(m); + } else { + // ok, the gateway is some other peer, shove 'er across + OutNetMessage outMsg = new OutNetMessage(_context); + outMsg.setExpiration(m.getMessageExpiration()); + outMsg.setMessage(m); + outMsg.setPriority(300); + outMsg.setTarget(nextPeerInfo); + _context.outNetMessagePool().add(outMsg); + } + } + } + + private static final boolean HANDLE_REPLIES_INLINE = true; + + private class TunnelBuildMessageHandlerJobBuilder implements HandlerJobBuilder { + public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) { + // need to figure out if this is a reply to an inbound tunnel request (where we are the + // endpoint, receiving the request at the last hop) + long reqId = receivedMessage.getUniqueId(); + PooledTunnelCreatorConfig cfg = null; + List building = _exec.locked_getCurrentlyBuilding(); + List ids = new ArrayList(); + synchronized (building) { + for (int i = 0; i < building.size(); i++) { + PooledTunnelCreatorConfig cur = (PooledTunnelCreatorConfig)building.get(i); + ids.add(new Long(cur.getReplyMessageId())); + if ( (cur.isInbound()) && (cur.getReplyMessageId() == reqId) ) { + building.remove(i); + cfg = cur; + break; + } else if (cur.getReplyMessageId() == reqId) { + _log.error("received it, but its not inbound? " + cur); + } + } + } + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Receive tunnel build message " + reqId + " from " + + (from != null ? from.calculateHash().toBase64() : fromHash != null ? fromHash.toBase64() : "tunnels") + + ", waiting ids: " + ids + ", found matching tunnel? 
" + (cfg != null), + new Exception("source")); + if (cfg != null) { + BuildEndMessageState state = new BuildEndMessageState(cfg, receivedMessage, from, fromHash); + if (HANDLE_REPLIES_INLINE) { + handleRequestAsInboundEndpoint(state); + } else { + synchronized (_inboundBuildEndMessages) { + _inboundBuildEndMessages.add(state); + } + _exec.repoll(); + } + } else { + if (_exec.wasRecentlyBuilding(reqId)) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Dropping the reply " + reqId + ", as we used to be building that"); + } else { + synchronized (_inboundBuildMessages) { + _inboundBuildMessages.add(new BuildMessageState(receivedMessage, from, fromHash)); + } + _exec.repoll(); + } + } + return _buildMessageHandlerJob; + } + } + + private class TunnelBuildReplyMessageHandlerJobBuilder implements HandlerJobBuilder { + public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Receive tunnel build reply message " + receivedMessage.getUniqueId() + " from " + + (fromHash != null ? fromHash.toBase64() : from != null ? from.calculateHash().toBase64() : "a tunnel")); + if (HANDLE_REPLIES_INLINE) { + handleReply(new BuildReplyMessageState(receivedMessage, from, fromHash)); + } else { + synchronized (_inboundBuildReplyMessages) { + _inboundBuildReplyMessages.add(new BuildReplyMessageState(receivedMessage, from, fromHash)); + } + _exec.repoll(); + } + return _buildReplyMessageHandlerJob; + } + } + + /** normal inbound requests from other people */ + private class BuildMessageState { + TunnelBuildMessage msg; + RouterIdentity from; + Hash fromHash; + long recvTime; + public BuildMessageState(I2NPMessage m, RouterIdentity f, Hash h) { + msg = (TunnelBuildMessage)m; + from = f; + fromHash = h; + recvTime = System.currentTimeMillis(); + } + } + /** replies for outbound tunnels that we have created */ + private class BuildReplyMessageState { + TunnelBuildReplyMessage msg; + RouterIdentity from; + Hash fromHash; + long recvTime; + public BuildReplyMessageState(I2NPMessage m, RouterIdentity f, Hash h) { + msg = (TunnelBuildReplyMessage)m; + from = f; + fromHash = h; + recvTime = System.currentTimeMillis(); + } + } + /** replies for inbound tunnels we have created */ + private class BuildEndMessageState { + TunnelBuildMessage msg; + PooledTunnelCreatorConfig cfg; + RouterIdentity from; + Hash fromHash; + long recvTime; + public BuildEndMessageState(PooledTunnelCreatorConfig c, I2NPMessage m, RouterIdentity f, Hash h) { + cfg = c; + msg = (TunnelBuildMessage)m; + from = f; + fromHash = h; + recvTime = System.currentTimeMillis(); + } + } + + // noop + private class TunnelBuildMessageHandlerJob extends JobImpl { + private TunnelBuildMessageHandlerJob(RouterContext ctx) { super(ctx); } + public void runJob() {} + public String getName() { return "Receive tunnel build message"; } + } + // noop + private class TunnelBuildReplyMessageHandlerJob extends JobImpl { + private TunnelBuildReplyMessageHandlerJob(RouterContext ctx) { super(ctx); } + public void runJob() {} + public String getName() { return "Receive tunnel build reply message"; } + } +} diff --git a/router/java/src/net/i2p/router/tunnel/pool/BuildRequestor.java b/router/java/src/net/i2p/router/tunnel/pool/BuildRequestor.java new file mode 100644 index 000000000..e392a382d --- /dev/null +++ b/router/java/src/net/i2p/router/tunnel/pool/BuildRequestor.java @@ -0,0 +1,197 @@ +package net.i2p.router.tunnel.pool; + +import java.util.*; +import net.i2p.data.*; +import net.i2p.data.i2np.*; 
+import net.i2p.router.OutNetMessage;
+import net.i2p.router.RouterContext;
+import net.i2p.router.MessageSelector;
+import net.i2p.router.JobImpl;
+import net.i2p.router.ReplyJob;
+import net.i2p.router.TunnelInfo;
+import net.i2p.router.tunnel.BuildMessageGenerator;
+import net.i2p.util.Log;
+
+/**
+ *
+ */
+class BuildRequestor {
+    private static final List ORDER = new ArrayList(BuildMessageGenerator.ORDER.length);
+    static {
+        for (int i = 0; i < BuildMessageGenerator.ORDER.length; i++)
+            ORDER.add(new Integer(i));
+    }
+    private static final boolean USE_PAIRED_CLIENT_TUNNELS = true;
+    private static final int PRIORITY = 500;
+    static final int REQUEST_TIMEOUT = 20*1000;
+
+    /** new style requests need to fill in the tunnel IDs beforehand */
+    public static void prepare(RouterContext ctx, PooledTunnelCreatorConfig cfg) {
+        for (int i = 0; i < cfg.getLength(); i++) {
+            if ( (!cfg.isInbound()) && (i == 0) ) {
+                // outbound gateway (us) doesn't receive on a tunnel id
+                if (cfg.getLength() <= 1) // zero hop, pretend to have a send id
+                    cfg.getConfig(i).setSendTunnelId(DataHelper.toLong(4, ctx.random().nextLong(TunnelId.MAX_ID_VALUE)));
+            } else {
+                cfg.getConfig(i).setReceiveTunnelId(DataHelper.toLong(4, ctx.random().nextLong(TunnelId.MAX_ID_VALUE)));
+            }
+
+            if (i > 0)
+                cfg.getConfig(i-1).setSendTunnelId(cfg.getConfig(i).getReceiveTunnelId());
+            byte iv[] = new byte[16];
+            ctx.random().nextBytes(iv);
+            cfg.getConfig(i).setReplyIV(new ByteArray(iv));
+            cfg.getConfig(i).setReplyKey(ctx.keyGenerator().generateSessionKey());
+        }
+        cfg.setReplyMessageId(ctx.random().nextLong(I2NPMessage.MAX_ID_VALUE));
+    }
+    public static void request(RouterContext ctx, TunnelPool pool, PooledTunnelCreatorConfig cfg, BuildExecutor exec) {
+        // new style crypto fills in all the blanks, while the old style waits for replies to fill in the next hop, etc
+        prepare(ctx, cfg);
+
+        if (cfg.getLength() <= 1) {
+            buildZeroHop(ctx, pool, cfg, exec);
+            return;
+        }
+
+        Log log = ctx.logManager().getLog(BuildRequestor.class);
+        cfg.setTunnelPool(pool);
+
+        TunnelInfo pairedTunnel = null;
+        if (pool.getSettings().isExploratory() || !USE_PAIRED_CLIENT_TUNNELS) {
+            if (pool.getSettings().isInbound())
+                pairedTunnel = ctx.tunnelManager().selectOutboundTunnel();
+            else
+                pairedTunnel = ctx.tunnelManager().selectInboundTunnel();
+        } else {
+            if (pool.getSettings().isInbound())
+                pairedTunnel = ctx.tunnelManager().selectOutboundTunnel(pool.getSettings().getDestination());
+            else
+                pairedTunnel = ctx.tunnelManager().selectInboundTunnel(pool.getSettings().getDestination());
+        }
+        if (pairedTunnel == null) {
+            if (log.shouldLog(Log.WARN))
+                log.warn("Couldn't find a paired tunnel for " + cfg + ", fall back on exploratory tunnels for pairing");
+            if (!pool.getSettings().isExploratory() && USE_PAIRED_CLIENT_TUNNELS)
+                if (pool.getSettings().isInbound())
+                    pairedTunnel = ctx.tunnelManager().selectOutboundTunnel();
+                else
+                    pairedTunnel = ctx.tunnelManager().selectInboundTunnel();
+        }
+        if (pairedTunnel == null) {
+            if (log.shouldLog(Log.ERROR))
+                log.error("Tunnel build failed, as we couldn't find a paired tunnel for " + cfg);
+            exec.buildComplete(cfg, pool);
+            return;
+        }
+
+        TunnelBuildMessage msg = createTunnelBuildMessage(ctx, pool, cfg, pairedTunnel, exec);
+        if (msg == null) {
+            if (log.shouldLog(Log.ERROR))
+                log.error("Tunnel build failed, as we couldn't create the tunnel build message for " + cfg);
+            exec.buildComplete(cfg, pool);
+            return;
+        }
+
+        cfg.setPairedTunnel(pairedTunnel);
+
+        if (cfg.isInbound()) {
+            if (log.shouldLog(Log.DEBUG))
+                log.debug("Sending the tunnel build request out the tunnel " + pairedTunnel + " to "
+                          + cfg.getPeer(0).toBase64() + " for " + cfg + " waiting for the reply of "
+                          + cfg.getReplyMessageId());
+            // send it out a tunnel targeting the first hop
+            ctx.tunnelDispatcher().dispatchOutbound(msg, pairedTunnel.getSendTunnelId(0), cfg.getPeer(0));
+        } else {
+            if (log.shouldLog(Log.DEBUG))
+                log.debug("Sending the tunnel build request directly to " + cfg.getPeer(1).toBase64()
+                          + " for " + cfg + " waiting for the reply of " + cfg.getReplyMessageId()
+                          + " with msgId=" + msg.getUniqueId());
+            // send it directly to the first hop
+            OutNetMessage outMsg = new OutNetMessage(ctx);
+            outMsg.setExpiration(msg.getMessageExpiration());
+            outMsg.setMessage(msg);
+            outMsg.setPriority(PRIORITY);
+            RouterInfo peer = ctx.netDb().lookupRouterInfoLocally(cfg.getPeer(1));
+            if (peer == null) {
+                if (log.shouldLog(Log.ERROR))
+                    log.error("Could not find the next hop to send the outbound request to: " + cfg);
+                exec.buildComplete(cfg, pool);
+                return;
+            }
+            outMsg.setTarget(peer);
+            ctx.outNetMessagePool().add(outMsg);
+        }
+    }
+
+    private static TunnelBuildMessage createTunnelBuildMessage(RouterContext ctx, TunnelPool pool, PooledTunnelCreatorConfig cfg, TunnelInfo pairedTunnel, BuildExecutor exec) {
+        Log log = ctx.logManager().getLog(BuildRequestor.class);
+        long replyTunnel = 0;
+        Hash replyRouter = null;
+        if (cfg.isInbound()) {
+            replyTunnel = 0;
+            replyRouter = ctx.routerHash();
+        } else {
+            replyTunnel = pairedTunnel.getReceiveTunnelId(0).getTunnelId();
+            replyRouter = pairedTunnel.getPeer(0);
+        }
+
+        // populate and encrypt the message
+        BuildMessageGenerator gen = new BuildMessageGenerator();
+        TunnelBuildMessage msg = new TunnelBuildMessage(ctx);
+
+        long replyMessageId = ctx.random().nextLong(I2NPMessage.MAX_ID_VALUE);
+        cfg.setReplyMessageId(replyMessageId);
+
+        List order = new ArrayList(ORDER);
+        Collections.shuffle(order, ctx.random()); // randomized placement within the message
+        cfg.setReplyOrder(order);
+
+        if (log.shouldLog(Log.DEBUG))
+            log.debug("Build order: " + order + " for " + cfg);
+
+        for (int i = 0; i < BuildMessageGenerator.ORDER.length; i++) {
+            int hop = ((Integer)order.get(i)).intValue();
+            PublicKey key = null;
+
+            if (BuildMessageGenerator.isBlank(cfg, hop)) {
+                // erm, blank
+            } else {
+                Hash peer = cfg.getPeer(hop);
+                RouterInfo peerInfo = ctx.netDb().lookupRouterInfoLocally(peer);
+                if (peerInfo == null) {
+                    if (log.shouldLog(Log.ERROR))
+                        log.error("Peer selected for hop " + i + "/" + hop + " was not found locally: "
+                                  + peer.toBase64() + " for " + cfg);
+                    return null;
+                } else {
+                    key = peerInfo.getIdentity().getPublicKey();
+                }
+            }
+            if (log.shouldLog(Log.DEBUG))
+                log.debug(cfg.getReplyMessageId() + ": record " + i + "/" + hop + " has key " + key + " for " + cfg);
+            gen.createRecord(i, hop, msg, cfg, replyRouter, replyTunnel, ctx, key);
+        }
+        gen.layeredEncrypt(ctx, msg, cfg, order);
+
+        return msg;
+    }
+
+    private static void buildZeroHop(RouterContext ctx, TunnelPool pool, PooledTunnelCreatorConfig cfg, BuildExecutor exec) {
+        Log log = ctx.logManager().getLog(BuildRequestor.class);
+        if (log.shouldLog(Log.DEBUG))
+            log.debug("Build zero hop tunnel " + cfg);
+
+        exec.buildComplete(cfg, pool);
+        if (cfg.isInbound())
+            ctx.tunnelDispatcher().joinInbound(cfg);
+        else
+            ctx.tunnelDispatcher().joinOutbound(cfg);
+        pool.addTunnel(cfg);
+        exec.buildSuccessful(cfg);
+        ExpireJob expireJob = new ExpireJob(ctx, cfg, pool);
+        cfg.setExpireJob(expireJob);
+
ctx.jobQueue().addJob(expireJob); + // can it get much easier? + } +} diff --git a/router/java/src/net/i2p/router/tunnel/pool/HandleTunnelCreateMessageJob.java b/router/java/src/net/i2p/router/tunnel/pool/HandleTunnelCreateMessageJob.java index 67561cb7c..d5054a828 100644 --- a/router/java/src/net/i2p/router/tunnel/pool/HandleTunnelCreateMessageJob.java +++ b/router/java/src/net/i2p/router/tunnel/pool/HandleTunnelCreateMessageJob.java @@ -95,7 +95,7 @@ public class HandleTunnelCreateMessageJob extends JobImpl { return STATUS_DEFERRED; } } - return getContext().throttle().acceptTunnelRequest(_request); + return getContext().throttle().acceptTunnelRequest(); } private class DeferredAccept extends JobImpl { diff --git a/router/java/src/net/i2p/router/tunnel/pool/PooledTunnelCreatorConfig.java b/router/java/src/net/i2p/router/tunnel/pool/PooledTunnelCreatorConfig.java index b7032babc..65a179bfb 100644 --- a/router/java/src/net/i2p/router/tunnel/pool/PooledTunnelCreatorConfig.java +++ b/router/java/src/net/i2p/router/tunnel/pool/PooledTunnelCreatorConfig.java @@ -1,10 +1,12 @@ package net.i2p.router.tunnel.pool; -import java.util.Properties; +import java.util.*; import net.i2p.data.Hash; import net.i2p.router.Job; import net.i2p.router.RouterContext; +import net.i2p.router.TunnelInfo; import net.i2p.router.tunnel.TunnelCreatorConfig; +import net.i2p.util.Log; /** * @@ -15,6 +17,7 @@ public class PooledTunnelCreatorConfig extends TunnelCreatorConfig { private TestJob _testJob; private Job _expireJob; private int _failures; + private TunnelInfo _pairedTunnel; /** Creates a new instance of PooledTunnelCreatorConfig */ @@ -28,7 +31,6 @@ public class PooledTunnelCreatorConfig extends TunnelCreatorConfig { _failures = 0; } - public void testSuccessful(int ms) { if (_testJob != null) { _testJob.testSuccessful(ms); @@ -45,6 +47,10 @@ public class PooledTunnelCreatorConfig extends TunnelCreatorConfig { return _pool.getSettings().getUnknownOptions(); } + public String toString() { + return super.toString() + " with " + _failures + " failures"; + } + private static final int MAX_CONSECUTIVE_TEST_FAILURES = 2; /** @@ -66,9 +72,19 @@ public class PooledTunnelCreatorConfig extends TunnelCreatorConfig { } } public boolean getTunnelFailed() { return _failed; } - public void setTunnelPool(TunnelPool pool) { _pool = pool; } + public void setTunnelPool(TunnelPool pool) { + if (pool != null) { + _pool = pool; + } else { + Log log = _context.logManager().getLog(getClass()); + log.error("Null tunnel pool?", new Exception("foo")); + } + } public TunnelPool getTunnelPool() { return _pool; } public void setTestJob(TestJob job) { _testJob = job; } public void setExpireJob(Job job) { _expireJob = job; } + + public void setPairedTunnel(TunnelInfo tunnel) { _pairedTunnel = tunnel; } + public TunnelInfo getPairedTunnel() { return _pairedTunnel; } } diff --git a/router/java/src/net/i2p/router/tunnel/pool/RequestTunnelJob.java b/router/java/src/net/i2p/router/tunnel/pool/RequestTunnelJob.java deleted file mode 100644 index 07df695e2..000000000 --- a/router/java/src/net/i2p/router/tunnel/pool/RequestTunnelJob.java +++ /dev/null @@ -1,417 +0,0 @@ -package net.i2p.router.tunnel.pool; - -import java.util.HashSet; -import java.util.Set; - -import net.i2p.data.Certificate; -import net.i2p.data.DataHelper; -import net.i2p.data.Hash; -import net.i2p.data.PublicKey; -import net.i2p.data.RouterInfo; -import net.i2p.data.SessionKey; -import net.i2p.data.SessionTag; -import net.i2p.data.TunnelId; -import net.i2p.data.i2np.I2NPMessage; 
-import net.i2p.data.i2np.TunnelCreateMessage; -import net.i2p.data.i2np.TunnelCreateStatusMessage; - -import net.i2p.router.Job; -import net.i2p.router.JobImpl; -import net.i2p.router.MessageSelector; -import net.i2p.router.RouterContext; -import net.i2p.router.ReplyJob; -import net.i2p.router.TunnelInfo; -import net.i2p.router.tunnel.TunnelCreatorConfig; -import net.i2p.router.tunnel.HopConfig; -import net.i2p.router.peermanager.TunnelHistory; -import net.i2p.util.Log; - -/** - * queue up a job to request the endpoint to join the tunnel, which then - * requeues up another job for earlier hops, etc, until it reaches the - * gateway. after the gateway is confirmed, onCreated is fired. - * - */ -public class RequestTunnelJob extends JobImpl { - private Log _log; - private Job _onCreated; - private Job _onFailed; - private int _currentHop; - private RouterInfo _currentPeer; - private HopConfig _currentConfig; - private int _lookups; - private TunnelCreatorConfig _config; - private long _lastSendTime; - private boolean _isFake; - private boolean _isExploratory; - - static final int HOP_REQUEST_TIMEOUT_CLIENT = 15*1000; - static final int HOP_REQUEST_TIMEOUT_EXPLORATORY = 10*1000; - private static final int LOOKUP_TIMEOUT = 5*1000; - - public RequestTunnelJob(RouterContext ctx, TunnelCreatorConfig cfg, Job onCreated, Job onFailed, int hop, boolean isFake, boolean isExploratory) { - super(ctx); - _log = ctx.logManager().getLog(RequestTunnelJob.class); - _config = cfg; - _onCreated = onCreated; - _onFailed = onFailed; - _currentHop = hop; - _currentPeer = null; - _lookups = 0; - _lastSendTime = 0; - _isFake = isFake || (cfg.getLength() <= 1); - _isExploratory = isExploratory; - - ctx.statManager().createRateStat("tunnel.receiveRejectionProbabalistic", "How often we are rejected probabalistically?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("tunnel.receiveRejectionTransient", "How often we are rejected due to transient overload?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("tunnel.receiveRejectionBandwidth", "How often we are rejected due to bandwidth overload?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("tunnel.receiveRejectionCritical", "How often we are rejected due to critical failure?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("tunnel.buildFailure", "What hop was being requested when a nonexploratory tunnel request failed?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("tunnel.buildExploratoryFailure", "What hop was being requested when an exploratory tunnel request failed?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("tunnel.buildExploratoryFailure1Hop", "What hop was being requested when a 1 hop exploratory tunnel request failed?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("tunnel.buildExploratoryFailure2Hop", "What hop was being requested when a 2 hop exploratory tunnel request failed?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("tunnel.buildExploratoryFailure3Hop", "What hop was being requested when a 3 hop exploratory tunnel request failed?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 
24*60*60*1000l }); - ctx.statManager().createRateStat("tunnel.buildSuccess", "How often we succeed building a non-exploratory tunnel?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("tunnel.buildExploratorySuccess", "How often we succeed building an exploratory tunnel?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("tunnel.buildExploratorySuccess1Hop", "How often we succeed building a 1 hop exploratory tunnel?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("tunnel.buildExploratorySuccess2Hop", "How often we succeed building a 2 hop exploratory tunnel?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("tunnel.buildExploratorySuccess3Hop", "How often we succeed building a 3 hop exploratory tunnel?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("tunnel.buildPartialTime", "How long a non-exploratory request took to be accepted?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("tunnel.buildExploratoryPartialTime", "How long an exploratory request took to be accepted?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("tunnel.buildExploratoryTimeout", "How often a request for an exploratory tunnel's peer times out?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("tunnel.buildClientTimeout", "How often a request for a client tunnel's peer times out?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l }); - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Requesting hop " + hop + " in " + cfg); - if (hop < 0) - throw new IllegalArgumentException("invalid endpoint hop [" + hop + "] cfg: " + cfg); - } - - public String getName() { return "Request tunnel participation"; } - public void runJob() { - _currentConfig = _config.getConfig(_currentHop); - Hash peer = _config.getPeer(_currentHop); - if (getContext().routerHash().equals(peer)) { - requestSelf(); - } else { - if (_currentPeer == null) { - _currentPeer = getContext().netDb().lookupRouterInfoLocally(peer); - if (_currentPeer == null) { - _lookups++; - if (_lookups > 1) { - peerFail(0); - return; - } - getContext().netDb().lookupRouterInfo(peer, this, this, LOOKUP_TIMEOUT); - return; - } - } - requestRemote(peer); - } - } - - private void requestSelf() { - if (_config.isInbound()) { - // inbound tunnel, which means we are the first person asked, and if - // it is a zero hop tunnel, then we are also the last person asked - - long id = getContext().random().nextLong(TunnelId.MAX_ID_VALUE-1) + 1; - _currentConfig.setReceiveTunnelId(DataHelper.toLong(4, id)); - if (_config.getLength() > 1) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Requesting ourselves to join an inbound tunnel, receiving on " - + _currentConfig.getReceiveTunnel() + ": " + _config); - // inbound tunnel with more than just ourselves - RequestTunnelJob req = new RequestTunnelJob(getContext(), _config, _onCreated, - _onFailed, _currentHop - 1, _isFake, _isExploratory); - if (_isFake) - req.runJob(); - else - getContext().jobQueue().addJob(req); - } else if (_onCreated != null) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Requesting ourselves to join a 0 hop inbound tunnel, receiving on " - + 
_currentConfig.getReceiveTunnel() + ": " + _config); - // 0 hop inbound tunnel - if (_onCreated != null) { - if (_isFake) - _onCreated.runJob(); - else - getContext().jobQueue().addJob(_onCreated); - } - //getContext().statManager().addRateData("tunnel.buildSuccess", 1, 0); - } - } else { - // outbound tunnel, we're the gateway and hence the last person asked - - if (_config.getLength() <= 1) { - // pick a random tunnelId which we "send" on - byte id[] = new byte[4]; - getContext().random().nextBytes(id); - _config.getConfig(0).setSendTunnelId(id); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Requesting ourselves to join an outbound tunnel, sending on " - + _config.getConfig(0).getSendTunnel() + ": " + _config); - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Requesting ourselves to join an outbound tunnel, sending on " - + _config.getConfig(1).getReceiveTunnel() + ": " + _config); - // send to whatever the first remote hop receives on - _config.getConfig(0).setSendTunnelId(_config.getConfig(1).getReceiveTunnelId()); - if (_config.getConfig(0).getSendTunnelId() == null) { - _log.error("wtf, next hop: " + _config.getConfig(1) - + " didn't give us a tunnel to send to, but they passed on to us?"); - if (_onFailed != null) { - if (_isFake) - _onFailed.runJob(); - else - getContext().jobQueue().addJob(_onFailed); - } - return; - } - - } - // we are the outbound gateway, which is the last hop which is - // asked to participate in the tunnel. as such, fire off the - // onCreated immediately - if (_onCreated != null) { - if (_isFake) - _onCreated.runJob(); - else - getContext().jobQueue().addJob(_onCreated); - if (_config.getLength() > 1) - getContext().statManager().addRateData("tunnel.buildSuccess", 1, 0); - } - } - } - - private void requestRemote(Hash peer) { - HopConfig nextHop = (_config.getLength() > _currentHop + 1 ? _config.getConfig(_currentHop+1) : null); - Hash nextRouter = (nextHop != null ? _config.getPeer(_currentHop+1) : null); - TunnelId nextTunnel = (nextHop != null ? 
nextHop.getReceiveTunnel() : null); - - TunnelInfo replyTunnel = getContext().tunnelManager().selectInboundTunnel(); - if (replyTunnel == null) { - if (_log.shouldLog(Log.WARN)) - _log.warn("No inbound tunnels to build tunnels with!"); - tunnelFail(); - return; - } - Hash replyGateway = replyTunnel.getPeer(0); - - SessionKey replyKey = getContext().keyGenerator().generateSessionKey(); - SessionTag replyTag = new SessionTag(true); - - TunnelCreateMessage msg = new TunnelCreateMessage(getContext()); - msg.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); - msg.setIVKey(_currentConfig.getIVKey()); - msg.setLayerKey(_currentConfig.getLayerKey()); - msg.setNonce(getContext().random().nextLong(TunnelCreateMessage.MAX_NONCE_VALUE)); - msg.setNextRouter(nextRouter); - msg.setNextTunnelId(nextTunnel); - msg.setReplyGateway(replyGateway); - msg.setReplyTunnel(replyTunnel.getReceiveTunnelId(0)); - msg.setReplyKey(replyKey); - msg.setReplyTag(replyTag); - int duration = 10*60; // (int)((_config.getExpiration() - getContext().clock().now())/1000); - msg.setDurationSeconds(duration); - long now = getContext().clock().now(); - if (_isExploratory) - msg.setMessageExpiration(now + HOP_REQUEST_TIMEOUT_EXPLORATORY); - else - msg.setMessageExpiration(now + HOP_REQUEST_TIMEOUT_CLIENT); - if (_currentHop == 0) - msg.setIsGateway(true); - else - msg.setIsGateway(false); - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("** Send remote request to " + peer.toBase64().substring(0,4) + " using nonce " - + msg.getNonce() + " with replies on " + replyTunnel); - - // now make sure we will decrypt the reply properly - HashSet sessionTags = new HashSet(1); - sessionTags.add(replyTag); - getContext().sessionKeyManager().tagsReceived(replyKey, sessionTags); - - HashSet sentTags = new HashSet(); - SessionKey sentKey = new SessionKey(); - ReplySelector selector = new ReplySelector(msg.getNonce()); - ReplyJob onReply = new RequestReplyJob(getContext(), sentKey, sentTags); - Job onTimeout = new RequestTimeoutJob(getContext(), msg.getNonce()); - Job j = new SendGarlicMessageJob(getContext(), msg, _currentPeer, selector, onReply, onTimeout, sentKey, sentTags); - getContext().jobQueue().addJob(j); - _lastSendTime = getContext().clock().now(); - } - - private void peerFail(int howBad) { - if (howBad > 0) { - switch (howBad) { - case TunnelHistory.TUNNEL_REJECT_CRIT: - getContext().statManager().addRateData("tunnel.receiveRejectionCritical", 1, 0); - break; - case TunnelHistory.TUNNEL_REJECT_BANDWIDTH: - getContext().statManager().addRateData("tunnel.receiveRejectionBandwidth", 1, 0); - break; - case TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD: - getContext().statManager().addRateData("tunnel.receiveRejectionTransient", 1, 0); - break; - case TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT: - getContext().statManager().addRateData("tunnel.receiveRejectionProbabalistic", 1, 0); - break; - default: - // ignore - } - - if (_currentPeer != null) - // penalize peer based on their bitchiness level - getContext().profileManager().tunnelRejected(_currentPeer.getIdentity().calculateHash(), - getContext().clock().now() - _lastSendTime, - howBad); - } - if (_log.shouldLog(Log.INFO)) - _log.info("Tunnel request failed w/ cause=" + howBad + " for peer " - + (_currentPeer == null ? 
"[unknown]" : - _currentPeer.getIdentity().calculateHash().toBase64().substring(0,4))); - tunnelFail(); - } - - private void tunnelFail() { - if (_log.shouldLog(Log.INFO)) - _log.info("tunnel building failed: " + _config + " at hop " + _currentHop); - if (_onFailed != null) - getContext().jobQueue().addJob(_onFailed); - if (_isExploratory) { - int i = _config.getLength(); - getContext().statManager().addRateData("tunnel.buildExploratoryFailure", _currentHop, i); - if (i == 2) - getContext().statManager().addRateData("tunnel.buildExploratoryFailure1Hop", _currentHop, i); - else if (i == 3) - getContext().statManager().addRateData("tunnel.buildExploratoryFailure2Hop", _currentHop, i); - else if (i == 4) - getContext().statManager().addRateData("tunnel.buildExploratoryFailure3Hop", _currentHop, i); - } else - getContext().statManager().addRateData("tunnel.buildFailure", _currentHop, _config.getLength()); - } - - private void peerSuccess() { - long now = getContext().clock().now(); - getContext().profileManager().tunnelJoined(_currentPeer.getIdentity().calculateHash(), - now - _lastSendTime); - if (_isExploratory) - getContext().statManager().addRateData("tunnel.buildExploratoryPartialTime", now - _lastSendTime, 0); - else - getContext().statManager().addRateData("tunnel.buildPartialTime", now - _lastSendTime, 0); - - if (_currentHop > 0) { - RequestTunnelJob j = new RequestTunnelJob(getContext(), _config, _onCreated, _onFailed, _currentHop - 1, _isFake, _isExploratory); - getContext().jobQueue().addJob(j); - } else { - if (_onCreated != null) - getContext().jobQueue().addJob(_onCreated); - if (_isExploratory) { - int i = _config.getLength(); - if (i > 1) - getContext().statManager().addRateData("tunnel.buildExploratorySuccess", 1, 0); - if (i == 2) - getContext().statManager().addRateData("tunnel.buildExploratorySuccess1Hop", 1, 0); - else if (i == 3) - getContext().statManager().addRateData("tunnel.buildExploratorySuccess2Hop", 1, 0); - else if (i == 4) - getContext().statManager().addRateData("tunnel.buildExploratorySuccess3Hop", 1, 0); - } else { - if (_config.getLength() > 1) - getContext().statManager().addRateData("tunnel.buildSuccess", 1, 0); - } - } - } - - private class RequestReplyJob extends JobImpl implements ReplyJob { - private SessionKey _sentKey; - private Set _sentTags; - private TunnelCreateStatusMessage _reply; - - public RequestReplyJob(RouterContext ctx, SessionKey sentKey, Set sentTags) { - super(ctx); - _sentKey = sentKey; - _sentTags = sentTags; - } - public String getName() { return "handle tunnel request reply"; } - public void runJob() { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("reply received: " + _config + " at hop " + _currentHop + ": " + _reply.getStatus()); - - if (_sentTags.size() > 0) { - PublicKey target = _currentPeer.getIdentity().getPublicKey(); - getContext().sessionKeyManager().tagsDelivered(target, _sentKey, _sentTags); - } - - if (_reply.getStatus() == TunnelCreateStatusMessage.STATUS_SUCCESS) { - _currentConfig.setReceiveTunnelId(_reply.getReceiveTunnelId()); - if (_currentHop >= 1) - _config.getConfig(_currentHop-1).setSendTunnelId(_currentConfig.getReceiveTunnelId()); - peerSuccess(); - } else { - peerFail(_reply.getStatus()); - } - } - - public void setMessage(I2NPMessage message) { _reply = (TunnelCreateStatusMessage)message; } - } - - private class RequestTimeoutJob extends JobImpl { - private long _nonce; - public RequestTimeoutJob(RouterContext ctx, long nonce) { - super(ctx); - _nonce = nonce; - } - public String getName() { return 
"tunnel request timeout"; } - public void runJob() { - if (_log.shouldLog(Log.WARN)) - _log.warn("request timeout: " + _config + " at hop " + _currentHop - + " with nonce " + _nonce); - if (_isExploratory) - getContext().statManager().addRateData("tunnel.buildExploratoryTimeout", 1, 0); - else - getContext().statManager().addRateData("tunnel.buildClientTimeout", 1, 0); - peerFail(0); - } - } - - private class ReplySelector implements MessageSelector { - private long _nonce; - private boolean _nonceFound; - private long _expiration; - - public ReplySelector(long nonce) { - _nonce = nonce; - _nonceFound = false; - _expiration = getContext().clock().now() + (_isExploratory ? HOP_REQUEST_TIMEOUT_EXPLORATORY : HOP_REQUEST_TIMEOUT_CLIENT); - } - public boolean continueMatching() { - return (!_nonceFound) && (getContext().clock().now() < _expiration); - } - - public long getExpiration() { return _expiration; } - public boolean isMatch(I2NPMessage message) { - if (message instanceof TunnelCreateStatusMessage) { - if (_nonce == ((TunnelCreateStatusMessage)message).getNonce()) { - _nonceFound = true; - return true; - } - } - return false; - } - - public String toString() { - StringBuffer buf = new StringBuffer(64); - buf.append("request "); - buf.append(_currentPeer.getIdentity().calculateHash().toBase64().substring(0,4)); - buf.append(" to join ").append(_config); - buf.append(" (request expired "); - buf.append(DataHelper.formatDuration(_expiration-getContext().clock().now())); - buf.append(" ago)"); - return buf.toString(); - } - } -} diff --git a/router/java/src/net/i2p/router/tunnel/pool/SendGarlicMessageJob.java b/router/java/src/net/i2p/router/tunnel/pool/SendGarlicMessageJob.java index dc37aff36..9b8c3ebbb 100644 --- a/router/java/src/net/i2p/router/tunnel/pool/SendGarlicMessageJob.java +++ b/router/java/src/net/i2p/router/tunnel/pool/SendGarlicMessageJob.java @@ -35,6 +35,9 @@ class SendGarlicMessageJob extends JobImpl { private SessionKey _sentKey; private Set _sentTags; + /** only elGamal the message, never use session tags */ + private static final boolean FORCE_ELGAMAL = false; + public SendGarlicMessageJob(RouterContext ctx, I2NPMessage payload, RouterInfo target, MessageSelector selector, ReplyJob onReply, Job onTimeout, SessionKey sentKey, Set sentTags) { super(ctx); _log = ctx.logManager().getLog(SendGarlicMessageJob.class); @@ -62,7 +65,11 @@ class SendGarlicMessageJob extends JobImpl { payload.setExpiration(_payload.getMessageExpiration()); int timeout = (int)(payload.getExpiration() - getContext().clock().now()); - GarlicMessage msg = GarlicMessageBuilder.buildMessage(getContext(), payload, _sentKey, _sentTags); + GarlicMessage msg = null; + if (FORCE_ELGAMAL) + msg = GarlicMessageBuilder.buildMessage(getContext(), payload, _sentKey, _sentTags, 0, true); + else + msg = GarlicMessageBuilder.buildMessage(getContext(), payload, _sentKey, _sentTags); // so we will look for the reply OutNetMessage dummyMessage = getContext().messageRegistry().registerPending(_replySelector, _onReply, _onTimeout, timeout); diff --git a/router/java/src/net/i2p/router/tunnel/pool/TestJob.java b/router/java/src/net/i2p/router/tunnel/pool/TestJob.java index 631a51471..0728ee4b3 100644 --- a/router/java/src/net/i2p/router/tunnel/pool/TestJob.java +++ b/router/java/src/net/i2p/router/tunnel/pool/TestJob.java @@ -29,13 +29,17 @@ class TestJob extends JobImpl { private TunnelInfo _replyTunnel; /** base to randomize the test delay on */ - private static final int TEST_DELAY = 60*1000; + private static final int 
TEST_DELAY = 30*1000; public TestJob(RouterContext ctx, PooledTunnelCreatorConfig cfg, TunnelPool pool) { super(ctx); _log = ctx.logManager().getLog(TestJob.class); _pool = pool; _cfg = cfg; + if (_pool == null) + _pool = cfg.getTunnelPool(); + if ( (_pool == null) && (_log.shouldLog(Log.ERROR)) ) + _log.error("Invalid tunnel test configuration: no pool for " + cfg, new Exception("origin")); getTiming().setStartAfter(getDelay() + ctx.clock().now()); ctx.statManager().createRateStat("tunnel.testFailedTime", "How long did the failure take (max of 60s for full timeout)?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l }); @@ -54,6 +58,8 @@ class TestJob extends JobImpl { } public String getName() { return "Test tunnel"; } public void runJob() { + if (_pool == null) + return; long lag = getContext().jobQueue().getMaxLag(); if (lag > 3000) { if (_log.shouldLog(Log.WARN)) @@ -65,6 +71,8 @@ class TestJob extends JobImpl { _found = false; // note: testing with exploratory tunnels always, even if the tested tunnel // is a client tunnel (per _cfg.getDestination()) + // should we test with the tunnel that we exposed the creation with? + // (accessible as _cfg.getPairedTunnel()) _replyTunnel = null; _outTunnel = null; if (_cfg.isInbound()) { @@ -85,64 +93,61 @@ class TestJob extends JobImpl { long testExpiration = getContext().clock().now() + testPeriod; DeliveryStatusMessage m = new DeliveryStatusMessage(getContext()); m.setArrival(getContext().clock().now()); - m.setMessageExpiration(testExpiration+2*testPeriod); + m.setMessageExpiration(testExpiration); m.setMessageId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE)); - // keep an eye out for the message even after we fail the tunnel for another 40s - ReplySelector sel = new ReplySelector(getContext(), m.getMessageId(), testExpiration + 2*testPeriod); + ReplySelector sel = new ReplySelector(getContext(), m.getMessageId(), testExpiration); OnTestReply onReply = new OnTestReply(getContext()); OnTestTimeout onTimeout = new OnTestTimeout(getContext()); - OutNetMessage msg = getContext().messageRegistry().registerPending(sel, onReply, onTimeout, 3*testPeriod); + OutNetMessage msg = getContext().messageRegistry().registerPending(sel, onReply, onTimeout, testPeriod); onReply.setSentMessage(msg); sendTest(m); } } private void sendTest(I2NPMessage m) { - if (false) { - getContext().tunnelDispatcher().dispatchOutbound(m, _outTunnel.getSendTunnelId(0), - _replyTunnel.getReceiveTunnelId(0), - _replyTunnel.getPeer(0)); - } else { - // garlic route that DeliveryStatusMessage to ourselves so the endpoints and gateways - // can't tell its a test. to simplify this, we encrypt it with a random key and tag, - // remembering that key+tag so that we can decrypt it later. this means we can do the - // garlic encryption without any ElGamal (yay) - DeliveryInstructions instructions = new DeliveryInstructions(); - instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL); + // garlic route that DeliveryStatusMessage to ourselves so the endpoints and gateways + // can't tell its a test. to simplify this, we encrypt it with a random key and tag, + // remembering that key+tag so that we can decrypt it later. 
this means we can do the + // garlic encryption without any ElGamal (yay) + DeliveryInstructions instructions = new DeliveryInstructions(); + instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL); - PayloadGarlicConfig payload = new PayloadGarlicConfig(); - payload.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); - payload.setId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE)); - payload.setPayload(m); - payload.setRecipient(getContext().router().getRouterInfo()); - payload.setDeliveryInstructions(instructions); - payload.setRequestAck(false); - payload.setExpiration(m.getMessageExpiration()); + PayloadGarlicConfig payload = new PayloadGarlicConfig(); + payload.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null)); + payload.setId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE)); + payload.setPayload(m); + payload.setRecipient(getContext().router().getRouterInfo()); + payload.setDeliveryInstructions(instructions); + payload.setRequestAck(false); + payload.setExpiration(m.getMessageExpiration()); - SessionKey encryptKey = getContext().keyGenerator().generateSessionKey(); - SessionTag encryptTag = new SessionTag(true); - SessionKey sentKey = new SessionKey(); - Set sentTags = null; - GarlicMessage msg = GarlicMessageBuilder.buildMessage(getContext(), payload, sentKey, sentTags, - getContext().keyManager().getPublicKey(), - encryptKey, encryptTag); + SessionKey encryptKey = getContext().keyGenerator().generateSessionKey(); + SessionTag encryptTag = new SessionTag(true); + SessionKey sentKey = new SessionKey(); + Set sentTags = null; + GarlicMessage msg = GarlicMessageBuilder.buildMessage(getContext(), payload, sentKey, sentTags, + getContext().keyManager().getPublicKey(), + encryptKey, encryptTag); - Set encryptTags = new HashSet(1); - encryptTags.add(encryptTag); - getContext().sessionKeyManager().tagsReceived(encryptKey, encryptTags); - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Sending garlic test of " + _outTunnel + " / " + _replyTunnel); - getContext().tunnelDispatcher().dispatchOutbound(msg, _outTunnel.getSendTunnelId(0), - _replyTunnel.getReceiveTunnelId(0), - _replyTunnel.getPeer(0)); - } + Set encryptTags = new HashSet(1); + encryptTags.add(encryptTag); + getContext().sessionKeyManager().tagsReceived(encryptKey, encryptTags); + + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Sending garlic test of " + _outTunnel + " / " + _replyTunnel); + getContext().tunnelDispatcher().dispatchOutbound(msg, _outTunnel.getSendTunnelId(0), + _replyTunnel.getReceiveTunnelId(0), + _replyTunnel.getPeer(0)); } public void testSuccessful(int ms) { getContext().statManager().addRateData("tunnel.testSuccessLength", _cfg.getLength(), 0); getContext().statManager().addRateData("tunnel.testSuccessTime", ms, 0); + _outTunnel.incrementVerifiedBytesTransferred(1024); + // reply tunnel is marked in the inboundEndpointProcessor + //_replyTunnel.incrementVerifiedBytesTransferred(1024); + noteSuccess(ms, _outTunnel); noteSuccess(ms, _replyTunnel); @@ -169,7 +174,7 @@ class TestJob extends JobImpl { _log.warn("Tunnel test failed in " + timeToFail + "ms: " + _cfg); boolean keepGoing = _cfg.tunnelFailed(); if (keepGoing) { - scheduleRetest(); + scheduleRetest(true); } else { if (_pool.getSettings().isExploratory()) getContext().statManager().addRateData("tunnel.testExploratoryFailedCompletelyTime", timeToFail, timeToFail); @@ -182,12 +187,17 @@ class TestJob extends JobImpl { private int getDelay() { return TEST_DELAY + 
getContext().random().nextInt(TEST_DELAY); } /** how long we allow tests to run for before failing them */ private int getTestPeriod() { return 20*1000; } - private void scheduleRetest() { + private void scheduleRetest() { scheduleRetest(false); } + private void scheduleRetest(boolean asap) { _outTunnel = null; _replyTunnel = null; - int delay = getDelay(); - if (_cfg.getExpiration() > getContext().clock().now() + delay + (3 * getTestPeriod()) + 30*1000) - requeue(delay); + if (asap) { + requeue(getContext().random().nextInt(TEST_DELAY)); + } else { + int delay = getDelay(); + if (_cfg.getExpiration() > getContext().clock().now() + delay + (3 * getTestPeriod())) + requeue(delay); + } } private class ReplySelector implements MessageSelector { diff --git a/router/java/src/net/i2p/router/tunnel/pool/TunnelPool.java b/router/java/src/net/i2p/router/tunnel/pool/TunnelPool.java index 225f7c78a..5d7348558 100644 --- a/router/java/src/net/i2p/router/tunnel/pool/TunnelPool.java +++ b/router/java/src/net/i2p/router/tunnel/pool/TunnelPool.java @@ -350,7 +350,7 @@ public class TunnelPool { * */ private LeaseSet locked_buildNewLeaseSet() { - long expireAfter = _context.clock().now() + _settings.getRebuildPeriod(); + long expireAfter = _context.clock().now(); // + _settings.getRebuildPeriod(); LeaseSet ls = new LeaseSet(); for (int i = 0; i < _tunnels.size(); i++) { @@ -406,6 +406,10 @@ public class TunnelPool { * */ public int countHowManyToBuild() { + if (_settings.getDestination() != null) { + if (!_context.clientManager().isLocal(_settings.getDestination())) + return 0; + } int wanted = getSettings().getBackupQuantity() + getSettings().getQuantity(); boolean allowZeroHop = ((getSettings().getLength() + getSettings().getLengthVariance()) <= 0); @@ -612,6 +616,8 @@ public class TunnelPool { private List _inProgress = new ArrayList(); void buildComplete(PooledTunnelCreatorConfig cfg) { synchronized (_inProgress) { _inProgress.remove(cfg); } + cfg.setTunnelPool(this); + //_manager.buildComplete(cfg); } public String toString() { diff --git a/router/java/src/net/i2p/router/tunnel/pool/TunnelPoolManager.java b/router/java/src/net/i2p/router/tunnel/pool/TunnelPoolManager.java index 9c5ab345e..e63d4f476 100644 --- a/router/java/src/net/i2p/router/tunnel/pool/TunnelPoolManager.java +++ b/router/java/src/net/i2p/router/tunnel/pool/TunnelPoolManager.java @@ -12,16 +12,9 @@ import net.i2p.data.DataHelper; import net.i2p.data.Destination; import net.i2p.data.Hash; import net.i2p.data.TunnelId; -import net.i2p.data.i2np.TunnelCreateMessage; +import net.i2p.data.i2np.*; import net.i2p.stat.RateStat; -import net.i2p.router.ClientTunnelSettings; -import net.i2p.router.HandlerJobBuilder; -import net.i2p.router.JobImpl; -import net.i2p.router.LoadTestManager; -import net.i2p.router.RouterContext; -import net.i2p.router.TunnelInfo; -import net.i2p.router.TunnelManagerFacade; -import net.i2p.router.TunnelPoolSettings; +import net.i2p.router.*; import net.i2p.router.tunnel.HopConfig; import net.i2p.router.tunnel.TunnelCreatorConfig; import net.i2p.util.I2PThread; @@ -47,8 +40,8 @@ public class TunnelPoolManager implements TunnelManagerFacade { _context = ctx; _log = ctx.logManager().getLog(TunnelPoolManager.class); - HandlerJobBuilder builder = new HandleTunnelCreateMessageJob.Builder(ctx); - ctx.inNetMessagePool().registerHandlerJobBuilder(TunnelCreateMessage.MESSAGE_TYPE, builder); + //HandlerJobBuilder builder = new HandleTunnelCreateMessageJob.Builder(ctx); + 
//ctx.inNetMessagePool().registerHandlerJobBuilder(TunnelCreateMessage.MESSAGE_TYPE, builder); //HandlerJobBuilder b = new TunnelMessageHandlerBuilder(ctx); //ctx.inNetMessagePool().registerHandlerJobBuilder(TunnelGatewayMessage.MESSAGE_TYPE, b); //ctx.inNetMessagePool().registerHandlerJobBuilder(TunnelDataMessage.MESSAGE_TYPE, b); @@ -300,9 +293,34 @@ public class TunnelPoolManager implements TunnelManagerFacade { outbound.shutdown(); } - void buildComplete(TunnelCreatorConfig cfg) { + void buildComplete(PooledTunnelCreatorConfig cfg) { buildComplete(); _loadTestManager.addTunnelTestCandidate(cfg); + if (cfg.getLength() > 1) { + TunnelPool pool = cfg.getTunnelPool(); + if (pool == null) { + _log.error("How does this not have a pool? " + cfg, new Exception("baf")); + if (cfg.getDestination() != null) { + if (cfg.isInbound()) { + synchronized (_clientInboundPools) { + pool = (TunnelPool)_clientInboundPools.get(cfg.getDestination()); + } + } else { + synchronized (_clientOutboundPools) { + pool = (TunnelPool)_clientOutboundPools.get(cfg.getDestination()); + } + } + } else { + if (cfg.isInbound()) { + pool = _inboundExploratory; + } else { + pool = _outboundExploratory; + } + } + cfg.setTunnelPool(pool); + } + _context.jobQueue().addJob(new TestJob(_context, cfg, pool)); + } } void buildComplete() {} @@ -333,10 +351,6 @@ public class TunnelPoolManager implements TunnelManagerFacade { // try to build up longer tunnels _context.jobQueue().addJob(new BootstrapPool(_context, _inboundExploratory)); _context.jobQueue().addJob(new BootstrapPool(_context, _outboundExploratory)); - - if (Boolean.valueOf(_context.getProperty(PROP_LOAD_TEST, "true")).booleanValue()) { - _context.jobQueue().addJob(_loadTestManager.getTestJob()); - } } private class BootstrapPool extends JobImpl {