forked from I2P_Developers/i2p.i2p
propagate from branch 'i2p.i2p.unittests' (head 0c5ea65761d9127f160bccb3d1d157f8947ca050)
to branch 'i2p.i2p' (head e36d5669f32ad1a0f66ab84f7f9ff8fa2937680b)
This commit is contained in:
@@ -16,7 +16,7 @@ package net.i2p;
|
||||
public class CoreVersion {
|
||||
/** deprecated */
|
||||
public final static String ID = "Monotone";
|
||||
public final static String VERSION = "0.8.13";
|
||||
public final static String VERSION = "0.9.1";
|
||||
|
||||
public static void main(String args[]) {
|
||||
System.out.println("I2P Core version: " + VERSION);
|
||||
|
@@ -38,6 +38,9 @@ public interface I2PSession {
|
||||
/** Send a new message to the given destination, containing the specified
|
||||
* payload, returning true if the router feels confident that the message
|
||||
* was delivered.
|
||||
*
|
||||
* WARNING: It is recommended that you use a method that specifies the protocol and ports.
|
||||
*
|
||||
* @param dest location to send the message
|
||||
* @param payload body of the message to be sent (unencrypted)
|
||||
* @return whether it was accepted by the router for delivery or not
|
||||
@@ -149,6 +152,9 @@ public interface I2PSession {
|
||||
public void reportAbuse(int msgId, int severity) throws I2PSessionException;
|
||||
|
||||
/** Instruct the I2PSession where it should send event notifications
|
||||
*
|
||||
* WARNING: It is recommended that you use a method that specifies the protocol and ports.
|
||||
*
|
||||
* @param lsnr listener to retrieve events
|
||||
*/
|
||||
public void setSessionListener(I2PSessionListener lsnr);
|
||||
|
@@ -43,7 +43,8 @@ public class I2PSessionDemultiplexer implements I2PSessionMuxedListener {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("No listeners for incoming message");
|
||||
} else {
|
||||
_log.error("No listener found for proto: " + proto + " port: " + toport + " msg id: " + msgId +
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("No listener found for proto: " + proto + " port: " + toport + " msg id: " + msgId +
|
||||
" from pool of " + _listeners.size() + " listeners");
|
||||
}
|
||||
try {
|
||||
|
@@ -54,7 +54,7 @@ import net.i2p.util.SimpleTimer;
|
||||
* @author jrandom
|
||||
*/
|
||||
abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessageEventListener {
|
||||
protected Log _log;
|
||||
protected final Log _log;
|
||||
/** who we are */
|
||||
private Destination _myDestination;
|
||||
/** private key for decryption */
|
||||
@@ -104,16 +104,16 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
|
||||
protected I2PClientMessageHandlerMap _handlerMap;
|
||||
|
||||
/** used to seperate things out so we can get rid of singletons */
|
||||
protected I2PAppContext _context;
|
||||
protected final I2PAppContext _context;
|
||||
|
||||
/** monitor for waiting until a lease set has been granted */
|
||||
private final Object _leaseSetWait = new Object();
|
||||
|
||||
/** whether the session connection has already been closed (or not yet opened) */
|
||||
protected boolean _closed;
|
||||
protected volatile boolean _closed;
|
||||
|
||||
/** whether the session connection is in the process of being closed */
|
||||
protected boolean _closing;
|
||||
protected volatile boolean _closing;
|
||||
|
||||
/** have we received the current date from the router yet? */
|
||||
private boolean _dateReceived;
|
||||
@@ -121,7 +121,7 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
|
||||
private final Object _dateReceivedLock = new Object();
|
||||
|
||||
/** whether the session connection is in the process of being opened */
|
||||
protected boolean _opening;
|
||||
protected volatile boolean _opening;
|
||||
|
||||
/** monitor for waiting until opened */
|
||||
private final Object _openingWait = new Object();
|
||||
@@ -144,6 +144,8 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
|
||||
/** SSL interface (only) @since 0.8.3 */
|
||||
protected static final String PROP_ENABLE_SSL = "i2cp.SSL";
|
||||
|
||||
private static final long VERIFY_USAGE_TIME = 60*1000;
|
||||
|
||||
void dateUpdated() {
|
||||
_dateReceived = true;
|
||||
synchronized (_dateReceivedLock) {
|
||||
@@ -154,7 +156,14 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
|
||||
public static final int LISTEN_PORT = 7654;
|
||||
|
||||
/** for extension */
|
||||
public I2PSessionImpl() {}
|
||||
protected I2PSessionImpl(I2PAppContext context, Properties options) {
|
||||
_context = context;
|
||||
_log = context.logManager().getLog(getClass());
|
||||
_closed = true;
|
||||
if (options == null)
|
||||
options = System.getProperties();
|
||||
loadConfig(options);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new session, reading the Destination, PrivateKey, and SigningPrivateKey
|
||||
@@ -166,12 +175,8 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
|
||||
* @throws I2PSessionException if there is a problem loading the private keys or
|
||||
*/
|
||||
public I2PSessionImpl(I2PAppContext context, InputStream destKeyStream, Properties options) throws I2PSessionException {
|
||||
_context = context;
|
||||
_log = context.logManager().getLog(I2PSessionImpl.class);
|
||||
this(context, options);
|
||||
_handlerMap = new I2PClientMessageHandlerMap(context);
|
||||
_closed = true;
|
||||
_opening = false;
|
||||
_closing = false;
|
||||
_producer = new I2CPMessageProducer(context);
|
||||
_availabilityNotifier = new AvailabilityNotifier();
|
||||
_availableMessages = new ConcurrentHashMap();
|
||||
@@ -182,18 +187,13 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
|
||||
} catch (IOException ioe) {
|
||||
throw new I2PSessionException("Error reading the destination key stream", ioe);
|
||||
}
|
||||
if (options == null)
|
||||
options = System.getProperties();
|
||||
loadConfig(options);
|
||||
_sessionId = null;
|
||||
_leaseSet = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse the config for anything we know about.
|
||||
* Also fill in the authorization properties if missing.
|
||||
*/
|
||||
protected void loadConfig(Properties options) {
|
||||
private void loadConfig(Properties options) {
|
||||
_options = new Properties();
|
||||
_options.putAll(filter(options));
|
||||
if (_context.isRouterContext()) {
|
||||
@@ -405,6 +405,7 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
|
||||
+ (connected - startConnect)
|
||||
+ "ms - ready to participate in the network!");
|
||||
startIdleMonitor();
|
||||
startVerifyUsage();
|
||||
setOpening(false);
|
||||
} catch (UnknownHostException uhe) {
|
||||
_closed = true;
|
||||
@@ -469,16 +470,38 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info(getPrefix() + "Notified availability for session " + _sessionId + ", message " + id);
|
||||
}
|
||||
SimpleScheduler.getInstance().addEvent(new VerifyUsage(mid), 30*1000);
|
||||
}
|
||||
protected class VerifyUsage implements SimpleTimer.TimedEvent {
|
||||
private Long _msgId;
|
||||
public VerifyUsage(Long id) { _msgId = id; }
|
||||
|
||||
/**
|
||||
* Fire up a periodic task to check for unclamed messages
|
||||
* @since 0.9.1
|
||||
*/
|
||||
private void startVerifyUsage() {
|
||||
SimpleScheduler.getInstance().addEvent(new VerifyUsage(), VERIFY_USAGE_TIME);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check for unclaimed messages, without wastefully setting a timer for each
|
||||
* message. Just copy all unclaimed ones and check 30 seconds later.
|
||||
*/
|
||||
private class VerifyUsage implements SimpleTimer.TimedEvent {
|
||||
private final List<Long> toCheck = new ArrayList();
|
||||
|
||||
public void timeReached() {
|
||||
MessagePayloadMessage removed = _availableMessages.remove(_msgId);
|
||||
if (removed != null && !isClosed())
|
||||
_log.error("Message NOT removed! id=" + _msgId + ": " + removed);
|
||||
if (isClosed())
|
||||
return;
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug(getPrefix() + " VerifyUsage of " + toCheck.size());
|
||||
if (!toCheck.isEmpty()) {
|
||||
for (Long msgId : toCheck) {
|
||||
MessagePayloadMessage removed = _availableMessages.remove(msgId);
|
||||
if (removed != null)
|
||||
_log.error("Message NOT removed! id=" + msgId + ": " + removed);
|
||||
}
|
||||
toCheck.clear();
|
||||
}
|
||||
toCheck.addAll(_availableMessages.keySet());
|
||||
SimpleScheduler.getInstance().addEvent(this, VERIFY_USAGE_TIME);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -561,7 +584,7 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
|
||||
} else {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(getPrefix() + "Message received of type " + message.getType()
|
||||
+ " to be handled by " + handler);
|
||||
+ " to be handled by " + handler.getClass().getSimpleName());
|
||||
handler.handleMessage(message, this);
|
||||
}
|
||||
}
|
||||
|
@@ -28,6 +28,8 @@ import net.i2p.util.Log;
|
||||
/**
|
||||
* Thread safe implementation of an I2P session running over TCP.
|
||||
*
|
||||
* Unused directly, see I2PSessionMuxedImpl extension.
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
class I2PSessionImpl2 extends I2PSessionImpl {
|
||||
@@ -43,7 +45,9 @@ class I2PSessionImpl2 extends I2PSessionImpl {
|
||||
private boolean _noEffort;
|
||||
|
||||
/** for extension */
|
||||
public I2PSessionImpl2() {}
|
||||
protected I2PSessionImpl2(I2PAppContext context, Properties options) {
|
||||
super(context, options);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new session, reading the Destination, PrivateKey, and SigningPrivateKey
|
||||
@@ -56,7 +60,6 @@ class I2PSessionImpl2 extends I2PSessionImpl {
|
||||
*/
|
||||
public I2PSessionImpl2(I2PAppContext ctx, InputStream destKeyStream, Properties options) throws I2PSessionException {
|
||||
super(ctx, destKeyStream, options);
|
||||
_log = ctx.logManager().getLog(I2PSessionImpl2.class);
|
||||
_sendingStates = new HashSet(32);
|
||||
// default is BestEffort
|
||||
_noEffort = "none".equals(getOptions().getProperty(I2PClient.PROP_RELIABILITY, "").toLowerCase(Locale.US));
|
||||
@@ -296,10 +299,10 @@ class I2PSessionImpl2 extends I2PSessionImpl {
|
||||
}
|
||||
**********/
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG)) _log.debug("before creating nonce");
|
||||
//if (_log.shouldLog(Log.DEBUG)) _log.debug("before creating nonce");
|
||||
|
||||
long nonce = _context.random().nextInt(Integer.MAX_VALUE);
|
||||
if (_log.shouldLog(Log.DEBUG)) _log.debug("before sync state");
|
||||
//if (_log.shouldLog(Log.DEBUG)) _log.debug("before sync state");
|
||||
MessageState state = new MessageState(_context, nonce, getPrefix());
|
||||
//state.setKey(key);
|
||||
//state.setTags(sentTags);
|
||||
@@ -323,7 +326,7 @@ class I2PSessionImpl2 extends I2PSessionImpl {
|
||||
// }
|
||||
//}
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG)) _log.debug("before sync state");
|
||||
//if (_log.shouldLog(Log.DEBUG)) _log.debug("before sync state");
|
||||
long beforeSendingSync = _context.clock().now();
|
||||
long inSendingSync = 0;
|
||||
synchronized (_sendingStates) {
|
||||
|
@@ -64,7 +64,7 @@ import net.i2p.util.SimpleScheduler;
|
||||
*
|
||||
* @author zzz
|
||||
*/
|
||||
class I2PSessionMuxedImpl extends I2PSessionImpl2 implements I2PSession {
|
||||
class I2PSessionMuxedImpl extends I2PSessionImpl2 {
|
||||
|
||||
private final I2PSessionDemultiplexer _demultiplexer;
|
||||
|
||||
@@ -233,7 +233,6 @@ class I2PSessionMuxedImpl extends I2PSessionImpl2 implements I2PSession {
|
||||
}
|
||||
((MuxedAvailabilityNotifier)_availabilityNotifier).available(id, size, getProto(msg),
|
||||
getFromPort(msg), getToPort(msg));
|
||||
SimpleScheduler.getInstance().addEvent(new VerifyUsage(mid), 30*1000);
|
||||
}
|
||||
|
||||
protected class MuxedAvailabilityNotifier extends AvailabilityNotifier {
|
||||
|
@@ -34,15 +34,8 @@ class I2PSimpleSession extends I2PSessionImpl2 {
|
||||
* @throws I2PSessionException if there is a problem
|
||||
*/
|
||||
public I2PSimpleSession(I2PAppContext context, Properties options) throws I2PSessionException {
|
||||
// Warning, does not call super()
|
||||
_context = context;
|
||||
_log = context.logManager().getLog(I2PSimpleSession.class);
|
||||
super(context, options);
|
||||
_handlerMap = new SimpleMessageHandlerMap(context);
|
||||
_closed = true;
|
||||
_closing = false;
|
||||
if (options == null)
|
||||
options = System.getProperties();
|
||||
loadConfig(options);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -79,6 +72,7 @@ class I2PSimpleSession extends I2PSessionImpl2 {
|
||||
_reader = new I2CPMessageReader(in, this);
|
||||
}
|
||||
// we do not receive payload messages, so we do not need an AvailabilityNotifier
|
||||
// ... or an Idle timer, or a VerifyUsage
|
||||
_reader.startReading();
|
||||
|
||||
} catch (UnknownHostException uhe) {
|
||||
|
@@ -61,7 +61,8 @@ public class ElGamalAESEngine {
|
||||
}
|
||||
|
||||
/**
|
||||
* Decrypt the message using the given private key using tags from the default key manager.
|
||||
* Decrypt the message using the given private key using tags from the default key manager,
|
||||
* which is the router's key manager. Use extreme care if you aren't the router.
|
||||
*
|
||||
* @deprecated specify the key manager!
|
||||
*/
|
||||
@@ -75,6 +76,10 @@ public class ElGamalAESEngine {
|
||||
* This works according to the
|
||||
* ElGamal+AES algorithm in the data structure spec.
|
||||
*
|
||||
* Warning - use the correct SessionKeyManager. Clients should instantiate their own.
|
||||
* Clients using I2PAppContext.sessionKeyManager() may be correlated with the router,
|
||||
* unless you are careful to use different keys.
|
||||
*
|
||||
* @return decrypted data or null on failure
|
||||
*/
|
||||
public byte[] decrypt(byte data[], PrivateKey targetPrivateKey, SessionKeyManager keyManager) throws DataFormatException {
|
||||
@@ -100,7 +105,7 @@ public class ElGamalAESEngine {
|
||||
//if (_log.shouldLog(Log.DEBUG)) _log.debug("Key is known for tag " + st);
|
||||
long id = _context.random().nextLong();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(id + ": Decrypting existing session encrypted with tag: " + st.toString() + ": key: " + key.toBase64() + ": " + data.length + " bytes: " + Base64.encode(data, 0, 64));
|
||||
_log.debug(id + ": Decrypting existing session encrypted with tag: " + st.toString() + ": key: " + key.toBase64() + ": " + data.length + " bytes " /* + Base64.encode(data, 0, 64) */ );
|
||||
|
||||
decrypted = decryptExistingSession(data, key, targetPrivateKey, foundTags, usedKey, foundKey);
|
||||
if (decrypted != null) {
|
||||
@@ -389,7 +394,8 @@ public class ElGamalAESEngine {
|
||||
*
|
||||
* @param target public key to which the data should be encrypted.
|
||||
* @param key session key to use during encryption
|
||||
* @param tagsForDelivery session tags to be associated with the key (or newKey if specified), or null
|
||||
* @param tagsForDelivery session tags to be associated with the key (or newKey if specified), or null;
|
||||
* 200 max enforced at receiver
|
||||
* @param currentTag sessionTag to use, or null if it should use ElG (i.e. new session)
|
||||
* @param newKey key to be delivered to the target, with which the tagsForDelivery should be associated, or null
|
||||
* @param paddedSize minimum size in bytes of the body after padding it (if less than the
|
||||
@@ -410,7 +416,7 @@ public class ElGamalAESEngine {
|
||||
_context.statManager().updateFrequency("crypto.elGamalAES.encryptExistingSession");
|
||||
byte rv[] = encryptExistingSession(data, target, key, tagsForDelivery, currentTag, newKey, paddedSize);
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Existing session encrypted with tag: " + currentTag.toString() + ": " + rv.length + " bytes and key: " + key.toBase64() + ": " + Base64.encode(rv, 0, 64));
|
||||
_log.debug("Existing session encrypted with tag: " + currentTag.toString() + ": " + rv.length + " bytes and key: " + key.toBase64() /* + ": " + Base64.encode(rv, 0, 64) */);
|
||||
return rv;
|
||||
}
|
||||
|
||||
@@ -418,6 +424,30 @@ public class ElGamalAESEngine {
|
||||
* Encrypt the data to the target using the given key and deliver the specified tags
|
||||
* No new session key
|
||||
* This is the one called from GarlicMessageBuilder and is the primary entry point.
|
||||
*
|
||||
* Re: padded size: The AES block adds at least 39 bytes of overhead to the data, and
|
||||
* that is included in the minimum size calculation.
|
||||
*
|
||||
* In the router, we always use garlic messages. A garlic message with a single
|
||||
* clove and zero data is about 84 bytes, so that's 123 bytes minimum. So any paddingSize
|
||||
* <= 128 is a no-op as every message will be at least 128 bytes
|
||||
* (Streaming, if used, adds more overhead).
|
||||
*
|
||||
* Outside the router, with a client using its own message format, the minimum size
|
||||
* is 48, so any paddingSize <= 48 is a no-op.
|
||||
*
|
||||
* Not included in the minimum is a 32-byte session tag for an existing session,
|
||||
* or a 514-byte ElGamal block and several 32-byte session tags for a new session.
|
||||
* So the returned encrypted data will be at least 32 bytes larger than paddedSize.
|
||||
*
|
||||
* @param target public key to which the data should be encrypted.
|
||||
* @param key session key to use during encryption
|
||||
* @param tagsForDelivery session tags to be associated with the key or null;
|
||||
* 200 max enforced at receiver
|
||||
* @param currentTag sessionTag to use, or null if it should use ElG (i.e. new session)
|
||||
* @param paddedSize minimum size in bytes of the body after padding it (if less than the
|
||||
* body's real size, no bytes are appended but the body is not truncated)
|
||||
*
|
||||
*/
|
||||
public byte[] encrypt(byte data[], PublicKey target, SessionKey key, Set tagsForDelivery,
|
||||
SessionTag currentTag, long paddedSize) {
|
||||
@@ -599,7 +629,6 @@ public class ElGamalAESEngine {
|
||||
//_log.debug("Encrypting AES");
|
||||
if (tagsForDelivery == null) tagsForDelivery = Collections.EMPTY_SET;
|
||||
int size = 2 // sizeof(tags)
|
||||
+ tagsForDelivery.size()
|
||||
+ SessionTag.BYTE_LENGTH*tagsForDelivery.size()
|
||||
+ 4 // payload length
|
||||
+ Hash.HASH_LENGTH
|
||||
|
@@ -24,14 +24,14 @@ import net.i2p.data.SessionTag;
|
||||
* unknown (and hence always forces a full ElGamal encryption for each message).
|
||||
* A more intelligent subclass should manage and persist keys and tags.
|
||||
*
|
||||
* TODO if we aren't going to use this for testing, make it abstract.
|
||||
*/
|
||||
public class SessionKeyManager {
|
||||
/** session key managers must be created through an app context */
|
||||
protected SessionKeyManager(I2PAppContext context) { // nop
|
||||
}
|
||||
|
||||
/** see above */
|
||||
private SessionKeyManager() { // nop
|
||||
/**
|
||||
* Make this public if you need a dummy SessionKeyManager for testing
|
||||
*/
|
||||
protected SessionKeyManager(I2PAppContext context) { // nop
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -59,7 +59,8 @@ public class SessionKeyManager {
|
||||
* Associate a new session key with the specified target. Metrics to determine
|
||||
* when to expire that key begin with this call.
|
||||
*
|
||||
* @deprecated racy
|
||||
* Racy if called after getCurrentKey() to check for a current session;
|
||||
* use getCurrentOrNewKey() in that case.
|
||||
*/
|
||||
public void createSession(PublicKey target, SessionKey key) { // nop
|
||||
}
|
||||
@@ -67,7 +68,8 @@ public class SessionKeyManager {
|
||||
/**
|
||||
* Generate a new session key and associate it with the specified target.
|
||||
*
|
||||
* @deprecated racy
|
||||
* Racy if called after getCurrentKey() to check for a current session;
|
||||
* use getCurrentOrNewKey() in that case.
|
||||
*/
|
||||
public SessionKey createSession(PublicKey target) {
|
||||
SessionKey key = KeyGenerator.getInstance().generateSessionKey();
|
||||
@@ -86,6 +88,31 @@ public class SessionKeyManager {
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* How many to send, IF we need to.
|
||||
* @since 0.9.2
|
||||
*/
|
||||
public int getTagsToSend() { return 0; };
|
||||
|
||||
/**
|
||||
* @since 0.9.2
|
||||
*/
|
||||
public int getLowThreshold() { return 0; };
|
||||
|
||||
/**
|
||||
* @return true if we have less than the threshold or what we have is about to expire
|
||||
* @since 0.9.2
|
||||
*/
|
||||
public boolean shouldSendTags(PublicKey target, SessionKey key) {
|
||||
return shouldSendTags(target, key, getLowThreshold());
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if we have less than the threshold or what we have is about to expire
|
||||
* @since 0.9.2
|
||||
*/
|
||||
public boolean shouldSendTags(PublicKey target, SessionKey key, int lowThreshold) { return false; }
|
||||
|
||||
/**
|
||||
* Determine (approximately) how many available session tags for the current target
|
||||
* have been confirmed and are available
|
||||
|
@@ -85,27 +85,74 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
/** for debugging */
|
||||
private final AtomicInteger _rcvTagSetID = new AtomicInteger();
|
||||
private final AtomicInteger _sentTagSetID = new AtomicInteger();
|
||||
private final int _tagsToSend;
|
||||
private final int _lowThreshold;
|
||||
|
||||
/**
|
||||
* Let session tags sit around for 10 minutes before expiring them. We can now have such a large
|
||||
* Let session tags sit around for this long before expiring them. We can now have such a large
|
||||
* value since there is the persistent session key manager. This value is for outbound tags -
|
||||
* inbound tags are managed by SESSION_LIFETIME_MAX_MS
|
||||
*
|
||||
*/
|
||||
public final static long SESSION_TAG_DURATION_MS = 10 * 60 * 1000;
|
||||
private final static long SESSION_TAG_DURATION_MS = 12 * 60 * 1000;
|
||||
|
||||
/**
|
||||
* Keep unused inbound session tags around for up to 12 minutes (2 minutes longer than
|
||||
* Keep unused inbound session tags around for this long (a few minutes longer than
|
||||
* session tags are used on the outbound side so that no reasonable network lag
|
||||
* can cause failed decrypts)
|
||||
*
|
||||
*/
|
||||
public final static long SESSION_LIFETIME_MAX_MS = SESSION_TAG_DURATION_MS + 5 * 60 * 1000;
|
||||
private final static long SESSION_LIFETIME_MAX_MS = SESSION_TAG_DURATION_MS + 3 * 60 * 1000;
|
||||
|
||||
/**
|
||||
* Time to send more if we are this close to expiration
|
||||
*/
|
||||
private static final long SESSION_TAG_EXPIRATION_WINDOW = 90 * 1000;
|
||||
|
||||
/**
|
||||
* a few MB? how about 16MB!
|
||||
* This is the max size of _inboundTagSets.
|
||||
*/
|
||||
public final static int MAX_INBOUND_SESSION_TAGS = 500 * 1000; // this will consume at most a few MB
|
||||
|
||||
/**
|
||||
* This was 100 since 0.6.1.10 (50 before that). It's important because:
|
||||
* <pre>
|
||||
* - Tags are 32 bytes. So it previously added 3200 bytes to an initial message.
|
||||
* - Too many tags adds a huge overhead to short-duration connections
|
||||
* (like http, datagrams, etc.)
|
||||
* - Large messages have a much higher chance of being dropped due to
|
||||
* one of their 1KB fragments being discarded by a tunnel participant.
|
||||
* - This reduces the effective maximum datagram size because the client
|
||||
* doesn't know when tags will be bundled, so the tag size must be
|
||||
* subtracted from the maximum I2NP size or transport limit.
|
||||
* </pre>
|
||||
*
|
||||
* Issues with too small a value:
|
||||
* <pre>
|
||||
* - When tags are sent, a reply leaseset (~1KB) is always bundled.
|
||||
* Maybe don't need to bundle more than every minute or so
|
||||
* rather than every time?
|
||||
* - Does the number of tags (and the threshold of 20) limit the effective
|
||||
* streaming lib window size? Should the threshold and the number of
|
||||
* sent tags be variable based on the message rate?
|
||||
* </pre>
|
||||
*
|
||||
* We have to be very careful if we implement an adaptive scheme,
|
||||
* since the key manager is per-router, not per-local-dest.
|
||||
* Or maybe that's a bad idea, and we need to move to a per-dest manager.
|
||||
* This needs further investigation.
|
||||
*
|
||||
* So a value somewhat higher than the low threshold
|
||||
* seems appropriate.
|
||||
*
|
||||
* Use care when adjusting these values. See ConnectionOptions in streaming,
|
||||
* and TransientSessionKeyManager in crypto, for more information.
|
||||
*
|
||||
* @since 0.9.2 moved from GarlicMessageBuilder to per-SKM config
|
||||
*/
|
||||
public static final int DEFAULT_TAGS = 40;
|
||||
/** ditto */
|
||||
public static final int LOW_THRESHOLD = 30;
|
||||
|
||||
/**
|
||||
* The session key manager should only be constructed and accessed through the
|
||||
* application context. This constructor should only be used by the
|
||||
@@ -113,11 +160,24 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
*
|
||||
*/
|
||||
public TransientSessionKeyManager(I2PAppContext context) {
|
||||
this(context, DEFAULT_TAGS, LOW_THRESHOLD);
|
||||
}
|
||||
|
||||
/**
|
||||
* @param tagsToSend how many to send at a time, may be lower or higher than lowThreshold. 1-128
|
||||
* @param lowThreshold below this, send more. 1-128
|
||||
* @since 0.9.2
|
||||
*/
|
||||
public TransientSessionKeyManager(I2PAppContext context, int tagsToSend, int lowThreshold) {
|
||||
super(context);
|
||||
if (tagsToSend <= 0 || tagsToSend > 128 || lowThreshold <= 0 || lowThreshold > 128)
|
||||
throw new IllegalArgumentException();
|
||||
_tagsToSend = tagsToSend;
|
||||
_lowThreshold = lowThreshold;
|
||||
_log = context.logManager().getLog(TransientSessionKeyManager.class);
|
||||
_context = context;
|
||||
_outboundSessions = new HashMap(64);
|
||||
_inboundTagSets = new HashMap(1024);
|
||||
_inboundTagSets = new HashMap(128);
|
||||
context.statManager().createRateStat("crypto.sessionTagsExpired", "How many tags/sessions are expired?", "Encryption", new long[] { 10*60*1000, 60*60*1000, 3*60*60*1000 });
|
||||
context.statManager().createRateStat("crypto.sessionTagsRemaining", "How many tags/sessions are remaining after a cleanup?", "Encryption", new long[] { 10*60*1000, 60*60*1000, 3*60*60*1000 });
|
||||
_alive = true;
|
||||
@@ -243,7 +303,8 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
* Associate a new session key with the specified target. Metrics to determine
|
||||
* when to expire that key begin with this call.
|
||||
*
|
||||
* @deprecated racy
|
||||
* Racy if called after getCurrentKey() to check for a current session;
|
||||
* use getCurrentOrNewKey() in that case.
|
||||
*/
|
||||
@Override
|
||||
public void createSession(PublicKey target, SessionKey key) {
|
||||
@@ -291,6 +352,31 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* How many to send, IF we need to.
|
||||
* @return the configured value (not adjusted for current available)
|
||||
* @since 0.9.2
|
||||
*/
|
||||
@Override
|
||||
public int getTagsToSend() { return _tagsToSend; };
|
||||
|
||||
/**
|
||||
* @return the configured value
|
||||
* @since 0.9.2
|
||||
*/
|
||||
@Override
|
||||
public int getLowThreshold() { return _lowThreshold; };
|
||||
|
||||
/**
|
||||
* @return true if we have less than the threshold or what we have is about to expire
|
||||
* @since 0.9.2
|
||||
*/
|
||||
@Override
|
||||
public boolean shouldSendTags(PublicKey target, SessionKey key, int lowThreshold) {
|
||||
return getAvailableTags(target, key) < lowThreshold ||
|
||||
getAvailableTimeLeft(target, key) < SESSION_TAG_EXPIRATION_WINDOW;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine (approximately) how many available session tags for the current target
|
||||
* have been confirmed and are available
|
||||
|
@@ -45,17 +45,21 @@ public class RoutingKeyGenerator {
|
||||
public RoutingKeyGenerator(I2PAppContext context) {
|
||||
_log = context.logManager().getLog(RoutingKeyGenerator.class);
|
||||
_context = context;
|
||||
// ensure non-null mod data
|
||||
generateDateBasedModData();
|
||||
}
|
||||
|
||||
public static RoutingKeyGenerator getInstance() {
|
||||
return I2PAppContext.getGlobalContext().routingKeyGenerator();
|
||||
}
|
||||
|
||||
private byte _currentModData[];
|
||||
private long _lastChanged;
|
||||
private volatile byte _currentModData[];
|
||||
private volatile long _lastChanged;
|
||||
|
||||
private final static Calendar _cal = GregorianCalendar.getInstance(TimeZone.getTimeZone("GMT"));
|
||||
private final static SimpleDateFormat _fmt = new SimpleDateFormat("yyyyMMdd");
|
||||
private static final String FORMAT = "yyyyMMdd";
|
||||
private static final int LENGTH = FORMAT.length();
|
||||
private final static SimpleDateFormat _fmt = new SimpleDateFormat(FORMAT);
|
||||
|
||||
public byte[] getModData() {
|
||||
return _currentModData;
|
||||
@@ -65,20 +69,14 @@ public class RoutingKeyGenerator {
|
||||
return _lastChanged;
|
||||
}
|
||||
|
||||
public void setModData(byte modData[]) {
|
||||
_currentModData = modData;
|
||||
_lastChanged = _context.clock().now();
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the current modifier data with some bytes derived from the current
|
||||
* date (yyyyMMdd in GMT)
|
||||
*
|
||||
* @return true if changed
|
||||
*/
|
||||
public void generateDateBasedModData() {
|
||||
Date today = null;
|
||||
public synchronized boolean generateDateBasedModData() {
|
||||
long now = _context.clock().now();
|
||||
synchronized (_cal) {
|
||||
_cal.setTime(new Date(now));
|
||||
_cal.set(Calendar.YEAR, _cal.get(Calendar.YEAR)); // gcj <= 4.0 workaround
|
||||
_cal.set(Calendar.DAY_OF_YEAR, _cal.get(Calendar.DAY_OF_YEAR)); // gcj <= 4.0 workaround
|
||||
@@ -86,20 +84,22 @@ public class RoutingKeyGenerator {
|
||||
_cal.set(Calendar.MINUTE, 0);
|
||||
_cal.set(Calendar.SECOND, 0);
|
||||
_cal.set(Calendar.MILLISECOND, 0);
|
||||
today = _cal.getTime();
|
||||
}
|
||||
Date today = _cal.getTime();
|
||||
|
||||
byte mod[] = null;
|
||||
String modVal = null;
|
||||
synchronized (_fmt) {
|
||||
modVal = _fmt.format(today);
|
||||
}
|
||||
mod = new byte[modVal.length()];
|
||||
for (int i = 0; i < modVal.length(); i++)
|
||||
String modVal = _fmt.format(today);
|
||||
if (modVal.length() != LENGTH)
|
||||
throw new IllegalStateException();
|
||||
byte[] mod = new byte[LENGTH];
|
||||
for (int i = 0; i < LENGTH; i++)
|
||||
mod[i] = (byte)(modVal.charAt(i) & 0xFF);
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Routing modifier generated: " + modVal);
|
||||
setModData(mod);
|
||||
boolean changed = !DataHelper.eq(_currentModData, mod);
|
||||
if (changed) {
|
||||
_currentModData = mod;
|
||||
_lastChanged = now;
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Routing modifier generated: " + modVal);
|
||||
}
|
||||
return changed;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -113,10 +113,9 @@ public class RoutingKeyGenerator {
|
||||
*/
|
||||
public Hash getRoutingKey(Hash origKey) {
|
||||
if (origKey == null) throw new IllegalArgumentException("Original key is null");
|
||||
if (_currentModData == null) generateDateBasedModData();
|
||||
byte modVal[] = new byte[Hash.HASH_LENGTH + _currentModData.length];
|
||||
byte modVal[] = new byte[Hash.HASH_LENGTH + LENGTH];
|
||||
System.arraycopy(origKey.getData(), 0, modVal, 0, Hash.HASH_LENGTH);
|
||||
System.arraycopy(_currentModData, 0, modVal, Hash.HASH_LENGTH, _currentModData.length);
|
||||
System.arraycopy(_currentModData, 0, modVal, Hash.HASH_LENGTH, LENGTH);
|
||||
return SHA256Generator.getInstance().calculateHash(modVal);
|
||||
}
|
||||
|
||||
|
@@ -13,7 +13,7 @@ import net.i2p.crypto.KeyGenerator;
|
||||
|
||||
/**
|
||||
* Defines the SigningPrivateKey as defined by the I2P data structure spec.
|
||||
* A private key is 256byte Integer. The private key represents only the
|
||||
* A signing private key is 20 byte Integer. The private key represents only the
|
||||
* exponent, not the primes, which are constant and defined in the crypto spec.
|
||||
* This key varies from the PrivateKey in its usage (signing, not decrypting)
|
||||
*
|
||||
|
@@ -14,7 +14,7 @@ import java.io.IOException;
|
||||
|
||||
/**
|
||||
* Defines the SigningPublicKey as defined by the I2P data structure spec.
|
||||
* A public key is 256byte Integer. The public key represents only the
|
||||
* A signing public key is 128 byte Integer. The public key represents only the
|
||||
* exponent, not the primes, which are constant and defined in the crypto spec.
|
||||
* This key varies from the PrivateKey in its usage (verifying signatures, not encrypting)
|
||||
*
|
||||
|
@@ -1,209 +0,0 @@
|
||||
package net.i2p.time;
|
||||
/*
|
||||
* Copyright (c) 2004, Adam Buckley
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
* - Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
* - Neither the name of Adam Buckley nor the names of its contributors may be
|
||||
* used to endorse or promote products derived from this software without
|
||||
* specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
import java.io.IOException;
|
||||
import java.io.InterruptedIOException;
|
||||
import java.net.DatagramPacket;
|
||||
import java.net.DatagramSocket;
|
||||
import java.net.InetAddress;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
|
||||
|
||||
/**
|
||||
* NtpClient - an NTP client for Java. This program connects to an NTP server
|
||||
* and prints the response to the console.
|
||||
*
|
||||
* The local clock offset calculation is implemented according to the SNTP
|
||||
* algorithm specified in RFC 2030.
|
||||
*
|
||||
* Note that on windows platforms, the curent time-of-day timestamp is limited
|
||||
* to an resolution of 10ms and adversely affects the accuracy of the results.
|
||||
*
|
||||
* @author Adam Buckley
|
||||
* (minor refactoring by jrandom)
|
||||
*/
|
||||
public class NtpClient {
|
||||
/** difference between the unix epoch and jan 1 1900 (NTP uses that) */
|
||||
private final static double SECONDS_1900_TO_EPOCH = 2208988800.0;
|
||||
private final static int NTP_PORT = 123;
|
||||
|
||||
/**
|
||||
* Query the ntp servers, returning the current time from first one we find
|
||||
*
|
||||
* @return milliseconds since january 1, 1970 (UTC)
|
||||
* @throws IllegalArgumentException if none of the servers are reachable
|
||||
*/
|
||||
public static long currentTime(String serverNames[]) {
|
||||
if (serverNames == null)
|
||||
throw new IllegalArgumentException("No NTP servers specified");
|
||||
ArrayList names = new ArrayList(serverNames.length);
|
||||
for (int i = 0; i < serverNames.length; i++)
|
||||
names.add(serverNames[i]);
|
||||
Collections.shuffle(names);
|
||||
for (int i = 0; i < names.size(); i++) {
|
||||
long now = currentTime((String)names.get(i));
|
||||
if (now > 0)
|
||||
return now;
|
||||
}
|
||||
throw new IllegalArgumentException("No reachable NTP servers specified");
|
||||
}
|
||||
|
||||
/**
|
||||
* Query the ntp servers, returning the current time from first one we find
|
||||
* Hack to return time and stratum
|
||||
* @return time in rv[0] and stratum in rv[1]
|
||||
* @throws IllegalArgumentException if none of the servers are reachable
|
||||
* @since 0.7.12
|
||||
*/
|
||||
public static long[] currentTimeAndStratum(String serverNames[]) {
|
||||
if (serverNames == null)
|
||||
throw new IllegalArgumentException("No NTP servers specified");
|
||||
ArrayList names = new ArrayList(serverNames.length);
|
||||
for (int i = 0; i < serverNames.length; i++)
|
||||
names.add(serverNames[i]);
|
||||
Collections.shuffle(names);
|
||||
for (int i = 0; i < names.size(); i++) {
|
||||
long[] rv = currentTimeAndStratum((String)names.get(i));
|
||||
if (rv != null && rv[0] > 0)
|
||||
return rv;
|
||||
}
|
||||
throw new IllegalArgumentException("No reachable NTP servers specified");
|
||||
}
|
||||
|
||||
/**
|
||||
* Query the given NTP server, returning the current internet time
|
||||
*
|
||||
* @return milliseconds since january 1, 1970 (UTC), or -1 on error
|
||||
*/
|
||||
public static long currentTime(String serverName) {
|
||||
long[] la = currentTimeAndStratum(serverName);
|
||||
if (la != null)
|
||||
return la[0];
|
||||
return -1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Hack to return time and stratum
|
||||
* @return time in rv[0] and stratum in rv[1], or null for error
|
||||
* @since 0.7.12
|
||||
*/
|
||||
private static long[] currentTimeAndStratum(String serverName) {
|
||||
try {
|
||||
// Send request
|
||||
DatagramSocket socket = new DatagramSocket();
|
||||
InetAddress address = InetAddress.getByName(serverName);
|
||||
byte[] buf = new NtpMessage().toByteArray();
|
||||
DatagramPacket packet = new DatagramPacket(buf, buf.length, address, NTP_PORT);
|
||||
|
||||
// Set the transmit timestamp *just* before sending the packet
|
||||
// ToDo: Does this actually improve performance or not?
|
||||
NtpMessage.encodeTimestamp(packet.getData(), 40,
|
||||
(System.currentTimeMillis()/1000.0)
|
||||
+ SECONDS_1900_TO_EPOCH);
|
||||
|
||||
socket.send(packet);
|
||||
|
||||
// Get response
|
||||
packet = new DatagramPacket(buf, buf.length);
|
||||
socket.setSoTimeout(10*1000);
|
||||
try {
|
||||
socket.receive(packet);
|
||||
} catch (InterruptedIOException iie) {
|
||||
socket.close();
|
||||
return null;
|
||||
}
|
||||
|
||||
// Immediately record the incoming timestamp
|
||||
double destinationTimestamp = (System.currentTimeMillis()/1000.0) + SECONDS_1900_TO_EPOCH;
|
||||
|
||||
// Process response
|
||||
NtpMessage msg = new NtpMessage(packet.getData());
|
||||
|
||||
//double roundTripDelay = (destinationTimestamp-msg.originateTimestamp) -
|
||||
// (msg.receiveTimestamp-msg.transmitTimestamp);
|
||||
double localClockOffset = ((msg.receiveTimestamp - msg.originateTimestamp) +
|
||||
(msg.transmitTimestamp - destinationTimestamp)) / 2;
|
||||
socket.close();
|
||||
|
||||
// Stratum must be between 1 (atomic) and 15 (maximum defined value)
|
||||
// Anything else is right out, treat such responses like errors
|
||||
if ((msg.stratum < 1) || (msg.stratum > 15)) {
|
||||
//System.out.println("Response from NTP server of unacceptable stratum " + msg.stratum + ", failing.");
|
||||
return null;
|
||||
}
|
||||
|
||||
long[] rv = new long[2];
|
||||
rv[0] = (long)(System.currentTimeMillis() + localClockOffset*1000);
|
||||
rv[1] = msg.stratum;
|
||||
//System.out.println("host: " + address.getHostAddress() + " rtt: " + roundTripDelay + " offset: " + localClockOffset + " seconds");
|
||||
return rv;
|
||||
} catch (IOException ioe) {
|
||||
//ioe.printStackTrace();
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
public static void main(String[] args) throws IOException {
|
||||
// Process command-line args
|
||||
if(args.length <= 0) {
|
||||
printUsage();
|
||||
return;
|
||||
// args = new String[] { "ntp1.sth.netnod.se", "ntp2.sth.netnod.se" };
|
||||
}
|
||||
|
||||
long now = currentTime(args);
|
||||
System.out.println("Current time: " + new java.util.Date(now));
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Prints usage
|
||||
*/
|
||||
static void printUsage() {
|
||||
System.out.println(
|
||||
"NtpClient - an NTP client for Java.\n" +
|
||||
"\n" +
|
||||
"This program connects to an NTP server and prints the current time to the console.\n" +
|
||||
"\n" +
|
||||
"\n" +
|
||||
"Usage: java NtpClient server[ server]*\n" +
|
||||
"\n" +
|
||||
"\n" +
|
||||
"This program is copyright (c) Adam Buckley 2004 and distributed under the terms\n" +
|
||||
"of the GNU General Public License. This program is distributed in the hope\n" +
|
||||
"that it will be useful, but WITHOUT ANY WARRANTY; without even the implied\n" +
|
||||
"warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n" +
|
||||
"General Public License available at http://www.gnu.org/licenses/gpl.html for\n" +
|
||||
"more details.");
|
||||
|
||||
}
|
||||
}
|
@@ -1,467 +0,0 @@
|
||||
package net.i2p.time;
|
||||
/*
|
||||
* Copyright (c) 2004, Adam Buckley
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
* - Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
* - Neither the name of Adam Buckley nor the names of its contributors may be
|
||||
* used to endorse or promote products derived from this software without
|
||||
* specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
import java.text.DecimalFormat;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.util.Date;
|
||||
|
||||
import net.i2p.util.RandomSource;
|
||||
|
||||
/**
|
||||
* This class represents a NTP message, as specified in RFC 2030. The message
|
||||
* format is compatible with all versions of NTP and SNTP.
|
||||
*
|
||||
* This class does not support the optional authentication protocol, and
|
||||
* ignores the key ID and message digest fields.
|
||||
*
|
||||
* For convenience, this class exposes message values as native Java types, not
|
||||
* the NTP-specified data formats. For example, timestamps are
|
||||
* stored as doubles (as opposed to the NTP unsigned 64-bit fixed point
|
||||
* format).
|
||||
*
|
||||
* However, the contructor NtpMessage(byte[]) and the method toByteArray()
|
||||
* allow the import and export of the raw NTP message format.
|
||||
*
|
||||
*
|
||||
* Usage example
|
||||
*
|
||||
* // Send message
|
||||
* DatagramSocket socket = new DatagramSocket();
|
||||
* InetAddress address = InetAddress.getByName("ntp.cais.rnp.br");
|
||||
* byte[] buf = new NtpMessage().toByteArray();
|
||||
* DatagramPacket packet = new DatagramPacket(buf, buf.length, address, 123);
|
||||
* socket.send(packet);
|
||||
*
|
||||
* // Get response
|
||||
* socket.receive(packet);
|
||||
* System.out.println(msg.toString());
|
||||
*
|
||||
* Comments for member variables are taken from RFC2030 by David Mills,
|
||||
* University of Delaware.
|
||||
*
|
||||
* Number format conversion code in NtpMessage(byte[] array) and toByteArray()
|
||||
* inspired by http://www.pps.jussieu.fr/~jch/enseignement/reseaux/
|
||||
* NTPMessage.java which is copyright (c) 2003 by Juliusz Chroboczek
|
||||
*
|
||||
* @author Adam Buckley
|
||||
*/
|
||||
public class NtpMessage {
|
||||
/**
|
||||
* This is a two-bit code warning of an impending leap second to be
|
||||
* inserted/deleted in the last minute of the current day. It's values
|
||||
* may be as follows:
|
||||
*
|
||||
* Value Meaning
|
||||
* ----- -------
|
||||
* 0 no warning
|
||||
* 1 last minute has 61 seconds
|
||||
* 2 last minute has 59 seconds)
|
||||
* 3 alarm condition (clock not synchronized)
|
||||
*/
|
||||
public byte leapIndicator = 0;
|
||||
|
||||
|
||||
/**
|
||||
* This value indicates the NTP/SNTP version number. The version number
|
||||
* is 3 for Version 3 (IPv4 only) and 4 for Version 4 (IPv4, IPv6 and OSI).
|
||||
* If necessary to distinguish between IPv4, IPv6 and OSI, the
|
||||
* encapsulating context must be inspected.
|
||||
*/
|
||||
public byte version = 3;
|
||||
|
||||
|
||||
/**
|
||||
* This value indicates the mode, with values defined as follows:
|
||||
*
|
||||
* Mode Meaning
|
||||
* ---- -------
|
||||
* 0 reserved
|
||||
* 1 symmetric active
|
||||
* 2 symmetric passive
|
||||
* 3 client
|
||||
* 4 server
|
||||
* 5 broadcast
|
||||
* 6 reserved for NTP control message
|
||||
* 7 reserved for private use
|
||||
*
|
||||
* In unicast and anycast modes, the client sets this field to 3 (client)
|
||||
* in the request and the server sets it to 4 (server) in the reply. In
|
||||
* multicast mode, the server sets this field to 5 (broadcast).
|
||||
*/
|
||||
public byte mode = 0;
|
||||
|
||||
|
||||
/**
|
||||
* This value indicates the stratum level of the local clock, with values
|
||||
* defined as follows:
|
||||
*
|
||||
* Stratum Meaning
|
||||
* ----------------------------------------------
|
||||
* 0 unspecified or unavailable
|
||||
* 1 primary reference (e.g., radio clock)
|
||||
* 2-15 secondary reference (via NTP or SNTP)
|
||||
* 16-255 reserved
|
||||
*/
|
||||
public short stratum = 0;
|
||||
|
||||
|
||||
/**
|
||||
* This value indicates the maximum interval between successive messages,
|
||||
* in seconds to the nearest power of two. The values that can appear in
|
||||
* this field presently range from 4 (16 s) to 14 (16284 s); however, most
|
||||
* applications use only the sub-range 6 (64 s) to 10 (1024 s).
|
||||
*/
|
||||
public byte pollInterval = 0;
|
||||
|
||||
|
||||
/**
|
||||
* This value indicates the precision of the local clock, in seconds to
|
||||
* the nearest power of two. The values that normally appear in this field
|
||||
* range from -6 for mains-frequency clocks to -20 for microsecond clocks
|
||||
* found in some workstations.
|
||||
*/
|
||||
public byte precision = 0;
|
||||
|
||||
|
||||
/**
|
||||
* This value indicates the total roundtrip delay to the primary reference
|
||||
* source, in seconds. Note that this variable can take on both positive
|
||||
* and negative values, depending on the relative time and frequency
|
||||
* offsets. The values that normally appear in this field range from
|
||||
* negative values of a few milliseconds to positive values of several
|
||||
* hundred milliseconds.
|
||||
*/
|
||||
public double rootDelay = 0;
|
||||
|
||||
|
||||
/**
|
||||
* This value indicates the nominal error relative to the primary reference
|
||||
* source, in seconds. The values that normally appear in this field
|
||||
* range from 0 to several hundred milliseconds.
|
||||
*/
|
||||
public double rootDispersion = 0;
|
||||
|
||||
|
||||
/**
|
||||
* This is a 4-byte array identifying the particular reference source.
|
||||
* In the case of NTP Version 3 or Version 4 stratum-0 (unspecified) or
|
||||
* stratum-1 (primary) servers, this is a four-character ASCII string, left
|
||||
* justified and zero padded to 32 bits. In NTP Version 3 secondary
|
||||
* servers, this is the 32-bit IPv4 address of the reference source. In NTP
|
||||
* Version 4 secondary servers, this is the low order 32 bits of the latest
|
||||
* transmit timestamp of the reference source. NTP primary (stratum 1)
|
||||
* servers should set this field to a code identifying the external
|
||||
* reference source according to the following list. If the external
|
||||
* reference is one of those listed, the associated code should be used.
|
||||
* Codes for sources not listed can be contrived as appropriate.
|
||||
*
|
||||
* Code External Reference Source
|
||||
* ---- -------------------------
|
||||
* LOCL uncalibrated local clock used as a primary reference for
|
||||
* a subnet without external means of synchronization
|
||||
* PPS atomic clock or other pulse-per-second source
|
||||
* individually calibrated to national standards
|
||||
* ACTS NIST dialup modem service
|
||||
* USNO USNO modem service
|
||||
* PTB PTB (Germany) modem service
|
||||
* TDF Allouis (France) Radio 164 kHz
|
||||
* DCF Mainflingen (Germany) Radio 77.5 kHz
|
||||
* MSF Rugby (UK) Radio 60 kHz
|
||||
* WWV Ft. Collins (US) Radio 2.5, 5, 10, 15, 20 MHz
|
||||
* WWVB Boulder (US) Radio 60 kHz
|
||||
* WWVH Kaui Hawaii (US) Radio 2.5, 5, 10, 15 MHz
|
||||
* CHU Ottawa (Canada) Radio 3330, 7335, 14670 kHz
|
||||
* LORC LORAN-C radionavigation system
|
||||
* OMEG OMEGA radionavigation system
|
||||
* GPS Global Positioning Service
|
||||
* GOES Geostationary Orbit Environment Satellite
|
||||
*/
|
||||
public byte[] referenceIdentifier = {0, 0, 0, 0};
|
||||
|
||||
|
||||
/**
|
||||
* This is the time at which the local clock was last set or corrected, in
|
||||
* seconds since 00:00 1-Jan-1900.
|
||||
*/
|
||||
public double referenceTimestamp = 0;
|
||||
|
||||
|
||||
/**
|
||||
* This is the time at which the request departed the client for the
|
||||
* server, in seconds since 00:00 1-Jan-1900.
|
||||
*/
|
||||
public double originateTimestamp = 0;
|
||||
|
||||
|
||||
/**
|
||||
* This is the time at which the request arrived at the server, in seconds
|
||||
* since 00:00 1-Jan-1900.
|
||||
*/
|
||||
public double receiveTimestamp = 0;
|
||||
|
||||
|
||||
/**
|
||||
* This is the time at which the reply departed the server for the client,
|
||||
* in seconds since 00:00 1-Jan-1900.
|
||||
*/
|
||||
public double transmitTimestamp = 0;
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Constructs a new NtpMessage from an array of bytes.
|
||||
*/
|
||||
public NtpMessage(byte[] array) {
|
||||
// See the packet format diagram in RFC 2030 for details
|
||||
leapIndicator = (byte) ((array[0] >> 6) & 0x3);
|
||||
version = (byte) ((array[0] >> 3) & 0x7);
|
||||
mode = (byte) (array[0] & 0x7);
|
||||
stratum = unsignedByteToShort(array[1]);
|
||||
pollInterval = array[2];
|
||||
precision = array[3];
|
||||
|
||||
rootDelay = (array[4] * 256.0) +
|
||||
unsignedByteToShort(array[5]) +
|
||||
(unsignedByteToShort(array[6]) / 256.0) +
|
||||
(unsignedByteToShort(array[7]) / 65536.0);
|
||||
|
||||
rootDispersion = (unsignedByteToShort(array[8]) * 256.0) +
|
||||
unsignedByteToShort(array[9]) +
|
||||
(unsignedByteToShort(array[10]) / 256.0) +
|
||||
(unsignedByteToShort(array[11]) / 65536.0);
|
||||
|
||||
referenceIdentifier[0] = array[12];
|
||||
referenceIdentifier[1] = array[13];
|
||||
referenceIdentifier[2] = array[14];
|
||||
referenceIdentifier[3] = array[15];
|
||||
|
||||
referenceTimestamp = decodeTimestamp(array, 16);
|
||||
originateTimestamp = decodeTimestamp(array, 24);
|
||||
receiveTimestamp = decodeTimestamp(array, 32);
|
||||
transmitTimestamp = decodeTimestamp(array, 40);
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Constructs a new NtpMessage in client -> server mode, and sets the
|
||||
* transmit timestamp to the current time.
|
||||
*/
|
||||
public NtpMessage() {
|
||||
// Note that all the other member variables are already set with
|
||||
// appropriate default values.
|
||||
this.mode = 3;
|
||||
this.transmitTimestamp = (System.currentTimeMillis()/1000.0) + 2208988800.0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* This method constructs the data bytes of a raw NTP packet.
|
||||
*/
|
||||
public byte[] toByteArray() {
|
||||
// All bytes are automatically set to 0
|
||||
byte[] p = new byte[48];
|
||||
|
||||
p[0] = (byte) (leapIndicator << 6 | version << 3 | mode);
|
||||
p[1] = (byte) stratum;
|
||||
p[2] = pollInterval;
|
||||
p[3] = precision;
|
||||
|
||||
// root delay is a signed 16.16-bit FP, in Java an int is 32-bits
|
||||
int l = (int) (rootDelay * 65536.0);
|
||||
p[4] = (byte) ((l >> 24) & 0xFF);
|
||||
p[5] = (byte) ((l >> 16) & 0xFF);
|
||||
p[6] = (byte) ((l >> 8) & 0xFF);
|
||||
p[7] = (byte) (l & 0xFF);
|
||||
|
||||
// root dispersion is an unsigned 16.16-bit FP, in Java there are no
|
||||
// unsigned primitive types, so we use a long which is 64-bits
|
||||
long ul = (long) (rootDispersion * 65536.0);
|
||||
p[8] = (byte) ((ul >> 24) & 0xFF);
|
||||
p[9] = (byte) ((ul >> 16) & 0xFF);
|
||||
p[10] = (byte) ((ul >> 8) & 0xFF);
|
||||
p[11] = (byte) (ul & 0xFF);
|
||||
|
||||
p[12] = referenceIdentifier[0];
|
||||
p[13] = referenceIdentifier[1];
|
||||
p[14] = referenceIdentifier[2];
|
||||
p[15] = referenceIdentifier[3];
|
||||
|
||||
encodeTimestamp(p, 16, referenceTimestamp);
|
||||
encodeTimestamp(p, 24, originateTimestamp);
|
||||
encodeTimestamp(p, 32, receiveTimestamp);
|
||||
encodeTimestamp(p, 40, transmitTimestamp);
|
||||
|
||||
return p;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Returns a string representation of a NtpMessage
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
String precisionStr = new DecimalFormat("0.#E0").format(Math.pow(2, precision));
|
||||
|
||||
return "Leap indicator: " + leapIndicator + "\n" +
|
||||
"Version: " + version + "\n" +
|
||||
"Mode: " + mode + "\n" +
|
||||
"Stratum: " + stratum + "\n" +
|
||||
"Poll: " + pollInterval + "\n" +
|
||||
"Precision: " + precision + " (" + precisionStr + " seconds)\n" +
|
||||
"Root delay: " + new DecimalFormat("0.00").format(rootDelay*1000) + " ms\n" +
|
||||
"Root dispersion: " + new DecimalFormat("0.00").format(rootDispersion*1000) + " ms\n" +
|
||||
"Reference identifier: " + referenceIdentifierToString(referenceIdentifier, stratum, version) + "\n" +
|
||||
"Reference timestamp: " + timestampToString(referenceTimestamp) + "\n" +
|
||||
"Originate timestamp: " + timestampToString(originateTimestamp) + "\n" +
|
||||
"Receive timestamp: " + timestampToString(receiveTimestamp) + "\n" +
|
||||
"Transmit timestamp: " + timestampToString(transmitTimestamp);
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Converts an unsigned byte to a short. By default, Java assumes that
|
||||
* a byte is signed.
|
||||
*/
|
||||
public static short unsignedByteToShort(byte b) {
|
||||
if((b & 0x80)==0x80)
|
||||
return (short) (128 + (b & 0x7f));
|
||||
else
|
||||
return b;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Will read 8 bytes of a message beginning at <code>pointer</code>
|
||||
* and return it as a double, according to the NTP 64-bit timestamp
|
||||
* format.
|
||||
*/
|
||||
public static double decodeTimestamp(byte[] array, int pointer) {
|
||||
double r = 0.0;
|
||||
|
||||
for(int i=0; i<8; i++) {
|
||||
r += unsignedByteToShort(array[pointer+i]) * Math.pow(2, (3-i)*8);
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Encodes a timestamp in the specified position in the message
|
||||
*/
|
||||
public static void encodeTimestamp(byte[] array, int pointer, double timestamp) {
|
||||
// Converts a double into a 64-bit fixed point
|
||||
for(int i=0; i<8; i++) {
|
||||
// 2^24, 2^16, 2^8, .. 2^-32
|
||||
double base = Math.pow(2, (3-i)*8);
|
||||
|
||||
// Capture byte value
|
||||
array[pointer+i] = (byte) (timestamp / base);
|
||||
|
||||
// Subtract captured value from remaining total
|
||||
timestamp = timestamp - (unsignedByteToShort(array[pointer+i]) * base);
|
||||
}
|
||||
|
||||
// From RFC 2030: It is advisable to fill the non-significant
|
||||
// low order bits of the timestamp with a random, unbiased
|
||||
// bitstring, both to avoid systematic roundoff errors and as
|
||||
// a means of loop detection and replay detection.
|
||||
array[7+pointer] = (byte) (RandomSource.getInstance().nextInt());
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Returns a timestamp (number of seconds since 00:00 1-Jan-1900) as a
|
||||
* formatted date/time string.
|
||||
*/
|
||||
public static String timestampToString(double timestamp) {
|
||||
if(timestamp==0) return "0";
|
||||
|
||||
// timestamp is relative to 1900, utc is used by Java and is relative
|
||||
// to 1970
|
||||
double utc = timestamp - (2208988800.0);
|
||||
|
||||
// milliseconds
|
||||
long ms = (long) (utc * 1000.0);
|
||||
|
||||
// date/time
|
||||
String date = new SimpleDateFormat("dd-MMM-yyyy HH:mm:ss").format(new Date(ms));
|
||||
|
||||
// fraction
|
||||
double fraction = timestamp - ((long) timestamp);
|
||||
String fractionString = new DecimalFormat(".000000").format(fraction);
|
||||
|
||||
return date + fractionString;
|
||||
}
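/**** Illustrative sketch, not part of the commit: round-trips a Java clock value
      through the NTP 64-bit timestamp helpers above. 2208988800 is the offset in
      seconds between the NTP era (1900) and the Unix era (1970), the same constant
      timestampToString() subtracts.
public static void main(String args[]) {
    double ntpNow = (System.currentTimeMillis() / 1000.0) + 2208988800.0;
    byte[] buf = new byte[48];                 // a standard 48-byte SNTP packet
    encodeTimestamp(buf, 40, ntpNow);          // write the transmit-timestamp field
    double decoded = decodeTimestamp(buf, 40);
    System.out.println(timestampToString(decoded));
    // decoded differs from ntpNow only in the randomized low-order byte
    // that encodeTimestamp() fills in per RFC 2030
}
****/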
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Returns a string representation of a reference identifier according
|
||||
* to the rules set out in RFC 2030.
|
||||
*/
|
||||
public static String referenceIdentifierToString(byte[] ref, short stratum, byte version) {
|
||||
// From the RFC 2030:
|
||||
// In the case of NTP Version 3 or Version 4 stratum-0 (unspecified)
|
||||
// or stratum-1 (primary) servers, this is a four-character ASCII
|
||||
// string, left justified and zero padded to 32 bits.
|
||||
if(stratum==0 || stratum==1) {
|
||||
return new String(ref);
|
||||
}
|
||||
|
||||
// In NTP Version 3 secondary servers, this is the 32-bit IPv4
|
||||
// address of the reference source.
|
||||
else if(version==3) {
|
||||
return unsignedByteToShort(ref[0]) + "." +
|
||||
unsignedByteToShort(ref[1]) + "." +
|
||||
unsignedByteToShort(ref[2]) + "." +
|
||||
unsignedByteToShort(ref[3]);
|
||||
}
|
||||
|
||||
// In NTP Version 4 secondary servers, this is the low order 32 bits
|
||||
// of the latest transmit timestamp of the reference source.
|
||||
else if(version==4) {
|
||||
return "" + ((unsignedByteToShort(ref[0]) / 256.0) +
|
||||
(unsignedByteToShort(ref[1]) / 65536.0) +
|
||||
(unsignedByteToShort(ref[2]) / 16777216.0) +
|
||||
(unsignedByteToShort(ref[3]) / 4294967296.0));
|
||||
}
|
||||
|
||||
return "";
|
||||
}
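/**** Illustrative sketch, not part of the commit: the three RFC 2030 cases handled above,
      with example values only.
byte[] primary   = { 'L', 'O', 'C', 'L' };               // stratum 0/1: four-character ASCII code
byte[] secondary = { (byte) 192, (byte) 168, 1, 1 };     // NTPv3 secondary: IPv4 of the reference source
String s1 = referenceIdentifierToString(primary,   (short) 1, (byte) 3);   // "LOCL"
String s2 = referenceIdentifierToString(secondary, (short) 2, (byte) 3);   // "192.168.1.1"
****/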
|
||||
}
|
@@ -1,336 +1,31 @@
|
||||
package net.i2p.time;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.StringTokenizer;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.util.I2PThread;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Periodically query a series of NTP servers and update any associated
|
||||
* listeners. It tries the NTP servers in order, contacting them using
|
||||
* SNTP (UDP port 123). By default, it does this every 5 minutes,
|
||||
* forever.
|
||||
* Dummy. Real thing moved to net.i2p.router.time.RouterTimestamper.
|
||||
* What remains here is essentially an interface,
|
||||
* containing only what is needed to keep external apps
|
||||
* compiled with old libs from breaking, since
|
||||
* net.i2p.util.Clock returns a Timestamper in getTimestamper()
|
||||
*
|
||||
* Deprecated outside of the router.
|
||||
*/
|
||||
public class Timestamper implements Runnable {
|
||||
private final I2PAppContext _context;
|
||||
private Log _log;
|
||||
private final List<String> _servers;
|
||||
private List<String> _priorityServers;
|
||||
private final List<UpdateListener> _listeners;
|
||||
private int _queryFrequency;
|
||||
private int _concurringServers;
|
||||
private int _consecutiveFails;
|
||||
private volatile boolean _disabled;
|
||||
private final boolean _daemon;
|
||||
private boolean _initialized;
|
||||
private boolean _wellSynced;
|
||||
private volatile boolean _isRunning;
|
||||
private Thread _timestamperThread;
|
||||
|
||||
private static final int MIN_QUERY_FREQUENCY = 5*60*1000;
|
||||
private static final int DEFAULT_QUERY_FREQUENCY = 5*60*1000;
|
||||
private static final String DEFAULT_SERVER_LIST = "0.pool.ntp.org,1.pool.ntp.org,2.pool.ntp.org";
|
||||
private static final String DEFAULT_DISABLED = "true";
|
||||
/** how many times do we have to query if we are changing the clock? */
|
||||
private static final int DEFAULT_CONCURRING_SERVERS = 3;
|
||||
private static final int MAX_CONSECUTIVE_FAILS = 10;
|
||||
/** dummy */
|
||||
public Timestamper() {}
|
||||
|
||||
public static final String PROP_QUERY_FREQUENCY = "time.queryFrequencyMs";
|
||||
public static final String PROP_SERVER_LIST = "time.sntpServerList";
|
||||
public static final String PROP_DISABLED = "time.disabled";
|
||||
public static final String PROP_CONCURRING_SERVERS = "time.concurringServers";
|
||||
public static final String PROP_IP_COUNTRY = "i2np.lastCountry";
|
||||
|
||||
/** if different SNTP servers differ by more than 10s, someone is b0rked */
|
||||
private static final int MAX_VARIANCE = 10*1000;
|
||||
|
||||
public Timestamper(I2PAppContext ctx) {
|
||||
this(ctx, null, true);
|
||||
}
|
||||
|
||||
public Timestamper(I2PAppContext ctx, UpdateListener lsnr) {
|
||||
this(ctx, lsnr, true);
|
||||
}
|
||||
public Timestamper(I2PAppContext ctx, UpdateListener lsnr, boolean daemon) {
|
||||
// moved here to prevent problems with synchronized statements.
|
||||
_servers = new ArrayList(3);
|
||||
_listeners = new CopyOnWriteArrayList();
|
||||
_context = ctx;
|
||||
_daemon = daemon;
|
||||
// DO NOT initialize _log here, stack overflow via LogManager init loop
|
||||
|
||||
// Don't bother starting a thread if we are disabled.
|
||||
// This means we no longer check every 5 minutes to see if we got enabled,
|
||||
// so the property must be set at startup.
|
||||
// We still need to be instantiated since the router calls clock().getTimestamper().waitForInitialization()
|
||||
String disabled = ctx.getProperty(PROP_DISABLED, DEFAULT_DISABLED);
|
||||
if (Boolean.valueOf(disabled).booleanValue()) {
|
||||
_initialized = true;
|
||||
return;
|
||||
}
|
||||
if (lsnr != null)
|
||||
_listeners.add(lsnr);
|
||||
updateConfig();
|
||||
startTimestamper();
|
||||
}
|
||||
|
||||
public int getServerCount() {
|
||||
synchronized (_servers) {
|
||||
return _servers.size();
|
||||
}
|
||||
}
|
||||
public String getServer(int index) {
|
||||
synchronized (_servers) {
|
||||
return _servers.get(index);
|
||||
}
|
||||
}
|
||||
|
||||
public int getQueryFrequencyMs() { return _queryFrequency; }
|
||||
|
||||
public boolean getIsDisabled() { return _disabled; }
|
||||
|
||||
public void addListener(UpdateListener lsnr) {
|
||||
_listeners.add(lsnr);
|
||||
}
|
||||
public void removeListener(UpdateListener lsnr) {
|
||||
_listeners.remove(lsnr);
|
||||
}
|
||||
public int getListenerCount() {
|
||||
return _listeners.size();
|
||||
}
|
||||
public UpdateListener getListener(int index) {
|
||||
return _listeners.get(index);
|
||||
}
|
||||
|
||||
private void startTimestamper() {
|
||||
_timestamperThread = new I2PThread(this, "Timestamper", _daemon);
|
||||
_timestamperThread.setPriority(I2PThread.MIN_PRIORITY);
|
||||
_isRunning = true;
|
||||
_timestamperThread.start();
|
||||
_context.addShutdownTask(new Shutdown());
|
||||
}
|
||||
|
||||
public void waitForInitialization() {
|
||||
try {
|
||||
synchronized (this) {
|
||||
if (!_initialized)
|
||||
wait();
|
||||
}
|
||||
} catch (InterruptedException ie) {}
|
||||
}
|
||||
/** dummy */
|
||||
public void waitForInitialization() {}
|
||||
|
||||
/**
|
||||
* Update the time immediately.
|
||||
* Dummy
|
||||
* @since 0.8.8
|
||||
*/
|
||||
public void timestampNow() {
|
||||
if (_initialized && _isRunning && (!_disabled) && _timestamperThread != null)
|
||||
_timestamperThread.interrupt();
|
||||
}
|
||||
public void timestampNow() {}
|
||||
|
||||
/** @since 0.8.8 */
|
||||
private class Shutdown implements Runnable {
|
||||
public void run() {
|
||||
_isRunning = false;
|
||||
if (_timestamperThread != null)
|
||||
_timestamperThread.interrupt();
|
||||
}
|
||||
}
|
||||
|
||||
public void run() {
|
||||
try { Thread.sleep(1000); } catch (InterruptedException ie) {}
|
||||
_log = _context.logManager().getLog(Timestamper.class);
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Starting timestamper");
|
||||
boolean lastFailed = false;
|
||||
try {
|
||||
while (_isRunning) {
|
||||
updateConfig();
|
||||
if (!_disabled) {
|
||||
// first the servers for our country, if we know what country we're in...
|
||||
if (_priorityServers != null) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Querying servers " + _priorityServers);
|
||||
try {
|
||||
lastFailed = !queryTime(_priorityServers.toArray(new String[_priorityServers.size()]));
|
||||
} catch (IllegalArgumentException iae) {
|
||||
if ( (!lastFailed) && (_log.shouldLog(Log.WARN)) )
|
||||
_log.warn("Unable to reach country-specific NTP servers");
|
||||
lastFailed = true;
|
||||
}
|
||||
}
|
||||
// ... and then the global list, if that failed
|
||||
if (_priorityServers == null || lastFailed) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Querying servers " + _servers);
|
||||
try {
|
||||
lastFailed = !queryTime(_servers.toArray(new String[_servers.size()]));
|
||||
} catch (IllegalArgumentException iae) {
|
||||
if ( (!_initialized) && (_log.shouldLog(Log.ERROR)) ) {
|
||||
List<String> all = new ArrayList();
|
||||
if (_priorityServers != null)
|
||||
all.addAll(_priorityServers);
|
||||
all.addAll(_servers);
|
||||
_log.error("Unable to reach any of the NTP servers " + all + " - network disconnected? Or set time.sntpServerList=myserver1.com,myserver2.com in advanced configuration.");
|
||||
}
|
||||
lastFailed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_initialized = true;
|
||||
synchronized (this) { notifyAll(); }
|
||||
long sleepTime;
|
||||
if (lastFailed) {
|
||||
if (++_consecutiveFails >= MAX_CONSECUTIVE_FAILS)
|
||||
sleepTime = 30*60*1000;
|
||||
else
|
||||
sleepTime = 30*1000;
|
||||
} else {
|
||||
_consecutiveFails = 0;
|
||||
sleepTime = _context.random().nextInt(_queryFrequency) + _queryFrequency;
|
||||
if (_wellSynced)
|
||||
sleepTime *= 3;
|
||||
}
|
||||
try { Thread.sleep(sleepTime); } catch (InterruptedException ie) {}
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
_log.log(Log.CRIT, "Timestamper died!", t);
|
||||
synchronized (this) { notifyAll(); }
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* True if the time was queried successfully, false if it couldn't be
|
||||
*/
|
||||
private boolean queryTime(String serverList[]) throws IllegalArgumentException {
|
||||
long found[] = new long[_concurringServers];
|
||||
long now = -1;
|
||||
int stratum = -1;
|
||||
long expectedDelta = 0;
|
||||
_wellSynced = false;
|
||||
for (int i = 0; i < _concurringServers; i++) {
|
||||
if (i > 0) {
|
||||
// this delays startup when net is disconnected or the timeserver list is bad, don't make it too long
|
||||
try { Thread.sleep(2*1000); } catch (InterruptedException ie) {}
|
||||
}
|
||||
long[] timeAndStratum = NtpClient.currentTimeAndStratum(serverList);
|
||||
now = timeAndStratum[0];
|
||||
stratum = (int) timeAndStratum[1];
|
||||
long delta = now - _context.clock().now();
|
||||
found[i] = delta;
|
||||
if (i == 0) {
|
||||
if (Math.abs(delta) < MAX_VARIANCE) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("a single SNTP query was within the tolerance (" + delta + "ms)");
|
||||
// If less than a half second on the first try, we're in good shape
|
||||
_wellSynced = Math.abs(delta) < 500;
|
||||
break;
|
||||
} else {
|
||||
// outside the tolerance, let's iterate across the concurring queries
|
||||
expectedDelta = delta;
|
||||
}
|
||||
} else {
|
||||
if (Math.abs(delta - expectedDelta) > MAX_VARIANCE) {
|
||||
if (_log.shouldLog(Log.ERROR)) {
|
||||
StringBuilder err = new StringBuilder(96);
|
||||
err.append("SNTP client variance exceeded at query ").append(i);
|
||||
err.append(". expected = ");
|
||||
err.append(expectedDelta);
|
||||
err.append(", found = ");
|
||||
err.append(delta);
|
||||
err.append(" all deltas: ");
|
||||
for (int j = 0; j < found.length; j++)
|
||||
err.append(found[j]).append(' ');
|
||||
_log.error(err.toString());
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
stampTime(now, stratum);
|
||||
if (_log.shouldLog(Log.DEBUG)) {
|
||||
StringBuilder buf = new StringBuilder(64);
|
||||
buf.append("Deltas: ");
|
||||
for (int i = 0; i < found.length; i++)
|
||||
buf.append(found[i]).append(' ');
|
||||
_log.debug(buf.toString());
|
||||
}
|
||||
return true;
|
||||
}
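/**** Illustrative sketch, not part of the commit: a single SNTP query using the same
      NtpClient helper that queryTime() calls above. The returned array is
      { time in ms, stratum }; an IllegalArgumentException means no server answered.
String[] servers = { "0.pool.ntp.org", "1.pool.ntp.org" };
long[] timeAndStratum = NtpClient.currentTimeAndStratum(servers);
long skewMs = timeAndStratum[0] - System.currentTimeMillis();
System.out.println("stratum " + timeAndStratum[1] + ", local clock skew " + skewMs + " ms");
****/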
|
||||
|
||||
/**
|
||||
* Notify the listeners
|
||||
*
|
||||
* @since stratum param added in 0.7.12
|
||||
*/
|
||||
private void stampTime(long now, int stratum) {
|
||||
long before = _context.clock().now();
|
||||
for (UpdateListener lsnr : _listeners) {
|
||||
lsnr.setNow(now, stratum);
|
||||
}
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Stamped the time as " + now + " (delta=" + (now-before) + ")");
|
||||
}
|
||||
|
||||
/**
|
||||
* Reload all the config elements from the appContext
|
||||
*
|
||||
*/
|
||||
private void updateConfig() {
|
||||
String serverList = _context.getProperty(PROP_SERVER_LIST);
|
||||
if ( (serverList == null) || (serverList.trim().length() <= 0) ) {
|
||||
serverList = DEFAULT_SERVER_LIST;
|
||||
String country = _context.getProperty(PROP_IP_COUNTRY);
|
||||
if (country == null) {
|
||||
country = Locale.getDefault().getCountry();
|
||||
if (country != null)
|
||||
country = country.toLowerCase(Locale.US);
|
||||
}
|
||||
if (country != null && country.length() > 0) {
|
||||
_priorityServers = new ArrayList(3);
|
||||
for (int i = 0; i < 3; i++)
|
||||
_priorityServers.add(i + "." + country + ".pool.ntp.org");
|
||||
} else {
|
||||
_priorityServers = null;
|
||||
}
|
||||
} else {
|
||||
_priorityServers = null;
|
||||
}
|
||||
_servers.clear();
|
||||
StringTokenizer tok = new StringTokenizer(serverList, ", ");
|
||||
while (tok.hasMoreTokens()) {
|
||||
String val = tok.nextToken();
|
||||
val = val.trim();
|
||||
if (val.length() > 0)
|
||||
_servers.add(val);
|
||||
}
|
||||
|
||||
_queryFrequency = Math.max(MIN_QUERY_FREQUENCY,
|
||||
_context.getProperty(PROP_QUERY_FREQUENCY, DEFAULT_QUERY_FREQUENCY));
|
||||
|
||||
String disabled = _context.getProperty(PROP_DISABLED, DEFAULT_DISABLED);
|
||||
_disabled = Boolean.valueOf(disabled).booleanValue();
|
||||
|
||||
_concurringServers = Math.min(4, Math.max(1,
|
||||
_context.getProperty(PROP_CONCURRING_SERVERS, DEFAULT_CONCURRING_SERVERS)));
|
||||
}
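/**** Illustrative note, not part of the commit: when time.sntpServerList is unset and a
      country is known (from i2np.lastCountry or the default locale), updateConfig() above
      builds a priority list such as 0.de.pool.ntp.org, 1.de.pool.ntp.org, 2.de.pool.ntp.org
      for "de"; run() tries those before the global defaults in DEFAULT_SERVER_LIST.
****/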
|
||||
|
||||
/****
|
||||
public static void main(String args[]) {
|
||||
System.setProperty(PROP_DISABLED, "false");
|
||||
System.setProperty(PROP_QUERY_FREQUENCY, "30000");
|
||||
I2PAppContext.getGlobalContext();
|
||||
for (int i = 0; i < 5*60*1000; i += 61*1000) {
|
||||
try { Thread.sleep(61*1000); } catch (InterruptedException ie) {}
|
||||
}
|
||||
}
|
||||
****/
|
||||
/** dummy */
|
||||
public void run() {}
|
||||
|
||||
/**
|
||||
* Interface to receive update notifications for when we query the time
|
||||
|
@@ -1,7 +1,8 @@
|
||||
<html>
|
||||
<body>
|
||||
<p>
|
||||
Provides classes for time synchronisation using NTP.
|
||||
Provides a stub class for time synchronization.
|
||||
Full implementation is now in net.i2p.router.time.
|
||||
</p>
|
||||
</body>
|
||||
</html>
|
||||
|
@@ -19,7 +19,6 @@ import net.i2p.time.Timestamper;
|
||||
*/
|
||||
public class Clock implements Timestamper.UpdateListener {
|
||||
protected final I2PAppContext _context;
|
||||
private final Timestamper _timestamper;
|
||||
protected long _startedOn;
|
||||
protected boolean _statCreated;
|
||||
protected volatile long _offset;
|
||||
@@ -29,7 +28,6 @@ public class Clock implements Timestamper.UpdateListener {
|
||||
public Clock(I2PAppContext context) {
|
||||
_context = context;
|
||||
_listeners = new CopyOnWriteArraySet();
|
||||
_timestamper = new Timestamper(context, this);
|
||||
_startedOn = System.currentTimeMillis();
|
||||
}
|
||||
|
||||
@@ -37,7 +35,10 @@ public class Clock implements Timestamper.UpdateListener {
|
||||
return I2PAppContext.getGlobalContext().clock();
|
||||
}
|
||||
|
||||
public Timestamper getTimestamper() { return _timestamper; }
|
||||
/**
|
||||
* This is a dummy, see RouterClock and RouterTimestamper for the real thing
|
||||
*/
|
||||
public Timestamper getTimestamper() { return new Timestamper(); }
|
||||
|
||||
/** we fetch it on demand to avoid circular dependencies (logging uses the clock) */
|
||||
protected Log getLog() { return _context.logManager().getLog(Clock.class); }
|
||||
|
@@ -1,455 +0,0 @@
|
||||
package net.i2p.util;
|
||||
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.locks.ReentrantReadWriteLock;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.DataHelper;
|
||||
|
||||
import org.xlattice.crypto.filters.BloomSHA1;
|
||||
|
||||
/**
|
||||
* Series of bloom filters which decay over time, allowing their continual use
|
||||
* for time sensitive data. This has a fixed size (per
|
||||
* period, using two periods overall), allowing this to pump through hundreds of
|
||||
* entries per second with virtually no false positive rate. Down the line,
|
||||
* this may be refactored to allow tighter control of the size necessary for the
|
||||
* contained bloom filters.
|
||||
*
|
||||
* Deprecated for use outside of the router; to be moved to router.jar.
|
||||
*
|
||||
* See main() for an analysis of false positive rate.
|
||||
* See BloomFilterIVValidator for instantiation parameters.
|
||||
* See DecayingHashSet for a smaller and simpler version.
|
||||
* @see net.i2p.router.tunnel.BloomFilterIVValidator
|
||||
* @see net.i2p.util.DecayingHashSet
|
||||
*/
|
||||
public class DecayingBloomFilter {
|
||||
protected final I2PAppContext _context;
|
||||
protected final Log _log;
|
||||
private BloomSHA1 _current;
|
||||
private BloomSHA1 _previous;
|
||||
protected final int _durationMs;
|
||||
protected final int _entryBytes;
|
||||
private final byte _extenders[][];
|
||||
private final byte _extended[];
|
||||
private final byte _longToEntry[];
|
||||
private final long _longToEntryMask;
|
||||
protected long _currentDuplicates;
|
||||
protected volatile boolean _keepDecaying;
|
||||
protected final SimpleTimer.TimedEvent _decayEvent;
|
||||
/** just for logging */
|
||||
protected final String _name;
|
||||
/** synchronize against this lock when switching double buffers */
|
||||
protected final ReentrantReadWriteLock _reorganizeLock = new ReentrantReadWriteLock();
|
||||
|
||||
private static final int DEFAULT_M = 23;
|
||||
private static final int DEFAULT_K = 11;
|
||||
/** true for debugging */
|
||||
private static final boolean ALWAYS_MISS = false;
|
||||
|
||||
/** only for extension by DHS */
|
||||
protected DecayingBloomFilter(int durationMs, int entryBytes, String name, I2PAppContext context) {
|
||||
_context = context;
|
||||
_log = context.logManager().getLog(getClass());
|
||||
_entryBytes = entryBytes;
|
||||
_name = name;
|
||||
_durationMs = durationMs;
|
||||
// all final
|
||||
_extenders = null;
|
||||
_extended = null;
|
||||
_longToEntry = null;
|
||||
_longToEntryMask = 0;
|
||||
context.addShutdownTask(new Shutdown());
|
||||
_decayEvent = new DecayEvent();
|
||||
_keepDecaying = true;
|
||||
SimpleTimer.getInstance().addEvent(_decayEvent, _durationMs);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a bloom filter that will decay its entries over time.
|
||||
*
|
||||
* @param durationMs entries last for at least this long, but no more than twice this long
|
||||
* @param entryBytes how large are the entries to be added? if this is less than 32 bytes,
|
||||
* the entries added will be expanded to 32 bytes by XORing them
|
||||
* against sufficient random extender values and concatenating the results.
|
||||
*/
|
||||
public DecayingBloomFilter(I2PAppContext context, int durationMs, int entryBytes) {
|
||||
this(context, durationMs, entryBytes, "DBF");
|
||||
}
|
||||
|
||||
/** @param name just for logging / debugging / stats */
|
||||
public DecayingBloomFilter(I2PAppContext context, int durationMs, int entryBytes, String name) {
|
||||
// this is instantiated in four different places, they may have different
|
||||
// requirements, but for now use this as a gross method of memory reduction.
|
||||
// m == 23 => 1MB each BloomSHA1 (4 pairs = 8MB total)
|
||||
this(context, durationMs, entryBytes, name, context.getProperty("router.decayingBloomFilterM", DEFAULT_M));
|
||||
}
|
||||
|
||||
/** @param m filter size exponent */
|
||||
public DecayingBloomFilter(I2PAppContext context, int durationMs, int entryBytes, String name, int m) {
|
||||
_context = context;
|
||||
_log = context.logManager().getLog(DecayingBloomFilter.class);
|
||||
_entryBytes = entryBytes;
|
||||
_name = name;
|
||||
int k = DEFAULT_K;
|
||||
// max is (23,11) or (26,10); see KeySelector for details
|
||||
if (m > DEFAULT_M)
|
||||
k--;
|
||||
_current = new BloomSHA1(m, k);
|
||||
_previous = new BloomSHA1(m, k);
|
||||
_durationMs = durationMs;
|
||||
int numExtenders = (32+ (entryBytes-1))/entryBytes - 1;
|
||||
if (numExtenders < 0)
|
||||
numExtenders = 0;
|
||||
_extenders = new byte[numExtenders][entryBytes];
|
||||
for (int i = 0; i < numExtenders; i++)
|
||||
_context.random().nextBytes(_extenders[i]);
|
||||
if (numExtenders > 0) {
|
||||
_extended = new byte[32];
|
||||
_longToEntry = new byte[_entryBytes];
|
||||
_longToEntryMask = (1l << (_entryBytes * 8l)) -1;
|
||||
} else {
|
||||
// final
|
||||
_extended = null;
|
||||
_longToEntry = null;
|
||||
_longToEntryMask = 0;
|
||||
}
|
||||
_decayEvent = new DecayEvent();
|
||||
_keepDecaying = true;
|
||||
SimpleTimer.getInstance().addEvent(_decayEvent, _durationMs);
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("New DBF " + name + " m = " + m + " k = " + k + " entryBytes = " + entryBytes +
|
||||
" numExtenders = " + numExtenders + " cycle (s) = " + (durationMs / 1000));
|
||||
// try to get a handle on memory usage vs. false positives
|
||||
context.statManager().createRateStat("router.decayingBloomFilter." + name + ".size",
|
||||
"Size", "Router", new long[] { 10 * Math.max(60*1000, durationMs) });
|
||||
context.statManager().createRateStat("router.decayingBloomFilter." + name + ".dups",
|
||||
"1000000 * Duplicates/Size", "Router", new long[] { 10 * Math.max(60*1000, durationMs) });
|
||||
context.statManager().createRateStat("router.decayingBloomFilter." + name + ".log10(falsePos)",
|
||||
"log10 of the false positive rate (must have net.i2p.util.DecayingBloomFilter=DEBUG)",
|
||||
"Router", new long[] { 10 * Math.max(60*1000, durationMs) });
|
||||
context.addShutdownTask(new Shutdown());
|
||||
}
|
||||
|
||||
/**
|
||||
* @since 0.8.8
|
||||
*/
|
||||
private class Shutdown implements Runnable {
|
||||
public void run() {
|
||||
clear();
|
||||
}
|
||||
}
|
||||
|
||||
public long getCurrentDuplicateCount() { return _currentDuplicates; }
|
||||
|
||||
/** unsynchronized but only used for logging elsewhere */
|
||||
public int getInsertedCount() {
|
||||
return _current.size() + _previous.size();
|
||||
}
|
||||
|
||||
/** unsynchronized, only used for logging elsewhere */
|
||||
public double getFalsePositiveRate() {
|
||||
return _current.falsePositives();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if the entry added is a duplicate
|
||||
*/
|
||||
public boolean add(byte entry[]) {
|
||||
return add(entry, 0, entry.length);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if the entry added is a duplicate
|
||||
*/
|
||||
public boolean add(byte entry[], int off, int len) {
|
||||
if (ALWAYS_MISS) return false;
|
||||
if (entry == null)
|
||||
throw new IllegalArgumentException("Null entry");
|
||||
if (len != _entryBytes)
|
||||
throw new IllegalArgumentException("Bad entry [" + len + ", expected "
|
||||
+ _entryBytes + "]");
|
||||
getReadLock();
|
||||
try {
|
||||
return locked_add(entry, off, len, true);
|
||||
} finally { releaseReadLock(); }
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if the entry added is a duplicate. the number of low order
|
||||
* bits used is determined by the entryBytes parameter used on creation of the
|
||||
* filter.
|
||||
*
|
||||
*/
|
||||
public boolean add(long entry) {
|
||||
if (ALWAYS_MISS) return false;
|
||||
if (_entryBytes <= 7)
|
||||
entry = ((entry ^ _longToEntryMask) & ((1 << 31)-1)) | (entry ^ _longToEntryMask);
|
||||
//entry &= _longToEntryMask;
|
||||
if (entry < 0) {
|
||||
DataHelper.toLong(_longToEntry, 0, _entryBytes, 0-entry);
|
||||
_longToEntry[0] |= (1 << 7);
|
||||
} else {
|
||||
DataHelper.toLong(_longToEntry, 0, _entryBytes, entry);
|
||||
}
|
||||
getReadLock();
|
||||
try {
|
||||
return locked_add(_longToEntry, 0, _longToEntry.length, true);
|
||||
} finally { releaseReadLock(); }
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if the entry is already known. this does NOT add the
|
||||
* entry however.
|
||||
*
|
||||
*/
|
||||
public boolean isKnown(long entry) {
|
||||
if (ALWAYS_MISS) return false;
|
||||
if (_entryBytes <= 7)
|
||||
entry = ((entry ^ _longToEntryMask) & ((1 << 31)-1)) | (entry ^ _longToEntryMask);
|
||||
if (entry < 0) {
|
||||
DataHelper.toLong(_longToEntry, 0, _entryBytes, 0-entry);
|
||||
_longToEntry[0] |= (1 << 7);
|
||||
} else {
|
||||
DataHelper.toLong(_longToEntry, 0, _entryBytes, entry);
|
||||
}
|
||||
getReadLock();
|
||||
try {
|
||||
return locked_add(_longToEntry, 0, _longToEntry.length, false);
|
||||
} finally { releaseReadLock(); }
|
||||
}
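/**** Illustrative usage sketch, not part of the commit: duplicate detection with a
      decaying filter. An entry is remembered for between durationMs and 2*durationMs,
      matching the double-buffered decay described in the class javadoc; the message id
      and the stat name "example" are made up for the example.
I2PAppContext ctx = I2PAppContext.getGlobalContext();
DecayingBloomFilter dbf = new DecayingBloomFilter(ctx, 10*60*1000, 8, "example");
long msgId = 12345L;
boolean dup1 = dbf.add(msgId);     // false: first sighting
boolean dup2 = dbf.add(msgId);     // true: flagged as a duplicate
dbf.stopDecaying();                // cancel the decay timer when done
****/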
|
||||
|
||||
private boolean locked_add(byte entry[], int offset, int len, boolean addIfNew) {
|
||||
if (_extended != null) {
|
||||
// extend the entry to 32 bytes
|
||||
System.arraycopy(entry, offset, _extended, 0, len);
|
||||
for (int i = 0; i < _extenders.length; i++)
|
||||
DataHelper.xor(entry, offset, _extenders[i], 0, _extended, _entryBytes * (i+1), _entryBytes);
|
||||
|
||||
BloomSHA1.FilterKey key = _current.getFilterKey(_extended, 0, 32);
|
||||
boolean seen = _current.locked_member(key);
|
||||
if (!seen)
|
||||
seen = _previous.locked_member(key);
|
||||
if (seen) {
|
||||
_currentDuplicates++;
|
||||
_current.release(key);
|
||||
return true;
|
||||
} else {
|
||||
if (addIfNew) {
|
||||
_current.locked_insert(key);
|
||||
}
|
||||
_current.release(key);
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
BloomSHA1.FilterKey key = _current.getFilterKey(entry, offset, len);
|
||||
boolean seen = _current.locked_member(key);
|
||||
if (!seen)
|
||||
seen = _previous.locked_member(key);
|
||||
if (seen) {
|
||||
_currentDuplicates++;
|
||||
_current.release(key);
|
||||
return true;
|
||||
} else {
|
||||
if (addIfNew) {
|
||||
_current.locked_insert(key);
|
||||
}
|
||||
_current.release(key);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void clear() {
|
||||
if (!getWriteLock())
|
||||
return;
|
||||
try {
|
||||
_current.clear();
|
||||
_previous.clear();
|
||||
_currentDuplicates = 0;
|
||||
} finally { releaseWriteLock(); }
|
||||
}
|
||||
|
||||
public void stopDecaying() {
|
||||
_keepDecaying = false;
|
||||
SimpleTimer.getInstance().removeEvent(_decayEvent);
|
||||
}
|
||||
|
||||
protected void decay() {
|
||||
int currentCount = 0;
|
||||
long dups = 0;
|
||||
double fpr = 0d;
|
||||
if (!getWriteLock())
|
||||
return;
|
||||
try {
|
||||
BloomSHA1 tmp = _previous;
|
||||
currentCount = _current.size();
|
||||
if (_log.shouldLog(Log.DEBUG) && currentCount > 0)
|
||||
fpr = _current.falsePositives();
|
||||
_previous = _current;
|
||||
_current = tmp;
|
||||
_current.clear();
|
||||
dups = _currentDuplicates;
|
||||
_currentDuplicates = 0;
|
||||
} finally { releaseWriteLock(); }
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Decaying the filter " + _name + " after inserting " + currentCount
|
||||
+ " elements and " + dups + " false positives with FPR = " + fpr);
|
||||
_context.statManager().addRateData("router.decayingBloomFilter." + _name + ".size",
|
||||
currentCount);
|
||||
if (currentCount > 0)
|
||||
_context.statManager().addRateData("router.decayingBloomFilter." + _name + ".dups",
|
||||
1000l*1000*dups/currentCount);
|
||||
if (fpr > 0d) {
|
||||
// only if log.shouldLog(Log.DEBUG) ...
|
||||
long exponent = (long) Math.log10(fpr);
|
||||
_context.statManager().addRateData("router.decayingBloomFilter." + _name + ".log10(falsePos)",
|
||||
exponent);
|
||||
}
|
||||
}
|
||||
|
||||
private class DecayEvent implements SimpleTimer.TimedEvent {
|
||||
public void timeReached() {
|
||||
if (_keepDecaying) {
|
||||
decay();
|
||||
SimpleTimer.getInstance().addEvent(DecayEvent.this, _durationMs);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** @since 0.8.11 moved from DecayingHashSet */
|
||||
protected void getReadLock() {
|
||||
_reorganizeLock.readLock().lock();
|
||||
}
|
||||
|
||||
/** @since 0.8.11 moved from DecayingHashSet */
|
||||
protected void releaseReadLock() {
|
||||
_reorganizeLock.readLock().unlock();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if the lock was acquired
|
||||
* @since 0.8.11 moved from DecayingHashSet
|
||||
*/
|
||||
protected boolean getWriteLock() {
|
||||
try {
|
||||
boolean rv = _reorganizeLock.writeLock().tryLock(5000, TimeUnit.MILLISECONDS);
|
||||
if (!rv)
|
||||
_log.error("no lock, size is: " + _reorganizeLock.getQueueLength(), new Exception("rats"));
|
||||
return rv;
|
||||
} catch (InterruptedException ie) {}
|
||||
return false;
|
||||
}
|
||||
|
||||
/** @since 0.8.11 moved from DecayingHashSet */
|
||||
protected void releaseWriteLock() {
|
||||
_reorganizeLock.writeLock().unlock();
|
||||
}
|
||||
|
||||
/**
|
||||
* This filter is used only for participants and OBEPs, not
|
||||
* IBGWs, so depending on your assumptions of avg. tunnel length,
|
||||
* the performance is somewhat better than the gross share BW
|
||||
* would indicate.
|
||||
*
|
||||
*<pre>
|
||||
* Following stats for m=23, k=11:
|
||||
* Theoretical false positive rate for 16 KBps: 1.17E-21
|
||||
* Theoretical false positive rate for 24 KBps: 9.81E-20
|
||||
* Theoretical false positive rate for 32 KBps: 2.24E-18
|
||||
* Theoretical false positive rate for 256 KBps: 7.45E-9
|
||||
* Theoretical false positive rate for 512 KBps: 5.32E-6
|
||||
* Theoretical false positive rate for 1024 KBps: 1.48E-3
|
||||
* Then it gets bad: 1280 .67%; 1536 2.0%; 1792 4.4%; 2048 8.2%.
|
||||
*
|
||||
* Following stats for m=24, k=10:
|
||||
* 1280 4.5E-5; 1792 5.6E-4; 2048 0.14%
|
||||
*
|
||||
* Following stats for m=25, k=10:
|
||||
* 1792 2.4E-6; 4096 0.14%; 5120 0.6%; 6144 1.7%; 8192 6.8%; 10240 15%
|
||||
*</pre>
|
||||
*/
|
||||
public static void main(String args[]) {
|
||||
System.out.println("Usage: DecayingBloomFilter [kbps [m [iterations]]] (default 256 23 10)");
|
||||
int kbps = 256;
|
||||
if (args.length >= 1) {
|
||||
try {
|
||||
kbps = Integer.parseInt(args[0]);
|
||||
} catch (NumberFormatException nfe) {}
|
||||
}
|
||||
int m = DEFAULT_M;
|
||||
if (args.length >= 2) {
|
||||
try {
|
||||
m = Integer.parseInt(args[1]);
|
||||
} catch (NumberFormatException nfe) {}
|
||||
}
|
||||
int iterations = 10;
|
||||
if (args.length >= 3) {
|
||||
try {
|
||||
iterations = Integer.parseInt(args[2]);
|
||||
} catch (NumberFormatException nfe) {}
|
||||
}
|
||||
testByLong(kbps, m, iterations);
|
||||
testByBytes(kbps, m, iterations);
|
||||
}
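/**** Illustrative sketch, not part of the commit: where the theoretical rates quoted in
      the javadoc above come from. Using the Bloom filter formula f = (1 - e^(-kN/M))^k
      (see the BloomSHA1 javadoc), with m = 23 so M = 2^23 bits, k = 11, and ten minutes
      of traffic at one entry per KByte:
int kbps = 256;
long n = 60L * 10 * kbps;              // 153600 entries per decay period
double M = 1L << 23;                   // filter size in bits
int k = 11;
double f = Math.pow(1 - Math.exp(-k * n / M), k);
// f evaluates to roughly 7.4E-9, matching the "256 KBps: 7.45E-9" line above
****/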
|
||||
|
||||
private static void testByLong(int kbps, int m, int numRuns) {
|
||||
int messages = 60 * 10 * kbps;
|
||||
Random r = new Random();
|
||||
DecayingBloomFilter filter = new DecayingBloomFilter(I2PAppContext.getGlobalContext(), 600*1000, 8, "test", m);
|
||||
int falsePositives = 0;
|
||||
long totalTime = 0;
|
||||
double fpr = 0d;
|
||||
for (int j = 0; j < numRuns; j++) {
|
||||
long start = System.currentTimeMillis();
|
||||
for (int i = 0; i < messages; i++) {
|
||||
if (filter.add(r.nextLong())) {
|
||||
falsePositives++;
|
||||
//System.out.println("False positive " + falsePositives + " (testByLong j=" + j + " i=" + i + ")");
|
||||
}
|
||||
}
|
||||
totalTime += System.currentTimeMillis() - start;
|
||||
fpr = filter.getFalsePositiveRate();
|
||||
filter.clear();
|
||||
}
|
||||
filter.stopDecaying();
|
||||
System.out.println("False postive rate should be " + fpr);
|
||||
System.out.println("After " + numRuns + " runs pushing " + messages + " entries in "
|
||||
+ DataHelper.formatDuration(totalTime/numRuns) + " per run, there were "
|
||||
+ falsePositives + " false positives");
|
||||
|
||||
}
|
||||
|
||||
private static void testByBytes(int kbps, int m, int numRuns) {
|
||||
byte iv[][] = new byte[60*10*kbps][16];
|
||||
Random r = new Random();
|
||||
for (int i = 0; i < iv.length; i++)
|
||||
r.nextBytes(iv[i]);
|
||||
|
||||
DecayingBloomFilter filter = new DecayingBloomFilter(I2PAppContext.getGlobalContext(), 600*1000, 16, "test", m);
|
||||
int falsePositives = 0;
|
||||
long totalTime = 0;
|
||||
double fpr = 0d;
|
||||
for (int j = 0; j < numRuns; j++) {
|
||||
long start = System.currentTimeMillis();
|
||||
for (int i = 0; i < iv.length; i++) {
|
||||
if (filter.add(iv[i])) {
|
||||
falsePositives++;
|
||||
//System.out.println("False positive " + falsePositives + " (testByBytes j=" + j + " i=" + i + ")");
|
||||
}
|
||||
}
|
||||
totalTime += System.currentTimeMillis() - start;
|
||||
fpr = filter.getFalsePositiveRate();
|
||||
filter.clear();
|
||||
}
|
||||
filter.stopDecaying();
|
||||
System.out.println("False postive rate should be " + fpr);
|
||||
System.out.println("After " + numRuns + " runs pushing " + iv.length + " entries in "
|
||||
+ DataHelper.formatDuration(totalTime/numRuns) + " per run, there were "
|
||||
+ falsePositives + " false positives");
|
||||
//System.out.println("inserted: " + bloom.size() + " with " + bloom.capacity()
|
||||
// + " (" + bloom.falsePositives()*100.0d + "% false positive)");
|
||||
}
|
||||
}
|
@@ -1,335 +0,0 @@
|
||||
package net.i2p.util;
|
||||
|
||||
import java.util.Random;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.DataHelper;
|
||||
|
||||
|
||||
/**
|
||||
* Double buffered hash set.
|
||||
* Since DecayingBloomFilter was instantiated 4 times for a total memory usage
|
||||
* of 8MB, it seemed like we could do a lot better, given these usage stats
|
||||
* on a class L router:
|
||||
*
|
||||
* ./router/java/src/net/i2p/router/tunnel/BuildMessageProcessor.java:
|
||||
* 32 bytes, peak 10 entries in 1m
|
||||
* (320 peak entries seen on fast router)
|
||||
*
|
||||
* ./router/java/src/net/i2p/router/transport/udp/InboundMessageFragments.java:
|
||||
* 4 bytes, peak 150 entries in 10s
|
||||
* (1600 peak entries seen on fast router)
|
||||
*
|
||||
* ./router/java/src/net/i2p/router/MessageValidator.java:
|
||||
* 8 bytes, peak 1K entries in 2m
|
||||
* (36K peak entries seen on fast router)
|
||||
*
|
||||
* ./router/java/src/net/i2p/router/tunnel/BloomFilterIVValidator.java:
|
||||
* 16 bytes, peak 15K entries in 10m
|
||||
*
|
||||
* If the ArrayWrapper object in the HashSet is 50 bytes, and BloomSHA1(23, 11) is 1MB,
|
||||
* then for less than 20K entries this is smaller.
|
||||
* And this uses space proportional to traffic, so it doesn't penalize small routers
|
||||
* with a fixed 8MB.
|
||||
* So let's try it for the first 2 or 3, for now.
|
||||
*
|
||||
* Also, DBF is synchronized, and uses SimpleTimer.
|
||||
* Here we use a read/write lock, with synchronization only
|
||||
* when switching double buffers, and we use SimpleScheduler.
|
||||
*
|
||||
* Yes, we could stare at stats all day, and try to calculate an acceptable
|
||||
* false-positive rate for each of the above uses, then estimate the DBF size
|
||||
* required to meet that rate for a given usage. Or even start adjusting the
|
||||
* Bloom filter m and k values on a per-DBF basis. But it's a whole lot easier
|
||||
* to implement something with a zero false positive rate, and uses less memory
|
||||
* for almost all bandwidth classes.
|
||||
*
|
||||
* This has a strictly zero false positive rate for <= 8 byte keys.
|
||||
* For larger keys, it is 1 / (2**64) ~= 5E-20, which is better than
|
||||
* DBF for any entry count greater than about 14K.
|
||||
*
|
||||
* DBF has a zero false negative rate over the period
|
||||
* 2 * durationMs. And a 100% false negative rate beyond that period.
|
||||
* This has the same properties.
|
||||
*
|
||||
* This performs about twice as fast as DBF in the test below.
|
||||
*
|
||||
* Deprecated for use outside of the router; to be moved to router.jar.
|
||||
*
|
||||
* @author zzz
|
||||
*/
|
||||
public class DecayingHashSet extends DecayingBloomFilter {
|
||||
private ConcurrentHashSet<ArrayWrapper> _current;
|
||||
private ConcurrentHashSet<ArrayWrapper> _previous;
|
||||
|
||||
/**
|
||||
* Create a double-buffered hash set that will decay its entries over time.
|
||||
*
|
||||
* @param durationMs entries last for at least this long, but no more than twice this long
|
||||
* @param entryBytes how large are the entries to be added? 1 to 32 bytes
|
||||
*/
|
||||
public DecayingHashSet(I2PAppContext context, int durationMs, int entryBytes) {
|
||||
this(context, durationMs, entryBytes, "DHS");
|
||||
}
|
||||
|
||||
/** @param name just for logging / debugging / stats */
|
||||
public DecayingHashSet(I2PAppContext context, int durationMs, int entryBytes, String name) {
|
||||
super(durationMs, entryBytes, name, context);
|
||||
if (entryBytes <= 0 || entryBytes > 32)
|
||||
throw new IllegalArgumentException("Bad size");
|
||||
_current = new ConcurrentHashSet(128);
|
||||
_previous = new ConcurrentHashSet(128);
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("New DHS " + name + " entryBytes = " + entryBytes +
|
||||
" cycle (s) = " + (durationMs / 1000));
|
||||
// try to get a handle on memory usage vs. false positives
|
||||
context.statManager().createRateStat("router.decayingHashSet." + name + ".size",
|
||||
"Size", "Router", new long[] { 10 * Math.max(60*1000, durationMs) });
|
||||
context.statManager().createRateStat("router.decayingHashSet." + name + ".dups",
|
||||
"1000000 * Duplicates/Size", "Router", new long[] { 10 * Math.max(60*1000, durationMs) });
|
||||
}
|
||||
|
||||
/** unsynchronized but only used for logging elsewhere */
|
||||
@Override
|
||||
public int getInsertedCount() {
|
||||
return _current.size() + _previous.size();
|
||||
}
|
||||
|
||||
/** pointless, only used for logging elsewhere */
|
||||
@Override
|
||||
public double getFalsePositiveRate() {
|
||||
if (_entryBytes <= 8)
|
||||
return 0d;
|
||||
return 1d / Math.pow(2d, 64d); // 5.4E-20
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if the entry added is a duplicate
|
||||
*/
|
||||
@Override
|
||||
public boolean add(byte entry[], int off, int len) {
|
||||
if (entry == null)
|
||||
throw new IllegalArgumentException("Null entry");
|
||||
if (len != _entryBytes)
|
||||
throw new IllegalArgumentException("Bad entry [" + len + ", expected "
|
||||
+ _entryBytes + "]");
|
||||
ArrayWrapper w = new ArrayWrapper(entry, off, len);
|
||||
getReadLock();
|
||||
try {
|
||||
return locked_add(w, true);
|
||||
} finally { releaseReadLock(); }
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if the entry added is a duplicate. the number of low order
|
||||
* bits used is determined by the entryBytes parameter used on creation of the
|
||||
* filter.
|
||||
*
|
||||
*/
|
||||
@Override
|
||||
public boolean add(long entry) {
|
||||
return add(entry, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if the entry is already known. this does NOT add the
|
||||
* entry however.
|
||||
*
|
||||
*/
|
||||
@Override
|
||||
public boolean isKnown(long entry) {
|
||||
return add(entry, false);
|
||||
}
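/**** Illustrative usage sketch, not part of the commit: DecayingHashSet is used through
      the same DecayingBloomFilter API, as in the simulated validators in main() below;
      the 8-byte variant keeps the zero-false-positive property described in the class
      javadoc. The id and stat name are made up.
DecayingBloomFilter validator = new DecayingHashSet(I2PAppContext.getGlobalContext(),
                                                    2*60*1000, 8, "exampleIds");
long someMessageId = 42L;
boolean dup = validator.add(someMessageId);   // true if seen within the last 2-4 minutes
****/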
|
||||
|
||||
private boolean add(long entry, boolean addIfNew) {
|
||||
ArrayWrapper w = new ArrayWrapper(entry);
|
||||
getReadLock();
|
||||
try {
|
||||
return locked_add(w, addIfNew);
|
||||
} finally { releaseReadLock(); }
|
||||
}
|
||||
|
||||
/**
|
||||
* @param addIfNew if true, add the element to current if it is not already there or in previous;
|
||||
* if false, only check
|
||||
* @return if the element is in either the current or previous set
|
||||
*/
|
||||
private boolean locked_add(ArrayWrapper w, boolean addIfNew) {
|
||||
boolean seen = _previous.contains(w);
|
||||
// only access _current once.
|
||||
if (!seen) {
|
||||
if (addIfNew)
|
||||
seen = !_current.add(w);
|
||||
else
|
||||
seen = _current.contains(w);
|
||||
}
|
||||
if (seen) {
|
||||
// why increment if addIfNew == false? Only used for stats...
|
||||
_currentDuplicates++;
|
||||
}
|
||||
return seen;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clear() {
|
||||
_current.clear();
|
||||
_previous.clear();
|
||||
_currentDuplicates = 0;
|
||||
}
|
||||
|
||||
/** super doesn't call clear, but neither do the users, so it seems like we should here */
|
||||
@Override
|
||||
public void stopDecaying() {
|
||||
_keepDecaying = false;
|
||||
clear();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void decay() {
|
||||
int currentCount = 0;
|
||||
long dups = 0;
|
||||
if (!getWriteLock())
|
||||
return;
|
||||
try {
|
||||
ConcurrentHashSet<ArrayWrapper> tmp = _previous;
|
||||
currentCount = _current.size();
|
||||
_previous = _current;
|
||||
_current = tmp;
|
||||
_current.clear();
|
||||
dups = _currentDuplicates;
|
||||
_currentDuplicates = 0;
|
||||
} finally { releaseWriteLock(); }
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Decaying the filter " + _name + " after inserting " + currentCount
|
||||
+ " elements and " + dups + " false positives");
|
||||
_context.statManager().addRateData("router.decayingHashSet." + _name + ".size",
|
||||
currentCount);
|
||||
if (currentCount > 0)
|
||||
_context.statManager().addRateData("router.decayingHashSet." + _name + ".dups",
|
||||
1000l*1000*dups/currentCount);
|
||||
}
|
||||
|
||||
/**
|
||||
* This saves the data as-is if the length is <= 8 bytes,
|
||||
* otherwise it stores an 8-byte hash.
|
||||
* Hash function is from DataHelper, modded to get
|
||||
* the maximum entropy given the length of the data.
|
||||
*/
|
||||
private static class ArrayWrapper {
|
||||
private final long _longhashcode;
|
||||
|
||||
public ArrayWrapper(byte[] b, int offset, int len) {
|
||||
int idx = offset;
|
||||
int shift = Math.min(8, 64 / len);
|
||||
long lhc = 0;
|
||||
for (int i = 0; i < len; i++) {
|
||||
// xor better than + in tests
|
||||
lhc ^= (((long) b[idx++]) << (i * shift));
|
||||
}
|
||||
_longhashcode = lhc;
|
||||
}
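/**** Illustrative note, not part of the commit: for a 16-byte entry the loop above uses
      shift = min(8, 64/16) = 4, so successive bytes land at bit offsets 0, 4, 8, ... 60
      and overlap by four bits; for entries of 8 bytes or less the shift is 8 and the
      bytes pack into the long without overlap, which is what gives short keys the zero
      collision rate claimed in the class javadoc.
****/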
|
||||
|
||||
/** faster version for when storing <= 8 bytes */
|
||||
public ArrayWrapper(long b) {
|
||||
_longhashcode = b;
|
||||
}
|
||||
|
||||
public int hashCode() {
|
||||
return (int) _longhashcode;
|
||||
}
|
||||
|
||||
public long longHashCode() {
|
||||
return _longhashcode;
|
||||
}
|
||||
|
||||
public boolean equals(Object o) {
|
||||
if (o == null || !(o instanceof ArrayWrapper))
|
||||
return false;
|
||||
return ((ArrayWrapper) o).longHashCode() == _longhashcode;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* vs. DBF, this measures 1.93x faster for testByLong and 2.46x faster for testByBytes.
|
||||
*/
|
||||
public static void main(String args[]) {
|
||||
/** KBytes per sec, 1 message per KByte */
|
||||
int kbps = 256;
|
||||
int iterations = 10;
|
||||
//testSize();
|
||||
testByLong(kbps, iterations);
|
||||
testByBytes(kbps, iterations);
|
||||
}
|
||||
|
||||
/** and the answer is: 49.9 bytes. The ArrayWrapper alone measured 16, so that's 34 for the HashSet entry. */
|
||||
/*****
|
||||
private static void testSize() {
|
||||
int qty = 256*1024;
|
||||
byte b[] = new byte[8];
|
||||
Random r = new Random();
|
||||
long old = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
|
||||
ConcurrentHashSet foo = new ConcurrentHashSet(qty);
|
||||
for (int i = 0; i < qty; i++) {
|
||||
r.nextBytes(b);
|
||||
foo.add(new ArrayWrapper(b, 0, 8));
|
||||
}
|
||||
long used = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
|
||||
System.out.println("Memory per ArrayWrapper: " + (((double) (used - old)) / qty));
|
||||
}
|
||||
*****/
|
||||
|
||||
/** 8 bytes, simulate the router message validator */
|
||||
private static void testByLong(int kbps, int numRuns) {
|
||||
int messages = 60 * 10 * kbps;
|
||||
Random r = new Random();
|
||||
DecayingBloomFilter filter = new DecayingHashSet(I2PAppContext.getGlobalContext(), 600*1000, 8);
|
||||
int falsePositives = 0;
|
||||
long totalTime = 0;
|
||||
for (int j = 0; j < numRuns; j++) {
|
||||
long start = System.currentTimeMillis();
|
||||
for (int i = 0; i < messages; i++) {
|
||||
if (filter.add(r.nextLong())) {
|
||||
falsePositives++;
|
||||
System.out.println("False positive " + falsePositives + " (testByLong j=" + j + " i=" + i + ")");
|
||||
}
|
||||
}
|
||||
totalTime += System.currentTimeMillis() - start;
|
||||
filter.clear();
|
||||
}
|
||||
System.out.println("False postive rate should be " + filter.getFalsePositiveRate());
|
||||
filter.stopDecaying();
|
||||
System.out.println("After " + numRuns + " runs pushing " + messages + " entries in "
|
||||
+ DataHelper.formatDuration(totalTime/numRuns) + " per run, there were "
|
||||
+ falsePositives + " false positives");
|
||||
|
||||
}
|
||||
|
||||
/** 16 bytes, simulate the tunnel IV validator */
|
||||
private static void testByBytes(int kbps, int numRuns) {
|
||||
byte iv[][] = new byte[60*10*kbps][16];
|
||||
Random r = new Random();
|
||||
for (int i = 0; i < iv.length; i++)
|
||||
r.nextBytes(iv[i]);
|
||||
|
||||
DecayingBloomFilter filter = new DecayingHashSet(I2PAppContext.getGlobalContext(), 600*1000, 16);
|
||||
int falsePositives = 0;
|
||||
long totalTime = 0;
|
||||
for (int j = 0; j < numRuns; j++) {
|
||||
long start = System.currentTimeMillis();
|
||||
for (int i = 0; i < iv.length; i++) {
|
||||
if (filter.add(iv[i])) {
|
||||
falsePositives++;
|
||||
System.out.println("False positive " + falsePositives + " (testByBytes j=" + j + " i=" + i + ")");
|
||||
}
|
||||
}
|
||||
totalTime += System.currentTimeMillis() - start;
|
||||
filter.clear();
|
||||
}
|
||||
System.out.println("False postive rate should be " + filter.getFalsePositiveRate());
|
||||
filter.stopDecaying();
|
||||
System.out.println("After " + numRuns + " runs pushing " + iv.length + " entries in "
|
||||
+ DataHelper.formatDuration(totalTime/numRuns) + " per run, there were "
|
||||
+ falsePositives + " false positives");
|
||||
}
|
||||
}
|
@@ -458,19 +458,32 @@ public class EepGet {
|
||||
}
|
||||
|
||||
public void stopFetching() { _keepFetching = false; }
|
||||
|
||||
/**
|
||||
* Blocking fetch, returning true if the URL was retrieved, false if all retries failed
|
||||
* Blocking fetch, returning true if the URL was retrieved, false if all retries failed.
|
||||
*
|
||||
* Header timeout default 45 sec, total timeout default none, inactivity timeout default 60 sec.
|
||||
*/
|
||||
public boolean fetch() { return fetch(_fetchHeaderTimeout); }
|
||||
|
||||
/**
|
||||
* Blocking fetch, timing out individual attempts if the HTTP response headers
|
||||
* don't come back in the time given. If the timeout is zero or less, this will
|
||||
* wait indefinitely.
|
||||
*
|
||||
* Total timeout default none, inactivity timeout default 60 sec.
|
||||
*/
|
||||
public boolean fetch(long fetchHeaderTimeout) {
|
||||
return fetch(fetchHeaderTimeout, -1, -1);
|
||||
}
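/**** Illustrative usage sketch, not part of the commit: a blocking fetch using the
      three-argument fetch() defined just below, with explicit header / total /
      inactivity timeouts in milliseconds. The constructor arguments, proxy address
      and output file name are assumptions for the example only.
EepGet get = new EepGet(I2PAppContext.getGlobalContext(), true, "127.0.0.1", 4444,
                        3, "news.xml", "http://example.i2p/news.xml");
boolean ok = get.fetch(45*1000, 5*60*1000, 60*1000);
****/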
|
||||
|
||||
/**
|
||||
* Blocking fetch.
|
||||
*
|
||||
* @param fetchHeaderTimeout <= 0 for none (proxy will timeout if none, none isn't recommended if no proxy)
|
||||
* @param totalTimeout <= 0 for default none
|
||||
* @param inactivityTimeout <= 0 for default 60 sec
|
||||
*/
|
||||
public boolean fetch(long fetchHeaderTimeout, long totalTimeout, long inactivityTimeout) {
|
||||
_fetchHeaderTimeout = fetchHeaderTimeout;
|
||||
_fetchEndTime = (totalTimeout > 0 ? System.currentTimeMillis() + totalTimeout : -1);
|
||||
|
@@ -9,12 +9,13 @@ import net.i2p.I2PAppContext;
|
||||
*/
|
||||
class Executor implements Runnable {
|
||||
private final I2PAppContext _context;
|
||||
private Log _log;
|
||||
private final List _readyEvents;
|
||||
private final Log _log;
|
||||
private final List<SimpleTimer.TimedEvent> _readyEvents;
|
||||
private final SimpleStore runn;
|
||||
|
||||
public Executor(I2PAppContext ctx, Log log, List events, SimpleStore x) {
|
||||
public Executor(I2PAppContext ctx, Log log, List<SimpleTimer.TimedEvent> events, SimpleStore x) {
|
||||
_context = ctx;
|
||||
_log = log;
|
||||
_readyEvents = events;
|
||||
runn = x;
|
||||
}
|
||||
@@ -26,7 +27,7 @@ class Executor implements Runnable {
|
||||
if (_readyEvents.isEmpty())
|
||||
try { _readyEvents.wait(); } catch (InterruptedException ie) {}
|
||||
if (!_readyEvents.isEmpty())
|
||||
evt = (SimpleTimer.TimedEvent)_readyEvents.remove(0);
|
||||
evt = _readyEvents.remove(0);
|
||||
}
|
||||
|
||||
if (evt != null) {
|
||||
@@ -34,21 +35,12 @@ class Executor implements Runnable {
|
||||
try {
|
||||
evt.timeReached();
|
||||
} catch (Throwable t) {
|
||||
log("Executing task " + evt + " exited unexpectedly, please report", t);
|
||||
_log.error("Executing task " + evt + " exited unexpectedly, please report", t);
|
||||
}
|
||||
long time = _context.clock().now() - before;
|
||||
// FIXME _log won't be non-null unless we already had a CRIT
|
||||
if ( (time > 1000) && (_log != null) && (_log.shouldLog(Log.WARN)) )
|
||||
if ( (time > 1000) && (_log.shouldLog(Log.WARN)) )
|
||||
_log.warn("wtf, event execution took " + time + ": " + evt);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void log(String msg, Throwable t) {
|
||||
synchronized (this) {
|
||||
if (_log == null)
|
||||
_log = I2PAppContext.getGlobalContext().logManager().getLog(SimpleTimer.class);
|
||||
}
|
||||
_log.log(Log.CRIT, msg, t);
|
||||
}
|
||||
}
|
||||
|
@@ -44,7 +44,7 @@ public class LookaheadInputStream extends FilterInputStream {
|
||||
Arrays.fill(_footerLookahead, (byte)0x00);
|
||||
int footerRead = 0;
|
||||
while (footerRead < _footerLookahead.length) {
|
||||
int read = in.read(_footerLookahead);
|
||||
int read = in.read(_footerLookahead, footerRead, _footerLookahead.length - footerRead);
|
||||
if (read == -1) throw new IOException("EOF reading the footer lookahead");
|
||||
footerRead += read;
|
||||
}
|
||||
|
@@ -8,18 +8,22 @@ package net.i2p.util;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.InputStreamReader;
|
||||
import java.math.BigInteger;
|
||||
import java.net.URL;
|
||||
import java.security.SecureRandom;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
|
||||
import freenet.support.CPUInformation.AMDCPUInfo;
|
||||
@@ -627,30 +631,26 @@ public class NativeBigInteger extends BigInteger {
|
||||
}
|
||||
|
||||
if (_isArm) {
|
||||
InputStream in = null;
|
||||
try {
|
||||
in = new FileInputStream("/proc/cpuinfo");
|
||||
while (true) {
|
||||
String line = DataHelper.readLine(in);
|
||||
if (line == null)
|
||||
break;
|
||||
if (!line.startsWith("CPU architecture"))
|
||||
continue;
|
||||
//CPU architecture: 5TEJ
|
||||
//CPU architecture: 7
|
||||
int colon = line.indexOf(": ");
|
||||
String sver = line.substring(colon + 2, colon + 3);
|
||||
int ver = Integer.parseInt(sver);
|
||||
// add libjbigi-linux-armv7.so, libjbigi-linux-armv6.so, ...
|
||||
for (int i = ver; i >= 3; i--) {
|
||||
rv.add(_libPrefix + getMiddleName1() + primary + 'v' + i + _libSuffix);
|
||||
}
|
||||
break;
|
||||
}
|
||||
} catch (NumberFormatException nfe) {
|
||||
} catch (IOException ioe) {
|
||||
} finally {
|
||||
if (in != null) try { in.close(); } catch (IOException ioe) {}
|
||||
Map<String, String> cpuinfo = getCPUInfo();
|
||||
int ver = 0;
|
||||
String proc = cpuinfo.get("processor");
|
||||
String arch = cpuinfo.get("cpu architecture");
|
||||
if (proc != null && proc.contains("ARMv6")) {
|
||||
// Raspberry Pi workaround
|
||||
// Processor : ARMv6-compatible processor rev 7 (v6l)
|
||||
// CPU architecture: 7
|
||||
ver = 6;
|
||||
} else if (arch != null && arch.length() > 0) {
|
||||
//CPU architecture: 5TEJ
|
||||
//CPU architecture: 7
|
||||
String sver = arch.substring(0, 1);
|
||||
try {
|
||||
ver = Integer.parseInt(sver);
|
||||
} catch (NumberFormatException nfe) {}
|
||||
}
|
||||
// add libjbigi-linux-armv7.so, libjbigi-linux-armv6.so, ...
|
||||
for (int i = ver; i >= 3; i--) {
|
||||
rv.add(_libPrefix + getMiddleName1() + primary + 'v' + i + _libSuffix);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -689,6 +689,37 @@ public class NativeBigInteger extends BigInteger {
|
||||
return rv;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return /proc/cpuinfo as a key-value mapping.
|
||||
* All keys mapped to lower case.
|
||||
* All keys and values trimmed.
|
||||
* For dup keys, first one wins.
|
||||
* Currently used for ARM only.
|
||||
* @return non-null, empty on failure
|
||||
* @since 0.9.1
|
||||
*/
|
||||
private static Map<String, String> getCPUInfo() {
|
||||
Map<String, String> rv = new HashMap(32);
|
||||
BufferedReader in = null;
|
||||
try {
|
||||
in = new BufferedReader(new InputStreamReader(new FileInputStream("/proc/cpuinfo"), "ISO-8859-1"), 4096);
|
||||
String line = null;
|
||||
while ( (line = in.readLine()) != null) {
|
||||
String[] parts = line.split(":", 2);
|
||||
if (parts.length < 2)
|
||||
continue;
|
||||
String key = parts[0].trim().toLowerCase(Locale.US);
|
||||
if (!rv.containsKey(key))
|
||||
rv.put(key, parts[1].trim());
|
||||
}
|
||||
} catch (IOException ioe) {
|
||||
warn("Unable to read /proc/cpuinfo", ioe);
|
||||
} finally {
|
||||
if (in != null) try { in.close(); } catch (IOException ioe) {}
|
||||
}
|
||||
return rv;
|
||||
}
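/**** Illustrative sketch, not part of the commit: how the ARM branch above consumes
      getCPUInfo(); keys are the lower-cased /proc/cpuinfo field names, values as
      suggested by the comments in the detection code.
Map<String, String> cpuinfo = getCPUInfo();
String arch = cpuinfo.get("cpu architecture");   // e.g. "7", or "5TEJ" on older ARM
String proc = cpuinfo.get("processor");          // e.g. "ARMv6-compatible processor rev 7 (v6l)"
****/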
|
||||
|
||||
/**
|
||||
* @return may be null if optimized is true
|
||||
*/
|
||||
|
@@ -90,6 +90,8 @@ public class SSLEepGet extends EepGet {
|
||||
/** may be null if init failed */
|
||||
private SavingTrustManager _stm;
|
||||
|
||||
private static final boolean _isAndroid = System.getProperty("java.vendor").contains("Android");
|
||||
|
||||
/**
|
||||
* A new SSLEepGet with a new SSLState
|
||||
*/
|
||||
@@ -192,12 +194,23 @@ public class SSLEepGet extends EepGet {
|
||||
String override = System.getProperty("javax.net.ssl.keyStore");
|
||||
if (override != null)
|
||||
success = loadCerts(new File(override), ks);
|
||||
if (!success)
|
||||
success = loadCerts(new File(System.getProperty("java.home"), "lib/security/jssecacerts"), ks);
|
||||
if (!success)
|
||||
success = loadCerts(new File(System.getProperty("java.home"), "lib/security/cacerts"), ks);
|
||||
if (!success) {
|
||||
if (_isAndroid) {
|
||||
// thru API 13. As of API 14 (ICS), the file is gone, but
|
||||
// ks.load(null, pw) will bring in the default certs?
|
||||
success = loadCerts(new File(System.getProperty("java.home"), "etc/security/cacerts.bks"), ks);
|
||||
} else {
|
||||
success = loadCerts(new File(System.getProperty("java.home"), "lib/security/jssecacerts"), ks);
|
||||
if (!success)
|
||||
success = loadCerts(new File(System.getProperty("java.home"), "lib/security/cacerts"), ks);
|
||||
}
|
||||
}
|
||||
|
||||
if (!success) {
|
||||
try {
|
||||
// must be initted
|
||||
ks.load(null, "changeit".toCharArray());
|
||||
} catch (Exception e) {}
|
||||
_log.error("All key store loads failed, will only load local certificates");
|
||||
} else if (_log.shouldLog(Log.INFO)) {
|
||||
int count = 0;
|
||||
|
@@ -1,343 +0,0 @@
|
||||
package org.xlattice.crypto.filters;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.concurrent.BlockingQueue;
|
||||
import java.util.concurrent.LinkedBlockingQueue;
|
||||
|
||||
/**
|
||||
* A Bloom filter for sets of SHA1 digests. A Bloom filter uses a set
|
||||
* of k hash functions to determine set membership. Each hash function
|
||||
* produces a value in the range 0..M-1. The filter is of size M. To
|
||||
* add a member to the set, apply each function to the new member and
|
||||
* set the corresponding bit in the filter. For M very large relative
|
||||
* to k, this will normally set k bits in the filter. To check whether
|
||||
* x is a member of the set, apply each of the k hash functions to x
|
||||
* and check whether the corresponding bits are set in the filter. If
|
||||
* any are not set, x is definitely not a member. If all are set, x
|
||||
* may be a member. The probability of error (the false positive rate)
|
||||
* is f = (1 - e^(-kN/M))^k, where N is the number of set members.
|
||||
*
|
||||
* This class takes advantage of the fact that SHA1 digests are good-
|
||||
* quality pseudo-random numbers. The k hash functions are the values
|
||||
* of distinct sets of bits taken from the 20-byte SHA1 hash. The
|
||||
* number of bits in the filter, M, is constrained to be a power of
|
||||
* 2; M == 2^m. The number of bits in each hash function may not
|
||||
* exceed floor(m/k).
|
||||
*
|
||||
* This class is designed to be thread-safe, but this has not been
|
||||
* exhaustively tested.
|
||||
*
|
||||
* @author < A HREF="mailto:jddixon@users.sourceforge.net">Jim Dixon</A>
|
||||
*
|
||||
* BloomSHA1.java and KeySelector.java are BSD licensed from the xlattice
|
||||
* app - http://xlattice.sourceforge.net/
|
||||
*
|
||||
* minor tweaks by jrandom, exposing unsynchronized access and
|
||||
* allowing larger M and K. changes released into the public domain.
|
||||
*
|
||||
* Note that this is used only by DecayingBloomFilter, which uses only
|
||||
* the unsynchronized locked_foo() methods.
|
||||
* Deprecated for use outside of the router; to be moved to router.jar.
|
||||
*
|
||||
* As of 0.8.11, the locked_foo() methods are thread-safe, in that they work,
|
||||
* but there is a minor risk of false-negatives if two threads are
|
||||
* accessing the same bloom filter integer.
|
||||
*/
|
||||
|
||||
public class BloomSHA1 {
    protected final int m;
    protected final int k;
    protected int count;

    protected final int[] filter;
    protected final KeySelector ks;

    // convenience variables
    protected final int filterBits;
    protected final int filterWords;

    private final BlockingQueue<int[]> buf;

    /* (24,11) too big - see KeySelector

    public static void main(String args[]) {
        BloomSHA1 b = new BloomSHA1(24, 11);
        for (int i = 0; i < 100; i++) {
            byte v[] = new byte[32];
            v[0] = (byte)i;
            b.insert(v);
        }
    }
    */


    /**
     * Creates a filter with 2^m bits and k 'hash functions', where
     * each hash function is a portion of the 160-bit SHA1 hash.
     *
     * @param m determines number of bits in filter
     * @param k number of hash functions
     *
     * See KeySelector for important restriction on max m and k
     */
    public BloomSHA1( int m, int k) {
        // XXX need to devise more reasonable set of checks
        //if ( m < 2 || m > 20) {
        //    throw new IllegalArgumentException("m out of range");
        //}
        //if ( k < 1 || ( k * m > 160 )) {
        //    throw new IllegalArgumentException(
        //        "too many hash functions for filter size");
        //}
        this.m = m;
        this.k = k;
        filterBits = 1 << m;
        filterWords = (filterBits + 31)/32;     // round up
        filter = new int[filterWords];
        ks = new KeySelector(m, k);
        buf = new LinkedBlockingQueue(16);

        // DEBUG
        //System.out.println("Bloom constructor: m = " + m + ", k = " + k
        //    + "\n    filterBits = " + filterBits
        //    + ", filterWords = " + filterWords);
        // END
    }

    /**
     * Creates a filter of 2^m bits, with the number of 'hash functions'
     * k defaulting to 8.
     * @param m determines size of filter
     */
    public BloomSHA1 (int m) {
        this(m, 8);
    }

    /**
     * Creates a filter of 2^20 bits with k defaulting to 8.
     */
    public BloomSHA1 () {
        this (20, 8);
    }
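    /*****
    // Illustrative usage sketch, not part of the original file: a filter of
    // 2^20 bits (m=20) with k=8 hash slices.  The 20-byte array stands in for
    // a real SHA1 digest of the data being tracked.
    public static void main(String args[]) {
        BloomSHA1 filter = new BloomSHA1(20, 8);
        byte[] digest = new byte[20];           // normally SHA1(data)
        filter.insert(digest);
        boolean maybe = filter.member(digest);  // true; false positives possible, false negatives are not
        System.out.println("member? " + maybe + ", estimated fp rate: " + filter.falsePositives());
    }
    *****/
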
    /** Clear the filter, unsynchronized */
    protected void doClear() {
        Arrays.fill(filter, 0);
        count = 0;
    }

    /** Synchronized version */
    public void clear() {
        synchronized (this) {
            doClear();
        }
    }

    /**
     * Returns the number of keys which have been inserted. This
     * class (BloomSHA1) does not guarantee uniqueness in any sense; if the
     * same key is added N times, the number of set members reported
     * will increase by N.
     *
     * @return number of set members
     */
    public final int size() {
        synchronized (this) {
            return count;
        }
    }

    /**
     * @return number of bits in filter
     */
    public final int capacity () {
        return filterBits;
    }

    /**
     * Add a key to the set represented by the filter.
     *
     * XXX This version does not maintain 4-bit counters, it is not
     * a counting Bloom filter.
     *
     * @param b byte array representing a key (SHA1 digest)
     */
    public void insert (byte[]b) { insert(b, 0, b.length); }

    public void insert (byte[]b, int offset, int len) {
        synchronized(this) {
            locked_insert(b, offset, len);
        }
    }

    public final void locked_insert(byte[]b) { locked_insert(b, 0, b.length); }

    public final void locked_insert(byte[]b, int offset, int len) {
        int[] bitOffset = acquire();
        int[] wordOffset = acquire();
        ks.getOffsets(b, offset, len, bitOffset, wordOffset);
        for (int i = 0; i < k; i++) {
            filter[wordOffset[i]] |= 1 << bitOffset[i];
        }
        count++;
        buf.offer(bitOffset);
        buf.offer(wordOffset);
    }

    /**
     * Is a key in the filter. Sets up the bit and word offset arrays.
     *
     * @param b byte array representing a key (SHA1 digest)
     * @return true if b is in the filter
     */
    protected final boolean isMember(byte[] b) { return isMember(b, 0, b.length); }

    protected final boolean isMember(byte[] b, int offset, int len) {
        int[] bitOffset = acquire();
        int[] wordOffset = acquire();
        ks.getOffsets(b, offset, len, bitOffset, wordOffset);
        for (int i = 0; i < k; i++) {
            if (! ((filter[wordOffset[i]] & (1 << bitOffset[i])) != 0) ) {
                buf.offer(bitOffset);
                buf.offer(wordOffset);
                return false;
            }
        }
        buf.offer(bitOffset);
        buf.offer(wordOffset);
        return true;
    }

    public final boolean locked_member(byte[]b) { return isMember(b); }
    public final boolean locked_member(byte[]b, int offset, int len) { return isMember(b, offset, len); }

    /**
     * Is a key in the filter. External interface, internally synchronized.
     *
     * @param b byte array representing a key (SHA1 digest)
     * @return true if b is in the filter
     */
    public final boolean member(byte[]b) { return member(b, 0, b.length); }
    public final boolean member(byte[]b, int offset, int len) {
        synchronized (this) {
            return isMember(b, offset, len);
        }
    }

    /**
     * Get the bloom filter offsets for reuse.
     * Caller should call release(rv) when done with it.
     * @since 0.8.11
     */
    public FilterKey getFilterKey(byte[] b, int offset, int len) {
        int[] bitOffset = acquire();
        int[] wordOffset = acquire();
        ks.getOffsets(b, offset, len, bitOffset, wordOffset);
        return new FilterKey(bitOffset, wordOffset);
    }

    /**
     * Add the key to the filter.
     * @since 0.8.11
     */
    public void locked_insert(FilterKey fk) {
        for (int i = 0; i < k; i++) {
            filter[fk.wordOffset[i]] |= 1 << fk.bitOffset[i];
        }
        count++;
    }


    /**
     * Is the key in the filter.
     * @since 0.8.11
     */
    public boolean locked_member(FilterKey fk) {
        for (int i = 0; i < k; i++) {
            if (! ((filter[fk.wordOffset[i]] & (1 << fk.bitOffset[i])) != 0) )
                return false;
        }
        return true;
    }

    /**
     * @since 0.8.11
     */
    private int[] acquire() {
        int[] rv = buf.poll();
        if (rv != null)
            return rv;
        return new int[k];
    }

    /**
     * @since 0.8.11
     */
    public void release(FilterKey fk) {
        buf.offer(fk.bitOffset);
        buf.offer(fk.wordOffset);
    }
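    /*****
    // Illustrative sketch, not part of the original file: the intended
    // getFilterKey() / release() pattern, which lets a caller such as
    // DecayingBloomFilter compute the offsets once and probe the filter
    // repeatedly without re-hashing.  The byte array is assumed to hold a
    // SHA1-sized key.
    byte[] key = new byte[20];
    FilterKey fk = getFilterKey(key, 0, key.length);
    boolean seen = locked_member(fk);
    if (!seen)
        locked_insert(fk);
    release(fk);    // returns the int[] buffers to the pool for reuse
    *****/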

    /**
     * Store the (opaque) bloom filter offsets for reuse.
     * @since 0.8.11
     */
    public static class FilterKey {

        private final int[] bitOffset;
        private final int[] wordOffset;

        private FilterKey(int[] bitOffset, int[] wordOffset) {
            this.bitOffset = bitOffset;
            this.wordOffset = wordOffset;
        }
    }

    /**
     * @param n number of set members
     * @return approximate false positive rate
     */
    public final double falsePositives(int n) {
        // (1 - e^(-kN/M))^k
        return java.lang.Math.pow (
            (1l - java.lang.Math.exp(0d- ((double)k) * (long)n / filterBits)), k);
    }

    public final double falsePositives() {
        return falsePositives(count);
    }
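    /*****
    // Worked example, not part of the original file: for the default
    // constructor (m=20, k=8) the filter has M = 2^20 = 1,048,576 bits.
    // With N = 10,000 inserted keys:
    //   kN/M = 8 * 10000 / 1048576 ~= 0.0763
    //   f = (1 - e^(-0.0763))^8 ~= 0.0735^8 ~= 8.5e-10
    // so falsePositives(10000) returns roughly 8.5e-10 for a BloomSHA1(20, 8).
    *****/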

    /*****
    // DEBUG METHODS
    public static String keyToString(byte[] key) {
        StringBuilder sb = new StringBuilder().append(key[0]);
        for (int i = 1; i < key.length; i++) {
            sb.append(".").append(Integer.toString(key[i], 16));
        }
        return sb.toString();
    }
    *****/

    /** convert 64-bit integer to hex String */
    /*****
    public static String ltoh (long i) {
        StringBuilder sb = new StringBuilder().append("#")
                                .append(Long.toString(i, 16));
        return sb.toString();
    }
    *****/

    /** convert 32-bit integer to String */
    /*****
    public static String itoh (int i) {
        StringBuilder sb = new StringBuilder().append("#")
                                .append(Integer.toString(i, 16));
        return sb.toString();
    }
    *****/

    /** convert single byte to String */
    /*****
    public static String btoh (byte b) {
        int i = 0xff & b;
        return itoh(i);
    }
    *****/
}
@@ -1,279 +0,0 @@
package org.xlattice.crypto.filters;

/**
 * Given a key, populates arrays determining word and bit offsets into
 * a Bloom filter.
 *
 * @author <A HREF="mailto:jddixon@users.sourceforge.net">Jim Dixon</A>
 *
 * BloomSHA1.java and KeySelector.java are BSD licensed from the xlattice
 * app - http://xlattice.sourceforge.net/
 *
 * minor tweaks by jrandom, exposing unsynchronized access and
 * allowing larger M and K. changes released into the public domain.
 *
 * As of 0.8.11, bitoffset and wordoffset out parameters moved from fields
 * to selector arguments, to allow concurrency.
 * All methods are now thread-safe.
 */
public class KeySelector {

    private final int m;
    private final int k;
    private final BitSelector bitSel;
    private final WordSelector wordSel;

    public interface BitSelector {
        /**
         * @param bitOffset Out parameter of length k
         * @since 0.8.11 out parameter added
         */
        public void getBitSelectors(byte[] b, int offset, int length, int[] bitOffset);
    }

    public interface WordSelector {
        /**
         * @param wordOffset Out parameter of length k
         * @since 0.8.11 out parameter added
         */
        public void getWordSelectors(byte[] b, int offset, int length, int[] wordOffset);
    }

    /** AND with byte to expose index-many bits */
    public final static int[] UNMASK = {
    //  0  1  2  3   4   5   6    7    8    9    10    11    12    13     14     15
        0, 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023, 2047, 4095, 8191, 16383, 32767};
    /** AND with byte to zero out index-many bits */
    public final static int[] MASK = {
        ~0,~1,~3,~7,~15,~31,~63,~127,~255,~511,~1023,~2047,~4095,~8191,~16383,~32767};

    public final static int TWO_UP_15 = 32 * 1024;

    /**
     * Creates a key selector for a Bloom filter. When a key is presented
     * to the getOffsets() method, the k 'hash function' values are
     * extracted and used to populate bitOffset and wordOffset arrays which
     * specify the k flags to be set or examined in the filter.
     *
     * @param m size of the filter as a power of 2
     * @param k number of 'hash functions'
     *
     * Note that if k and m are too big, the GenericWordSelector blows up -
     * The max for 32-byte keys is m=23 and k=11.
     * The precise restriction appears to be:
     * ((5k + (k-1)(m-5)) / 8) + 2 < keySizeInBytes
     *
     * It isn't clear how to fix this.
     */
    public KeySelector (int m, int k) {
        //if ( (m < 2) || (m > 20)|| (k < 1)
        //    || (bitOffset == null) || (wordOffset == null)) {
        //    throw new IllegalArgumentException();
        //}
        this.m = m;
        this.k = k;
        bitSel = new GenericBitSelector();
        wordSel = new GenericWordSelector();
    }
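    /*****
    // Worked check, not part of the original file, of the restriction above,
    //   ((5k + (k-1)(m-5)) / 8) + 2 < keySizeInBytes
    // reading the division as integer (floor) division, for 32-byte keys:
    //   m=23, k=11:  (55 + 10*18)/8 + 2 = 235/8 + 2 = 29 + 2 = 31 < 32       -> fits
    //   m=24, k=11:  (55 + 10*19)/8 + 2 = 245/8 + 2 = 30 + 2 = 32, not < 32  -> too big,
    //                matching the "(24,11) too big" note in BloomSHA1.
    *****/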

    /**
     * Extracts the k bit offsets from a key, suitable for general values
     * of m and k.
     */
    public class GenericBitSelector implements BitSelector {
        /** Do the extraction */
        public void getBitSelectors(byte[] b, int offset, int length, int[] bitOffset) {
            int curBit = 8 * offset;
            int curByte;
            for (int j = 0; j < k; j++) {
                curByte = curBit / 8;
                int bitsUnused = ((curByte + 1) * 8) - curBit;   // left in byte

                // // DEBUG
                // System.out.println (
                //     "this byte = " + btoh(b[curByte])
                //     + ", next byte = " + btoh(b[curByte + 1])
                //     + "; curBit=" + curBit + ", curByte= " + curByte
                //     + ", bitsUnused=" + bitsUnused);
                // // END
                if (bitsUnused > 5) {
                    bitOffset[j] = ((0xff & b[curByte])
                                    >> (bitsUnused - 5)) & UNMASK[5];
                    // // DEBUG
                    // System.out.println(
                    //     "    before shifting: " + btoh(b[curByte])
                    //     + "\n    after shifting: "
                    //     + itoh( (0xff & b[curByte]) >> (bitsUnused - 5))
                    //     + "\n    mask: " + itoh(UNMASK[5]) );
                    // // END
                } else if (bitsUnused == 5) {
                    bitOffset[j] = b[curByte] & UNMASK[5];
                } else {
                    bitOffset[j] = (b[curByte] & UNMASK[bitsUnused])
                                   | (((0xff & b[curByte + 1]) >> 3)
                                      & MASK[bitsUnused]);
                    // // DEBUG
                    // System.out.println(
                    //     "    contribution from first byte: "
                    //     + itoh(b[curByte] & UNMASK[bitsUnused])
                    //     + "\n    second byte: " + btoh(b[curByte + 1])
                    //     + "\n    shifted: " + itoh((0xff & b[curByte + 1]) >> 3)
                    //     + "\n    mask: " + itoh(MASK[bitsUnused])
                    //     + "\n    contribution from second byte: "
                    //     + itoh((0xff & b[curByte + 1] >> 3) & MASK[bitsUnused]));
                    // // END
                }
                // // DEBUG
                // System.out.println ("    bitOffset[j] = " + bitOffset[j]);
                // // END
                curBit += 5;
            }
        }
    }
    /**
     * Extracts the k word offsets from a key. Suitable for general
     * values of m and k. See above for formula for max m and k.
     */
    public class GenericWordSelector implements WordSelector {
        /** Extract the k offsets into the word offset array */
        public void getWordSelectors(byte[] b, int offset, int length, int[] wordOffset) {
            int stride = m - 5;
            //assert true: stride<16;
            int curBit = (k * 5) + (offset * 8);
            int curByte;
            for (int j = 0; j < k; j++) {
                curByte = curBit / 8;
                int bitsUnused = ((curByte + 1) * 8) - curBit;   // left in byte

                // // DEBUG
                // System.out.println (
                //     "curr 3 bytes: " + btoh(b[curByte])
                //     + (curByte < 19 ?
                //         " " + btoh(b[curByte + 1]) : "")
                //     + (curByte < 18 ?
                //         " " + btoh(b[curByte + 2]) : "")
                //     + "; curBit=" + curBit + ", curByte= " + curByte
                //     + ", bitsUnused=" + bitsUnused);
                // // END

                if (bitsUnused > stride) {
                    // the value is entirely within the current byte
                    wordOffset[j] = ((0xff & b[curByte])
                                     >> (bitsUnused - stride))
                                    & UNMASK[stride];
                } else if (bitsUnused == stride) {
                    // the value fills the current byte
                    wordOffset[j] = b[curByte] & UNMASK[stride];
                } else {    // bitsUnused < stride
                    // value occupies more than one byte
                    // bits from first byte, right-aligned in result
                    wordOffset[j] = b[curByte] & UNMASK[bitsUnused];
                    // // DEBUG
                    // System.out.println("    first byte contributes "
                    //     + itoh(wordOffset[j]));
                    // // END
                    // bits from second byte
                    int bitsToGet = stride - bitsUnused;
                    if (bitsToGet >= 8) {
                        // 8 bits from second byte
                        wordOffset[j] |= (0xff & b[curByte + 1]) << bitsUnused;
                        // // DEBUG
                        // System.out.println("    second byte contributes "
                        //     + itoh(
                        //         (0xff & b[curByte + 1]) << bitsUnused
                        //     ));
                        // // END

                        // bits from third byte
                        bitsToGet -= 8;
                        if (bitsToGet > 0) {
                            // AIOOBE here if m and k too big (23,11 is the max)
                            // for a 32-byte key - see above
                            wordOffset[j] |=
                                ((0xff & b[curByte + 2]) >> (8 - bitsToGet))
                                << (stride - bitsToGet) ;
                            // // DEBUG
                            // System.out.println("    third byte contributes "
                            //     + itoh(
                            //         (((0xff & b[curByte + 2]) >> (8 - bitsToGet))
                            //         << (stride - bitsToGet))
                            //     ));
                            // // END
                        }
                    } else {
                        // all remaining bits are within second byte
                        wordOffset[j] |= ((b[curByte + 1] >> (8 - bitsToGet))
                                          & UNMASK[bitsToGet])
                                         << bitsUnused;
                        // // DEBUG
                        // System.out.println("    second byte contributes "
                        //     + itoh(
                        //         ((b[curByte + 1] >> (8 - bitsToGet))
                        //         & UNMASK[bitsToGet])
                        //         << bitsUnused
                        //     ));
                        // // END
                    }
                }
                // // DEBUG
                // System.out.println (
                //     "    wordOffset[" + j + "] = " + wordOffset[j]
                //     + ", " + itoh(wordOffset[j])
                //     );
                // // END
                curBit += stride;
            }
        }
    }

    /**
     * Given a key, populate the word and bit offset arrays, each
     * of which has k elements.
     *
     * @param key cryptographic key used in populating the arrays
     * @param bitOffset Out parameter of length k
     * @param wordOffset Out parameter of length k
     * @since 0.8.11 out parameters added
     */
    public void getOffsets (byte[] key, int[] bitOffset, int[] wordOffset) {
        getOffsets(key, 0, key.length, bitOffset, wordOffset);
    }

    /**
     * Given a key, populate the word and bit offset arrays, each
     * of which has k elements.
     *
     * @param key cryptographic key used in populating the arrays
     * @param bitOffset Out parameter of length k
     * @param wordOffset Out parameter of length k
     * @since 0.8.11 out parameters added
     */
    public void getOffsets (byte[] key, int off, int len, int[] bitOffset, int[] wordOffset) {
        // skip these checks for speed
        //if (key == null) {
        //    throw new IllegalArgumentException("null key");
        //}
        //if (len < 20) {
        //    throw new IllegalArgumentException(
        //        "key must be at least 20 bytes long");
        //}
        // // DEBUG
        // System.out.println("KeySelector.getOffsets for "
        //     + BloomSHA1.keyToString(b));
        // // END
        bitSel.getBitSelectors(key, off, len, bitOffset);
        wordSel.getWordSelectors(key, off, len, wordOffset);
    }
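    /*****
    // Illustrative sketch, not part of the original file: how a caller such as
    // BloomSHA1 uses getOffsets().  For a KeySelector(20, 8), each of the 8
    // 'hash functions' yields a 5-bit position within a 32-bit word (bitOffset)
    // and a 15-bit word index (wordOffset), all sliced out of a 20-byte SHA1
    // digest.
    KeySelector sel = new KeySelector(20, 8);
    byte[] digest = new byte[20];          // normally SHA1(data)
    int[] bitOffset = new int[8];
    int[] wordOffset = new int[8];
    sel.getOffsets(digest, bitOffset, wordOffset);
    // word to touch in the filter: wordOffset[i]; bit within that word: bitOffset[i]
    *****/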

    /*****
    // DEBUG METHODS ////////////////////////////////////////////////
    String itoh(int i) {
        return BloomSHA1.itoh(i);
    }
    String btoh(byte b) {
        return BloomSHA1.btoh(b);
    }
    *****/
}