propagate from branch 'i2p.i2p.zzz.test' (head f402c08d0b2796653b559711cb1ea8c3c0204372)

to branch 'i2p.i2p' (head 4d0babb75e3c5237b10ff49f57599c53c581bb83)
zzz
2009-05-17 12:20:34 +00:00
14 changed files with 190 additions and 102 deletions

router/java/src/net/i2p/router/networkdb/kademlia/FloodfillStoreJob.java

@@ -12,6 +12,8 @@ import java.util.Set;
 import net.i2p.data.DataStructure;
 import net.i2p.data.Hash;
+import net.i2p.data.LeaseSet;
+import net.i2p.data.RouterInfo;
 import net.i2p.router.Job;
 import net.i2p.router.RouterContext;
@@ -45,10 +47,23 @@ class FloodfillStoreJob extends StoreJob {
     @Override
     protected void succeed() {
         super.succeed();
-        if (_state != null)
-            getContext().jobQueue().addJob(new FloodfillVerifyStoreJob(getContext(), _state.getTarget(), _facade));
+        if (_state != null) {
+            // Get the time stamp from the data we sent, so the Verify job can make sure that
+            // it finds something stamped with that time or newer.
+            long published = 0;
+            boolean isRouterInfo = false;
+            DataStructure data = _state.getData();
+            if (data instanceof RouterInfo) {
+                published = ((RouterInfo) data).getPublished();
+                isRouterInfo = true;
+            } else if (data instanceof LeaseSet) {
+                published = ((LeaseSet) data).getEarliestLeaseDate();
+            }
+            getContext().jobQueue().addJob(new FloodfillVerifyStoreJob(getContext(), _state.getTarget(),
+                                                                       published, isRouterInfo, _facade));
+        }
     }
 
     @Override
     public String getName() { return "Floodfill netDb store"; }
 }
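The dispatch in succeed() is the whole trick: a RouterInfo carries an explicit published date, while a LeaseSet's freshness is approximated by its earliest lease expiry, and anything else yields 0. A minimal standalone sketch of the same extraction (the helper name extractPublished is invented for illustration, not part of this commit):

import net.i2p.data.DataStructure;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;

class PublishedExtractor {
    /**
     * Hypothetical helper mirroring the succeed() logic above:
     * returns the timestamp the verify job should expect to find,
     * or 0 if the structure is neither a RouterInfo nor a LeaseSet.
     */
    static long extractPublished(DataStructure data) {
        if (data instanceof RouterInfo)
            return ((RouterInfo) data).getPublished();
        if (data instanceof LeaseSet)
            return ((LeaseSet) data).getEarliestLeaseDate();
        return 0;
    }
}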

router/java/src/net/i2p/router/networkdb/kademlia/FloodfillVerifyStoreJob.java

@@ -5,6 +5,8 @@ import java.util.List;
 import net.i2p.data.DataStructure;
 import net.i2p.data.Hash;
+import net.i2p.data.LeaseSet;
+import net.i2p.data.RouterInfo;
 import net.i2p.data.i2np.DatabaseLookupMessage;
 import net.i2p.data.i2np.DatabaseSearchReplyMessage;
 import net.i2p.data.i2np.DatabaseStoreMessage;
@@ -29,21 +31,33 @@ public class FloodfillVerifyStoreJob extends JobImpl {
     private FloodfillNetworkDatabaseFacade _facade;
     private long _expiration;
     private long _sendTime;
+    private long _published;
+    private boolean _isRouterInfo;
 
     private static final int VERIFY_TIMEOUT = 10*1000;
 
-    public FloodfillVerifyStoreJob(RouterContext ctx, Hash key, FloodfillNetworkDatabaseFacade facade) {
+    public FloodfillVerifyStoreJob(RouterContext ctx, Hash key, long published, boolean isRouterInfo, FloodfillNetworkDatabaseFacade facade) {
         super(ctx);
         _key = key;
+        _published = published;
+        _isRouterInfo = isRouterInfo;
         _log = ctx.logManager().getLog(getClass());
         _facade = facade;
         // wait 10 seconds before trying to verify the store
         getTiming().setStartAfter(ctx.clock().now() + VERIFY_TIMEOUT);
-        getContext().statManager().createRateStat("netDb.floodfillVerifyOK", "How long a floodfill verify takes when it succeeds", "NetworkDatabase", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
-        getContext().statManager().createRateStat("netDb.floodfillVerifyFail", "How long a floodfill verify takes when it fails", "NetworkDatabase", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
-        getContext().statManager().createRateStat("netDb.floodfillVerifyTimeout", "How long a floodfill verify takes when it times out", "NetworkDatabase", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
+        getContext().statManager().createRateStat("netDb.floodfillVerifyOK", "How long a floodfill verify takes when it succeeds", "NetworkDatabase", new long[] { 60*60*1000 });
+        getContext().statManager().createRateStat("netDb.floodfillVerifyFail", "How long a floodfill verify takes when it fails", "NetworkDatabase", new long[] { 60*60*1000 });
+        getContext().statManager().createRateStat("netDb.floodfillVerifyTimeout", "How long a floodfill verify takes when it times out", "NetworkDatabase", new long[] { 60*60*1000 });
     }
 
     public String getName() { return "Verify netdb store"; }
 
+    /**
+     *  Wait 10 seconds, then query a random floodfill for the leaseset or routerinfo
+     *  that we just stored to a (hopefully different) floodfill peer.
+     *
+     *  If it fails (after waiting up to another 10 seconds), resend the data.
+     *  If the queried data is older than what we stored, that counts as a fail.
+     **/
     public void runJob() {
         _target = pickTarget();
         if (_target == null) return;
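The 10-second delay before the verify is enforced by the job queue, not by a sleeping thread: the job is queued immediately, but its timing forbids running it before the startAfter instant. A minimal sketch of the pattern using the same JobImpl APIs seen above (the class name DelayedSketchJob is invented for illustration):

import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;

class DelayedSketchJob extends JobImpl {
    DelayedSketchJob(RouterContext ctx) {
        super(ctx);
        // Eligible to run no earlier than 10 seconds from now;
        // the job queue, not a sleeping thread, enforces the delay.
        getTiming().setStartAfter(ctx.clock().now() + 10*1000);
    }
    public String getName() { return "Delayed sketch"; }
    public void runJob() {
        // runs once, roughly 10 seconds after being queued
    }
}

It would be queued with ctx.jobQueue().addJob(new DelayedSketchJob(ctx)), just as FloodfillStoreJob queues the verify job.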
@@ -118,20 +132,29 @@ public class FloodfillVerifyStoreJob extends JobImpl {
         public void runJob() {
             long delay = getContext().clock().now() - _sendTime;
             if (_message instanceof DatabaseStoreMessage) {
-                // store ok, w00t!
-                // Hmm should we verify it's as recent as the one we sent???
-                getContext().profileManager().dbLookupSuccessful(_target, delay);
-                getContext().statManager().addRateData("netDb.floodfillVerifyOK", delay, 0);
-            } else {
-                // store failed, boo, hiss!
-                if (_message instanceof DatabaseSearchReplyMessage) {
-                    // assume 0 old, all new, 0 invalid, 0 dup
-                    getContext().profileManager().dbLookupReply(_target, 0,
-                                ((DatabaseSearchReplyMessage)_message).getNumReplies(), 0, 0, delay);
-                }
-                getContext().statManager().addRateData("netDb.floodfillVerifyFail", delay, 0);
-                resend();
-            }
+                // Verify it's as recent as the one we sent
+                boolean success = false;
+                DatabaseStoreMessage dsm = (DatabaseStoreMessage)_message;
+                if (_isRouterInfo && dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO)
+                    success = dsm.getRouterInfo().getPublished() >= _published;
+                else if ((!_isRouterInfo) && dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET)
+                    success = dsm.getLeaseSet().getEarliestLeaseDate() >= _published;
+                if (success) {
+                    // store ok, w00t!
+                    getContext().profileManager().dbLookupSuccessful(_target, delay);
+                    getContext().statManager().addRateData("netDb.floodfillVerifyOK", delay, 0);
+                    return;
+                }
+                if (_log.shouldLog(Log.WARN))
+                    _log.warn("Verify failed - older");
+            } else if (_message instanceof DatabaseSearchReplyMessage) {
+                // assume 0 old, all new, 0 invalid, 0 dup
+                getContext().profileManager().dbLookupReply(_target, 0,
+                            ((DatabaseSearchReplyMessage)_message).getNumReplies(), 0, 0, delay);
+            }
+            // store failed, boo, hiss!
+            getContext().statManager().addRateData("netDb.floodfillVerifyFail", delay, 0);
+            resend();
         }
         public void setMessage(I2NPMessage message) { _message = message; }
     }
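The new success predicate demands both the right payload type and a timestamp no older than what we stored; the >= (rather than >) matters, since the queried floodfill may legitimately return exactly the entry we sent. The same rule as an isolated predicate, using only the DatabaseStoreMessage accessors shown above (the method name isFreshEnough is illustrative, not part of this commit):

import net.i2p.data.i2np.DatabaseStoreMessage;

class VerifySketch {
    /**
     * Illustrative predicate mirroring the verify logic above:
     * the reply must carry the same type of entry we stored, and
     * its timestamp must be no older than the one we sent.
     */
    static boolean isFreshEnough(DatabaseStoreMessage dsm, boolean isRouterInfo, long published) {
        if (isRouterInfo && dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO)
            return dsm.getRouterInfo().getPublished() >= published;
        if ((!isRouterInfo) && dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET)
            return dsm.getLeaseSet().getEarliestLeaseDate() >= published;
        return false;  // wrong type of data in the reply: treat as a failed verify
    }
}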

router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java

@@ -627,9 +627,9 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     String validate(Hash key, RouterInfo routerInfo) throws IllegalArgumentException {
         long now = _context.clock().now();
         boolean upLongEnough = _context.router().getUptime() > 60*60*1000;
-        // Once we're over 300 routers, reduce the expiration time down from the default,
+        // Once we're over 150 routers, reduce the expiration time down from the default,
         // as a crude way of limiting memory usage.
-        // i.e. at 600 routers the expiration time will be about half the default, etc.
+        // i.e. at 300 routers the expiration time will be about half the default, etc.
         // And if we're floodfill, we can keep the expiration really short, since
         // we are always getting the latest published to us.
         // As the net grows this won't be sufficient, and we'll have to implement
@@ -638,9 +638,10 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         if (FloodfillNetworkDatabaseFacade.floodfillEnabled(_context))
             adjustedExpiration = ROUTER_INFO_EXPIRATION_FLOODFILL;
         else
+            // _kb.size() includes leasesets but that's ok
             adjustedExpiration = Math.min(ROUTER_INFO_EXPIRATION,
                         ROUTER_INFO_EXPIRATION_MIN +
                         ((ROUTER_INFO_EXPIRATION - ROUTER_INFO_EXPIRATION_MIN) * 150 / (_kb.size() + 1)));
 
         if (!key.equals(routerInfo.getIdentity().getHash())) {
             if (_log.shouldLog(Log.WARN))
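The adjusted-expiration formula clamps at the default and scales the span above the minimum by 150/(size+1). A worked sketch with placeholder constants (the real ROUTER_INFO_EXPIRATION values are defined elsewhere in KademliaNetworkDatabaseFacade; the 24-hour and 90-minute figures below are assumptions for illustration):

class ExpirationSketch {
    // Placeholder values for illustration only; the actual constants
    // in KademliaNetworkDatabaseFacade may differ.
    static final long ROUTER_INFO_EXPIRATION     = 24*60*60*1000L; // assumed default
    static final long ROUTER_INFO_EXPIRATION_MIN =    90*60*1000L; // assumed floor

    /** Same formula as the hunk above: scale expiration down as the netDb grows. */
    static long adjustedExpiration(int kbSize) {
        return Math.min(ROUTER_INFO_EXPIRATION,
                        ROUTER_INFO_EXPIRATION_MIN +
                        ((ROUTER_INFO_EXPIRATION - ROUTER_INFO_EXPIRATION_MIN) * 150 / (kbSize + 1)));
    }

    public static void main(String[] args) {
        // Below ~150 routers the min() clamps to the full default; at ~300
        // routers the result lands about halfway between the minimum and the
        // default, matching the rewritten comment in the diff above.
        for (int size : new int[] { 50, 150, 300, 600 })
            System.out.println(size + " routers -> " + adjustedExpiration(size) / (60*1000L) + " min");
    }
}

With the assumed values, 300 routers gives roughly 763 minutes against a 1440-minute default, i.e. about half, which is exactly the behavior the updated comment describes.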