forked from I2P_Developers/i2p.i2p

Compare commits: i2p-2.5.2- ... i2p.i2p.2. (23 commits)

bb3a58c658, 6b2994b59e, 0fe36c8a70, 6b06f28474, 97afc8204d, 2c727d09e2,
fa10194612, d845135484, 64808cb4fe, 33d19a128c, e55721fe2c, 1f35d9f881,
469c4ee846, 109277143e, 7bbe38504f, 253db3b9be, 0fdbf15f58, d35b4e0f1e,
36d94733e2, 4b40314a62, 16d4d0625c, b3d0b91db0, 336a01752b
HandleDatabaseLookupMessageJob.java

@@ -115,66 +115,24 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
             if (DatabaseEntry.isLeaseSet(type) &&
                 (lookupType == DatabaseLookupMessage.Type.ANY || lookupType == DatabaseLookupMessage.Type.LS)) {
                 LeaseSet ls = (LeaseSet) dbe;
-                // We have to be very careful here to decide whether or not to send out the leaseSet,
-                // to avoid anonymity vulnerabilities.
-                // As this is complex, lots of comments follow...
-
-                boolean isLocal = getContext().clientManager().isLocal(ls.getHash());
-                boolean shouldPublishLocal = isLocal && getContext().clientManager().shouldPublishLeaseSet(searchKey);
-                // Only answer a request for a LeaseSet if it has been published
-                // to us, or, if its local, if we would have published to ourselves
+                // Answer any request for a LeaseSet if it has been published to us.
 
                 // answerAllQueries: We are floodfill
                 // getReceivedAsPublished:
-                // false for local
-                // false for received over a tunnel
-                // false for received in response to our lookups
-                // true for received in a DatabaseStoreMessage unsolicited
+                // false for received over a client tunnel (if associated with a client, goes to client subDB)
+                // true for received in a DatabaseStoreMessage unsolicited (goes to main Db)
                 if (ls.getReceivedAsPublished()) {
-                    // Answer anything that was stored to us directly
-                    // (i.e. "received as published" - not the result of a query, or received
-                    // over a client tunnel).
-                    // This is probably because we are floodfill, but also perhaps we used to be floodfill,
-                    // so we don't check the answerAllQueries() flag.
-                    // Local leasesets are not handled here
+                    //* Answer anything that was stored to us directly.
+                    //  (i.e. "received as published" - not the result of a query).
+                    //* LeaseSets received over a client tunnel will be routed into subDbs.
+                    //  subDbs are responsible for publishing their "own" client LeaseSets.
+                    //* The "main" netDb can safely store its own copies of a LeaseSet
+                    //  belonging to a local client, when it is published back to it. Therefore,
+                    //  they do not require special handling and are handled here.
                     if (_log.shouldLog(Log.INFO))
                         _log.info("We have the published LS " + searchKey + ", answering query");
                     getContext().statManager().addRateData("netDb.lookupsMatchedReceivedPublished", 1);
                     sendData(searchKey, ls, fromKey, toTunnel);
-                } else if (shouldPublishLocal && answerAllQueries()) {
-                    // We are floodfill, and this is our local leaseset, and we publish it.
-                    // Only send it out if it is in our estimated keyspace.
-                    // For this, we do NOT use their dontInclude list as it can't be trusted
-                    // (i.e. it could mess up the closeness calculation)
-                    Set<Hash> closestHashes = getContext().netDb().findNearestRouters(searchKey,
-                                                                                      CLOSENESS_THRESHOLD, null);
-                    if (weAreClosest(closestHashes)) {
-                        // It's in our keyspace, so give it to them
-                        if (_log.shouldLog(Log.INFO))
-                            _log.info("We have local LS " + searchKey + ", answering query, in our keyspace");
-                        getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1);
-                        sendData(searchKey, ls, fromKey, toTunnel);
-                    } else {
-                        // Lie, pretend we don't have it
-                        if (_log.shouldLog(Log.INFO))
-                            _log.info("We have local LS " + searchKey + ", NOT answering query, out of our keyspace");
-                        getContext().statManager().addRateData("netDb.lookupsMatchedLocalNotClosest", 1);
-                        Set<Hash> routerHashSet = getNearestRouters(lookupType);
-                        sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
-                    }
-                } else {
-                    // It was not published to us (we looked it up, for example)
-                    // or it's local and we aren't floodfill,
-                    // or it's local and we don't publish it.
-                    // Lie, pretend we don't have it
-                    if (_log.shouldLog(Log.INFO))
-                        _log.info("We have LS " + searchKey +
-                                  ", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
-                                  " RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
-                    getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
-                    Set<Hash> routerHashSet = getNearestRouters(lookupType);
-                    sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
-                }
                 }
             } else if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO &&
                        lookupType != DatabaseLookupMessage.Type.LS) {
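The net effect of this hunk: a matching LeaseSet is returned only when it carries the "received as published" flag; the old branches for locally published LeaseSets (the keyspace check) and for the "pretend we don't have it" reply are removed. Below is a minimal standalone sketch of the remaining rule, using a hypothetical LeaseSetEntry stand-in rather than the real LeaseSet class.

```java
// Minimal sketch (not I2P code) of the simplified answering rule above.
// LeaseSetEntry and Reply are hypothetical stand-ins for illustration only.
public class LookupDecisionSketch {

    /** Stand-in for the one LeaseSet property this decision needs. */
    static final class LeaseSetEntry {
        final boolean receivedAsPublished;
        LeaseSetEntry(boolean receivedAsPublished) {
            this.receivedAsPublished = receivedAsPublished;
        }
    }

    enum Reply { SEND_DATA, NO_ANSWER_HERE }

    /** Answer with the LeaseSet only when it was stored to us directly. */
    static Reply answer(LeaseSetEntry ls) {
        if (ls.receivedAsPublished)
            return Reply.SEND_DATA;      // corresponds to sendData(searchKey, ls, fromKey, toTunnel)
        return Reply.NO_ANSWER_HERE;     // the removed local/keyspace and sendClosest branches
    }

    public static void main(String[] args) {
        System.out.println(answer(new LeaseSetEntry(true)));   // SEND_DATA
        System.out.println(answer(new LeaseSetEntry(false)));  // NO_ANSWER_HERE
    }
}
```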
@@ -251,10 +209,6 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
                                                              MAX_ROUTERS_RETURNED,
                                                              dontInclude);
     }
 
-    private boolean weAreClosest(Set<Hash> routerHashSet) {
-        return routerHashSet.contains(_us);
-    }
-
     private void sendData(Hash key, DatabaseEntry data, Hash toPeer, TunnelId replyTunnel) {
         if (!key.equals(data.getHash())) {
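For reference, the helper removed here implemented the keyspace test used by the old local-LeaseSet branch: fetch the CLOSENESS_THRESHOLD routers nearest the search key and check whether our own hash is among them. A small self-contained sketch of that containment test follows; String stands in for net.i2p.data.Hash, and the nearest-router set is supplied by the caller instead of coming from findNearestRouters().

```java
import java.util.Set;

// Standalone sketch of the removed weAreClosest() check: are we one of the
// routers closest to the search key in the netDb keyspace?
public class ClosenessSketch {

    static boolean weAreClosest(Set<String> closestHashes, String us) {
        // The removed helper was exactly this containment test against _us.
        return closestHashes.contains(us);
    }

    public static void main(String[] args) {
        Set<String> closest = Set.of("routerA", "routerB", "us");
        System.out.println(weAreClosest(closest, "us"));      // true
        System.out.println(weAreClosest(closest, "other"));   // false
    }
}
```

Its only caller was the local-LeaseSet branch removed in the previous hunk, which is why the helper goes away with it.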
HandleFloodfillDatabaseStoreMessageJob.java

@@ -27,6 +27,7 @@ import net.i2p.data.i2np.I2NPMessage;
 import net.i2p.data.i2np.TunnelGatewayMessage;
 import net.i2p.router.Job;
 import net.i2p.router.JobImpl;
+import net.i2p.router.NetworkDatabaseFacade;
 import net.i2p.router.OutNetMessage;
 import net.i2p.router.Router;
 import net.i2p.router.RouterContext;
@@ -75,7 +76,6 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
         // set if invalid store but not his fault
         boolean dontBlamePeer = false;
         boolean wasNew = false;
-        boolean blockStore = false;
         RouterInfo prevNetDb = null;
         Hash key = _message.getKey();
         DatabaseEntry entry = _message.getEntry();
@@ -91,36 +91,28 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
             // local LeaseSets has changed substantially, based on the role
             // being assumed.
             // Role #1) The 'floodfill' netDb when the router is a FloodFill
-            //          In this case, the router would actually de-anonymize
-            //          the clients it is hosting if it refuses LeaseSets for
-            //          these clients.
             //          The LS will be checked to make sure it arrived directly,
             //          and handled as a normal LS.
             // Role #2) The 'floodfill' netDb when the router is *NOT* an I2P
             //          network Floodfill.
-            //          In this case, the 'floodfill' netDb only stores RouterInfo.
-            //          There is no use case for the 'floodfill' netDb to store any
-            //          LeaseSets when the router is not a FloodFill.
-            // Role #3) Client netDb should only receive LeaseSets from their
-            //          tunnels. And clients will only publish their LeaseSet
-            //          out their client tunnel.
-            //          In this role, the only LeaseSet that should be rejected
-            //          is its own LeaseSet.
+            //          In this case, the 'floodfill' netDb primarily stores RouterInfos.
+            //          However, there are a number of normal cases where it might contain
+            //          one or more LeaseSets:
+            //          1. We used to be a floodfill but aren't anymore
+            //          2. We performed a lookup without an active session locally (it won't be RAP)
+            // Role #3) Client netDb will only receive LeaseSets from their client
+            //          tunnels, and clients will only publish their LeaseSet out
+            //          their client tunnel.
+            //          In this role, the only LeaseSet store that should be rejected
+            //          is the subDb's client's own LeaseSet.
             //
-            // ToDo: Currently, the 'floodfill' netDb will be excluded
+            // Currently, the 'floodfill' netDb will be excluded
             // from directly receiving a client LeaseSet, due to the
             // way the selection of FloodFill routers are selected
             // when flooding a LS.
             // But even if the host router does not directly receive the
             // LeaseSets of the clients it hosts, those LeaseSets will
             // usually be flooded back to it.
-            // Is this enough, or do we need to pierce the segmentation
-            // under certain conditions?
-            //
-            // ToDo: What considerations are needed for multihoming?
-            //       with multihoming, it's really important to prevent the
-            //       client netDb from storing the other guy's LeaseSet.
-            //       It will confuse us badly.
 
             LeaseSet ls = (LeaseSet) entry;
             // If this was received as a response to a query,
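The three roles described above hinge on two predicates that appear throughout this file, _facade.isClientDb() and _facade.floodfillEnabled(). The sketch below is purely illustrative (the router never materializes such an enum); it only makes the role mapping explicit.

```java
// Hedged sketch of the three netDb roles from the comment block above.
// Role names and this helper are hypothetical; only the two boolean
// predicates correspond to methods used in the real code.
public class NetDbRoleSketch {

    enum Role { FLOODFILL_NETDB, NON_FLOODFILL_NETDB, CLIENT_NETDB }

    static Role roleOf(boolean isClientDb, boolean floodfillEnabled) {
        if (isClientDb)
            return Role.CLIENT_NETDB;        // Role #3: fed only by client tunnels
        if (floodfillEnabled)
            return Role.FLOODFILL_NETDB;     // Role #1: handles LeaseSets that arrive directly
        return Role.NON_FLOODFILL_NETDB;     // Role #2: mostly RouterInfos, some LeaseSets
    }

    public static void main(String[] args) {
        System.out.println(roleOf(true, false));   // CLIENT_NETDB
        System.out.println(roleOf(false, true));   // FLOODFILL_NETDB
        System.out.println(roleOf(false, false));  // NON_FLOODFILL_NETDB
    }
}
```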
@@ -133,30 +125,18 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
             // See ../HDLMJ for more info
             if (!ls.getReceivedAsReply())
                 ls.setReceivedAsPublished();
-            if (_facade.isClientDb())
-                blockStore = false;
-            else if (getContext().clientManager().isLocal(key))
-                // Non-client context
-                if (_facade.floodfillEnabled() && (_fromHash != null))
-                    blockStore = false;
-                else
-                    // FloodFill disabled, but in the 'floodfill' netDb context.
-                    // We should never get here, the 'floodfill' netDb doesn't
-                    // store LS when FloodFill is disabled.
-                    blockStore = true;
-            else
-                blockStore = false;
-            if (blockStore) {
-                getContext().statManager().addRateData("netDb.storeLocalLeaseSetAttempt", 1, 0);
-                // If we're using subdbs, store the leaseSet in the multihome DB.
-                // otherwise, throw rather than return, so that we send the ack below (prevent easy attack)
-                dontBlamePeer = true;
-                //if (getContext().netDbSegmentor().useSubDbs())
-                //getContext().multihomeNetDb().store(key, ls);
-                //else
+            if (_facade.isClientDb()) {
+                // This is where we deal with what happens if a client subDB tries to store
+                // a leaseSet which it is the owner/publisher of.
+                // Look up a ls hash in the netDbSegmentor, and compare it to the _facade that we have.
+                // If they are equal, reject the store.
+                if (getContext().netDbSegmentor().clientNetDB(ls.getHash()).equals(_facade)) {
+                    getContext().statManager().addRateData("netDb.storeLocalLeaseSetToLocalClient", 1, 0);
+                    dontBlamePeer = true;
                     throw new IllegalArgumentException("(dbid: " + _facade._dbid
                                                        + ") Peer attempted to store local leaseSet: "
-                                                       + key.toBase32());
+                                                       + key.toBase32() + " to client subDB " + _facade + " which is its own publisher");
+                }
             }
             //boolean oldrar = ls.getReceivedAsReply();
             //boolean oldrap = ls.getReceivedAsPublished();
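The new guard can be read as: in a client subDb, if the segmentor maps the incoming LeaseSet's hash back to this same facade, the store is the subDb's own client's LeaseSet being published back to it, so count it, set dontBlamePeer, and throw. A self-contained sketch of that ownership check follows, with a hypothetical Segmentor class standing in for getContext().netDbSegmentor() and plain Strings standing in for Hash and the facade object.

```java
import java.util.Map;

// Minimal sketch of the client-subDb self-store guard above.
// Segmentor, facadeId and the Map-based lookup are hypothetical stand-ins.
public class SubDbSelfStoreSketch {

    /** Hypothetical segmentor: maps a LeaseSet hash to the id of the subDb that owns it. */
    static final class Segmentor {
        private final Map<String, String> clientDbByHash;
        Segmentor(Map<String, String> clientDbByHash) { this.clientDbByHash = clientDbByHash; }
        String clientNetDB(String lsHash) { return clientDbByHash.getOrDefault(lsHash, "main"); }
    }

    static void handleClientDbStore(Segmentor seg, String facadeId, String lsHash) {
        if (seg.clientNetDB(lsHash).equals(facadeId)) {
            // Mirrors the diff: the store of a subDb's own client LeaseSet is rejected.
            throw new IllegalArgumentException(
                "Peer attempted to store local leaseSet " + lsHash
                + " to client subDB " + facadeId + " which is its own publisher");
        }
        // otherwise fall through and store normally
    }

    public static void main(String[] args) {
        Segmentor seg = new Segmentor(Map.of("lsHashA", "clientDb-1"));
        handleClientDbStore(seg, "clientDb-2", "lsHashA"); // different owner: stored normally
        try {
            handleClientDbStore(seg, "clientDb-1", "lsHashA");
        } catch (IllegalArgumentException expected) {
            System.out.println("rejected: " + expected.getMessage());
        }
    }
}
```

In the real job the exception is thrown rather than returned so the ack below is still sent, and it is caught further down where invalidMessage is set.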
@@ -202,16 +182,6 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
                 invalidMessage = uce.getMessage();
                 dontBlamePeer = true;
             } catch (IllegalArgumentException iae) {
-                // This is somewhat normal behavior in client netDb context,
-                // and safely handled.
-                // This is more worrisome in the floodfill netDb context.
-                // It is not expected to happen since we check if it was sent directly.
-                if (_facade.isClientDb())
-                    if (_log.shouldInfo())
-                        _log.info("LS Store IAE (safely handled): ", iae);
-                else
-                    if (_log.shouldError())
-                        _log.error("LS Store IAE (unexpected): ", iae);
                 invalidMessage = iae.getMessage();
             }
         } else if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
@@ -281,11 +251,11 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
                 dontBlamePeer = true;
                 throw new IllegalArgumentException("Peer attempted to store our RouterInfo");
             }
-            // If we're in the client netDb context, log a warning since
-            // it should be rare that RI DSM are handled in the client context.
+            // If we're in the client netDb context, log a warning since this is not expected.
+            // This is probably impossible but log it if we ever see it so it can be investigated.
             if (_facade.isClientDb() && _log.shouldWarn())
                 _log.warn("[dbid: " + _facade._dbid
                           + "]: Handling RI dbStore in client netDb context of router " + key.toBase64());
             boolean shouldStore = true;
             if (ri.getReceivedAsPublished()) {
                 // these are often just dup stores from concurrent lookups
@@ -395,7 +365,7 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
                                                + ") and new ("
                                                + ri.getIdentity().getSigningPublicKey()
                                                + ") signing public keys do not match!");
                 }
             }
         }
         if (shouldStore) {
@@ -638,12 +608,6 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
                 out2 = tgm2;
             }
         }
-        if (_facade.isClientDb()) {
-            // We shouldn't be reaching this point given the above conditional.
-            _log.error("Error! SendMessageDirectJob (isEstab) attempted in Client netDb ("
-                       + _facade._dbid + ")! Message: " + out1);
-            return;
-        }
         Job send = new SendMessageDirectJob(getContext(), out1, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY, _msgIDBloomXor);
         send.runJob();
         if (msg2 != null) {