NetDB: Encrypt exploratory lookups too

SearchUpdateReplyFoundJob: finals
Author: zzz
Date: 2014-09-03 23:26:34 +00:00
parent a9802eb6a7
commit 5af749a226
4 changed files with 52 additions and 23 deletions

ExploreJob.java

@@ -15,6 +15,8 @@ import java.util.Set;
 import net.i2p.data.Hash;
 import net.i2p.data.TunnelId;
 import net.i2p.data.i2np.DatabaseLookupMessage;
+import net.i2p.data.i2np.I2NPMessage;
+import net.i2p.data.router.RouterInfo;
 import net.i2p.kademlia.KBucketSet;
 import net.i2p.router.RouterContext;
 import net.i2p.util.Log;
@@ -71,15 +73,15 @@ class ExploreJob extends SearchJob {
      * and PeerSelector doesn't include the floodfill peers,
      * so we add the ff peers ourselves and then use the regular PeerSelector.
      *
-     * TODO should we encrypt this also like we do for normal lookups?
-     * Could the OBEP capture it and reply with a reference to a hostile peer?
-     *
      * @param replyTunnelId tunnel to receive replies through
      * @param replyGateway gateway for the reply tunnel
      * @param expiration when the search should stop
+     * @param peer the peer to send it to
+     *
+     * @return a DatabaseLookupMessage or GarlicMessage
      */
     @Override
-    protected DatabaseLookupMessage buildMessage(TunnelId replyTunnelId, Hash replyGateway, long expiration) {
+    protected I2NPMessage buildMessage(TunnelId replyTunnelId, Hash replyGateway, long expiration, RouterInfo peer) {
         DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext(), true);
         msg.setSearchKey(getState().getTarget());
         msg.setFrom(replyGateway);
@@ -127,7 +129,27 @@ class ExploreJob extends SearchJob {
_log.debug("Peers we don't want to hear about: " + dontIncludePeers); _log.debug("Peers we don't want to hear about: " + dontIncludePeers);
msg.setDontIncludePeers(dontIncludePeers); msg.setDontIncludePeers(dontIncludePeers);
return msg;
// Now encrypt if we can
I2NPMessage outMsg;
if (getContext().getProperty(IterativeSearchJob.PROP_ENCRYPT_RI, IterativeSearchJob.DEFAULT_ENCRYPT_RI)) {
// request encrypted reply?
if (DatabaseLookupMessage.supportsEncryptedReplies(peer)) {
MessageWrapper.OneTimeSession sess;
sess = MessageWrapper.generateSession(getContext());
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Requesting encrypted reply from " + peer.getIdentity().calculateHash() +
' ' + sess.key + ' ' + sess.tag);
msg.setReplySession(sess.key, sess.tag);
}
outMsg = MessageWrapper.wrap(getContext(), msg, peer);
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Encrypted exploratory DLM for " + getState().getTarget() + " to " +
peer.getIdentity().calculateHash());
} else {
outMsg = msg;
}
return outMsg;
} }
/** max # of concurrent searches */ /** max # of concurrent searches */

IterativeSearchJob.java

@@ -90,10 +90,10 @@ class IterativeSearchJob extends FloodSearchJob {
      */
     private static final int MAX_CONCURRENT = 1;

-    private static final String PROP_ENCRYPT_RI = "router.encryptRouterLookups";
+    public static final String PROP_ENCRYPT_RI = "router.encryptRouterLookups";
     /** only on fast boxes, for now */
-    private static final boolean DEFAULT_ENCRYPT_RI =
+    public static final boolean DEFAULT_ENCRYPT_RI =
                    SystemVersion.isX86() && SystemVersion.is64Bit() &&
                    !SystemVersion.isApache() && !SystemVersion.isGNU() &&
                    NativeBigInteger.isNative();
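
Since PROP_ENCRYPT_RI and DEFAULT_ENCRYPT_RI are now public, ExploreJob can reuse the same gate that IterativeSearchJob applies to its own lookups. As a rough, self-contained illustration of that property-with-computed-default pattern, here is a hypothetical sketch (not I2P code: System.getProperty() and the os.arch check stand in for RouterContext.getProperty() and the SystemVersion/NativeBigInteger tests above):

    // Hypothetical stand-alone sketch of the "explicit property overrides a
    // hardware-dependent default" pattern shown in the hunk above.
    public class EncryptLookupDefaultDemo {
        static final String PROP_ENCRYPT_RI = "router.encryptRouterLookups";

        public static void main(String[] args) {
            // Default only on "fast boxes": roughly 64-bit x86; the real code also
            // requires the native BigInteger library and excludes Apache/GNU JVMs.
            boolean defaultEncrypt = is64BitX86();
            // An explicitly set property always wins over the computed default.
            String val = System.getProperty(PROP_ENCRYPT_RI);
            boolean encrypt = (val != null) ? Boolean.parseBoolean(val) : defaultEncrypt;
            System.out.println("encrypt exploratory lookups: " + encrypt);
        }

        private static boolean is64BitX86() {
            String arch = System.getProperty("os.arch", "").toLowerCase();
            return arch.equals("amd64") || arch.equals("x86_64");
        }
    }

Running it as, e.g., java -Drouter.encryptRouterLookups=true EncryptLookupDefaultDemo shows the override taking effect regardless of the computed default.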

SearchJob.java

@@ -17,11 +17,12 @@ import net.i2p.data.DatabaseEntry;
 import net.i2p.data.DataHelper;
 import net.i2p.data.Hash;
 import net.i2p.data.LeaseSet;
-import net.i2p.data.router.RouterInfo;
 import net.i2p.data.TunnelId;
 import net.i2p.data.i2np.DatabaseLookupMessage;
 import net.i2p.data.i2np.DatabaseSearchReplyMessage;
 import net.i2p.data.i2np.DatabaseStoreMessage;
+import net.i2p.data.i2np.I2NPMessage;
+import net.i2p.data.router.RouterInfo;
 import net.i2p.router.Job;
 import net.i2p.router.JobImpl;
 import net.i2p.router.OutNetMessage;
@@ -424,7 +425,7 @@ class SearchJob extends JobImpl {
         int timeout = getPerPeerTimeoutMs(to);
         long expiration = getContext().clock().now() + timeout;

-        DatabaseLookupMessage msg = buildMessage(inTunnelId, inTunnel.getPeer(0), expiration);
+        I2NPMessage msg = buildMessage(inTunnelId, inTunnel.getPeer(0), expiration, router);

         TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(to);
         if (outTunnel == null) {
@@ -437,9 +438,9 @@ class SearchJob extends JobImpl {
         if (_log.shouldLog(Log.DEBUG))
             _log.debug(getJobId() + ": Sending search to " + to
-                       + " for " + msg.getSearchKey().toBase64() + " w/ replies through ["
-                       + msg.getFrom().toBase64() + "] via tunnel ["
-                       + msg.getReplyTunnel() + "]");
+                       + " for " + getState().getTarget() + " w/ replies through "
+                       + inTunnel.getPeer(0) + " via tunnel "
+                       + inTunnelId);

         SearchMessageSelector sel = new SearchMessageSelector(getContext(), router, _expiration, _state);
         SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(getContext(), router, _state, _facade,
@@ -482,8 +483,11 @@ class SearchJob extends JobImpl {
      * @param replyTunnelId tunnel to receive replies through
      * @param replyGateway gateway for the reply tunnel
      * @param expiration when the search should stop
+     * @param peer unused here; see ExploreJob extension
+     *
+     * @return a DatabaseLookupMessage
      */
-    protected DatabaseLookupMessage buildMessage(TunnelId replyTunnelId, Hash replyGateway, long expiration) {
+    protected I2NPMessage buildMessage(TunnelId replyTunnelId, Hash replyGateway, long expiration, RouterInfo peer) {
         DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext(), true);
         msg.setSearchKey(_state.getTarget());
         //msg.setFrom(replyGateway.getIdentity().getHash());

SearchUpdateReplyFoundJob.java

@@ -18,24 +18,26 @@ import net.i2p.util.Log;
 /**
  * Called after a match to a db search is found
  *
+ * Used only by SearchJob which is only used by ExploreJob
  */
 class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
-    private Log _log;
+    private final Log _log;
     private I2NPMessage _message;
-    private Hash _peer;
-    private SearchState _state;
-    private KademliaNetworkDatabaseFacade _facade;
-    private SearchJob _job;
-    private TunnelInfo _outTunnel;
-    private TunnelInfo _replyTunnel;
-    private boolean _isFloodfillPeer;
-    private long _sentOn;
+    private final Hash _peer;
+    private final SearchState _state;
+    private final KademliaNetworkDatabaseFacade _facade;
+    private final SearchJob _job;
+    private final TunnelInfo _outTunnel;
+    private final TunnelInfo _replyTunnel;
+    private final boolean _isFloodfillPeer;
+    private final long _sentOn;

     public SearchUpdateReplyFoundJob(RouterContext context, RouterInfo peer,
                                      SearchState state, KademliaNetworkDatabaseFacade facade,
                                      SearchJob job) {
         this(context, peer, state, facade, job, null, null);
     }

     public SearchUpdateReplyFoundJob(RouterContext context, RouterInfo peer,
                                      SearchState state, KademliaNetworkDatabaseFacade facade,
                                      SearchJob job, TunnelInfo outTunnel, TunnelInfo replyTunnel) {
@@ -52,6 +54,7 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
     }

     public String getName() { return "Update Reply Found for Kademlia Search"; }
+
     public void runJob() {
         if (_isFloodfillPeer)
             _job.decrementOutstandingFloodfillSearches();
@@ -59,7 +62,7 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
         I2NPMessage message = _message;
         if (_log.shouldLog(Log.INFO))
             _log.info(getJobId() + ": Reply from " + _peer.toBase64()
-                      + " with message " + message.getClass().getName());
+                      + " with message " + message.getClass().getSimpleName());

         long howLong = System.currentTimeMillis() - _sentOn;
         // assume requests are 1KB (they're almost always much smaller, but tunnels have a fixed size)