NetDB: Encrypt exploratory lookups too

SearchUpdateReplyFoundJob: finals
Author: zzz
Date: 2014-09-03 23:26:34 +00:00
Parent: a9802eb6a7
Commit: 5af749a226
4 changed files with 52 additions and 23 deletions

ExploreJob.java (View File)

@@ -15,6 +15,8 @@ import java.util.Set;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DatabaseLookupMessage;
+import net.i2p.data.i2np.I2NPMessage;
+import net.i2p.data.router.RouterInfo;
import net.i2p.kademlia.KBucketSet;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
@@ -71,15 +73,15 @@ class ExploreJob extends SearchJob {
* and PeerSelector doesn't include the floodfill peers,
* so we add the ff peers ourselves and then use the regular PeerSelector.
*
-     * TODO should we encrypt this also like we do for normal lookups?
-     * Could the OBEP capture it and reply with a reference to a hostile peer?
-     *
* @param replyTunnelId tunnel to receive replies through
* @param replyGateway gateway for the reply tunnel
* @param expiration when the search should stop
+     * @param peer the peer to send it to
+     *
+     * @return a DatabaseLookupMessage or GarlicMessage
*/
@Override
-    protected DatabaseLookupMessage buildMessage(TunnelId replyTunnelId, Hash replyGateway, long expiration) {
+    protected I2NPMessage buildMessage(TunnelId replyTunnelId, Hash replyGateway, long expiration, RouterInfo peer) {
DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext(), true);
msg.setSearchKey(getState().getTarget());
msg.setFrom(replyGateway);
@@ -127,7 +129,27 @@ class ExploreJob extends SearchJob {
_log.debug("Peers we don't want to hear about: " + dontIncludePeers);
msg.setDontIncludePeers(dontIncludePeers);
-        return msg;
+        // Now encrypt if we can
+        I2NPMessage outMsg;
+        if (getContext().getProperty(IterativeSearchJob.PROP_ENCRYPT_RI, IterativeSearchJob.DEFAULT_ENCRYPT_RI)) {
+            // request encrypted reply?
+            if (DatabaseLookupMessage.supportsEncryptedReplies(peer)) {
+                MessageWrapper.OneTimeSession sess;
+                sess = MessageWrapper.generateSession(getContext());
+                if (_log.shouldLog(Log.INFO))
+                    _log.info(getJobId() + ": Requesting encrypted reply from " + peer.getIdentity().calculateHash() +
+                              ' ' + sess.key + ' ' + sess.tag);
+                msg.setReplySession(sess.key, sess.tag);
+            }
+            outMsg = MessageWrapper.wrap(getContext(), msg, peer);
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug(getJobId() + ": Encrypted exploratory DLM for " + getState().getTarget() + " to " +
+                           peer.getIdentity().calculateHash());
+        } else {
+            outMsg = msg;
+        }
+        return outMsg;
}
/** max # of concurrent searches */

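The ExploreJob change above means buildMessage() now returns an I2NPMessage that is either the plain DatabaseLookupMessage or a garlic-wrapped copy of it. The following is an illustrative sketch only, not part of the commit: it restates the encrypt-and-wrap flow using the classes shown in the diff (MessageWrapper, DatabaseLookupMessage, RouterInfo), written as a hypothetical helper method as it would sit inside ExploreJob.

    // Illustrative sketch only, not part of this commit.
    // Restates the flow added to ExploreJob.buildMessage() above:
    //  1. if the target floodfill supports encrypted replies, attach a locally
    //     generated one-time session key/tag so the reply can come back encrypted;
    //  2. garlic-wrap the lookup itself to the target router, so an outbound
    //     endpoint cannot read it or substitute a reply pointing at hostile peers.
    private I2NPMessage wrapLookup(DatabaseLookupMessage msg, RouterInfo peer) {
        if (DatabaseLookupMessage.supportsEncryptedReplies(peer)) {
            MessageWrapper.OneTimeSession sess = MessageWrapper.generateSession(getContext());
            msg.setReplySession(sess.key, sess.tag);
        }
        // wrap() produces a GarlicMessage addressed to the peer's router identity
        return MessageWrapper.wrap(getContext(), msg, peer);
    }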
IterativeSearchJob.java (View File)

@@ -90,10 +90,10 @@ class IterativeSearchJob extends FloodSearchJob {
*/
private static final int MAX_CONCURRENT = 1;
-    private static final String PROP_ENCRYPT_RI = "router.encryptRouterLookups";
+    public static final String PROP_ENCRYPT_RI = "router.encryptRouterLookups";
/** only on fast boxes, for now */
-    private static final boolean DEFAULT_ENCRYPT_RI =
+    public static final boolean DEFAULT_ENCRYPT_RI =
SystemVersion.isX86() && SystemVersion.is64Bit() &&
!SystemVersion.isApache() && !SystemVersion.isGNU() &&
NativeBigInteger.isNative();

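PROP_ENCRYPT_RI and DEFAULT_ENCRYPT_RI are widened to public above so that ExploreJob can consult the same switch. A minimal sketch of how the gate is read, assuming ctx is the RouterContext: an explicit router.encryptRouterLookups=true/false setting takes precedence, since getProperty() only falls back to the default when the property is unset.

    // Minimal sketch, not part of the commit: how ExploreJob consults the same
    // switch that IterativeSearchJob already uses for normal lookups.
    // An operator-supplied router.encryptRouterLookups property overrides the
    // "fast box" default computed above.
    boolean encryptLookups = ctx.getProperty(IterativeSearchJob.PROP_ENCRYPT_RI,
                                             IterativeSearchJob.DEFAULT_ENCRYPT_RI);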
SearchJob.java (View File)

@@ -17,11 +17,12 @@ import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
-import net.i2p.data.router.RouterInfo;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.data.i2np.DatabaseStoreMessage;
+import net.i2p.data.i2np.I2NPMessage;
+import net.i2p.data.router.RouterInfo;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.OutNetMessage;
@@ -424,7 +425,7 @@ class SearchJob extends JobImpl {
int timeout = getPerPeerTimeoutMs(to);
long expiration = getContext().clock().now() + timeout;
-        DatabaseLookupMessage msg = buildMessage(inTunnelId, inTunnel.getPeer(0), expiration);
+        I2NPMessage msg = buildMessage(inTunnelId, inTunnel.getPeer(0), expiration, router);
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(to);
if (outTunnel == null) {
@@ -437,9 +438,9 @@ class SearchJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Sending search to " + to
+ " for " + msg.getSearchKey().toBase64() + " w/ replies through ["
+ msg.getFrom().toBase64() + "] via tunnel ["
+ msg.getReplyTunnel() + "]");
+ " for " + getState().getTarget() + " w/ replies through "
+ inTunnel.getPeer(0) + " via tunnel "
+ inTunnelId);
SearchMessageSelector sel = new SearchMessageSelector(getContext(), router, _expiration, _state);
SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(getContext(), router, _state, _facade,
@@ -482,8 +483,11 @@ class SearchJob extends JobImpl {
* @param replyTunnelId tunnel to receive replies through
* @param replyGateway gateway for the reply tunnel
* @param expiration when the search should stop
+     * @param peer unused here; see ExploreJob extension
+     *
+     * @return a DatabaseLookupMessage
*/
-    protected DatabaseLookupMessage buildMessage(TunnelId replyTunnelId, Hash replyGateway, long expiration) {
+    protected I2NPMessage buildMessage(TunnelId replyTunnelId, Hash replyGateway, long expiration, RouterInfo peer) {
DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext(), true);
msg.setSearchKey(_state.getTarget());
//msg.setFrom(replyGateway.getIdentity().getHash());

SearchUpdateReplyFoundJob.java (View File)

@@ -18,24 +18,26 @@ import net.i2p.util.Log;
/**
* Called after a match to a db search is found
*
* Used only by SearchJob which is only used by ExploreJob
*/
class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
-    private Log _log;
+    private final Log _log;
private I2NPMessage _message;
-    private Hash _peer;
-    private SearchState _state;
-    private KademliaNetworkDatabaseFacade _facade;
-    private SearchJob _job;
-    private TunnelInfo _outTunnel;
-    private TunnelInfo _replyTunnel;
-    private boolean _isFloodfillPeer;
-    private long _sentOn;
+    private final Hash _peer;
+    private final SearchState _state;
+    private final KademliaNetworkDatabaseFacade _facade;
+    private final SearchJob _job;
+    private final TunnelInfo _outTunnel;
+    private final TunnelInfo _replyTunnel;
+    private final boolean _isFloodfillPeer;
+    private final long _sentOn;
public SearchUpdateReplyFoundJob(RouterContext context, RouterInfo peer,
SearchState state, KademliaNetworkDatabaseFacade facade,
SearchJob job) {
this(context, peer, state, facade, job, null, null);
}
public SearchUpdateReplyFoundJob(RouterContext context, RouterInfo peer,
SearchState state, KademliaNetworkDatabaseFacade facade,
SearchJob job, TunnelInfo outTunnel, TunnelInfo replyTunnel) {
@@ -52,6 +54,7 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
}
public String getName() { return "Update Reply Found for Kademlia Search"; }
public void runJob() {
if (_isFloodfillPeer)
_job.decrementOutstandingFloodfillSearches();
@@ -59,7 +62,7 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
I2NPMessage message = _message;
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Reply from " + _peer.toBase64()
+ " with message " + message.getClass().getName());
+ " with message " + message.getClass().getSimpleName());
long howLong = System.currentTimeMillis() - _sentOn;
// assume requests are 1KB (they're almost always much smaller, but tunnels have a fixed size)