Patches all InNetMessagePool.add() calls to use replay "contexts" specific to where they were called from.

idk
2023-06-04 01:38:15 +00:00
parent f308ebee74
commit 82aa4e19fb
25 changed files with 141 additions and 80 deletions
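For orientation: the new "contexts" are simply per-call-site XOR constants applied to a message ID before it reaches the shared bloom filter, so a replayed ID only collides inside the context that first recorded it. A minimal, self-contained sketch of that idea, in plain Java with a HashSet standing in for the router's decaying bloom filter (all names here are illustrative, not the router's actual classes):

import java.util.HashSet;
import java.util.Random;
import java.util.Set;

// Illustration only: a Set stands in for the shared, decaying bloom filter.
public class ReplayContextSketch {
    public static void main(String[] args) {
        Set<Long> seen = new HashSet<>();
        long tunnelCtx = new Random().nextLong();   // one constant per context,
        long routerCtx = new Random().nextLong();   // as this commit introduces

        long msgId = 0x1234abcdL;

        System.out.println(seen.add(msgId ^ tunnelCtx));   // true:  first sighting in the tunnel context
        System.out.println(seen.add(msgId ^ tunnelCtx));   // false: replay inside the same context is caught
        System.out.println(seen.add(msgId ^ routerCtx));   // true:  the same ID in another context does not collide
    }
}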

View File

@ -1,3 +1,8 @@
2023-05-29 idk
* adds "virtual contexts" to bloom filter, where each entity that
passes an i2np message to the bloom filter xor's the messageID with a random, local value.
credit Xe Iaso for discovering the issue, obscuratus for the solution
2023-04-12 idk
* Fix missing Java options in docker/rootfs/startapp.sh
* Detect when running in Podman instead of regular Docker

View File

@ -87,7 +87,8 @@ public interface I2NPMessage {
/**
* Replay resistant message ID
*/
public long getUniqueId();
public long getUniqueId(long msgIDBloomXor);
public long getUniqueId();
public void setUniqueId(long id);
/**

View File

@ -180,6 +180,10 @@ public abstract class I2NPMessageImpl implements I2NPMessage {
/**
* Replay resistant message Id
*/
public synchronized long getUniqueId(long msgIDBloomXor) {
return getUniqueId() ^ msgIDBloomXor;
}
public synchronized long getUniqueId() {
// Lazy initialization of value
if (_uniqueId < 0) {
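Because XOR is self-inverse, the lazily computed _uniqueId stays untouched and the context constant is folded in only at read time. A small, self-contained check of the properties this relies on (plain longs, no router classes; names invented for illustration):

public class BloomXorProperty {
    // Mirrors the shape of I2NPMessageImpl.getUniqueId(long): the stored ID is
    // left alone and the context constant is applied only when asked for.
    static long xored(long uniqueId, long msgIDBloomXor) {
        return uniqueId ^ msgIDBloomXor;
    }

    public static void main(String[] args) {
        long id   = 0x5eedf00dL;
        long ctxA = 0x1111222233334444L;
        long ctxB = 0x9999aaaabbbbccccL;

        System.out.println(xored(id, ctxA) == xored(id, ctxA)); // true: duplicates within a context still match
        System.out.println(xored(id, ctxA) != xored(id, ctxB)); // true: no collision across contexts
        System.out.println((xored(id, ctxA) ^ ctxA) == id);     // true: XOR is reversible, nothing is lost
    }
}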

View File

@ -120,9 +120,9 @@ public class InNetMessagePool implements Service {
return old;
}
public int add(I2NPMessage messageBody, RouterIdentity fromRouter, Hash fromRouterHash) {
return add(messageBody, fromRouter, fromRouterHash, 0);
}
//public int add(I2NPMessage messageBody, RouterIdentity fromRouter, Hash fromRouterHash) {
//return add(messageBody, fromRouter, fromRouterHash, 0);
//}
/**
* Add a new message to the pool.
@ -134,6 +134,7 @@ public class InNetMessagePool implements Service {
* @param messageBody non-null
* @param fromRouter may be null
* @param fromRouterHash may be null, calculated from fromRouter if null
* @param msgIDBloomXor constant value to XOR with the messageID before passing to the bloom filter.
*
* @return -1 for some types of errors but not all; 0 otherwise
* (was queue length, long ago)
@ -150,6 +151,7 @@ public class InNetMessagePool implements Service {
if (_log.shouldDebug())
_log.debug("Rcvd"
+ " ID " + messageBody.getUniqueId()
+ " xor-ed ID " + messageBody.getUniqueId(msgIDBloomXor)
+ " exp. " + new Date(exp)
+ " type " + messageBody.getClass().getSimpleName());
@ -165,11 +167,7 @@ public class InNetMessagePool implements Service {
// just validate the expiration
invalidReason = _context.messageValidator().validateMessage(exp);
} else {
if (msgIDBloomXor == 0)
invalidReason = _context.messageValidator().validateMessage(messageBody.getUniqueId(), exp);
else
invalidReason = _context.messageValidator().validateMessage(messageBody.getUniqueId()
^ msgIDBloomXor, exp);
invalidReason = _context.messageValidator().validateMessage(messageBody.getUniqueId(msgIDBloomXor), exp);
}
if (invalidReason != null) {
@ -177,7 +175,7 @@ public class InNetMessagePool implements Service {
//if (messageBody instanceof TunnelCreateMessage)
// level = Log.INFO;
if (_log.shouldLog(level))
_log.log(level, "Dropping message [" + messageBody.getUniqueId()
_log.log(level, "Dropping message ID [" + messageBody.getUniqueId() + " xor-ed: " + messageBody.getUniqueId(msgIDBloomXor)
+ " expiring on " + exp + "]: " + messageBody.getClass().getSimpleName() + ": " + invalidReason
+ ": " + messageBody);
_context.statManager().addRateData("inNetPool.dropped", 1);
@ -185,7 +183,7 @@ public class InNetMessagePool implements Service {
_context.statManager().addRateData("inNetPool.duplicate", 1);
if (doHistory) {
history.droppedOtherMessage(messageBody, (fromRouter != null ? fromRouter.calculateHash() : fromRouterHash));
history.messageProcessingError(messageBody.getUniqueId(),
history.messageProcessingError(messageBody.getUniqueId(msgIDBloomXor),
messageBody.getClass().getSimpleName(),
"Duplicate/expired");
}
@ -309,7 +307,7 @@ public class InNetMessagePool implements Service {
} else {
if (doHistory) {
String mtype = messageBody.getClass().getName();
history.receiveMessage(mtype, messageBody.getUniqueId(),
history.receiveMessage(mtype, messageBody.getUniqueId(msgIDBloomXor),
messageBody.getMessageExpiration(),
fromRouterHash, true);
}
@ -320,7 +318,7 @@ public class InNetMessagePool implements Service {
if (doHistory) {
String mtype = messageBody.getClass().getName();
history.receiveMessage(mtype, messageBody.getUniqueId(),
history.receiveMessage(mtype, messageBody.getUniqueId(msgIDBloomXor),
messageBody.getMessageExpiration(),
fromRouterHash, true);
}
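Call sites that feed the pool now pick a constant once and pass it on every add(). The sketch below shows the calling convention; the wrapper class, ctx, and field names are invented for illustration, and only the 4-argument add() itself comes from this patch:

import net.i2p.data.Hash;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.router.RouterIdentity;
import net.i2p.router.RouterContext;

class InNetPoolAddSketch {
    private final RouterContext _ctx;
    // One constant per call site, drawn once and reused for every message
    // this call site hands to the pool.
    private final long _msgIDBloomXor;

    InNetPoolAddSketch(RouterContext ctx) {
        _ctx = ctx;
        _msgIDBloomXor = ctx.random().nextLong();
    }

    void receive(I2NPMessage msg, RouterIdentity from, Hash fromHash) {
        // The pool XORs the message ID with this constant before the bloom-filter
        // check; passing 0 reproduces the old, unpartitioned behaviour.
        _ctx.inNetMessagePool().add(msg, from, fromHash, _msgIDBloomXor);
    }
}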

View File

@ -86,7 +86,7 @@ public class TunnelPoolSettings {
private static final int MAX_PRIORITY = 25;
private static final int EXPLORATORY_PRIORITY = 30;
private final long _msgIdBloomXor;
private final long _msgIDBloomXor;
/**
* Exploratory tunnel
@ -118,7 +118,7 @@ public class TunnelPoolSettings {
_IPRestriction = DEFAULT_IP_RESTRICTION;
_unknownOptions = new Properties();
_randomKey = generateRandomKey();
_msgIdBloomXor = RandomSource.getInstance().nextLong();
_msgIDBloomXor = RandomSource.getInstance().nextLong();
if (_isExploratory && !_isInbound)
_priority = EXPLORATORY_PRIORITY;
@ -290,7 +290,7 @@ public class TunnelPoolSettings {
*/
public Properties getUnknownOptions() { return _unknownOptions; }
public long getMsgIdBloomXor() { return _msgIdBloomXor; }
public long getMsgIdBloomXor() { return _msgIDBloomXor; }
/**
* Defaults in props are NOT honored.

View File

@ -165,7 +165,7 @@ public class VMCommSystem extends CommSystemFacade {
else
ReceiveJob.this.getContext().statManager().addRateData("transport.receiveMessageLarge", 1, 1);
_ctx.inNetMessagePool().add(msg, null, _from);
_ctx.inNetMessagePool().add(msg, null, _from, 0);
} catch (I2NPMessageException e) {
_log.error("Error reading/formatting a VM message? Something is not right...", e);
}

View File

@ -8,6 +8,8 @@ package net.i2p.router.message;
*
*/
import java.util.Random;
import net.i2p.data.Hash;
import net.i2p.data.router.RouterIdentity;
import net.i2p.data.i2np.GarlicMessage;
@ -27,13 +29,26 @@ import net.i2p.router.RouterContext;
*/
public class GarlicMessageHandler implements HandlerJobBuilder {
private final RouterContext _context;
private final long _msgIDBloomXorLocal;
private final long _msgIDBloomXorRouter;
private final long _msgIDBloomXorTunnel;
public GarlicMessageHandler(RouterContext context) {
_context = context;
_msgIDBloomXorLocal = new Random().nextLong();
_msgIDBloomXorRouter = new Random().nextLong();
_msgIDBloomXorTunnel = new Random().nextLong();
}
public GarlicMessageHandler(RouterContext context, long msgIDBloomXorLocal, long msgIDBloomXorRouter, long msgIDBloomXorTunnel) {
_context = context;
_msgIDBloomXorLocal = msgIDBloomXorLocal;
_msgIDBloomXorRouter = msgIDBloomXorRouter;
_msgIDBloomXorTunnel = msgIDBloomXorTunnel;
}
public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
HandleGarlicMessageJob job = new HandleGarlicMessageJob(_context, (GarlicMessage)receivedMessage, from, fromHash);
HandleGarlicMessageJob job = new HandleGarlicMessageJob(_context, (GarlicMessage)receivedMessage, from, fromHash, _msgIDBloomXorLocal, _msgIDBloomXorRouter, _msgIDBloomXorTunnel);
return job;
}
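Both constructors end up in the same place: the builder either draws its own three constants or is handed an existing set, and every job it creates inherits all three. A hedged construction example (the ctx variable is assumed):

// Default: the handler draws three fresh constants of its own.
GarlicMessageHandler h1 = new GarlicMessageHandler(ctx);

// Or share an existing set of constants so related handlers stay in one replay context.
long local = ctx.random().nextLong(), router = ctx.random().nextLong(), tunnel = ctx.random().nextLong();
GarlicMessageHandler h2 = new GarlicMessageHandler(ctx, local, router, tunnel);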

View File

@ -36,6 +36,9 @@ import net.i2p.util.Log;
public class HandleGarlicMessageJob extends JobImpl implements GarlicMessageReceiver.CloveReceiver {
private final Log _log;
private final GarlicMessage _message;
private final long _msgIDBloomXorLocal;
private final long _msgIDBloomXorRouter;
private final long _msgIDBloomXorTunnel;
//private RouterIdentity _from;
//private Hash _fromHash;
//private Map _cloves; // map of clove Id --> Expiration of cloves we've already seen
@ -49,12 +52,15 @@ public class HandleGarlicMessageJob extends JobImpl implements GarlicMessageRece
* @param from ignored
* @param fromHash ignored
*/
public HandleGarlicMessageJob(RouterContext context, GarlicMessage msg, RouterIdentity from, Hash fromHash) {
public HandleGarlicMessageJob(RouterContext context, GarlicMessage msg, RouterIdentity from, Hash fromHash, long msgIDBloomXorLocal, long msgIDBloomXorRouter, long msgIDBloomXorTunnel) {
super(context);
_log = context.logManager().getLog(HandleGarlicMessageJob.class);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Garlic Message not down a tunnel from [" + from + "]");
_message = msg;
_msgIDBloomXorLocal = msgIDBloomXorLocal;
_msgIDBloomXorRouter = msgIDBloomXorRouter;
_msgIDBloomXorTunnel = msgIDBloomXorTunnel;
//_from = from;
//_fromHash = fromHash;
//_cloves = new HashMap();
@ -74,7 +80,9 @@ public class HandleGarlicMessageJob extends JobImpl implements GarlicMessageRece
case DeliveryInstructions.DELIVERY_MODE_LOCAL:
if (_log.shouldLog(Log.DEBUG))
_log.debug("local delivery instructions for clove: " + data);
getContext().inNetMessagePool().add(data, null, null);
// Here we are adding the message to the InNetMessagePool for local delivery. XOR the messageID with
// a long unique to the router/session.
getContext().inNetMessagePool().add(data, null, null, _msgIDBloomXorLocal);
return;
case DeliveryInstructions.DELIVERY_MODE_DESTINATION:
// i2pd bug with DLM to ratchet router
@ -86,14 +94,18 @@ public class HandleGarlicMessageJob extends JobImpl implements GarlicMessageRece
if (getContext().routerHash().equals(instructions.getRouter())) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("router delivery instructions targetting us");
getContext().inNetMessagePool().add(data, null, null);
// Here we are adding the message to the InNetMessagePool because it is addressed to us. XOR the messageID with
// a long unique to the router/session.
getContext().inNetMessagePool().add(data, null, null, _msgIDBloomXorRouter);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("router delivery instructions targetting "
+ instructions.getRouter().toBase64().substring(0,4) + " for " + data);
// we don't need to use the msgIDBloomXorRouter here because we have already handled the case
// where the message will be added to the InNetMessagePool (see SendMessageDirectJob 159-179)
SendMessageDirectJob j = new SendMessageDirectJob(getContext(), data,
instructions.getRouter(),
10*1000, ROUTER_PRIORITY);
10*1000, ROUTER_PRIORITY, _msgIDBloomXorRouter);
// run it inline (adds to the outNetPool if it has the router info, otherwise queue a lookup)
j.runJob();
//getContext().jobQueue().addJob(j);
@ -107,9 +119,10 @@ public class HandleGarlicMessageJob extends JobImpl implements GarlicMessageRece
if (_log.shouldLog(Log.DEBUG))
_log.debug("tunnel delivery instructions targetting "
+ instructions.getRouter().toBase64().substring(0,4) + " for " + data);
// Here we do XOR the messageID in case it is added to the InNetMessagePool (see SendMessageDirectJob 159-179)
SendMessageDirectJob job = new SendMessageDirectJob(getContext(), gw,
instructions.getRouter(),
10*1000, TUNNEL_PRIORITY);
10*1000, TUNNEL_PRIORITY, _msgIDBloomXorTunnel);
// run it inline (adds to the outNetPool if it has the router info, otherwise queue a lookup)
job.runJob();
// getContext().jobQueue().addJob(job);
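The three constants map one-to-one onto the clove delivery modes handled above. A condensed restatement of that mapping, assuming the job's fields are in scope (helper name invented; DESTINATION cloves never reach the pool on this path):

// Condensed illustration of the mapping used above, not the job's actual code.
private long bloomXorFor(int deliveryMode) {
    switch (deliveryMode) {
        case DeliveryInstructions.DELIVERY_MODE_LOCAL:
            return _msgIDBloomXorLocal;   // clove consumed inside this router
        case DeliveryInstructions.DELIVERY_MODE_ROUTER:
            return _msgIDBloomXorRouter;  // clove addressed to a router (possibly us)
        case DeliveryInstructions.DELIVERY_MODE_TUNNEL:
            return _msgIDBloomXorTunnel;  // clove forwarded toward a tunnel gateway
        default:
            return 0;                     // DESTINATION cloves are handled elsewhere
    }
}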

View File

@ -42,12 +42,21 @@ public class SendMessageDirectJob extends JobImpl {
private boolean _alreadySearched;
private boolean _sent;
private long _searchOn;
private final long _msgIDBloomXor;
/**
* @param toPeer may be ourselves
*/
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, int timeoutMs, int priority) {
this(ctx, message, toPeer, null, null, null, null, timeoutMs, priority);
this(ctx, message, toPeer, null, null, null, null, timeoutMs, priority, 0);
}
/**
* @param toPeer may be ourselves
* @param msgIDBloomXor value to xor the messageID with before passing to the InNetMessagePool, may be 0
*/
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, int timeoutMs, int priority, long msgIDBloomXor) {
this(ctx, message, toPeer, null, null, null, null, timeoutMs, priority, msgIDBloomXor);
}
/**
@ -58,7 +67,19 @@ public class SendMessageDirectJob extends JobImpl {
*/
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, ReplyJob onSuccess,
Job onFail, MessageSelector selector, int timeoutMs, int priority) {
this(ctx, message, toPeer, null, onSuccess, onFail, selector, timeoutMs, priority);
this(ctx, message, toPeer, null, onSuccess, onFail, selector, timeoutMs, priority, 0);
}
/**
* @param toPeer may be ourselves
* @param onSuccess may be null
* @param onFail may be null
* @param selector may be null
* @param msgIDBloomXor value to xor the messageID with before passing to the InNetMessagePool, may be 0
*/
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, ReplyJob onSuccess,
Job onFail, MessageSelector selector, int timeoutMs, int priority, long msgIDBloomXor) {
this(ctx, message, toPeer, null, onSuccess, onFail, selector, timeoutMs, priority, msgIDBloomXor);
}
/**
@ -67,11 +88,13 @@ public class SendMessageDirectJob extends JobImpl {
* @param onSuccess may be null
* @param onFail may be null
* @param selector may be null
* @param msgIDBloomXor value to xor the messageID with before passing to the InNetMessagePool, may be 0
*/
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, Job onSend, ReplyJob onSuccess,
Job onFail, MessageSelector selector, int timeoutMs, int priority) {
Job onFail, MessageSelector selector, int timeoutMs, int priority, long msgIDBloomXor) {
super(ctx);
_log = getContext().logManager().getLog(SendMessageDirectJob.class);
_msgIDBloomXor = msgIDBloomXor;
_message = message;
_targetHash = toPeer;
if (timeoutMs < 10*1000) {
@ -159,7 +182,7 @@ public class SendMessageDirectJob extends JobImpl {
if (_onSend != null)
getContext().jobQueue().addJob(_onSend);
getContext().inNetMessagePool().add(_message, _router.getIdentity(), null);
getContext().inNetMessagePool().add(_message, _router.getIdentity(), null, _msgIDBloomXor);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Adding " + _message.getClass().getName()

View File

@ -45,6 +45,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
private final DatabaseLookupMessage _message;
private boolean _replyKeyConsumed;
private final Hash _us;
private final long _msgIDBloomXor;
private final static int MAX_ROUTERS_RETURNED = 3;
private final static int CLOSENESS_THRESHOLD = 8; // FNDF.MAX_TO_FLOOD + 1
@ -57,11 +58,12 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
*/
public final static long EXPIRE_DELAY = 60*60*1000;
public HandleDatabaseLookupMessageJob(RouterContext ctx, DatabaseLookupMessage receivedMessage, RouterIdentity from, Hash fromHash) {
public HandleDatabaseLookupMessageJob(RouterContext ctx, DatabaseLookupMessage receivedMessage, RouterIdentity from, Hash fromHash, long msgIDBloomXor) {
super(ctx);
_log = ctx.logManager().getLog(HandleDatabaseLookupMessageJob.class);
_message = receivedMessage;
_us = ctx.routerHash();
_msgIDBloomXor = msgIDBloomXor;
}
protected boolean answerAllQueries() { return false; }
@ -295,7 +297,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending reply directly to " + toPeer);
Job send = new SendMessageDirectJob(getContext(), message, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
Job send = new SendMessageDirectJob(getContext(), message, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY, _msgIDBloomXor);
send.runJob();
//getContext().netDb().lookupRouterInfo(toPeer, send, null, REPLY_TIMEOUT);
}
@ -338,7 +340,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
m.setMessage(message);
m.setMessageExpiration(message.getMessageExpiration());
m.setTunnelId(replyTunnel);
SendMessageDirectJob j = new SendMessageDirectJob(getContext(), m, toPeer, 10*1000, MESSAGE_PRIORITY);
SendMessageDirectJob j = new SendMessageDirectJob(getContext(), m, toPeer, 10*1000, MESSAGE_PRIORITY, _msgIDBloomXor);
j.runJob();
//getContext().jobQueue().addJob(j);
}

View File

@ -56,11 +56,11 @@ class ExploreJob extends SearchJob {
* @param isRealExplore if true, a standard exploration (no floodfills will be returned)
* if false, a standard lookup (floodfills will be returned, use if low on floodfills)
*/
public ExploreJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key, boolean isRealExplore) {
public ExploreJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key, boolean isRealExplore, long msgIDBloomXor) {
// note that we're treating the last param (isLease) as *false* since we're just exploring.
// if this collides with an actual leaseSet's key, neat, but that wouldn't imply we're actually
// attempting to send that lease a message!
super(context, facade, key, null, null, MAX_EXPLORE_TIME, false, false);
super(context, facade, key, null, null, MAX_EXPLORE_TIME, false, false, msgIDBloomXor);
_peerSelector = (FloodfillPeerSelector) (_facade.getPeerSelector());
_isRealExplore = isRealExplore;
}

View File

@ -8,6 +8,8 @@ package net.i2p.router.networkdb.kademlia;
*
*/
import java.util.Random;
import net.i2p.data.Hash;
import net.i2p.data.router.RouterIdentity;
import net.i2p.data.i2np.DatabaseLookupMessage;
@ -25,6 +27,7 @@ public class FloodfillDatabaseLookupMessageHandler implements HandlerJobBuilder
private RouterContext _context;
private FloodfillNetworkDatabaseFacade _facade;
private Log _log;
private final long _msgIDBloomXor = new Random().nextLong();
public FloodfillDatabaseLookupMessageHandler(RouterContext context, FloodfillNetworkDatabaseFacade facade) {
_context = context;
@ -47,7 +50,7 @@ public class FloodfillDatabaseLookupMessageHandler implements HandlerJobBuilder
DatabaseLookupMessage dlm = (DatabaseLookupMessage)receivedMessage;
if (!_facade.shouldThrottleLookup(dlm.getFrom(), dlm.getReplyTunnel())) {
Job j = new HandleFloodfillDatabaseLookupMessageJob(_context, dlm, from, fromHash);
Job j = new HandleFloodfillDatabaseLookupMessageJob(_context, dlm, from, fromHash, _msgIDBloomXor);
//if (false) {
// // might as well inline it, all the heavy lifting is queued up in later jobs, if necessary
// j.runJob();

View File

@ -8,6 +8,8 @@ package net.i2p.router.networkdb.kademlia;
*
*/
import java.util.Random;
import net.i2p.data.Hash;
import net.i2p.data.router.RouterIdentity;
import net.i2p.data.i2np.DatabaseStoreMessage;
@ -23,6 +25,7 @@ import net.i2p.router.RouterContext;
public class FloodfillDatabaseStoreMessageHandler implements HandlerJobBuilder {
private RouterContext _context;
private FloodfillNetworkDatabaseFacade _facade;
private final long _msgIDBloomXor = new Random().nextLong();
public FloodfillDatabaseStoreMessageHandler(RouterContext context, FloodfillNetworkDatabaseFacade facade) {
_context = context;
@ -35,7 +38,7 @@ public class FloodfillDatabaseStoreMessageHandler implements HandlerJobBuilder {
}
public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
Job j = new HandleFloodfillDatabaseStoreMessageJob(_context, (DatabaseStoreMessage)receivedMessage, from, fromHash, _facade);
Job j = new HandleFloodfillDatabaseStoreMessageJob(_context, (DatabaseStoreMessage)receivedMessage, from, fromHash, _facade, _msgIDBloomXor);
if (false) {
j.runJob();
return null;

View File

@ -26,8 +26,8 @@ import net.i2p.router.networkdb.HandleDatabaseLookupMessageJob;
*
*/
public class HandleFloodfillDatabaseLookupMessageJob extends HandleDatabaseLookupMessageJob {
public HandleFloodfillDatabaseLookupMessageJob(RouterContext ctx, DatabaseLookupMessage receivedMessage, RouterIdentity from, Hash fromHash) {
super(ctx, receivedMessage, from, fromHash);
public HandleFloodfillDatabaseLookupMessageJob(RouterContext ctx, DatabaseLookupMessage receivedMessage, RouterIdentity from, Hash fromHash, long msgIDBloomXor) {
super(ctx, receivedMessage, from, fromHash, msgIDBloomXor);
}
/**

View File

@ -50,19 +50,21 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
// must be lower than LIMIT_ROUTERS in StartExplorersJob
// because exploration does not register a reply job
private static final int LIMIT_ROUTERS = SystemVersion.isSlow() ? 1000 : 4000;
private final long _msgIDBloomXor;
/**
* @param receivedMessage must never have reply token set if it came down a tunnel
*/
public HandleFloodfillDatabaseStoreMessageJob(RouterContext ctx, DatabaseStoreMessage receivedMessage,
RouterIdentity from, Hash fromHash,
FloodfillNetworkDatabaseFacade facade) {
FloodfillNetworkDatabaseFacade facade, long msgIDBloomXor) {
super(ctx);
_log = ctx.logManager().getLog(getClass());
_message = receivedMessage;
_from = from;
_fromHash = fromHash;
_facade = facade;
_msgIDBloomXor = msgIDBloomXor;
}
public void runJob() {
@ -417,10 +419,10 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
return;
}
if (toUs) {
Job send = new SendMessageDirectJob(getContext(), msg, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
Job send = new SendMessageDirectJob(getContext(), msg, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY, _msgIDBloomXor);
send.runJob();
if (msg2 != null) {
Job send2 = new SendMessageDirectJob(getContext(), msg2, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
Job send2 = new SendMessageDirectJob(getContext(), msg2, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY, _msgIDBloomXor);
send2.runJob();
}
return;
@ -492,10 +494,10 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
out2 = tgm2;
}
}
Job send = new SendMessageDirectJob(getContext(), out1, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
Job send = new SendMessageDirectJob(getContext(), out1, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY, _msgIDBloomXor);
send.runJob();
if (msg2 != null) {
Job send2 = new SendMessageDirectJob(getContext(), out2, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
Job send2 = new SendMessageDirectJob(getContext(), out2, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY, _msgIDBloomXor);
send2.runJob();
}
return;

View File

@ -56,6 +56,7 @@ class SearchJob extends JobImpl {
private long _startedOn;
private boolean _floodfillPeersExhausted;
private int _floodfillSearchesOutstanding;
private final long _msgIDBloomXor;
private static final int SEARCH_BREDTH = 3; // 10 peers at a time
/** only send the 10 closest "dont tell me about" refs */
@ -91,7 +92,7 @@ class SearchJob extends JobImpl {
*
*/
public SearchJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key,
Job onSuccess, Job onFailure, long timeoutMs, boolean keepStats, boolean isLease) {
Job onSuccess, Job onFailure, long timeoutMs, boolean keepStats, boolean isLease, long msgIDBloomXor) {
super(context);
if ( (key == null) || (key.getData() == null) )
throw new IllegalArgumentException("Search for null key?");
@ -107,6 +108,7 @@ class SearchJob extends JobImpl {
_peerSelector = facade.getPeerSelector();
_startedOn = -1;
_expiration = getContext().clock().now() + timeoutMs;
_msgIDBloomXor = msgIDBloomXor;
getContext().statManager().addRateData("netDb.searchCount", 1);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Search (" + getClass().getName() + " for " + key, new Exception("Search enqueued by"));
@ -500,7 +502,7 @@ class SearchJob extends JobImpl {
SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(getContext(), router, _state, _facade, this);
SendMessageDirectJob j = new SendMessageDirectJob(getContext(), msg, to,
reply, new FailedJob(getContext(), router), sel, timeout,
OutNetMessage.PRIORITY_EXPLORATORY);
OutNetMessage.PRIORITY_EXPLORATORY, _msgIDBloomXor);
if (FloodfillNetworkDatabaseFacade.isFloodfill(router))
_floodfillSearchesOutstanding++;
j.runJob();

View File

@ -9,6 +9,7 @@ package net.i2p.router.networkdb.kademlia;
*/
import java.util.HashSet;
import java.util.Random;
import java.util.Set;
import net.i2p.data.Hash;
@ -57,6 +58,8 @@ class StartExplorersJob extends JobImpl {
private static final long MAX_LAG = 100;
private static final long MAX_MSG_DELAY = 1500;
private final long _msgIDBloomXor = new Random().nextLong();
public StartExplorersJob(RouterContext context, KademliaNetworkDatabaseFacade facade) {
super(context);
@ -99,7 +102,7 @@ class StartExplorersJob extends JobImpl {
// This is very effective so we don't need to do it often
boolean realexpl = !((needffs && getContext().random().nextInt(2) == 0) ||
(lowffs && getContext().random().nextInt(4) == 0));
ExploreJob j = new ExploreJob(getContext(), _facade, key, realexpl);
ExploreJob j = new ExploreJob(getContext(), _facade, key, realexpl, _msgIDBloomXor);
if (delay > 0)
j.getTiming().setStartAfter(getContext().clock().now() + delay);
getContext().jobQueue().addJob(j);

View File

@ -531,7 +531,7 @@ public abstract class TransportImpl implements Transport {
//// this functionality is built into the InNetMessagePool
//String type = inMsg.getClass().getName();
//MessageHistory.getInstance().receiveMessage(type, inMsg.getUniqueId(), inMsg.getMessageExpiration(), remoteIdentHash, true);
//MessageHistory.getInstance().receiveMessage(type, inMsg.getRawUniqueId(), inMsg.getMessageExpiration(), remoteIdentHash, true);
if (_listener != null) {
_listener.messageReceived(inMsg, remoteIdent, remoteIdentHash);

View File

@ -111,7 +111,7 @@ public class TransportManager implements TransportEventListener {
private static final long UPNP_REFRESH_TIME = UPnP.LEASE_TIME_SECONDS * 1000L / 3;
private final long _msgIdBloomXor;
private final long _msgIDBloomXor;
public TransportManager(RouterContext context) {
_context = context;
@ -136,7 +136,7 @@ public class TransportManager implements TransportEventListener {
_dhThread = (_enableUDP || enableNTCP2) ? new DHSessionKeyBuilder.PrecalcRunner(context) : null;
// always created, even if NTCP2 is not enabled, because ratchet needs it
_xdhThread = new X25519KeyFactory(context);
_msgIdBloomXor = _context.random().nextLong();
_msgIDBloomXor = _context.random().nextLong();
}
/**
@ -968,7 +968,7 @@ public class TransportManager implements TransportEventListener {
if (_log.shouldLog(Log.DEBUG))
_log.debug("I2NPMessage received: " + message.getClass().getSimpleName() /*, new Exception("Where did I come from again?") */ );
try {
_context.inNetMessagePool().add(message, fromRouter, fromRouterHash, _msgIdBloomXor);
_context.inNetMessagePool().add(message, fromRouter, fromRouterHash, _msgIDBloomXor);
//if (_log.shouldLog(Log.DEBUG))
// _log.debug("Added to in pool");
} catch (IllegalArgumentException iae) {

View File

@ -1,5 +1,7 @@
package net.i2p.router.tunnel;
import java.util.Random;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
@ -33,8 +35,8 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
private final Log _log;
private final Hash _client;
private final GarlicMessageReceiver _receiver;
private String _clientNickname;
private final long _msgIdBloomXor;
private final String _clientNickname;
private final long _msgIDBloomXor;
/**
* @param client null for router tunnel
*/
@ -53,10 +55,10 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
+ " b32: " + _client.toBase32()
+ ") InboundMessageDistributor with tunnel pool settings: " + clienttps);
_clientNickname = clienttps.getDestinationNickname();
_msgIdBloomXor = clienttps.getMsgIdBloomXor();
_msgIDBloomXor = clienttps.getMsgIdBloomXor();
} else {
_clientNickname = "NULL/Expl";
_msgIdBloomXor = 0;
_msgIDBloomXor = new Random().nextLong();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Initializing null or exploratory InboundMessageDistributor");
}
@ -215,10 +217,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
+ " (for client " + _clientNickname + " ("
+ ((_client != null) ? _client.toBase32() : "null")
+ ") to target=NULL/tunnel=NULL " + msg);
if (_msgIdBloomXor == 0)
_context.inNetMessagePool().add(msg, null, null);
else
_context.inNetMessagePool().add(msg, null, null, _msgIdBloomXor);
_context.inNetMessagePool().add(msg, null, null, _msgIDBloomXor);
}
} else if (_context.routerHash().equals(target)) {
if (type == GarlicMessage.MESSAGE_TYPE)
@ -292,10 +291,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
_log.info("Storing garlic LS down tunnel for: " + dsm.getKey() + " sent to: "
+ _clientNickname + " ("
+ (_client != null ? _client.toBase32() : ") router"));
if (_msgIdBloomXor == 0)
_context.inNetMessagePool().add(dsm, null, null);
else
_context.inNetMessagePool().add(dsm, null, null, _msgIdBloomXor);
_context.inNetMessagePool().add(dsm, null, null, _msgIDBloomXor);
} else {
if (_client != null) {
// drop it, since the data we receive shouldn't include router
@ -317,10 +313,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
if (_log.shouldLog(Log.INFO))
_log.info("Storing garlic RI down tunnel (" + _clientNickname
+ ") for: " + dsm.getKey());
if (_msgIdBloomXor == 0)
_context.inNetMessagePool().add(dsm, null, null);
else
_context.inNetMessagePool().add(dsm, null, null, _msgIdBloomXor);
_context.inNetMessagePool().add(dsm, null, null, _msgIDBloomXor);
}
} else if (_client != null && type == DatabaseSearchReplyMessage.MESSAGE_TYPE) {
// DSRMs show up here now that replies are encrypted
@ -339,10 +332,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
orig = newMsg;
}
****/
if (_msgIdBloomXor == 0)
_context.inNetMessagePool().add(orig, null, null);
else
_context.inNetMessagePool().add(orig, null, null, _msgIdBloomXor);
_context.inNetMessagePool().add(orig, null, null, _msgIDBloomXor);
} else if (type == DataMessage.MESSAGE_TYPE) {
// a data message targetting the local router is how we send load tests (real
// data messages target destinations)
@ -359,10 +349,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
+ _clientNickname + " (" + _client.toBase32() + ") : "
+ data, new Exception("cause"));
} else {
if (_msgIdBloomXor == 0)
_context.inNetMessagePool().add(data, null, null);
else
_context.inNetMessagePool().add(data, null, null, _msgIdBloomXor);
_context.inNetMessagePool().add(data, null, null, _msgIDBloomXor);
}
return;
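The constructor's choice of constant, condensed: client pools reuse the per-pool value from TunnelPoolSettings, while the exploratory/null case now draws its own random constant instead of falling back to the raw message ID. A hedged restatement of the logic above, assuming clienttps is the client pool's settings:

// Condensed restatement of the constructor logic shown above.
long msgIDBloomXor = (clienttps != null)
        ? clienttps.getMsgIdBloomXor()   // one stable constant per client tunnel pool
        : new Random().nextLong();       // exploratory/router tunnels: fresh constant per distributor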

View File

@ -113,7 +113,7 @@ class OutboundMessageDistributor {
_log.debug("queueing inbound message to ourselves: " + m);
// TODO if UnknownI2NPMessage, convert it.
// See FragmentHandler.receiveComplete()
_context.inNetMessagePool().add(m, null, null);
_context.inNetMessagePool().add(m, null, null, 0);
return;
} else {
OutNetMessage out = new OutNetMessage(_context, m, _context.clock().now() + MAX_DISTRIBUTE_TIME, _priority, target);

View File

@ -598,7 +598,7 @@ public class TunnelDispatcher implements Service {
+ " messageType: " + submsg.getClass().getSimpleName());
return;
}
//_context.messageHistory().tunnelDispatched("message " + msg.getUniqueId() + "/" + msg.getMessage().getUniqueId() + " on tunnel "
//_context.messageHistory().tunnelDispatched("message " + msg.getRawUniqueId() + "/" + msg.getMessage().getRawUniqueId() + " on tunnel "
// + msg.getTunnelId().getTunnelId() + " as inbound gateway");
_context.messageHistory().tunnelDispatched(msg.getUniqueId(),
submsg.getUniqueId(),

View File

@ -642,7 +642,7 @@ class BuildHandler implements Runnable {
getContext().messageHistory().tunnelRejected(_state.fromHash, new TunnelId(_req.readReceiveTunnelId()), _nextPeer,
// this is all disabled anyway
//"rejected because we couldn't find " + _nextPeer + ": " +
//_state.msg.getUniqueId() + "/" + _req.readNextTunnelId());
//_state.msg.getRawUniqueId() + "/" + _req.readNextTunnelId());
"lookup fail");
}
}
@ -962,7 +962,7 @@ class BuildHandler implements Runnable {
_context.messageHistory().tunnelRejected(from, new TunnelId(ourId), nextPeer,
// this is all disabled anyway
//"rejecting for " + response + ": " +
//state.msg.getUniqueId() + "/" + ourId + "/" + req.readNextTunnelId() + " delay " +
//state.msg.getRawUniqueId() + "/" + ourId + "/" + req.readNextTunnelId() + " delay " +
//recvDelay + " as " +
//(isOutEnd ? "outbound endpoint" : isInGW ? "inbound gw" : "participant"));
Integer.toString(response));

View File

@ -141,12 +141,12 @@ class BuildReplyHandler {
if (log.shouldDebug()) {
log.debug(reply.getUniqueId() + ": Decrypting AES record " + recordNum + "/" + hop + "/" + j + " with replyKey "
+ replyKey.toBase64() + "/" + Base64.encode(replyIV) + ": " + cfg);
//log.debug(reply.getUniqueId() + ": before decrypt: " + Base64.encode(data));
//log.debug(reply.getUniqueId() + ": Full reply rec: sz=" + data.length + " data=" + Base64.encode(data));
//log.debug(reply.getRawUniqueId() + ": before decrypt: " + Base64.encode(data));
//log.debug(reply.getRawUniqueId() + ": Full reply rec: sz=" + data.length + " data=" + Base64.encode(data));
}
ctx.aes().decrypt(data, 0, data, 0, replyKey, replyIV, 0, data.length);
//if (log.shouldLog(Log.DEBUG))
// log.debug(reply.getUniqueId() + ": after decrypt: " + Base64.encode(data));
// log.debug(reply.getRawUniqueId() + ": after decrypt: " + Base64.encode(data));
}
}
// ok, all of the layered encryption is stripped, so lets verify it

View File

@ -325,7 +325,7 @@ abstract class BuildRequestor {
}
}
//if (log.shouldLog(Log.DEBUG))
// log.debug("Tunnel build message " + msg.getUniqueId() + " created in " + createTime
// log.debug("Tunnel build message " + msg.getRawUniqueId() + " created in " + createTime
// + "ms and dispatched in " + (System.currentTimeMillis()-beforeDispatch));
return true;
}