diff --git a/apps/i2psnark/java/src/org/klomp/snark/web/I2PSnarkServlet.java b/apps/i2psnark/java/src/org/klomp/snark/web/I2PSnarkServlet.java
index b302bd4b4..e7e25260b 100644
--- a/apps/i2psnark/java/src/org/klomp/snark/web/I2PSnarkServlet.java
+++ b/apps/i2psnark/java/src/org/klomp/snark/web/I2PSnarkServlet.java
@@ -240,7 +240,7 @@ public class I2PSnarkServlet extends HttpServlet {
             if ( (announceURLOther != null) && (announceURLOther.trim().length() > "http://.i2p/announce".length()) )
                 announceURL = announceURLOther;

-            if (baseFile.exists()) {
+            if (baseFile.exists() && baseFile.isFile()) {
                 try {
                     Storage s = new Storage(baseFile, announceURL, null);
                     s.create();
@@ -258,6 +258,8 @@
                 } catch (IOException ioe) {
                     _manager.addMessage("Error creating a torrent for " + baseFile.getAbsolutePath() + ": " + ioe.getMessage());
                 }
+            } else if (baseFile.exists()) {
+                _manager.addMessage("I2PSnark doesn't yet support creating multifile torrents");
             } else {
                 _manager.addMessage("Cannot create a torrent for the nonexistant data: " + baseFile.getAbsolutePath());
             }
diff --git a/history.txt b/history.txt
index edefb44f3..8a456113b 100644
--- a/history.txt
+++ b/history.txt
@@ -1,4 +1,10 @@
-$Id: history.txt,v 1.375 2005/12/30 18:33:54 jrandom Exp $
+$Id: history.txt,v 1.376 2005/12/31 18:40:23 jrandom Exp $
+
+2006-01-01 jrandom
+    * Disable multifile torrent creation in I2PSnark's web UI for the moment
+      (though it can still seed and participate in multifile swarms)
+    * Enable a new speed calculation for profiling peers, using their peak
+      1 minute average tunnel throughput as their speed.

 2005-12-31 jrandom
     * Include a simple torrent creator in the I2PSnark web UI
diff --git a/router/java/src/net/i2p/router/LoadTestManager.java b/router/java/src/net/i2p/router/LoadTestManager.java
index 42d5fb573..75316e420 100644
--- a/router/java/src/net/i2p/router/LoadTestManager.java
+++ b/router/java/src/net/i2p/router/LoadTestManager.java
@@ -399,7 +399,7 @@ public class LoadTestManager {
             else
                 buf.append("[unknown_peer]");
             buf.append(" ");
-            TunnelId id = tunnel.getReceiveTunnelId(i);
+            TunnelId id = info.getReceiveTunnelId(i);
             if (id != null)
                 buf.append(id.getTunnelId());
             else
@@ -418,7 +418,7 @@
             else
                 buf.append("[unknown_peer]");
             buf.append(" ");
-            TunnelId id = tunnel.getReceiveTunnelId(i);
+            TunnelId id = info.getReceiveTunnelId(i);
             if (id != null)
                 buf.append(id.getTunnelId());
             else
diff --git a/router/java/src/net/i2p/router/ProfileManager.java b/router/java/src/net/i2p/router/ProfileManager.java
index b8707cb57..0c8348d60 100644
--- a/router/java/src/net/i2p/router/ProfileManager.java
+++ b/router/java/src/net/i2p/router/ProfileManager.java
@@ -60,6 +60,13 @@ public interface ProfileManager {
      *
      */
     void tunnelTestSucceeded(Hash peer, long responseTimeMs);
+
+    /**
+     * Note that we were able to push some data through a tunnel that the peer
+     * is participating in (detected after rtt).
+     *
+     */
+    void tunnelDataPushed(Hash peer, long rtt, int size);

     /**
      * Note that the peer participated in a tunnel that failed.  Its failure may not have
diff --git a/router/java/src/net/i2p/router/RouterVersion.java b/router/java/src/net/i2p/router/RouterVersion.java
index 3633c2586..835342fe2 100644
--- a/router/java/src/net/i2p/router/RouterVersion.java
+++ b/router/java/src/net/i2p/router/RouterVersion.java
@@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
  *
  */
 public class RouterVersion {
-    public final static String ID = "$Revision: 1.322 $ $Date: 2005/12/30 18:33:54 $";
+    public final static String ID = "$Revision: 1.323 $ $Date: 2005/12/31 18:40:22 $";
     public final static String VERSION = "0.6.1.8";
-    public final static long BUILD = 6;
+    public final static long BUILD = 7;
     public static void main(String args[]) {
         System.out.println("I2P Router version: " + VERSION + "-" + BUILD);
         System.out.println("Router ID: " + RouterVersion.ID);
diff --git a/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java b/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java
index 01d95bbae..7a700656a 100644
--- a/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java
+++ b/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java
@@ -562,12 +562,20 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
             getContext().messageHistory().sendPayloadMessage(dataMsgId, true, sendTime);
             getContext().clientManager().messageDeliveryStatusUpdate(_from, _clientMessageId, true);
             _lease.setNumSuccess(_lease.getNumSuccess()+1);
+
+            int size = _clientMessageSize;

             getContext().statManager().addRateData("client.sendAckTime", sendTime, 0);
             getContext().statManager().addRateData("client.sendMessageSize", _clientMessageSize, sendTime);
-            if (_outTunnel != null)
-                for (int i = 0; i < _outTunnel.getLength(); i++)
+            if (_outTunnel != null) {
+                if (_outTunnel.getLength() > 0)
+                    size = ((size + 1023) / 1024) * 1024; // messages are in ~1KB blocks
+
+                for (int i = 0; i < _outTunnel.getLength(); i++) {
                     getContext().profileManager().tunnelTestSucceeded(_outTunnel.getPeer(i), sendTime);
+                    getContext().profileManager().tunnelDataPushed(_outTunnel.getPeer(i), sendTime, size);
+                }
+            }
             if (_inTunnel != null)
                 for (int i = 0; i < _inTunnel.getLength(); i++)
                     getContext().profileManager().tunnelTestSucceeded(_inTunnel.getPeer(i), sendTime);
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java
index 0bc7c8183..6fc9f7d5c 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java
@@ -136,12 +136,12 @@ class StoreJob extends JobImpl {
         List closestHashes = getClosestRouters(_state.getTarget(), toCheck, _state.getAttempted());
         if ( (closestHashes == null) || (closestHashes.size() <= 0) ) {
             if (_state.getPending().size() <= 0) {
-                if (_log.shouldLog(Log.WARN))
-                    _log.warn(getJobId() + ": No more peers left and none pending");
+                if (_log.shouldLog(Log.INFO))
+                    _log.info(getJobId() + ": No more peers left and none pending");
                 fail();
             } else {
-                if (_log.shouldLog(Log.WARN))
-                    _log.warn(getJobId() + ": No more peers left but some are pending, so keep waiting");
+                if (_log.shouldLog(Log.INFO))
+                    _log.info(getJobId() + ": No more peers left but some are pending, so keep waiting");
                 return;
             }
         } else {
@@ -152,8 +152,8 @@ class StoreJob extends JobImpl {
             Hash peer = (Hash)iter.next();
             DataStructure ds = _facade.getDataStore().get(peer);
             if ( (ds == null) || !(ds instanceof RouterInfo) ) {
-                if (_log.shouldLog(Log.WARN))
-                    _log.warn(getJobId() + ": Error selecting closest hash that wasnt a router! " + peer + " : " + ds);
+                if (_log.shouldLog(Log.INFO))
+                    _log.info(getJobId() + ": Error selecting closest hash that wasnt a router! " + peer + " : " + ds);
                 _state.addSkipped(peer);
             } else {
                 int peerTimeout = _facade.getPeerTimeout(peer);
@@ -295,10 +295,6 @@

         _state.addPending(peer.getIdentity().getHash());

-        SendSuccessJob onReply = new SendSuccessJob(getContext(), peer);
-        FailedJob onFail = new FailedJob(getContext(), peer, getContext().clock().now());
-        StoreMessageSelector selector = new StoreMessageSelector(getContext(), getJobId(), peer, token, expiration);
-
         TunnelInfo outTunnel = selectOutboundTunnel();
         if (outTunnel != null) {
             //if (_log.shouldLog(Log.DEBUG))
@@ -306,7 +302,11 @@
             //                + peer.getIdentity().getHash().toBase64());
             TunnelId targetTunnelId = null; // not needed
             Job onSend = null; // not wanted
-
+
+            SendSuccessJob onReply = new SendSuccessJob(getContext(), peer, outTunnel, msg.getMessageSize());
+            FailedJob onFail = new FailedJob(getContext(), peer, getContext().clock().now());
+            StoreMessageSelector selector = new StoreMessageSelector(getContext(), getJobId(), peer, token, expiration);
+
             if (_log.shouldLog(Log.DEBUG))
                 _log.debug("sending store to " + peer.getIdentity().getHash() + " through " + outTunnel + ": " + msg);
             getContext().messageRegistry().registerPending(selector, onReply, onFail, (int)(expiration - getContext().clock().now()));
@@ -333,10 +333,20 @@
      */
     private class SendSuccessJob extends JobImpl implements ReplyJob {
         private RouterInfo _peer;
+        private TunnelInfo _sendThrough;
+        private int _msgSize;

         public SendSuccessJob(RouterContext enclosingContext, RouterInfo peer) {
+            this(enclosingContext, peer, null, 0);
+        }
+        public SendSuccessJob(RouterContext enclosingContext, RouterInfo peer, TunnelInfo sendThrough, int size) {
             super(enclosingContext);
             _peer = peer;
+            _sendThrough = sendThrough;
+            if (size <= 0)
+                _msgSize = 0;
+            else
+                _msgSize = ((size + 1023) / 1024) * 1024;
         }

         public String getName() { return "Kademlia Store Send Success"; }
@@ -348,6 +358,13 @@
             getContext().profileManager().dbStoreSent(_peer.getIdentity().getHash(), howLong);
             getContext().statManager().addRateData("netDb.ackTime", howLong, howLong);

+            if ( (_sendThrough != null) && (_msgSize > 0) ) {
+                if (_log.shouldLog(Log.WARN))
+                    _log.warn("sent a " + _msgSize + "byte netDb message through tunnel " + _sendThrough + " after " + howLong);
+                for (int i = 0; i < _sendThrough.getLength(); i++)
+                    getContext().profileManager().tunnelDataPushed(_sendThrough.getPeer(i), howLong, _msgSize);
+            }
+
             if (_state.getCompleteCount() >= getRedundancy()) {
                 succeed();
             } else {
@@ -375,8 +392,8 @@
             _sendOn = sendOn;
         }
         public void runJob() {
-            if (_log.shouldLog(Log.WARN))
-                _log.warn(StoreJob.this.getJobId() + ": Peer " + _peer.getIdentity().getHash().toBase64()
+            if (_log.shouldLog(Log.INFO))
+                _log.info(StoreJob.this.getJobId() + ": Peer " + _peer.getIdentity().getHash().toBase64()
                           + " timed out sending " + _state.getTarget());
             _state.replyTimeout(_peer.getIdentity().getHash());
             getContext().profileManager().dbStoreFailed(_peer.getIdentity().getHash());
@@ -406,8 +423,8 @@
      * Send totally failed
      */
     protected void fail() {
-        if (_log.shouldLog(Log.WARN))
-            _log.warn(getJobId() + ": Failed sending key " + _state.getTarget());
+        if (_log.shouldLog(Log.INFO))
+            _log.info(getJobId() + ": Failed sending key " + _state.getTarget());
         if (_log.shouldLog(Log.DEBUG))
             _log.debug(getJobId() + ": State of failed send: " + _state, new Exception("Who failed me?"));
         if (_onFailure != null)
diff --git a/router/java/src/net/i2p/router/peermanager/PeerProfile.java b/router/java/src/net/i2p/router/peermanager/PeerProfile.java
index 5d830c625..8a743a27b 100644
--- a/router/java/src/net/i2p/router/peermanager/PeerProfile.java
+++ b/router/java/src/net/i2p/router/peermanager/PeerProfile.java
@@ -233,6 +233,13 @@ public class PeerProfile {
             _log.info("Updating tunnel test time for " + _peer.toBase64().substring(0,6)
                       + " to " + _tunnelTestResponseTimeAvg + " via " + ms);
     }
+
+    /** bytes per minute */
+    private volatile double _peakThroughput;
+    private volatile long _peakThroughputCurrentTotal;
+    public double getPeakThroughputKBps() { return _peakThroughput / (60d*1024d); }
+    public void setPeakThroughputKBps(double kBps) { _peakThroughput = kBps*60; }
+    void dataPushed(int size) { _peakThroughputCurrentTotal += size; }

     /**
      * when the given peer is performing so poorly that we don't want to bother keeping
@@ -301,6 +308,22 @@
         _expanded = true;
     }

+    private long _lastCoalesceDate = System.currentTimeMillis();
+    private void coalesceThroughput() {
+        long now = System.currentTimeMillis();
+        long measuredPeriod = now - _lastCoalesceDate;
+        if (measuredPeriod >= 60*1000) {
+            long tot = _peakThroughputCurrentTotal;
+            double peak = _peakThroughput;
+            if (tot >= peak)
+                _peakThroughput = tot;
+            _peakThroughputCurrentTotal = 0;
+            if ( (tot > 0) && _log.shouldLog(Log.WARN) )
+                _log.warn("updating throughput after " + tot + " to " + (_peakThroughput/60d) + " for " + _peer.toBase64());
+            _lastCoalesceDate = now;
+        }
+    }
+
     /** update the stats and rates (this should be called once a minute) */
     public void coalesceStats() {
         if (!_expanded) return;
@@ -316,6 +339,8 @@
         _dbHistory.coalesceStats();
         _tunnelHistory.coalesceStats();

+        coalesceThroughput();
+
         _speedValue = calculateSpeed();
         _oldSpeedValue = calculateOldSpeed();
         _reliabilityValue = calculateReliability();
diff --git a/router/java/src/net/i2p/router/peermanager/ProfileManagerImpl.java b/router/java/src/net/i2p/router/peermanager/ProfileManagerImpl.java
index 474588fd3..58952b6c2 100644
--- a/router/java/src/net/i2p/router/peermanager/ProfileManagerImpl.java
+++ b/router/java/src/net/i2p/router/peermanager/ProfileManagerImpl.java
@@ -117,6 +117,14 @@ public class ProfileManagerImpl implements ProfileManager {
             data.getTunnelTestResponseTimeSlow().addData(responseTimeMs, responseTimeMs);
     }

+    public void tunnelDataPushed(Hash peer, long rtt, int size) {
+        if (_context.routerHash().equals(peer))
+            return;
+        PeerProfile data = getProfile(peer);
+        if (data != null)
+            data.dataPushed(size); // ignore rtt, as we are averaging over a minute
+    }
+
     private int getSlowThreshold() {
         // perhaps we should have this compare vs. tunnel.testSuccessTime?
         return 5*1000;
diff --git a/router/java/src/net/i2p/router/peermanager/ProfileOrganizerRenderer.java b/router/java/src/net/i2p/router/peermanager/ProfileOrganizerRenderer.java
index 614eaf798..a3565f770 100644
--- a/router/java/src/net/i2p/router/peermanager/ProfileOrganizerRenderer.java
+++ b/router/java/src/net/i2p/router/peermanager/ProfileOrganizerRenderer.java
@@ -124,7 +124,7 @@ class ProfileOrganizerRenderer {
         }
         buf.append("");
         buf.append("Definitions:
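Editor's note: the sketch below is not part of the patch. It is a condensed, standalone restatement of the new peak-throughput speed calculation introduced above, intended only to make the data flow easier to follow: senders report pushed bytes via ProfileManager.tunnelDataPushed(), PeerProfile accumulates them, and the once-a-minute coalesce pass keeps the highest 1-minute total as the peer's speed. The class and member names here are simplified, hypothetical stand-ins; the real logic lives in PeerProfile, ProfileManagerImpl, OutboundClientMessageOneShotJob, and StoreJob as shown in the diff.

// Illustrative sketch only (hypothetical class, not a router class).
public class PeakThroughputSketch {
    private volatile double _peakBytesPerMinute;   // highest 1-minute total seen so far
    private volatile long _currentMinuteTotal;     // bytes pushed since the last coalesce
    private long _lastCoalesce = System.currentTimeMillis();

    /** round up to ~1KB blocks, as the message senders do before reporting */
    static int roundToKB(int size) { return ((size + 1023) / 1024) * 1024; }

    /** called each time data is pushed through a tunnel the peer participates in */
    void dataPushed(int sizeBytes) { _currentMinuteTotal += sizeBytes; }

    /** called roughly once a minute, like PeerProfile.coalesceStats() */
    void coalesceThroughput() {
        long now = System.currentTimeMillis();
        if (now - _lastCoalesce >= 60*1000) {
            if (_currentMinuteTotal >= _peakBytesPerMinute)
                _peakBytesPerMinute = _currentMinuteTotal;  // keep the peak; it never decays here
            _currentMinuteTotal = 0;
            _lastCoalesce = now;
        }
    }

    /** the value used as the peer's speed, expressed in KBps */
    double getPeakThroughputKBps() { return _peakBytesPerMinute / (60d*1024d); }

    public static void main(String[] args) {
        PeakThroughputSketch profile = new PeakThroughputSketch();
        profile.dataPushed(roundToKB(3000));   // a ~3KB message counts as 3072 bytes
        profile.dataPushed(roundToKB(500));    // a small message still counts as one full 1KB block
        // once a minute has elapsed, coalesceThroughput() folds these 4 KB into the peak
    }
}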