diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 000000000..e93798548 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,183 @@ +This product includes both public domain code and licensed code as described below. +For all code, unless otherwise stated in the appropriate license, the following applies: + + + NO WARRANTY + + BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. 
+ + + +LICENSES +-------- + +Core: +Public domain except as listed below: + + ElGamal and DSA code: + Copyright (c) 2003, TheCrypto + See licenses/LICENSE-ElGamalDSA.txt + + SHA256 and HMAC-SHA256: + Copyright (c) 2000 - 2004 The Legion Of The Bouncy Castle + See licenses/LICENSE-SHA256.txt + + AES code: + Under the Cryptix (MIT) license, written by the Cryptix team + (That's what our website says but all our AES code looks like it is public domain) + + Crypto filters: + From the xlattice app - http://xlattice.sourceforge.net/ + See licenses/LICENSE-BSD.txt + + SNTP code: + Copyright (c) 2004, Adam Buckley + See licenses/LICENSE-SNTP.txt + + PRNG: + Copyright (C) 2001, 2002, Free Software Foundation, Inc. + See licenses/LICENSE-LGPLv2.1.txt + + GMP 4.1.3: + Copyright 1991, 1996, 1999, 2000 Free Software Foundation, Inc. + See licenses/LICENSE-LGPLv2.1.txt + + HashCash code: + Copyright 2006 Gregory Rubin grrubin@gmail.com + See licenses/LICENSE-HashCash.txt + + + +Router: +Public domain + + + +Installer: + Launch4j: + Copyright (C) 2005 Grzegorz Kowal + See licenses/LICENSE-GPLv2.txt + + Izpack: + See licenses/LICENSE-Apache1.1.txt + + + +Wrapper: +Copyright (c) 1999, 2004 Tanuki Software +See licenses/LICENSE-Wrapper.txt + + + +Applications: + + Addressbook: + Copyright (c) 2004 Ragnarok + See licenses/LICENSE-Addressbook.txt + + BOB: + Copyright (C) sponge + DWTFYWTPL + + I2PSnark: + Copyright (C) 2003 Mark J. Wielaard + See licenses/LICENSE-GPLv2.txt + + I2PTunnel: + (c) 2003 - 2004 mihi + GPLv2 with exception. + See licenses/LICENSE-I2PTunnel.txt + See licenses/LICENSE-GPLv2.txt + + I2PTunnel SOCKS Proxy: + Copyright (c) 2004 by human + GPLv2 with exception. + See licenses/LICENSE-I2PTunnel.txt + See licenses/LICENSE-GPLv2.txt + + I2PTunnel UDP and Streamr: + By welterde. + See licenses/LICENSE-GPLv2.txt + + Jetty 5.1.12: + Copyright 2000-2004 Mort Bay Consulting Pty. Ltd. 
+ See licenses/LICENSE-Apache1.1.txt + See licenses/LICENSE-Apache2.0.txt + See licenses/NOTICE-Ant.txt + See licenses/NOTICE-Commons-Logging.txt + + JRobin 1.4.0: + See licenses/LICENSE-LGPLv2.1.txt + + Ministreaming Lib: + By mihi. + See licenses/LICENSE-BSD.txt + + Proxyscript: + By Cervantes. + Public domain. + + Router console: + Public domain. + + SAM: + Public domain. + + Streaming Lib: + Public domain. + + SusiDNS: + Copyright (C) 2005 + See licenses/LICENSE-GPLv2.txt + + SusiMail: + Copyright (C) 2004-2005 + See licenses/LICENSE-GPLv2.txt + + Systray: + Public domain. + Bundles systray4j code: + See licenses/LICENSE-GPLv2.txt + + + +Other Applications and Libraries +-------------------------------- +The following applications and libraries are not used or bundled in +binary packages, therefore the licenses are not included in binary +distributions. See the source package for the additional license information. + + Atalk: + Public domain + + SAM C Library: + Copyright (c) 2004, Matthew P. Cashdollar + See apps/sam/c/doc/license.txt + + SAM C# Library: + Public domain. + See apps/sam/csharp/README + + SAM Perl Library: + See licenses/LICENSE-GPLv2.txt + + SAM Python Library: + Public domain. diff --git a/README.txt b/README.txt new file mode 100644 index 000000000..3aa2141bb --- /dev/null +++ b/README.txt @@ -0,0 +1,29 @@ +Prerequisites to build from source: + Java SDK (preferably Sun) 1.5.0 or higher (1.6 recommended) + Apache Ant 1.7.0 or higher + +To build: + ant pkg + Run 'ant' with no arguments to see other build options. + See http://www.i2p2.de/download.html for installation instructions. + +Documentation: + http://www.i2p2.de/ + API: run 'ant javadoc' then start at build/javadoc/index.html + +Latest release: + http://www.i2p2.de/download.html + +To get development branch from source control: + http://www.i2p2.de/newdevelopers.html + +FAQ: + http://www.i2p2.de/faq.html + +Need help? 
+ IRC irc.freenode.net #i2p + http://forum.i2p2.de/ + +Licenses: + See LICENSE.txt + diff --git a/Slackware/README b/Slackware/README new file mode 100644 index 000000000..494ffa420 --- /dev/null +++ b/Slackware/README @@ -0,0 +1,30 @@ +You will need at least monotone >= 0.41 to get the most recent build source +and connect it to an already running i2p router. + +OR: + +You may download the actual "stable" source from +http://code.google.com/p/i2p/downloads/list + +You will need the following tools to build the i2p and i2p-base packages: + +bash >= 3.1.017 +requiredbuilder >= 0.16.3 ( http://www.stabellini.net/requiredbuilder.html ) +jre >= 6u11 +jdk >= 6u11 +apache-ant >= 1.7.1 +perl >= 5.10.0 +python >= 2.5.2 + +Recommended: +monotone >= 0.41 ( http://pkgs.dr.ea.ms ) + +See also: + +i2p/readme.txt + +AND + +i2p-base/readme.txt + +for information and handy tips. diff --git a/Slackware/i2p-base/build.xml b/Slackware/i2p-base/build.xml new file mode 100644 index 000000000..f8def337e --- /dev/null +++ b/Slackware/i2p-base/build.xml @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/Slackware/i2p-base/doinst.sh b/Slackware/i2p-base/doinst.sh new file mode 100644 index 000000000..087ee112c --- /dev/null +++ b/Slackware/i2p-base/doinst.sh @@ -0,0 +1,45 @@ +#!/bin/sh +touch /etc/rc.d/rc.local +touch /etc/rc.d/rc.local_shutdown + +I2PRCA=`grep -c /etc/rc.d/rc.local -e i2p` +I2PRCB=`grep -c /etc/rc.d/rc.local_shutdown -e i2p` + +echo + +if [ $I2PRCA -eq 0 ] ; then + echo "if [ -x /etc/rc.d/rc.i2p ] ; then" >> /etc/rc.d/rc.local + echo " sh /etc/rc.d/rc.i2p start" >> /etc/rc.d/rc.local + echo "fi" >> /etc/rc.d/rc.local + echo "/etc/rc.d/rc.local modified." +else + echo "/etc/rc.d/rc.local looks OK" +fi + +if [ $I2PRCB -eq 0 ] ; then + echo "if [ -x /etc/rc.d/rc.i2p ] ; then" >> /etc/rc.d/rc.local_shutdown + echo " sh /etc/rc.d/rc.i2p stop" >> /etc/rc.d/rc.local_shutdown + echo "fi" >> /etc/rc.d/rc.local_shutdown + echo "/etc/rc.d/rc.local_shutdown modified." 
+else + echo "/etc/rc.d/rc.local_shutdown looks OK" +fi + +if [ -f /etc/rc.d/rc.i2p ] ; then + if [ -x /etc/rc.d/rc.i2p ] ; then + chmod +x /etc/rc.d/rc.i2p.new + fi + echo + echo "It appears that you already have /etc/rc.d/rc.i2p" + echo "You may wish to replace it with /etc/rc.d/rc.i2p.new" + echo +else + mv /etc/rc.d/rc.i2p.new /etc/rc.d/rc.i2p + echo + echo "Installation finished. The i2p start/stop script has been" + echo "installed on /etc/rc.d directory. You should chmod +x" + echo '/etc/rc.d/rc.i2p to start it on boot.' + echo +fi + +exit diff --git a/Slackware/i2p-base/i2p-base.SlackBuild b/Slackware/i2p-base/i2p-base.SlackBuild new file mode 100644 index 000000000..d91d87263 --- /dev/null +++ b/Slackware/i2p-base/i2p-base.SlackBuild @@ -0,0 +1,42 @@ +#!/bin/sh +# Heavily based on the Slackware 12.1 SlackBuild +# Slackware build script for i2p + +# PLEASE READ THIS: +# Probably you will never have to update i2p packages with upgradepkg, +# just because i2p has an auto-update function. +# How to start i2p: +# After installpkg command, doinst.sh will execute a postinstallation script +# needed by i2p. After that you have to chmod +x /etc/rc.d/rc.i2p and start +# i2p service with /etc/rc.d/rc.i2p start. +# Now tell your browser to use this proxy: localhost on port 4444 and open +# this page: http://localhost:7657/index.jsp +# Here you can configure i2p, watch network status and navigate anonymously. +# It's suggested to subscribe to various dns host, like i2host.i2p +# For any additional information, visit i2host.i2p and forum.i2p + +CWD=$(pwd) +TMP=/tmp +PKG=/$TMP/package-base-i2p +rm -rf $PKG +mkdir -p $PKG +# put here installation dir, without first and last / +# es: usr/local +NAME=i2p-base +VERSION=0.0.1 +BUILD=1sim +ARCH=noarch +INSTALL_DIR=opt +cd $PKG +chown -R root:root . 
+ +mkdir -p $PKG/etc/rc.d +mkdir -p $PKG/install +sed "s|directory|/$INSTALL_DIR/i2p/i2prouter|g" $CWD/rc.i2p_def > $PKG/etc/rc.d/rc.i2p.new +chmod 644 $PKG/etc/rc.d/rc.i2p.new +sed "s|directory|/$INSTALL_DIR/i2p/|g" $CWD/doinst.sh > $PKG/install/doinst.sh +cat $CWD/slack-desc > $PKG/install/slack-desc + +cd $PKG +requiredbuilder -v -y -s $CWD $PKG +makepkg -l y -c n $CWD/${NAME}-$VERSION-$ARCH-$BUILD.tgz diff --git a/Slackware/i2p-base/rc.i2p_def b/Slackware/i2p-base/rc.i2p_def new file mode 100644 index 000000000..268968042 --- /dev/null +++ b/Slackware/i2p-base/rc.i2p_def @@ -0,0 +1,27 @@ +#!/bin/sh +# Start/stop i2p service. + +i2p_start() { + /bin/su - -c "( export PATH=\"$PATH:/usr/lib/java/bin:/usr/lib/java/jre/bin\"; directory start )" +} + +i2p_stop() { + /bin/su - -c "( export PATH=\"$PATH:/usr/lib/java/bin:/usr/lib/java/jre/bin\"; directory stop )" +} + +case "$1" in +'start') + i2p_start + ;; +'stop') + i2p_stop + ;; +'restart') + i2p_stop + i2p_start + ;; +*) + echo "usage $0 start|stop|restart" + ;; +esac + diff --git a/Slackware/i2p-base/readme.txt b/Slackware/i2p-base/readme.txt new file mode 100644 index 000000000..6575387f7 --- /dev/null +++ b/Slackware/i2p-base/readme.txt @@ -0,0 +1,10 @@ +An rc file called rc.i2p has been placed into the /etc/rc.d directory. +If you want to change installation dir, change the variable INSTALL_DIR +on base-i2p.SlackBuild and rebuild the package. You also will need to do the +same for the i2p package. + +The install script will insert everything needed into /etc/rc.d/rc.local and +into /etc/rc.d/rc.local_shutdown automatically. + +If you want to start I2P at boot you have to chmod +x /etc/rc.d/rc.i2p + diff --git a/Slackware/i2p-base/slack-desc b/Slackware/i2p-base/slack-desc new file mode 100644 index 000000000..4e94753a9 --- /dev/null +++ b/Slackware/i2p-base/slack-desc @@ -0,0 +1,19 @@ +# HOW TO EDIT THIS FILE: +# The "handy ruler" below makes it easier to edit a package description. 
Line +# up the first '|' above the ':' following the base package name, and the '|' on +# the right side marks the last column you can put a character in. You must make +# exactly 11 lines for the formatting to be correct. It's also customary to +# leave one space after the ':'. + + |-----handy-ruler------------------------------------------------------| +base-i2p: base-i2p (I2P anonymizing network base config files) +base-i2p: +base-i2p: I2P is an anonymizing network, offering a simple layer that +base-i2p: identity-sensitive applications can use to securely communicate. All +base-i2p: data is wrapped with several layers of encryption, and the network is +base-i2p: both distributed and dynamic, with no trusted parties. +base-i2p: Many applications are available that interface with I2P, including +base-i2p: mail, peer-peer file sharing, IRC chat, and others. +base-i2p: +base-i2p: This package provides the startup files. +base-i2p: diff --git a/Slackware/i2p-base/slack-required b/Slackware/i2p-base/slack-required new file mode 100644 index 000000000..7a7220e75 --- /dev/null +++ b/Slackware/i2p-base/slack-required @@ -0,0 +1 @@ +bash >= 3.1.017 diff --git a/Slackware/i2p/build.xml b/Slackware/i2p/build.xml new file mode 100644 index 000000000..0683bdeb0 --- /dev/null +++ b/Slackware/i2p/build.xml @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/Slackware/i2p/doinst.sh b/Slackware/i2p/doinst.sh new file mode 100644 index 000000000..91dd09721 --- /dev/null +++ b/Slackware/i2p/doinst.sh @@ -0,0 +1,67 @@ +#!/bin/sh + +INST_DIR=directory + +( cd install + +echo +for i in *.config ; { + if [ -f $INST_DIR/$i ] ; then + echo "Please check ${INST_DIR}${i}, as there is a new version." + cp $i $INST_DIR/$i.new + else + cp $i $INST_DIR/$i + fi +} + +) + +( cd $INST_DIR + if [ -f blocklist.txt ] ; then + echo "Please check ${INST_DIR}blocklist.txt, as there is a new version." 
+ else + mv blocklist.txt.new blocklist.txt + fi +) + +( cd $INST_DIR/eepsite + if [ -f jetty.xml ] ; then + rm jetty.xml.new + else + mv jetty.xml.new jetty.xml + fi +) + +( cd $INST_DIR/eepsite/docroot + if [ -f index.html ] ; then + rm index.html.new + else + mv index.html.new index.html + fi + if [ -f favicon.ico ] ; then + rm favicon.ico.new + else + mv favicon.ico.new favicon.ico + fi +) + +echo +echo "FINISHING I2P INSTALLATION. PLEASE WAIT." + +cd $INST_DIR +sh postinstall.sh || ( + echo "ERROR: failed execution of postinstall.sh. Please" + echo "cd into i2p installation directory and run " + echo "postinstall.sh manually with ./postinstall.sh" + exit 1 +) + +sleep 10 + +sh i2prouter stop || exit 1 + +echo +echo "Installation finished." +echo + +exit diff --git a/Slackware/i2p/i2p.SlackBuild b/Slackware/i2p/i2p.SlackBuild new file mode 100755 index 000000000..421033356 --- /dev/null +++ b/Slackware/i2p/i2p.SlackBuild @@ -0,0 +1,88 @@ +#!/bin/sh +# Heavily based on the Slackware 12.1 SlackBuild +# Slackware build script for i2p + +# PLEASE READ THIS: +# Probably you will never have to update i2p packages with upgradepkg, +# just because i2p have an auto-update function. +# How to start i2p: +# After installpkg command, doinst.sh will execute a postinstallation script +# needed by i2p. After that you have to chmod +x /etc/rc.d/rc.i2p and start +# i2p service with /etc/rc.d/rc.i2p start. +# Now tell your browser to user this proxy: localhost on port 4444 and open +# this page: http://localhost:7657/index.jsp +# Here you can configure i2p, watch network status and navigate anonimously. +# It's suggested to subscribe to various dns host, like i2host.i2p +# For any additional information, visit i2host.i2p and forum.i2p + +BUILD=1sim + +# put here installation dir, without first and last / +# es: usr/local +INSTALL_DIR=opt +NAME=i2p +ARCH=noarch + + +# +# This mess is here due to the totally moronic way i2p does versioning. +# We correct it here. 
+# +ROUTER=$(echo -ne "_")$(cat ../../router/java/src/net/i2p/router/RouterVersion.java | grep -e "public final static long BUILD" | cut -f2 -d"=" | cut -f1 -d";" | sed -re "s/ //g") +if [ "$ROUTER" == "_" ] ; then + ROUTER="_0" +fi + +# +# That was the easy one, now for the tough one. +# + +CORE=$(cat ../../core/java/src/net/i2p/CoreVersion.java | grep -e "public final static String VERSION" | cut -f2 -d'"' | sed -re "s/ //g") +CORE1=$(echo -n $CORE.x.x | sed -re "s/(.*)\.(.*)\.(.*)\.(.*)/\1/") +CORE2=$(echo -n $CORE.x | sed -re "s/(.*)\.(.*)\.(.*)\.(.*)/\1/") + +if [ "$CORE.x.x" == "$CORE1" ] ; then + CORE=$(echo -ne $CORE".0.0") +fi +if [ "$CORE.x" == "$CORE2" ] ; then + CORE=$(echo -ne $CORE".0") +fi + +VERSION=$(echo $CORE$ROUTER) +# +# Whew! +# OK, let's build i2p +# + +CWD=$(pwd) +TMP=/tmp + +PKG=$TMP/package-i2p +rm -rf $PKG +mkdir -p $PKG + +cd $CWD/../../ + +ant distclean +ant dist + + +tar xjvf i2p.tar.bz2 -C $TMP + +cd $TMP/i2p +chown -R root:root . + +mkdir -p $PKG/$INSTALL_DIR/ +cp -a ../i2p $PKG/$INSTALL_DIR/ + +mkdir -p $PKG/install +mv $PKG/$INSTALL_DIR/i2p/*.config $PKG/install +mv $PKG/$INSTALL_DIR/i2p/blocklist.txt $PKG/$INSTALL_DIR/i2p/blocklist.txt.new +mv $PKG/$INSTALL_DIR/i2p/eepsite/jetty.xml $PKG/$INSTALL_DIR/i2p/eepsite/jetty.xml.new +mv $PKG/$INSTALL_DIR/i2p/eepsite/docroot/index.html $PKG/$INSTALL_DIR/i2p/eepsite/docroot/index.html.new +mv $PKG/$INSTALL_DIR/i2p/eepsite/docroot/favicon.ico $PKG/$INSTALL_DIR/i2p/eepsite/docroot/favicon.ico.new +sed "s|directory|/$INSTALL_DIR/i2p/|g" $CWD/doinst.sh > $PKG/install/doinst.sh +cat $CWD/slack-desc > $PKG/install/slack-desc +cd $PKG +requiredbuilder -v -y -s $CWD $PKG +makepkg -l y -c n $CWD/${NAME}-$VERSION-$ARCH-$BUILD.tgz diff --git a/Slackware/i2p/readme.txt b/Slackware/i2p/readme.txt new file mode 100644 index 000000000..2c6d4fa50 --- /dev/null +++ b/Slackware/i2p/readme.txt @@ -0,0 +1,47 @@ +Building: +The i2p package will be installed in /opt/i2p + +If you want to change installation 
dir, change the variable INSTALL_DIR +on i2p.SlackBuild and rebuild the package. You will also need to do the same +in the base-i2p package. + +Installation and Upgrade: +Probably you will never have to update i2p packages. However if you do, +be sure to installpkg first, then removepkg or custom config files can +be lost with upgradepkg. I2P has an auto-update function. However using +installpkg then removepkg lowers the demand on the I2P network as a +whole, and is by far faster. + +After installpkg command, doinst.sh will execute a postinstallation script +needed by I2P. Be sure to also install the base-i2p package. + +Optional: + +chmod +x /etc/rc.d/rc.i2p only if you want it to start on boot and stop on +shutdown. + +How to start I2P: + +Start I2P service with- +sh /etc/rc.d/rc.i2p start + +Now tell your browser to use this proxy: localhost on port 4444 and open +this page: http://localhost:7657/index.jsp +Here you can configure I2P, watch network status and navigate anonymously. +It's suggested to subscribe to various addressbook hosts so that you can +get to the many available eepsites and other services on I2P. These are not +set up by default for security reasons. + +Please see the faqs on http://www.i2p2.i2p/ or http://www.i2p2.de/ on how +to subscribe to the various addressbook services. + +To stop I2P: + /etc/rc.d/rc.i2p stop + + +For any additional information: + +Within I2P- http://www.i2p2.i2p/, http://forum.i2p/, http://zzz.i2p + +Internet (not recommended!) - http://www.i2p2.de/, http://forum.i2p2.de/ + diff --git a/Slackware/i2p/slack-desc b/Slackware/i2p/slack-desc new file mode 100644 index 000000000..281e5e894 --- --- /dev/null +++ b/Slackware/i2p/slack-desc @@ -0,0 +1,19 @@ +# HOW TO EDIT THIS FILE: +# The "handy ruler" below makes it easier to edit a package description. Line +# up the first '|' above the ':' following the base package name, and the '|' on +# the right side marks the last column you can put a character in. 
You must make +# exactly 11 lines for the formatting to be correct. It's also customary to +# leave one space after the ':'. + + |-----handy-ruler----------------------------------------------------------| +i2p: i2p (an anonymizing network) +i2p: +i2p: I2P is an anonymizing network, offering a simple layer that +i2p: identity-sensitive applications can use to securely communicate. All +i2p: data is wrapped with several layers of encryption, and the network is +i2p: both distributed and dynamic, with no trusted parties. +i2p: Many applications are available that interface with I2P, including +i2p: mail, peer-peer file sharing, IRC chat, and others. +i2p: WARNING: To upgrade installpkg FIRST _THEN_ removepkg. +i2p: For more information, see: http://www.i2p2.de/ +i2p: diff --git a/Slackware/i2p/slack-required b/Slackware/i2p/slack-required new file mode 100644 index 000000000..3dcf36221 --- /dev/null +++ b/Slackware/i2p/slack-required @@ -0,0 +1,2 @@ +glibc >= 2.7-i486-17 | glibc-solibs >= 2.7-i486-17 +perl >= 5.10.0-i486-1 diff --git a/apps/BOB/bob.config b/apps/BOB/bob.config new file mode 100644 index 000000000..f9c28d382 --- /dev/null +++ b/apps/BOB/bob.config @@ -0,0 +1,14 @@ +#bob.config +#Tue Dec 30 00:00:00 UTC 2008 +# Please leave this file here for testing. 
+# Thank you, +# Sponge +i2cp.tcp.port=7654 +BOB.host=localhost +inbound.lengthVariance=0 +i2cp.messageReliability=BestEffort +BOB.port=45067 +outbound.length=1 +inbound.length=1 +outbound.lengthVariance=0 +i2cp.tcp.host=localhost diff --git a/apps/BOB/nbproject/project.properties b/apps/BOB/nbproject/project.properties index aa2df5ffd..76e318ff0 100644 --- a/apps/BOB/nbproject/project.properties +++ b/apps/BOB/nbproject/project.properties @@ -1,5 +1,11 @@ application.title=BOB application.vendor=root +auxiliary.org-netbeans-modules-editor-indent.CodeStyle.project.expand-tabs=false +auxiliary.org-netbeans-modules-editor-indent.CodeStyle.project.indent-shift-width=8 +auxiliary.org-netbeans-modules-editor-indent.CodeStyle.project.spaces-per-tab=8 +auxiliary.org-netbeans-modules-editor-indent.CodeStyle.project.tab-size=8 +auxiliary.org-netbeans-modules-editor-indent.CodeStyle.project.text-limit-width=80 +auxiliary.org-netbeans-modules-editor-indent.CodeStyle.usedProfile=project build.classes.dir=${build.dir}/classes build.classes.excludes=**/*.java,**/*.form # This directory is removed when the project is cleaned: @@ -76,6 +82,12 @@ javadoc.splitindex=true javadoc.use=true javadoc.version=false javadoc.windowtitle= +jnlp.codebase.type=local +jnlp.codebase.url=file:/root/NetBeansProjects/i2p.i2p/apps/BOB/dist/ +jnlp.descriptor=application +jnlp.enabled=false +jnlp.offline-allowed=false +jnlp.signed=false main.class=net.i2p.BOB.Main manifest.file=manifest.mf meta.inf.dir=${src.dir}/META-INF diff --git a/apps/BOB/src/net/i2p/BOB/BOB.java b/apps/BOB/src/net/i2p/BOB/BOB.java index c4a4f7539..59b46b8d7 100644 --- a/apps/BOB/src/net/i2p/BOB/BOB.java +++ b/apps/BOB/src/net/i2p/BOB/BOB.java @@ -34,7 +34,6 @@ import java.util.Properties; import net.i2p.client.I2PClient; import net.i2p.client.streaming.RetransmissionTimer; import net.i2p.util.Log; -import net.i2p.util.SimpleTimer; /** * * ################################################################################
@@ -114,6 +113,8 @@ public class BOB { public final static String PROP_BOB_HOST = "BOB.host"; private static int maxConnections = 0; private static NamedDB database; + private static Properties props = new Properties(); + /** * Log a warning @@ -157,11 +158,10 @@ public class BOB { // Set up all defaults to be passed forward to other threads. // Re-reading the config file in each thread is pretty damn stupid. // I2PClient client = I2PClientFactory.createClient(); - Properties props = new Properties(); String configLocation = System.getProperty(PROP_CONFIG_LOCATION, "bob.config"); // This is here just to ensure there is no interference with our threadgroups. - SimpleTimer Y = RetransmissionTimer.getInstance(); + RetransmissionTimer Y = RetransmissionTimer.getInstance(); i = Y.hashCode(); { try { @@ -216,6 +216,7 @@ public class BOB { } } + i = 0; try { info("BOB is now running."); ServerSocket listener = new ServerSocket(Integer.parseInt(props.getProperty(PROP_BOB_PORT)), 10, InetAddress.getByName(props.getProperty(PROP_BOB_HOST))); diff --git a/apps/BOB/src/net/i2p/BOB/DoCMDS.java b/apps/BOB/src/net/i2p/BOB/DoCMDS.java index bb29b1394..099d69fec 100644 --- a/apps/BOB/src/net/i2p/BOB/DoCMDS.java +++ b/apps/BOB/src/net/i2p/BOB/DoCMDS.java @@ -46,7 +46,7 @@ public class DoCMDS implements Runnable { // FIX ME // I need a better way to do versioning, but this will do for now. - public static final String BMAJ = "00", BMIN = "00", BREV = "02", BEXT = ""; + public static final String BMAJ = "00", BMIN = "00", BREV = "04", BEXT = ""; public static final String BOBversion = BMAJ + "." + BMIN + "." 
+ BREV + BEXT; private Socket server; private Properties props; @@ -89,6 +89,7 @@ public class DoCMDS implements Runnable { private static final String C_setkeys = "setkeys"; private static final String C_setnick = "setnick"; private static final String C_show = "show"; + private static final String C_show_props = "showprops"; private static final String C_start = "start"; private static final String C_status = "status"; private static final String C_stop = "stop"; @@ -113,32 +114,34 @@ public class DoCMDS implements Runnable { {C_setkeys, C_setkeys + " BASE64_keypair * Sets the keypair for the current nickname."}, {C_setnick, C_setnick + " nickname * Create a new nickname."}, {C_show, C_show + " * Display the status of the current nickname."}, + {C_show_props, C_show_props + " * Display the properties of the current nickname."}, {C_start, C_start + " * Start the current nickname tunnel."}, {C_status, C_status + " nickname * Display status of a nicknamed tunnel."}, {C_stop, C_stop + " * Stops the current nicknamed tunnel."}, {C_verify, C_verify + " BASE64_key * Verifies BASE64 destination."}, {"", "COMMANDS: " + // this is ugly, but... 
- C_help + " " + - C_clear + " " + - C_getdest + " " + - C_getkeys + " " + - C_getnick + " " + - C_inhost + " " + - C_inport + " " + - C_list + " " + - C_newkeys + " " + - C_option + " " + - C_outhost + " " + - C_outport + " " + - C_quiet + " " + - C_quit + " " + - C_setkeys + " " + - C_setnick + " " + - C_show + " " + - C_start + " " + - C_status + " " + - C_stop + " " + - C_verify + C_help + " " + + C_clear + " " + + C_getdest + " " + + C_getkeys + " " + + C_getnick + " " + + C_inhost + " " + + C_inport + " " + + C_list + " " + + C_newkeys + " " + + C_option + " " + + C_outhost + " " + + C_outport + " " + + C_quiet + " " + + C_quit + " " + + C_setkeys + " " + + C_setnick + " " + + C_show + " " + + C_show_props + " " + + C_start + " " + + C_status + " " + + C_stop + " " + + C_verify }, {" ", " "} // end of list }; @@ -152,9 +155,10 @@ public class DoCMDS implements Runnable { */ DoCMDS(Socket server, Properties props, NamedDB database, Log _log) { this.server = server; - this.props = new Properties(props); + this.props = new Properties(); this.database = database; this._log = _log; + Lifted.copyProperties(props, this.props); } private void rlock() throws Exception { @@ -204,17 +208,17 @@ public class DoCMDS implements Runnable { private void trypnt(PrintStream out, NamedDB info, Object key) throws Exception { try { rlock(info); - } catch(Exception e) { + } catch (Exception e) { throw new Exception(e); } try { out.print(" " + key + ": "); - if(info.exists(key)) { + if (info.exists(key)) { out.print(info.get(key)); } else { out.print("not_set"); } - } catch(Exception e) { + } catch (Exception e) { runlock(info); throw new Exception(e); } @@ -232,13 +236,13 @@ public class DoCMDS implements Runnable { private void tfpnt(PrintStream out, NamedDB info, Object key) throws Exception { try { rlock(info); - } catch(Exception e) { + } catch (Exception e) { throw new Exception(e); } try { out.print(" " + key + ": "); out.print(info.exists(key)); - } catch(Exception e) { + } 
catch (Exception e) { runlock(info); throw new Exception(e); } @@ -264,7 +268,7 @@ public class DoCMDS implements Runnable { private void nickprint(PrintStream out, NamedDB info) throws Exception { try { rlock(info); - } catch(Exception e) { + } catch (Exception e) { throw new Exception(e); } try { @@ -280,7 +284,32 @@ public class DoCMDS implements Runnable { trypnt(out, info, P_OUTPORT); trypnt(out, info, P_OUTHOST); out.println(); - } catch(Exception e) { + } catch (Exception e) { + runlock(info); + throw new Exception(e); + } + + runlock(info); + } + + /** + * Dump properties information from the database + * + * @param out + * @param info + * @throws Exception + */ + private void propprint(PrintStream out, NamedDB info) throws Exception { + try { + rlock(info); + } catch (Exception e) { + throw new Exception(e); + } + try { + + trypnt(out, info, P_PROPERTIES); + out.println(); + } catch (Exception e) { runlock(info); throw new Exception(e); } @@ -297,16 +326,16 @@ public class DoCMDS implements Runnable { private void ttlpnt(PrintStream out, Object Arg) throws Exception { try { database.getReadLock(); - } catch(Exception e) { + } catch (Exception e) { throw new Exception(e); } try { - if(database.exists(Arg)) { + if (database.exists(Arg)) { out.print("DATA"); - nickprint(out, (NamedDB)database.get(Arg)); + nickprint(out, (NamedDB) database.get(Arg)); } - } catch(Exception e) { + } catch (Exception e) { database.releaseReadLock(); throw new Exception(e); } @@ -325,7 +354,7 @@ public class DoCMDS implements Runnable { boolean retval; try { rlock(Arg); - } catch(Exception e) { + } catch (Exception e) { throw new Exception(e); } @@ -333,7 +362,7 @@ public class DoCMDS implements Runnable { retval = (Arg.get(P_STARTING).equals(Boolean.TRUE) || Arg.get(P_STOPPING).equals(Boolean.TRUE) || Arg.get(P_RUNNING).equals(Boolean.TRUE)); - } catch(Exception e) { + } catch (Exception e) { runlock(); throw new Exception(e); } @@ -352,7 +381,7 @@ public class DoCMDS implements 
Runnable { try { Destination x = new Destination(data); return true; - } catch(Exception e) { + } catch (Exception e) { return false; } } @@ -369,50 +398,52 @@ public class DoCMDS implements Runnable { // Get input from the client BufferedReader in = new BufferedReader(new InputStreamReader(server.getInputStream())); PrintStream out = new PrintStream(server.getOutputStream()); -quit: { -die: { + quit: + { + die: + { prikey = new ByteArrayOutputStream(); out.println("BOB " + BOBversion); out.println("OK"); - while((line = in.readLine()) != null) { + while ((line = in.readLine()) != null) { StringTokenizer token = new StringTokenizer(line, " "); // use a space as a delimiter String Command = ""; String Arg = ""; NamedDB info; - if(token.countTokens() != 0) { + if (token.countTokens() != 0) { Command = token.nextToken(); Command = Command.toLowerCase(); - if(token.countTokens() != 0) { + if (token.countTokens() != 0) { Arg = token.nextToken(); } else { Arg = ""; } // The rest of the tokens are considered junk, // and discarded without any warnings. 
- if(Command.equals(C_help)) { - for(int i = 0; !C_ALL[i][0].equals(" "); i++) { - if(C_ALL[i][0].equalsIgnoreCase(Arg)) { + if (Command.equals(C_help)) { + for (int i = 0; !C_ALL[i][0].equals(" "); i++) { + if (C_ALL[i][0].equalsIgnoreCase(Arg)) { out.println("OK " + C_ALL[i][1]); } } - } else if(Command.equals(C_getdest)) { - if(ns) { - if(dk) { + } else if (Command.equals(C_getdest)) { + if (ns) { + if (dk) { try { rlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } try { out.println("OK " + nickinfo.get(P_DEST)); - } catch(Exception e) { + } catch (Exception e) { try { runlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } break die; @@ -420,7 +451,7 @@ die: { try { runlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } @@ -432,31 +463,31 @@ die: { nns(out); } - } else if(Command.equals(C_list)) { + } else if (Command.equals(C_list)) { // Produce a formatted list of all nicknames database.getReadLock(); - for(int i = 0; i < + for (int i = 0; i < database.getcount(); i++) { try { - info = (NamedDB)database.getnext(i); + info = (NamedDB) database.getnext(i); out.print("DATA"); - } catch(Exception e) { + } catch (Exception e) { database.releaseReadLock(); break die; } try { info.getReadLock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } try { nickprint(out, info); - } catch(Exception e) { + } catch (Exception e) { try { info.releaseReadLock(); database.releaseReadLock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } break die; @@ -464,24 +495,24 @@ die: { try { info.releaseReadLock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } } try { database.releaseReadLock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } out.println("OK Listing done"); - } else if(Command.equals(C_quit)) { + } else if (Command.equals(C_quit)) { // End the command session break quit; - } else if(Command.equals(C_newkeys)) { - if(ns) { + } else if 
(Command.equals(C_newkeys)) { + if (ns) { try { - if(tunnelactive(nickinfo)) { + if (tunnelactive(nickinfo)) { out.println("ERROR tunnel is active"); } else { try { @@ -490,17 +521,17 @@ die: { d = I2PClientFactory.createClient().createDestination(prikey); try { wlock(); - } catch(Exception e) { + } catch (Exception e) { break die; } try { nickinfo.add(P_KEYS, prikey.toByteArray()); nickinfo.add(P_DEST, d.toBase64()); - } catch(Exception e) { + } catch (Exception e) { try { wunlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } break die; @@ -509,68 +540,68 @@ die: { dk = true; try { wunlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } try { rlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } try { out.println("OK " + nickinfo.get(P_DEST)); - } catch(Exception e) { + } catch (Exception e) { runlock(); break die; } try { runlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } - } catch(I2PException ipe) { + } catch (I2PException ipe) { BOB.error("Error generating keys" + ipe); out.println("ERROR generating keys"); } } - } catch(Exception e) { + } catch (Exception e) { break die; } } else { try { nns(out); - } catch(Exception ex) { + } catch (Exception ex) { break die; } } - } else if(Command.equals(C_getkeys)) { + } else if (Command.equals(C_getkeys)) { // Return public key - if(dk) { + if (dk) { prikey = new ByteArrayOutputStream(); try { rlock(); - } catch(Exception e) { + } catch (Exception e) { break die; } try { - prikey.write(((byte[])nickinfo.get(P_KEYS))); - } catch(Exception ex) { + prikey.write(((byte[]) nickinfo.get(P_KEYS))); + } catch (Exception ex) { try { runlock(); - } catch(Exception ee) { + } catch (Exception ee) { break die; } break die; } try { runlock(); - } catch(Exception e) { + } catch (Exception e) { break die; } @@ -579,23 +610,23 @@ die: { out.println("ERROR no public key has been set"); } - } else if(Command.equals(C_quiet)) { - if(ns) { + } else if 
(Command.equals(C_quiet)) { + if (ns) { try { - if(tunnelactive(nickinfo)) { + if (tunnelactive(nickinfo)) { out.println("ERROR tunnel is active"); } else { try { wlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } try { nickinfo.add(P_QUIET, new Boolean(Boolean.parseBoolean(Arg) == true)); - } catch(Exception ex) { + } catch (Exception ex) { try { wunlock(); - } catch(Exception ee) { + } catch (Exception ee) { break die; } break die; @@ -603,59 +634,59 @@ die: { try { wunlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } out.println("OK Quiet set"); } - } catch(Exception ex) { + } catch (Exception ex) { break die; } } else { try { nns(out); - } catch(Exception ex) { + } catch (Exception ex) { break die; } } - }else if(Command.equals(C_verify)) { - if(is64ok(Arg)) { + } else if (Command.equals(C_verify)) { + if (is64ok(Arg)) { out.println("OK"); } else { out.println("ERROR not in BASE64 format"); } - } else if(Command.equals(C_setkeys)) { + } else if (Command.equals(C_setkeys)) { // Set the NamedDB to a privatekey in BASE64 format - if(ns) { + if (ns) { try { - if(tunnelactive(nickinfo)) { + if (tunnelactive(nickinfo)) { out.println("ERROR tunnel is active"); } else { try { prikey = new ByteArrayOutputStream(); prikey.write(net.i2p.data.Base64.decode(Arg)); d.fromBase64(Arg); - } catch(Exception ex) { + } catch (Exception ex) { Arg = ""; } - if((Arg.length() == 884) && is64ok(Arg)) { + if ((Arg.length() == 884) && is64ok(Arg)) { try { wlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } try { nickinfo.add(P_KEYS, prikey.toByteArray()); nickinfo.add(P_DEST, d.toBase64()); - } catch(Exception ex) { + } catch (Exception ex) { try { wunlock(); - } catch(Exception ee) { + } catch (Exception ee) { break die; } break die; @@ -663,22 +694,22 @@ die: { dk = true; try { wunlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } try { rlock(); - } catch(Exception ex) { + } catch (Exception 
ex) { break die; } try { out.println("OK " + nickinfo.get(P_DEST)); - } catch(Exception e) { + } catch (Exception e) { try { runlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } break die; @@ -686,7 +717,7 @@ die: { try { runlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } } else { @@ -694,34 +725,34 @@ die: { } } - } catch(Exception ex) { + } catch (Exception ex) { break die; } } else { try { nns(out); - } catch(Exception ex) { + } catch (Exception ex) { break die; } } - } else if(Command.equals(C_setnick)) { + } else if (Command.equals(C_setnick)) { ns = dk = ip = op = false; try { database.getReadLock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } try { - nickinfo = (NamedDB)database.get(Arg); - if(!tunnelactive(nickinfo)) { + nickinfo = (NamedDB) database.get(Arg); + if (!tunnelactive(nickinfo)) { nickinfo = null; ns = true; } - } catch(Exception b) { + } catch (Exception b) { nickinfo = null; ns = true; @@ -729,15 +760,15 @@ die: { try { database.releaseReadLock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } // Clears and Sets the initial NamedDB structure to work with - if(ns) { + if (ns) { nickinfo = new NamedDB(); try { wlock(); - } catch(Exception e) { + } catch (Exception e) { break die; } @@ -750,22 +781,23 @@ die: { nickinfo.add(P_QUIET, Boolean.FALSE); nickinfo.add(P_INHOST, "localhost"); nickinfo.add(P_OUTHOST, "localhost"); - Properties Q = new Properties(props); + Properties Q = new Properties(); + Lifted.copyProperties(this.props, Q); Q.setProperty("inbound.nickname", Arg); Q.setProperty("outbound.nickname", Arg); nickinfo.add(P_PROPERTIES, Q); - } catch(Exception e) { + } catch (Exception e) { try { wunlock(); break die; - } catch(Exception ee) { + } catch (Exception ee) { break die; } } try { wunlock(); - } catch(Exception e) { + } catch (Exception e) { break die; } @@ -774,51 +806,51 @@ die: { out.println("ERROR tunnel is active"); } - } else 
if(Command.equals(C_option)) { - if(ns) { + } else if (Command.equals(C_option)) { + if (ns) { try { - if(tunnelactive(nickinfo)) { + if (tunnelactive(nickinfo)) { out.println("ERROR tunnel is active"); } else { StringTokenizer otoken = new StringTokenizer(Arg, "="); // use an equal sign as a delimiter - if(otoken.countTokens() != 2) { + if (otoken.countTokens() != 2) { out.println("ERROR to many or no options."); } else { String pname = otoken.nextToken(); String pval = otoken.nextToken(); try { rlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } - Properties Q = (Properties)nickinfo.get(P_PROPERTIES); + Properties Q = (Properties) nickinfo.get(P_PROPERTIES); try { runlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } Q.setProperty(pname, pval); try { wlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } try { nickinfo.add(P_PROPERTIES, Q); - } catch(Exception ex) { + } catch (Exception ex) { try { wunlock(); - } catch(Exception ee) { + } catch (Exception ee) { break die; } break die; } try { wunlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } @@ -826,7 +858,7 @@ die: { } } - } catch(Exception ex) { + } catch (Exception ex) { break die; } @@ -834,23 +866,23 @@ die: { nns(out); } - } else if(Command.equals(C_getnick)) { + } else if (Command.equals(C_getnick)) { // Get the NamedDB to work with... 
try { database.getReadLock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } try { - nickinfo = (NamedDB)database.get(Arg); + nickinfo = (NamedDB) database.get(Arg); ns = true; - } catch(RuntimeException b) { + } catch (RuntimeException b) { try { nns(out); - } catch(Exception ex) { + } catch (Exception ex) { try { database.releaseReadLock(); - } catch(Exception ee) { + } catch (Exception ee) { break die; } break die; @@ -858,54 +890,54 @@ die: { } database.releaseReadLock(); - if(ns) { + if (ns) { try { rlock(); - } catch(Exception e) { + } catch (Exception e) { break die; } try { dk = nickinfo.exists(P_KEYS); ip = nickinfo.exists(P_INPORT); op = nickinfo.exists(P_OUTPORT); - } catch(Exception ex) { + } catch (Exception ex) { try { runlock(); - } catch(Exception ee) { + } catch (Exception ee) { break die; } break die; } try { runlock(); - } catch(Exception e) { + } catch (Exception e) { break die; } // Finally say OK. out.println("OK Nickname set to " + Arg); } - } else if(Command.equals(C_inport)) { + } else if (Command.equals(C_inport)) { // Set the NamedDB inbound TO the router port // app --> BOB - if(ns) { + if (ns) { try { - if(tunnelactive(nickinfo)) { + if (tunnelactive(nickinfo)) { out.println("ERROR tunnel is active"); } else { int prt; try { wlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } try { nickinfo.kill(P_INPORT); - } catch(Exception ex) { + } catch (Exception ex) { try { wunlock(); - } catch(Exception ee) { + } catch (Exception ee) { break die; } @@ -913,13 +945,13 @@ die: { } try { prt = Integer.parseInt(Arg); - if(prt > 1 && prt < 65536) { + if (prt > 1 && prt < 65536) { try { nickinfo.add(P_INPORT, new Integer(prt)); - } catch(Exception ex) { + } catch (Exception ex) { try { wunlock(); - } catch(Exception ee) { + } catch (Exception ee) { break die; } @@ -927,45 +959,45 @@ die: { } } - } catch(NumberFormatException nfe) { + } catch (NumberFormatException nfe) { out.println("ERROR not a number"); } 
try { wunlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } try { rlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } try { ip = nickinfo.exists(P_INPORT); - } catch(Exception ex) { + } catch (Exception ex) { try { runlock(); - } catch(Exception ee) { + } catch (Exception ee) { break die; } break die; } try { runlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } - if(ip) { + if (ip) { out.println("OK inbound port set"); } else { out.println("ERROR port out of range"); } } - } catch(Exception ex) { + } catch (Exception ex) { break die; } @@ -973,196 +1005,196 @@ die: { nns(out); } - } else if(Command.equals(C_outport)) { + } else if (Command.equals(C_outport)) { // Set the NamedDB outbound FROM the router port // BOB --> app - if(ns) { + if (ns) { try { - if(tunnelactive(nickinfo)) { + if (tunnelactive(nickinfo)) { out.println("ERROR tunnel is active"); } else { int prt; try { wlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } try { nickinfo.kill(P_OUTPORT); - } catch(Exception ex) { + } catch (Exception ex) { try { wunlock(); - } catch(Exception ee) { + } catch (Exception ee) { break die; } break die; } try { prt = Integer.parseInt(Arg); - if(prt > 1 && prt < 65536) { + if (prt > 1 && prt < 65536) { try { nickinfo.add(P_OUTPORT, new Integer(prt)); - } catch(Exception ex) { + } catch (Exception ex) { try { wunlock(); - } catch(Exception ee) { + } catch (Exception ee) { break die; } break die; } } - } catch(NumberFormatException nfe) { + } catch (NumberFormatException nfe) { out.println("ERROR not a number"); } try { wunlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } try { rlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } try { ip = nickinfo.exists(P_OUTPORT); - } catch(Exception ex) { + } catch (Exception ex) { try { runlock(); - } catch(Exception ee) { + } catch (Exception ee) { break die; } break die; } try { runlock(); - 
} catch(Exception ex) { + } catch (Exception ex) { break die; } - if(ip) { + if (ip) { out.println("OK outbound port set"); } else { out.println("ERROR port out of range"); } } - } catch(Exception ex) { + } catch (Exception ex) { break die; } } else { try { nns(out); - } catch(Exception ex) { + } catch (Exception ex) { break die; } } - } else if(Command.equals(C_inhost)) { - if(ns) { + } else if (Command.equals(C_inhost)) { + if (ns) { try { - if(tunnelactive(nickinfo)) { + if (tunnelactive(nickinfo)) { out.println("ERROR tunnel is active"); } else { try { wlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } try { nickinfo.add(P_INHOST, Arg); - } catch(Exception ex) { + } catch (Exception ex) { try { wunlock(); - } catch(Exception ee) { + } catch (Exception ee) { break die; } break die; } try { wunlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } out.println("OK inhost set"); } - } catch(Exception ex) { + } catch (Exception ex) { break die; } } else { try { nns(out); - } catch(Exception ex) { + } catch (Exception ex) { break die; } } - } else if(Command.equals(C_outhost)) { - if(ns) { + } else if (Command.equals(C_outhost)) { + if (ns) { try { - if(tunnelactive(nickinfo)) { + if (tunnelactive(nickinfo)) { out.println("ERROR tunnel is active"); } else { try { wlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } try { nickinfo.add(P_OUTHOST, Arg); - } catch(Exception ex) { + } catch (Exception ex) { try { wunlock(); - } catch(Exception ee) { + } catch (Exception ee) { break die; } break die; } try { wunlock(); - } catch(Exception ex) { + } catch (Exception ex) { break die; } out.println("OK outhost set"); } - } catch(Exception ex) { + } catch (Exception ex) { break die; } } else { try { nns(out); - } catch(Exception ex) { + } catch (Exception ex) { break die; } } - } else if(Command.equals(C_show)) { + } else if (Command.equals(C_show)) { // Get the current NamedDB properties - if(ns) { + if (ns) { 
out.print("OK"); try { rlock(); - } catch(Exception e) { + } catch (Exception e) { break die; } try { nickprint(out, nickinfo); - } catch(Exception e) { + } catch (Exception e) { try { runlock(); - } catch(Exception ee) { + } catch (Exception ee) { break die; } @@ -1172,23 +1204,60 @@ die: { try { runlock(); - } catch(Exception e) { + } catch (Exception e) { break die; } } else { try { nns(out); - } catch(Exception e) { + } catch (Exception e) { break die; } } - } else if(Command.equals(C_start)) { - // Start the tunnel, if we have all the information - if(ns && dk && (ip || op)) { + } else if (Command.equals(C_show_props)) { + // Get the current options properties + if (ns) { + out.print("OK"); try { - if(tunnelactive(nickinfo)) { + rlock(); + } catch (Exception e) { + break die; + } + + try { + propprint(out, nickinfo); + } catch (Exception e) { + try { + runlock(); + } catch (Exception ee) { + break die; + } + + out.println(); // this will cause an IOE if IOE + break die; + } + + try { + runlock(); + } catch (Exception e) { + break die; + } + + } else { + try { + nns(out); + } catch (Exception e) { + break die; + } + } + + } else if (Command.equals(C_start)) { + // Start the tunnel, if we have all the information + if (ns && dk && (ip || op)) { + try { + if (tunnelactive(nickinfo)) { out.println("ERROR tunnel is active"); } else { MUXlisten tunnel; @@ -1197,14 +1266,14 @@ die: { Thread t = new Thread(tunnel); t.start(); out.println("OK tunnel starting"); - } catch(I2PException e) { + } catch (I2PException e) { out.println("ERROR starting tunnel: " + e); - } catch(IOException e) { + } catch (IOException e) { out.println("ERROR starting tunnel: " + e); } } - } catch(Exception ex) { + } catch (Exception ex) { break die; } @@ -1212,26 +1281,26 @@ die: { out.println("ERROR tunnel settings incomplete"); } - } else if(Command.equals(C_stop)) { + } else if (Command.equals(C_stop)) { // Stop the tunnel, if it is running - if(ns) { + if (ns) { try { rlock(); - } 
catch(Exception e) { + } catch (Exception e) { break die; } try { - if(nickinfo.get(P_RUNNING).equals(Boolean.TRUE) && nickinfo.get(P_STOPPING).equals(Boolean.FALSE) && nickinfo.get(P_STARTING).equals(Boolean.FALSE)) { + if (nickinfo.get(P_RUNNING).equals(Boolean.TRUE) && nickinfo.get(P_STOPPING).equals(Boolean.FALSE) && nickinfo.get(P_STARTING).equals(Boolean.FALSE)) { try { runlock(); - } catch(Exception e) { + } catch (Exception e) { break die; } try { wlock(); - } catch(Exception e) { + } catch (Exception e) { break die; } @@ -1239,7 +1308,7 @@ die: { try { wunlock(); - } catch(Exception e) { + } catch (Exception e) { break die; } @@ -1247,16 +1316,16 @@ die: { } else { try { runlock(); - } catch(Exception e) { + } catch (Exception e) { break die; } out.println("ERROR tunnel is inactive"); } - } catch(Exception e) { + } catch (Exception e) { try { runlock(); - } catch(Exception ee) { + } catch (Exception ee) { break die; } break die; @@ -1265,62 +1334,62 @@ die: { } else { try { nns(out); - } catch(Exception e) { + } catch (Exception e) { break die; } } - } else if(Command.equals(C_clear)) { + } else if (Command.equals(C_clear)) { // Clear use of the NamedDB if stopped - if(ns) { + if (ns) { try { - if(tunnelactive(nickinfo)) { + if (tunnelactive(nickinfo)) { out.println("ERROR tunnel is active"); } else { try { database.getWriteLock(); - } catch(Exception e) { + } catch (Exception e) { break die; } try { database.kill(nickinfo.get(P_NICKNAME)); - } catch(Exception e) { + } catch (Exception e) { try { database.releaseWriteLock(); - } catch(Exception ee) { + } catch (Exception ee) { break die; } break die; } try { database.releaseWriteLock(); - } catch(Exception e) { + } catch (Exception e) { break die; } dk = ns = ip = op = false; out.println("OK cleared"); } - } catch(Exception ex) { + } catch (Exception ex) { break die; } } else { try { nns(out); - } catch(Exception e) { + } catch (Exception e) { break die; } } - } else if(Command.equals(C_status)) { + } else 
if (Command.equals(C_status)) { try { - if(database.exists(Arg)) { + if (database.exists(Arg)) { // Show status of a NamedDB out.print("OK "); try { ttlpnt(out, Arg); - } catch(Exception e) { + } catch (Exception e) { out.println(); // this will cause an IOE if IOE break die; } @@ -1328,11 +1397,11 @@ die: { } else { try { nns(out); - } catch(Exception e) { + } catch (Exception e) { break die; } } - } catch(Exception e) { + } catch (Exception e) { break die; } @@ -1350,7 +1419,7 @@ die: { out.println("OK Bye!"); server.close(); - } catch(IOException ioe) { + } catch (IOException ioe) { BOB.warn("IOException on socket listen: " + ioe); ioe.printStackTrace(); } diff --git a/apps/BOB/src/net/i2p/BOB/I2Plistener.java b/apps/BOB/src/net/i2p/BOB/I2Plistener.java index 1561b7a22..c59683270 100644 --- a/apps/BOB/src/net/i2p/BOB/I2Plistener.java +++ b/apps/BOB/src/net/i2p/BOB/I2Plistener.java @@ -70,7 +70,7 @@ public class I2Plistener implements Runnable { boolean g = false; I2PSocket sessSocket = null; - serverSocket.setSoTimeout(100); + serverSocket.setSoTimeout(50); database.getReadLock(); info.getReadLock(); if(info.exists("INPORT")) { diff --git a/apps/BOB/src/net/i2p/BOB/Lifted.java b/apps/BOB/src/net/i2p/BOB/Lifted.java new file mode 100644 index 000000000..fbd23cba5 --- /dev/null +++ b/apps/BOB/src/net/i2p/BOB/Lifted.java @@ -0,0 +1,56 @@ +/** + * DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + * Version 2, December 2004 + * + * Copyright (C) sponge + * Planet Earth + * Everyone is permitted to copy and distribute verbatim or modified + * copies of this license document, and changing it is allowed as long + * as the name is changed. + * + * DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + * TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + * + * 0. You just DO WHAT THE FUCK YOU WANT TO. + * + * See... + * + * http://sam.zoy.org/wtfpl/ + * and + * http://en.wikipedia.org/wiki/WTFPL + * + * ...for any additional details and liscense questions. 
+ */ +package net.i2p.BOB; + +import java.util.Enumeration; +import java.util.Properties; + +/** + * Sets of "friendly" utilities to make life easier. + * Any "Lifted" code will apear here, and credits given. + * It's better to "Lift" a small chunk of "free" code than add in piles of + * code we don't need, and don't want. + * + * @author sponge + */ +public class Lifted { + + /** + * Copy a set of properties from one Property to another. + * Lifted from Apache Derby code svn repository. + * Liscenced as follows: + * http://svn.apache.org/repos/asf/db/derby/code/trunk/LICENSE + * + * @param src_prop Source set of properties to copy from. + * @param dest_prop Dest Properties to copy into. + * + **/ + public static void copyProperties(Properties src_prop, Properties dest_prop) { + for (Enumeration propertyNames = src_prop.propertyNames(); + propertyNames.hasMoreElements();) { + Object key = propertyNames.nextElement(); + dest_prop.put(key, src_prop.get(key)); + } + } +} diff --git a/apps/BOB/src/net/i2p/BOB/MUXlisten.java b/apps/BOB/src/net/i2p/BOB/MUXlisten.java index 7694803d6..89ab53fe6 100644 --- a/apps/BOB/src/net/i2p/BOB/MUXlisten.java +++ b/apps/BOB/src/net/i2p/BOB/MUXlisten.java @@ -74,7 +74,10 @@ public class MUXlisten implements Runnable { this.info.getReadLock(); N = this.info.get("NICKNAME").toString(); prikey = new ByteArrayInputStream((byte[])info.get("KEYS")); - Properties Q = (Properties)info.get("PROPERTIES"); + // Make a new copy so that anything else won't muck with our database. 
+ Properties R = (Properties)info.get("PROPERTIES"); + Properties Q = new Properties(); + Lifted.copyProperties(R, Q); this.database.releaseReadLock(); this.info.releaseReadLock(); @@ -170,7 +173,7 @@ die: { boolean spin = true; while(spin) { try { - Thread.sleep(1000); //sleep for 1000 ms (One second) + Thread.sleep(200); //sleep for 200 ms (Two thenths second) } catch(InterruptedException e) { // nop } @@ -210,14 +213,21 @@ die: { } } // die + try { + Thread.sleep(500); //sleep for 500 ms (One half second) + } catch(InterruptedException ex) { + // nop + } // wait for child threads and thread groups to die // System.out.println("MUXlisten: waiting for children"); - while(tg.activeCount() + tg.activeGroupCount() != 0) { + if(tg.activeCount() + tg.activeGroupCount() != 0) { tg.interrupt(); // unwedge any blocking threads. - try { - Thread.sleep(100); //sleep for 100 ms (One tenth second) - } catch(InterruptedException ex) { - // nop + while(tg.activeCount() + tg.activeGroupCount() != 0) { + try { + Thread.sleep(100); //sleep for 100 ms (One tenth second) + } catch(InterruptedException ex) { + // nop + } } } tg.destroy(); @@ -234,7 +244,7 @@ die: { System.out.println("BOB: MUXlisten: Please email the following dump to sponge@mail.i2p"); WrapperManager.requestThreadDump(); System.out.println("BOB: MUXlisten: Something fucked up REALLY bad!"); - System.out.println("BOB: MUXlisten: Please email the avove dump to sponge@mail.i2p"); + System.out.println("BOB: MUXlisten: Please email the above dump to sponge@mail.i2p"); } // zero out everything, just incase. try { @@ -257,17 +267,33 @@ die: { } // This is here to catch when something fucks up REALLY bad. if(tg != null) { - while(tg.activeCount() + tg.activeGroupCount() != 0) { + if(tg.activeCount() + tg.activeGroupCount() != 0) { tg.interrupt(); // unwedge any blocking threads. 
+ while(tg.activeCount() + tg.activeGroupCount() != 0) { try { Thread.sleep(100); //sleep for 100 ms (One tenth second) } catch(InterruptedException ex) { // nop } + } } tg.destroy(); // Zap reference to the ThreadGroup so the JVM can GC it. tg = null; } + + // Lastly try to close things again. + if(this.come_in) { + try { + listener.close(); + } catch(IOException e) { + } + } + try { + socketManager.destroySocketManager(); + } catch(Exception e) { + // nop + } + } } diff --git a/apps/BOB/src/net/i2p/BOB/Main.java b/apps/BOB/src/net/i2p/BOB/Main.java index 26823ff39..2d81fb30e 100644 --- a/apps/BOB/src/net/i2p/BOB/Main.java +++ b/apps/BOB/src/net/i2p/BOB/Main.java @@ -24,7 +24,6 @@ package net.i2p.BOB; import net.i2p.client.streaming.RetransmissionTimer; -import net.i2p.util.SimpleTimer; /** * Start from command line @@ -39,8 +38,8 @@ public class Main { */ public static void main(String[] args) { // THINK THINK THINK THINK THINK THINK - SimpleTimer Y = RetransmissionTimer.getInstance(); + RetransmissionTimer Y = RetransmissionTimer.getInstance(); BOB.main(args); - Y.removeSimpleTimer(); + Y.stop(); } } diff --git a/apps/BOB/src/net/i2p/BOB/TCPio.java b/apps/BOB/src/net/i2p/BOB/TCPio.java index 25290bcdc..41bb7cbe4 100644 --- a/apps/BOB/src/net/i2p/BOB/TCPio.java +++ b/apps/BOB/src/net/i2p/BOB/TCPio.java @@ -56,9 +56,28 @@ public class TCPio implements Runnable { * Copy from source to destination... * and yes, we are totally OK to block here on writes, * The OS has buffers, and I intend to use them. + * We send an interrupt signal to the threadgroup to + * unwedge any pending writes. * */ public void run() { + /* + * NOTE: + * The write method of OutputStream calls the write method of + * one argument on each of the bytes to be written out. + * Subclasses are encouraged to override this method and provide + * a more efficient implementation. + * + * So, is this really a performance problem? + * Should we expand to several bytes? 
+ * I don't believe there would be any gain, since read method + * has the same recommendations. If anyone has a better way to + * do this, I'm interested in performance improvements. + * + * --Sponge + * + */ + int b; byte a[] = new byte[1]; boolean spin = true; diff --git a/apps/BOB/src/net/i2p/BOB/TCPlistener.java b/apps/BOB/src/net/i2p/BOB/TCPlistener.java index 99ae047d3..30380a55d 100644 --- a/apps/BOB/src/net/i2p/BOB/TCPlistener.java +++ b/apps/BOB/src/net/i2p/BOB/TCPlistener.java @@ -77,7 +77,7 @@ public class TCPlistener implements Runnable { } try { Socket server = new Socket(); - listener.setSoTimeout(1000); + listener.setSoTimeout(50); // Half of the expected time from MUXlisten info.releaseReadLock(); database.releaseReadLock(); while(spin) { diff --git a/apps/addressbook/java/src/addressbook/AddressBook.java b/apps/addressbook/java/src/addressbook/AddressBook.java index ca1e1916e..a46c256c8 100644 --- a/apps/addressbook/java/src/addressbook/AddressBook.java +++ b/apps/addressbook/java/src/addressbook/AddressBook.java @@ -179,6 +179,11 @@ public class AddressBook { // IDN - basic check, not complete validation (host.indexOf("--") < 0 || host.startsWith("xn--") || host.indexOf(".xn--") > 0) && host.replaceAll("[a-z0-9.-]", "").length() == 0 && + // Base32 spoofing (52chars.i2p) + (! (host.length() == 56 && host.substring(0,52).replaceAll("[a-z2-7]", "").length() == 0)) && + // ... or maybe we do Base32 this way ... + (! host.equals("b32.i2p")) && + (! host.endsWith(".b32.i2p")) && // some reserved names that may be used for local configuration someday (! host.equals("proxy.i2p")) && (! host.equals("router.i2p")) && diff --git a/apps/bogobot/Bogobot.java b/apps/bogobot/Bogobot.java deleted file mode 100644 index 0c0c40c92..000000000 --- a/apps/bogobot/Bogobot.java +++ /dev/null @@ -1,347 +0,0 @@ -/* - * bogobot - A simple join/part stats logger bot for I2P IRC.
- * - * Bogobot.java - * 2004 The I2P Project - * http://www.i2p.net - * This code is public domain. - */ - -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.util.Properties; -import java.util.Timer; -import java.util.TimerTask; - -import org.apache.log4j.DailyRollingFileAppender; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.apache.log4j.PatternLayout; -import org.jibble.pircbot.IrcException; -import org.jibble.pircbot.NickAlreadyInUseException; -import org.jibble.pircbot.PircBot; -import org.jibble.pircbot.User; - -/** - * TODO 0.5 Add multi-server capability. - * - * @author hypercubus, oOo - * @version 0.4 - */ -public class Bogobot extends PircBot { - - private static final String INTERVAL_DAILY = "daily"; - private static final String INTERVAL_MONTHLY = "monthly"; - private static final String INTERVAL_WEEKLY = "weekly"; - - private boolean _isIntentionalDisconnect = false; - private long _lastUserlistCommandTimestamp = 0; - private Logger _logger = Logger.getLogger(Bogobot.class); - - private int _currentAutoRoundTripTag = 0; - private long _lastAutoRoundTripSentTime = 0; - private Timer _tickTimer; - - private String _configFile; - - private String _botPrimaryNick; - private String _botSecondaryNick; - private String _botNickservPassword; - private String _botUsername; - private String _ownerPrimaryNick; - private String _ownerSecondaryNick; - private String _botShutdownPassword; - private String _ircChannel; - private String _ircServer; - private int _ircServerPort; - private boolean _isLoggerEnabled; - private String _loggedHostnamePattern; - private boolean _isUserlistCommandEnabled; - private String _logFilePrefix; - private String _logFileRotationInterval; - private long _commandAntiFloodInterval; - private String _userlistCommandTrigger; - private boolean _isRoundTripDelayEnabled; - private int _roundTripDelayPeriod; - - class BogobotTickTask extends TimerTask { - private 
Bogobot _caller; - - public BogobotTickTask(Bogobot caller) { - _caller = caller; - } - - public void run() { - _caller.onTick(); - } - } - - private void loadConfigFile(String configFileName) { - - _configFile = configFileName; - - Properties config = new Properties(); - FileInputStream fis = null; - - try { - fis = new FileInputStream(configFileName); - config.load(fis); - } catch (IOException ioe) { - System.err.println("Error loading configuration file"); - System.exit(2); - - } finally { - if (fis != null) try { - fis.close(); - } catch (IOException ioe) { // nop - } - } - - _botPrimaryNick = config.getProperty("botPrimaryNick", "somebot"); - _botSecondaryNick = config.getProperty("botSecondaryNick", "somebot_"); - _botNickservPassword = config.getProperty("botNickservPassword", ""); - _botUsername = config.getProperty("botUsername", "somebot"); - - _ownerPrimaryNick = config.getProperty("ownerPrimaryNick", "somenick"); - _ownerSecondaryNick = config.getProperty("ownerSecondaryNick", "somenick_"); - - _botShutdownPassword = config.getProperty("botShutdownPassword", "take off eh"); - - _ircChannel = config.getProperty("ircChannel", "#i2p-chat"); - _ircServer = config.getProperty("ircServer", "irc.postman.i2p"); - _ircServerPort = Integer.parseInt(config.getProperty("ircServerPort", "6668")); - - _isLoggerEnabled = Boolean.valueOf(config.getProperty("isLoggerEnabled", "true")).booleanValue(); - _loggedHostnamePattern = config.getProperty("loggedHostnamePattern", ""); - _logFilePrefix = config.getProperty("logFilePrefix", "irc.postman.i2p.i2p-chat"); - _logFileRotationInterval = config.getProperty("logFileRotationInterval", INTERVAL_DAILY); - - _isRoundTripDelayEnabled = Boolean.valueOf(config.getProperty("isRoundTripDelayEnabled", "false")).booleanValue(); - _roundTripDelayPeriod = Integer.parseInt(config.getProperty("roundTripDelayPeriod", "300")); - - _isUserlistCommandEnabled = Boolean.valueOf(config.getProperty("isUserlistCommandEnabled", 
"true")).booleanValue(); - _userlistCommandTrigger = config.getProperty("userlistCommandTrigger", "!who"); - _commandAntiFloodInterval = Long.parseLong(config.getProperty("commandAntiFloodInterval", "60")); - } - - public Bogobot(String configFileName) { - - loadConfigFile(configFileName); - - this.setName(_botPrimaryNick); - this.setLogin(_botUsername); - _tickTimer = new Timer(); - _tickTimer.scheduleAtFixedRate(new BogobotTickTask(this), 1000, 10 * 1000); - } - - public static void main(String[] args) { - - Bogobot bogobot; - - if (args.length > 1) { - System.err.println("Too many arguments, the only allowed parameter is configuration file name"); - System.exit(3); - } - if (args.length == 1) { - bogobot = new Bogobot(args[0]); - } else { - bogobot = new Bogobot("bogobot.config"); - } - - bogobot.setVerbose(true); - - if (bogobot._isLoggerEnabled) - bogobot.initLogger(); - - bogobot.connectToServer(); - } - - protected void onTick() { - // Tick about once every ten seconds - - if (this.isConnected() && _isRoundTripDelayEnabled) { - if( ( (System.currentTimeMillis() - _lastAutoRoundTripSentTime) >= (_roundTripDelayPeriod * 1000) ) && (this.getOutgoingQueueSize() == 0) ) { - // Connected, sending queue is empty and last RoundTrip is more then 5 minutes old -> Send a new one - _currentAutoRoundTripTag ++; - _lastAutoRoundTripSentTime = System.currentTimeMillis(); - sendNotice(this.getNick(),"ROUNDTRIP " + _currentAutoRoundTripTag); - } - } - } - - protected void onDisconnect() { - - if (_isIntentionalDisconnect) - System.exit(0); - - if (_isLoggerEnabled) - _logger.info(System.currentTimeMillis() + " quits *** " + this.getName() + " *** (Lost connection)"); - - try { - Thread.sleep(60000); - } catch (InterruptedException e) { - // No worries. 
- } - connectToServer(); - } - - protected void onJoin(String channel, String sender, String login, String hostname) { - - if (_isLoggerEnabled) { - if (sender.equals(this.getName())) { - - _logger.info(System.currentTimeMillis() + " joins *** " + _botPrimaryNick + " ***"); - - } else { - - String prependedHostname = "@" + hostname; - if (prependedHostname.endsWith(_loggedHostnamePattern)) { - _logger.info(System.currentTimeMillis() + " joins " + sender); - } - - } - } - } - - protected void onMessage(String channel, String sender, String login, String hostname, String message) { - message = message.replaceFirst("<.+?> ", ""); - if (_isUserlistCommandEnabled && message.equals(_userlistCommandTrigger)) { - - if (System.currentTimeMillis() - _lastUserlistCommandTimestamp < _commandAntiFloodInterval * 1000) - return; - - Object[] users = getUsers(_ircChannel); - String output = "Userlist for " + _ircChannel + ": "; - - for (int i = 0; i < users.length; i++) - output += "[" + ((User) users[i]).getNick() + "] "; - - sendMessage(_ircChannel, output); - _lastUserlistCommandTimestamp = System.currentTimeMillis(); - } - } - - protected void onPart(String channel, String sender, String login, String hostname) { - - if (_isLoggerEnabled) { - if (sender.equals(this.getName())) { - _logger.info(System.currentTimeMillis() + " parts *** " + _botPrimaryNick + " ***"); - } else { - String prependedHostname = "@" + hostname; - if (prependedHostname.endsWith(_loggedHostnamePattern)) { - _logger.info(System.currentTimeMillis() + " parts " + sender); - } - } - } - - } - - protected void onPrivateMessage(String sender, String login, String hostname, String message) { - /* - * Nobody else except the bot's owner can shut it down, unless of - * course the owner's nick isn't registered and someone's spoofing it. 
- */ - if ((sender.equals(_ownerPrimaryNick) || sender.equals(_ownerSecondaryNick)) && message.equals(_botShutdownPassword)) { - - if (_isLoggerEnabled) - _logger.info(System.currentTimeMillis() + " quits *** " + this.getName() + " ***"); - - _isIntentionalDisconnect = true; - disconnect(); - } - } - - protected void onQuit(String sourceNick, String sourceLogin, String sourceHostname, String reason) { - String prependedHostname = "@" + sourceHostname; - - if (sourceNick.equals(_botPrimaryNick)) - changeNick(_botPrimaryNick); - - if (_isLoggerEnabled) { - if (prependedHostname.endsWith(_loggedHostnamePattern)) { - _logger.info(System.currentTimeMillis() + " quits " + sourceNick + " " + reason); - } - } - - } - - private void connectToServer() { - - int loginAttempts = 0; - - while (true) { - try { - connect(_ircServer, _ircServerPort); - break; - } catch (NickAlreadyInUseException e) { - if (loginAttempts == 1) { - System.out.println("Sorry, the primary and secondary bot nicks are already taken. Exiting."); - System.exit(1); - } - loginAttempts++; - try { - Thread.sleep(5000); - } catch (InterruptedException e1) { - // Hmph. 
- } - - if (getName().equals(_botPrimaryNick)) - setName(_botSecondaryNick); - else - setName(_botPrimaryNick); - - continue; - } catch (IOException e) { - System.out.println("Error during login: "); - e.printStackTrace(); - System.exit(1); - } catch (IrcException e) { - System.out.println("Error during login: "); - e.printStackTrace(); - System.exit(1); - } - } - joinChannel(_ircChannel); - } - - protected void onNotice(String sourceNick, String sourceLogin, String sourceHostname, String target, String notice) { - - if (sourceNick.equals("NickServ") && (notice.indexOf("/msg NickServ IDENTIFY") >= 0) && (_botNickservPassword != "")) { - sendRawLineViaQueue("NICKSERV IDENTIFY " + _botNickservPassword); - } - - if (sourceNick.equals(getNick()) && notice.equals( "ROUNDTRIP " + _currentAutoRoundTripTag)) { - int delay = (int)((System.currentTimeMillis() - _lastAutoRoundTripSentTime) / 100); -// sendMessage(_ircChannel, "Round-trip delay = " + (delay / 10.0f) + " seconds"); - if (_isLoggerEnabled) - _logger.info(System.currentTimeMillis() + " roundtrip " + delay); - } - } - - private void initLogger() { - - String logFilePath = "logs" + File.separator + _logFilePrefix; - DailyRollingFileAppender rollingFileAppender = null; - - if (!(new File("logs").exists())) - (new File("logs")).mkdirs(); - - try { - - if (_logFileRotationInterval.equals("monthly")) - rollingFileAppender = new DailyRollingFileAppender(new PatternLayout("%m%n"), logFilePath, "'.'yyyy-MM'.log'"); - else if (_logFileRotationInterval.equals("weekly")) - rollingFileAppender = new DailyRollingFileAppender(new PatternLayout("%m%n"), logFilePath, "'.'yyyy-ww'.log'"); - else - rollingFileAppender = new DailyRollingFileAppender(new PatternLayout("%m%n"), logFilePath, "'.'yyyy-MM-dd'.log'"); - - rollingFileAppender.setThreshold(Level.INFO); - _logger.addAppender(rollingFileAppender); - } catch (IOException ex) { - System.out.println("Error: Couldn't create or open an existing log file. 
Exiting."); - System.exit(1); - } - } - -} diff --git a/apps/bogobot/Bogoparser.java b/apps/bogobot/Bogoparser.java deleted file mode 100644 index 9b1944396..000000000 --- a/apps/bogobot/Bogoparser.java +++ /dev/null @@ -1,353 +0,0 @@ -/* - * bogoparser - A simple logfile analyzer for bogobot. - * - * Bogoparser.java - * 2004 The I2P Project - * http://www.i2p.net - * This code is public domain. - */ - -import java.io.BufferedReader; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InputStreamReader; -import java.util.ArrayList; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -/** - * @author hypercubus - * @version 0.4 - */ -public class Bogoparser { - - private static void displayUsageAndExit() { - System.out.println("\r\nUsage:\r\n\r\n java Bogoparser [--by-duration] \r\n"); - System.exit(1); - } - - public static void main(String[] args) { - - Bogoparser bogoparser; - - if (args.length < 1 || args.length > 2) - displayUsageAndExit(); - - if (args.length == 2) { - if (!args[0].equals("--by-duration")) - displayUsageAndExit(); - bogoparser = new Bogoparser(args[1], true); - } - - if (args.length == 1) - bogoparser = new Bogoparser(args[0], false); - } - - private Bogoparser(String logfile, boolean sortByDuration) { - - ArrayList sortedSessions; - - if (sortByDuration) { - sortedSessions = sortSessionsByDuration(calculateSessionDurations(sortSessionsByTime(readLogfile(logfile)))); - formatAndOutputByDuration(sortedSessions); - } else { - sortedSessions = calculateSessionDurations(sortSessionsByQuitReason(sortSessionsByNick(sortSessionsByTime(readLogfile(logfile))))); - formatAndOutput(sortedSessions); - } - } - - private ArrayList calculateSessionDurations(ArrayList sortedSessionsByQuitReasonOrDuration) { - - ArrayList calculatedSessionDurations = new ArrayList(); - - for (int i = 0; i+1 < sortedSessionsByQuitReasonOrDuration.size(); i += 2) { - - String joinsEntry = (String) 
sortedSessionsByQuitReasonOrDuration.get(i); - String[] joinsEntryFields = joinsEntry.split(" "); - - String quitsEntry = (String) sortedSessionsByQuitReasonOrDuration.get(i+1); - Pattern p = Pattern.compile("^([^ ]+) [^ ]+ ([^ ]+) (.*)$"); - Matcher m = p.matcher(quitsEntry); - - if (m.matches()) { - - String currentJoinTime = joinsEntryFields[0]; - String currentNick = m.group(2); - String currentQuitReason = m.group(3); - String currentQuitTime = m.group(1); - long joinsTimeInMilliseconds; - long quitsTimeInMilliseconds; - long sessionLengthInMilliseconds; - - joinsTimeInMilliseconds = Long.parseLong(currentJoinTime); - quitsTimeInMilliseconds = Long.parseLong(currentQuitTime); - sessionLengthInMilliseconds = quitsTimeInMilliseconds - joinsTimeInMilliseconds; - - String hours = "" + sessionLengthInMilliseconds/1000/60/60; - String minutes = "" + (sessionLengthInMilliseconds/1000/60)%60; - - if (hours.length() < 2) - hours = "0" + hours; - - if (hours.length() < 3) - hours = "0" + hours; - - if (minutes.length() < 2) - minutes = "0" + minutes; - - int columnPadding = 19-currentNick.length(); - String columnPaddingString = " "; - - for (int j = 0; j < columnPadding; j++) - columnPaddingString = columnPaddingString + " "; - - calculatedSessionDurations.add(sessionLengthInMilliseconds + " " + currentNick + columnPaddingString + " online " + hours + " hours " + minutes + " minutes " + currentQuitReason); - } else { - System.out.println("\r\nError: Unexpected entry in logfile: " + quitsEntry); - System.exit(1); - } - } - return calculatedSessionDurations; - } - - private void formatAndOutput(ArrayList sortedSessions) { - - String quitReason = null; - - for (int i = 0; i < sortedSessions.size(); i++) { - - String entry = (String) sortedSessions.get(i); - Pattern p = Pattern.compile("^[\\d]+ ([^ ]+ +online [\\d]+ hours [\\d]+ minutes) (.*)$"); - Matcher m = p.matcher(entry); - - if (m.matches()) { - - if (quitReason == null) { - quitReason = m.group(2); - 
System.out.println("\r\nQUIT: " + ((m.group(2).equals("")) ? "No Reason Given" : quitReason) + "\r\n"); - } - - String tempQuitReason = m.group(2); - String tempSession = m.group(1); - - if (tempQuitReason.equals(quitReason)) { - System.out.println(" " + tempSession); - } else { - quitReason = null; - i -= 1; - continue; - } - } else { - System.out.println("\r\nError: Unexpected entry in logfile: " + entry); - System.exit(1); - } - } - System.out.println("\r\n"); - } - - private void formatAndOutputByDuration(ArrayList sortedSessions) { - System.out.println("\r\n"); - - for (int i = 0; i < sortedSessions.size(); i++) { - String[] columns = ((String) sortedSessions.get(i)).split(" ", 2); - System.out.println(columns[1]); - } - - System.out.println("\r\n"); - } - - private ArrayList readLogfile(String logfile) { - - ArrayList log = new ArrayList(); - - try { - BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(logfile))); - - for (String line; (line = in.readLine()) != null; ) - log.add(line); - - in.close(); - - } catch (FileNotFoundException e) { - System.out.println("\r\nError: Can't find logfile '" + logfile + "'.\r\n"); - System.exit(1); - - } catch (IOException e) { - System.out.println("\r\nError: Can't read logfile '" + logfile + "'.\r\n"); - System.exit(1); - } - return log; - } - - /* - * Performs an odd-even transposition sort. 
- */ - private ArrayList sortSessionsByDuration(ArrayList calculatedSessionDurations) { - - for (int i = 0; i < calculatedSessionDurations.size()/2; i++) { - for (int j = 0; j+1 < calculatedSessionDurations.size(); j += 2) { - - String[] currentDurationString = ((String) calculatedSessionDurations.get(j)).split(" ", 2); - long currentDuration = Long.parseLong(currentDurationString[0]); - String[] nextDurationString = ((String) calculatedSessionDurations.get(j+1)).split(" ", 2); - long nextDuration = Long.parseLong(nextDurationString[0]); - - if (currentDuration > nextDuration) { - calculatedSessionDurations.add(j, calculatedSessionDurations.get(j+1)); - calculatedSessionDurations.remove(j+2); - } - } - - for (int j = 1; j+1 < calculatedSessionDurations.size(); j += 2) { - - String[] currentDurationString = ((String) calculatedSessionDurations.get(j)).split(" ", 2); - long currentDuration = Long.parseLong(currentDurationString[0]); - String[] nextDurationString = ((String) calculatedSessionDurations.get(j+1)).split(" ", 2); - long nextDuration = Long.parseLong(nextDurationString[0]); - - if (currentDuration > nextDuration) { - calculatedSessionDurations.add(j, calculatedSessionDurations.get(j+1)); - calculatedSessionDurations.remove(j+2); - } - } - } - return calculatedSessionDurations; - } - - private ArrayList sortSessionsByNick(ArrayList sortedSessionsByTime) { - - ArrayList sortedSessionsByNick = new ArrayList(); - - while (sortedSessionsByTime.size() != 0) { - - String entry = (String) sortedSessionsByTime.get(0); - String[] entryFields = entry.split(" "); - String currentNick = entryFields[2]; - - sortedSessionsByNick.add(entry); - sortedSessionsByNick.add(sortedSessionsByTime.get(1)); - sortedSessionsByTime.remove(0); - sortedSessionsByTime.remove(0); - for (int i = 0; i+1 < sortedSessionsByTime.size(); i += 2) { - - String nextEntry = (String) sortedSessionsByTime.get(i); - String[] nextEntryFields = nextEntry.split(" "); - - if 
(nextEntryFields[2].equals(currentNick)) { - sortedSessionsByNick.add(nextEntry); - sortedSessionsByNick.add(sortedSessionsByTime.get(i+1)); - sortedSessionsByTime.remove(i); - sortedSessionsByTime.remove(i); - i -= 2; - } - } - } - return sortedSessionsByNick; - } - - private ArrayList sortSessionsByQuitReason(ArrayList sortedSessionsByNick) { - - ArrayList sortedSessionsByQuitReason = new ArrayList(); - - while (sortedSessionsByNick.size() != 0) { - - String entry = (String) sortedSessionsByNick.get(1); - Pattern p = Pattern.compile("^[^ ]+ [^ ]+ [^ ]+ (.*)$"); - Matcher m = p.matcher(entry); - - if (m.matches()) { - - String currentQuitReason = m.group(1); - - sortedSessionsByQuitReason.add(sortedSessionsByNick.get(0)); - sortedSessionsByQuitReason.add(entry); - sortedSessionsByNick.remove(0); - sortedSessionsByNick.remove(0); - for (int i = 0; i+1 < sortedSessionsByNick.size(); i += 2) { - - String nextEntry = (String) sortedSessionsByNick.get(i+1); - Pattern p2 = Pattern.compile("^[^ ]+ [^ ]+ [^ ]+ (.*)$"); - Matcher m2 = p2.matcher(nextEntry); - - if (m2.matches()) { - - String nextQuitReason = m2.group(1); - - if (nextQuitReason.equals(currentQuitReason)) { - sortedSessionsByQuitReason.add(sortedSessionsByNick.get(i)); - sortedSessionsByQuitReason.add(nextEntry); - sortedSessionsByNick.remove(i); - sortedSessionsByNick.remove(i); - i -= 2; - } - } else { - System.out.println("\r\nError: Unexpected entry in logfile: " + nextEntry); - System.exit(1); - } - } - } else { - System.out.println("\r\nError: Unexpected entry in logfile: " + entry); - System.exit(1); - } - } - return sortedSessionsByQuitReason; - } - - /** - * Sessions terminated with "parts" messages instead of "quits" are filtered - * out. 
- */ - private ArrayList sortSessionsByTime(ArrayList log) { - - ArrayList sortedSessionsByTime = new ArrayList(); - - mainLoop: - while (log.size() > 0) { - - String entry = (String) log.get(0); - String[] entryFields = entry.split(" "); - - if (entryFields[1].equals("quits") && !entryFields[1].equals("joins")) { - /* - * Discard entry. The specified log either doesn't contain - * the corresponding "joins" time for this quit entry or the - * entry is a "parts" or unknown message, and in both cases - * the entry's data is useless. - */ - log.remove(0); - continue; - } - - for (int i = 1; i < log.size(); i++) { // Find corresponding "quits" entry. - - String tempEntry = (String) log.get(i); - String[] tempEntryFields = tempEntry.split(" "); - - if (tempEntryFields[2].equals(entryFields[2])) { // Check if the nick fields for the two entries match. - if (!tempEntryFields[1].equals("quits")) { - if (tempEntryFields[1].equals("joins")) { // Don't discard a subsequent "joins" entry. - log.remove(0); - continue mainLoop; - } - log.remove(i); - continue; - } - sortedSessionsByTime.add(entry); - sortedSessionsByTime.add(tempEntry); - log.remove(i); - break; - } - } - /* - * Discard "joins" entry. The specified log doesn't contain the - * corresponding "quits" time for this entry so the entry's - * data is useless. - */ - - log.remove(0); - } - - return sortedSessionsByTime; - } -} diff --git a/apps/bogobot/LICENSE.log4j.txt b/apps/bogobot/LICENSE.log4j.txt deleted file mode 100644 index 030564fc1..000000000 --- a/apps/bogobot/LICENSE.log4j.txt +++ /dev/null @@ -1,48 +0,0 @@ -/* - * ============================================================================ - * The Apache Software License, Version 1.1 - * ============================================================================ - * - * Copyright (C) 1999 The Apache Software Foundation. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without modifica- - * tion, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * 3. The end-user documentation included with the redistribution, if any, must - * include the following acknowledgment: "This product includes software - * developed by the Apache Software Foundation (http://www.apache.org/)." - * Alternately, this acknowledgment may appear in the software itself, if - * and wherever such third-party acknowledgments normally appear. - * - * 4. The names "log4j" and "Apache Software Foundation" must not be used to - * endorse or promote products derived from this software without prior - * written permission. For written permission, please contact - * apache@apache.org. - * - * 5. Products derived from this software may not be called "Apache", nor may - * "Apache" appear in their name, without prior written permission of the - * Apache Software Foundation. - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND - * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE - * APACHE SOFTWARE FOUNDATION OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLU- - * DING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS - * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This software consists of voluntary contributions made by many individuals - * on behalf of the Apache Software Foundation. For more information on the - * Apache Software Foundation, please see . - * - */ diff --git a/apps/bogobot/bogobot.bat b/apps/bogobot/bogobot.bat deleted file mode 100644 index 4c17f7f48..000000000 --- a/apps/bogobot/bogobot.bat +++ /dev/null @@ -1 +0,0 @@ -java -cp .;log4j-1.2.8.jar;pircbot.jar Bogobot diff --git a/apps/bogobot/bogobot.config b/apps/bogobot/bogobot.config deleted file mode 100644 index 647ca88ba..000000000 --- a/apps/bogobot/bogobot.config +++ /dev/null @@ -1,101 +0,0 @@ -##### -# Bogobot user configuration -##### - -### -# The bot's nick and backup nick. You will probably want to register these with -# the IRC server's NickServ.(a NickServ interface is forthcoming). -# -botPrimaryNick=somebot -botSecondaryNick=somebot_ - -### -# The bot's password required by Nickserv service's identify command. -# You have to register the nickname yourself first, the bot will not. -# -botNickservPassword= - -### -# The bot's username. Appears in the whois replies -# -botUsername=somebot - -##### -# The bot owner's nick and backup nick. One of these must match the owner's -# currently-used nick or else remote shutdown will not be possible. You will -# probably want to register these with the IRC server's NickServ. 
-# -ownerPrimaryNick=somenick -ownerSecondaryNick=somenick_ - -### -# The bot will disconnect and shut down when sent this password via private -# message (aka query) from either of the owner nicks specified above. DO NOT USE -# THIS DEFAULT VALUE! -# -botShutdownPassword=take off eh - -### -# The server, channel, and port the bot will connect to. -# -ircChannel=#i2p-chat -ircServer=irc.duck.i2p -ircServerPort=6668 - -### -# Set to "true" to enable logging, else "false" (but don't use quotation marks). -# -isLoggerEnabled=true - -### -# Restrict logging of joins and parts on the user hostname. -# Leave empty to log all of them -# Prepend with a @ for a perfect match -# Otherwise, specify the required end of the user hostname -# -loggedHostnamePattern=@free.duck.i2p - -### -# The prefix to be used for the filenames of logs. -# -logFilePrefix=irc.duck.i2p.i2p-chat - -### -# How often the logs should be rotated. Either "daily", "weekly", or "monthly" -# (but don't use quotation marks). -# -logFileRotationInterval=daily - -### -# Set to "true" to enable the regular round-trip delay computation, -# else "false" (but don't use quotation marks). -# -isRoundTripDelayEnabled=false - -### -# How often should the round-trip delay be recorded. -# (in seconds) -# -roundTripDelayPeriod=300 - -### -# Set to "true" to enable the userlist command, else "false" (but don't use -# quotation marks). -# -isUserlistCommandEnabled=true - -### -# The userlist trigger command to listen for. It is a good idea to prefix -# triggers with some non-alphanumeric character in order to avoid accidental -# trigger use during normal channel conversation. In most cases you will -# probably want to choose a unique trigger here that no other bots in the -# channel will respond to. -# -userlistCommandTrigger=!who - -### -# The number of seconds to rest after replying to a userlist command issued by -# a user in the channel. The bot will ignore subsequent userlist commands during -# this period. 
This helps prevent flooding. -# -commandAntiFloodInterval=60 diff --git a/apps/bogobot/bogobot.sh b/apps/bogobot/bogobot.sh deleted file mode 100644 index 7da4e2b3d..000000000 --- a/apps/bogobot/bogobot.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -java -cp .:log4j-1.2.8.jar:pircbot.jar Bogobot diff --git a/apps/bogobot/build-eclipse.xml b/apps/bogobot/build-eclipse.xml deleted file mode 100644 index ee101d324..000000000 --- a/apps/bogobot/build-eclipse.xml +++ /dev/null @@ -1,58 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/apps/bogobot/build.xml b/apps/bogobot/build.xml deleted file mode 100644 index 13c0253bf..000000000 --- a/apps/bogobot/build.xml +++ /dev/null @@ -1,64 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/apps/bogobot/log4j-1.2.8.jar b/apps/bogobot/log4j-1.2.8.jar deleted file mode 100644 index 493a3ccc1..000000000 Binary files a/apps/bogobot/log4j-1.2.8.jar and /dev/null differ diff --git a/apps/bogobot/pircbot.jar b/apps/bogobot/pircbot.jar deleted file mode 100644 index d936d70ca..000000000 Binary files a/apps/bogobot/pircbot.jar and /dev/null differ diff --git a/apps/i2psnark/java/src/org/klomp/snark/ConnectionAcceptor.java b/apps/i2psnark/java/src/org/klomp/snark/ConnectionAcceptor.java index 6d4aad1a3..2e45749b8 100644 --- a/apps/i2psnark/java/src/org/klomp/snark/ConnectionAcceptor.java +++ b/apps/i2psnark/java/src/org/klomp/snark/ConnectionAcceptor.java @@ -152,6 +152,7 @@ public class ConnectionAcceptor implements Runnable _util.debug("Error while accepting: " + ioe, Snark.ERROR); stop = true; } + // catch oom? 
} try diff --git a/apps/i2psnark/java/src/org/klomp/snark/I2PSnarkUtil.java b/apps/i2psnark/java/src/org/klomp/snark/I2PSnarkUtil.java index 7b674b0a0..b7e623060 100644 --- a/apps/i2psnark/java/src/org/klomp/snark/I2PSnarkUtil.java +++ b/apps/i2psnark/java/src/org/klomp/snark/I2PSnarkUtil.java @@ -23,7 +23,9 @@ import net.i2p.data.DataFormatException; import net.i2p.data.Destination; import net.i2p.data.Hash; import net.i2p.util.EepGet; +import net.i2p.util.FileUtil; import net.i2p.util.Log; +import net.i2p.util.SimpleScheduler; import net.i2p.util.SimpleTimer; /** @@ -48,6 +50,7 @@ public class I2PSnarkUtil { private int _maxUploaders; private int _maxUpBW; private int _maxConnections; + private File _tmpDir; public static final String PROP_USE_OPENTRACKERS = "i2psnark.useOpentrackers"; public static final boolean DEFAULT_USE_OPENTRACKERS = true; @@ -67,6 +70,12 @@ public class I2PSnarkUtil { _maxUploaders = Snark.MAX_TOTAL_UPLOADERS; _maxUpBW = DEFAULT_MAX_UP_BW; _maxConnections = MAX_CONNECTIONS; + // This is used for both announce replies and .torrent file downloads, + // so it must be available even if not connected to I2CP. + // so much for multiple instances + _tmpDir = new File("tmp", "i2psnark"); + FileUtil.rmdir(_tmpDir, false); + _tmpDir.mkdirs(); } /** @@ -94,6 +103,7 @@ public class I2PSnarkUtil { _i2cpHost = i2cpHost; if (i2cpPort > 0) _i2cpPort = i2cpPort; + // can't remove any options this way... if (opts != null) _opts.putAll(opts); _configured = true; @@ -166,6 +176,10 @@ public class I2PSnarkUtil { _manager = null; _shitlist.clear(); mgr.destroySocketManager(); + // this will delete a .torrent file d/l in progress so don't do that... + FileUtil.rmdir(_tmpDir, false); + // in case the user will d/l a .torrent file next... 
+ _tmpDir.mkdirs(); } /** connect to the given destination */ @@ -183,7 +197,7 @@ public class I2PSnarkUtil { synchronized (_shitlist) { _shitlist.add(dest); } - SimpleTimer.getInstance().addEvent(new Unshitlist(dest), 10*60*1000); + SimpleScheduler.getInstance().addEvent(new Unshitlist(dest), 10*60*1000); throw new IOException("Unable to reach the peer " + peer + ": " + ie.getMessage()); } } @@ -204,13 +218,15 @@ public class I2PSnarkUtil { _log.debug("Fetching [" + url + "] proxy=" + _proxyHost + ":" + _proxyPort + ": " + _shouldProxy); File out = null; try { - out = File.createTempFile("i2psnark", "url", new File(".")); + // we could use the system tmp dir but deleteOnExit() doesn't seem to work on all platforms... + out = File.createTempFile("i2psnark", null, _tmpDir); } catch (IOException ioe) { ioe.printStackTrace(); if (out != null) out.delete(); return null; } + out.deleteOnExit(); String fetchURL = url; if (rewrite) fetchURL = rewriteAnnounce(url); diff --git a/apps/i2psnark/java/src/org/klomp/snark/PeerConnectionOut.java b/apps/i2psnark/java/src/org/klomp/snark/PeerConnectionOut.java index 8fed9577a..1a53c342f 100644 --- a/apps/i2psnark/java/src/org/klomp/snark/PeerConnectionOut.java +++ b/apps/i2psnark/java/src/org/klomp/snark/PeerConnectionOut.java @@ -28,6 +28,7 @@ import java.util.List; import net.i2p.util.I2PAppThread; import net.i2p.util.Log; +import net.i2p.util.SimpleScheduler; import net.i2p.util.SimpleTimer; class PeerConnectionOut implements Runnable @@ -215,7 +216,7 @@ class PeerConnectionOut implements Runnable private void addMessage(Message m) { if (m.type == Message.PIECE) - SimpleTimer.getInstance().addEvent(new RemoveTooSlow(m), SEND_TIMEOUT); + SimpleScheduler.getInstance().addEvent(new RemoveTooSlow(m), SEND_TIMEOUT); synchronized(sendQueue) { sendQueue.add(m); diff --git a/apps/i2psnark/java/src/org/klomp/snark/PeerState.java b/apps/i2psnark/java/src/org/klomp/snark/PeerState.java index 1b4feee75..054b58262 100644 --- 
a/apps/i2psnark/java/src/org/klomp/snark/PeerState.java +++ b/apps/i2psnark/java/src/org/klomp/snark/PeerState.java @@ -60,7 +60,7 @@ class PeerState // If we have te resend outstanding requests (true after we got choked). private boolean resend = false; - private final static int MAX_PIPELINE = 2; // this is for outbound requests + private final static int MAX_PIPELINE = 3; // this is for outbound requests private final static int MAX_PIPELINE_BYTES = 128*1024; // this is for inbound requests public final static int PARTSIZE = 32*1024; // Snark was 16K, i2p-bt uses 64KB private final static int MAX_PARTSIZE = 64*1024; // Don't let anybody request more than this diff --git a/apps/i2psnark/java/src/org/klomp/snark/Snark.java b/apps/i2psnark/java/src/org/klomp/snark/Snark.java index f10ef41d3..e124955cf 100644 --- a/apps/i2psnark/java/src/org/klomp/snark/Snark.java +++ b/apps/i2psnark/java/src/org/klomp/snark/Snark.java @@ -232,7 +232,7 @@ public class Snark } // Explicit shutdown. - Runtime.getRuntime().removeShutdownHook(snarkhook); + //Runtime.getRuntime().removeShutdownHook(snarkhook); snarkhook.start(); } } diff --git a/apps/i2psnark/java/src/org/klomp/snark/SnarkManager.java b/apps/i2psnark/java/src/org/klomp/snark/SnarkManager.java index dc10c3b1a..54367af1a 100644 --- a/apps/i2psnark/java/src/org/klomp/snark/SnarkManager.java +++ b/apps/i2psnark/java/src/org/klomp/snark/SnarkManager.java @@ -81,8 +81,7 @@ public class SnarkManager implements Snark.CompleteListener { I2PAppThread monitor = new I2PAppThread(new DirMonitor(), "Snark DirMonitor"); monitor.setDaemon(true); monitor.start(); - if (_context instanceof RouterContext) - ((RouterContext)_context).router().addShutdownTask(new SnarkManagerShutdown()); + _context.addShutdownTask(new SnarkManagerShutdown()); } /** hook to I2PSnarkUtil for the servlet */ @@ -141,7 +140,7 @@ public class SnarkManager implements Snark.CompleteListener { if (!_config.containsKey(PROP_I2CP_PORT)) 
_config.setProperty(PROP_I2CP_PORT, "7654"); if (!_config.containsKey(PROP_I2CP_OPTS)) - _config.setProperty(PROP_I2CP_OPTS, "inbound.length=2 inbound.lengthVariance=0 outbound.length=2 outbound.lengthVariance=0"); + _config.setProperty(PROP_I2CP_OPTS, "inbound.length=2 inbound.lengthVariance=0 outbound.length=2 outbound.lengthVariance=0 inbound.quantity=3 outbound.quantity=3"); if (!_config.containsKey(PROP_EEP_HOST)) _config.setProperty(PROP_EEP_HOST, "localhost"); if (!_config.containsKey(PROP_EEP_PORT)) @@ -539,7 +538,7 @@ public class SnarkManager implements Snark.CompleteListener { String announce = info.getAnnounce(); // basic validation of url if ((!announce.startsWith("http://")) || - (announce.indexOf(".i2p/") < 0)) + (announce.indexOf(".i2p/") < 0)) // need to do better than this return "Non-i2p tracker in " + info.getName() + ", deleting it"; List files = info.getFiles(); if ( (files != null) && (files.size() > MAX_FILES_PER_TORRENT) ) { @@ -693,6 +692,7 @@ public class SnarkManager implements Snark.CompleteListener { , "welterde", "http://BGKmlDOoH3RzFbPRfRpZV2FjpVj8~3moFftw5-dZfDf2070TOe8Tf2~DAVeaM6ZRLdmFEt~9wyFL8YMLMoLoiwGEH6IGW6rc45tstN68KsBDWZqkTohV1q9XFgK9JnCwE~Oi89xLBHsLMTHOabowWM6dkC8nI6QqJC2JODqLPIRfOVrDdkjLwtCrsckzLybNdFmgfoqF05UITDyczPsFVaHtpF1sRggOVmdvCM66otyonlzNcJbn59PA-R808vUrCPMGU~O9Wys0i-NoqtIbtWfOKnjCRFMNw5ex4n9m5Sxm9e20UkpKG6qzEuvKZWi8vTLe1NW~CBrj~vG7I3Ok4wybUFflBFOaBabxYJLlx4xTE1zJIVxlsekmAjckB4v-cQwulFeikR4LxPQ6mCQknW2HZ4JQIq6hL9AMabxjOlYnzh7kjOfRGkck8YgeozcyTvcDUcUsOuSTk06L4kdrv8h2Cozjbloi5zl6KTbj5ZTciKCxi73Pn9grICn-HQqEAAAA.i2p/a=http://tracker.welterde.i2p/stats?mode=top5" // , "mastertracker", 
"http://VzXD~stRKbL3MOmeTn1iaCQ0CFyTmuFHiKYyo0Rd~dFPZFCYH-22rT8JD7i-C2xzYFa4jT5U2aqHzHI-Jre4HL3Ri5hFtZrLk2ax3ji7Qfb6qPnuYkuiF2E2UDmKUOppI8d9Ye7tjdhQVCy0izn55tBaB-U7UWdcvSK2i85sauyw3G0Gfads1Rvy5-CAe2paqyYATcDmGjpUNLoxbfv9KH1KmwRTNH6k1v4PyWYYnhbT39WfKMbBjSxVQRdi19cyJrULSWhjxaQfJHeWx5Z8Ev4bSPByBeQBFl2~4vqy0S5RypINsRSa3MZdbiAAyn5tr5slWR6QdoqY3qBQgBJFZppy-3iWkFqqKgSxCPundF8gdDLC5ddizl~KYcYKl42y9SGFHIukH-TZs8~em0~iahzsqWVRks3zRG~tlBcX2U3M2~OJs~C33-NKhyfZT7-XFBREvb8Szmd~p66jDxrwOnKaku-G6DyoQipJqIz4VHmY9-y5T8RrUcJcM-5lVoMpAAAA.i2p/announce.php=http://tracker.mastertracker.i2p/" // , "Galen", "http://5jpwQMI5FT303YwKa5Rd38PYSX04pbIKgTaKQsWbqoWjIfoancFdWCShXHLI5G5ofOb0Xu11vl2VEMyPsg1jUFYSVnu4-VfMe3y4TKTR6DTpetWrnmEK6m2UXh91J5DZJAKlgmO7UdsFlBkQfR2rY853-DfbJtQIFl91tbsmjcA5CGQi4VxMFyIkBzv-pCsuLQiZqOwWasTlnzey8GcDAPG1LDcvfflGV~6F5no9mnuisZPteZKlrv~~TDoXTj74QjByWc4EOYlwqK8sbU9aOvz~s31XzErbPTfwiawiaZ0RUI-IDrKgyvmj0neuFTWgjRGVTH8bz7cBZIc3viy6ioD-eMQOrXaQL0TCWZUelRwHRvgdPiQrxdYQs7ixkajeHzxi-Pq0EMm5Vbh3j3Q9kfUFW3JjFDA-MLB4g6XnjCbM5J1rC0oOBDCIEfhQkszru5cyLjHiZ5yeA0VThgu~c7xKHybv~OMXION7V8pBKOgET7ZgAkw1xgYe3Kkyq5syAAAA.i2p/tr/announce.php=http://galen.i2p/tr/" + , "crstrack", "http://b4G9sCdtfvccMAXh~SaZrPqVQNyGQbhbYMbw6supq2XGzbjU4NcOmjFI0vxQ8w1L05twmkOvg5QERcX6Mi8NQrWnR0stLExu2LucUXg1aYjnggxIR8TIOGygZVIMV3STKH4UQXD--wz0BUrqaLxPhrm2Eh9Hwc8TdB6Na4ShQUq5Xm8D4elzNUVdpM~RtChEyJWuQvoGAHY3ppX-EJJLkiSr1t77neS4Lc-KofMVmgI9a2tSSpNAagBiNI6Ak9L1T0F9uxeDfEG9bBSQPNMOSUbAoEcNxtt7xOW~cNOAyMyGydwPMnrQ5kIYPY8Pd3XudEko970vE0D6gO19yoBMJpKx6Dh50DGgybLQ9CpRaynh2zPULTHxm8rneOGRcQo8D3mE7FQ92m54~SvfjXjD2TwAVGI~ae~n9HDxt8uxOecAAvjjJ3TD4XM63Q9TmB38RmGNzNLDBQMEmJFpqQU8YeuhnS54IVdUoVQFqui5SfDeLXlSkh4vYoMU66pvBfWbAAAA.i2p/tracker/announce.php=http://crstrack.i2p/tracker/" }; /** comma delimited list of name=announceURL=baseURL for the trackers to be displayed */ diff --git a/apps/i2psnark/java/src/org/klomp/snark/Storage.java b/apps/i2psnark/java/src/org/klomp/snark/Storage.java index a8f023295..69e5a198f 100644 
--- a/apps/i2psnark/java/src/org/klomp/snark/Storage.java +++ b/apps/i2psnark/java/src/org/klomp/snark/Storage.java @@ -696,9 +696,6 @@ public class Storage listener.setWantedPieces(this); _util.debug("WARNING: Not really done, missing " + needed + " pieces", Snark.WARNING); - } else { - if (listener != null) - listener.storageCompleted(this); } } diff --git a/apps/i2psnark/java/src/org/klomp/snark/TrackerClient.java b/apps/i2psnark/java/src/org/klomp/snark/TrackerClient.java index 21e6bcac2..3ba2f6be6 100644 --- a/apps/i2psnark/java/src/org/klomp/snark/TrackerClient.java +++ b/apps/i2psnark/java/src/org/klomp/snark/TrackerClient.java @@ -346,7 +346,6 @@ public class TrackerClient extends I2PAppThread if (fetched == null) { throw new IOException("Error fetching " + s); } - fetched.deleteOnExit(); InputStream in = null; try { diff --git a/apps/i2psnark/java/src/org/klomp/snark/web/I2PSnarkServlet.java b/apps/i2psnark/java/src/org/klomp/snark/web/I2PSnarkServlet.java index c5bb93b21..52e109573 100644 --- a/apps/i2psnark/java/src/org/klomp/snark/web/I2PSnarkServlet.java +++ b/apps/i2psnark/java/src/org/klomp/snark/web/I2PSnarkServlet.java @@ -5,6 +5,7 @@ import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.PrintWriter; +import java.text.Collator; import java.util.ArrayList; import java.util.Iterator; import java.util.List; @@ -62,7 +63,7 @@ public class I2PSnarkServlet extends HttpServlet { req.setCharacterEncoding("UTF-8"); resp.setCharacterEncoding("UTF-8"); resp.setContentType("text/html; charset=UTF-8"); - long stats[] = {0,0,0,0}; + long stats[] = {0,0,0,0,0}; String nonce = req.getParameter("nonce"); if ( (nonce != null) && (nonce.equals(String.valueOf(_nonce))) ) @@ -142,8 +143,10 @@ public class I2PSnarkServlet extends HttpServlet { if (snarks.size() <= 0) { out.write(TABLE_EMPTY); } else if (snarks.size() > 1) { - out.write(TABLE_TOTAL); - out.write(" " + formatSize(stats[0]) + "\n" + + out.write("\n" + + 
" Totals (" + snarks.size() + " torrents, " + stats[4] + " connected peers)\n" + + "  \n" + + " " + formatSize(stats[0]) + "\n" + " " + formatSize(stats[1]) + "\n" + " " + formatSize(stats[2]) + "ps\n" + " " + formatSize(stats[3]) + "ps\n" + @@ -199,10 +202,14 @@ public class I2PSnarkServlet extends HttpServlet { } catch (IOException ioe) { _log.warn("hrm: " + local, ioe); } - } else if ( (newURL != null) && (newURL.trim().length() > "http://.i2p/".length()) ) { - _manager.addMessage("Fetching " + newURL); - I2PAppThread fetch = new I2PAppThread(new FetchAndAdd(_manager, newURL), "Fetch and add"); - fetch.start(); + } else if (newURL != null) { + if (newURL.startsWith("http://")) { + _manager.addMessage("Fetching " + newURL); + I2PAppThread fetch = new I2PAppThread(new FetchAndAdd(_manager, newURL), "Fetch and add"); + fetch.start(); + } else { + _manager.addMessage("Invalid URL - must start with http://"); + } } else { // no file or URL specified } @@ -378,7 +385,8 @@ public class I2PSnarkServlet extends HttpServlet { private List getSortedSnarks(HttpServletRequest req) { Set files = _manager.listTorrentFiles(); - TreeSet fileNames = new TreeSet(files); // sorts it alphabetically + TreeSet fileNames = new TreeSet(Collator.getInstance()); // sorts it alphabetically + fileNames.addAll(files); ArrayList rv = new ArrayList(fileNames.size()); for (Iterator iter = fileNames.iterator(); iter.hasNext(); ) { String name = (String)iter.next(); @@ -437,6 +445,7 @@ public class I2PSnarkServlet extends HttpServlet { if (snark.coordinator != null) { err = snark.coordinator.trackerProblems; curPeers = snark.coordinator.getPeerCount(); + stats[4] += curPeers; knownPeers = snark.coordinator.trackerSeenPeers; } @@ -575,10 +584,10 @@ public class I2PSnarkServlet extends HttpServlet { client = "Azureus"; else if ("CwsL".equals(ch)) client = "I2PSnarkXL"; - else if ("AUZV".equals(ch)) + else if ("ZV".equals(ch.substring(2,4))) client = "Robert"; else - client = "Unknown"; + client = 
"Unknown (" + ch + ')'; out.write("" + client + "  " + peer.toString().substring(5, 9) + ""); if (showDebug) out.write(" inactive " + (peer.getInactiveTime() / 1000) + "s"); @@ -639,7 +648,7 @@ public class I2PSnarkServlet extends HttpServlet { private void writeAddForm(PrintWriter out, HttpServletRequest req) throws IOException { String uri = req.getRequestURI(); String newURL = req.getParameter("newURL"); - if ( (newURL == null) || (newURL.trim().length() <= 0) ) newURL = "http://"; + if ( (newURL == null) || (newURL.trim().length() <= 0) ) newURL = ""; String newFile = req.getParameter("newFile"); if ( (newFile == null) || (newFile.trim().length() <= 0) ) newFile = ""; @@ -767,7 +776,7 @@ public class I2PSnarkServlet extends HttpServlet { return bytes + "B"; else if (bytes < 5*1024*1024) return ((bytes + 512)/1024) + "KB"; - else if (bytes < 5*1024*1024*1024l) + else if (bytes < 10*1024*1024*1024l) return ((bytes + 512*1024)/(1024*1024)) + "MB"; else return ((bytes + 512*1024*1024)/(1024*1024*1024)) + "GB"; @@ -856,11 +865,6 @@ public class I2PSnarkServlet extends HttpServlet { " Down Rate\n" + " Up Rate\n"; - private static final String TABLE_TOTAL = "\n" + - "Totals\n" + - "  \n" + - "  \n"; - private static final String TABLE_EMPTY = "" + "No torrents\n"; diff --git a/apps/i2ptunnel/java/build.xml b/apps/i2ptunnel/java/build.xml index 635cad2e4..564f6fc4b 100644 --- a/apps/i2ptunnel/java/build.xml +++ b/apps/i2ptunnel/java/build.xml @@ -42,7 +42,7 @@ + basedir="../jsp/" excludes="web.xml, **/*.java, *.jsp"> diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnel.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnel.java index 560475e42..b11f954e7 100644 --- a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnel.java +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnel.java @@ -62,6 +62,8 @@ import net.i2p.data.DataFormatException; import net.i2p.data.DataHelper; import net.i2p.data.Destination; import net.i2p.i2ptunnel.socks.I2PSOCKSTunnel; 
+import net.i2p.i2ptunnel.streamr.StreamrConsumer; +import net.i2p.i2ptunnel.streamr.StreamrProducer; import net.i2p.util.EventDispatcher; import net.i2p.util.EventDispatcherImpl; import net.i2p.util.Log; @@ -234,6 +236,8 @@ public class I2PTunnel implements Logging, EventDispatcher { runServer(args, l); } else if ("httpserver".equals(cmdname)) { runHttpServer(args, l); + } else if ("ircserver".equals(cmdname)) { + runIrcServer(args, l); } else if ("textserver".equals(cmdname)) { runTextServer(args, l); } else if ("client".equals(cmdname)) { @@ -244,6 +248,12 @@ public class I2PTunnel implements Logging, EventDispatcher { runIrcClient(args, l); } else if ("sockstunnel".equals(cmdname)) { runSOCKSTunnel(args, l); + } else if ("connectclient".equals(cmdname)) { + runConnectClient(args, l); + } else if ("streamrclient".equals(cmdname)) { + runStreamrClient(args, l); + } else if ("streamrserver".equals(cmdname)) { + runStreamrServer(args, l); } else if ("config".equals(cmdname)) { runConfig(args, l); } else if ("listen_on".equals(cmdname)) { @@ -296,6 +306,7 @@ public class I2PTunnel implements Logging, EventDispatcher { l.log("client [, []"); l.log("ircclient [, []"); l.log("httpclient [] []"); + l.log("connectclient [] []"); l.log("lookup "); l.log("quit"); l.log("close [forced] |all"); @@ -380,6 +391,53 @@ public class I2PTunnel implements Logging, EventDispatcher { } } + /** + * Same args as runServer + * (we should stop duplicating all this code...) 
+ */ + public void runIrcServer(String args[], Logging l) { + if (args.length == 3) { + InetAddress serverHost = null; + int portNum = -1; + File privKeyFile = null; + try { + serverHost = InetAddress.getByName(args[0]); + } catch (UnknownHostException uhe) { + l.log("unknown host"); + _log.error(getPrefix() + "Error resolving " + args[0], uhe); + notifyEvent("serverTaskId", Integer.valueOf(-1)); + return; + } + + try { + portNum = Integer.parseInt(args[1]); + } catch (NumberFormatException nfe) { + l.log("invalid port"); + _log.error(getPrefix() + "Port specified is not valid: " + args[1], nfe); + notifyEvent("serverTaskId", Integer.valueOf(-1)); + return; + } + + privKeyFile = new File(args[2]); + if (!privKeyFile.canRead()) { + l.log("private key file does not exist"); + _log.error(getPrefix() + "Private key file does not exist or is not readable: " + args[2]); + notifyEvent("serverTaskId", Integer.valueOf(-1)); + return; + } + I2PTunnelServer serv = new I2PTunnelIRCServer(serverHost, portNum, privKeyFile, args[2], l, (EventDispatcher) this, this); + serv.setReadTimeout(readTimeout); + serv.startRunning(); + addtask(serv); + notifyEvent("serverTaskId", Integer.valueOf(serv.getId())); + return; + } else { + l.log("server "); + l.log(" creates a server that sends all incoming data\n" + " of its destination to host:port."); + notifyEvent("serverTaskId", Integer.valueOf(-1)); + } + } + /** * Run the HTTP server pointing at the host and port specified using the private i2p * destination loaded from the specified file, replacing the HTTP headers @@ -494,14 +552,14 @@ public class I2PTunnel implements Logging, EventDispatcher { * Integer port number if the client is listening * sharedClient parameter is a String "true" or "false" * - * @param args {portNumber, destinationBase64 or "file:filename"[, sharedClient]} + * @param args {portNumber, destinationBase64 or "file:filename"[, sharedClient [, privKeyFile]]} * @param l logger to receive events and output */ public 
void runClient(String args[], Logging l) { boolean isShared = true; - if (args.length == 3) + if (args.length >= 3) isShared = Boolean.valueOf(args[2].trim()).booleanValue(); - if ( (args.length == 2) || (args.length == 3) ) { + if (args.length >= 2) { int portNum = -1; try { portNum = Integer.parseInt(args[0]); @@ -514,7 +572,10 @@ public class I2PTunnel implements Logging, EventDispatcher { I2PTunnelTask task; ownDest = !isShared; try { - task = new I2PTunnelClient(portNum, args[1], l, ownDest, (EventDispatcher) this, this); + String privateKeyFile = null; + if (args.length >= 4) + privateKeyFile = args[3]; + task = new I2PTunnelClient(portNum, args[1], l, ownDest, (EventDispatcher) this, this, privateKeyFile); addtask(task); notifyEvent("clientTaskId", Integer.valueOf(task.getId())); } catch (IllegalArgumentException iae) { @@ -523,7 +584,7 @@ public class I2PTunnel implements Logging, EventDispatcher { notifyEvent("clientTaskId", Integer.valueOf(-1)); } } else { - l.log("client [,]|file:[ ]"); + l.log("client [,]|file:[ ] []"); l.log(" creates a client that forwards port to the pubkey.\n" + " use 0 as port to get a free port assigned. 
If you specify\n" + " a comma delimited list of pubkeys, it will rotate among them\n" @@ -555,7 +616,7 @@ public class I2PTunnel implements Logging, EventDispatcher { return; } - String proxy = "squid.i2p"; + String proxy = ""; boolean isShared = true; if (args.length > 1) { if ("true".equalsIgnoreCase(args[1].trim())) { @@ -595,11 +656,66 @@ public class I2PTunnel implements Logging, EventDispatcher { l.log(" (optional) indicates if this client shares tunnels with other clients (true of false)"); l.log(" (optional) indicates a proxy server to be used"); l.log(" when trying to access an address out of the .i2p domain"); - l.log(" (the default proxy is squid.i2p)."); notifyEvent("httpclientTaskId", Integer.valueOf(-1)); } } + /** + * Run a CONNECT client on the given port number + * + * @param args {portNumber[, sharedClient][, proxy to be used for the WWW]} + * @param l logger to receive events and output + */ + public void runConnectClient(String args[], Logging l) { + if (args.length >= 1 && args.length <= 3) { + int port = -1; + try { + port = Integer.parseInt(args[0]); + } catch (NumberFormatException nfe) { + _log.error(getPrefix() + "Port specified is not valid: " + args[0], nfe); + return; + } + + String proxy = ""; + boolean isShared = true; + if (args.length > 1) { + if ("true".equalsIgnoreCase(args[1].trim())) { + isShared = true; + if (args.length == 3) + proxy = args[2]; + } else if ("false".equalsIgnoreCase(args[1].trim())) { + _log.warn("args[1] == [" + args[1] + "] and rejected explicitly"); + isShared = false; + if (args.length == 3) + proxy = args[2]; + } else if (args.length == 3) { + isShared = false; // not "true" + proxy = args[2]; + _log.warn("args[1] == [" + args[1] + "] but rejected"); + } else { + // isShared not specified, default to true + isShared = true; + proxy = args[1]; + } + } + + I2PTunnelTask task; + ownDest = !isShared; + try { + task = new I2PTunnelConnectClient(port, l, ownDest, proxy, (EventDispatcher) this, this); + 
addtask(task); + } catch (IllegalArgumentException iae) { + _log.error(getPrefix() + "Invalid I2PTunnel config to create an httpclient [" + host + ":"+ port + "]", iae); + } + } else { + l.log("connectclient [] []"); + l.log(" creates a client that for SSL/HTTPS requests."); + l.log(" (optional) indicates if this client shares tunnels with other clients (true of false)"); + l.log(" (optional) indicates a proxy server to be used"); + l.log(" when trying to access an address out of the .i2p domain"); + } + } + /** * Run an IRC client on the given port number * @@ -607,11 +723,11 @@ public class I2PTunnel implements Logging, EventDispatcher { * Also sets "ircclientStatus" = "ok" or "error" after the client tunnel has started. * parameter sharedClient is a String, either "true" or "false" * - * @param args {portNumber,destinationBase64 or "file:filename" [, sharedClient]} + * @param args {portNumber,destinationBase64 or "file:filename" [, sharedClient [, privKeyFile]]} * @param l logger to receive events and output */ public void runIrcClient(String args[], Logging l) { - if (args.length >= 2 && args.length <= 3) { + if (args.length >= 2) { int port = -1; try { port = Integer.parseInt(args[0]); @@ -638,7 +754,10 @@ public class I2PTunnel implements Logging, EventDispatcher { I2PTunnelTask task; ownDest = !isShared; try { - task = new I2PTunnelIRCClient(port, args[1],l, ownDest, (EventDispatcher) this, this); + String privateKeyFile = null; + if (args.length >= 4) + privateKeyFile = args[3]; + task = new I2PTunnelIRCClient(port, args[1], l, ownDest, (EventDispatcher) this, this, privateKeyFile); addtask(task); notifyEvent("ircclientTaskId", Integer.valueOf(task.getId())); } catch (IllegalArgumentException iae) { @@ -647,7 +766,7 @@ public class I2PTunnel implements Logging, EventDispatcher { notifyEvent("ircclientTaskId", Integer.valueOf(-1)); } } else { - l.log("ircclient []"); + l.log("ircclient [ []]"); l.log(" creates a client that filter IRC protocol."); l.log(" 
(optional) indicates if this client shares tunnels with other clients (true of false)"); notifyEvent("ircclientTaskId", Integer.valueOf(-1)); @@ -662,7 +781,7 @@ public class I2PTunnel implements Logging, EventDispatcher { * "openSOCKSTunnelResult" = "ok" or "error" after the client tunnel has * started. * - * @param args {portNumber} + * @param args {portNumber [, sharedClient]} * @param l logger to receive events and output */ public void runSOCKSTunnel(String args[], Logging l) { @@ -677,6 +796,11 @@ public class I2PTunnel implements Logging, EventDispatcher { return; } + boolean isShared = false; + if (args.length > 1) + isShared = "true".equalsIgnoreCase(args[1].trim()); + + ownDest = !isShared; I2PTunnelTask task; task = new I2PSOCKSTunnel(port, l, ownDest, (EventDispatcher) this, this); addtask(task); @@ -688,6 +812,82 @@ public class I2PTunnel implements Logging, EventDispatcher { } } + /** + * Streamr client + * + * @param args {targethost, targetport, destinationString} + * @param l logger to receive events and output + */ + public void runStreamrClient(String args[], Logging l) { + if (args.length == 3) { + InetAddress host; + try { + host = InetAddress.getByName(args[0]); + } catch (UnknownHostException uhe) { + l.log("unknown host"); + _log.error(getPrefix() + "Error resolving " + args[0], uhe); + notifyEvent("streamrtunnelTaskId", Integer.valueOf(-1)); + return; + } + + int port = -1; + try { + port = Integer.parseInt(args[1]); + } catch (NumberFormatException nfe) { + l.log("invalid port"); + _log.error(getPrefix() + "Port specified is not valid: " + args[0], nfe); + notifyEvent("streamrtunnelTaskId", Integer.valueOf(-1)); + return; + } + + StreamrConsumer task = new StreamrConsumer(host, port, args[2], l, (EventDispatcher) this, this); + task.startRunning(); + addtask(task); + notifyEvent("streamrtunnelTaskId", Integer.valueOf(task.getId())); + } else { + l.log("streamrclient "); + l.log(" creates a tunnel that receives streaming data."); + 
notifyEvent("streamrtunnelTaskId", Integer.valueOf(-1)); + } + } + + /** + * Streamr server + * + * @param args {port, privkeyfile} + * @param l logger to receive events and output + */ + public void runStreamrServer(String args[], Logging l) { + if (args.length == 2) { + int port = -1; + try { + port = Integer.parseInt(args[0]); + } catch (NumberFormatException nfe) { + l.log("invalid port"); + _log.error(getPrefix() + "Port specified is not valid: " + args[0], nfe); + notifyEvent("streamrtunnelTaskId", Integer.valueOf(-1)); + return; + } + + File privKeyFile = new File(args[1]); + if (!privKeyFile.canRead()) { + l.log("private key file does not exist"); + _log.error(getPrefix() + "Private key file does not exist or is not readable: " + args[3]); + notifyEvent("serverTaskId", Integer.valueOf(-1)); + return; + } + + StreamrProducer task = new StreamrProducer(port, privKeyFile, args[1], l, (EventDispatcher) this, this); + task.startRunning(); + addtask(task); + notifyEvent("streamrtunnelTaskId", Integer.valueOf(task.getId())); + } else { + l.log("streamrserver "); + l.log(" creates a tunnel that sends streaming data."); + notifyEvent("streamrtunnelTaskId", Integer.valueOf(-1)); + } + } + /** * Specify the i2cp host and port * diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelClient.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelClient.java index 4739a07f4..502bb28d5 100644 --- a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelClient.java +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelClient.java @@ -31,8 +31,8 @@ public class I2PTunnelClient extends I2PTunnelClientBase { */ public I2PTunnelClient(int localPort, String destinations, Logging l, boolean ownDest, EventDispatcher notifyThis, - I2PTunnel tunnel) throws IllegalArgumentException { - super(localPort, ownDest, l, notifyThis, "SynSender", tunnel); + I2PTunnel tunnel, String pkf) throws IllegalArgumentException { + super(localPort, ownDest, l, notifyThis, "SynSender", 
tunnel, pkf); if (waitEventValue("openBaseClientResult").equals("error")) { notifyEvent("openClientResult", "error"); diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelClientBase.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelClientBase.java index d6e5bf9f9..b6eb39224 100644 --- a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelClientBase.java +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelClientBase.java @@ -3,6 +3,7 @@ */ package net.i2p.i2ptunnel; +import java.io.FileInputStream; import java.io.IOException; import java.io.InterruptedIOException; import java.net.ConnectException; @@ -27,6 +28,7 @@ import net.i2p.data.Destination; import net.i2p.util.EventDispatcher; import net.i2p.util.I2PThread; import net.i2p.util.Log; +import net.i2p.util.SimpleScheduler; import net.i2p.util.SimpleTimer; public abstract class I2PTunnelClientBase extends I2PTunnelTask implements Runnable { @@ -58,6 +60,7 @@ public abstract class I2PTunnelClientBase extends I2PTunnelTask implements Runna private byte[] pubkey; private String handlerName; + private String privKeyFile; private Object conLock = new Object(); @@ -90,18 +93,28 @@ public abstract class I2PTunnelClientBase extends I2PTunnelTask implements Runna // I2PTunnelClientBase(localPort, ownDest, l, (EventDispatcher)null); //} + public I2PTunnelClientBase(int localPort, boolean ownDest, Logging l, + EventDispatcher notifyThis, String handlerName, + I2PTunnel tunnel) throws IllegalArgumentException { + this(localPort, ownDest, l, notifyThis, handlerName, tunnel, null); + } + /** + * @param privKeyFile null to generate a transient key + * * @throws IllegalArgumentException if the I2CP configuration is b0rked so * badly that we cant create a socketManager */ public I2PTunnelClientBase(int localPort, boolean ownDest, Logging l, EventDispatcher notifyThis, String handlerName, - I2PTunnel tunnel) throws IllegalArgumentException{ + I2PTunnel tunnel, String pkf) throws IllegalArgumentException{ 
super(localPort + " (uninitialized)", notifyThis, tunnel); _clientId = ++__clientId; this.localPort = localPort; this.l = l; this.handlerName = handlerName + _clientId; + this.privKeyFile = pkf; + _context = tunnel.getContext(); _context.statManager().createRateStat("i2ptunnel.client.closeBacklog", "How many pending sockets remain when we close one due to backlog?", "I2PTunnel", new long[] { 60*1000, 10*60*1000, 60*60*1000 }); @@ -113,26 +126,28 @@ public abstract class I2PTunnelClientBase extends I2PTunnelTask implements Runna // be looked up tunnel.getClientOptions().setProperty("i2cp.dontPublishLeaseSet", "true"); - while (sockMgr == null) { - synchronized (sockLock) { - if (ownDest) { - sockMgr = buildSocketManager(); - } else { - sockMgr = getSocketManager(); + boolean openNow = !Boolean.valueOf(tunnel.getClientOptions().getProperty("i2cp.delayOpen")).booleanValue(); + if (openNow) { + while (sockMgr == null) { + synchronized (sockLock) { + if (ownDest) { + sockMgr = buildSocketManager(); + } else { + sockMgr = getSocketManager(); + } + } + if (sockMgr == null) { + _log.log(Log.CRIT, "Unable to create socket manager (our own? " + ownDest + ")"); + try { Thread.sleep(10*1000); } catch (InterruptedException ie) {} } } if (sockMgr == null) { - _log.log(Log.CRIT, "Unable to create socket manager (our own? 
" + ownDest + ")"); - try { Thread.sleep(10*1000); } catch (InterruptedException ie) {} + l.log("Invalid I2CP configuration"); + throw new IllegalArgumentException("Socket manager could not be created"); } - } - if (sockMgr == null) { - l.log("Invalid I2CP configuration"); - throw new IllegalArgumentException("Socket manager could not be created"); - } - l.log("I2P session created"); + l.log("I2P session created"); - getTunnel().addSession(sockMgr.getSession()); + } // else delay creating session until createI2PSocket() is called Thread t = new I2PThread(this); t.setName("Client " + _clientId); @@ -152,7 +167,10 @@ public abstract class I2PTunnelClientBase extends I2PTunnelTask implements Runna configurePool(tunnel); if (open && listenerReady) { - l.log("Ready! Port " + getLocalPort()); + if (openNow) + l.log("Ready! Port " + getLocalPort()); + else + l.log("Listening on port " + getLocalPort() + ", delaying tunnel open until required"); notifyEvent("openBaseClientResult", "ok"); } else { l.log("Error listening - please see the logs!"); @@ -194,28 +212,36 @@ public abstract class I2PTunnelClientBase extends I2PTunnelTask implements Runna private static I2PSocketManager socketManager; protected synchronized I2PSocketManager getSocketManager() { - return getSocketManager(getTunnel()); + return getSocketManager(getTunnel(), this.privKeyFile); } protected static synchronized I2PSocketManager getSocketManager(I2PTunnel tunnel) { + return getSocketManager(tunnel, null); + } + protected static synchronized I2PSocketManager getSocketManager(I2PTunnel tunnel, String pkf) { if (socketManager != null) { I2PSession s = socketManager.getSession(); if ( (s == null) || (s.isClosed()) ) { _log.info("Building a new socket manager since the old one closed [s=" + s + "]"); - socketManager = buildSocketManager(tunnel); + if (s != null) + tunnel.removeSession(s); + socketManager = buildSocketManager(tunnel, pkf); } else { _log.info("Not building a new socket manager since the old one 
is open [s=" + s + "]"); } } else { _log.info("Building a new socket manager since there is no other one"); - socketManager = buildSocketManager(tunnel); + socketManager = buildSocketManager(tunnel, pkf); } return socketManager; } protected I2PSocketManager buildSocketManager() { - return buildSocketManager(getTunnel()); + return buildSocketManager(getTunnel(), this.privKeyFile); } protected static I2PSocketManager buildSocketManager(I2PTunnel tunnel) { + return buildSocketManager(tunnel, null); + } + protected static I2PSocketManager buildSocketManager(I2PTunnel tunnel, String pkf) { Properties props = new Properties(); props.putAll(tunnel.getClientOptions()); int portNum = 7654; @@ -229,7 +255,22 @@ public abstract class I2PTunnelClientBase extends I2PTunnelTask implements Runna I2PSocketManager sockManager = null; while (sockManager == null) { - sockManager = I2PSocketManagerFactory.createManager(tunnel.host, portNum, props); + if (pkf != null) { + // Persistent client dest + FileInputStream fis = null; + try { + fis = new FileInputStream(pkf); + sockManager = I2PSocketManagerFactory.createManager(fis, tunnel.host, portNum, props); + } catch (IOException ioe) { + _log.error("Error opening key file", ioe); + // this is going to loop but if we break we'll get a NPE + } finally { + if (fis != null) + try { fis.close(); } catch (IOException ioe) {} + } + } else { + sockManager = I2PSocketManagerFactory.createManager(tunnel.host, portNum, props); + } if (sockManager == null) { _log.log(Log.CRIT, "Unable to create socket manager"); @@ -301,6 +342,10 @@ public abstract class I2PTunnelClientBase extends I2PTunnelTask implements Runna * @return a new I2PSocket */ public I2PSocket createI2PSocket(Destination dest) throws I2PException, ConnectException, NoRouteToHostException, InterruptedIOException { + if (sockMgr == null) { + // we need this before getDefaultOptions() + sockMgr = getSocketManager(); + } return createI2PSocket(dest, getDefaultOptions()); } @@ -321,6 
+366,19 @@ public abstract class I2PTunnelClientBase extends I2PTunnelTask implements Runna public I2PSocket createI2PSocket(Destination dest, I2PSocketOptions opt) throws I2PException, ConnectException, NoRouteToHostException, InterruptedIOException { I2PSocket i2ps; + if (sockMgr == null) { + // delayed open - call get instead of build because the locking is up there + sockMgr = getSocketManager(); + } else if (Boolean.valueOf(getTunnel().getClientOptions().getProperty("i2cp.newDestOnResume")).booleanValue()) { + synchronized(sockMgr) { + I2PSocketManager oldSockMgr = sockMgr; + // This will build a new socket manager and a new dest if the session is closed. + sockMgr = getSocketManager(); + if (oldSockMgr != sockMgr) { + _log.warn("Built a new destination on resume"); + } + } + } // else the old socket manager will reconnect the old session if necessary i2ps = sockMgr.connect(dest, opt); synchronized (sockLock) { mySockets.add(i2ps); @@ -373,8 +431,10 @@ public abstract class I2PTunnelClientBase extends I2PTunnelTask implements Runna _context.statManager().addRateData("i2ptunnel.client.manageTime", total, total); } } catch (IOException ex) { - _log.error("Error listening for connections on " + localPort, ex); - notifyEvent("openBaseClientResult", "error"); + if (open) { + _log.error("Error listening for connections on " + localPort, ex); + notifyEvent("openBaseClientResult", "error"); + } synchronized (sockLock) { mySockets.clear(); } @@ -401,7 +461,7 @@ public abstract class I2PTunnelClientBase extends I2PTunnelTask implements Runna } if (_maxWaitTime > 0) - SimpleTimer.getInstance().addEvent(new CloseEvent(s), _maxWaitTime); + SimpleScheduler.getInstance().addEvent(new CloseEvent(s), _maxWaitTime); synchronized (_waitingSockets) { _waitingSockets.add(s); @@ -455,20 +515,23 @@ public abstract class I2PTunnelClientBase extends I2PTunnelTask implements Runna // might risk to create an orphan socket. 
Would be better // to return with an error in that situation quickly. synchronized (sockLock) { - mySockets.retainAll(sockMgr.listSockets()); - if (!forced && mySockets.size() != 0) { - l.log("There are still active connections!"); - _log.debug("can't close: there are still active connections!"); - for (Iterator it = mySockets.iterator(); it.hasNext();) { - l.log("->" + it.next()); + if (sockMgr != null) { + mySockets.retainAll(sockMgr.listSockets()); + if (!forced && mySockets.size() != 0) { + l.log("There are still active connections!"); + _log.debug("can't close: there are still active connections!"); + for (Iterator it = mySockets.iterator(); it.hasNext();) { + l.log("->" + it.next()); + } + return false; + } + I2PSession session = sockMgr.getSession(); + if (session != null) { + getTunnel().removeSession(session); } - return false; - } - I2PSession session = sockMgr.getSession(); - if (session != null) { - getTunnel().removeSession(session); } l.log("Closing client " + toString()); + open = false; try { if (ss != null) ss.close(); } catch (IOException ex) { @@ -476,7 +539,6 @@ public abstract class I2PTunnelClientBase extends I2PTunnelTask implements Runna return false; } l.log("Client closed."); - open = false; } synchronized (_waitingSockets) { _waitingSockets.notifyAll(); } diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelConnectClient.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelConnectClient.java new file mode 100644 index 000000000..bf7ebf0a7 --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelConnectClient.java @@ -0,0 +1,369 @@ +/* I2PTunnel is GPL'ed (with the exception mentioned in I2PTunnel.java) + * (c) 2003 - 2004 mihi + */ +package net.i2p.i2ptunnel; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.Socket; +import java.net.SocketException; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import 
java.util.StringTokenizer; + +import net.i2p.I2PAppContext; +import net.i2p.I2PException; +import net.i2p.client.streaming.I2PSocket; +import net.i2p.client.streaming.I2PSocketOptions; +import net.i2p.data.DataFormatException; +import net.i2p.data.DataHelper; +import net.i2p.data.Destination; +import net.i2p.util.EventDispatcher; +import net.i2p.util.FileUtil; +import net.i2p.util.Log; + +/** + * Supports the following: + * (where protocol is generally HTTP/1.1 but is ignored) + * (where host is one of: + * example.i2p + * 52chars.b32.i2p + * 516+charsbase64 + * example.com (sent to one of the configured proxies) + * ) + * + * (port and protocol are ignored for i2p destinations) + * CONNECT host + * CONNECT host protocol + * CONNECT host:port + * CONNECT host:port protocol (this is the standard) + * + * Additional lines after the CONNECT line but before the blank line are ignored and stripped. + * The CONNECT line is removed for .i2p accesses + * but passed along for outproxy accesses. + * + * Ref: + * INTERNET-DRAFT Ari Luotonen + * Expires: September 26, 1997 Netscape Communications Corporation + * March 26, 1997 + * Tunneling SSL Through a WWW Proxy + * + * @author zzz a stripped-down I2PTunnelHTTPClient + */ +public class I2PTunnelConnectClient extends I2PTunnelClientBase implements Runnable { + private static final Log _log = new Log(I2PTunnelConnectClient.class); + + private List _proxyList; + + private final static byte[] ERR_DESTINATION_UNKNOWN = + ("HTTP/1.1 503 Service Unavailable\r\n"+ + "Content-Type: text/html; charset=iso-8859-1\r\n"+ + "Cache-control: no-cache\r\n"+ + "\r\n"+ + "

I2P ERROR: DESTINATION NOT FOUND

"+ + "That I2P Destination was not found. "+ + "The host (or the outproxy, if you're using one) could also "+ + "be temporarily offline. You may want to retry. "+ + "Could not find the following Destination:

") + .getBytes(); + + private final static byte[] ERR_NO_OUTPROXY = + ("HTTP/1.1 503 Service Unavailable\r\n"+ + "Content-Type: text/html; charset=iso-8859-1\r\n"+ + "Cache-control: no-cache\r\n"+ + "\r\n"+ + "

I2P ERROR: No outproxy found

"+ + "Your request was for a site outside of I2P, but you have no "+ + "HTTP outproxy configured. Please configure an outproxy in I2PTunnel") + .getBytes(); + + private final static byte[] ERR_BAD_PROTOCOL = + ("HTTP/1.1 405 Bad Method\r\n"+ + "Content-Type: text/html; charset=iso-8859-1\r\n"+ + "Cache-control: no-cache\r\n"+ + "\r\n"+ + "

I2P ERROR: METHOD NOT ALLOWED

"+ + "The request uses a bad protocol. "+ + "The Connect Proxy supports CONNECT requests ONLY. Other methods such as GET are not allowed - Maybe you wanted the HTTP Proxy?.
") + .getBytes(); + + private final static byte[] ERR_LOCALHOST = + ("HTTP/1.1 403 Access Denied\r\n"+ + "Content-Type: text/html; charset=iso-8859-1\r\n"+ + "Cache-control: no-cache\r\n"+ + "\r\n"+ + "

I2P ERROR: REQUEST DENIED

"+ + "Your browser is misconfigured. Do not use the proxy to access the router console or other localhost destinations.
") + .getBytes(); + + private final static byte[] SUCCESS_RESPONSE = + ("HTTP/1.1 200 Connection Established\r\n"+ + "Proxy-agent: I2P\r\n"+ + "\r\n") + .getBytes(); + + /** used to assign unique IDs to the threads / clients. no logic or functionality */ + private static volatile long __clientId = 0; + + /** + * @throws IllegalArgumentException if the I2PTunnel does not contain + * valid config to contact the router + */ + public I2PTunnelConnectClient(int localPort, Logging l, boolean ownDest, + String wwwProxy, EventDispatcher notifyThis, + I2PTunnel tunnel) throws IllegalArgumentException { + super(localPort, ownDest, l, notifyThis, "HTTPHandler " + (++__clientId), tunnel); + + if (waitEventValue("openBaseClientResult").equals("error")) { + notifyEvent("openConnectClientResult", "error"); + return; + } + + _proxyList = new ArrayList(); + if (wwwProxy != null) { + StringTokenizer tok = new StringTokenizer(wwwProxy, ","); + while (tok.hasMoreTokens()) + _proxyList.add(tok.nextToken().trim()); + } + + setName(getLocalPort() + " -> ConnectClient [Outproxy list: " + wwwProxy + "]"); + + startRunning(); + } + + private String getPrefix(long requestId) { return "Client[" + _clientId + "/" + requestId + "]: "; } + + private String selectProxy() { + synchronized (_proxyList) { + int size = _proxyList.size(); + if (size <= 0) + return null; + int index = I2PAppContext.getGlobalContext().random().nextInt(size); + return _proxyList.get(index); + } + } + + private static final int DEFAULT_READ_TIMEOUT = 60*1000; + + /** + * create the default options (using the default timeout, etc) + * + */ + protected I2PSocketOptions getDefaultOptions() { + Properties defaultOpts = getTunnel().getClientOptions(); + if (!defaultOpts.contains(I2PSocketOptions.PROP_READ_TIMEOUT)) + defaultOpts.setProperty(I2PSocketOptions.PROP_READ_TIMEOUT, ""+DEFAULT_READ_TIMEOUT); + if (!defaultOpts.contains("i2p.streaming.inactivityTimeout")) + defaultOpts.setProperty("i2p.streaming.inactivityTimeout", 
""+DEFAULT_READ_TIMEOUT); + I2PSocketOptions opts = sockMgr.buildOptions(defaultOpts); + if (!defaultOpts.containsKey(I2PSocketOptions.PROP_CONNECT_TIMEOUT)) + opts.setConnectTimeout(DEFAULT_CONNECT_TIMEOUT); + return opts; + } + + private static long __requestId = 0; + protected void clientConnectionRun(Socket s) { + InputStream in = null; + OutputStream out = null; + String targetRequest = null; + boolean usingWWWProxy = false; + String currentProxy = null; + long requestId = ++__requestId; + try { + out = s.getOutputStream(); + in = s.getInputStream(); + String line, method = null, host = null, destination = null, restofline = null; + StringBuffer newRequest = new StringBuffer(); + int ahelper = 0; + while (true) { + // Use this rather than BufferedReader because we can't have readahead, + // since we are passing the stream on to I2PTunnelRunner + line = DataHelper.readLine(in); + line = line.trim(); + if (_log.shouldLog(Log.DEBUG)) + _log.debug(getPrefix(requestId) + "Line=[" + line + "]"); + + if (method == null) { // first line CONNECT blah.i2p:80 HTTP/1.1 + int pos = line.indexOf(" "); + if (pos == -1) break; // empty first line + method = line.substring(0, pos); + String request = line.substring(pos + 1); + + pos = request.indexOf(":"); + if (pos == -1) + pos = request.indexOf(" "); + if (pos == -1) { + host = request; + restofline = ""; + } else { + host = request.substring(0, pos); + restofline = request.substring(pos); // ":80 HTTP/1.1" or " HTTP/1.1" + } + + if (host.toLowerCase().endsWith(".i2p")) { + // Destination gets the host name + destination = host; + } else if (host.indexOf(".") != -1) { + // The request must be forwarded to a outproxy + currentProxy = selectProxy(); + if (currentProxy == null) { + if (_log.shouldLog(Log.WARN)) + _log.warn(getPrefix(requestId) + "Host wants to be outproxied, but we dont have any!"); + writeErrorMessage(ERR_NO_OUTPROXY, out); + s.close(); + return; + } + destination = currentProxy; + usingWWWProxy = true; + 
newRequest.append("CONNECT ").append(host).append(restofline).append("\r\n\r\n"); // HTTP spec + } else if (host.toLowerCase().equals("localhost")) { + writeErrorMessage(ERR_LOCALHOST, out); + s.close(); + return; + } else { // full b64 address (hopefully) + destination = host; + } + targetRequest = host; + + if (_log.shouldLog(Log.DEBUG)) { + _log.debug(getPrefix(requestId) + "METHOD:" + method + ":"); + _log.debug(getPrefix(requestId) + "HOST :" + host + ":"); + _log.debug(getPrefix(requestId) + "REST :" + restofline + ":"); + _log.debug(getPrefix(requestId) + "DEST :" + destination + ":"); + } + + } else if (line.length() > 0) { + // Additional lines - shouldn't be too many. Firefox sends: + // User-Agent: blabla + // Proxy-Connection: keep-alive + // Host: blabla.i2p + // + // We could send these (filtered like in HTTPClient) on to the outproxy, + // but for now just chomp them all. + line = null; + } else { + // do it + break; + } + } + + if (destination == null || !"CONNECT".equalsIgnoreCase(method)) { + writeErrorMessage(ERR_BAD_PROTOCOL, out); + s.close(); + return; + } + + Destination dest = I2PTunnel.destFromName(destination); + if (dest == null) { + String str; + byte[] header; + if (usingWWWProxy) + str = FileUtil.readTextFile("docs/dnfp-header.ht", 100, true); + else + str = FileUtil.readTextFile("docs/dnfh-header.ht", 100, true); + if (str != null) + header = str.getBytes(); + else + header = ERR_DESTINATION_UNKNOWN; + writeErrorMessage(header, out, targetRequest, usingWWWProxy, destination); + s.close(); + return; + } + + I2PSocket i2ps = createI2PSocket(dest, getDefaultOptions()); + byte[] data = null; + byte[] response = null; + if (usingWWWProxy) + data = newRequest.toString().getBytes("ISO-8859-1"); + else + response = SUCCESS_RESPONSE; + Runnable onTimeout = new OnTimeout(s, s.getOutputStream(), targetRequest, usingWWWProxy, currentProxy, requestId); + I2PTunnelRunner runner = new I2PTunnelRunner(s, i2ps, sockLock, data, response, mySockets, 
onTimeout); + } catch (SocketException ex) { + _log.info(getPrefix(requestId) + "Error trying to connect", ex); + handleConnectClientException(ex, out, targetRequest, usingWWWProxy, currentProxy, requestId); + closeSocket(s); + } catch (IOException ex) { + _log.info(getPrefix(requestId) + "Error trying to connect", ex); + handleConnectClientException(ex, out, targetRequest, usingWWWProxy, currentProxy, requestId); + closeSocket(s); + } catch (I2PException ex) { + _log.info("getPrefix(requestId) + Error trying to connect", ex); + handleConnectClientException(ex, out, targetRequest, usingWWWProxy, currentProxy, requestId); + closeSocket(s); + } catch (OutOfMemoryError oom) { + IOException ex = new IOException("OOM"); + _log.info("getPrefix(requestId) + Error trying to connect", ex); + handleConnectClientException(ex, out, targetRequest, usingWWWProxy, currentProxy, requestId); + closeSocket(s); + } + } + + private static class OnTimeout implements Runnable { + private Socket _socket; + private OutputStream _out; + private String _target; + private boolean _usingProxy; + private String _wwwProxy; + private long _requestId; + public OnTimeout(Socket s, OutputStream out, String target, boolean usingProxy, String wwwProxy, long id) { + _socket = s; + _out = out; + _target = target; + _usingProxy = usingProxy; + _wwwProxy = wwwProxy; + _requestId = id; + } + public void run() { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Timeout occured requesting " + _target); + handleConnectClientException(new RuntimeException("Timeout"), _out, + _target, _usingProxy, _wwwProxy, _requestId); + closeSocket(_socket); + } + } + + private static void writeErrorMessage(byte[] errMessage, OutputStream out) throws IOException { + if (out == null) + return; + out.write(errMessage); + out.write("\n\n".getBytes()); + out.flush(); + } + + private static void writeErrorMessage(byte[] errMessage, OutputStream out, String targetRequest, + boolean usingWWWProxy, String wwwProxy) throws IOException 
{ + if (out != null) { + out.write(errMessage); + if (targetRequest != null) { + out.write(targetRequest.getBytes()); + if (usingWWWProxy) + out.write(("
WWW proxy: " + wwwProxy).getBytes()); + } + out.write("
".getBytes()); + out.write("\n\n".getBytes()); + out.flush(); + } + } + + private static void handleConnectClientException(Exception ex, OutputStream out, String targetRequest, + boolean usingWWWProxy, String wwwProxy, long requestId) { + if (out == null) + return; + try { + String str; + byte[] header; + if (usingWWWProxy) + str = FileUtil.readTextFile("docs/dnfp-header.ht", 100, true); + else + str = FileUtil.readTextFile("docs/dnf-header.ht", 100, true); + if (str != null) + header = str.getBytes(); + else + header = ERR_DESTINATION_UNKNOWN; + writeErrorMessage(header, out, targetRequest, usingWWWProxy, wwwProxy); + } catch (IOException ioe) {} + } +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelHTTPClient.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelHTTPClient.java index a7c76e2f3..8e5ef9f4d 100644 --- a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelHTTPClient.java +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelHTTPClient.java @@ -5,6 +5,7 @@ package net.i2p.i2ptunnel; import java.io.BufferedReader; import java.io.IOException; +import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; import java.net.Socket; @@ -21,6 +22,7 @@ import net.i2p.I2PException; import net.i2p.client.streaming.I2PSocket; import net.i2p.client.streaming.I2PSocketOptions; import net.i2p.data.DataFormatException; +import net.i2p.data.DataHelper; import net.i2p.data.Destination; import net.i2p.util.EventDispatcher; import net.i2p.util.FileUtil; @@ -131,16 +133,6 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable "Your browser is misconfigured. Do not use the proxy to access the router console or other localhost destinations.
") .getBytes(); - private final static int MAX_POSTBYTES = 20*1024*1024; // arbitrary but huge - all in memory, no temp file - private final static byte[] ERR_MAXPOST = - ("HTTP/1.1 503 Bad POST\r\n"+ - "Content-Type: text/html; charset=iso-8859-1\r\n"+ - "Cache-control: no-cache\r\n"+ - "\r\n"+ - "

I2P ERROR: REQUEST DENIED

"+ - "The maximum POST size is " + MAX_POSTBYTES + " bytes.
") - .getBytes(); - /** used to assign unique IDs to the threads / clients. no logic or functionality */ private static volatile long __clientId = 0; @@ -193,7 +185,7 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable /** * create the default options (using the default timeout, etc) - * + * unused? */ protected I2PSocketOptions getDefaultOptions() { Properties defaultOpts = getTunnel().getClientOptions(); @@ -218,6 +210,9 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable defaultOpts.setProperty(I2PSocketOptions.PROP_READ_TIMEOUT, ""+DEFAULT_READ_TIMEOUT); if (!defaultOpts.contains("i2p.streaming.inactivityTimeout")) defaultOpts.setProperty("i2p.streaming.inactivityTimeout", ""+DEFAULT_READ_TIMEOUT); + // delayed start + if (sockMgr == null) + sockMgr = getSocketManager(); I2PSocketOptions opts = sockMgr.buildOptions(defaultOpts); if (!defaultOpts.containsKey(I2PSocketOptions.PROP_CONNECT_TIMEOUT)) opts.setConnectTimeout(DEFAULT_CONNECT_TIMEOUT); @@ -232,6 +227,7 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable private static long __requestId = 0; protected void clientConnectionRun(Socket s) { + InputStream in = null; OutputStream out = null; String targetRequest = null; boolean usingWWWProxy = false; @@ -239,11 +235,12 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable long requestId = ++__requestId; try { out = s.getOutputStream(); - BufferedReader br = new BufferedReader(new InputStreamReader(s.getInputStream(), "ISO-8859-1")); + InputReader reader = new InputReader(s.getInputStream()); String line, method = null, protocol = null, host = null, destination = null; StringBuffer newRequest = new StringBuffer(); int ahelper = 0; - while ((line = br.readLine()) != null) { + while ((line = reader.readLine(method)) != null) { + line = line.trim(); if (_log.shouldLog(Log.DEBUG)) _log.debug(getPrefix(requestId) + "Line=[" + line + "]"); @@ 
-257,7 +254,6 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable if (_log.shouldLog(Log.DEBUG)) _log.debug(getPrefix(requestId) + "Method is null for [" + line + "]"); - line = line.trim(); int pos = line.indexOf(" "); if (pos == -1) break; method = line.substring(0, pos); @@ -470,7 +466,7 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable if (_log.shouldLog(Log.INFO)) _log.info(getPrefix(requestId) + "Setting host = " + host); } else if (lowercaseLine.startsWith("user-agent: ") && - !Boolean.valueOf(getTunnel().getContext().getProperty(PROP_USER_AGENT)).booleanValue()) { + !Boolean.valueOf(getTunnel().getClientOptions().getProperty(PROP_USER_AGENT)).booleanValue()) { line = null; continue; } else if (lowercaseLine.startsWith("accept")) { @@ -479,13 +475,13 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable line = null; continue; } else if (lowercaseLine.startsWith("referer: ") && - !Boolean.valueOf(getTunnel().getContext().getProperty(PROP_REFERER)).booleanValue()) { + !Boolean.valueOf(getTunnel().getClientOptions().getProperty(PROP_REFERER)).booleanValue()) { // Shouldn't we be more specific, like accepting in-site referers ? 
//line = "Referer: i2p"; line = null; continue; // completely strip the line } else if (lowercaseLine.startsWith("via: ") && - !Boolean.valueOf(getTunnel().getContext().getProperty(PROP_VIA)).booleanValue()) { + !Boolean.valueOf(getTunnel().getClientOptions().getProperty(PROP_VIA)).booleanValue()) { //line = "Via: i2p"; line = null; continue; // completely strip the line @@ -498,7 +494,7 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable if (line.length() == 0) { - String ok = getTunnel().getContext().getProperty("i2ptunnel.gzip"); + String ok = getTunnel().getClientOptions().getProperty("i2ptunnel.gzip"); boolean gzip = DEFAULT_GZIP; if (ok != null) gzip = Boolean.valueOf(ok).booleanValue(); @@ -509,35 +505,16 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable newRequest.append("Accept-Encoding: \r\n"); newRequest.append("X-Accept-Encoding: x-i2p-gzip;q=1.0, identity;q=0.5, deflate;q=0, gzip;q=0, *;q=0\r\n"); } - if (!Boolean.valueOf(getTunnel().getContext().getProperty(PROP_USER_AGENT)).booleanValue()) + if (!Boolean.valueOf(getTunnel().getClientOptions().getProperty(PROP_USER_AGENT)).booleanValue()) newRequest.append("User-Agent: MYOB/6.66 (AN/ON)\r\n"); newRequest.append("Connection: close\r\n\r\n"); break; } else { - newRequest.append(line.trim()).append("\r\n"); // HTTP spec + newRequest.append(line).append("\r\n"); // HTTP spec } } if (_log.shouldLog(Log.DEBUG)) _log.debug(getPrefix(requestId) + "NewRequest header: [" + newRequest.toString() + "]"); - - int postbytes = 0; - while (br.ready()) { // empty the buffer (POST requests) - int i = br.read(); - if (i != -1) { - newRequest.append((char) i); - if (++postbytes > MAX_POSTBYTES) { - if (out != null) { - out.write(ERR_MAXPOST); - out.write("

Generated on: ".getBytes()); - out.write(new Date().toString().getBytes()); - out.write("\n".getBytes()); - out.flush(); - } - s.close(); - return; - } - } - } if (method == null || destination == null) { l.log("No HTTP method found in the request."); @@ -560,7 +537,7 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable Destination dest = I2PTunnel.destFromName(destination); if (dest == null) { - l.log("Could not resolve " + destination + "."); + //l.log("Could not resolve " + destination + "."); if (_log.shouldLog(Log.WARN)) _log.warn("Unable to resolve " + destination + " (proxy? " + usingWWWProxy + ", request: " + targetRequest); String str; @@ -570,6 +547,8 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable str = FileUtil.readTextFile("docs/dnfp-header.ht", 100, true); else if(ahelper != 0) str = FileUtil.readTextFile("docs/dnfb-header.ht", 100, true); + else if (destination.length() == 60 && destination.endsWith(".b32.i2p")) + str = FileUtil.readTextFile("docs/dnf-header.ht", 100, true); else { str = FileUtil.readTextFile("docs/dnfh-header.ht", 100, true); showAddrHelper = true; @@ -608,8 +587,8 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable l.log(ex.getMessage()); handleHTTPClientException(ex, out, targetRequest, usingWWWProxy, currentProxy, requestId); closeSocket(s); - } catch (OutOfMemoryError oom) { // mainly for huge POSTs - IOException ex = new IOException("OOM (in POST?)"); + } catch (OutOfMemoryError oom) { + IOException ex = new IOException("OOM"); _log.info("getPrefix(requestId) + Error trying to connect", ex); l.log(ex.getMessage()); handleHTTPClientException(ex, out, targetRequest, usingWWWProxy, currentProxy, requestId); @@ -617,6 +596,29 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable } } + /** + * Read the first line unbuffered. + * After that, switch to a BufferedReader, unless the method is "POST". 
+ * We can't use BufferedReader for POST because we can't have readahead, + * since we are passing the stream on to I2PTunnelRunner for the POST data. + * + */ + private static class InputReader { + BufferedReader _br; + InputStream _s; + public InputReader(InputStream s) { + _br = null; + _s = s; + } + String readLine(String method) throws IOException { + if (method == null || "POST".equals(method)) + return DataHelper.readLine(_s); + if (_br == null) + _br = new BufferedReader(new InputStreamReader(_s, "ISO-8859-1")); + return _br.readLine(); + } + } + private final static String getHostName(String host) { if (host == null) return null; try { @@ -628,7 +630,7 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable } } - private class OnTimeout implements Runnable { + private static class OnTimeout implements Runnable { private Socket _socket; private OutputStream _out; private String _target; @@ -706,11 +708,12 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable } } - private void handleHTTPClientException(Exception ex, OutputStream out, String targetRequest, + private static void handleHTTPClientException(Exception ex, OutputStream out, String targetRequest, boolean usingWWWProxy, String wwwProxy, long requestId) { - if (_log.shouldLog(Log.WARN)) - _log.warn(getPrefix(requestId) + "Error sending to " + wwwProxy + " (proxy? " + usingWWWProxy + ", request: " + targetRequest, ex); + // static + //if (_log.shouldLog(Log.WARN)) + // _log.warn(getPrefix(requestId) + "Error sending to " + wwwProxy + " (proxy? 
" + usingWWWProxy + ", request: " + targetRequest, ex); if (out != null) { try { String str; @@ -725,16 +728,18 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable header = ERR_DESTINATION_UNKNOWN; writeErrorMessage(header, out, targetRequest, usingWWWProxy, wwwProxy, false); } catch (IOException ioe) { - _log.warn(getPrefix(requestId) + "Error writing out the 'destination was unknown' " + "message", ioe); + // static + //_log.warn(getPrefix(requestId) + "Error writing out the 'destination was unknown' " + "message", ioe); } } else { - _log.warn(getPrefix(requestId) + "Client disconnected before we could say that destination " + "was unknown", ex); + // static + //_log.warn(getPrefix(requestId) + "Client disconnected before we could say that destination " + "was unknown", ex); } } private final static String SUPPORTED_HOSTS[] = { "i2p", "www.i2p.com", "i2p."}; - private boolean isSupportedAddress(String host, String protocol) { + private static boolean isSupportedAddress(String host, String protocol) { if ((host == null) || (protocol == null)) return false; boolean found = false; String lcHost = host.toLowerCase(); diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelHTTPServer.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelHTTPServer.java index 658dd5e32..d6cb40a25 100644 --- a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelHTTPServer.java +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelHTTPServer.java @@ -124,8 +124,17 @@ public class I2PTunnelHTTPServer extends I2PTunnelServer { _log.error("Error while closing the received i2p con", ex); } } catch (IOException ex) { + try { + socket.close(); + } catch (IOException ioe) {} if (_log.shouldLog(Log.WARN)) _log.warn("Error while receiving the new HTTP request", ex); + } catch (OutOfMemoryError oom) { + try { + socket.close(); + } catch (IOException ioe) {} + if (_log.shouldLog(Log.ERROR)) + _log.error("OOM in HTTP server", oom); } long afterHandle = 
getTunnel().getContext().clock().now(); @@ -162,7 +171,24 @@ public class I2PTunnelHTTPServer extends I2PTunnelServer { sender.start(); browserout = _browser.getOutputStream(); - serverin = _webserver.getInputStream(); + // NPE seen here in 0.7-7, caused by addition of socket.close() in the + // catch (IOException ioe) block above in blockingHandle() ??? + // CRIT [ad-130280.hc] net.i2p.util.I2PThread : Killing thread Thread-130280.hc + // java.lang.NullPointerException + // at java.io.FileInputStream.(FileInputStream.java:131) + // at java.net.SocketInputStream.(SocketInputStream.java:44) + // at java.net.PlainSocketImpl.getInputStream(PlainSocketImpl.java:401) + // at java.net.Socket$2.run(Socket.java:779) + // at java.security.AccessController.doPrivileged(Native Method) + // at java.net.Socket.getInputStream(Socket.java:776) + // at net.i2p.i2ptunnel.I2PTunnelHTTPServer$CompressedRequestor.run(I2PTunnelHTTPServer.java:174) + // at java.lang.Thread.run(Thread.java:619) + // at net.i2p.util.I2PThread.run(I2PThread.java:71) + try { + serverin = _webserver.getInputStream(); + } catch (NullPointerException npe) { + throw new IOException("getInputStream NPE"); + } CompressedResponseOutputStream compressedOut = new CompressedResponseOutputStream(browserout); Sender s = new Sender(compressedOut, serverin, "server: server to browser"); if (_log.shouldLog(Log.INFO)) diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelIRCClient.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelIRCClient.java index e6708aa21..732c222a7 100644 --- a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelIRCClient.java +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelIRCClient.java @@ -39,12 +39,12 @@ public class I2PTunnelIRCClient extends I2PTunnelClientBase implements Runnable Logging l, boolean ownDest, EventDispatcher notifyThis, - I2PTunnel tunnel) throws IllegalArgumentException { + I2PTunnel tunnel, String pkf) throws IllegalArgumentException { 
super(localPort, ownDest, l, notifyThis, - "IRCHandler " + (++__clientId), tunnel); + "IRCHandler " + (++__clientId), tunnel, pkf); StringTokenizer tok = new StringTokenizer(destinations, ","); dests = new ArrayList(1); @@ -83,9 +83,9 @@ public class I2PTunnelIRCClient extends I2PTunnelClientBase implements Runnable i2ps = createI2PSocket(dest); i2ps.setReadTimeout(readTimeout); StringBuffer expectedPong = new StringBuffer(); - Thread in = new I2PThread(new IrcInboundFilter(s,i2ps, expectedPong)); + Thread in = new I2PThread(new IrcInboundFilter(s,i2ps, expectedPong), "IRC Client " + __clientId + " in"); in.start(); - Thread out = new I2PThread(new IrcOutboundFilter(s,i2ps, expectedPong)); + Thread out = new I2PThread(new IrcOutboundFilter(s,i2ps, expectedPong), "IRC Client " + __clientId + " out"); out.start(); } catch (Exception ex) { if (_log.shouldLog(Log.ERROR)) diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelIRCServer.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelIRCServer.java new file mode 100644 index 000000000..7e12aa30a --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/I2PTunnelIRCServer.java @@ -0,0 +1,191 @@ +package net.i2p.i2ptunnel; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.InetAddress; +import java.net.Socket; +import java.net.SocketException; +import java.util.Properties; + +import net.i2p.I2PAppContext; +import net.i2p.client.streaming.I2PSocket; +import net.i2p.crypto.SHA256Generator; +import net.i2p.data.DataFormatException; +import net.i2p.data.DataHelper; +import net.i2p.data.Destination; +import net.i2p.data.Hash; +import net.i2p.data.Base32; +import net.i2p.util.EventDispatcher; +import net.i2p.util.I2PThread; +import net.i2p.util.Log; + +/** + * Simple extension to the I2PTunnelServer that filters the registration + * sequence to pass the destination hash of the client through as the hostname, + * so an IRC Server 
may track users across nick changes. + * + * Of course, this requires the ircd actually use the hostname sent by + * the client rather than the IP. It is common for ircds to ignore the + * hostname in the USER message (unless it's coming from another server) + * since it is easily spoofed. So you have to fix or, if you are lucky, + * configure your ircd first. At least in unrealircd and ngircd this is + * not configurable. + * + * There are three options for mangling the desthash. Put the option in the + * "custom options" section of i2ptunnel. + * - ircserver.cloakKey unset: Cloak with a random value that is persistent for + * the life of this tunnel. This is the default. + * - ircserver.cloakKey=somepassphrase: Cloak with the hash of the passphrase. Use this to + * have consistent mangling across restarts, or to + * have multiple IRC servers cloak consistently to + * be able to track users even when they switch servers. + * Note: don't quote or put spaces in the passphrase, + * the i2ptunnel gui can't handle it. + * - ircserver.fakeHostname=%f.b32.i2p: Set the fake hostname sent by I2PTunnel, + * %f is the full B32 destination hash + * %c is the cloaked hash. + * + * There is no outbound filtering. 
+ * + * @author zzz + */ +public class I2PTunnelIRCServer extends I2PTunnelServer implements Runnable { + public static final String PROP_CLOAK="ircserver.cloakKey"; + public static final String PROP_HOSTNAME="ircserver.fakeHostname"; + public static final String PROP_HOSTNAME_DEFAULT="%f.b32.i2p"; + + private static final Log _log = new Log(I2PTunnelIRCServer.class); + + + /** + * @throws IllegalArgumentException if the I2PTunnel does not contain + * valid config to contact the router + */ + + public I2PTunnelIRCServer(InetAddress host, int port, File privkey, String privkeyname, Logging l, EventDispatcher notifyThis, I2PTunnel tunnel) { + super(host, port, privkey, privkeyname, l, notifyThis, tunnel); + initCloak(tunnel); + } + + /** generate a random 32 bytes, or the hash of the passphrase */ + private void initCloak(I2PTunnel tunnel) { + Properties opts = tunnel.getClientOptions(); + String passphrase = opts.getProperty(PROP_CLOAK); + if (passphrase == null) { + this.cloakKey = new byte[Hash.HASH_LENGTH]; + tunnel.getContext().random().nextBytes(this.cloakKey); + } else { + this.cloakKey = SHA256Generator.getInstance().calculateHash(passphrase.trim().getBytes()).getData(); + } + + this.hostname = opts.getProperty(PROP_HOSTNAME, PROP_HOSTNAME_DEFAULT); + } + + protected void blockingHandle(I2PSocket socket) { + try { + // give them 15 seconds to send in the request + socket.setReadTimeout(15*1000); + InputStream in = socket.getInputStream(); + String modifiedRegistration = filterRegistration(in, cloakDest(socket.getPeerDestination())); + socket.setReadTimeout(readTimeout); + Socket s = new Socket(remoteHost, remotePort); + new I2PTunnelRunner(s, socket, slock, null, modifiedRegistration.getBytes(), null); + } catch (SocketException ex) { + try { + socket.close(); + } catch (IOException ioe) { + if (_log.shouldLog(Log.ERROR)) + _log.error("Error while closing the received i2p con", ex); + } + } catch (IOException ex) { + try { + socket.close(); + } catch 
(IOException ioe) {} + if (_log.shouldLog(Log.WARN)) + _log.warn("Error while receiving the new IRC Connection", ex); + } catch (OutOfMemoryError oom) { + try { + socket.close(); + } catch (IOException ioe) {} + if (_log.shouldLog(Log.ERROR)) + _log.error("OOM in IRC server", oom); + } + } + + /** + * (Optionally) append 32 bytes of crap to the destination then return + * the first few characters of the hash of the whole thing, + ".i2p". + * Or do we want the full hash if the ircd is going to use this for + * nickserv auto-login? Or even Base32 if it will be used in a + * case-insensitive manner? + * + */ + String cloakDest(Destination d) { + String hf; + String hc; + + byte[] b = new byte[d.size() + this.cloakKey.length]; + System.arraycopy(b, 0, d.toByteArray(), 0, d.size()); + System.arraycopy(b, d.size(), this.cloakKey, 0, this.cloakKey.length); + hc = Base32.encode(SHA256Generator.getInstance().calculateHash(b).getData()); + + hf = Base32.encode(d.calculateHash().getData()); + + return this.hostname.replace("%f", hf).replace("%c", hc); + } + + /** keep reading until we see USER or SERVER */ + private String filterRegistration(InputStream in, String newHostname) throws IOException { + StringBuffer buf = new StringBuffer(128); + int lineCount = 0; + + while (true) { + String s = DataHelper.readLine(in); + if (s == null) + throw new IOException("EOF reached before the end of the headers [" + buf.toString() + "]"); + if (++lineCount > 10) + throw new IOException("Too many lines before USER or SERVER, giving up"); + s = s.trim(); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Got line: " + s); + + String field[]=s.split(" ",5); + String command; + int idx=0; + + if(field[0].charAt(0)==':') + idx++; + + try { + command = field[idx++]; + } catch (IndexOutOfBoundsException ioobe) { + // wtf, server sent borked command? 
+ throw new IOException("Dropping defective message: index out of bounds while extracting command."); + } + + if ("USER".equalsIgnoreCase(command)) { + if (field.length < idx + 4) + throw new IOException("Too few parameters in USER message: " + s); + // USER zzz1 hostname localhost :zzz + // => + // USER zzz1 abcd1234.i2p localhost :zzz + // this whole class is for these two lines... + buf.append("USER ").append(field[idx]).append(' ').append(newHostname); + buf.append(' '); + buf.append(field[idx+2]).append(' ').append(field[idx+3]).append("\r\n"); + break; + } + buf.append(s).append("\r\n"); + if ("SERVER".equalsIgnoreCase(command)) + break; + } + if (_log.shouldLog(Log.DEBUG)) + _log.debug("All done, sending: " + buf.toString()); + return buf.toString(); + } + + private byte[] cloakKey; // 32 bytes of stuff to scramble the dest with + private String hostname; +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/TunnelController.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/TunnelController.java index 614bc1f74..419e5a899 100644 --- a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/TunnelController.java +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/TunnelController.java @@ -14,6 +14,7 @@ import net.i2p.I2PException; import net.i2p.client.I2PClient; import net.i2p.client.I2PClientFactory; import net.i2p.client.I2PSession; +import net.i2p.data.Base32; import net.i2p.data.Destination; import net.i2p.util.I2PThread; import net.i2p.util.Log; @@ -57,7 +58,7 @@ public class TunnelController implements Logging { setConfig(config, prefix); _messages = new ArrayList(4); _running = false; - if (createKey && ("server".equals(getType()) || "httpserver".equals(getType())) ) + if (createKey && (getType().endsWith("server") || getPersistentClientKey())) createPrivateKey(); _starting = getStartOnLoad(); } @@ -72,7 +73,7 @@ public class TunnelController implements Logging { File keyFile = new File(getPrivKeyFile()); if (keyFile.exists()) { - log("Not overwriting existing 
private keys in " + keyFile.getAbsolutePath()); + //log("Not overwriting existing private keys in " + keyFile.getAbsolutePath()); return; } else { File parent = keyFile.getParentFile(); @@ -86,6 +87,7 @@ public class TunnelController implements Logging { String destStr = dest.toBase64(); log("Private key created and saved in " + keyFile.getAbsolutePath()); log("New destination: " + destStr); + log("Base32: " + Base32.encode(dest.calculateHash().getData()) + ".b32.i2p"); } catch (I2PException ie) { if (_log.shouldLog(Log.ERROR)) _log.error("Error creating new destination", ie); @@ -132,25 +134,38 @@ public class TunnelController implements Logging { _log.warn("Cannot start the tunnel - no type specified"); return; } + setI2CPOptions(); + setSessionOptions(); if ("httpclient".equals(type)) { startHttpClient(); - }else if("ircclient".equals(type)) { + } else if("ircclient".equals(type)) { startIrcClient(); + } else if("sockstunnel".equals(type)) { + startSocksClient(); + } else if("connectclient".equals(type)) { + startConnectClient(); } else if ("client".equals(type)) { startClient(); + } else if ("streamrclient".equals(type)) { + startStreamrClient(); } else if ("server".equals(type)) { startServer(); } else if ("httpserver".equals(type)) { startHttpServer(); + } else if ("ircserver".equals(type)) { + startIrcServer(); + } else if ("streamrserver".equals(type)) { + startStreamrServer(); } else { if (_log.shouldLog(Log.ERROR)) _log.error("Cannot start tunnel - unknown type [" + type + "]"); + return; } + acquire(); + _running = true; } private void startHttpClient() { - setI2CPOptions(); - setSessionOptions(); setListenOn(); String listenPort = getListenPort(); String proxyList = getProxyList(); @@ -159,20 +174,62 @@ public class TunnelController implements Logging { _tunnel.runHttpClient(new String[] { listenPort, sharedClient }, this); else _tunnel.runHttpClient(new String[] { listenPort, sharedClient, proxyList }, this); - acquire(); - _running = true; + } + + 
private void startConnectClient() { + setListenOn(); + String listenPort = getListenPort(); + String proxyList = getProxyList(); + String sharedClient = getSharedClient(); + if (proxyList == null) + _tunnel.runConnectClient(new String[] { listenPort, sharedClient }, this); + else + _tunnel.runConnectClient(new String[] { listenPort, sharedClient, proxyList }, this); } private void startIrcClient() { - setI2CPOptions(); - setSessionOptions(); setListenOn(); String listenPort = getListenPort(); String dest = getTargetDestination(); String sharedClient = getSharedClient(); - _tunnel.runIrcClient(new String[] { listenPort, dest, sharedClient }, this); - acquire(); - _running = true; + if (getPersistentClientKey()) { + String privKeyFile = getPrivKeyFile(); + _tunnel.runIrcClient(new String[] { listenPort, dest, sharedClient, privKeyFile }, this); + } else { + _tunnel.runIrcClient(new String[] { listenPort, dest, sharedClient }, this); + } + } + + private void startSocksClient() { + setListenOn(); + String listenPort = getListenPort(); + String sharedClient = getSharedClient(); + _tunnel.runSOCKSTunnel(new String[] { listenPort, sharedClient }, this); + } + + /* + * Streamr client is a UDP server, use the listenPort field for targetPort + * and the listenOnInterface field for the targetHost + */ + private void startStreamrClient() { + String targetHost = getListenOnInterface(); + String targetPort = getListenPort(); + String dest = getTargetDestination(); + _tunnel.runStreamrClient(new String[] { targetHost, targetPort, dest }, this); + } + + /** + * Streamr server is a UDP client, use the targetPort field for listenPort + * and the targetHost field for the listenOnInterface + */ + private void startStreamrServer() { + String listenOn = getTargetHost(); + if ( (listenOn != null) && (listenOn.length() > 0) ) { + _tunnel.runListenOn(new String[] { listenOn }, this); + } + String listenPort = getTargetPort(); + String privKeyFile = getPrivKeyFile(); + 
_tunnel.runStreamrServer(new String[] { listenPort, privKeyFile }, this); } /** @@ -208,38 +265,38 @@ public class TunnelController implements Logging { } private void startClient() { - setI2CPOptions(); - setSessionOptions(); setListenOn(); String listenPort = getListenPort(); String dest = getTargetDestination(); String sharedClient = getSharedClient(); - _tunnel.runClient(new String[] { listenPort, dest, sharedClient }, this); - acquire(); - _running = true; + if (getPersistentClientKey()) { + String privKeyFile = getPrivKeyFile(); + _tunnel.runClient(new String[] { listenPort, dest, sharedClient, privKeyFile }, this); + } else { + _tunnel.runClient(new String[] { listenPort, dest, sharedClient }, this); + } } private void startServer() { - setI2CPOptions(); - setSessionOptions(); String targetHost = getTargetHost(); String targetPort = getTargetPort(); String privKeyFile = getPrivKeyFile(); _tunnel.runServer(new String[] { targetHost, targetPort, privKeyFile }, this); - acquire(); - _running = true; } private void startHttpServer() { - setI2CPOptions(); - setSessionOptions(); String targetHost = getTargetHost(); String targetPort = getTargetPort(); String spoofedHost = getSpoofedHost(); String privKeyFile = getPrivKeyFile(); _tunnel.runHttpServer(new String[] { targetHost, targetPort, spoofedHost, privKeyFile }, this); - acquire(); - _running = true; + } + + private void startIrcServer() { + String targetHost = getTargetHost(); + String targetPort = getTargetPort(); + String privKeyFile = getPrivKeyFile(); + _tunnel.runIrcServer(new String[] { targetHost, targetPort, privKeyFile }, this); } private void setListenOn() { @@ -348,6 +405,7 @@ public class TunnelController implements Logging { public String getProxyList() { return _config.getProperty("proxyList"); } public String getSharedClient() { return _config.getProperty("sharedClient", "true"); } public boolean getStartOnLoad() { return "true".equalsIgnoreCase(_config.getProperty("startOnLoad", "true")); } + 
public boolean getPersistentClientKey() { return Boolean.valueOf(_config.getProperty("option.persistentClientKey")).booleanValue(); } public String getMyDestination() { if (_tunnel != null) { List sessions = _tunnel.getSessions(); @@ -361,6 +419,19 @@ public class TunnelController implements Logging { return null; } + public String getMyDestHashBase32() { + if (_tunnel != null) { + List sessions = _tunnel.getSessions(); + for (int i = 0; i < sessions.size(); i++) { + I2PSession session = (I2PSession)sessions.get(i); + Destination dest = session.getMyDestination(); + if (dest != null) + return Base32.encode(dest.calculateHash().getData()); + } + } + return null; + } + public boolean getIsRunning() { return _running; } public boolean getIsStarting() { return _starting; } diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/I2PSOCKSTunnel.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/I2PSOCKSTunnel.java index 9b216e13a..be398f770 100644 --- a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/I2PSOCKSTunnel.java +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/I2PSOCKSTunnel.java @@ -7,6 +7,12 @@ package net.i2p.i2ptunnel.socks; import java.net.Socket; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.StringTokenizer; import net.i2p.client.streaming.I2PSocket; import net.i2p.data.Destination; @@ -20,7 +26,7 @@ import net.i2p.util.Log; public class I2PSOCKSTunnel extends I2PTunnelClientBase { private static final Log _log = new Log(I2PSOCKSTunnel.class); - + private HashMap> proxies = null; // port# + "" or "default" -> hostname list protected Destination outProxyDest = null; //public I2PSOCKSTunnel(int localPort, Logging l, boolean ownDest) { @@ -36,7 +42,7 @@ public class I2PSOCKSTunnel extends I2PTunnelClientBase { } setName(getLocalPort() + " -> SOCKSTunnel"); - + parseOptions(); startRunning(); notifyEvent("openSOCKSTunnelResult", "ok"); @@ 
-46,11 +52,49 @@ public class I2PSOCKSTunnel extends I2PTunnelClientBase { try { SOCKSServer serv = SOCKSServerFactory.createSOCKSServer(s); Socket clientSock = serv.getClientSocket(); - I2PSocket destSock = serv.getDestinationI2PSocket(); + I2PSocket destSock = serv.getDestinationI2PSocket(this); new I2PTunnelRunner(clientSock, destSock, sockLock, null, mySockets); } catch (SOCKSException e) { _log.error("Error from SOCKS connection: " + e.getMessage()); closeSocket(s); } } -} \ No newline at end of file + + private static final String PROP_PROXY = "i2ptunnel.socks.proxy."; + private void parseOptions() { + Properties opts = getTunnel().getClientOptions(); + proxies = new HashMap(0); + for (Map.Entry e : opts.entrySet()) { + String prop = (String)e.getKey(); + if ((!prop.startsWith(PROP_PROXY)) || prop.length() <= PROP_PROXY.length()) + continue; + String port = prop.substring(PROP_PROXY.length()); + List proxyList = new ArrayList(1); + StringTokenizer tok = new StringTokenizer((String)e.getValue(), ", \t"); + while (tok.hasMoreTokens()) { + String proxy = tok.nextToken().trim(); + if (proxy.endsWith(".i2p")) + proxyList.add(proxy); + else + _log.error("Non-i2p SOCKS outproxy: " + proxy); + } + proxies.put(port, proxyList); + } + } + + public HashMap> getProxyMap() { + return proxies; + } + + public List getProxies(int port) { + List rv = proxies.get(port + ""); + if (rv == null) + rv = getDefaultProxies(); + return rv; + } + + public List getDefaultProxies() { + return proxies.get("default"); + } + +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/MultiSink.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/MultiSink.java new file mode 100644 index 000000000..3c63758c1 --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/MultiSink.java @@ -0,0 +1,35 @@ +package net.i2p.i2ptunnel.socks; + +import java.util.Map; + +import net.i2p.data.Destination; +import net.i2p.i2ptunnel.udp.*; +import net.i2p.util.Log; + +/** + * Sends to one of 
many Sinks + * @author zzz modded from streamr/MultiSource + */ +public class MultiSink implements Source, Sink { + private static final Log _log = new Log(MultiSink.class); + + public MultiSink(Map cache) { + this.cache = cache; + } + + /** Don't use this - put sinks in the cache */ + public void setSink(Sink sink) {} + + public void start() {} + + public void send(Destination from, byte[] data) { + Sink s = this.cache.get(from); + if (s == null) { + _log.error("No where to go for " + from.calculateHash().toBase64().substring(0, 6)); + return; + } + s.send(from, data); + } + + private Map cache; +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/ReplyTracker.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/ReplyTracker.java new file mode 100644 index 000000000..f6a124c95 --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/ReplyTracker.java @@ -0,0 +1,36 @@ +package net.i2p.i2ptunnel.socks; + +import java.util.concurrent.ConcurrentHashMap; +import java.util.Map; + +import net.i2p.data.Destination; +import net.i2p.i2ptunnel.udp.*; +import net.i2p.util.Log; + +/** + * Track who the reply goes to + * @author zzz + */ +public class ReplyTracker implements Source, Sink { + private static final Log _log = new Log(MultiSink.class); + + public ReplyTracker(Sink reply, Map cache) { + this.reply = reply; + this.cache = cache; + } + + public void setSink(Sink sink) { + this.sink = sink; + } + + public void start() {} + + public void send(Destination to, byte[] data) { + this.cache.put(to, this.reply); + this.sink.send(to, data); + } + + private Sink reply; + private Map cache; + private Sink sink; +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKS4aServer.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKS4aServer.java new file mode 100644 index 000000000..23ec70c3f --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKS4aServer.java @@ -0,0 +1,284 @@ +/* I2PSOCKSTunnel is released under the 
terms of the GNU GPL, + * with an additional exception. For further details, see the + * licensing terms in I2PTunnel.java. + * + * Copyright (c) 2004 by human + */ +package net.i2p.i2ptunnel.socks; + +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.net.InetAddress; +import java.net.Socket; +import java.net.SocketException; +import java.util.List; + +import net.i2p.I2PAppContext; +import net.i2p.I2PException; +import net.i2p.client.streaming.I2PSocket; +import net.i2p.data.DataFormatException; +import net.i2p.i2ptunnel.I2PTunnel; +import net.i2p.util.HexDump; +import net.i2p.util.Log; + +/* + * Class that manages SOCKS 4/4a connections, and forwards them to + * destination hosts or (eventually) some outproxy. + * + * @author zzz modded from SOCKS5Server + */ +public class SOCKS4aServer extends SOCKSServer { + private static final Log _log = new Log(SOCKS4aServer.class); + + private Socket clientSock = null; + private boolean setupCompleted = false; + + /** + * Create a SOCKS4a server that communicates with the client using + * the specified socket. This method should not be invoked + * directly: new SOCKS4aServer objects should be created by using + * SOCKSServerFactory.createSOCSKServer(). It is assumed that the + * SOCKS VER field has been stripped from the input stream of the + * client socket. 
+ * + * @param clientSock client socket + */ + public SOCKS4aServer(Socket clientSock) { + this.clientSock = clientSock; + } + + public Socket getClientSocket() throws SOCKSException { + setupServer(); + + return clientSock; + } + + protected void setupServer() throws SOCKSException { + if (setupCompleted) { return; } + + DataInputStream in; + DataOutputStream out; + try { + in = new DataInputStream(clientSock.getInputStream()); + out = new DataOutputStream(clientSock.getOutputStream()); + + manageRequest(in, out); + } catch (IOException e) { + throw new SOCKSException("Connection error (" + e.getMessage() + ")"); + } + + setupCompleted = true; + } + + /** + * SOCKS4a request management. This method assumes that all the + * stuff preceding or enveloping the actual request + * has been stripped out of the input/output streams. + */ + private void manageRequest(DataInputStream in, DataOutputStream out) throws IOException, SOCKSException { + + int command = in.readByte() & 0xff; + switch (command) { + case Command.CONNECT: + break; + case Command.BIND: + _log.debug("BIND command is not supported!"); + sendRequestReply(Reply.CONNECTION_REFUSED, InetAddress.getByName("127.0.0.1"), 0, out); + throw new SOCKSException("BIND command not supported"); + default: + _log.debug("unknown command in request (" + Integer.toHexString(command) + ")"); + sendRequestReply(Reply.CONNECTION_REFUSED, InetAddress.getByName("127.0.0.1"), 0, out); + throw new SOCKSException("Invalid command in request"); + } + + connPort = in.readUnsignedShort(); + if (connPort == 0) { + _log.debug("trying to connect to TCP port 0? 
Dropping!"); + sendRequestReply(Reply.CONNECTION_REFUSED, InetAddress.getByName("127.0.0.1"), 0, out); + throw new SOCKSException("Invalid port number in request"); + } + + connHostName = new String(""); + boolean alreadyWarned = false; + for (int i = 0; i < 4; ++i) { + int octet = in.readByte() & 0xff; + connHostName += Integer.toString(octet); + if (i != 3) { + connHostName += "."; + if (octet != 0 && !alreadyWarned) { + _log.warn("IPV4 address type in request: " + connHostName + ". Is your client secure?"); + alreadyWarned = true; + } + } + } + + // discard user name + readString(in); + + // SOCKS 4a + if (connHostName.startsWith("0.0.0.") && !connHostName.equals("0.0.0.0")) + connHostName = readString(in); + } + + private String readString(DataInputStream in) throws IOException { + StringBuffer sb = new StringBuffer(16); + char c; + while ((c = (char) (in.readByte() & 0xff)) != 0) + sb.append(c); + return sb.toString(); + } + + protected void confirmConnection() throws SOCKSException { + DataInputStream in; + DataOutputStream out; + try { + out = new DataOutputStream(clientSock.getOutputStream()); + + sendRequestReply(Reply.SUCCEEDED, InetAddress.getByName("127.0.0.1"), 1, out); + } catch (IOException e) { + throw new SOCKSException("Connection error (" + e.getMessage() + ")"); + } + } + + /** + * Send the specified reply to a request of the client. Either + * one of inetAddr or domainName can be null, depending on + * addressType. 
+ */ + private void sendRequestReply(int replyCode, InetAddress inetAddr, + int bindPort, DataOutputStream out) throws IOException { + ByteArrayOutputStream reps = new ByteArrayOutputStream(); + DataOutputStream dreps = new DataOutputStream(reps); + + // Reserved byte, should be 0x00 + dreps.write(0x00); + dreps.write(replyCode); + dreps.writeShort(bindPort); + dreps.write(inetAddr.getAddress()); + + byte[] reply = reps.toByteArray(); + + if (_log.shouldLog(Log.DEBUG)) { + _log.debug("Sending request reply:\n" + HexDump.dump(reply)); + } + + out.write(reply); + } + + /** + * Get an I2PSocket that can be used to send/receive 8-bit clean data + * to/from the destination of the SOCKS connection. + * + * @return an I2PSocket connected with the destination + */ + public I2PSocket getDestinationI2PSocket(I2PSOCKSTunnel t) throws SOCKSException { + setupServer(); + + if (connHostName == null) { + _log.error("BUG: destination host name has not been initialized!"); + throw new SOCKSException("BUG! See the logs!"); + } + if (connPort == 0) { + _log.error("BUG: destination port has not been initialized!"); + throw new SOCKSException("BUG! See the logs!"); + } + + DataOutputStream out; // for errors + try { + out = new DataOutputStream(clientSock.getOutputStream()); + } catch (IOException e) { + throw new SOCKSException("Connection error (" + e.getMessage() + ")"); + } + + // FIXME: here we should read our config file, select an + // outproxy, and instantiate the proper socket class that + // handles the outproxy itself (SOCKS4a, SOCKS4a, HTTP CONNECT...). + I2PSocket destSock; + + try { + if (connHostName.toLowerCase().endsWith(".i2p") || + connHostName.toLowerCase().endsWith(".onion")) { + _log.debug("connecting to " + connHostName + "..."); + // Let's not due a new Dest for every request, huh? 
+ //I2PSocketManager sm = I2PSocketManagerFactory.createManager(); + //destSock = sm.connect(I2PTunnel.destFromName(connHostName), null); + destSock = t.createI2PSocket(I2PTunnel.destFromName(connHostName)); + } else if ("localhost".equals(connHostName) || "127.0.0.1".equals(connHostName)) { + String err = "No localhost accesses allowed through the Socks Proxy"; + _log.error(err); + try { + sendRequestReply(Reply.CONNECTION_REFUSED, InetAddress.getByName("127.0.0.1"), 0, out); + } catch (IOException ioe) {} + throw new SOCKSException(err); + } else if (connPort == 80) { + // rewrite GET line to include hostname??? or add Host: line??? + // or forward to local eepProxy (but that's a Socket not an I2PSocket) + // use eepProxy configured outproxies? + String err = "No handler for HTTP outproxy implemented - to: " + connHostName; + _log.error(err); + try { + sendRequestReply(Reply.CONNECTION_REFUSED, InetAddress.getByName("127.0.0.1"), 0, out); + } catch (IOException ioe) {} + throw new SOCKSException(err); + } else { + List proxies = t.getProxies(connPort); + if (proxies == null || proxies.size() <= 0) { + String err = "No outproxy configured for port " + connPort + " and no default configured either - host: " + connHostName; + _log.error(err); + try { + sendRequestReply(Reply.CONNECTION_REFUSED, InetAddress.getByName("127.0.0.1"), 0, out); + } catch (IOException ioe) {} + throw new SOCKSException(err); + } + int p = I2PAppContext.getGlobalContext().random().nextInt(proxies.size()); + String proxy = proxies.get(p); + _log.debug("connecting to port " + connPort + " proxy " + proxy + " for " + connHostName + "..."); + // this isn't going to work, these need to be socks outproxies so we need + // to do a socks session to them? 
+ destSock = t.createI2PSocket(I2PTunnel.destFromName(proxy)); + } + confirmConnection(); + _log.debug("connection confirmed - exchanging data..."); + } catch (DataFormatException e) { + try { + sendRequestReply(Reply.CONNECTION_REFUSED, InetAddress.getByName("127.0.0.1"), 0, out); + } catch (IOException ioe) {} + throw new SOCKSException("Error in destination format"); + } catch (SocketException e) { + try { + sendRequestReply(Reply.CONNECTION_REFUSED, InetAddress.getByName("127.0.0.1"), 0, out); + } catch (IOException ioe) {} + throw new SOCKSException("Error connecting (" + + e.getMessage() + ")"); + } catch (IOException e) { + try { + sendRequestReply(Reply.CONNECTION_REFUSED, InetAddress.getByName("127.0.0.1"), 0, out); + } catch (IOException ioe) {} + throw new SOCKSException("Error connecting (" + + e.getMessage() + ")"); + } catch (I2PException e) { + try { + sendRequestReply(Reply.CONNECTION_REFUSED, InetAddress.getByName("127.0.0.1"), 0, out); + } catch (IOException ioe) {} + throw new SOCKSException("Error connecting (" + + e.getMessage() + ")"); + } + + return destSock; + } + + /* + * Some namespaces to enclose SOCKS protocol codes + */ + private static class Command { + private static final int CONNECT = 0x01; + private static final int BIND = 0x02; + } + + private static class Reply { + private static final int SUCCEEDED = 0x5a; + private static final int CONNECTION_REFUSED = 0x5b; + } +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKS5Server.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKS5Server.java index 5efc51d9f..5e5292607 100644 --- a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKS5Server.java +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKS5Server.java @@ -12,7 +12,17 @@ import java.io.DataOutputStream; import java.io.IOException; import java.net.InetAddress; import java.net.Socket; +import java.net.SocketException; +import java.net.UnknownHostException; +import java.util.ArrayList; +import 
java.util.List; +import net.i2p.I2PAppContext; +import net.i2p.I2PException; +import net.i2p.client.streaming.I2PSocket; +import net.i2p.data.DataFormatException; +import net.i2p.data.Destination; +import net.i2p.i2ptunnel.I2PTunnel; import net.i2p.util.HexDump; import net.i2p.util.Log; @@ -28,7 +38,6 @@ public class SOCKS5Server extends SOCKSServer { private static final int SOCKS_VERSION_5 = 0x05; private Socket clientSock = null; - private boolean setupCompleted = false; /** @@ -61,7 +70,8 @@ public class SOCKS5Server extends SOCKSServer { out = new DataOutputStream(clientSock.getOutputStream()); init(in, out); - manageRequest(in, out); + if (manageRequest(in, out) == Command.UDP_ASSOCIATE) + handleUDP(in, out); } catch (IOException e) { throw new SOCKSException("Connection error (" + e.getMessage() + ")"); } @@ -105,7 +115,7 @@ public class SOCKS5Server extends SOCKSServer { * initialization, integrity/confidentiality encapsulations, etc) * has been stripped out of the input/output streams. */ - private void manageRequest(DataInputStream in, DataOutputStream out) throws IOException, SOCKSException { + private int manageRequest(DataInputStream in, DataOutputStream out) throws IOException, SOCKSException { int socksVer = in.readByte() & 0xff; if (socksVer != SOCKS_VERSION_5) { _log.debug("error in SOCKS5 request (protocol != 5? 
wtf?)"); @@ -121,11 +131,15 @@ public class SOCKS5Server extends SOCKSServer { sendRequestReply(Reply.COMMAND_NOT_SUPPORTED, AddressType.DOMAINNAME, null, "0.0.0.0", 0, out); throw new SOCKSException("BIND command not supported"); case Command.UDP_ASSOCIATE: + /*** if(!Boolean.valueOf(tunnel.getOptions().getProperty("i2ptunnel.socks.allowUDP")).booleanValue()) { _log.debug("UDP ASSOCIATE command is not supported!"); sendRequestReply(Reply.COMMAND_NOT_SUPPORTED, AddressType.DOMAINNAME, null, "0.0.0.0", 0, out); throw new SOCKSException("UDP ASSOCIATE command not supported"); + ***/ + break; default: _log.debug("unknown command in request (" + Integer.toHexString(command) + ")"); + sendRequestReply(Reply.COMMAND_NOT_SUPPORTED, AddressType.DOMAINNAME, null, "0.0.0.0", 0, out); throw new SOCKSException("Invalid command in request"); } @@ -145,7 +159,8 @@ public class SOCKS5Server extends SOCKSServer { connHostName += "."; } } - _log.warn("IPV4 address type in request: " + connHostName + ". Is your client secure?"); + if (command != Command.UDP_ASSOCIATE) + _log.warn("IPV4 address type in request: " + connHostName + ". Is your client secure?"); break; case AddressType.DOMAINNAME: { @@ -161,19 +176,25 @@ public class SOCKS5Server extends SOCKSServer { _log.debug("DOMAINNAME address type in request: " + connHostName); break; case AddressType.IPV6: - _log.warn("IP V6 address type in request! Is your client secure?" + " (IPv6 is not supported, anyway :-)"); - sendRequestReply(Reply.ADDRESS_TYPE_NOT_SUPPORTED, AddressType.DOMAINNAME, null, "0.0.0.0", 0, out); - throw new SOCKSException("IPV6 addresses not supported"); + if (command != Command.UDP_ASSOCIATE) { + _log.warn("IP V6 address type in request! Is your client secure?" 
+ " (IPv6 is not supported, anyway :-)"); + sendRequestReply(Reply.ADDRESS_TYPE_NOT_SUPPORTED, AddressType.DOMAINNAME, null, "0.0.0.0", 0, out); + throw new SOCKSException("IPV6 addresses not supported"); + } + break; default: _log.debug("unknown address type in request (" + Integer.toHexString(command) + ")"); + sendRequestReply(Reply.ADDRESS_TYPE_NOT_SUPPORTED, AddressType.DOMAINNAME, null, "0.0.0.0", 0, out); throw new SOCKSException("Invalid addresses type in request"); } connPort = in.readUnsignedShort(); if (connPort == 0) { _log.debug("trying to connect to TCP port 0? Dropping!"); + sendRequestReply(Reply.CONNECTION_NOT_ALLOWED_BY_RULESET, AddressType.DOMAINNAME, null, "0.0.0.0", 0, out); throw new SOCKSException("Invalid port number in request"); } + return command; } protected void confirmConnection() throws SOCKSException { @@ -248,27 +269,188 @@ public class SOCKS5Server extends SOCKSServer { out.write(reply); } + /** + * Get an I2PSocket that can be used to send/receive 8-bit clean data + * to/from the destination of the SOCKS connection. + * + * @return an I2PSocket connected with the destination + */ + public I2PSocket getDestinationI2PSocket(I2PSOCKSTunnel t) throws SOCKSException { + setupServer(); + + if (connHostName == null) { + _log.error("BUG: destination host name has not been initialized!"); + throw new SOCKSException("BUG! See the logs!"); + } + if (connPort == 0) { + _log.error("BUG: destination port has not been initialized!"); + throw new SOCKSException("BUG! See the logs!"); + } + + DataOutputStream out; // for errors + try { + out = new DataOutputStream(clientSock.getOutputStream()); + } catch (IOException e) { + throw new SOCKSException("Connection error (" + e.getMessage() + ")"); + } + + // FIXME: here we should read our config file, select an + // outproxy, and instantiate the proper socket class that + // handles the outproxy itself (SOCKS4a, SOCKS5, HTTP CONNECT...). 
+ I2PSocket destSock; + + try { + if (connHostName.toLowerCase().endsWith(".i2p")) { + _log.debug("connecting to " + connHostName + "..."); + // Let's not due a new Dest for every request, huh? + //I2PSocketManager sm = I2PSocketManagerFactory.createManager(); + //destSock = sm.connect(I2PTunnel.destFromName(connHostName), null); + Destination dest = I2PTunnel.destFromName(connHostName); + if (dest == null) { + try { + sendRequestReply(Reply.HOST_UNREACHABLE, AddressType.DOMAINNAME, null, "0.0.0.0", 0, out); + } catch (IOException ioe) {} + throw new SOCKSException("Host not found"); + } + destSock = t.createI2PSocket(I2PTunnel.destFromName(connHostName)); + } else if ("localhost".equals(connHostName) || "127.0.0.1".equals(connHostName)) { + String err = "No localhost accesses allowed through the Socks Proxy"; + _log.error(err); + try { + sendRequestReply(Reply.CONNECTION_NOT_ALLOWED_BY_RULESET, AddressType.DOMAINNAME, null, "0.0.0.0", 0, out); + } catch (IOException ioe) {} + throw new SOCKSException(err); + } else if (connPort == 80) { + // rewrite GET line to include hostname??? or add Host: line??? + // or forward to local eepProxy (but that's a Socket not an I2PSocket) + // use eepProxy configured outproxies? 
+ String err = "No handler for HTTP outproxy implemented"; + _log.error(err); + try { + sendRequestReply(Reply.CONNECTION_NOT_ALLOWED_BY_RULESET, AddressType.DOMAINNAME, null, "0.0.0.0", 0, out); + } catch (IOException ioe) {} + throw new SOCKSException(err); + } else { + List proxies = t.getProxies(connPort); + if (proxies == null || proxies.size() <= 0) { + String err = "No outproxy configured for port " + connPort + " and no default configured either"; + _log.error(err); + try { + sendRequestReply(Reply.CONNECTION_NOT_ALLOWED_BY_RULESET, AddressType.DOMAINNAME, null, "0.0.0.0", 0, out); + } catch (IOException ioe) {} + throw new SOCKSException(err); + } + int p = I2PAppContext.getGlobalContext().random().nextInt(proxies.size()); + String proxy = proxies.get(p); + _log.debug("connecting to port " + connPort + " proxy " + proxy + " for " + connHostName + "..."); + // this isn't going to work, these need to be socks outproxies so we need + // to do a socks session to them? + destSock = t.createI2PSocket(I2PTunnel.destFromName(proxy)); + } + confirmConnection(); + _log.debug("connection confirmed - exchanging data..."); + } catch (DataFormatException e) { + try { + sendRequestReply(Reply.HOST_UNREACHABLE, AddressType.DOMAINNAME, null, "0.0.0.0", 0, out); + } catch (IOException ioe) {} + throw new SOCKSException("Error in destination format"); + } catch (SocketException e) { + try { + sendRequestReply(Reply.HOST_UNREACHABLE, AddressType.DOMAINNAME, null, "0.0.0.0", 0, out); + } catch (IOException ioe) {} + throw new SOCKSException("Error connecting (" + + e.getMessage() + ")"); + } catch (IOException e) { + try { + sendRequestReply(Reply.HOST_UNREACHABLE, AddressType.DOMAINNAME, null, "0.0.0.0", 0, out); + } catch (IOException ioe) {} + throw new SOCKSException("Error connecting (" + + e.getMessage() + ")"); + } catch (I2PException e) { + try { + sendRequestReply(Reply.HOST_UNREACHABLE, AddressType.DOMAINNAME, null, "0.0.0.0", 0, out); + } catch (IOException ioe) {} 
+ throw new SOCKSException("Error connecting (" + + e.getMessage() + ")"); + } + + return destSock; + } + + // This isn't really the right place for this, we can't stop the tunnel once it starts. + static SOCKSUDPTunnel _tunnel; + static Object _startLock = new Object(); + static byte[] dummyIP = new byte[4]; + /** + * We got a UDP associate command. + * Loop here looking for more, never return normally, + * or else I2PSocksTunnel will create a streaming lib connection. + * + * Do UDP Socks clients actually send more than one Associate request? + * RFC 1928 isn't clear... maybe not. + */ + private void handleUDP(DataInputStream in, DataOutputStream out) throws SOCKSException { + List ports = new ArrayList(1); + synchronized (_startLock) { + if (_tunnel == null) { + // tunnel options? + _tunnel = new SOCKSUDPTunnel(new I2PTunnel()); + _tunnel.startRunning(); + } + } + while (true) { + // Set it up. connHostName and connPort are the client's info. + InetAddress ia = null; + try { + ia = InetAddress.getByAddress(connHostName, dummyIP); + } catch (UnknownHostException uhe) {} // won't happen, no resolving done here + int myPort = _tunnel.add(ia, connPort); + ports.add(Integer.valueOf(myPort)); + try { + sendRequestReply(Reply.SUCCEEDED, AddressType.IPV4, InetAddress.getByName("127.0.0.1"), null, myPort, out); + } catch (IOException ioe) { break; } + + // wait for more ??? + try { + int command = manageRequest(in, out); + // don't do this... + if (command != Command.UDP_ASSOCIATE) + break; + } catch (IOException ioe) { break; } + catch (SOCKSException ioe) { break; } + } + + for (Integer i : ports) + _tunnel.remove(i); + + // Prevent I2PSocksTunnel from calling getDestinationI2PSocket() above + // to create a streaming lib connection... + // This isn't very elegant... 
+ // + throw new SOCKSException("End of UDP Processing"); + } + /* * Some namespaces to enclose SOCKS protocol codes */ - private class Method { + private static class Method { private static final int NO_AUTH_REQUIRED = 0x00; private static final int NO_ACCEPTABLE_METHODS = 0xff; } - private class AddressType { + private static class AddressType { private static final int IPV4 = 0x01; private static final int DOMAINNAME = 0x03; private static final int IPV6 = 0x04; } - private class Command { + private static class Command { private static final int CONNECT = 0x01; private static final int BIND = 0x02; private static final int UDP_ASSOCIATE = 0x03; } - private class Reply { + private static class Reply { private static final int SUCCEEDED = 0x00; private static final int GENERAL_SOCKS_SERVER_FAILURE = 0x01; private static final int CONNECTION_NOT_ALLOWED_BY_RULESET = 0x02; @@ -279,4 +461,4 @@ public class SOCKS5Server extends SOCKSServer { private static final int COMMAND_NOT_SUPPORTED = 0x07; private static final int ADDRESS_TYPE_NOT_SUPPORTED = 0x08; } -} \ No newline at end of file +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSHeader.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSHeader.java new file mode 100644 index 000000000..763b9aa10 --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSHeader.java @@ -0,0 +1,89 @@ +package net.i2p.i2ptunnel.socks; + +import net.i2p.data.Base32; +import net.i2p.data.DataFormatException; +import net.i2p.data.Destination; +import net.i2p.i2ptunnel.I2PTunnel; + +/** + * Save the SOCKS header from a datagram + * Ref: RFC 1928 + * + * @author zzz + */ +public class SOCKSHeader { + + /** + * @param data the whole packet + */ + public SOCKSHeader(byte[] data) { + if (data.length <= 8) + throw new IllegalArgumentException("Header too short: " + data.length); + if (data[0] != 0 || data[1] != 0) + throw new IllegalArgumentException("Not a SOCKS datagram?"); + if (data[2] != 0) + 
throw new IllegalArgumentException("We can't handle fragments!"); + int headerlen = 0; + int addressType = data[3]; + if (addressType == 1) { + // this will fail in getDestination() + headerlen = 6 + 4; + } else if (addressType == 3) { + headerlen = 6 + 1 + (data[4] & 0xff); + } else if (addressType == 4) { + // this will fail in getDestination() + // but future garlicat partial hash lookup possible? + headerlen = 6 + 16; + } else { + throw new IllegalArgumentException("Unknown address type: " + addressType); + } + if (data.length < headerlen) + throw new IllegalArgumentException("Header too short: " + data.length); + + this.header = new byte[headerlen]; + System.arraycopy(this.header, 0, data, 0, headerlen); + } + + private static final byte[] beg = {0,0,0,3,60}; + private static final byte[] end = {'.','b','3','2','.','i','2','p',0,0}; + + /** + * Make a dummy header from a dest, + * for those cases where we want to receive unsolicited datagrams. + * Unused for now. + */ + public SOCKSHeader(Destination dest) { + this.header = new byte[beg.length + 52 + end.length]; + System.arraycopy(this.header, 0, beg, 0, beg.length); + String b32 = Base32.encode(dest.calculateHash().getData()); + System.arraycopy(this.header, beg.length, b32.getBytes(), 0, 52); + System.arraycopy(this.header, beg.length + 52, end, 0, end.length); + } + + public String getHost() { + int addressType = this.header[3]; + if (addressType != 3) + return null; + int namelen = (this.header[4] & 0xff); + byte[] nameBytes = new byte[namelen]; + System.arraycopy(nameBytes, 0, this.header, 5, namelen); + return new String(nameBytes); + } + + public Destination getDestination() { + String name = getHost(); + if (name == null) + return null; + try { + // the naming service does caching (thankfully) + return I2PTunnel.destFromName(name); + } catch (DataFormatException dfe) {} + return null; + } + + public byte[] getBytes() { + return header; + } + + private byte[] header; +} diff --git 
a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSServer.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSServer.java index caf4d1ce3..06c3fab55 100644 --- a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSServer.java +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSServer.java @@ -6,15 +6,9 @@ */ package net.i2p.i2ptunnel.socks; -import java.io.IOException; import java.net.Socket; -import java.net.SocketException; -import net.i2p.I2PException; import net.i2p.client.streaming.I2PSocket; -import net.i2p.client.streaming.I2PSocketManager; -import net.i2p.client.streaming.I2PSocketManagerFactory; -import net.i2p.data.DataFormatException; import net.i2p.i2ptunnel.I2PTunnel; import net.i2p.util.Log; @@ -30,10 +24,6 @@ public abstract class SOCKSServer { protected String connHostName = null; protected int connPort = 0; - I2PSocket destSocket = null; - - Object FIXME = new Object(); - /** * Perform server initialization (expecially regarding protected * variables). @@ -59,47 +49,6 @@ public abstract class SOCKSServer { * * @return an I2PSocket connected with the destination */ - public I2PSocket getDestinationI2PSocket() throws SOCKSException { - setupServer(); + public abstract I2PSocket getDestinationI2PSocket(I2PSOCKSTunnel t) throws SOCKSException; - if (connHostName == null) { - _log.error("BUG: destination host name has not been initialized!"); - throw new SOCKSException("BUG! See the logs!"); - } - if (connPort == 0) { - _log.error("BUG: destination port has not been initialized!"); - throw new SOCKSException("BUG! See the logs!"); - } - - // FIXME: here we should read our config file, select an - // outproxy, and instantiate the proper socket class that - // handles the outproxy itself (SOCKS4a, SOCKS5, HTTP CONNECT...). 
- I2PSocket destSock; - - try { - if (connHostName.toLowerCase().endsWith(".i2p")) { - _log.debug("connecting to " + connHostName + "..."); - I2PSocketManager sm = I2PSocketManagerFactory.createManager(); - destSock = sm.connect(I2PTunnel.destFromName(connHostName), null); - confirmConnection(); - _log.debug("connection confirmed - exchanging data..."); - } else { - _log.error("We don't support outproxies (yet)"); - throw new SOCKSException("Ouproxies not supported (yet)"); - } - } catch (DataFormatException e) { - throw new SOCKSException("Error in destination format"); - } catch (SocketException e) { - throw new SOCKSException("Error connecting (" - + e.getMessage() + ")"); - } catch (IOException e) { - throw new SOCKSException("Error connecting (" - + e.getMessage() + ")"); - } catch (I2PException e) { - throw new SOCKSException("Error connecting (" - + e.getMessage() + ")"); - } - - return destSock; - } } diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSServerFactory.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSServerFactory.java index 357149652..80dfacb6a 100644 --- a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSServerFactory.java +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSServerFactory.java @@ -7,6 +7,7 @@ package net.i2p.i2ptunnel.socks; import java.io.DataInputStream; +import java.io.DataOutputStream; import java.io.IOException; import java.net.Socket; @@ -18,6 +19,15 @@ import net.i2p.util.Log; public class SOCKSServerFactory { private final static Log _log = new Log(SOCKSServerFactory.class); + private final static String ERR_REQUEST_DENIED = + "HTTP/1.1 403 Access Denied\r\n" + + "Content-Type: text/html; charset=iso-8859-1\r\n" + + "Cache-control: no-cache\r\n" + + "\r\n" + + "
<html><body><H1>I2P SOCKS PROXY ERROR: REQUEST DENIED</H1>" +
+        "Your browser is misconfigured. This is a SOCKS proxy, not a HTTP proxy" +
+        "</body></html>";
+
     /**
      * Create a new SOCKS server, using the provided socket (that must
      * be connected to a client) to select the proper SOCKS protocol
@@ -34,13 +44,23 @@
         int socksVer = in.readByte();
 
         switch (socksVer) {
+        case 0x04:
+            // SOCKS version 4/4a
+            serv = new SOCKS4aServer(s);
+            break;
         case 0x05:
             // SOCKS version 5
             serv = new SOCKS5Server(s);
             break;
+        case 'C':
+        case 'G':
+        case 'H':
+        case 'P':
+            DataOutputStream out = new DataOutputStream(s.getOutputStream());
+            out.write(ERR_REQUEST_DENIED.getBytes());
+            throw new SOCKSException("HTTP request to socks");
         default:
-            _log.debug("SOCKS protocol version not supported (" + Integer.toHexString(socksVer) + ")");
-            return null;
+            throw new SOCKSException("SOCKS protocol version not supported (" + Integer.toHexString(socksVer) + ")");
         }
     } catch (IOException e) {
         _log.debug("error reading SOCKS protocol version");
@@ -49,4 +69,4 @@
 
         return serv;
     }
-}
\ No newline at end of file
+}
diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSUDPPort.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSUDPPort.java
new file mode 100644
index 000000000..b56c9082f
--- /dev/null
+++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSUDPPort.java
@@ -0,0 +1,77 @@
+package net.i2p.i2ptunnel.socks;
+
+import java.net.DatagramSocket;
+import java.net.InetAddress;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.Map;
+
+import net.i2p.data.Destination;
+import net.i2p.i2ptunnel.udp.*;
+
+/**
+ * Implements a UDP port and Socks encapsulation / decapsulation.
+ * This is for a single port. If there is demuxing for multiple
+ * ports, it happens outside of here.
+ * + * TX: + * UDPSource -> SOCKSUDPUnwrapper -> ReplyTracker ( -> I2PSink in SOCKSUDPTunnel) + * + * RX: + * UDPSink <- SOCKSUDPWrapper ( <- MultiSink <- I2PSource in SOCKSUDPTunnel) + * + * The Unwrapper passes headers to the Wrapper through a cache. + * The ReplyTracker passes sinks to MultiSink through a cache. + * + * @author zzz + */ +public class SOCKSUDPPort implements Source, Sink { + + public SOCKSUDPPort(InetAddress host, int port, Map replyMap) { + + // this passes the host and port from UDPUnwrapper to UDPWrapper + Map cache = new ConcurrentHashMap(4); + + // rcv from I2P and send to a port + this.wrapper = new SOCKSUDPWrapper(cache); + this.udpsink = new UDPSink(host, port); + this.wrapper.setSink(this.udpsink); + + // rcv from the same port and send to I2P + DatagramSocket sock = this.udpsink.getSocket(); + this.udpsource = new UDPSource(sock); + this.unwrapper = new SOCKSUDPUnwrapper(cache); + this.udpsource.setSink(this.unwrapper); + this.udptracker = new ReplyTracker(this, replyMap); + this.unwrapper.setSink(this.udptracker); + } + + /** Socks passes this back to the client on the TCP connection */ + public int getPort() { + return this.udpsink.getPort(); + } + + public void setSink(Sink sink) { + this.udptracker.setSink(sink); + } + + public void start() { + // the other Sources don't use start + this.udpsource.start(); + } + + public void stop() { + this.udpsink.stop(); + this.udpsource.stop(); + } + + public void send(Destination from, byte[] data) { + this.wrapper.send(from, data); + } + + + private UDPSink udpsink; + private UDPSource udpsource; + private SOCKSUDPWrapper wrapper; + private SOCKSUDPUnwrapper unwrapper; + private ReplyTracker udptracker; +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSUDPTunnel.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSUDPTunnel.java new file mode 100644 index 000000000..0adaa1950 --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSUDPTunnel.java @@ 
-0,0 +1,94 @@ +package net.i2p.i2ptunnel.socks; + +import java.net.InetAddress; +import java.util.concurrent.ConcurrentHashMap; +import java.util.Iterator; +import java.util.Map; + +import net.i2p.data.Destination; +import net.i2p.i2ptunnel.I2PTunnel; +import net.i2p.i2ptunnel.Logging; +import net.i2p.i2ptunnel.udp.*; +import net.i2p.i2ptunnel.udpTunnel.I2PTunnelUDPClientBase; +import net.i2p.util.EventDispatcher; + +/** + * A Datagram Tunnel that can have multiple bidirectional ports on the UDP side. + * + * TX: + * (ReplyTracker in multiple SOCKSUDPPorts -> ) I2PSink + * + * RX: + * (SOCKSUDPWrapper in multiple SOCKSUDPPorts <- ) MultiSink <- I2PSource + * + * The reply from a dest goes to the last SOCKSUDPPort that sent to that dest. + * If multiple ports are talking to a dest at the same time, this isn't + * going to work very well. + * + * @author zzz modded from streamr/StreamrConsumer + */ +public class SOCKSUDPTunnel extends I2PTunnelUDPClientBase { + + /** + * Set up a tunnel with no UDP side yet. + * Use add() for each port. 
+ */ + public SOCKSUDPTunnel(I2PTunnel tunnel) { + super(null, tunnel, tunnel, tunnel); + + this.ports = new ConcurrentHashMap(1); + this.cache = new ConcurrentHashMap(1); + this.demuxer = new MultiSink(this.cache); + setSink(this.demuxer); + } + + + /** @return the UDP port number */ + public int add(InetAddress host, int port) { + SOCKSUDPPort sup = new SOCKSUDPPort(host, port, this.cache); + this.ports.put(Integer.valueOf(sup.getPort()), sup); + sup.setSink(this); + sup.start(); + return sup.getPort(); + } + + public void remove(Integer port) { + SOCKSUDPPort sup = this.ports.remove(port); + if (sup != null) + sup.stop(); + for (Iterator iter = cache.entrySet().iterator(); iter.hasNext();) { + Map.Entry e = (Map.Entry) iter.next(); + if (e.getValue() == sup) + iter.remove(); + } + } + + public final void startRunning() { + super.startRunning(); + // demuxer start() doesn't do anything + startall(); + } + + public boolean close(boolean forced) { + stopall(); + return super.close(forced); + } + + /** you should really add() after startRunning() */ + private void startall() { + } + + private void stopall() { + for (SOCKSUDPPort sup : this.ports.values()) { + sup.stop(); + } + this.ports.clear(); + this.cache.clear(); + } + + + + private Map ports; + private Map cache; + private MultiSink demuxer; +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSUDPUnwrapper.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSUDPUnwrapper.java new file mode 100644 index 000000000..2720b6fd4 --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSUDPUnwrapper.java @@ -0,0 +1,59 @@ +package net.i2p.i2ptunnel.socks; + +import java.util.Map; + +import net.i2p.data.Destination; +import net.i2p.i2ptunnel.udp.*; +import net.i2p.util.Log; + +/** + * Strip a SOCKS header off a datagram, convert it to a Destination + * Ref: RFC 1928 + * + * @author zzz + */ +public class SOCKSUDPUnwrapper implements Source, Sink { + private static final Log _log = 
new Log(SOCKSUDPUnwrapper.class);
+
+    /**
+     * @param cache put headers here to pass to SOCKSUDPWrapper
+     */
+    public SOCKSUDPUnwrapper(Map cache) {
+        this.cache = cache;
+    }
+
+    public void setSink(Sink sink) {
+        this.sink = sink;
+    }
+
+    public void start() {}
+
+    /**
+     *
+     */
+    public void send(Destination ignored_from, byte[] data) {
+        SOCKSHeader h;
+        try {
+            h = new SOCKSHeader(data);
+        } catch (IllegalArgumentException iae) {
+            _log.error(iae.toString());
+            return;
+        }
+        Destination dest = h.getDestination();
+        if (dest == null) {
+            // no, we aren't going to send non-i2p traffic to a UDP outproxy :)
+            _log.error("Destination not found: " + h.getHost());
+            return;
+        }
+
+        cache.put(dest, h);
+
+        int headerlen = h.getBytes().length;
+        byte unwrapped[] = new byte[data.length - headerlen];
+        System.arraycopy(data, headerlen, unwrapped, 0, unwrapped.length);
+        this.sink.send(dest, unwrapped);
+    }
+
+    private Sink sink;
+    private Map cache;
+}
diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSUDPWrapper.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSUDPWrapper.java
new file mode 100644
index 000000000..4ec836157
--- /dev/null
+++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/socks/SOCKSUDPWrapper.java
@@ -0,0 +1,49 @@
+package net.i2p.i2ptunnel.socks;
+
+import java.util.Map;
+
+import net.i2p.data.Destination;
+import net.i2p.i2ptunnel.udp.*;
+
+/**
+ * Put a SOCKS header on a datagram
+ * Ref: RFC 1928
+ *
+ * @author zzz
+ */
+public class SOCKSUDPWrapper implements Source, Sink {
+    public SOCKSUDPWrapper(Map cache) {
+        this.cache = cache;
+    }
+
+    public void setSink(Sink sink) {
+        this.sink = sink;
+    }
+
+    public void start() {}
+
+    /**
+     * Use the cached header, which should have the host string and port
+     *
+     */
+    public void send(Destination from, byte[] data) {
+        if (this.sink == null)
+            return;
+
+        SOCKSHeader h = cache.get(from);
+        if (h == null) {
+            // RFC 1928 says drop
+            // h = new SOCKSHeader(from);
+            return;
+        }
+
+        byte[]
header = h.getBytes();
+        byte wrapped[] = new byte[header.length + data.length];
+        System.arraycopy(header, 0, wrapped, 0, header.length);
+        System.arraycopy(data, 0, wrapped, header.length, data.length);
+        this.sink.send(from, wrapped);
+    }
+
+    private Sink sink;
+    private Map cache;
+}
diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/streamr/MultiSource.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/streamr/MultiSource.java
new file mode 100644
index 000000000..5c5a08027
--- /dev/null
+++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/streamr/MultiSource.java
@@ -0,0 +1,64 @@
+/*
+ * To change this template, choose Tools | Templates
+ * and open the template in the editor.
+ */
+
+package net.i2p.i2ptunnel.streamr;
+
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.List;
+
+import net.i2p.data.Destination;
+import net.i2p.i2ptunnel.udp.*;
+
+/**
+ * Sends to many Sinks
+ * @author welterde
+ * @author zzz modded for I2PTunnel
+ */
+public class MultiSource implements Source, Sink {
+    public MultiSource() {
+        this.sinks = new CopyOnWriteArrayList();
+    }
+
+    public void setSink(Sink sink) {
+        this.sink = sink;
+    }
+
+    public void start() {}
+
+    public void stop() {
+        this.sinks.clear();
+    }
+
+    public void send(Destination ignored_from, byte[] data) {
+        for(Destination dest : this.sinks) {
+            this.sink.send(dest, data);
+        }
+    }
+
+    public void add(Destination sink) {
+        this.sinks.add(sink);
+    }
+
+    public void remove(Destination sink) {
+        this.sinks.remove(sink);
+    }
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+    private Sink sink;
+    private List sinks;
+}
diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/streamr/Pinger.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/streamr/Pinger.java
new file mode 100644
index 000000000..a3a797536
--- /dev/null
+++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/streamr/Pinger.java
@@ -0,0 +1,59 @@
+package net.i2p.i2ptunnel.streamr;
+
+import net.i2p.i2ptunnel.udp.*;
+
+/**
+ *
+ * @author welterde/zzz
+ */
+public
class Pinger implements Source, Runnable { + public Pinger() { + this.thread = new Thread(this); + } + public void setSink(Sink sink) { + this.sink = sink; + } + + public void start() { + this.running = true; + this.waitlock = new Object(); + this.thread.start(); + } + + public void stop() { + this.running = false; + synchronized(this.waitlock) { + this.waitlock.notifyAll(); + } + // send unsubscribe-message + byte[] data = new byte[1]; + data[0] = 1; + this.sink.send(null, data); + } + + public void run() { + // send subscribe-message + byte[] data = new byte[1]; + data[0] = 0; + int i = 0; + while(this.running) { + //System.out.print("p"); + this.sink.send(null, data); + synchronized(this.waitlock) { + int delay = 10000; + if (i < 5) { + i++; + delay = 2000; + } + try { + this.waitlock.wait(delay); + } catch(InterruptedException ie) {} + } + } + } + + protected Sink sink; + protected Thread thread; + protected Object waitlock; + protected boolean running; +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/streamr/StreamrConsumer.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/streamr/StreamrConsumer.java new file mode 100644 index 000000000..87ea0eefe --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/streamr/StreamrConsumer.java @@ -0,0 +1,66 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. 
+ */ + +package net.i2p.i2ptunnel.streamr; + +import java.net.InetAddress; + +import net.i2p.data.Destination; +import net.i2p.i2ptunnel.I2PTunnel; +import net.i2p.i2ptunnel.Logging; +import net.i2p.i2ptunnel.udp.*; +import net.i2p.i2ptunnel.udpTunnel.I2PTunnelUDPClientBase; +import net.i2p.util.EventDispatcher; + +/** + * Compared to a standard I2PTunnel, + * this acts like a client on the I2P side (no privkey file) + * but a server on the UDP side (sends to a configured host/port) + * + * @author welterde + * @author zzz modded for I2PTunnel + */ +public class StreamrConsumer extends I2PTunnelUDPClientBase { + + public StreamrConsumer(InetAddress host, int port, String destination, + Logging l, EventDispatcher notifyThis, + I2PTunnel tunnel) { + super(destination, l, notifyThis, tunnel); + + // create udp-destination + this.sink = new UDPSink(host, port); + setSink(this.sink); + + // create pinger + this.pinger = new Pinger(); + this.pinger.setSink(this); + } + + public final void startRunning() { + super.startRunning(); + // send subscribe-message + this.pinger.start(); + l.log("Streamr client ready"); + } + + public boolean close(boolean forced) { + // send unsubscribe-message + this.pinger.stop(); + this.sink.stop(); + return super.close(forced); + } + + + + + + + + + + + private UDPSink sink; + private Pinger pinger; +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/streamr/StreamrProducer.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/streamr/StreamrProducer.java new file mode 100644 index 000000000..b801cb94f --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/streamr/StreamrProducer.java @@ -0,0 +1,72 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. 
+ */ + +package net.i2p.i2ptunnel.streamr; + +// system +import java.io.File; + +// i2p +import net.i2p.client.I2PSession; +import net.i2p.i2ptunnel.I2PTunnel; +import net.i2p.i2ptunnel.Logging; +import net.i2p.i2ptunnel.udp.*; +import net.i2p.i2ptunnel.udpTunnel.I2PTunnelUDPServerBase; +import net.i2p.util.EventDispatcher; + +/** + * Compared to a standard I2PTunnel, + * this acts like a server on the I2P side (persistent privkey file) + * but a client on the UDP side (receives on a configured port) + * + * @author welterde + * @author zzz modded for I2PTunnel + */ +public class StreamrProducer extends I2PTunnelUDPServerBase { + + public StreamrProducer(int port, + File privkey, String privkeyname, Logging l, + EventDispatcher notifyThis, I2PTunnel tunnel) { + // verify subscription requests + super(true, privkey, privkeyname, l, notifyThis, tunnel); + + // The broadcaster + this.multi = new MultiSource(); + this.multi.setSink(this); + + // The listener + this.subscriber = new Subscriber(this.multi); + setSink(this.subscriber); + + // now start udp-server + this.server = new UDPSource(port); + this.server.setSink(this.multi); + } + + public final void startRunning() { + super.startRunning(); + this.server.start(); + l.log("Streamr server ready"); + } + + public boolean close(boolean forced) { + this.server.stop(); + this.multi.stop(); + return super.close(forced); + } + + + + + + + + + + + private MultiSource multi; + private UDPSource server; + private Sink subscriber; +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/streamr/Subscriber.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/streamr/Subscriber.java new file mode 100644 index 000000000..97abdb889 --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/streamr/Subscriber.java @@ -0,0 +1,75 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. 
+ */ + +package net.i2p.i2ptunnel.streamr; + +// system +import java.io.File; +import java.util.Set; + +// i2p +import net.i2p.client.I2PSession; +import net.i2p.data.Destination; +import net.i2p.i2ptunnel.I2PTunnel; +import net.i2p.i2ptunnel.Logging; +import net.i2p.i2ptunnel.udp.*; +import net.i2p.i2ptunnel.udpTunnel.I2PTunnelUDPServerBase; +import net.i2p.util.EventDispatcher; +import net.i2p.util.ConcurrentHashSet; + +/** + * server-mode + * @author welterde + * @author zzz modded from Producer for I2PTunnel + */ +public class Subscriber implements Sink { + + public Subscriber(MultiSource multi) { + this.multi = multi; + // subscriptions + this.subscriptions = new ConcurrentHashSet(); + } + + public void send(Destination dest, byte[] data) { + if(dest == null || data.length < 1) { + // invalid packet + // TODO: write to log + } else { + byte ctrl = data[0]; + if(ctrl == 0) { + if (!this.subscriptions.contains(dest)) { + // subscribe + System.out.println("Add subscription: " + dest.toBase64().substring(0,4)); + this.subscriptions.add(dest); + this.multi.add(dest); + } // else already subscribed + } else if(ctrl == 1) { + // unsubscribe + System.out.println("Remove subscription: " + dest.toBase64().substring(0,4)); + boolean removed = this.subscriptions.remove(dest); + if(removed) + multi.remove(dest); + } else { + // invalid packet + // TODO: write to log + } + } + } + + + + + + + + + + + private I2PSession sess; + private Source listener; + private Set subscriptions; + private MultiSource multi; + private Source server; +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/I2PSink.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/I2PSink.java new file mode 100644 index 000000000..6a32801b7 --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/I2PSink.java @@ -0,0 +1,71 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. 
+ */ + +package net.i2p.i2ptunnel.udp; + +// i2p +import net.i2p.client.I2PSession; +import net.i2p.client.I2PSessionException; +import net.i2p.data.Destination; +import net.i2p.client.datagram.I2PDatagramMaker; + +/** + * Producer + * + * This sends to a fixed destination specified in the constructor + * + * @author welterde + */ +public class I2PSink implements Sink { + public I2PSink(I2PSession sess, Destination dest) { + this(sess, dest, false); + } + public I2PSink(I2PSession sess, Destination dest, boolean raw) { + this.sess = sess; + this.dest = dest; + this.raw = raw; + + // create maker + if (!raw) + this.maker = new I2PDatagramMaker(this.sess); + } + + /** @param src ignored */ + public synchronized void send(Destination src, byte[] data) { + //System.out.print("w"); + // create payload + byte[] payload; + if(!this.raw) + payload = this.maker.makeI2PDatagram(data); + else + payload = data; + + // send message + try { + this.sess.sendMessage(this.dest, payload, I2PSession.PROTO_DATAGRAM, + I2PSession.PORT_UNSPECIFIED, I2PSession.PORT_UNSPECIFIED); + } catch(I2PSessionException exc) { + // TODO: handle better + exc.printStackTrace(); + } + } + + + + + + + + + + + + + + protected boolean raw; + protected I2PSession sess; + protected Destination dest; + protected I2PDatagramMaker maker; +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/I2PSinkAnywhere.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/I2PSinkAnywhere.java new file mode 100644 index 000000000..8707d9779 --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/I2PSinkAnywhere.java @@ -0,0 +1,69 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. 
+ */ + +package net.i2p.i2ptunnel.udp; + +// i2p +import net.i2p.client.I2PSession; +import net.i2p.client.I2PSessionException; +import net.i2p.data.Destination; +import net.i2p.client.datagram.I2PDatagramMaker; + +/** + * Producer + * + * This sends to any destination specified in send() + * + * @author zzz modded from I2PSink by welterde + */ +public class I2PSinkAnywhere implements Sink { + public I2PSinkAnywhere(I2PSession sess) { + this(sess, false); + } + public I2PSinkAnywhere(I2PSession sess, boolean raw) { + this.sess = sess; + this.raw = raw; + + // create maker + if (!raw) + this.maker = new I2PDatagramMaker(this.sess); + } + + /** @param to - where it's going */ + public synchronized void send(Destination to, byte[] data) { + // create payload + byte[] payload; + if(!this.raw) + payload = this.maker.makeI2PDatagram(data); + else + payload = data; + + // send message + try { + this.sess.sendMessage(to, payload, I2PSession.PROTO_DATAGRAM, + I2PSession.PORT_UNSPECIFIED, I2PSession.PORT_UNSPECIFIED); + } catch(I2PSessionException exc) { + // TODO: handle better + exc.printStackTrace(); + } + } + + + + + + + + + + + + + + protected boolean raw; + protected I2PSession sess; + protected Destination dest; + protected I2PDatagramMaker maker; +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/I2PSource.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/I2PSource.java new file mode 100644 index 000000000..0b5474777 --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/I2PSource.java @@ -0,0 +1,123 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. 
+ */ + +package net.i2p.i2ptunnel.udp; + +// system +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; + +// i2p +import net.i2p.client.I2PSession; +import net.i2p.client.I2PSessionListener; +import net.i2p.client.datagram.I2PDatagramDissector; + +/** + * + * @author welterde + */ +public class I2PSource implements Source, Runnable { + public I2PSource(I2PSession sess) { + this(sess, true, false); + } + public I2PSource(I2PSession sess, boolean verify) { + this(sess, verify, false); + } + public I2PSource(I2PSession sess, boolean verify, boolean raw) { + this.sess = sess; + this.sink = null; + this.verify = verify; + this.raw = raw; + + // create queue + this.queue = new ArrayBlockingQueue(256); + + // create listener + this.sess.setSessionListener(new Listener()); + + // create thread + this.thread = new Thread(this); + } + + public void setSink(Sink sink) { + this.sink = sink; + } + + public void start() { + this.thread.start(); + } + + public void run() { + // create dissector + I2PDatagramDissector diss = new I2PDatagramDissector(); + while(true) { + try { + // get id + int id = this.queue.take(); + + // receive message + byte[] msg = this.sess.receiveMessage(id); + + if(!this.raw) { + // load datagram into it + diss.loadI2PDatagram(msg); + + // now call sink + if(this.verify) + this.sink.send(diss.getSender(), diss.getPayload()); + else + this.sink.send(diss.extractSender(), diss.extractPayload()); + } else { + // verify is ignored + this.sink.send(null, msg); + } + //System.out.print("r"); + } catch(Exception e) { + e.printStackTrace(); + } + } + } + + + + + + + protected class Listener implements I2PSessionListener { + + public void messageAvailable(I2PSession sess, int id, long size) { + try { + queue.put(id); + } catch(Exception e) { + // ignore + } + } + + public void reportAbuse(I2PSession arg0, int arg1) { + // ignore + } + + public void disconnected(I2PSession arg0) { + // ignore + } + + public void 
errorOccurred(I2PSession arg0, String arg1, Throwable arg2) { + // ignore + } + + } + + + + + + + protected I2PSession sess; + protected BlockingQueue queue; + protected Sink sink; + protected Thread thread; + protected boolean verify; + protected boolean raw; +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/Sink.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/Sink.java new file mode 100644 index 000000000..49e3e47a3 --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/Sink.java @@ -0,0 +1,17 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. + */ + +package net.i2p.i2ptunnel.udp; + +// i2p +import net.i2p.data.Destination; + +/** + * + * @author welterde + */ +public interface Sink { + public void send(Destination src, byte[] data); +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/Source.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/Source.java new file mode 100644 index 000000000..f65d03b19 --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/Source.java @@ -0,0 +1,15 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. + */ + +package net.i2p.i2ptunnel.udp; + +/** + * + * @author welterde + */ +public interface Source { + public void setSink(Sink sink); + public void start(); +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/Stream.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/Stream.java new file mode 100644 index 000000000..b8b57e696 --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/Stream.java @@ -0,0 +1,15 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. 
+ */ + +package net.i2p.i2ptunnel.udp; + +/** + * + * @author welterde + */ +public interface Stream { + public void start(); + public void stop(); +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/UDPSink.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/UDPSink.java new file mode 100644 index 000000000..d2e8e8924 --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/UDPSink.java @@ -0,0 +1,77 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. + */ + +package net.i2p.i2ptunnel.udp; + +// system +import java.net.DatagramSocket; +import java.net.DatagramPacket; +import java.net.InetAddress; + +// i2p +import net.i2p.data.Destination; + +/** + * + * @author welterde + */ +public class UDPSink implements Sink { + public UDPSink(InetAddress host, int port) { + // create socket + try { + this.sock = new DatagramSocket(); + } catch(Exception e) { + // TODO: fail better + throw new RuntimeException("failed to open udp-socket", e); + } + + this.remoteHost = host; + + // remote port + this.remotePort = port; + } + + public void send(Destination src, byte[] data) { + // if data.length > this.sock.getSendBufferSize() ... 
+ + // create packet + DatagramPacket packet = new DatagramPacket(data, data.length, this.remoteHost, this.remotePort); + + // send packet + try { + this.sock.send(packet); + } catch(Exception e) { + // TODO: fail a bit better + e.printStackTrace(); + } + } + + public int getPort() { + return this.sock.getLocalPort(); + } + + /** to pass to UDPSource constructor */ + public DatagramSocket getSocket() { + return this.sock; + } + + public void stop() { + this.sock.close(); + } + + + + + + + + + + + protected DatagramSocket sock; + protected InetAddress remoteHost; + protected int remotePort; + +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/UDPSource.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/UDPSource.java new file mode 100644 index 000000000..fc1dd5bf2 --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udp/UDPSource.java @@ -0,0 +1,91 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. + */ + +package net.i2p.i2ptunnel.udp; + +// system +import java.net.DatagramSocket; +import java.net.DatagramPacket; + +/** + * + * @author welterde + */ +public class UDPSource implements Source, Runnable { + public static final int MAX_SIZE = 15360; + public UDPSource(int port) { + this.sink = null; + + // create udp-socket + try { + this.sock = new DatagramSocket(port); + } catch(Exception e) { + throw new RuntimeException("failed to listen...", e); + } + + // create thread + this.thread = new Thread(this); + } + + /** use socket from UDPSink */ + public UDPSource(DatagramSocket sock) { + this.sink = null; + this.sock = sock; + this.thread = new Thread(this); + } + + public void setSink(Sink sink) { + this.sink = sink; + } + + public void start() { + this.thread.start(); + } + + public void run() { + // create packet + byte[] buf = new byte[MAX_SIZE]; + DatagramPacket pack = new DatagramPacket(buf, buf.length); + while(true) { + try { + // receive... 
+ this.sock.receive(pack); + + // create new data array + byte[] nbuf = new byte[pack.getLength()]; + + // copy over + System.arraycopy(pack.getData(), 0, nbuf, 0, nbuf.length); + + // transfer to sink + this.sink.send(null, nbuf); + //System.out.print("i"); + } catch(Exception e) { + e.printStackTrace(); + break; + } + } + } + + public void stop() { + this.sock.close(); + } + + + + + + + + + + + + + + protected DatagramSocket sock; + protected Sink sink; + protected Thread thread; +} diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udpTunnel/I2PTunnelUDPClientBase.java b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udpTunnel/I2PTunnelUDPClientBase.java new file mode 100644 index 000000000..c92da6ae8 --- /dev/null +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/udpTunnel/I2PTunnelUDPClientBase.java @@ -0,0 +1,210 @@ +/* I2PTunnel is GPL'ed (with the exception mentioned in I2PTunnel.java) + * (c) 2003 - 2004 mihi + */ +package net.i2p.i2ptunnel.udpTunnel; + +import java.io.ByteArrayOutputStream; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.net.ConnectException; +import java.net.InetAddress; +import java.net.NoRouteToHostException; +import java.net.ServerSocket; +import java.net.Socket; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Properties; + +import net.i2p.I2PAppContext; +import net.i2p.I2PException; +import net.i2p.client.I2PClient; +import net.i2p.client.I2PClientFactory; +import net.i2p.client.I2PSession; +import net.i2p.client.I2PSessionException; +import net.i2p.data.DataFormatException; +import net.i2p.data.Destination; +import net.i2p.i2ptunnel.I2PTunnel; +import net.i2p.i2ptunnel.I2PTunnelTask; +import net.i2p.i2ptunnel.Logging; +import net.i2p.i2ptunnel.udp.*; +import net.i2p.util.EventDispatcher; +import net.i2p.util.I2PThread; +import net.i2p.util.Log; + +public abstract class 
public abstract class I2PTunnelUDPClientBase extends I2PTunnelTask implements Source, Sink {

    private static final Log _log = new Log(I2PTunnelUDPClientBase.class);
    protected I2PAppContext _context;
    protected Logging l;

    static final long DEFAULT_CONNECT_TIMEOUT = 60 * 1000;

    private static volatile long __clientId = 0;
    protected long _clientId;

    protected Destination dest = null;

    // NOTE(review): listenerReady, ss, pubkey, handlerName, conLock,
    // _numConnectionBuilders and _maxWaitTime are never used in this class --
    // they look like leftovers from I2PTunnelClientBase; confirm before removing
    private boolean listenerReady = false;

    private ServerSocket ss;

    // guards the startRunning handshake below
    private Object startLock = new Object();
    private boolean startRunning = false;

    private byte[] pubkey;

    private String handlerName;

    private Object conLock = new Object();

    /** How many connections will we allow to be in the process of being built at once? */
    private int _numConnectionBuilders;
    /** How long will we allow sockets to sit in the _waitingSockets map before killing them? */
    private int _maxWaitTime;

    private I2PSession _session;
    private Source _i2pSource;
    private Sink _i2pSink;
    private Destination _otherDest;

    /**
     * Base client class that sets up an I2P Datagram client destination.
     * The UDP side is not implemented here, as there are at least
     * two possibilities:
     *
     * 1) UDP side is a "server"
     *    Example: Streamr Consumer
     *    - Configure a destination host and port
     *    - External application sends no data
     *    - Extending class must have a constructor with host and port arguments
     *
     * 2) UDP side is a client/server
     *    Example: SOCKS UDP (DNS requests?)
     *    - configure an inbound port and a destination host and port
     *    - External application sends and receives data
     *    - Extending class must have a constructor with host and 2 port arguments
     *
     * So the implementing class must create a UDPSource and/or UDPSink,
     * and must call setSink().
     *
     * @throws IllegalArgumentException if the I2CP configuration is b0rked so
     *                                  badly that we cant create a socketManager
     *
     * @author zzz with portions from welterde's streamr
     */
    public I2PTunnelUDPClientBase(String destination, Logging l, EventDispatcher notifyThis,
                                  I2PTunnel tunnel) throws IllegalArgumentException {
        super("UDPServer", notifyThis, tunnel);
        _clientId = ++__clientId;
        this.l = l;

        _context = tunnel.getContext();

        // clients are transient, so no need to publish the leaseset
        tunnel.getClientOptions().setProperty("i2cp.dontPublishLeaseSet", "true");

        // create i2pclient and destination
        I2PClient client = I2PClientFactory.createClient();
        Destination dest;
        byte[] key;
        try {
            // throwaway key pair; the private key bytes stay in 'key'
            ByteArrayOutputStream out = new ByteArrayOutputStream(512);
            dest = client.createDestination(out);
            key = out.toByteArray();
        } catch(Exception exc) {
            throw new RuntimeException("failed to create i2p-destination", exc);
        }

        // create a session
        try {
            ByteArrayInputStream in = new ByteArrayInputStream(key);
            _session = client.createSession(in, tunnel.getClientOptions());
        } catch(Exception exc) {
            throw new RuntimeException("failed to create session", exc);
        }

        // Setup the source. Always expect raw unverified datagrams.
        _i2pSource = new I2PSource(_session, false, true);

        // Setup the sink. Always send repliable datagrams.
        if (destination != null && destination.length() > 0) {
            // fixed-destination mode: resolve the name once, up front
            try {
                _otherDest = I2PTunnel.destFromName(destination);
            } catch (DataFormatException dfe) {}
            if (_otherDest == null) {
                l.log("Could not resolve " + destination);
                throw new RuntimeException("failed to create session - could not resolve " + destination);
            }
            _i2pSink = new I2PSink(_session, _otherDest, false);
        } else {
            // per-call destination mode
            _i2pSink = new I2PSinkAnywhere(_session, false);
        }
    }

    /**
     * Actually start working on outgoing connections.
     * Classes should override to start UDP side as well.
     *
     * Not specified in I2PTunnelTask but used in both
     * I2PTunnelClientBase and I2PTunnelServer so let's
     * implement it here too.
     */
    public void startRunning() {
        synchronized (startLock) {
            // connect the I2CP session before starting the source pump
            try {
                _session.connect();
            } catch(I2PSessionException exc) {
                throw new RuntimeException("failed to connect session", exc);
            }
            start();
            startRunning = true;
            startLock.notify();
        }
        open = true;
    }

    /**
     * I2PTunnelTask Methods
     *
     * Classes should override to close UDP side as well
     */
    public boolean close(boolean forced) {
        if (!open) return true;
        if (_session != null) {
            try {
                _session.destroySession();
            } catch (I2PSessionException ise) {}
        }
        l.log("Closing client " + toString());
        open = false;
        return true;
    }

    /**
     * Source Methods
     *
     * Sets the receiver of the UDP datagrams from I2P
     * Subclass must call this after constructor
     * and before start()
     */
    public void setSink(Sink s) {
        _i2pSource.setSink(s);
    }

    /** start the source */
    public void start() {
        _i2pSource.start();
    }

    /**
     * Sink Methods
     *
     * @param to - ignored if configured for a single destination
     *             (we use the dest specified in the constructor)
     */
    public void send(Destination to, byte[] data) {
        _i2pSink.send(to, data);
    }
}
public class I2PTunnelUDPServerBase extends I2PTunnelTask implements Source, Sink {

    private final static Log _log = new Log(I2PTunnelUDPServerBase.class);

    // guards close(); slock is exposed for subclasses
    private Object lock = new Object();
    protected Object slock = new Object();

    private static volatile long __serverId = 0;

    protected Logging l;

    private static final long DEFAULT_READ_TIMEOUT = -1; // 3*60*1000;
    /** default timeout to 3 minutes - override if desired */
    protected long readTimeout = DEFAULT_READ_TIMEOUT;

    private I2PSession _session;
    private Source _i2pSource;
    private Sink _i2pSink;

    /**
     * Base client class that sets up an I2P Datagram server destination.
     * The UDP side is not implemented here, as there are at least
     * two possibilities:
     *
     * 1) UDP side is a "client"
     *    Example: Streamr Producer
     *    - configure an inbound port
     *    - External application receives no data
     *    - Extending class must have a constructor with a port argument
     *
     * 2) UDP side is a client/server
     *    Example: DNS
     *    - configure an inbound port and a destination host and port
     *    - External application sends and receives data
     *    - Extending class must have a constructor with host and 2 port arguments
     *
     * So the implementing class must create a UDPSource and/or UDPSink,
     * and must call setSink().
     *
     * @throws IllegalArgumentException if the I2CP configuration is b0rked so
     *                                  badly that we cant create a socketManager
     *
     * @author zzz with portions from welterde's streamr
     */
    public I2PTunnelUDPServerBase(boolean verify, File privkey, String privkeyname, Logging l,
                                  EventDispatcher notifyThis, I2PTunnel tunnel) {
        super("UDPServer <- " + privkeyname, notifyThis, tunnel);
        FileInputStream fis = null;
        try {
            fis = new FileInputStream(privkey);
            init(verify, fis, privkeyname, l);
        } catch (IOException ioe) {
            _log.error("Error starting server", ioe);
            notifyEvent("openServerResult", "error");
        } finally {
            if (fis != null)
                try { fis.close(); } catch (IOException ioe) {}
        }
    }

    private void init(boolean verify, InputStream privData, String privkeyname, Logging l) {
        this.l = l;
        // NOTE(review): portNum is parsed and validated here but never used
        // afterwards -- presumably leftover from the TCP server base; confirm
        int portNum = 7654;
        if (getTunnel().port != null) {
            try {
                portNum = Integer.parseInt(getTunnel().port);
            } catch (NumberFormatException nfe) {
                _log.log(Log.CRIT, "Invalid port specified [" + getTunnel().port + "], reverting to " + portNum);
            }
        }

        // create i2pclient
        I2PClient client = I2PClientFactory.createClient();

        try {
            // the server identity comes from the persistent privkey stream
            _session = client.createSession(privData, getTunnel().getClientOptions());
        } catch(I2PSessionException exc) {
            throw new RuntimeException("failed to create session", exc);
        }

        // Setup the source. Always expect repliable datagrams, optionally verify
        _i2pSource = new I2PSource(_session, verify, false);

        // Setup the sink. Always send raw datagrams.
        _i2pSink = new I2PSinkAnywhere(_session, true);
    }

    /**
     * Classes should override to start UDP side as well.
     *
     * Not specified in I2PTunnelTask but used in both
     * I2PTunnelClientBase and I2PTunnelServer so let's
     * implement it here too.
     */
    public void startRunning() {
        //synchronized (startLock) {
        try {
            _session.connect();
        } catch(I2PSessionException exc) {
            throw new RuntimeException("failed to connect session", exc);
        }
        start();
        //}

        notifyEvent("openServerResult", "ok");
        open = true;
    }

    /**
     * Set the read idle timeout for newly-created connections (in
     * milliseconds). After this time expires without data being reached from
     * the I2P network, the connection itself will be closed.
     */
    public void setReadTimeout(long ms) {
        readTimeout = ms;
    }

    /**
     * Get the read idle timeout for newly-created connections (in
     * milliseconds).
     *
     * @return The read timeout used for connections
     */
    public long getReadTimeout() {
        return readTimeout;
    }

    /**
     * I2PTunnelTask Methods
     *
     * Classes should override to close UDP side as well
     */
    public boolean close(boolean forced) {
        if (!open) return true;
        synchronized (lock) {
            l.log("Shutting down server " + toString());
            try {
                if (_session != null) {
                    _session.destroySession();
                }
            } catch (I2PException ex) {
                _log.error("Error destroying the session", ex);
            }
            l.log("Server shut down.");
            open = false;
            return true;
        }
    }

    /**
     * Source Methods
     *
     * Sets the receiver of the UDP datagrams from I2P
     * Subclass must call this after constructor
     * and before start()
     */
    public void setSink(Sink s) {
        _i2pSource.setSink(s);
    }

    /** start the source */
    public void start() {
        _i2pSource.start();
    }

    /**
     * Sink Methods
     *
     * @param to
     *
     */
    public void send(Destination to, byte[] data) {
        _i2pSink.send(to, data);
    }
}
net.i2p.i2ptunnel.web; * */ +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Properties; @@ -28,9 +29,7 @@ public class EditBean extends IndexBean { if (controllers.size() > tunnel) { TunnelController cur = (TunnelController)controllers.get(tunnel); if (cur == null) return false; - return ( ("client".equals(cur.getType())) || - ("httpclient".equals(cur.getType()))|| - ("ircclient".equals(cur.getType()))); + return isClient(cur.getType()); } else { return false; } @@ -38,7 +37,7 @@ public class EditBean extends IndexBean { public String getTargetHost(int tunnel) { TunnelController tun = getController(tunnel); - if (tun != null) + if (tun != null && tun.getTargetHost() != null) return tun.getTargetHost(); else return "127.0.0.1"; @@ -52,7 +51,7 @@ public class EditBean extends IndexBean { } public String getSpoofedHost(int tunnel) { TunnelController tun = getController(tunnel); - if (tun != null) + if (tun != null && tun.getSpoofedHost() != null) return tun.getSpoofedHost(); else return ""; @@ -61,8 +60,9 @@ public class EditBean extends IndexBean { TunnelController tun = getController(tunnel); if (tun != null && tun.getPrivKeyFile() != null) return tun.getPrivKeyFile(); - else - return ""; + if (tunnel < 0) + tunnel = _group.getControllers().size(); + return "i2ptunnel" + tunnel + "-privKeys.dat"; } public boolean startAutomatically(int tunnel) { @@ -82,119 +82,123 @@ public class EditBean extends IndexBean { } public boolean shouldDelay(int tunnel) { - TunnelController tun = getController(tunnel); - if (tun != null) { - Properties opts = getOptions(tun); - if (opts != null) { - String delay = opts.getProperty("i2p.streaming.connectDelay"); - if ( (delay == null) || ("0".equals(delay)) ) - return false; - else - return true; - } else { - return false; - } - } else { - return false; - } + return getProperty(tunnel, "i2p.streaming.connectDelay", 0) > 0; } public boolean isInteractive(int tunnel) { - TunnelController tun = 
getController(tunnel); - if (tun != null) { - Properties opts = getOptions(tun); - if (opts != null) { - String wsiz = opts.getProperty("i2p.streaming.maxWindowSize"); - if ( (wsiz == null) || (!"1".equals(wsiz)) ) - return false; - else - return true; - } else { - return false; - } - } else { - return false; - } + return getProperty(tunnel, "i2p.streaming.maxWindowSize", 128) == 12; } public int getTunnelDepth(int tunnel, int defaultLength) { - TunnelController tun = getController(tunnel); - if (tun != null) { - Properties opts = getOptions(tun); - if (opts != null) { - String len = opts.getProperty("inbound.length"); - if (len == null) return defaultLength; - try { - return Integer.parseInt(len); - } catch (NumberFormatException nfe) { - return defaultLength; - } - } else { - return defaultLength; - } - } else { - return defaultLength; - } + return getProperty(tunnel, "inbound.length", defaultLength); } public int getTunnelQuantity(int tunnel, int defaultQuantity) { - TunnelController tun = getController(tunnel); - if (tun != null) { - Properties opts = getOptions(tun); - if (opts != null) { - String len = opts.getProperty("inbound.quantity"); - if (len == null) return defaultQuantity; - try { - return Integer.parseInt(len); - } catch (NumberFormatException nfe) { - return defaultQuantity; - } - } else { - return defaultQuantity; - } - } else { - return defaultQuantity; - } + return getProperty(tunnel, "inbound.quantity", defaultQuantity); } public int getTunnelBackupQuantity(int tunnel, int defaultBackupQuantity) { - TunnelController tun = getController(tunnel); - if (tun != null) { - Properties opts = getOptions(tun); - if (opts != null) { - String len = opts.getProperty("inbound.backupQuantity"); - if (len == null) return defaultBackupQuantity; - try { - return Integer.parseInt(len); - } catch (NumberFormatException nfe) { - return defaultBackupQuantity; - } - } else { - return defaultBackupQuantity; - } - } else { - return defaultBackupQuantity; - } + return 
getProperty(tunnel, "inbound.backupQuantity", defaultBackupQuantity); } public int getTunnelVariance(int tunnel, int defaultVariance) { + return getProperty(tunnel, "inbound.lengthVariance", defaultVariance); + } + + public boolean getReduce(int tunnel) { + return getBooleanProperty(tunnel, "i2cp.reduceOnIdle"); + } + + public int getReduceCount(int tunnel) { + return getProperty(tunnel, "i2cp.reduceQuantity", 1); + } + + public int getReduceTime(int tunnel) { + return getProperty(tunnel, "i2cp.reduceIdleTime", 20*60*1000) / (60*1000); + } + + public int getCert(int tunnel) { + return 0; + } + + public int getEffort(int tunnel) { + return 23; + } + + public String getSigner(int tunnel) { + return ""; + } + + public boolean getEncrypt(int tunnel) { + return getBooleanProperty(tunnel, "i2cp.encryptLeaseSet"); + } + + public String getEncryptKey(int tunnel) { + return getProperty(tunnel, "i2cp.leaseSetKey", ""); + } + + public boolean getAccess(int tunnel) { + return getBooleanProperty(tunnel, "i2cp.enableAccessList"); + } + + public String getAccessList(int tunnel) { + return getProperty(tunnel, "i2cp.accessList", "").replaceAll(",", "\n"); + } + + public boolean getClose(int tunnel) { + return getBooleanProperty(tunnel, "i2cp.closeOnIdle"); + } + + public int getCloseTime(int tunnel) { + return getProperty(tunnel, "i2cp.closeIdleTime", 30*60*1000) / (60*1000); + } + + public boolean getNewDest(int tunnel) { + return getBooleanProperty(tunnel, "i2cp.newDestOnResume"); + } + + public boolean getPersistentClientKey(int tunnel) { + return getBooleanProperty(tunnel, "persistentClientKey"); + } + + public boolean getDelayOpen(int tunnel) { + return getBooleanProperty(tunnel, "i2cp.delayOpen"); + } + + private int getProperty(int tunnel, String prop, int def) { TunnelController tun = getController(tunnel); if (tun != null) { Properties opts = getOptions(tun); if (opts != null) { - String len = opts.getProperty("inbound.lengthVariance"); - if (len == null) return 
defaultVariance; + String s = opts.getProperty(prop); + if (s == null) return def; try { - return Integer.parseInt(len); - } catch (NumberFormatException nfe) { - return defaultVariance; - } - } else { - return defaultVariance; + return Integer.parseInt(s); + } catch (NumberFormatException nfe) {} } - } else { - return defaultVariance; } + return def; + } + + private String getProperty(int tunnel, String prop, String def) { + TunnelController tun = getController(tunnel); + if (tun != null) { + Properties opts = getOptions(tun); + if (opts != null) + return opts.getProperty(prop, def); + } + return def; + } + + /** default is false */ + private boolean getBooleanProperty(int tunnel, String prop) { + TunnelController tun = getController(tunnel); + if (tun != null) { + Properties opts = getOptions(tun); + if (opts != null) + return Boolean.valueOf(opts.getProperty(prop)).booleanValue(); + } + return false; } public String getI2CPHost(int tunnel) { @@ -222,19 +226,9 @@ public class EditBean extends IndexBean { int i = 0; for (Iterator iter = opts.keySet().iterator(); iter.hasNext(); ) { String key = (String)iter.next(); + if (_noShowSet.contains(key)) + continue; String val = opts.getProperty(key); - if ("inbound.length".equals(key)) continue; - if ("outbound.length".equals(key)) continue; - if ("inbound.lengthVariance".equals(key)) continue; - if ("outbound.lengthVariance".equals(key)) continue; - if ("inbound.backupQuantity".equals(key)) continue; - if ("outbound.backupQuantity".equals(key)) continue; - if ("inbound.quantity".equals(key)) continue; - if ("outbound.quantity".equals(key)) continue; - if ("inbound.nickname".equals(key)) continue; - if ("outbound.nickname".equals(key)) continue; - if ("i2p.streaming.connectDelay".equals(key)) continue; - if ("i2p.streaming.maxWindowSize".equals(key)) continue; if (i != 0) buf.append(' '); buf.append(key).append('=').append(val); i++; diff --git a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/web/IndexBean.java 
b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/web/IndexBean.java index c8d321ea2..fcf45d94d 100644 --- a/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/web/IndexBean.java +++ b/apps/i2ptunnel/java/src/net/i2p/i2ptunnel/web/IndexBean.java @@ -8,13 +8,24 @@ package net.i2p.i2ptunnel.web; * */ +import java.util.concurrent.ConcurrentHashMap; +import java.util.Arrays; +import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Properties; +import java.util.Set; import java.util.StringTokenizer; import net.i2p.I2PAppContext; +import net.i2p.data.Base32; +import net.i2p.data.Certificate; +import net.i2p.data.Destination; +import net.i2p.data.PrivateKeyFile; +import net.i2p.data.SessionKey; import net.i2p.i2ptunnel.TunnelController; import net.i2p.i2ptunnel.TunnelControllerGroup; +import net.i2p.util.ConcurrentHashSet; import net.i2p.util.Log; /** @@ -57,6 +68,11 @@ public class IndexBean { private boolean _sharedClient; private boolean _privKeyGenerate; private boolean _removeConfirmed; + private Set _booleanOptions; + private Map _otherOptions; + private int _hashCashValue; + private int _certType; + private String _certSigner; public static final int RUNNING = 1; public static final int STARTING = 2; @@ -85,6 +101,8 @@ public class IndexBean { } catch (NumberFormatException nfe) {} _nextNonce = _context.random().nextLong(); System.setProperty(PROP_NONCE, Long.toString(_nextNonce)); + _booleanOptions = new ConcurrentHashSet(4); + _otherOptions = new ConcurrentHashMap(4); } public long getNextNonce() { return _nextNonce; } @@ -146,6 +164,12 @@ public class IndexBean { else if ("Delete this proxy".equals(_action) || // IE workaround: (_action.toLowerCase().indexOf("d
elete") >= 0)) return deleteTunnel(); + else if ("Estimate".equals(_action)) + return PrivateKeyFile.estimateHashCashTime(_hashCashValue); + else if ("Modify".equals(_action)) + return modifyDestination(); + else if ("Generate".equals(_action)) + return generateNewEncryptionKey(); else return "Action " + _action + " unknown"; } @@ -209,10 +233,7 @@ public class IndexBean { } // Only modify other shared tunnels // if the current tunnel is shared, and of supported type - if ("true".equalsIgnoreCase(cur.getSharedClient()) && - ("ircclient".equals(cur.getType()) || - "httpclient".equals(cur.getType()) || - "client".equals(cur.getType()))) { + if ("true".equalsIgnoreCase(cur.getSharedClient()) && isClient(cur.getType())) { // all clients use the same I2CP session, and as such, use the same I2CP options List controllers = _group.getControllers(); @@ -224,11 +245,7 @@ public class IndexBean { // Only modify this non-current tunnel // if it belongs to a shared destination, and is of supported type - if ("true".equalsIgnoreCase(c.getSharedClient()) && - ("httpclient".equals(c.getType()) || - "ircclient".equals(c.getType()) || - "client".equals(c.getType()))) { - + if ("true".equalsIgnoreCase(c.getSharedClient()) && isClient(c.getType())) { Properties cOpt = c.getConfig(""); if (_tunnelQuantity != null) { cOpt.setProperty("option.inbound.quantity", _tunnelQuantity); @@ -326,9 +343,16 @@ public class IndexBean { public boolean isClient(int tunnelNum) { TunnelController cur = getController(tunnelNum); if (cur == null) return false; - return ( ("client".equals(cur.getType())) || - ("httpclient".equals(cur.getType())) || - ("ircclient".equals(cur.getType()))); + return isClient(cur.getType()); + } + + public static boolean isClient(String type) { + return ( ("client".equals(type)) || + ("httpclient".equals(type)) || + ("sockstunnel".equals(type)) || + ("connectclient".equals(type)) || + ("streamrclient".equals(type)) || + ("ircclient".equals(type))); } public String 
getTunnelName(int tunnel) { @@ -361,6 +385,11 @@ public class IndexBean { else if ("ircclient".equals(internalType)) return "IRC client"; else if ("server".equals(internalType)) return "Standard server"; else if ("httpserver".equals(internalType)) return "HTTP server"; + else if ("sockstunnel".equals(internalType)) return "SOCKS 4/4a/5 proxy"; + else if ("connectclient".equals(internalType)) return "CONNECT/SSL/HTTPS proxy"; + else if ("ircserver".equals(internalType)) return "IRC server"; + else if ("streamrclient".equals(internalType)) return "Streamr client"; + else if ("streamrserver".equals(internalType)) return "Streamr server"; else return internalType; } @@ -407,13 +436,13 @@ public class IndexBean { public String getClientDestination(int tunnel) { TunnelController tun = getController(tunnel); if (tun == null) return ""; - if ("client".equals(tun.getType())||"ircclient".equals(tun.getType())) { - if (tun.getTargetDestination() != null) - return tun.getTargetDestination(); - else - return ""; - } - else return tun.getProxyList(); + String rv; + if ("client".equals(tun.getType()) || "ircclient".equals(tun.getType()) || + "streamrclient".equals(tun.getType())) + rv = tun.getTargetDestination(); + else + rv = tun.getProxyList(); + return rv != null ? 
rv : ""; } public String getServerTarget(int tunnel) { @@ -430,11 +459,28 @@ public class IndexBean { String rv = tun.getMyDestination(); if (rv != null) return rv; - else - return ""; - } else { - return ""; + // if not running, do this the hard way + String keyFile = tun.getPrivKeyFile(); + if (keyFile != null && keyFile.trim().length() > 0) { + PrivateKeyFile pkf = new PrivateKeyFile(keyFile); + try { + Destination d = pkf.getDestination(); + if (d != null) + return d.toBase64(); + } catch (Exception e) {} + } } + return ""; + } + + public String getDestHashBase32(int tunnel) { + TunnelController tun = getController(tunnel); + if (tun != null) { + String rv = tun.getMyDestHashBase32(); + if (rv != null) + return rv; + } + return ""; } /// @@ -556,6 +602,164 @@ public class IndexBean { _profile = profile; } + public void setReduce(String moo) { + _booleanOptions.add("i2cp.reduceOnIdle"); + } + public void setClose(String moo) { + _booleanOptions.add("i2cp.closeOnIdle"); + } + public void setEncrypt(String moo) { + _booleanOptions.add("i2cp.encryptLeaseSet"); + } + public void setAccess(String moo) { + _booleanOptions.add("i2cp.enableAccessList"); + } + public void setDelayOpen(String moo) { + _booleanOptions.add("i2cp.delayOpen"); + } + public void setNewDest(String val) { + if ("1".equals(val)) + _booleanOptions.add("i2cp.newDestOnResume"); + else if ("2".equals(val)) + _booleanOptions.add("persistentClientKey"); + } + + public void setReduceTime(String val) { + if (val != null) { + try { + _otherOptions.put("i2cp.reduceIdleTime", "" + (Integer.parseInt(val.trim()) * 60*1000)); + } catch (NumberFormatException nfe) {} + } + } + public void setReduceCount(String val) { + if (val != null) + _otherOptions.put("i2cp.reduceQuantity", val.trim()); + } + public void setEncryptKey(String val) { + if (val != null) + _otherOptions.put("i2cp.leaseSetKey", val.trim()); + } + public void setAccessList(String val) { + if (val != null) + _otherOptions.put("i2cp.accessList", 
val.trim().replaceAll("\r\n", ",").replaceAll("\n", ",").replaceAll(" ", ",")); + } + public void setCloseTime(String val) { + if (val != null) { + try { + _otherOptions.put("i2cp.closeIdleTime", "" + (Integer.parseInt(val.trim()) * 60*1000)); + } catch (NumberFormatException nfe) {} + } + } + + /** params needed for hashcash and dest modification */ + public void setEffort(String val) { + if (val != null) { + try { + _hashCashValue = Integer.parseInt(val.trim()); + } catch (NumberFormatException nfe) {} + } + } + public void setCert(String val) { + if (val != null) { + try { + _certType = Integer.parseInt(val.trim()); + } catch (NumberFormatException nfe) {} + } + } + public void setSigner(String val) { + _certSigner = val; + } + + /** Modify or create a destination */ + private String modifyDestination() { + if (_privKeyFile == null || _privKeyFile.trim().length() <= 0) + return "Private Key File not specified"; + + TunnelController tun = getController(_tunnel); + Properties config = getConfig(); + if (config == null) + return "Invalid params"; + if (tun == null) { + // creating new + tun = new TunnelController(config, "", true); + _group.addController(tun); + saveChanges(); + } else if (tun.getIsRunning() || tun.getIsStarting()) { + return "Tunnel must be stopped before modifying destination"; + } + PrivateKeyFile pkf = new PrivateKeyFile(_privKeyFile); + try { + pkf.createIfAbsent(); + } catch (Exception e) { + return "Create private key file failed: " + e; + } + switch (_certType) { + case Certificate.CERTIFICATE_TYPE_NULL: + case Certificate.CERTIFICATE_TYPE_HIDDEN: + pkf.setCertType(_certType); + break; + case Certificate.CERTIFICATE_TYPE_HASHCASH: + pkf.setHashCashCert(_hashCashValue); + break; + case Certificate.CERTIFICATE_TYPE_SIGNED: + if (_certSigner == null || _certSigner.trim().length() <= 0) + return "No signing destination specified"; + // find the signer's key file... 
+ String signerPKF = null; + for (int i = 0; i < getTunnelCount(); i++) { + TunnelController c = getController(i); + if (_certSigner.equals(c.getConfig("").getProperty("name")) || + _certSigner.equals(c.getConfig("").getProperty("spoofedHost"))) { + signerPKF = c.getConfig("").getProperty("privKeyFile"); + break; + } + } + if (signerPKF == null || signerPKF.length() <= 0) + return "Signing destination " + _certSigner + " not found"; + if (_privKeyFile.equals(signerPKF)) + return "Self-signed destinations not allowed"; + Certificate c = pkf.setSignedCert(new PrivateKeyFile(signerPKF)); + if (c == null) + return "Signing failed - does signer destination exist?"; + break; + default: + return "Unknown certificate type"; + } + Destination newdest; + try { + pkf.write(); + newdest = pkf.getDestination(); + } catch (Exception e) { + return "Modification failed: " + e; + } + return "Destination modified - " + + "New Base32 is " + Base32.encode(newdest.calculateHash().getData()) + ".b32.i2p " + + "New Destination is " + newdest.toBase64(); + } + + /** New key */ + private String generateNewEncryptionKey() { + TunnelController tun = getController(_tunnel); + Properties config = getConfig(); + if (config == null) + return "Invalid params"; + if (tun == null) { + // creating new + tun = new TunnelController(config, "", true); + _group.addController(tun); + saveChanges(); + } else if (tun.getIsRunning() || tun.getIsStarting()) { + return "Tunnel must be stopped before modifying leaseset encryption key"; + } + byte[] data = new byte[SessionKey.KEYSIZE_BYTES]; + _context.random().nextBytes(data); + SessionKey sk = new SessionKey(data); + setEncryptKey(sk.toBase64()); + setEncrypt(""); + saveChanges(); + return "New Leaseset Encryption Key: " + sk.toBase64(); + } + /** * Based on all provided data, create a set of configuration parameters * suitable for use in a TunnelController. 
This will replace (not add to) @@ -566,82 +770,79 @@ public class IndexBean { Properties config = new Properties(); updateConfigGeneric(config); - if ("httpclient".equals(_type)) { + if (isClient(_type)) { + // generic client stuff if (_port != null) config.setProperty("listenPort", _port); if (_reachableByOther != null) config.setProperty("interface", _reachableByOther); else config.setProperty("interface", _reachableBy); - if (_proxyList != null) - config.setProperty("proxyList", _proxyList); - - config.setProperty("option.inbound.nickname", CLIENT_NICKNAME); - config.setProperty("option.outbound.nickname", CLIENT_NICKNAME); + config.setProperty("option.inbound.nickname", CLIENT_NICKNAME); + config.setProperty("option.outbound.nickname", CLIENT_NICKNAME); if (_name != null && !_sharedClient) { config.setProperty("option.inbound.nickname", _name); config.setProperty("option.outbound.nickname", _name); } - config.setProperty("sharedClient", _sharedClient + ""); - }else if ("ircclient".equals(_type)) { - if (_port != null) - config.setProperty("listenPort", _port); - if (_reachableByOther != null) - config.setProperty("interface", _reachableByOther); - else - config.setProperty("interface", _reachableBy); - if (_targetDestination != null) - config.setProperty("targetDestination", _targetDestination); + for (String p : _booleanClientOpts) + config.setProperty("option." + p, "" + _booleanOptions.contains(p)); + for (String p : _otherClientOpts) + if (_otherOptions.containsKey(p)) + config.setProperty("option." + p, _otherOptions.get(p)); + } else { + // generic server stuff + if (_targetHost != null) + config.setProperty("targetHost", _targetHost); + if (_targetPort != null) + config.setProperty("targetPort", _targetPort); + for (String p : _booleanServerOpts) + config.setProperty("option." + p, "" + _booleanOptions.contains(p)); + for (String p : _otherServerOpts) + if (_otherOptions.containsKey(p)) + config.setProperty("option." 
+ p, _otherOptions.get(p)); + } - config.setProperty("option.inbound.nickname", CLIENT_NICKNAME); - config.setProperty("option.outbound.nickname", CLIENT_NICKNAME); - if (_name != null && !_sharedClient) { - config.setProperty("option.inbound.nickname", _name); - config.setProperty("option.outbound.nickname", _name); - } - - config.setProperty("sharedClient", _sharedClient + ""); - } else if ("client".equals(_type)) { - if (_port != null) - config.setProperty("listenPort", _port); - if (_reachableByOther != null) - config.setProperty("interface", _reachableByOther); - else - config.setProperty("interface", _reachableBy); + if ("httpclient".equals(_type) || "connectclient".equals(_type)) { + if (_proxyList != null) + config.setProperty("proxyList", _proxyList); + } else if ("ircclient".equals(_type) || "client".equals(_type) || "streamrclient".equals(_type)) { if (_targetDestination != null) config.setProperty("targetDestination", _targetDestination); - - config.setProperty("option.inbound.nickname", CLIENT_NICKNAME); - config.setProperty("option.outbound.nickname", CLIENT_NICKNAME); - if (_name != null && !_sharedClient) { - config.setProperty("option.inbound.nickname", _name); - config.setProperty("option.outbound.nickname", _name); - } - config.setProperty("sharedClient", _sharedClient + ""); - } else if ("server".equals(_type)) { - if (_targetHost != null) - config.setProperty("targetHost", _targetHost); - if (_targetPort != null) - config.setProperty("targetPort", _targetPort); - if (_privKeyFile != null) - config.setProperty("privKeyFile", _privKeyFile); } else if ("httpserver".equals(_type)) { - if (_targetHost != null) - config.setProperty("targetHost", _targetHost); - if (_targetPort != null) - config.setProperty("targetPort", _targetPort); - if (_privKeyFile != null) - config.setProperty("privKeyFile", _privKeyFile); if (_spoofedHost != null) config.setProperty("spoofedHost", _spoofedHost); - } else { - return null; } return config; } + private static 
final String _noShowOpts[] = { + "inbound.length", "outbound.length", "inbound.lengthVariance", "outbound.lengthVariance", + "inbound.backupQuantity", "outbound.backupQuantity", "inbound.quantity", "outbound.quantity", + "inbound.nickname", "outbound.nickname", "i2p.streaming.connectDelay", "i2p.streaming.maxWindowSize" + }; + private static final String _booleanClientOpts[] = { + "i2cp.reduceOnIdle", "i2cp.closeOnIdle", "i2cp.newDestOnResume", "persistentClientKey", "i2cp.delayOpen" + }; + private static final String _booleanServerOpts[] = { + "i2cp.reduceOnIdle", "i2cp.encryptLeaseSet", "i2cp.enableAccessList" + }; + private static final String _otherClientOpts[] = { + "i2cp.reduceIdleTime", "i2cp.reduceQuantity", "i2cp.closeIdleTime" + }; + private static final String _otherServerOpts[] = { + "i2cp.reduceIdleTime", "i2cp.reduceQuantity", "i2cp.leaseSetKey", "i2cp.accessList" + }; + protected static final Set _noShowSet = new HashSet(); + static { + _noShowSet.addAll(Arrays.asList(_noShowOpts)); + _noShowSet.addAll(Arrays.asList(_booleanClientOpts)); + _noShowSet.addAll(Arrays.asList(_booleanServerOpts)); + _noShowSet.addAll(Arrays.asList(_otherClientOpts)); + _noShowSet.addAll(Arrays.asList(_otherServerOpts)); + } + private void updateConfigGeneric(Properties config) { config.setProperty("type", _type); if (_name != null) @@ -655,6 +856,8 @@ public class IndexBean { } else { config.setProperty("i2cpPort", "7654"); } + if (_privKeyFile != null) + config.setProperty("privKeyFile", _privKeyFile); if (_customOptions != null) { StringTokenizer tok = new StringTokenizer(_customOptions); @@ -664,19 +867,9 @@ public class IndexBean { if ( (eq <= 0) || (eq >= pair.length()) ) continue; String key = pair.substring(0, eq); + if (_noShowSet.contains(key)) + continue; String val = pair.substring(eq+1); - if ("inbound.length".equals(key)) continue; - if ("outbound.length".equals(key)) continue; - if ("inbound.quantity".equals(key)) continue; - if 
("outbound.quantity".equals(key)) continue; - if ("inbound.lengthVariance".equals(key)) continue; - if ("outbound.lengthVariance".equals(key)) continue; - if ("inbound.backupQuantity".equals(key)) continue; - if ("outbound.backupQuantity".equals(key)) continue; - if ("inbound.nickname".equals(key)) continue; - if ("outbound.nickname".equals(key)) continue; - if ("i2p.streaming.connectDelay".equals(key)) continue; - if ("i2p.streaming.maxWindowSize".equals(key)) continue; config.setProperty("option." + key, val); } } @@ -704,14 +897,14 @@ public class IndexBean { else config.setProperty("option.i2p.streaming.connectDelay", "0"); if (_name != null) { - if ( ((!"client".equals(_type)) && (!"httpclient".equals(_type))&& (!"ircclient".equals(_type))) || (!_sharedClient) ) { + if ( (!isClient(_type)) || (!_sharedClient) ) { config.setProperty("option.inbound.nickname", _name); config.setProperty("option.outbound.nickname", _name); } else { config.setProperty("option.inbound.nickname", CLIENT_NICKNAME); config.setProperty("option.outbound.nickname", CLIENT_NICKNAME); } - } + } if ("interactive".equals(_profile)) // This was 1 which doesn't make much sense // The real way to make it interactive is to make the streaming lib diff --git a/apps/i2ptunnel/jsp/edit.jsp b/apps/i2ptunnel/jsp/edit.jsp index 931629fb1..b58798b20 100644 --- a/apps/i2ptunnel/jsp/edit.jsp +++ b/apps/i2ptunnel/jsp/edit.jsp @@ -14,12 +14,10 @@ String tun = request.getParameter("tunnel"); } else { String type = request.getParameter("type"); int curTunnel = -1; - if ("client".equals(type) || "httpclient".equals(type) || "ircclient".equals(type)) { + if (EditBean.isClient(type)) { %><% - } else if ("server".equals(type) || "httpserver".equals(type)) { - %><% } else { - %>Invalid tunnel type<% + %><% } } -%> \ No newline at end of file +%> diff --git a/apps/i2ptunnel/jsp/editClient.jsp b/apps/i2ptunnel/jsp/editClient.jsp index 97f8adbe5..915da5db9 100644 --- a/apps/i2ptunnel/jsp/editClient.jsp +++ 
b/apps/i2ptunnel/jsp/editClient.jsp @@ -75,7 +75,11 @@
+ <% if ("streamrclient".equals(tunnelType)) { %> + + <% } else { %> + <% } %>
+ <% String otherInterface = ""; + String clientInterface = editBean.getClientInterface(curTunnel); + if ("streamrclient".equals(tunnelType)) { + otherInterface = clientInterface; + } else { %>
+ <% } // streamrclient %>
@@ -116,14 +132,14 @@
- <% if ("httpclient".equals(editBean.getInternalType(curTunnel))) { + <% if ("httpclient".equals(tunnelType) || "connectclient".equals(tunnelType)) { %>
- <% } else { + <% } else if ("client".equals(tunnelType) || "ircclient".equals(tunnelType) || "streamrclient".equals(tunnelType)) { %>
- <% } - %>
+ <% } %> + <% if (!"streamrclient".equals(tunnelType)) { %> +
@@ -160,6 +177,7 @@ class="tickbox" /> (Share tunnels with other clients and irc/httpclients? Change requires restart of client proxy)
+ <% } // !streamrclient %>
+ +
+
+
+ +
+ +
+
+ + class="tickbox" /> +
+
+ + +
+
+ + +

+
+ +
+
+ + class="tickbox" /> +
+
+ + +
class="tickbox" /> + Enable + class="tickbox" /> + Disable +
+
+
+ + +
+ +
+
+
+ +
+ +
+
+ + class="tickbox" /> +
+ +
+
+
+ + <% if ("client".equals(tunnelType) || "ircclient".equals(tunnelType)) { %> +
+ +
+
+ + class="tickbox" /> +
+
+ + +
+
+ + + (if known) +
+ +
+
+
+ <% } %> +
diff --git a/apps/i2ptunnel/jsp/editServer.jsp b/apps/i2ptunnel/jsp/editServer.jsp index 0cdf5e0e9..59a7b9cf7 100644 --- a/apps/i2ptunnel/jsp/editServer.jsp +++ b/apps/i2ptunnel/jsp/editServer.jsp @@ -82,11 +82,19 @@
+ <% if ("streamrserver".equals(tunnelType)) { %> + + <% } else { %> + <% } %>
@@ -110,7 +118,8 @@ - + + (leave blank for outproxies) <% } %>
@@ -123,6 +132,7 @@
+ <% if (!"streamrserver".equals(tunnelType)) { %>
+ <% } // !streamrserver %>
- (if known) + <% if (!"".equals(editBean.getDestinationBase64(curTunnel))) { %> + Add to local addressbook + <% } %>
+
+ + +
+
+ + + (Tunnel must be stopped first) +
+ +
+
+
+ +
+ +
+
+ + class="tickbox" /> +
+
+ + + (Restrict to these clients only) +
+ +
+
+
+ +
+ +
+
+ + class="tickbox" /> +
+
+ + +
+
+ + +
+ +
+
+
+ +
+ +
+
+
+ + class="tickbox" /> + +
+
+ + class="tickbox" /> + +
+
+
+ + +
+
+
+ + class="tickbox" /> + +
+
+ + class="tickbox" /> + + +
+
+
+ + + (Tunnel must be stopped first) +
+ +
+
+
+
diff --git a/apps/i2ptunnel/jsp/index.jsp b/apps/i2ptunnel/jsp/index.jsp index bcec39c17..4d9f6c57d 100644 --- a/apps/i2ptunnel/jsp/index.jsp +++ b/apps/i2ptunnel/jsp/index.jsp @@ -112,10 +112,18 @@ } %> + <% if (!"sockstunnel".equals(indexBean.getInternalType(curClient))) { %>
- +
+ <% } %>
@@ -140,6 +148,9 @@ + + +
@@ -159,10 +170,10 @@
-
+
-
+
@@ -178,15 +189,28 @@ <%=indexBean.getTunnelName(curServer)%>
-
- - <%=indexBean.getServerTarget(curServer)%> -
+ + + <% + if ("httpserver".equals(indexBean.getInternalType(curServer))) { + %> + <%=indexBean.getServerTarget(curServer)%> + <% + } else { + %><%=indexBean.getServerTarget(curServer)%> + <% + } + %> +
+
<% if ("httpserver".equals(indexBean.getInternalType(curServer)) && indexBean.getTunnelStatus(curServer) == IndexBean.RUNNING) { %> - Preview + Preview + <% + } else if (indexBean.getTunnelStatus(curServer) == IndexBean.RUNNING) { + %>Base32 Address:
<%=indexBean.getDestHashBase32(curServer)%>.b32.i2p
<% } else { %>No Preview @@ -237,6 +261,8 @@
diff --git a/apps/jdom/jdom.jar b/apps/jdom/jdom.jar deleted file mode 100644 index 288e64cb5..000000000 Binary files a/apps/jdom/jdom.jar and /dev/null differ diff --git a/apps/jdom/readme.txt b/apps/jdom/readme.txt deleted file mode 100644 index d360b3c25..000000000 --- a/apps/jdom/readme.txt +++ /dev/null @@ -1 +0,0 @@ -This is JDOM 1.0 from http://jdom.org/, released under an Apache style license diff --git a/apps/jetty/build.xml b/apps/jetty/build.xml index aaa978354..643dd79fd 100644 --- a/apps/jetty/build.xml +++ b/apps/jetty/build.xml @@ -78,7 +78,6 @@ - diff --git a/apps/pants/build.xml b/apps/pants/build.xml deleted file mode 100644 index 7770dae10..000000000 --- a/apps/pants/build.xml +++ /dev/null @@ -1,236 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -You currently have the recommended version installed. A newer -version will be installed if you continue and this may break some -applications which depend on this package. Are you sure you want -to update? [y/N] - - - - - - - - Update aborted. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Pants usage: - - ant [--usejikes] [-Dpbuild={name}] [-Dpbuild.version={version}] - [-D{property}={value}] [-Dno.prompts=true] build | fetch | - help | install | uninstall | update | version - - build Build a pbuild and its dependencies - fetch Get package only - help Display usage synopsis - install Fetch, build and install a pbuild - uninstall Uninstall a pbuild - update Update pbuild(s) to the latest version(s) - version Display pbuild version information - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/apps/pants/pants/build.xml b/apps/pants/pants/build.xml deleted file mode 100644 index 3f8554c07..000000000 --- a/apps/pants/pants/build.xml +++ /dev/null @@ -1,45 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - -
-
-
- -
-
diff --git a/apps/pants/pants/java/src/net/i2p/pants/MatchTask.java b/apps/pants/pants/java/src/net/i2p/pants/MatchTask.java deleted file mode 100644 index 6750e6314..000000000 --- a/apps/pants/pants/java/src/net/i2p/pants/MatchTask.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Ports + Ant = Pants, a simple Ant-based package manager - * - * free (adj.): unencumbered; not under the control of others - * - * Written by smeghead in 2005 and released into the public domain with no - * warranty of any kind, either expressed or implied. It probably won't make - * your computer catch on fire, or eat your children, but it might. Use at your - * own risk. - */ - -package net.i2p.pants; - -import java.io.FileNotFoundException; -import java.io.FileReader; -import java.io.FileWriter; -import java.io.IOException; -import java.io.PrintWriter; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.apache.tools.ant.BuildException; -import org.apache.tools.ant.Task; - -/** - *

Custom Ant task for matching the contents of a file against a given - * regular expression and writing any matching groups to a file in - * java.util.Properties format. - *

- *

Each key in the properties file is named after the number corresponding to - * its matching group and its value is the contents of the matching group. - *

- *

Regular expressions passed to this task must conform to the specification - * used by Sun's java.util.regex package and thus are mostly - * compatible with Perl 5 regular expressions. - *

- *

When calling the match task, the attributes - * input, output, and regex are required. - *

- *

Optional boolean attributes may be used to toggle various modes for the - * regular expression engine (all are set to false by default): - *

- * - * - * - * - * - * - * - * - *
canonicaleqEnable canonical equivalence
caseinsensitiveEnable case-insensitive matching
commentsPermit whitespace and comments in pattern
dotallEnable dotall mode
multilineEnable multi-line mode
unicodecaseEnable Unicode-aware case folding
unixlinesEnable Unix lines mode
- *

There is one additional optional boolean attribute, - * failOnNoMatch. If this attribute is true it causes - * the match task to throw a - * org.apache.tools.ant.BuildException and fail if no matches for - * the regular expression are found. The default value is false, - * meaning a failed match will simply result in a warning message to - * STDERR and an empty (0 byte) output file being - * created. - *

- *

- *

Example

- *

- *

Contents of input file letter.txt: - *

- *      Dear Alice,
- * 
- *      How's about you and me gettin' together for some anonymous foo action?
- * 
- *      Kisses,
- *      Bob
- * 
- *

- *

Ant match task and a taskdef defining it: - *

- *      <taskdef name="match" classname="net.i2p.pants.MatchTask" classpath="../../lib/pants.jar" />
- *      <match input="letter.txt"
- *             output="matches.txt"
- *             regex="about (\S*?) and (\S*?) .+anonymous (\S*?)"
- *             />
- * 
- *

- *

Contents of properties file matches.txt written by this task: - *

- *      group.0=about you and me gettin' together for some anonymous foo
- *      group.1=you
- *      group.2=me
- *      group.3=foo
- * 
- *

- *

These values can be loaded from matches.txt into Ant - * properties like so: - *

- *      <loadproperties srcFile="matches.txt" />
- * 
- *

- * - * @author smeghead - */ -public class MatchTask extends Task { - - private boolean _failOnNoMatch; - private String _inputFile; - private String _outputFile; - private String _regex; - private int _regexFlags; - - public void execute() throws BuildException { - int charRead = 0; - FileReader fileReader = null; - FileWriter fileWriter = null; - Matcher matcher = null; - Pattern pattern = null; - PrintWriter printWriter = null; - StringBuffer text = new StringBuffer(); - - if (_inputFile == null) - throw new BuildException("Error: 'match' task requires 'input' attribute"); - - if (_outputFile == null) - throw new BuildException("Error: 'match' task requires 'output' attribute"); - - if (_regex == null) - throw new BuildException("Error: 'match' task requires 'regex' attribute"); - - pattern = Pattern.compile(_regex, _regexFlags); - - try { - fileReader = new FileReader(_inputFile); - - while ((charRead = fileReader.read()) != -1) - text.append((char) charRead); - - fileReader.close(); - matcher = pattern.matcher(text); - - if (matcher.find()) { - printWriter = new PrintWriter(new FileWriter(_outputFile)); - - for (int i = 0; i <= matcher.groupCount(); i++) - printWriter.println("group." + Integer.toString(i) + "=" + matcher.group(i)); - - printWriter.flush(); - printWriter.close(); - } else { - if (_failOnNoMatch) { - throw new BuildException("Error: No matches found in " + _inputFile); - } else { - System.err.println("Warning: No matches found in " + _inputFile); - // Create 0 byte output file. 
- fileWriter = new FileWriter(_outputFile); - fileWriter.close(); - } - } - } catch (FileNotFoundException fnfe) { - throw new BuildException("File " + _inputFile + " not found", fnfe); - } catch (IOException ioe) { - throw new BuildException(ioe); - } - } - - public void setCanonicalEq(boolean enableCanonicalEq) { - if (enableCanonicalEq) - _regexFlags |= Pattern.CANON_EQ; - } - - public void setCaseInsensitive(boolean enableCaseInsensitive) { - if (enableCaseInsensitive) - _regexFlags |= Pattern.CASE_INSENSITIVE; - } - - public void setComments(boolean enableComments) { - if (enableComments) - _regexFlags |= Pattern.COMMENTS; - } - - public void setDotall(boolean enableDotall) { - if (enableDotall) - _regexFlags |= Pattern.DOTALL; - } - - public void setFailOnNoMatch(boolean failOnNoMatch) { - _failOnNoMatch = failOnNoMatch; - } - - public void setInput(String inputFile) { - _inputFile = inputFile; - } - - public void setMultiLine(boolean enableMultiLine) { - if (enableMultiLine) - _regexFlags |= Pattern.MULTILINE; - } - - public void setOutput(String outputFile) { - _outputFile = outputFile; - } - - public void setRegex(String regex) { - _regex = regex; - } - - public void setUnicodeCase(boolean enableUnicodeCase) { - if (enableUnicodeCase) - _regexFlags |= Pattern.UNICODE_CASE; - } - - public void setUnixLines(boolean enableUnixLines) { - if (enableUnixLines) - _regexFlags |= Pattern.UNIX_LINES; - } -} diff --git a/apps/pants/pants/java/src/net/i2p/pants/MergeTypedPropertiesTask.java b/apps/pants/pants/java/src/net/i2p/pants/MergeTypedPropertiesTask.java deleted file mode 100644 index 90e9dcaad..000000000 --- a/apps/pants/pants/java/src/net/i2p/pants/MergeTypedPropertiesTask.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Ports + Ant = Pants, a simple Ant-based package manager - * - * free (adj.): unencumbered; not under the control of others - * - * Written by smeghead in 2005 and released into the public domain with no - * warranty of any kind, either expressed 
or implied. It probably won't make - * your computer catch on fire, or eat your children, but it might. Use at your - * own risk. - */ - -package net.i2p.pants; - -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.util.Enumeration; -import java.util.Properties; -import java.util.StringTokenizer; - -import org.apache.tools.ant.BuildException; -import org.apache.tools.ant.Task; - -/** - *

Custom Ant task for loading properties from a - * java.util.Properties file then merging them with lists of - * expected properties. When an expected property is found in the properties - * file it is set to the value given for it in the file. If an expected property - * from a list isn't found in the properties file its value will be set to "" or - * "false", depending on the property's data type. - *

- *

A property's data type is determined by membership in one of two lists - * which can be passed into an instance of this class: a string-typed list and a - * boolean-typed list. Values for string-typed properties may be any valid - * string accepted by java.util.Properties, and values for - * boolean-typed properties must be either "false" or "true". - *

- *

Lists holding more than one property must be comma-delimited. - *

- *

The output of this class is a temporary java.util.Properties - * file which is suitable for reading by the standard Ant - * loadproperties task. - *

- *

Note that if any properties in the given lists have already been defined - * before the mergetypedproperties task is called, their values - * cannot be changed since Ant properties are immutable. - *

- *

Example

- *

- *

Contents of a properties file my.properties: - *

- *      some.property.exists=true
- *      hasValue=false
- *      some.property=this is a value
- *      property0=bork bork
- *      propertyX=this property wasn't passed in a list
- * 
- *

- *

Ant mergetypedproperties task and a taskdef - * defining it: - *

- *      <taskdef name="mergetypedproperties" classname="net.i2p.pants.MergeTypedPropertiesTask" classpath="../../lib/pants.jar" />
- *      <mergetypedproperties input="my.properties"
- *             output="merged-properties.temp"
- *             booleanList="some.property.exists,is.valid,hasValue"
- *             stringList="some.property,another.property,property0"
- *             />
- * 
- *

- *

Contents of properties file merged-properties.temp written by this task: - *

- *      some.property.exists=true
- *      is.valid=false
- *      hasValue=false
- *      some.property=this is a value
- *      another.property=
- *      property0=bork bork
- *      propertyX=this property wasn't passed in a list
- * 
- *

- *

If you don't want this task's output to include properties which weren't - * in the lists of expected properties, you can set the attribute - * onlyExpected to true. In the example, this would - * result in the file merged-properties.temp containing only the - * following properties: - *

- *      some.property.exists=true
- *      is.valid=false
- *      hasValue=false
- *      some.property=this is a value
- *      another.property=
- *      property0=bork bork
- * 
- *

- * - * @author smeghead - */ -public class MergeTypedPropertiesTask extends Task { - - private String _booleanList = ""; - private String _inputFile; - private boolean _onlyExpected; - private String _outputFile; - private Properties _propertiesIn = new Properties(); - private Properties _propertiesOut = new Properties(); - private String _stringList = ""; - - public void execute() throws BuildException { - StringTokenizer strtokBoolean = new StringTokenizer(_booleanList, ","); - StringTokenizer strtokString = new StringTokenizer(_stringList, ","); - String property = ""; - - if (_inputFile == null) - throw new BuildException("Error: 'mergetypedproperties' task requires 'input' attribute"); - - if (_outputFile == null) - throw new BuildException("Error: 'mergetypedproperties' task requires 'output' attribute"); - - // Add some type-checking on the list elements - - try { - _propertiesIn.load(new FileInputStream(_inputFile)); - - while (strtokBoolean.hasMoreTokens()) - _propertiesOut.setProperty(strtokBoolean.nextToken().trim(), "false"); - - while (strtokString.hasMoreTokens()) - _propertiesOut.setProperty(strtokString.nextToken().trim(), ""); - - for (Enumeration enumm = _propertiesIn.elements(); enumm.hasMoreElements(); ) { - property = (String) enumm.nextElement(); - - if (_onlyExpected && !_propertiesOut.containsKey(property)) - continue; - else - _propertiesOut.setProperty(property, _propertiesIn.getProperty(property)); - } - - _propertiesOut.store(new FileOutputStream(_inputFile), "This is a temporary file. 
It is safe to delete it."); - } catch (IOException ioe) { - throw new BuildException(ioe); - } - } - - public void setBooleanList(String booleanList) { - _booleanList = booleanList; - } - - public void setInput(String inputFile) { - _inputFile = inputFile; - } - - public void setOnlyExpected(boolean onlyExpected) { - _onlyExpected = onlyExpected; - } - - public void setOutput(String outputFile) { - _outputFile = outputFile; - } - - public void setStringList(String stringList) { - _stringList = stringList; - } -} diff --git a/apps/pants/pants/resources/README b/apps/pants/pants/resources/README deleted file mode 100644 index a11829f71..000000000 --- a/apps/pants/pants/resources/README +++ /dev/null @@ -1,116 +0,0 @@ -What is Pants? --------------- - - Pants is an Apache Ant-based package manager for the management of 3rd party - dependencies in Java development projects. It's loosely modeled after - FreeBSD's Ports and Gentoo Linux's Portage, with two major differences: - - * Pants isn't intended for system-wide package management. It's tailored for - per-project 3rd party package management. You will typically have one - Pants repository per project and each repository will be located somewhere - under your project's root directory. If you're familiar with Ports or - Portage, a Pants repository is roughly analogous to /usr/ports or - /usr/portage. - - * Pants is extremely portable. It goes anywhere Apache Ant goes. - - Pants takes a modular approach to the standard Ant buildfile, breaking it - into 3 files for functionality and convenience: - - 1. The Pants public interface, pants/build.xml, provides a single consistent - way to access and manipulate dependency packages and relieves some of the - developer's burden by providing implementations for some frequently-used - and complex Ant operations. - - 2. pbuild.xml is a specially-structured and slimmed-down Ant buildfile in - which you implement custom handling for a package your project depends - on. 
This is known as the "pbuild" and is roughly analogous to a FreeBSD - port or a Gentoo ebuild. A fairly explanatory template for pbuilds, - pbuild.template.xml, is provided. - - 3. pbuild.properties contains those properties for a specific pbuild which - are most likely to change over time. It uses the java.util.Properties - format which is more human-friendly for hand-editing than Ant/XML. A - fairly explanatory template, pbuild.template.properties, is provided. - - There is one more file that completes the Pants system: the metadata file - pants/world is a database for keeping track of all packages managed by Pants - for your project. - - Pants automatically handles versioning for your project's dependency - packages and keeps track of their recommended versions, currently used - versions, and latest available versions. This makes it extremely simple for - project developers to switch back and forth between different versions of a - dependency, and makes it just as easy to update a dependency. You can even - update all your project's Pants-managed packages with a single command. - - Pbuilds are designed to automatically handle the downloading, building, - repackaging and deployment of source archives, binary archives, and CVS - sources, all in a manner that's completely transparent to the project - developer. Pbuilds currently support tar + gzip, tar + bzip2, and zip - archives. - - Because it is based on Ant, Pants integrates very well with Ant buildfiles - and will fit easily into your project's Ant build framework. However, its - interface is simple enough to be called just as easily by more traditional - build systems such as GNU Make. - - -Why Should I Use Pants? ------------------------ - - There are many applications for Pants, but a few use cases should best serve - to illustrate its usefulness: - - 1. You have a project that you ship with several 3rd party libraries but the - versions you're using are stale. 
With a single command, Pants can - automatically discover the latest release versions for all of these, then - download, build, and place the fresh libraries where your project's main - build system expects them to be at build time. - - 2. You want to test multiple versions of a 3rd party library against your - project. Pants only requires you to issue a single command to switch - library versions, so can spend more time testing and less time hunting - packages down, unpackaging them, symlinking, etc. - - 3. Pants is public domain. You can ship it with your project if you need to - without having to worry about petty intellectual property or licensing - issues. - - -Minimum Requirements --------------------- - - * Apache Ant 1.6.2 or higher is recommended - - * Any Java runtime and operating system that will run Ant - - -Installation ------------- - - Not finished yet. - - -Why the Silly Name? -------------------- - - Ports + Ant = Pants. Any other explanation is purely a product of your - twisted imagination. - - -Miscellaneous Pocket Fluff --------------------------- - - Author: smeghead - - License: No license necessary. This work is released into the public domain. - - Price: Free! But if you really appreciate Pants, or you're just a sicko, - please send me a picture of your worst or most unusual pair of - pants so I can add it to the Whirling Hall of Pants on pants.i2p, - the official Pants eepsite (that's an anonymous website on I2P--see - http://www.i2p.net for more information). 
- - -$Id$ diff --git a/apps/pants/pants/resources/pbuild.template.properties b/apps/pants/pants/resources/pbuild.template.properties deleted file mode 100644 index b346816b6..000000000 --- a/apps/pants/pants/resources/pbuild.template.properties +++ /dev/null @@ -1,110 +0,0 @@ -# The properties defined in this file can be overridden on the command line by -# passing them in as parameters like so: -# -# ant -Dpbuild=myapp -Dversion.recommended=2.0.5 install -# -# *** DO NOT DEFINE A PROPERTY BUT LEAVE ITS VALUE BLANK. PANTS WILL BREAK! *** - - -# Recommended Package Version -# -# Set this property's value to the package version you want Pants to use for the -# pbuild by default. The version string specified must match the version -# substring from the package's filename if the filename contains a version -# number. -# -# Comment out this property to force use of the latest available version. -# -# If the pbuild is CVS-based rather than package-based, this property must be -# set to 'CVS'. -# -# Example: -# -# version.recommended=2.0.4 - - -# Latest Package Version -# -# There are currently two ways to inform Pants of the latest version number for -# your package. -# -# Method 1: Manually modify the property 'version.latest' to reflect the latest -# version number. -# -# Method 2: Provide a URL for a page on the package's website and a regular -# expression with which to parse it in order to extract the version -# number of the latest available package. For this you must define the -# properties 'version.latest.find.url', 'version.latest.find.regex', -# and any regular expression engine mode flags needed. The pattern -# defined must have exactly one capturing group to encapsulate the -# version string, otherwise the operation will fail. -# -# You may use both methods, in which case the version number specified by Method -# 1 will be used as the fallback value if Method 2 for some reason is -# unsuccessful. 
-# -# If neither method is enabled here or they fail to return a valid value to -# Pants, the 'ant update' operation for this pbuild may exit ungracefully unless -# the pbuild is CVS-based (none of the version.latest.* properties are used by -# CVS-based pbuilds). -# -# The following is a list of boolean properties for optional mode flags used by -# the regular expression engine. Set a value of "true" for any you wish to use. -# -# version.latest.find.regex.canonicaleq - Enable canonical equivalence -# version.latest.find.regex.caseinsensitive - Enable case-insensitive matching -# version.latest.find.regex.comments - Permit whitespace and comments -# version.latest.find.regex.dotall - Enable dotall mode -# version.latest.find.regex.multiline - Enable multi-line mode -# version.latest.find.regex.unicodecase - Enable Unicode-aware case folding -# version.latest.find.regex.unixlines - Enable Unix lines mode -# -# Examples: -# -# version.latest=5.1.2 -# version.latest.find.url=http://sourceforge.net/projects/jetty/ -# version.latest.find.regex=Stable.+?Jetty-(.+?) - - -# Package URL -# -# Specify the URL pointing to the pbuild's package from here. The token -# '${pbuild.version}' if used will automatically be expanded to the appropriate -# version string. -# -# The package URL property is not used by CVS-based pbuilds. -# -# Examples: -# -# package.url=ftp://borkbork.se/bork-${pbuild.version}.tar.bz2 -# package.url=http://bork.borkbork.se/bork-${pbuild.version}-src.tar.gz - - -# CVS Repository -# -# The values expected for CVS properties here are the same as those expected by -# their corresponding Apache Ant 'Cvs' task attributes. For details see: -# -# http://ant.apache.org/manual/CoreTasks/cvs.html -# -# Not all of the 'Cvs' task's attributes have corresponding Pants properties. 
-# The following is a list of all valid CVS properties for Pants (and their -# default values if applicable): -# -# cvs.compression.level -# cvs.date -# cvs.package -# cvs.passfile=~/.cvspass -# cvs.port=2401 -# cvs.root -# cvs.rsh -# cvs.tag -# -# Of these, only the 'cvs.root' property is required for CVS-based pbuilds. -# -# Examples: -# -# cvs.root=:pserver:anoncvs@borkbork.se:/cvsroot/bork -# cvs.rsh=ssh -# cvs.package=borkbork - diff --git a/apps/pants/pants/resources/pbuild.template.xml b/apps/pants/pants/resources/pbuild.template.xml deleted file mode 100644 index 17017f85b..000000000 --- a/apps/pants/pants/resources/pbuild.template.xml +++ /dev/null @@ -1,69 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/apps/pants/pbuilds/fortuna/pbuild.properties b/apps/pants/pbuilds/fortuna/pbuild.properties deleted file mode 100644 index acfc8b4fa..000000000 --- a/apps/pants/pbuilds/fortuna/pbuild.properties +++ /dev/null @@ -1,112 +0,0 @@ -# The properties defined in this file can be overridden on the command line by -# passing them in as parameters like so: -# -# ant -Dpbuild=myapp -Dversion.recommended=2.0.5 install -# -# *** DO NOT DEFINE A PROPERTY BUT LEAVE ITS VALUE BLANK. PANTS WILL BREAK! *** - - -# Recommended Package Version -# -# Set this property's value to the package version you want Pants to use for the -# pbuild by default. The version string specified must match the version -# substring from the package's filename if the filename contains a version -# number. -# -# Comment out this property to force use of the latest available version. -# -# If the pbuild is CVS-based rather than package-based, this property must be -# set to 'CVS'. -# -# Example: -# -# version.recommended=2.0.4 -version.recommended=CVS - -# Latest Package Version -# -# There are currently two ways to inform Pants of the latest version number for -# your package. 
-# -# Method 1: Manually modify the property 'version.latest' to reflect the latest -# version number. -# -# Method 2: Provide a URL for a page on the package's website and a regular -# expression with which to parse it in order to extract the version -# number of the latest available package. For this you must define the -# properties 'version.latest.find.url', 'version.latest.find.regex', -# and any regular expression engine mode flags needed. The pattern -# defined must have exactly one capturing group to encapsulate the -# version string, otherwise the operation will fail. -# -# You may use both methods, in which case the version number specified by Method -# 1 will be used as the fallback value if Method 2 for some reason is -# unsuccessful. -# -# If neither method is enabled here or they fail to return a valid value to -# Pants, the 'ant update' operation for this pbuild may exit ungracefully unless -# the pbuild is CVS-based (none of the version.latest.* properties are used by -# CVS-based pbuilds). -# -# The following is a list of boolean properties for optional mode flags used by -# the regular expression engine. Set a value of "true" for any you wish to use. -# -# version.latest.find.regex.canonicaleq - Enable canonical equivalence -# version.latest.find.regex.caseinsensitive - Enable case-insensitive matching -# version.latest.find.regex.comments - Permit whitespace and comments -# version.latest.find.regex.dotall - Enable dotall mode -# version.latest.find.regex.multiline - Enable multi-line mode -# version.latest.find.regex.unicodecase - Enable Unicode-aware case folding -# version.latest.find.regex.unixlines - Enable Unix lines mode -# -# Examples: -# -# version.latest=5.1.2 -# version.latest.find.url=http://sourceforge.net/projects/jetty/ -# version.latest.find.regex=Stable.+?Jetty-(.+?) - - -# Package URL -# -# Specify the URL pointing to the pbuild's package from here. 
The token -# '${pbuild.version}' if used will automatically be expanded to the appropriate -# version string. -# -# The package URL property is not used by CVS-based pbuilds. -# -# Examples: -# -# package.url=ftp://borkbork.se/bork-${pbuild.version}.tar.bz2 -# package.url=http://bork.borkbork.se/bork-${pbuild.version}-src.tar.gz - - -# CVS Repository -# -# The values expected for CVS properties here are the same as those expected by -# their corresponding Apache Ant 'Cvs' task attributes. For details see: -# -# http://ant.apache.org/manual/CoreTasks/cvs.html -# -# Not all of the 'Cvs' task's attributes have corresponding Pants properties. -# The following is a list of all valid CVS properties for Pants (and their -# default values if applicable): -# -# cvs.compression.level -# cvs.date -# cvs.package -# cvs.passfile=~/.cvspass -# cvs.port=2401 -# cvs.root -# cvs.rsh -# cvs.tag -# -# Of these, only the 'cvs.root' property is required for CVS-based pbuilds. -# -# Examples: -# -# cvs.root=:pserver:anoncvs@borkbork.se:/cvsroot/bork -# cvs.rsh=ssh -# cvs.package=borkbork -cvs.root=:ext:anoncvs@savannah.gnu.org:/cvsroot/gnu-crypto -cvs.rsh=ssh -cvs.package=gnu-crypto diff --git a/apps/pants/pbuilds/fortuna/pbuild.xml b/apps/pants/pbuilds/fortuna/pbuild.xml deleted file mode 100644 index 02a3e8ce5..000000000 --- a/apps/pants/pbuilds/fortuna/pbuild.xml +++ /dev/null @@ -1,127 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - -
-
-
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
diff --git a/apps/pants/pbuilds/jetty/pbuild.properties b/apps/pants/pbuilds/jetty/pbuild.properties deleted file mode 100644 index c377e1299..000000000 --- a/apps/pants/pbuilds/jetty/pbuild.properties +++ /dev/null @@ -1,112 +0,0 @@ -# The properties defined in this file can be overridden on the command line by -# passing them in as parameters like so: -# -# ant -Dpbuild=myapp -Dversion.recommended=2.0.5 install -# -# *** DO NOT DEFINE A PROPERTY BUT LEAVE ITS VALUE BLANK. PANTS WILL BREAK! *** - - -# Recommended Package Version -# -# Set this property's value to the package version you want Pants to use for the -# pbuild by default. The version string specified must match the version -# substring from the package's filename if the filename contains a version -# number. -# -# Comment out this property to force use of the latest available version. -# -# If the pbuild is CVS-based rather than package-based, this property must be -# set to 'CVS'. -# -# Example: -# -# version.recommended=2.0.4 -version.recommended=5.1.2 - -# Latest Package Version -# -# There are currently two ways to inform Pants of the latest version number for -# your package. -# -# Method 1: Manually modify the property 'version.latest' to reflect the latest -# version number. -# -# Method 2: Provide a URL for a page on the package's website and a regular -# expression with which to parse it in order to extract the version -# number of the latest available package. For this you must define the -# properties 'version.latest.find.url', 'version.latest.find.regex', -# and any regular expression engine mode flags needed. The pattern -# defined must have exactly one capturing group to encapsulate the -# version string, otherwise the operation will fail. -# -# You may use both methods, in which case the version number specified by Method -# 1 will be used as the fallback value if Method 2 for some reason is -# unsuccessful. 
-# -# If neither method is enabled here or they fail to return a valid value to -# Pants, the 'ant update' operation for this pbuild may exit ungracefully unless -# the pbuild is CVS-based (none of the version.latest.* properties are used by -# CVS-based pbuilds). -# -# The following is a list of boolean properties for optional mode flags used by -# the regular expression engine. Set a value of "true" for any you wish to use. -# -# version.latest.find.regex.canonicaleq - Enable canonical equivalence -# version.latest.find.regex.caseinsensitive - Enable case-insensitive matching -# version.latest.find.regex.comments - Permit whitespace and comments -# version.latest.find.regex.dotall - Enable dotall mode -# version.latest.find.regex.multiline - Enable multi-line mode -# version.latest.find.regex.unicodecase - Enable Unicode-aware case folding -# version.latest.find.regex.unixlines - Enable Unix lines mode -# -# Examples: -# -# version.latest=5.1.2 -# version.latest.find.url=http://sourceforge.net/projects/jetty/ -# version.latest.find.regex=Stable.+?Jetty-(.+?) -version.latest=5.1.2 -version.latest.find.url=http://sourceforge.net/projects/jetty/ -version.latest.find.regex=Stable.+?Jetty-(.+?) - -# Package URL -# -# Specify the URL pointing to the pbuild's package from here. The token -# '${pbuild.version}' if used will automatically be expanded to the appropriate -# version string. -# -# The package URL property is not used by CVS-based pbuilds. -# -# Examples: -# -# package.url=ftp://borkbork.se/bork-${pbuild.version}.tar.bz2 -# package.url=http://bork.borkbork.se/bork-${pbuild.version}-src.tar.gz -package.url=http://mesh.dl.sourceforge.net/sourceforge/jetty/jetty-${pbuild.version}.zip - -# CVS Repository -# -# The values expected for CVS properties here are the same as those expected by -# their corresponding Apache Ant 'Cvs' task attributes. 
For details see: -# -# http://ant.apache.org/manual/CoreTasks/cvs.html -# -# Not all of the 'Cvs' task's attributes have corresponding Pants properties. -# The following is a list of all valid CVS properties for Pants (and their -# default values if applicable): -# -# cvs.compression.level -# cvs.date -# cvs.package -# cvs.passfile=~/.cvspass -# cvs.port=2401 -# cvs.root -# cvs.rsh -# cvs.tag -# -# Of these, only the 'cvs.root' property is required for CVS-based pbuilds. -# -# Examples: -# -# cvs.root=:pserver:anoncvs@borkbork.se:/cvsroot/bork -# cvs.rsh=ssh -# cvs.package=borkbork - diff --git a/apps/pants/pbuilds/jetty/pbuild.xml b/apps/pants/pbuilds/jetty/pbuild.xml deleted file mode 100644 index f10313512..000000000 --- a/apps/pants/pbuilds/jetty/pbuild.xml +++ /dev/null @@ -1,89 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/apps/pants/world b/apps/pants/world deleted file mode 100644 index 9d4335e57..000000000 --- a/apps/pants/world +++ /dev/null @@ -1,2 +0,0 @@ -version.using.fortuna=CVS -version.using.jetty=5.1.2 diff --git a/apps/q/doc/client.jpg b/apps/q/doc/client.jpg deleted file mode 100644 index 1d3702b50..000000000 Binary files a/apps/q/doc/client.jpg and /dev/null differ diff --git a/apps/q/doc/diagrams.html b/apps/q/doc/diagrams.html deleted file mode 100644 index 633dda014..000000000 --- a/apps/q/doc/diagrams.html +++ /dev/null @@ -1,26 +0,0 @@ - - - - Q System Diagrams - - - -

Q Diagrams

- Informal system diagrams of Q network, hubs and clients. -
-
- -
- -
- -
-
- -
aum
- - -Last modified: Mon Apr 18 14:06:02 NZST 2005 - - - diff --git a/apps/q/doc/hub.jpg b/apps/q/doc/hub.jpg deleted file mode 100644 index 10069e842..000000000 Binary files a/apps/q/doc/hub.jpg and /dev/null differ diff --git a/apps/q/doc/index.html b/apps/q/doc/index.html deleted file mode 100644 index 2fe9883d8..000000000 --- a/apps/q/doc/index.html +++ /dev/null @@ -1,80 +0,0 @@ - - - - Quartermaster - I2P Distributed File Store - - - -
-

Quartermaster
an I2P Distributed File Store

- -

STATUS

- Whole new (incompatible) version currently in development; - ETA for release approx 4-7 days; - view screenshots here - -
-
- - - User Manual | - Protocol Spec | - Metadata Spec | - Q Pr0n (diagrams) | - API Spec | - qnoderefs.txt | - Full Download | - Updated jar - -

- -
- -

Intro

- - Quartermaster, or Q for short, is a distributed file storage framework for I2P. - -

Features

- -
    -
  • Now features 'QSites' - the Q equivalent of Freenet freesites, - static websites which are retrievable even if author is offline
  • -
  • Easy web interface - interact with Q (and view/insert QSites) - from your web browser
  • -
  • Maximum expectations of content retrievability
  • -
  • Content security akin to Freenet CHK and SSK keys
  • -
  • Powerful, flexible search engine
  • -
  • Comfortably accommodates both permanent and transient - nodes without significant network disruption (for instance, - no flooding of the I2P network with futile - calls to offline nodes)
  • -
  • Rapid query resolution, due to distributed catalogue - mirroring which eliminates all in-network query traffic
  • -
  • Modular, extensible architecture
  • -
  • Simple interfaces for 3rd-party app developers
  • -
  • Is custom-designed and built around I2P, so no duplication of - I2P's encryption/anonymity features
  • -
  • Simple XML-RPC interface for all inter-node communication, makes it easy to - implement user-level clients in any language; also allows alternative - implementations of core server and/or client nodes.
  • -
- -
- -

Status

- - Q is presently under development, and a test release is expected soon. - -
- -

Architecture

- - Refer to the Protocol Specification for more information. - -
- - -Last modified: Mon Apr 18 18:55:19 NZST 2005 - - - diff --git a/apps/q/doc/manual/index.html b/apps/q/doc/manual/index.html deleted file mode 100644 index 3b88e8fd7..000000000 --- a/apps/q/doc/manual/index.html +++ /dev/null @@ -1,805 +0,0 @@ - - - - Q User/Programmer Manual - - - - -
-

Q User/Programmer Manual

- - A brief but hopefully easy guide to installing and using the Q distributed file - store within the I2P network - -

- (Return to Q Homepage) -
-
- - Introduction | - Checklist | - Server or Client? | - Walkthrough | - Server Nodes | - About QMgr | - Contact us - -
- - - -
- -

1. Introduction

- -
- Q is a distributed Peer2Peer file storage/retrieval network that aims to deliver optimal - performance by respecting the properties of the I2P network.
-
- This manual serves as a 'walkthrough' guide, to take you through the steps from initial - download, to everyday usage. It also provides information for the benefit of higher-level - client application authors. -
- -
- -
- -

2. Preliminary Checklist

- -
- OK, we assume here that you've already cracked the tarball, and are looking at - the distribution files.
-
- In order to get Q set up and running, you'll need: -
    -
  1. An I2P router installed, set up and (permanently or transiently) running
  2. -
  3. Your system shell set up with at the environment variables: -
      -
    • CLASSPATH - this should include: -
        -
      • The regular I2P jar files and 3rd party support jar files (eg i2p.jar, - i2ptunnel.jar, streaming.jar, - mstreaming.jar, jbigi.jar)
      • -
      • Apache's XML-RPC support jarfile - included in this Q distro as - xmlrpc.jar
      • -
      • Aum's jarfile aum.jar, which includes Q and all needed support code
      • -
      -
    • -
    • PATH - your execution search path must include the directory - in which your main java VM execution program (java, or on windows systems, - java.exe) resides.
      - NOTE - if java[.exe] is not on your PATH, then Q will - not run.
    • -
    -
-
- -
- -
- -

3. Q Server or Q Client?

- -
- Nearly everyone will want to run a Q Client Node.
-
- It is only client nodes which provide users with full access to the Q network.
-
- However, if you have a (near-) permanently running I2P Router, and you're a kind and - generous soul, you might also be willing to run a Q Server Node in addition - to your Q Client Node.
-
- If you do choose to run a server node, you'll be expected to keep it running as near as - possible to 24/7. While transience of client nodes - frequent entering and leaving the - Q network - causes little or no disruption, transience of server nodes can significantly - impair Q's usability for everyone, particularly if this transience occurs frequently amongst - more than the smallest percentage of the server node pool.
-
- Until you're feeling well "settled in" with Q, your best approach is to just run a - client node for now, and add a server node later when you feel ready.
-
- -
- -
- -

4. Q Walkthrough

- -

4.1. Introduction

- -
- This chapter discusses the deployment and usage of a Q Client Node, and will take you - through the steps of: -
    -
  1. Double-checking that you've met the installation requirements
  2. -
  3. Launching a Q Client Node
  4. -
  5. Verifying that your Q Client Node is running
  6. -
  7. If your node fails to launch, figuring out why
  8. -
  9. Importing one or more noderefs into your node
  10. -
  11. Observing that your node is discovering other nodes on the network
  12. -
  13. Observing that your node is discovering content on the network
  14. -
  15. Searching for items of content that match chosen criteria
  16. -
  17. Retrieving stuff from the network
  18. -
  19. Inserting stuff to the network
  20. -
  21. Shutting down your client node
  22. -
- Setup and running of Q Server Nodes will be discussed in a later chapter. -
- -
- -

4.2. Verify Your Q Installation Is Correct

- -
- Ensure that all the needed I2P jarfiles, as well as xmlrpc.jar and - Q's very own aum.jar are correctly listed in your CLASSPATH environment - variable, and your main java launcher is correctly listed in your PATH environment - variable.
-
- Typically, you will likely copy the jarfiles aum.jar and xmlrpc.jar - into the lib/ subdirectory of your I2P router installation, along with all - the other I2P jar files. Wherever you choose to put these files, make sure they're - correctly listed in your CLASSPATH. -
- Also, you'll want to add execute permission to your qmgr (or qmgr.bat) - wrapper script, and copy it into one of the directories listed in your PATH - environment variable.
-
- -
- -

4.3. Get Familiar With qmgr

- -
- qmgr (or qmgr.bat) is a convenience wrapper script to save your - sore fingers from needless typing. It's just a wrapper which passes arguments - to the java command java net.i2p.aum.q.QMgr
-
- You can verify you've set up qmgr correctly with the command: -
-qmgr help
- This displays a brief summary of qmgr commands. On the other hand, the command: -
-qmgr help verbose
- floods your terminal window with a detailed explanation of all the qmgr commands - and their arguments.
-
- -
- -

4.4. Running A Q Client Node For The First Time

- -
- Provided you've successfully completed the preliminaries, you can launch your - Q Client Node with the command: -
-qmgr start
- - All going well, you should have a Q Client Node now running in background. -
- -
- -

4.5. Verify that your Q Client Node is actually Running

- -
- After typing the qmgr start command, you will see little or no - evidence that Q is actually running.
-
- You can test if the node is actually up by typing the command: -
-qmgr status
- If your Q Client Node is running, this status command should produce - something like: -
-Pinging node at '/home/myusername/.quartermaster_client'...
-Node Ping:
-  status=ok
-  numPeers=0
-  dest=-3LQaE215uIYwl-DsirnzXGQBI31EZQj9u~xx45E823WqjN5i2Umi37GPFTWc8KyislDjF37J7jy5newLUp-qrDpY7BZum3bRyTXo3Udl8a3sUjuu4qR5oBEWFfoghQiqDGYDQyJV9Rtz7DEGaKHGlhtoGsAYRXGXEa8a43T2llqZx2fqaXs~836g8t6sLZjryA5A9fpq98nE5lT0hcTalPieFpluJVairZREXpUiAUmGHG7wAIjF6iszXLEHSZ8Qc622Xgwy0d1yrPojL2yhZ64o05aueYcr~xNCiFxYoHyEJO3XYmkx~q-W-mzS3nn6pRevRda74MnX1~3fFDZ0u~OG6cLZoFkWgnxrwrWGFUUVMR87Yz251xMCKJAX6zErcoGjGFpqGZsWxl4~yq7yfkjPnq3GuTxp2cB75bRAOZRIAieqBOVJDEodFYW5amCinu4AxYE7G1ezz4ghqHFe~0yaAdO74Q1XoUny138YT6P33oNOOlISO1cAAAA
-  uptime=4952
-  load=0.0
-  id=6LVZ9-~GgJJ52WUF1fLHt3UnH50TnXSoPQXy7WZ4GA=
-  numLocalItems=47
-  numRemoteItems=2173
- - If you see something like this, then smile, because Q is now up on your system.
-
- If the node launch failed, you might see something like: -
-Pinging node at '/home/myusername/.quartermaster_client'...
-java.io.IOException: Connection refused
-        at org.apache.xmlrpc.XmlRpcClient$Worker.execute(Unknown Source)
-        at org.apache.xmlrpc.XmlRpcClient.execute(Unknown Source)
-        at net.i2p.aum.q.QMgr.doStatus(QMgr.java:310)
-        at net.i2p.aum.q.QMgr.execute(QMgr.java:813)
-        at net.i2p.aum.q.QMgr.main(QMgr.java:869)
-Failed to ping node
- This indicates that your Q client node has either crashed, or failed to launch in the - first place.
-
- If you're having trouble like this, you might like to try running your Q client node - in foreground, instead of spawning it off into background.
-
- The command to run a Q client node in foreground is: -
-qmgr foreground
- You should see some meaningless startup messages, and no return to your shell prompt.
- -
- -
- -

4.6. Diversion - Q Storage Directories

- -
- By default, when you run a Q Client Node, it creates a datastore directory tree - at ~/.quartermaster_client. (Windows users note - you'll find this directory - wherever your user home directory is - this depends on what version of Windows - you have installed).
-
- Within this directory tree, you should see a file called node.log, which - will contain various debug log messages, and can help you to rectify any problems - with your Q installation. If you hit a wall and can't rectify the problems - yourself, you should send this file to the Q author (aum).
-
- It's possible to run your Q node from another directory, by passing that directory - as a -dir <path> argument to the - qmgr start, foreground and stop - commands. See qmgr help verbose for more information. -
- -
- -

4.7. Importing a Noderef

- -
- Note from the prior qmgr status command the line: -
-numPeers=0
- This means that your Q client node is running standalone, and doesn't have any contact - with any Q network. As such, your node is effectively useless. We need to hook up - your node with other nodes in the Q network.
-
- Q doesn't ship with any means for new client nodes to automatically connect to any Q - server nodes. This is deliberate.
-
- In all likelihood, there will be one 'main' Q network running within I2P, largely - based around the author's own Q server node, and most people will likely want to - use this Q network. But the author doesn't want to stop other people running their - own private Q networks, for whatever purpose has meaning for them. - -
-
- This is especially relevant for Q as opposed to Freenet. With Freenet, there's - no way for a user to know of the existence of any item of content without - first being given its 'key'. However, since Q works with published catalogs, - any user can know everything that's available on a Q network, which might - not be desirable to those wishing to share content in a private situation.
-
- The Q author anticipates, and warmly supports, people running their own - private Q networks within I2P, in addition to accessing the mainstream - 'official' Q network.
-
- The way Q is designed and implemented, there is no way for anyone, including - Q's author, to know of the existence of anyone else's private Q network. - It is beyond the author's control, (and thus arguably the author's - legal responsibility), what private Q networks people set up, and what - kind of content is trafficked on these networks. This claim of plausible - deniability on the part of Q's author parallels that of a hardware retailer - denying responsibility for what people do with tools that they purchase. -
-
-
- - Ok, getting back on topic - your brand new virgin Q client node is useless and lonely, - and desperately needs some Q server nodes to talk to. So let's hook up your node to - the mainstream Q network.
-
- You'll need to get one or more 'noderefs' for Q server nodes.
-
- There's nothing fancy about a Q noderef. It's just a regular I2P 'destination', with - which your Q Client Node can connect with a Q Server Node.
-
- A 'semi-official' list of noderefs for the mainstream Q network can be downloaded - from the url:
http://aum.i2p/q/qnoderefs.txt.
-
- Download this file, save it as (say) qnoderefs.txt. (Alternatively, if you're - wanting to subscribe into a private Q network, then get a noderef for at least one - of that network's server nodes from someone on that network who trusts you).
-
- Import these noderefs into your Q client node via the command: -
-qmgr addref qnoderefs.txt
- If all goes well, you should see no output from this command, or (possibly) a brief - line or two suggesting success.
-
- Your client node is now subscribed into the Q network of your choice. Verify this - with the command: -
-qmgr status
- In the output from that command, you should see the numPeers= line showing at least - 1 peer.
-
- If there is more than one Q Server Node on the Q network you've just subscribed to, - then your local node should sooner or later discover all these server nodes, and - the numPeers value should increase over time.
-
-
-
- While Q is in its early development and testing stages, the author may abandon - the mainstream Q network, and publish noderefs for a whole new mainstream Q network. - This will especially happen if the author makes any substantial changes to the - inter-node protocol, and/or releases incompatible new versions of Q client/server - nodes. Remember that - http://aum.i2p/q/qnoderefs.txt will - serve as the authoritative source for noderefs for the mainstream Q network within - the mainstream I2P network. -
-
- - When your client node gets its noderefs to a Q network, it will periodically, - from then on, retrieve differential peer list and catalog updates from servers - it knows about.
-
- Even if you only feed your client just one ref for a single server node, it will - in time discover all other operating server nodes on that Q network, and will - build up a full local catalog of everything that's available on that Q network.
-
- Provided that your client is running ok, and has been fed with at least one - ref for a live Q network that contains content, then over time, successive: -
-qmgr status
- commands should report increasing values in the fields: -
    -
  • numPeers - number of peers this client node knows about
  • -
  • numLocalItems - number of locally stored content items, ie items - which you have either inserted to, or retrieved from, your client node
  • -
  • numRemoteItems - number of unique data items which are available - on remote server nodes in the Q network, and which can be retrieved through - your local client node.
  • -
- -
-
- -

4.7.1. One Big Warning

- - If you are participating in more than one distinct Q network, then do not - insert noderefs for different networks into the same running instance of a - local Q client, unless you don't plan on inserting content via that client.
-
- For instance, let's say you are participating in two different Q networks: -
    -
  • The 'mainstream' Q network
  • -
  • A secret Q network - "My friends' teen angst diaries"
  • -
- If you get a noderef for both these networks, and insert both of these into the - same running Q client node, then this local client node will be transparently - connected to both networks.
-
- If you only ever plan on retrieving content, and never inserting content, this - won't be a problem, except that you won't be able to tell which content - resides on the mainstream Q network, and which resides in the secret Q network.
-
- The big problem arises from inserting content. Whenever you insert data through this - 'contaminated' - Q client node, this node picks 3 different servers to which to upload a copy of this - data. You won't have any control over whether the data gets inserted to the mainstream - Q network, or your secret Q network. You might insert something sensitive, intending it - to go only into the secret Q network, when in fact it also ends up in the mainstream - network, with consequences you might not want. -
-
- -
- -

4.8. Content Data and Metadata

- -
- Whenever content gets stored on Q, it is actually stored as two separate items: -
    -
  • The raw data - whether a text file, or the raw bytes of image files, - audio files etc
  • -
  • The metadata, which contains human-readable and machine-readable - descriptions of the data
  • -
- Metadata consists of a set of category=value pairs.
-
- Confused yet? Don't worry, I'm confused as well. Let's illustrate this with an - example of metadata for an MP3 audio recording: -
    -
  • title=Fight_Last_Thursday.mp3
  • -
  • type=audio
  • -
  • mimetype=audio/mpeg
  • -
  • abstract=upcoming single recorded in our garage last April
  • -
  • keywords=grunge,country,indie
  • -
  • artist=Ring of Fire
  • -
  • size=4379443
  • -
  • contact=ring-of-fire@mail.i2p
  • -
  • key=blah37blah24-yada23hfhyada
  • -
- All metadata categories are optional. In fact, you can insert content with no metadata - at all.
-
- If you fail to provide metadata when inserting an item, a blank set of metadata will - be created with at least the following categories: -
    -
  • key - the derived key, under which the item will later be retrievable - by yourself and others
  • -
  • title - if not provided at insert time, this will be set to the key
  • -
  • size - size of the item's raw data, in bytes
  • -
- Within Q, there is a convention to supply a minimal amount of metadata. While this - is not expected or enforced, including all these categories is most strongly - recommended. These core categories are: -
    -
  • title - a meaningful title for the data item, consisting only of characters - which are legal in filenames on all platforms, and which ends with a file extension.
  • -
  • type - one of a superset of eMule classifiers, such as: -
      -
    • text - plain text
    • -
    • html - HTML content
    • -
    • image - content is in an image format, such as .png, .jpg, .gif etc
    • -
    • audio - content is an audio sample, such as .ogg, .mp3, .wav etc
    • -
    • video - due to the sheer size of video files, and Q's present design, - it's unlikely people will be inserting video content anytime soon (unless it's - very short)
    • -
    • archive - packed file collections, such as .tar.gz, .zip, .rar etc
    • -
    • misc - content does not fit into any of the above categories
    • -
    -
  • -
  • mimetype - not as important as the type category, but providing - this category in your metadata is still strongly encouraged. Value for this category - should be one of the standard mimetypes, eg text/html, audio/ogg etc.
  • -
  • abstract - a short description (<255 characters), intended for human reading
  • -
  • keywords - a comma-separated list of keywords, intended for - machine-readability, should be all lowercase, no spaces
  • -
- Note that you can supply extra metadata categories in addition to the above, and that - people searching for content can search on these extra categories if they know about - them. -
- -
- -

4.9. Searching For Content

- -
- As mentioned earlier - in contrast with Freenet, local Q nodes build up a complete - catalog of all available content on whatever Q network they are connected to.
-
- This is a design decision, based on the choice to eliminate query traffic.
-
- The author hopes that this will result in a distributed storage network with a - high retrievability guarantee, in contrast with freenet which offers no such - guarantee.
-
- With Freenet, you only ever know of the existence of something if someone tells - you about it.
-
- But with Q, your local client node builds up a global catalog of everything that's - available within the whole network.
-
- The QMgr client has a command for searching your Q client node: -
-qmgr search -m category1=pattern1 category2=pattern2 ...
- For example: -
-qmgr search -m type=audio artist=Mozart keywords=symphony
- or: -
-qmgr search -m type=text title="bible|biblical|(Nag Hammadi)" keywords="apocrypha|Magdalene"
- As implied in the latter example, search patterns are regular expressions. This example will - locate all text items, whose title metadata category contains one of bible, biblical or Nag Hammadi, and whose keywords category contains either - or both the words apocrypha or Magdalene.
-
- Please use the search function carefully, otherwise (if and when Q usage grows) you - could be inundated with thousands or even millions of entries.
-
- If a search turns up nothing, qmgr will simply exit. But if it turns up one or more items, - it will print the items out one at a time, with the key first, then each metadata entry - on an indented line following. -
- -
- -

4.10. Retrieving Content

- -
- Now, we're actually going to retrieve something.
-
- Presumably, after following the previous section, you will have seen one or more search - results come up, with the 'keys' under which the items can be accessed.
-
- Now, choose one of the keys, preferably for a short text item. Try either of the following - commands: -
-qmgr get <keystring> something.txt
-or: -
-qmgr get <keystring> > something.txt
- (Both have the same effect - the first one explicitly writes to the named file, the second - one dumps the raw data to stdout, which we shell-redirect into the file.)
-
- Note - redirection of fetched data to a file via shell is not working at present. Use only - the first form till we fix the bug. - -
- -
- -

4.11. Inserting Content

- -
- Our last example in this walkthrough relates to inserting content.
-
- Firstly, create a small text file with 2-3 lines of text, and save it as (say) - myqinsert.txt.
-
- Now, think of some metadata to insert along with the file. Or, you can just use - the set: -
-type=text
-keywords=test
-abstract=My simple test of inserting into Q
-title=myqinsert.txt
- - Now, let's insert the file. Ensure your Q client node is running, then type: -
-qmgr put myqinsert.txt -m type=text keywords=test title="myqinsert.txt" \
- abstract="My simple test of inserting into Q"
- If all went well, this command should produce half a line of gibberish, followed - immediately by your shell prompt, eg: -
-aRoFC~9MU~pM2C-uCTDBp5B7j79spFD8gUeu~BNkUf0=$
-
- The '$' at the end is your shell prompt, and all the characters before it are the 'key' - which was derived from the content you just inserted.
-
- To avoid the hassle of copying/pasting the key, you could just add output redirection - to the above command, eg: -
-qmgr put myqinsert.txt -m type=text keywords=test title="myqinsert.txt" \
- abstract="My simple test of inserting into Q" \
- > myqinsert.key
- This will cause the generated key to be written safe and sound into the file - myqinsert.key.
-
- You can verify that this insert worked by a 'get' command, as in: -
-qmgr get `cat myqinsert.key` somefilename.ext
- (Note that this won't work on windows because the DOS shell is irredeemably brain-damaged. If - you're using Windows, you will have to cut/paste the key.) -
- -
- -

4.12. Shutting Down your Node

- -
- If you've worked through to here, then congratulations! You've got your Q Client Node set up - and working, and ready to meet all your distributed file storage and retrieval needs.
-
- You can leave your client node running 24/7 if you want. In fact, we recommend you keep your - client node running as much of the time as possible, so that you get prompt catalog updates, - and can more quickly stay in touch with new content.
-
- However, if you need to shut down your node, the command for doing this is: -
-qmgr stop
- This command will take a while to complete (since the node has to wait for the I2P - java shutdown hooks to complete before it can rest in peace). But once your node is - shut down, you can start it up again at any time and pick up where you left off. -
- - - -
- -

5. Running a Q Server Node

- -

5.1. Introduction

-
- This section describes the requirements for, and procedures involved with, running - a Q Server Node.
-
- We'll use a similar 'walkthrough' style to that which we used in the previous section - on client nodes. -
- -
- -

5.2. Requirements and Choices

-
- Running a Q server is a generous thing to do, and helps substantially with making - Q work at its best for everyone. However, please do make sure you can meet some - basic requirements: -
    -
  • You are running a permanent (24/7) I2P Router, on a box with at least (say) - 98% uptime.
  • -
  • You have a little bandwidth to spare, and don't mind the extra memory, disk and - CPU-usage footprint of running a fulltime Q server node
  • -
  • You have already been able to successfully run a Q client node.
  • -
- Also, please decide whether you want your server node to contribute to the mainstream - Q network, or whether you want to create your own private Q network, or join someone - else's private network. Your contribution will be most appreciated, though, if you - can run a server within the mainstream Q network. -
- -
- -

5.3. Starting Your Server Node

- -
- Starting up a Q Server node is very similar to starting up a Q client node, except - that with the qmgr command line, you must put the keyword arg server before the - command word. So the command is: -
-qmgr server start
- Similar to Q client nodes, you can check the status of a running Q server node with - the command: -
-qmgr server status
- (Note that this command will take longer to complete than with client nodes, because - the communication passes through a multi-hop I2P tunnel, rather than just through - localhost TCP).
-
- If the status command succeeds, then you'll know your new Q Server Node is happily - running in background. -
- -
- -

5.4. Joining A Q Network

- -
- When a Q Server node starts up for the first time, it is in a private network - all by itself.
-
- If you want to link your server into an existing Q network, you'll have to add a - noderef for at least one other server on that network. The command to do this - is similar to that for subscribing a client node to a network: -
-qmgr server addref <noderef-file>
- where <noderef-file> is a file into which you've saved the noderef for - the network you want to join. -
-
- Recall from the section on client nodes that the authoritative noderefs - for the mainstream Q network can be downloaded from -
http://aum.i2p/q/qnoderefs.txt. -
-
- After you've added the noderef, subsequent qmgr server status commands - should show numPeers having a value of at least 1 (and growing, as more - server nodes come online in the mainstream Q network.) - -
- -
- -

5.5. Private Networks - Exporting Your Server's Noderef

- -
- If you're planning to start your own private Q network, and want to include other - server operators in this network, then you'll have to export your server's noderef - and make it available to the others you want to invite into your network.
-
- The command to export your Q Server noderef is: -
-qmgr server getref <noderef-file>
- This will extract the I2P Destination of your running server node, and - write it into <noderef-file>. You can then privately share this file with - others who you want to invite into your private network. Each recipient of - this file will do a qmgr server addref <noderef-file> command - to import your ref into their servers.
-
- Don't forget that if you're running, or participating in, a private Q network, then - you'll need to run a separate client for accessing this network, separate from any - mainstream Q network client you may already be running.
-
- To start this extra client, you'll have to choose a directory where you want this - client to reside, a port number you want your client to listen on locally for - user commands, and run the command: -
-qmgr -dir /path/to/my/new/client -port <portnum> start
- You need the -port <portnum> command, because otherwise it'll fail - to launch (if you already have a client node running off the mainstream Q network).
-
- This will create, and launch, a new instance of a Q client, accessing your private - Q network. Don't forget to import your server's noderef into this client. Also, - note that you'll have to use this same -port <portnum> argument when - doing any operation on this client instance, such as get, put, status, search. - -
- - - -
- -

6. About the qmgr Utility

- - qmgr (or, to people fluent in Java, net.i2p.aum.q.QMgr), is just one simple - Q client application, that happens to be bundled in with the Q distro.
-
- It is by no means the only, or even main facility for accessing the Q network. We - anticipate that folks will write all manner of client apps, including fancy GUI - apps.
-
- Anyway, qmgr does give you a rudimentary yet workable client for basic access - to the Q network. Until fancy apps get written, qmgr will have to do.
-
- Don't forget that qmgr has very detailed inbuilt help. Run: -
-qmgr help
- for a quick help summary, or: -
-qmgr help verbose
- for the 'War and Peace' treatise.
-
-

- One crucial concept to remember with qmgr is that client and server node instances - are uniquely identified by the directories at which they reside. If you are running - multiple server and/or client instances, you can specify an instance with the - -dir <dirpath> option - see the help for details. -
- -
- - One last note - we strongly discourage any writing of client apps that spawn a qmgr - process, pass it arguments and parse its results. This is most definitely a path to - pain, since qmgr's shell interface is subject to radical change at any time without - notice.
-
- qmgr is for human usage, or at most, inclusion in init/at/cron scripts. Please respect - this.
-
- If you want to write higher-level clients, your best course of action is to use the - official client api library, which we anticipate will have versions available in - Java, Python, Perl and C++. If you want to write in another language, such as - OCaml, Scheme etc, then the existing api lib implementations should serve as an excellent - reference to support you in writing a native port for your own language. - -
- -
- -

8. Contacting the Author

- - I am aum, and can be reached as aum on in-I2P IRC networks, and also - at the in-I2P email address of aum@mail.i2p.
-
- -
- -
- Return to Q Homepage
-
- - Introduction | - Checklist | - Server or Client? | - Walkthrough | - Server Nodes | - About QMgr | - Contact us - -
- -
- - - -Last modified: Sun Apr 3 20:06:53 NZST 2005 - - - diff --git a/apps/q/doc/manual/notes b/apps/q/doc/manual/notes deleted file mode 100644 index b75d421a9..000000000 --- a/apps/q/doc/manual/notes +++ /dev/null @@ -1,23 +0,0 @@ - - rise on each hit: - - dy = (1 - y) / kRise - - fall after each time unit: - - dy = y / kFall - - fall after time dt: - - dy = - y ** - (dt / kFall) - - after the next hit: - - y = y - y ** (- dt / kFall) + (1 - y) / kRise - -first attempt at a load measurement algorithm: - - kFall is an arbitrary constant which dictates decay rate of load - in the absence of hits - - kRise is another constant which dictates rise of load with each hit - - dt is the time between each hit - diff --git a/apps/q/doc/metadata.html b/apps/q/doc/metadata.html deleted file mode 100644 index ece77e536..000000000 --- a/apps/q/doc/metadata.html +++ /dev/null @@ -1,372 +0,0 @@ - - - - Q Metadata Specification - - - - - - -

Q Metadata Specification

- -

1. Introduction

- - This document lists the standard metadata keys for Q data items, - discussing the rules of metadata insertion, processing and validation.
- -
- -

1.1. Definitions

- - To avoid confusions in terminology, this document will strictly abide the following definitions: -
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
TermDefinition
keyA metadata category name, technically a key as the word is used with - Java Hashtable and Python dict objects.
uriA Uniform Resource Indicator for an item of content stored within the Q network.
- Q URIs have the form: Q:<basename>[,<cryptoKey>][<path>] -
-
- Some examples: -
    -
  • Q:fhvnr3HFSK234khsf90sdh42fsh (a plain hash uri, no cryptoKey)
  • -
  • Q:e54fhjeo39schr2kcy4osEH478D/files/johnny.mp3 (a secure space URI, - no cryptoKey)
  • -
  • Q:vhfh4se987WwfkhwWFEwkh3234S,47fhh2dkhseiyu (a plain hash URI, with - a cryptoKey)
  • -
basenameThe basic element of a Q uri. This will be a base64-encoded hash - refer below to - URI calculation procedures
cryptoKeyAn optional session encryption key for the stored data, encoded as base64. - This affords some protection to server node operators, and gives them a level - of plausible deniability for whatever gets stored in their server's - datastore without their direct human awareness.
pathWhenever an item of content is inserted in secure space mode, this path - serves as a pseudo-pathname, and is conceptually similar to the path - component in (for example) standard HTTP URLs - http://<domainname>[:<port>][<path>], such as - http://slashdot.org/faq/editorial.shtml (whose path - is /faq/editorial.shtml).
-
- Paths, if not empty, should contain a leading slash ("/"). - If an application specifies a non-empty path that doesn't begin with a - leading '/', a '/' will be automatically prepended by the receiving node. -
plain hashA mode of inserting items, whereby the security of the resulting URI comes from - computing the URI from a hash of the item's data and metadata (and imposing a - mathematical barrier against spoofing content under a given URI). Corresponds to - Freenet's CHK@ keys.
secure spaceA mode of inserting items where the security of the URI is based not on a hash of the - item's data and metadata (as with plain hash mode), - but on the privateKey provided by the - application, and a content signature created from that private key. - Corresponds to Freenet's SSK@ keys. Within a secure space, you - can insert any number of items under different pseudo-pathnames (as is the case - with Freenet SSK keys). - -
- -

- -
- -

2.1. Keys Inserted By Application Before sending putItem RPCs

- - As the heading suggests, this is a list of metadata keys which should be inserted by a - Q application prior to invoking a putItem RPC on the local Q client node.
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
KeyData TypeDescription
titleStringOptional but strongly recommended. A free-text short description of the item, - should be less than 80 characters. The idea is that applications should - support a 'view' of catalogue data that shows item titles. (Prior Q convention of - titles expressed as valid filename syntax has been abandoned). -
pathStringOptional but strongly recommended. - A virtual 'pathname' for the item, which should be in valid *nix - absolute pathname syntax (beginning with '/', containing no '//', consisting - only of alphanumerics, '-', '_', '.' and '/'.
-
- In Q web interfaces, the filename component of this path will - serve as the recommended filename when downloading/saving the item.
-
- If the application also provides a - privateKey key, the path - is used in conjunction with the private key to generate publicKey - and signature keys (see below), and ultimately the final uri - under which the item can be retrieved by others.
-
- Refer also to mimetype below. -
encryptStringOptional. If this key is present, and has a value "1", "yes" or "true", - this indicates that the application wishes the data to be stored on servers in - encrypted form.
-
- If this key is present and set to a positive value, the Q node, on receiving the - putItem RPC, will: -
    -
  1. Generate a random symmetric encryption key
  2. -
  3. Encrypt the item's data using this encryption key
  4. -
  5. Delete the encrypt key from the metadata
  6. -
  7. Enclose a base64 representation of this encryption key in the RPC response - it sends back to the application (embedded in the uri
  8. -
-
typeStringOptional but strongly recommended. A standard ed2k specifier, one of text html image - audio video archive other
mimetypeStringOptional but moderately recommended. Mimetype designation of data, eg text/html, - image/jpeg etc. If not specified, an attempt will be made to guess - a mimetype from the value of the path key. If this attempt fails, then - this key will be set to application/x-octet-stream by the node receiving - the putItem RPC.
keywordsStringOptional but moderately recommended. - A set of keywords, under which the inserting app would like this item to be - discoverable. Keywords should be entirely lower case and comma-separated. Content - inserts should consider keywords carefully, and only use space characters inside - keywords when necessary (eg, for flagging a distinctive phrase containing very - common words).
privateKeyStringOptional. A Base64-encoded signing private key, in cases where the application wishes - to insert an item in signed space mode. This can be accompanied by another key, - path, indicating a 'path' within the signed space. If 'path' - is not given, it will default to '/'.
-
- Either way, when a node receives a - putItem RPC containing a privateKey in its metadata, - it removes this key and replaces it with publicKey and - signature. -
pathStringOptional. The virtual pathname, within signed space, under which to store the item. - This gets ignored and deleted unless the application also provides a - privateKey as well. But if the private key is given, the path - is used in conjunction with the private key to generate publicKey - and signature keys (see below).
- path should be a 'unix-style pathname', ie, containing only slashes - as (pseudo) directory delimiters, and alphanumeric, '-', '_' and '.' characters, - and preferably ending in a meaningful file extension such as .html -
expiryintUnixtime at which the inserted item should expire. When this expiry time - is reached, the item won't necessarily be deleted straight away, but may - be deleted whenever a node's data store is full.
-
- If this is not provided, it will default to a given duration according to - the client node's configuration.
-
- If it is provided, by an application, then the client node will transparently - generate the required 'rent payment' before caching the data item and uploading - it to servers. -
- -

- -
- -

2.2. Keys Inserted By Node Upon Receipt Of putItem RPC

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
KeyData TypeDescription
sizeIntegerSize of the data to be inserted, in bytes.
dataHashStringbase64-encoded SHA256 hash of data.
uriStringThis depends on whether the item is being inserted in plain or - signed space mode.
-
- If inserting in plain mode, then the uri is in the form - Q:somebase64hash, where the hash is computed according to - the plain hash calculation procedure.
-
- If inserting in signed space mode, then the uri will be in the form - Q:somebase64hash/path.ext, where the hash is computed as per - the signed space hash calculation procedure, and - the /path.ext is the verbatim value of the app-supplied - path key. -
publicKeyStringBase64-encoded signing public key. In cases where app provides - privateKey, - a node will derive the signing public key from the private key, - delete the private key from the metadata, and replace it with its corresponding - public key - key.
signatureStringBase64-encoded signature of path+dataHash, created using - the app-provided privateKey.
rentStringA rent payment for the data's accommodation on the server.
- Intention is to support a variety of payment tokens. Initially, the - only acceptable form of payment will be a hashcash-like token, - in the form hashcash:base64string. The hashcash: - prefix indicates that this payment is in hashcash currency, in which case - the base64String should decode to a 16-byte string whose - SHA256 hash partially collides with dataHash. - The greater the number of bits in the collision, - the longer the data's accommodation will be 'paid up for'.
-
- If this key is already present, a Q node will verify the hashcash, - and adjust the expiry key value to the time the item's accommodation - is paid up till.
-
- If the key is not present: -
    -
  • A client node will generate a value for this key with enough collision bits - to pay the accommodation up till the given app-specified expiry date.
  • -
  • A server node will grant temporary free accommodation, and adjust the expiry - key to the end of the free accommodation period.
  • -
-
- -

- - - -
- -

3. URI Determination Procedures

- -

3.1. Plain Hash URI Calculation Procedure

- - When items are inserted in plain mode, the final URI is determined from - a hash of the data and metadata. Security of the item is based on the mathematical difficulty - of creating an arbitrary data+metadata set whose hash collides with the target URI.
-
- Specifically, the recipe for calculating plain hash URIs is: -
    -
  1. If the key size is missing, set this to the size of the data, - in bytes
  2. -
  3. If the key dataHash is missing, set this to the base64-encoded - SHA256(data)
  4. -
  5. If the key title is missing, set this to the value of dataHash
  6. -
  7. From the metadata, create a set of strings, each in the form key=value, - where each line contains a metadata key and its value, and - is terminated by an ASCII linefeed (\n, 0x0a).
  8. -
  9. Ensure that key uri is omitted
  10. -
  11. Sort the strings into ascending ASCII sort order
  12. -
  13. Concatenate the strings together into one big string
  14. -
  15. Calculate the SHA256 hash of this string
  16. -
  17. Encode the hash into Base64
  18. -
  19. Prepend the string Q: to this
  20. -
- -
- -
- -

3.2. Signed Space URI Calculation Procedure

- - This is much simpler than determining plain hash URI, since the security of the URI - is based not on hashes of data and metadata, but on the cryptographic privateKey - given by the application.
-
- Calculation recipe for Signed Space URIs is: -
    -
  1. Calculate the SHA256 hash of the private key's binary data (not its base64 representation)
  2. -
  3. Encode this hash into base64, dropping any trailing '=' characters
  4. -
  5. Append to this the value of metadata item path (recall that path, - if not empty, must begin with a '/')
  6. -
  7. Prepend the string Q: to this
  8. -
- The resulting URI then is in the form Q:pubkeyHash/path.ext - -
- - -Last modified: Wed Apr 6 00:36:37 NZST 2005 - - - diff --git a/apps/q/doc/overall.jpg b/apps/q/doc/overall.jpg deleted file mode 100644 index 16441f910..000000000 Binary files a/apps/q/doc/overall.jpg and /dev/null differ diff --git a/apps/q/doc/qnoderefs.txt b/apps/q/doc/qnoderefs.txt deleted file mode 100644 index d194dd65b..000000000 --- a/apps/q/doc/qnoderefs.txt +++ /dev/null @@ -1 +0,0 @@ -rxvXpHKfWGWsql4PJaHglAERSUYyrdKKAzK6jPHT4QXRf9jgcVd4mInq0j6H4inVOzT9dG4L6c9GrlQwe4ysUm5jSTyZemxiZpQDCAazsoRzNDv6gevA40J6uGl10JtVtOjqXW8Ej0JUKubz88g~ogPb1h4Xibc-RrtqrvsJebg5xYFkLlnr7DxDtiWzIMRSZ9Ri2P~eq0SwZzd81tvASPj5fb3nySHeABAuY8HrNu0gqRLjeayDpd3OK1ogrxf1lMvfutn5pnLrlVcvKHa~6rNWWGSulsuEYWtpUd4Itj9aKqIgF9ES7RF77Z73W1f6NRTHO48ZLyLLaKVLjDIsHQP-0mOevszcPjFWtheqRKvT2D28WEMpVC-mPtfw91BkdgBa3pwWhwG~7KIhvWhGs8bj2NOKkqrwYU7xhNVaHdDDkzv4gsweCutHNiiCF~4yL54WzCIfSKDjcHjQxxVkh2NKeaItzgw9E~mPAKNZD22X~2oAuuL9i~0lldEV1ddUAAAA \ No newline at end of file diff --git a/apps/q/doc/screenshot-home.jpg b/apps/q/doc/screenshot-home.jpg deleted file mode 100644 index 99eb41cd4..000000000 Binary files a/apps/q/doc/screenshot-home.jpg and /dev/null differ diff --git a/apps/q/doc/screenshot-iewarn.jpg b/apps/q/doc/screenshot-iewarn.jpg deleted file mode 100644 index b9b8d1f95..000000000 Binary files a/apps/q/doc/screenshot-iewarn.jpg and /dev/null differ diff --git a/apps/q/doc/screenshot-qsite.jpg b/apps/q/doc/screenshot-qsite.jpg deleted file mode 100644 index ba1e9c5bc..000000000 Binary files a/apps/q/doc/screenshot-qsite.jpg and /dev/null differ diff --git a/apps/q/doc/screenshot-search.jpg b/apps/q/doc/screenshot-search.jpg deleted file mode 100644 index cbba0a7c4..000000000 Binary files a/apps/q/doc/screenshot-search.jpg and /dev/null differ diff --git a/apps/q/doc/screenshot.jpg b/apps/q/doc/screenshot.jpg deleted file mode 100644 index fac0c886a..000000000 Binary files a/apps/q/doc/screenshot.jpg and /dev/null differ diff --git a/apps/q/doc/screenshot.png b/apps/q/doc/screenshot.png 
deleted file mode 100644 index 4115a84e8..000000000 Binary files a/apps/q/doc/screenshot.png and /dev/null differ diff --git a/apps/q/doc/screenshots.html b/apps/q/doc/screenshots.html deleted file mode 100644 index 6f003a12b..000000000 --- a/apps/q/doc/screenshots.html +++ /dev/null @@ -1,23 +0,0 @@ - - - - Q Screenshots - - - -

Q Screenshots

- -
- -
-
aum
- - -Last modified: Mon Apr 18 14:06:02 NZST 2005 - - - diff --git a/apps/q/doc/spec/index.html b/apps/q/doc/spec/index.html deleted file mode 100644 index 854ee8388..000000000 --- a/apps/q/doc/spec/index.html +++ /dev/null @@ -1,1460 +0,0 @@ - - - - Q Protocol Specification - - - - - - - -
-

Q Protocol Specification

- - (first draft by aum)
-
- Return to Q Homepage
-
- - Introduction | - XML-RPC | - Architecture | - Commands | - Example Code | - Metadata | - Security | - Contact us - - -
- - - -
- -

1. Introduction

- - This document describes details of the interfaces between the various entities - in the Q network - server nodes, client nodes and client applications.
-
- Purpose is to: -
    -
  • Assist people writing user client applications, such as GUI apps, command-line - apps, or integrating Q into existing apps. -
  • - -
  • Permit alternative implementations of any of these entities, in any - programming language. -
  • - -
  • Help interested parties to gain a quick understanding of Q's architecture, - perhaps with a view to contributing ideas for improvement.
  • -
- - - -
- -

2. XML-RPC Interface

- -

2.1. WTF? All those ugly complicated angle-brackets?!?

- - If you haven't come across XML-RPC before, the whole concept might seem frightening, like - you've gotta write thousands of lines of code for parsing and encoding XML, and - negotiate some mind-numbingly complex multi-layered protocol.
-
- This is most certainly not the case. XML-RPC libraries are way simple to use.
-
- XML-RPC client and server libraries are available for all major (and most minor) - programming languages, and are structured in a way that hides all the intricate - details and presents an extremely simple and quickly learnable API over the top. - -
- -

2.2. Why XML-RPC??

- - I've chosen XML-RPC as the node interface framework because: -
    -
  • It's easy and quick to learn, regardless of programming language
  • -
  • It's supported by free libraries in all major programming languages
  • -
  • It avoids the maintenance problems of home-brew interfaces (people writing - implementations in several languages, some falling into disuse then breaking)
  • -
  • It reduces the opportunity for writing vulnerable client code (compare to writing - raw socket handlers in C, and inadvertently opening oneself up to buffer - overruns etc)
  • -
  • It allows for rapid client development
  • -
- - - -
- -

3. Architectural Overview

- - The Q network is structured as a two-level hierarchy of server nodes and - client nodes. Additionally, client applications are run by users, and - form the human interface to Q.
-
- Let's quickly overview the difference between these three entities: -
    -
  • Server nodes: -
      -
    • Are expected to stay up all or most of the time
    • -
    • Are suited for running on permanently-up I2P routers
    • -
    • Run an XML-RPC server, listening exclusively within the I2P network for - commands from other peer server nodes as well as from client - nodes
    • -
    • Run XML-RPC clients, for sending commands via I2P to other server nodes
    • -
    • When joining the network, announce themselves as peers to - other server nodes
    • -
    • Usually have no direct contact with client applications
    • -
    • Receive and execute commands from client nodes, as well as - from other peer server nodes.
    • -
    • Will never send commands to client nodes.
    • -
    • Store content, which is served up by request to client nodes
    • -
    • Send catalogues of their stored content on request to client nodes
    • -
    • Store lists of their known peer server nodes, and send these lists - on request to client nodes -
    • Manage load by advising client nodes, and peer server nodes, - in command replies, of the next advisable time for contact
    • -
    • Should preferably be implemented in platform-independent code
    • -
    -
  • -
    - -
  • Client nodes: -
      -
    • May run as continuously or as intermittently as desired without causing - disruption to the network
    • -
    • Run an XML-RPC server, listening exclusively within the user's local - TCP/IP network (usually a localhost port), as opposed to server nodes - which run their XML-RPC server listening within I2P
    • -
    • Run XML-RPC clients, for sending commands via I2P to server nodes
    • -
    • Never announce themselves as peers to server nodes
    • -
    • Never have contact with other client nodes -
    • Are suited for use over permanent or transient I2P routers
    • -
    • Periodically contact servers requesting differential updates to - content catalogues, as well as peer lists. From this info, they maintain - a local mirror of what's available globally
    • -
    • When receiving any command reply from a given server, are expected to - honour the next advised contact time specified by that server
    • -
    • Form the official point of access to the Q network for client - applications
    • -
    • Should preferably be implemented in platform-independent code
    • -
    -
  • -
    - -
  • Client applications: -
      -
    • Form the point of human (or third-party program) access to the Q network
    • -
    • Offer the user a means of searching for content, inserting content and - retrieving content
    • -
    • Include GUI apps, CLI apps, web apps, and apps with other user or program - interfaces.
    • -
    • Usually never run an XML-RPC server at all
    • -
    • Run a single XML-RPC client, for sending commands via TCP to a - local client node
    • -
    • Are implemented and maintained separately to the core Q framework, though - at any time might be included in official Q distributions
    • -
    • Can be freely implemented in platform-independent or platform-dependent - code. For instance, Macintosh-only, or Windows-only implementations are - perfectly acceptable (but not quite as welcome as platform-independent - implementations)
    • -
    -
  • - -
- - - -
- -

4. Q Command Interface Description

- -

4.1. Overview

- - As mentioned earlier, communication between all Q entities takes place via an - XML-RPC mechanism.
-
- This chapter describes the actual primitives which are supported by both server - nodes and client nodes.
-
- Although the primitives are the same for both server and client, the way they are actioned - internally may vary.
-
- - For example, with the getItem primitive, server nodes will only look in - their local content store for the item, returning either that item's data and - metadata, or a failure reply. On the other hand, client nodes will try their - local content store first, and if the item is not found, will look in their - peer catalogues. If the item is found in a peer catalogue, the client node will - then on-send getItem calls to all server nodes believed to hold that item, - until or unless it retrieves a verifiable copy of that item - -
- -
- -

4.2. XML-RPC Data Types

- -
-
- - It's possibly a good idea here to get a hold of the XML-RPC library for - your favourite programming language, as well as the manual, and look up - the description of data types. Also, if you're especially keen, - you might like to read up on XML-RPC in general: - - -
-
- - XML-RPC supports a canonical set of data types, which are seamlessly integrated into - all its high level language implementations. A quick overview of the XML-RPC data types - used in Q appears below.
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
XML-RPC Data TypeDescription
intPlain 32-bit integer
stringSequence of ASCII bytes, viewed as java.lang.String objects in java, and str - objects (strings) in Python. - Note that ASCII control chars, and high-bit-set chars, are highly illegal and will - cause failure.
binary dataRaw binary data, viewed as byte [] in java, and xmlrpclib.Binary objects - in Python. This is the format used for raw content data.
listSequence of objects, viewed as java.util.Vector in java, and list objects in Python. -
structAn unordered set of (key, value) pairs. - Represented as java.util.Hashtable objects in java, and - dict objects in Python, (associative array in perl, ...)
- -
- -

4.3. General Command/Response Format

- - With Q's XML-RPC usage, all commands are a sequence of zero or more arguments. All - responses are a struct with at least the key status, whose value, a - string, is one of: -
    -
  • "ok" - the command was successful; any additional data is included - under other keys, depending on the command
  • -
  • "error" - the command failed, and an additional key error - contains a terse description of the error
  • -
- Note that all commands are also implemented with an alternative entry point, one which - takes a single Hashtable (struct/dict/assoc-array) argument. Refer to the javadocs for - further info: - -
- -

4.4. Exceptions - XML-RPC and Otherwise

-
- In certain cases, XML-RPC calls to Q nodes may return an exception.
-
- For instance, any attempt to invoke any primitive other than those listed below - will most definitely cause an exception, because in the Q XML-RPC implementation, - no provision is made for default handlers.
-
- Apart from this, it's possible that calls to known legal methods may trigger an - exception. This is not supposed to happen, and the author will be working over - time to intercept all such exceptions and wrap them in appropriate response - structures. But in the meantime, client app developers should catch any exceptions - resulting from their XML-RPC calls and recover appropriately. -
- -
- -

4.5. Overview of Q XML-RPC Primitives

- - The XML-RPC primitives supported by Q server and client nodes include: -
    -
  • i2p.q.ping - test if a server node is alive
  • -
  • i2p.q.hello - one new server node introduces itself to another server node
  • -
  • i2p.q.getItem - retrieve an item of content
  • -
  • i2p.q.putItem - insert an item of content
  • -
  • i2p.q.getUpdate - retrieve a differential update of peers list (and optionally, catalog update)
  • -
  • i2p.q.search - search a client node for data items matching certain patterns
  • -
- -
- -

4.6. i2p.q.ping

- -
- -

Overview

- -
- - The i2p.q.ping primitive is used to test if a given server or client node - is presently online. It can be sent by server nodes, client nodes and client apps. - -
- -

Arguments

- -
- This primitive accepts no arguments, and will fail if any arguments are given. -
- -

Server Behaviour

- -
- No action on the part of the receiving server is required, apart from sending back: -
- - - - - - - - - - - - - - - - - - - - - - - - - - - -
KeyTypeDescription
statusstring"ok"
idstringThe node's nodeId, as a base64 string
deststringNode's destination, represented as base64 string
uptimeintThe number of seconds that this node has been running for
loadfloatCurrent load this node is experiencing, as a float between - 0.0 (no load) to 1.0 (impossibly flatlined)
- -

Client Behaviour

- -
- Same as server. -
- -
- -
- -

4.7. i2p.q.hello

- -
- -

Overview

- -
- - The i2p.q.hello primitive is sent by new server nodes to advise other existing - server nodes of their existence. It is only sent by server nodes to other server - nodes. It is considered an abuse for a client node to send this command. - -
- -

Arguments

- -
    -
  • destination (string) - the base64 representation of the calling node's - I2P destination (on which the calling node's in-I2P XML-RPC server may be - subsequently reached). Same format as the I2P hosts.txt listing. -
- -

Server Behaviour

- -
- If the destination is valid, the receiving server will reply with: -
- - - - - - - - -
KeyTypeDescription
statusstring"ok"
- -
- If the destination is invalid, the receiving server will send back: -
- - - - - - - - - - - - - -
KeyTypeDescription
statusstring"error"
errorstring"baddest"
- -

Client Behaviour

- -
- i2p.q.hello calls to clients are illegal. Client nodes receiving such - calls will respond with: -
- - - - - - - - - - - - - -
KeyTypeDescription
statusstring"error"
errorstring"unimplemented"
- -
- -
- -

4.8. i2p.q.getItem

- -
- -

Overview

- -
- - The i2p.q.getItem primitive is used to attempt retrieval of an item of content - from a client or server node. - -
- -

Arguments

- -
    -
  • key (string) - the base64 key under which the item in question is - stored
  • -
- -

Server Behaviour

- -
- Servers receiving this command will only search their own datastore for the item. - They will never attempt to on-request this item from other servers.
-
- If the server possesses the requested item in its datastore, it will respond with: -
- - - - - - - - - - - - - - - - - - -
KeyTypeDescription
statusstring"ok"
metadatastructA nested struct, containing the metadata for the key. (Refer section on - metadata).
databinary dataThe raw data.
- -
- If the server doesn't possess the data, it will respond with: -
- - - - - - - - - - - - - -
KeyTypeDescription
statusstring"error"
errorstring"notfound"
- - -

Client Behaviour

- -
- If the client possesses the key in its own local datastore, it will send back - the full data immediately: -
- - - - - - - - - - - - - - - - - - -
KeyTypeDescription
statusstring"ok"
metadatastructA nested struct, containing the metadata for the key. (Refer section on - metadata).
databinary dataThe raw data.
- -
- If the client doesn't possess the key, it will search its internal catalogues - for a server which does have the key.
-
- If one or more servers possessing the key are found, the client will on-send - an i2p.q.getItem command to each of those servers in turn, until it - either successfully retrieves the data, or fails.
-
- If the client successfully retrieves the data from one or more of its servers, - it will add the data to its internal cache, and reply with the above success - response.
-
- If the client was unable to source the complete data from any of its servers, - it will reply with: -
- - - - - - - - - - - - - -
KeyTypeDescription
statusstring"error"
errorstring"notfound"
- -
- -
- -

4.9. i2p.q.putItem

- -
- -

Overview

- -
- - The i2p.q.putItem primitive is used by client nodes to insert a new item - of content onto a server node.
-
- It is also used by client apps to insert a new item onto their - client node.
-
- Also, if a server node is receiving a high traffic of requests for a given item, - it may at its discretion send i2p.q.putItem commands to peer servers - to mirror the item on those servers, and spread the load. -
- -

Arguments

- -
    -
  • data - (binary) - the raw data to insert. Refer earlier - the compatible - Java datatype is byte[], and Python datatype is xmlrpclib.Binary.
  • -
  • metadata - (struct) - optional - a struct of metadata to - insert alongside the data. If this is not given, a minimal metadata set will - be automatically created by the recipient. See the section on - metadata. -
- -

Server Behaviour

- -
- If the server successfully received and stored the data (and optionally provided - metadata), it will reply with: -
- - - - - - - - - - - - - -
KeyTypeDescription
statusstring"ok"
keystringThe base64 key under which this item has been stored, and which should - be used for any subsequent i2p.q.getItem requests for that item - within the Q network.
- -
- However, if the server's datastore is full, the server will not be able to store - this item, in which case it will respond with: -
- - - - - - - - - - - - - -
KeyTypeDescription
statusstring"error"
errorstring"storefull"
- -

Client Behaviour

- -
- Client nodes receiving this command will attempt to store the item in their own - datastore, and respond immediately with one of the above server responses.
-
- In addition, client nodes will enqueue a background job to upload this item to - one or more selected server nodes. -
- -
- -
- -

4.10. i2p.q.getUpdate

- -
- -

Overview

- -
- - The i2p.q.getUpdate primitive is used to request a differential peers list - update (which optionally can include a catalog update as well).
-
- Client apps invoke this primitive on client nodes to get up-to-date - listings of items available in the network. Note that client apps will not - hand over any peers list.
-
- Client nodes periodically schedule a background job to invoke this primitive - on their known servers, such that they keep the most recent possible view of - available data and other servers.
-
- -

Arguments

- -
    -
  • since - (int) - unix time in seconds to update from. The recipient - will send back a list of all content it has become aware of since this - time.
  • -
  • includePeers - (int) - set to 1 to include peer list update in the return - data, 0 to omit.
  • -
  • includeCatalog - (int) - set to 1 to include catalog update in the return - data, 0 to omit.
  • -
- -

Server Behaviour

- -
- On receiving this command, a server node will send back lists of metadata records - for all new content (and/or all new peers) it has become aware of since the given - date. The full response is formatted as follows: -
- - - - - - - - - - - - - - - - - - - - - - - - - - -
KeyTypeDescription
statusstring"ok"
itemslistA list of metadata records for new items. Refer to the section on - metadata for more information. If the server - has not become aware of any new data since the given date (or if the - includeCatalog argument was 0), this list will be empty.
peerslistA list of destinations of new peers. If the server has not discovered - any new peers since the given date (or if the includePeers argument - was 0), this list will be empty. -
timeUpdateEndsintunixtime in secs that this update ends. The peer receiving this - response should note this time, and quote it as the since argument - in the next getUpdate request
timeNextContactintAdvised time (unixtime in sec) for sending the next getUpdate command. The sending - peer should not issue any getCatalog commands before this time, but is - welcome to issue them after this time. The actual time value is guesstimated - by the server node, depending on its current load.
- -

4.11. i2p.q.search

- -
- -

Overview

- -
- - The i2p.q.search primitive is invoked by client apps to search a client node - for data items matching a set of criteria. -
- Only client nodes support this primitive. Server nodes will return an empty - result set and an error response. -
- -

Arguments

- -
    -
  • criteria - (hashtable) - a set of metadata criteria to match. Each key in - this hashtable is a metadata key (eg title, type etc), and the - corresponding value is a regular expression string to match. Regular expression - syntax is documented in the java API in the - section - on class 'Pattern'.
    -
    - The search criteria work 'AND-style', in that if more than one metadata key - match pattern is given, then only items matching all of the given criteria - will be returned.
    -
    - Python example (using XML-RPC proxy - see code samples below): -
    -result = mynode.i2p.q.search({"type":"text", "summary":"^War.*"})
    -metaRecs = result['items']
    -
    - Java Example (using XML-RPC proxy - see code examples below): -
    -Hashtable criteria = new Hashtable();
    -criteria.put("type", "text");
    -criteria.put("summary", "^War.*");
    -Vector args = new Vector();
    -args.addElement(criteria);
    -Hashtable result = (Hashtable)mynode.execute("i2p.q.search", args);
    -Vector metaRecs = (Vector)result.get("items");
    -
    - Note that if the criteria argument is empty (no keys/values), then the - client node will send back metadata for every item of content it knows of, which - (depending on the size of the Q network), could be quite a resource-hungry operation. -
  • -
- -

Server Behaviour

- -
- Servers receiving this command will send back an error response: -
- - - - - - - - - - - - - -
KeyTypeDescription
statusstring"error"
errorstring"unimplemented"
- -

Client Behaviour

- -
- Client nodes receiving this command will send back the following response: -
- - - - - - - - - - - - -
KeyTypeDescription
statusstring"ok"
itemsvectorA list of metadata records (Hashtables) for items which match the given - search criteria, and are retrievable through this client - node (ie, the client node either possesses the item, or knows one or more - servers which possess the item).
-
- -
- - -
- -

5. Client Program Examples

- -

5.1. Overview

- - This section provides a couple of simple examples of client app programming.
-
- At present, only Python and Java examples are given.
-
- (If you don't know either of these languages, you should be - able to get the general drift by studying the examples, sufficient to map the concepts to the - XML-RPC API available to your preferred language.)
-
- The examples below communicate with a client node XML-RPC server (running on the - local machine and listening on its default port of 7651), and perform simple - operations of data insertion, catalog fetching and data retrieval. - -
- -

5.2. Java Example

- - To run this example, you'll need: -
    -
  • A running I2P installation, with an instance of a Q client node. -
  • The I2P standard jarfiles declared in your java CLASSPATH
  • -
  • The standard Apache XML-RPC library jarfile in your CLASSPATH (which you will - already have on your CLASSPATH, because this is part of installing Q). Recall that you - can get a copy of Apache java XML-RPC lib jarfile from - http://ws.apache.org/xmlrpc).
  • -
- Now for the code (heavily annotated, so you don't necessarily need to know or understand Java), which - should be written to a source file called QDemo.java. Note that this client would be - significantly shorter if it instantiated a QClientNode class directly and invoked its methods, - but that is not what we're showing here - we're demonstrating the use of the client node's XML-RPC - interface. -
-
-// QDemo.java
-//
-// A simple demo example of a Q client application, which 
-// communicates with a running Q client node on the local
-// machine via its TCP XML-RPC interface
-//
-// If your client node is not running on localhost, or
-// if it's listening on a port other than the default
-// 7651, you'll need to change the code below.
-//
-// Note that this demo is bloated by the fact we're using
-// raw XML-RPC.
-// 
-// The following exercises are left to the reader:
-//  1. Modify this app so that instead of using the XML-RPC
-//     interface, it instantiates a QClientNode, and
-//     invokes its methods directly.
-//  2. Write a thin wrapper class which instantiates an XML-RPC
-//     client, and offers simpler access methods (thus avoiding
-//     the need to create and populate Vectors of args before
-//     calling, and pick through a reply Hashtable after the call),
-//     and create a version of this demo which uses the wrapper.
-
-// pull in some standard java stuff
-import java.*;
-import java.lang.*;
-import java.util.*;
-import java.net.*;
-import java.io.*;
-
-// pull in some xml-rpc stuff
-import org.apache.xmlrpc.*;
-
-// since we're talking to the node via xmlrpc, and talking to
-// it in a separate VM, we don't need to import any Q packages
-
-// Define a minimal demo class, which just defines a
-// main method enabling us to run the demo from a shell.
-//
-// For the purposes of this demo, we're assuming that your Q client node is
-// running on your local machine, and that you haven't altered the
-// listening port (default 7651) for the client's XML-RPC interface.
-
-public class QDemo {
-
-    // just define a main so we can run this from a shell
-    static public void main(String [] args)
-        throws MalformedURLException, XmlRpcException, IOException
-    {
-        // for getting and analysing replies from node
-        Hashtable result;
-        String status;
-
-        // Create a new client app object
-        XmlRpcClient myClient = new XmlRpcClient("http://127.0.0.1:7651");
-
-        // -------------------------------------
-        // First action - execute a 'ping' on this peer
-        // -------------------------------------
-
-        Vector noArgs = new Vector();
-        result = (Hashtable)myClient.execute("i2p.q.ping", noArgs);
-        print("ping: result=" + result);
-
-        // -------------------------------------
-        // Second action - insert an item of data
-        // -------------------------------------
-
-        // mark the current time, we'll use this later
-        Integer then = new Integer((int)(new Date().getTime() / 1000));
-
-        // create metadata
-        // (note from previous chapter that metadata is optional)
-        Hashtable meta = new Hashtable();
-        meta.put("type", "text");
-        meta.put("abstract", "a simple piece of demo data");
-        meta.put("mimetype", "text/plain");
-
-        // create some data
-        String data = "Hello, world";
-
-        // set up the arguments list
-        Vector insertArgs = new Vector();
-        insertArgs.addElement(meta);
-        insertArgs.addElement(data.getBytes()); // must insert data as byte[]
-            
-        // and do the insert
-        result = (Hashtable)myClient.execute("i2p.q.putItem", insertArgs);
-        print("putItem: result=" + result);
-
-        // check what happened
-        status = (String)result.get("status");
-        String key;
-        if (status.equals("ok")) {
-            // insert succeeded
-            key = (String)result.get("key");
-            print("Insert successful");
-        } else {
-            // insert failed, bail
-            print("Insert failed: error=" + (String)result.get("error"));
-            return;
-        }
-
-        // -------------------------------------
-        // Third action - check for catalog updates
-        // (which should include what we've just inserted)
-        // -------------------------------------
-
-        // create an args list, with just the date we noted before the insert
-        Vector updateArgs = new Vector();
-        updateArgs.addElement(then);
-        // add the flags
-        updateArgs.addElement(new Integer(0));   // 'includePeers'
-        updateArgs.addElement(new Integer(1));   // 'includeCatalog'
-
-        // execute the 'getCatalog'
-        result = (Hashtable)myClient.execute("i2p.q.getUpdate", updateArgs);
-        print("getUpdate: result="+result);
-
-        // pick out the results, and search for what we just inserted
-        int i;
-        Vector items = (Vector)result.get("items");
-        int nitems = items.size();
-        boolean foundit = false;
-        for (i = 0; i < nitems; i++) {
-            // get the nth item
-            Hashtable metaRec = (Hashtable)items.get(i);
-            String thisKey = (String)metaRec.get("key");
-            if (thisKey.equals(key)) {
-                // yay, got it!
-                foundit = true;
-                break;
-            }
-        }
-
-        // did we get it?
-        if (!foundit) {
-            print("wtf? we inserted it but it's not in the catalog!");
-            return;
-        }
-
-        // yep, we got it, so try to retrieve it back
-        Vector getArgs = new Vector();
-        getArgs.addElement(key);
-        result = (Hashtable)myClient.execute("i2p.q.getItem", getArgs);
-        print("getItem: result=" + result);
-
-        // did we get it?
-        status = (String)result.get("status");
-        if (!status.equals("ok")) {
-            print("getItem failed: " + (String)result.get("error"));
-            return;
-        }
-
-        // yep, got it
-        byte [] binData = (byte [])result.get("data");
-        String strData = new String(binData);
-        print("getItem: success, data='"+strData+"'");
-
-        print("--- END OF Q CLIENT DEMO ---");
-    }
-
-    // a convenient shorthand method for printing stuff to stdout
-    static void print(String msg) {
-        System.out.println(msg);
-    }
-}
-        
-
- -
- -

5.3. Python Example

- - To run this example, you will need a running I2P installation, including a running instance - of a Q client node.
-
- Note that, in contrast to Java, Python 2.3 and later have all the necessary XML-RPC libraries built in. -
- Now for some code (again, heavily annotated). This, together with the previous example, present an - interesting comparison between some of Java and Python's ways of doing things. -
-
-#!/usr/bin/env python
-"""
-QDemo.py
-
-A simple demo example of a Q client application, which 
-communicates with a running Q client node on the local
-machine via its TCP XML-RPC interface
-
-If your client node is not running on localhost, or
-if it's listening on a port other than the default
-7651, you'll need to change the code below.
-
-Note that this demo is bloated by the fact we're using
-raw XML-RPC.
-
-The following exercise is left to the reader:
- * Write a thin wrapper class which instantiates an XML-RPC
-   client, and offers simpler access methods (thus avoiding
-   the need to pick through a reply dict after the call),
-   and create a version of this demo which uses the wrapper.
-"""
-
-# a coupla needed imports
-from time import time
-from xmlrpclib import ServerProxy, Binary
-
-# For the purposes of this demo, we're assuming that your Q client node is
-# running on your local machine, and that you haven't altered the
-# listening port (default 7651) for the client's XML-RPC interface.
-
-def qdemo():
-    # Create a new client app object
-    myClient = ServerProxy("http://127.0.0.1:7651")
-
-    # -------------------------------------
-    # First action - execute a 'ping' on this peer
-    # -------------------------------------
-
-    result = myClient.i2p.q.ping()
-    print "ping: result=%s" % result
-
-    # -------------------------------------
-    # Second action - insert an item of data
-    # -------------------------------------
-
-    # mark the current time, we'll use this later
-    then = int(time())
-
-    # create metadata
-    # (note from previous chapter that metadata is optional)
-    meta = {
-        "type" : "text",
-        "abstract" : "a simple piece of demo data",
-        "mimetype" : "text/plain",
-        }
-
-    # create some data, and binary-wrap it
-    data = "Hello, world"
-    binData = Binary(data)
-
-    # and do the insert
-    result = myClient.i2p.q.putItem(meta, binData)
-    print "putItem: result=%s" % result
-
-    # check what happened
-    if result["status"] == "ok":
-        # insert succeeded
-        key = result["key"]
-        print "Insert successful"
-    else:
-        # insert failed, bail
-        print "Insert failed: error=%s" % result['error']
-        return;
-
-    # -------------------------------------
-    # Third action - check for catalog updates
-    # (which should include what we've just inserted)
-    # -------------------------------------
-
-    # execute the 'getUpdate'
-    result = myClient.i2p.q.getUpdate(then, 0, 1)
-    print "getUpdate: result=%s" % result
-
-    # pick out the results, and search for what we just inserted
-    foundit = False
-    for metaRec in result['items']:
-        if metaRec['key'] == key:
-            # yay, got it!
-            foundit = True
-            break
-
-    # did we get it?
-    if not foundit:
-        print "wtf? we inserted it but it's not in the catalog!"
-        return;
-
-    # yep, we got it, so try to retrieve it back
-    print "getCatalog: found the item we just inserted"
-    result = myClient.i2p.q.getItem(key)
-    print "getItem: result=%s" % result
-
-    # did we get it?
-    if result["status"] != "ok":
-        print "getItem failed: %s" % result["error"]
-        return;
-
-    # yep, got it (note that data is an xmlrpclib.Binary object,
-    # and the raw data we want is in its .data attribute)
-    print "getItem: success, data='%s'" % result['data'].data
-
-    print "--- END OF Q CLIENT DEMO ---"
-
-# run the demo func if this script is executed directly
-if __name__ == '__main__':
-    qdemo()
-        
-
- - - -
- -

6. Keys and Metadata

- -

6.1. Overview

- Like Freenet, content is stored in Q as (data, metadata) pairs.
-
- However, there's a difference. On Freenet, metadata is stored as a string of up to - 32k length, and must be parsed (and sometimes executed) by client code. On the other - hand, metadata is exposed in Q as an XML-RPC struct (Java Hashtable or - Properties object, or Python dict, or Perl associative array etc).
-
- If a content item gets inserted to the Q network without metadata, a minimal metadata set - will be transparently generated, and is guaranteed to contain at least the following - elements:
-
- - - - - - - - - - - - -
KeyTypeDescription
sizeintSize of the stored data item, in bytes
dataHashstringa base64 representation of the SHA256 hash of the full raw data, using the I2P - base64 alphabet
-
- -
- -

6.2. Node IDs

- - When Q nodes are first created, they generate themselves a random - I2P privKey/dest keypair using the in-I2P services.
-
- The I2P destination gets converted to what we call a Q Node ID, as follows: -
    -
  • Start with binary destination (not base64)
  • -
  • Determine the SHA256 binary digest of this dest
  • -
  • Encode the resulting binary string via I2P's base64 alphabet
  • -
- -
- -

6.3. Keys

- - Here, 'key' means the unique short string, by which items of content can be - retrieved, and which is returned from an i2p.q.putItem command.
-
- Like Freenet's CHK@ keytype, Q keys are hashes of the key's content and - metadata.
-
- The recipe for calculating the 'key' of a particular item of metadata+data is: -
    -
  1. If no metadata is submitted with the data, create a minimal metadata as per above
  2. -
  3. Serialise out the metadata into a string representation, with the fieldnames in - alphanumeric order. The format of such string is one line per metadata field/value - pair per line, in the format: -
    - metadatakeyname=metadatakeyvalue\n -
    -
  4. -
  5. Calculate the binary SHA1 digest of this serialised metadata string
  6. -
  7. Base64-encode this binary digest via the I2P Base64 alphabet
  8. -
- -
- -

6.4. Q Metadata Conventions

- - Additional to the core metadata defined above, there is a convention in Q that the - following optional extra metadata - keys be provided on insert, and recognised and honoured on retrieve.
-
- It is highly recommended that these keys be included - in metadata when content is inserted:
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - -
KeyTypeDescription
titlestringA short and descriptive title for the item, preferably formatted as - a filename which is legal and convenient on all main operating systems, ie, - containing only alphanumerics, '-', '_' and '.'.
-
- It is highly advisable that an appropriate file extension appear at the - end of the title. Refer to the Security Considerations - section below. -
- It is expected that client applications will use this title field when - displaying available content lists to users. -
typestringGeneric type of material, using the following superset of the eMule/Donkey - classifications: -
    -
  • text
  • -
  • html
  • -
  • image
  • -
  • audio
  • -
  • video
  • -
  • software
  • -
  • archive
  • -
  • misc
  • -
-
mimetypestringA recognised mime-type, as per RFC1341, RFC1521, RFC1522, such as - audio/mpeg, text/plain etc.
-
- This will help client app developers devise ways of disposing with data items - they request from client nodes.
-
- For instance, client apps with http front ends - may send back this mimetype as the value of the Content-type: header, - (and possibly take preventative action with potentially hazardous mimetypes, such - as those which some browsers such as IE might trust and execute blindly as - binary code).
-
- Alternatively, gui-based or cli-based client apps may convert this mimetype to - an appropriate benign file extension (such as .txt, - .ogg, .jpg etc). See Security - Considerations below. -
keywordsstringA set of space-separated keywords describing this item, intended for - human reading, as well as automatic parsing by client apps.
abstractstringA short descriptive summary of the nature of the data, intended for - human reading, as well as automatic pattern matching searches by client - apps.
-
-
- -
- -

6.5. One Data Item, Many Metadata Sets?

- - It is perfectly possible, and legal, for one item of data to be referenced by two - completely different items of metadata.
-
- Since content keys are a hash of metadata, which in turn contains a hash of the data, - then two pieces of metadata referencing the same data item, but containing different - metadata values, will end up with different keys.
-
- So as far as key addresses go, there will be a many-to-1 relationship between raw - content keys, and the data returned under these keys.
-
- - - -
- -

7. Security Considerations

- - All Peer2Peer software (as with all networked software in general) carries with it a set of - devastating security risks which should be respected to the utmost.
-
- This applies in no small part to Q.
-
- So this brief sermon is addressed to anyone writing any client applications or - APIs talking to the Q network.
-
- Any material which involves the execution of code on a client machine is risky. - However, much of the risk can be managed if the code is open source and peer-reviewed.
-
- Perhaps the biggest issue as far as Q is concerned is this: - -
- Client app developers should never, NEVER implicitly - trust incoming content, and should always assume that malicious remote users - will insert content which attempts to compromise other users' systems. -
- - If a Q client app wants to offer filetype-specific support, then perhaps a good - strategy is for the client app to use a whitelist of - known low-risk file extensions, such as .txt, or (possibly) - .ogg, .png etc. Recall that in some Windows configurations, even - .jpg can carry an arbitrary code execution attack!
-
- Note that .html (text/html) is especially dangerous, and - should be respected accordingly.
-
- Support for .html could be a real boon. For instance, it could allow - I2P users to publish an I2P equivalent of freenet's freesites - static - HTML websites which are accessible even when the author goes offline.
-
- However, if a client app chooses to recognise .html, it should either - use a code-screening mechanism like freenet's fproxy and keep it - up to date with all the latest advisories, or use a mandatory-proxy - mechanism like I2P's eepProxy.
-
- One possibility is to serve up such content via a totally in-I2P http interface, - such that Joe can view the content via his regular eepProxy-configured browser.
-
- This is a typical case where security and ease/convenience can end up in - direct conflict. Automatic handling of content according to data type - is great from a Joe Sixpack Windows User point of view, but it is a snake-pit - of risks that can potentially result in any of the following (or worse): -
    -
  • Set up Joe's computer as a spambot
  • -
  • Get Joe's personal credit card and other info, and use this criminally
  • -
  • Download child pornography or terrorist information onto Joe's PC, use an - exploit to get Joe's IP address and/or identity details, and report this to - authorities, thus framing Joe and sending him off undeservedly to Club Fed or - Her Majesty's
  • -
  • Mount a DDoS, anonymity or other attack on the I2P network
  • -
  • Further spread additional content for achieving more of the above on - other unsuspecting users.
  • -
- - The crux of this lecture is that client app writers have a huge responsibility to - ensure their apps are safe against malicious content.
-
- Perhaps the best and most - practical solution is to just store downloaded material into a directory - known to and owned by the user, and make it the user's task and responsibility to - manually copy materials out of this directory and take responsibility for how s/he uses - this content thereafter.
-
- -
- -
- -

8. Contacting the Author

- - I am aum, and can be reached as aum on in-I2P IRC networks, and also - at the in-I2P email address of aum@mail.i2p.
-
- -
-
- - Introduction | - XML-RPC | - Architecture | - Commands | - Example Code | - Metadata | - Security | - Contact us - -
-
-
- - - -Last modified: Sat Apr 2 13:31:08 NZST 2005 - - - diff --git a/apps/q/java/build.xml b/apps/q/java/build.xml deleted file mode 100644 index 8fed7c7a2..000000000 --- a/apps/q/java/build.xml +++ /dev/null @@ -1,90 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/apps/q/java/qresources/html/404.html b/apps/q/java/qresources/html/404.html deleted file mode 100644 index c25e9661c..000000000 --- a/apps/q/java/qresources/html/404.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - -
Item Not Found
Failed to retrieve item:
- -
diff --git a/apps/q/java/qresources/html/about.html b/apps/q/java/qresources/html/about.html deleted file mode 100644 index 39e0b5523..000000000 --- a/apps/q/java/qresources/html/about.html +++ /dev/null @@ -1,27 +0,0 @@ - - - - - - - -
About Q
-

Version - -

- - 0.0.1 - -

Designed and engineered by aum

- -

Copyright © 2005 by aum. -
Released under the -
GNU Lesser General Public License

- -

- Many thanks to jrandom, smeghead and frosk
- for their patient and knowledgeable support
- in helping this python programmer
- get partly proficient in java.

-
- diff --git a/apps/q/java/qresources/html/addrefform.html b/apps/q/java/qresources/html/addrefform.html deleted file mode 100644 index 02a96a4ad..000000000 --- a/apps/q/java/qresources/html/addrefform.html +++ /dev/null @@ -1,40 +0,0 @@ - - - - - - - - - - - - - - - -
Add Hub Noderef
- - - - - - - - - - - - - -
- In order for your node to join a Q network, it must have a ref to at - least one of the running Q Hubs. You need to get a Q Hub noderef and - paste it here. -
- -
- -
-
- diff --git a/apps/q/java/qresources/html/aiealert.html b/apps/q/java/qresources/html/aiealert.html deleted file mode 100644 index 01ddecbd5..000000000 --- a/apps/q/java/qresources/html/aiealert.html +++ /dev/null @@ -1,29 +0,0 @@ - - - - - - - -
Critical Anonymity Alert!
- - - - - - -
-

You are attempting to view a QSite via the Internet Explorer web browser. -

-

We have blocked the connection to protect your anonymity. -

-

As a matter of policy, Q does not support QSite browsing via Microsoft - Internet Explorer (MSIE), because of that browser's abysmal track record with - regard to security. If we did allow you to browse Q via MSIE, it would - be easy for a malicious QSite author to embed hostile content which - undermines your computer's security and compromises your anonymity. -

-

If you want to surf I2P QSites, you'll need to use a more secure web - browser such as Mozilla or Firefox. -

-
diff --git a/apps/q/java/qresources/html/anonalert.html b/apps/q/java/qresources/html/anonalert.html deleted file mode 100644 index 48a980d7f..000000000 --- a/apps/q/java/qresources/html/anonalert.html +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - -
Critical Anonymity Alert!
- - - - - - -
-

You are attempting to view a QSite via an unprotected link. -

-

- We have blocked the connection to protect your anonymity. If we don't - do this, then a malicious QSite author can insert content into the QSite - which triggers outbound hits to arbitrary servers on the mainstream web, - which in turn can easily reveal a lot of personal information about you - and the QSite you are accessing. -

-

If you want to browse QSites with your web browser with greater safety, - you'll have to follow these simple steps: -

-
    -
  1. Edit your I2P hosts.txt file and add the following entry - (all on one line): -
    - (and make sure you do NOT reveal this entry to anyone else) -
  2. -
  3. Configure one of your web browsers to use the proxy server: -
    localhost:4444
    - (or whatever address you have configured your I2P EEProxy to listen on, - if you've changed it)

    -
  4. -
  5. Start up the browser you have just configured, and enter the web address: -
    http://q.i2p
    -
  6. -
-

Even if you do this, you still won't have a 100% guarantee of anonymity, since - a malicious QSite author can send code to your browser which exploits a vulnerability - in the browser to compromise your anonymity (eg, accessing third party cookies, executing - arbitrary code, accessing your local filesystem, uploading compromising information - about you to hostile I2P EEPsites etc). But you can relax (somewhat) in the knowledge - that such attacks are much more difficult, particularly if you use a decent web browser. -

-

Thank you for your co-operation. We wish you happy and safe browsing. -

-
-
diff --git a/apps/q/java/qresources/html/downloads.html b/apps/q/java/qresources/html/downloads.html deleted file mode 100644 index f30ed5d1e..000000000 --- a/apps/q/java/qresources/html/downloads.html +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - -
Q Downloads
Not implemented yet
- diff --git a/apps/q/java/qresources/html/genkeysform.html b/apps/q/java/qresources/html/genkeysform.html deleted file mode 100644 index 3ffaa1f1f..000000000 --- a/apps/q/java/qresources/html/genkeysform.html +++ /dev/null @@ -1,28 +0,0 @@ - - - - - - - - - - - - -
Generate New Keypair
- - - - -
- Click the button to generate a new keypair for - future inserts of signed space keys. -
-
-
- - -
-
- diff --git a/apps/q/java/qresources/html/genkeysresult.html b/apps/q/java/qresources/html/genkeysresult.html deleted file mode 100644 index c693d306f..000000000 --- a/apps/q/java/qresources/html/genkeysresult.html +++ /dev/null @@ -1,32 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Your New Keys
- - - - -
- Please save these keys in a safe place, - preferably on an encrypted partition: -
-
Public Key: - -
Private Key:
- diff --git a/apps/q/java/qresources/html/getform.html b/apps/q/java/qresources/html/getform.html deleted file mode 100644 index 313d7cdc6..000000000 --- a/apps/q/java/qresources/html/getform.html +++ /dev/null @@ -1,15 +0,0 @@ - - - - - - - - - - - - - - -
Retrieve Content
URI:
diff --git a/apps/q/java/qresources/html/help.html b/apps/q/java/qresources/html/help.html deleted file mode 100644 index b50fbcc81..000000000 --- a/apps/q/java/qresources/html/help.html +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - -
Q Help
- Not written yet -
- diff --git a/apps/q/java/qresources/html/jobs.html b/apps/q/java/qresources/html/jobs.html deleted file mode 100644 index 08191f931..000000000 --- a/apps/q/java/qresources/html/jobs.html +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - - - - - -
Current Background Jobs
- - - - - - - - - - - - - - - - - -
Secs From NowDescription
- - - -
(No current jobs)
diff --git a/apps/q/java/qresources/html/main.html b/apps/q/java/qresources/html/main.html deleted file mode 100644 index b0941de6a..000000000 --- a/apps/q/java/qresources/html/main.html +++ /dev/null @@ -1,122 +0,0 @@ - - - -Q Web Interface - - - - - - - - - -
- - - - - -
- - - - - - - - - - - - -
"tabcell""tabcell_inactive"> - - - Status - - Settings -
-
- Q Node -
-
-
-
- -
- -
-
-
-
- - diff --git a/apps/q/java/qresources/html/msiealert.html b/apps/q/java/qresources/html/msiealert.html deleted file mode 100644 index 01ddecbd5..000000000 --- a/apps/q/java/qresources/html/msiealert.html +++ /dev/null @@ -1,29 +0,0 @@ - - - - - - - -
Critical Anonymity Alert!
- - - - - - -
-

You are attempting to view a QSite via the Internet Explorer web browser. -

-

We have blocked the connection to protect your anonymity. -

-

As a matter of policy, Q does not support QSite browsing via Microsoft - Internet Explorer (MSIE), because of that browser's abysmal track record with - regard to security. If we did allow you to browse Q via MSIE, it would - be easy for a malicious QSite author to embed hostile content which - undermines your computer's security and compromises your anonymity. -

-

If you want to surf I2P QSites, you'll need to use a more secure web - browser such as Mozilla or Firefox. -

-
diff --git a/apps/q/java/qresources/html/puterror.html b/apps/q/java/qresources/html/puterror.html deleted file mode 100644 index 9b184d9d6..000000000 --- a/apps/q/java/qresources/html/puterror.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - -
Error Inserting QSiteItem
Your insert failed because:
- -
diff --git a/apps/q/java/qresources/html/putform.html b/apps/q/java/qresources/html/putform.html deleted file mode 100644 index 7ceaa3c66..000000000 --- a/apps/q/java/qresources/html/putform.html +++ /dev/null @@ -1,139 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Insert Content
Insert A QSite Instead

Type: - Text - HTML - Image - Audio - Video - Archive - Other -

Title: - -
- (A short descriptive title for this item) -
-

Path: - -
- (If you're inserting in plain-hash mode, this should be a suggested - filename for people to save the item as when downloading it; If you're - inserting in signed-space mode, this can be a longer pathname) -
-

Mimetype: - -
- (Leave blank unless you know exactly what this means) -
-

Keywords: - -
- (Comma-separated list of short descriptive keywords) -
-

Abstract: - -
- (A short descriptive summary for the item, preferably no - longer than 256 characters) -
-

File: - -
- (Leave this blank if you are entering the raw data in the text box below) -
-

Private Key: - -
- (Only if inserting in signed space mode) -
-

- -

Raw Data: -
- (You may enter the raw data here as text instead of selecting a - file above) -
- -
\ No newline at end of file diff --git a/apps/q/java/qresources/html/putok.html b/apps/q/java/qresources/html/putok.html deleted file mode 100644 index 0a4e8b97e..000000000 --- a/apps/q/java/qresources/html/putok.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - -
QSiteItem Inserted Successfully
Item URI:
- -
\ No newline at end of file diff --git a/apps/q/java/qresources/html/putsiteform.html b/apps/q/java/qresources/html/putsiteform.html deleted file mode 100644 index 20a0966f9..000000000 --- a/apps/q/java/qresources/html/putsiteform.html +++ /dev/null @@ -1,101 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Insert A QSite
Insert A Single File Instead

Name: - -
- (A short name for the QSite - should contain only alphanumerics, '-', and '_'; - should definitely not contain '/', ':', '\', ' ' etc) -
-

Private Key: - -
- (Mandatory - if you don't have a signed-space keypair, get one by - clicking on Tools) -
-

Directory: - -
- (Absolute directory path where the QSite's files reside) -
-

Title: - -
- (A short descriptive title for this QSite) -
-

Keywords: - -
- (Comma-separated list of short descriptive keywords) -
-

Abstract: - -
- (A short descriptive summary for the QSite, preferably no - longer than 256 characters) -
-

- -
diff --git a/apps/q/java/qresources/html/searchform.html b/apps/q/java/qresources/html/searchform.html deleted file mode 100644 index a40d6a55a..000000000 --- a/apps/q/java/qresources/html/searchform.html +++ /dev/null @@ -1,76 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Search For Content
Type: - ANY - QSite - Text - HTML - Image -
- Audio - Video - Archive - Other -
Title: checked>regexp
Path: checked>regexp
Mimetype: checked>regexp
Keywords: checked>regexp
Summary: checked>regexp
- Search Mode: - - AND - OR -

- -
\ No newline at end of file diff --git a/apps/q/java/qresources/html/searchresults.html b/apps/q/java/qresources/html/searchresults.html deleted file mode 100644 index 21ac24552..000000000 --- a/apps/q/java/qresources/html/searchresults.html +++ /dev/null @@ -1,27 +0,0 @@ -width="95%"> - - - - - - - - - -
Search Results
items found
- - - - - - -
-
- -
-
-
-
() bytes
-
-
-
diff --git a/apps/q/java/qresources/html/settings.html b/apps/q/java/qresources/html/settings.html deleted file mode 100644 index c6686e90f..000000000 --- a/apps/q/java/qresources/html/settings.html +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - -
Q Settings
Not implemented yet
- diff --git a/apps/q/java/qresources/html/status.html b/apps/q/java/qresources/html/status.html deleted file mode 100644 index f0fac2d6e..000000000 --- a/apps/q/java/qresources/html/status.html +++ /dev/null @@ -1,24 +0,0 @@ - - - - - - - -
Q Node Status
- - - - - - - - - - - - - - -
ItemValue
-
diff --git a/apps/q/java/qresources/html/tools.html b/apps/q/java/qresources/html/tools.html deleted file mode 100644 index 84aecb3fb..000000000 --- a/apps/q/java/qresources/html/tools.html +++ /dev/null @@ -1,6 +0,0 @@ - - - - -
Q Tools
- diff --git a/apps/q/java/qresources/html/widgets/itemtype.html b/apps/q/java/qresources/html/widgets/itemtype.html deleted file mode 100644 index e69de29bb..000000000 diff --git a/apps/q/java/src/HTML/Template.java b/apps/q/java/src/HTML/Template.java deleted file mode 100644 index 6b6ce914a..000000000 --- a/apps/q/java/src/HTML/Template.java +++ /dev/null @@ -1,1131 +0,0 @@ -/* -* HTML.Template: A module for using HTML Templates with java -* -* Copyright (c) 2002 Philip S Tellis (philip.tellis@iname.com) -* -* This module is free software; you can redistribute it -* and/or modify it under the terms of either: -* -* a) the GNU General Public License as published by the Free -* Software Foundation; either version 1, or (at your option) -* any later version, or -* -* b) the "Artistic License" which comes with this module. -* -* This program is distributed in the hope that it will be -* useful, but WITHOUT ANY WARRANTY; without even the implied -* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -* PURPOSE. See either the GNU General Public License or the -* Artistic License for more details. -* -* You should have received a copy of the Artistic License -* with this module, in the file ARTISTIC. If not, I'll be -* glad to provide one. -* -* You should have received a copy of the GNU General Public -* License along with this program; if not, write to the Free -* Software Foundation, Inc., 59 Temple Place, Suite 330, -* Boston, MA 02111-1307 USA -* -* Modified by David McNab (david@rebirthing.co.nz) to allow nesting of -* templates (ie, passing a child Template object as a value argument -* to a .setParam() invocation on a parent Template object). 
-* -*/ - -package HTML; -import java.io.BufferedReader; -import java.io.FileNotFoundException; -import java.io.FileReader; -import java.io.IOException; -import java.io.PrintWriter; -import java.io.Reader; -import java.util.EmptyStackException; -import java.util.Enumeration; -import java.util.Hashtable; -import java.util.NoSuchElementException; -import java.util.Properties; -import java.util.Stack; -import java.util.StringTokenizer; -import java.util.Vector; - -import HTML.Tmpl.Filter; -import HTML.Tmpl.Util; -import HTML.Tmpl.Element.Conditional; -import HTML.Tmpl.Element.Element; -import HTML.Tmpl.Element.If; -import HTML.Tmpl.Element.Var; -import HTML.Tmpl.Parsers.Parser; - -/** - * Use HTML Templates with java. - *

- * The HTML.Template class allows you to use HTML Templates from within - * your java programs. It makes it possible to change the look of your - * servlets without having to recompile them. Use HTML.Template to - * separate code from presentation in your servlets. - *

- *

- *	Hashtable args = new Hashtable();
- *	args.put("filename", "my_template.tmpl");
- *
- *	Template t = new Template(args);
- *
- *	t.setParam("title", "The HTML Template package");
- *	t.printTo(response.getWriter());
- * 
- *

- * HTML.Template is based on the perl module HTML::Template by Sam Tregar - *

- * Modified by David McNab (david@rebirthing.co.nz) to allow nesting of - * templates (ie, passing a child Template object as a value argument - * to a .setParam() invocation on a parent Template object). - *

- * @author Philip S Tellis - * @version 0.1.2 - */ -public class Template -{ - private If __template__ = new If("__template__"); - private Hashtable params = new Hashtable(); - - private boolean dirty = true; - - private boolean strict = true; - private boolean die_on_bad_params = false; - private boolean global_vars = false; - private boolean case_sensitive = false; - private boolean loop_context_vars = false; - private boolean debug = false; - private boolean no_includes = false; - private boolean search_path_on_include = false; - private int max_includes = 11; - private String filename = null; - private String scalarref = null; - private String [] arrayref = null; - private String [] path = null; - private Reader filehandle = null; - private Filter [] filters = null; - - private Stack elements = new Stack(); - private Parser parser; - - /** - * Initialises a new HTML.Template object with the contents of - * the given file. - * - * @param filename a string containing the name of - * the file to be used as a - * template. This may be an - * absolute or relative path to a - * template file. - * - * @throws FileNotFoundException If the file specified does not - * exist. - * @throws IllegalStateException If <tmpl_include> is - * used when no_includes is in - * effect. - * @throws IOException If an input or output Exception - * occurred while reading the - * template. - * - * @deprecated No replacement. You should use either - * {@link #Template(Object [])} or - * {@link #Template(Hashtable)} - */ - public Template(String filename) - throws FileNotFoundException, - IllegalStateException, - IOException - { - this.filename = filename; - init(); - } - - - /** - * Initialises a new Template object, using the name/value - * pairs passed as default values. - *

- * The parameters passed may be any combination of filename, - * scalarref, arrayref, path, case_sensitive, loop_context_vars, - * strict, die_on_bad_params, global_vars, max_includes, - * no_includes, search_path_on_include and debug. - * Each with its own value. Any one of filename, scalarref or - * arrayref must be passed. - *

- * Eg: - *

-	 *	String [] template_init = {
-	 *		"filename",  "my_template.tmpl",
-	 *		"case_sensitive", "true",
-	 *		"max_includes",   "5"
-	 *	};
-	 *
-	 *      Template t = new Template(template_init);
-	 * 
- *

- * The above code creates a new Template object, initialising - * its input file to my_template.tmpl, turning on case_sensitive - * parameter matching, and restricting maximum depth of includes - * to five. - *

- * Parameter values that take boolean values may either be a String - * containing the words true/false, or the Boolean values Boolean.TRUE - * and Boolean.FALSE. Numeric values may be Strings, or Integers. - * - * @since 0.0.8 - * - * @param args an array of name/value pairs to initialise - * this template with. Valid values for - * each element may be: - * @param filename [Required] a String containing the path to a - * template file - * @param scalarref [Required] a String containing the entire - * template as its contents - * @param arrayref [Required] an array of lines that make up - * the template - * @param path [Optional] an array of Strings specifying - * the directories in which to look for the - * template file. If not specified, the current - * working directory is used. If specified, - * only the directories in this array are used. - * If you want the current directory searched, - * include "." in the path. - *

- * If you have only a single path, it can be a - * plain String instead of a String array. - *

- * This is effective only for the template file, - * and not for included files, but see - * search_path_on_include for how to change that. - * @param case_sensitive [Optional] specifies whether parameter - * matching is case sensitive or not. A value - * of "false", "0" or "" is considered false. - * All other values are true. - *

- * Default: false - * @param loop_context_vars [Optional] when set to true four loop - * context variables are made available inside a - * loop: __FIRST__, __LAST__, __INNER__, __ODD__, __COUNTER__. - * They can be used with <TMPL_IF>, - * <TMPL_UNLESS> and <TMPL_ELSE> to - * control how a loop is output. Example: - *

-	 *	    <TMPL_LOOP NAME="FOO">
-	 *	       <TMPL_IF NAME="__FIRST__">
-	 *	         This only outputs on the first pass.
-	 *	       </TMPL_IF>
-	 *
-	 *	       <TMPL_IF NAME="__ODD__">
-	 *	         This outputs on the odd passes.
-	 *	       </TMPL_IF>
-	 *
-	 *	       <TMPL_UNLESS NAME="__ODD__">
-	 *	         This outputs on the even passes.
-	 *	       </TMPL_IF>
-	 *
-	 *	       <TMPL_IF NAME="__INNER__">
-	 *	         This outputs on passes that are 
-	 *		neither first nor last.
-	 *	       </TMPL_IF>
-	 *
-	 *	       <TMPL_IF NAME="__LAST__">
-	 *	         This only outputs on the last pass.
-	 *	       <TMPL_IF>
-	 *	    </TMPL_LOOP>
-	 *			
- *

- * NOTE: A loop with only a single pass will get - * both __FIRST__ and __LAST__ - * set to true, but not __INNER__. - *

- * Default: false - * @param strict [Optional] if set to false the module will - * allow things that look like they might be - * TMPL_* tags to get by without throwing - * an exception. Example: - *

-	 *          <TMPL_HUH NAME=ZUH>
-	 *			
- *

- * Would normally cause an error, but if you - * create the Template with strict == 0, - * HTML.Template will ignore it. - *

- * Default: true - * @param die_on_bad_params [Optional] if set to true - * the module will complain if you try to set - * tmpl.setParam("param_name", "value") and - * param_name doesn't exist in the template. - *

- * This effect doesn't descend into loops. - *

- * Default: false (may change in later versions) - * @param global_vars [Optional] normally variables declared outside - * a loop are not available inside a loop. This - * option makes TMPL_VARs global throughout - * the template. It also affects TMPL_IF and TMPL_UNLESS. - *

-	 *	    <p>This is a normal variable: <TMPL_VAR NORMAL>.</p>
-	 *
-	 *	    <TMPL_LOOP NAME="FROOT_LOOP>
-	 *	       Here it is inside the loop: <TMPL_VAR NORMAL>
-	 *	    </TMPL_LOOP>
-	 *			
- *

- * Normally this wouldn't work as expected, since - * <TMPL_VAR NORMAL>'s value outside the loop - * isn't available inside the loop. - *

- * Default: false (may change in later versions) - * @param max_includes [Optional] specifies the maximum depth that - * includes can reach. Including files to a - * depth greater than this value causes an error - * message to be displayed. Set to 0 to disable - * this protection. - *

- * Default: 10 - * @param no_includes [Optional] If set to true, disallows the - * <TMPL_INCLUDE> tag in the template - * file. This can be used to make opening - * untrusted templates slightly less dangerous. - *

- * Default: false - * @param search_path_on_include [Optional] if set, then the - * path is searched for included files as well - * as the template file. See the path parameter - * for more information. - *

- * Default: false - * @param debug [Optional] setting this option to true causes - * HTML.Template to print random error messages - * to STDERR. - * - * @throws ArrayIndexOutOfBoundsException If an odd number of - * parameters is passed. - * @throws FileNotFoundException If the file specified does not - * exist or no filename is passed. - * @throws IllegalArgumentException If an unknown parameter is - * passed. - * @throws IllegalStateException If <tmpl_include> is - * used when no_includes is in - * effect. - * @throws IOException If an input or output Exception - * occurred while reading the - * template. - */ - public Template(Object [] args) - throws ArrayIndexOutOfBoundsException, - FileNotFoundException, - IllegalArgumentException, - IllegalStateException, - IOException - - { - if(args.length%2 != 0) - throw new ArrayIndexOutOfBoundsException("odd number " + - "of arguments passed"); - - for(int i=0; i - * The parameters passed are the same as in the Template(Object []) - * constructor. Each with its own value. Any one of filename, - * scalarref or arrayref must be passed. - *

- * Eg: - *

-	 *	Hashtable args = new Hashtable();
-	 *	args.put("filename", "my_template.tmpl");
-	 *	args.put("case_sensitive", "true");
-	 *	args.put("loop_context_vars", Boolean.TRUE);
-	 *	// args.put("max_includes", "5");
-	 *	args.put("max_includes", new Integer(5));
-	 *
-	 *	Template t = new Template(args);
-	 * 
- *

- * The above code creates a new Template object, initialising - * its input file to my_template.tmpl, turning on case_sensitive - * parameter matching, and the loop context variables __FIRST__, - * __LAST__, __ODD__ and __INNER__, and restricting maximum depth of - * includes to five. - *

- * Parameter values that take boolean values may either be a String - * containing the words true/false, or the Boolean values Boolean.TRUE - * and Boolean.FALSE. Numeric values may be Strings, or Integers. - * - * @since 0.0.10 - * - * @param args a Hashtable of name/value pairs to initialise - * this template with. Valid values are the same - * as in the Template(Object []) constructor. - * - * @throws FileNotFoundException If the file specified does not - * exist or no filename is passed. - * @throws IllegalArgumentException If an unknown parameter is - * passed. - * @throws IllegalStateException If <tmpl_include> is - * used when no_includes is in - * effect. - * @throws IOException If an input or output Exception - * occurred while reading the - * template. - * - * @see #Template(Object []) - */ - public Template(Hashtable args) - throws FileNotFoundException, - IllegalArgumentException, - IllegalStateException, - IOException - - { - Enumeration e = args.keys(); - while(e.hasMoreElements()) { - String key = (String)e.nextElement(); - Object value = args.get(key); - - parseParam(key, value); - } - - init(); - } - - /** - * Prints the parsed template to the provided PrintWriter. - * - * @param out the PrintWriter that this template will be printed - * to - */ - public void printTo(PrintWriter out) - { - out.print(output()); - } - - /** - * Returns the parsed template as a String. - * - * @return a string containing the parsed template - */ - public String output() - { - return __template__.parse(params); - } - - /** - * Sets the values of parameters in this template from a Hashtable. - * - * @param params a Hashtable containing name/value pairs for - * this template. Keys in this hashtable must - * be Strings and values may be either Strings - * or Vectors. - *

- * Parameter names are currently not case - * sensitive. - *

- * Parameter names can contain only letters, - * digits, ., /, +, - and _ characters. - *

- * Parameter names starting and ending with - * a double underscore are not permitted. - * eg: __myparam__ is illegal. - * - * @return the number of parameters actually set. - * Illegal parameters will not be set, but - * no error/exception will be thrown. - */ - public int setParams(Hashtable params) - { - if(params == null || params.isEmpty()) - return 0; - int count=0; - for(Enumeration e = params.keys(); e.hasMoreElements();) { - Object key = e.nextElement(); - if(key.getClass().getName().endsWith(".String")) { - Object value = params.get(key); - try { - setParam((String)key, value); - count++; - } catch (Exception pe) { - // key was not a String or Vector - // or key was null - // don't increment count - } - } - } - if(count>0) { - dirty=true; - Util.debug_print("Now dirty: set params"); - } - - return count; - } - - /** - * Sets a single scalar parameter in this template. - * - * @param name a String containing the name of this parameter. - * Parameter names are currently not case sensitive. - * @param value a String containing the value of this parameter - * - * @return the value of the parameter set - * @throws IllegalArgumentException if the parameter name contains - * illegal characters - * @throws NullPointerException if the parameter name is null - * - * @see #setParams(Hashtable) - */ - public String setParam(String name, String value) - throws IllegalArgumentException, NullPointerException - { - try { - return (String)setParam(name, (Object)value); - } catch(ClassCastException iae) { - return null; - } - } - - /** - * Sets a single Integer parameter in this template. - * - * @param name a String containing the name of this parameter. - * Parameter names are currently not case sensitive. 
- * @param value an Integer containing the value of this parameter - * - * @return the value of the parameter set - * @throws IllegalArgumentException if the parameter name contains - * illegal characters - * @throws NullPointerException if the parameter name is null - * - * @see #setParams(Hashtable) - */ - public Integer setParam(String name, Integer value) - throws IllegalArgumentException, NullPointerException - { - try { - return (Integer)setParam(name, (Object)value); - } catch(ClassCastException iae) { - return null; - } - } - - /** - * Sets a single int parameter in this template. - * - * @param name a String containing the name of this parameter. - * Parameter names are currently not case sensitive. - * @param value an int containing the value of this parameter - * - * @return the value of the parameter set - * @throws IllegalArgumentException if the parameter name contains - * illegal characters - * @throws NullPointerException if the parameter name is null - * - * @see #setParams(Hashtable) - */ - public int setParam(String name, int value) - throws IllegalArgumentException, NullPointerException - { - return setParam(name, new Integer(value)).intValue(); - } - - /** - * Sets a single boolean parameter in this template. - * - * @param name a String containing the name of this parameter. - * Parameter names are currently not case sensitive. - * @param value a boolean containing the value of this parameter - * - * @return the value of the parameter set - * @throws IllegalArgumentException if the parameter name contains - * illegal characters - * @throws NullPointerException if the parameter name is null - * - * @see #setParams(Hashtable) - */ - public boolean setParam(String name, boolean value) - throws IllegalArgumentException, NullPointerException - { - return setParam(name, new Boolean(value)).booleanValue(); - } - - /** - * Sets a single Boolean parameter in this template. - * - * @param name a String containing the name of this parameter. 
- * Parameter names are currently not case sensitive. - * @param value a Boolean containing the value of this parameter - * - * @return the value of the parameter set - * @throws IllegalArgumentException if the parameter name contains - * illegal characters - * @throws NullPointerException if the parameter name is null - * - * @see #setParams(Hashtable) - */ - public Boolean setParam(String name, Boolean value) - throws IllegalArgumentException, NullPointerException - { - try { - return (Boolean)setParam(name, (Object)value); - } catch(ClassCastException iae) { - return null; - } - } - - - /** - * Sets a single parameter in this template to a nested Template - * - * @param name a String containing the name of this parameter. - * Parameter names are currently not case sensitive. - * @param value a Template object to be nested in - * - * @return the value of the parameter set - * @throws IllegalArgumentException if the parameter name contains - * illegal characters - * @throws NullPointerException if the parameter name is null - */ - public Template setParam(String name, Template value) - throws IllegalArgumentException, NullPointerException - { - try { - return (Template)setParam(name, (Object)value); - } catch(ClassCastException iae) { - return null; - } - } - - - /** - * Sets a single list parameter in this template. - * - * @param name a String containing the name of this parameter. - * Parameter names are not currently case sensitive. 
- * @param value a Vector containing a list of Hashtables of parameters - * - * @return the value of the parameter set - * @throws IllegalArgumentException if the parameter name contains - * illegal characters - * @throws NullPointerException if the parameter name is null - * - * @see #setParams(Hashtable) - */ - public Vector setParam(String name, Vector value) - throws IllegalArgumentException, NullPointerException - { - try { - return (Vector)setParam(name, (Object)value); - } catch(ClassCastException iae) { - return null; - } - } - - /** - * Returns a parameter from this template identified by the given name. - * - * @param name a String containing the name of the parameter to be - * returned. Parameter names are not currently case - * sensitive. - * - * @return the value of the requested parameter. If the parameter - * is a scalar, the return value is a String, if the - * parameter is a list, the return value is a Vector. - * - * @throws NoSuchElementException if the parameter does not exist - * in the template - * @throws NullPointerException if the parameter name is null - */ - public Object getParam(String name) - throws NoSuchElementException, NullPointerException - { - if(name == null) - throw new NullPointerException("name cannot be null"); - if(!params.containsKey(name)) - throw new NoSuchElementException(name + - " is not a parameter in this template"); - - if(case_sensitive) - return params.get(name); - else - return params.get(name.toLowerCase()); - } - - - private void parseParam(String key, Object value) - throws IllegalStateException - { - if(key.equals("case_sensitive")) - { - this.case_sensitive=boolify(value); - Util.debug_print("case_sensitive: "+value); - } - else if(key.equals("strict")) - { - this.strict=boolify(value); - Util.debug_print("strict: "+value); - } - else if(key.equals("global_vars")) - { - this.global_vars=boolify(value); - Util.debug_print("global_vars: "+value); - } - else if(key.equals("die_on_bad_params")) - { - 
this.die_on_bad_params=boolify(value); - Util.debug_print("die_obp: "+value); - } - else if(key.equals("max_includes")) - { - this.max_includes=intify(value)+1; - Util.debug_print("max_includes: "+value); - } - else if(key.equals("no_includes")) - { - this.no_includes=boolify(value); - Util.debug_print("no_includes: "+value); - } - else if(key.equals("search_path_on_include")) - { - this.search_path_on_include=boolify(value); - Util.debug_print("path_includes: "+value); - } - else if(key.equals("loop_context_vars")) - { - this.loop_context_vars=boolify(value); - Util.debug_print("loop_c_v: "+value); - } - else if(key.equals("debug")) - { - this.debug=boolify(value); - Util.debug=this.debug; - Util.debug_print("debug: "+value); - } - else if(key.equals("filename")) - { - this.filename = (String)value; - Util.debug_print("filename: "+value); - } - else if(key.equals("scalarref")) - { - this.scalarref = (String)value; - Util.debug_print("scalarref"); - } - else if(key.equals("arrayref")) - { - this.arrayref = (String [])value; - Util.debug_print("arrayref"); - } - else if(key.equals("path")) - { - if(value.getClass().getName().startsWith("[")) - this.path = (String [])value; - else { - this.path = new String[1]; - this.path[0] = (String)value; - } - Util.debug_print("path"); - for(int j=0; j not " + - "allowed when " + - "no_includes in effect" - ); - if(max_includes == 0) { - throw new IndexOutOfBoundsException( - "include too deep"); - } else { - // come here if positive - // or negative - elements.push(e); - read_file(p.getProperty("name")); - } - } - else if(type.equals("var")) - { - String name = p.getProperty("name"); - String escape = p.getProperty("escape"); - String def = p.getProperty("default"); - Util.debug_print("name: " + name); - Util.debug_print("escape: " + escape); - Util.debug_print("default: " + def); - e.add(new Var(name, escape, def)); - } - else if(type.equals("else")) - { - Util.debug_print("adding branch"); - ((Conditional)e).addBranch(); - } 
- else if(p.getProperty("close").equals("true")) - { - Util.debug_print("closing tag"); - if(!type.equals(e.Type())) - throw new EmptyStackException(); - - e = (Element)elements.pop(); - } - else - { - Element t = parser.getElement(p); - e.add(t); - elements.push(e); - e=t; - } - } - return e; - } - - private void read_file(String filename) - throws FileNotFoundException, - IllegalStateException, - IOException, - EmptyStackException - { - BufferedReader br=openFile(filename); - - String line; - - Element e = null; - if(elements.empty()) - e = __template__; - else - e = (Element)elements.pop(); - - max_includes--; - while((line=br.readLine()) != null) { - Util.debug_print("Line: " + line); - e = parseLine(line+"\n", e); - } - max_includes++; - - br.close(); - br=null; - - } - - private void read_line_array(String [] lines) - throws FileNotFoundException, - IllegalStateException, - IOException, - EmptyStackException - { - - Element e = __template__; - - max_includes--; - for(int i=0; i 0) - type = type.substring(type.lastIndexOf(".")+1); - - String valid_types = ",String,Vector,Boolean,Integer,Template"; - - if(valid_types.indexOf(type) < 0) - throw new ClassCastException( - "value is neither scalar nor list nor Template"); - - name=case_sensitive?name:name.toLowerCase(); - - if(!case_sensitive && type.equals("Vector")) { - value = lowerCaseAll((Vector)value); - } - - Util.debug_print("setting: " + name); - params.put(name, value); - - dirty=true; - return value; - } - - private static Vector lowerCaseAll(Vector v) - { - Vector v2 = new Vector(); - for(Enumeration e = v.elements(); e.hasMoreElements(); ) { - Hashtable h = (Hashtable)e.nextElement(); - if(h == null) { - v2.addElement(h); - continue; - } - Hashtable h2 = new Hashtable(); - for(Enumeration e2 = h.keys(); e2.hasMoreElements(); ) { - String key = (String)e2.nextElement(); - Object value = h.get(key); - String value_type = value.getClass().getName(); - Util.debug_print("to lower case: " + key + "(" + 
value_type + ")"); - if(value_type.endsWith(".Vector")) - value = lowerCaseAll((Vector)value); - h2.put(key.toLowerCase(), value); - } - v2.addElement(h2); - } - return v2; - } - - private static boolean boolify(Object o) - { - String s; - if(o.getClass().getName().endsWith(".Boolean")) - return ((Boolean)o).booleanValue(); - else if(o.getClass().getName().endsWith(".String")) - s = (String)o; - else - s = o.toString(); - - if(s.equals("0") || s.equals("") || s.equals("false")) - return false; - return true; - } - - private static int intify(Object o) - { - String s; - if(o.getClass().getName().endsWith(".Integer")) - return ((Integer)o).intValue(); - else if(o.getClass().getName().endsWith(".String")) - s = (String)o; - else - s = o.toString(); - - try { - return Integer.parseInt(s); - } catch(NumberFormatException nfe) { - return 0; - } - } - - private static String stringify(boolean b) - { - if(b) - return "1"; - else - return ""; - } - - private BufferedReader openFile(String filename) - throws FileNotFoundException - { - boolean add_path=true; - - if(!elements.empty() && !search_path_on_include) - add_path=false; - - if(filename.startsWith("/")) - add_path=false; - - if(this.path == null) - add_path=false; - - Util.debug_print("open " + filename); - if(!add_path) - return new BufferedReader(new FileReader(filename)); - - BufferedReader br=null; - - for(int i=0; i 0) - control_class = control_class.substring( - control_class.lastIndexOf(".")+1); - - if(control_class.equals("String")) { - return !(((String)control_val).equals("") || - ((String)control_val).equals("0")); - } else if(control_class.equals("Vector")) { - return !((Vector)control_val).isEmpty(); - } else if(control_class.equals("Boolean")) { - return ((Boolean)control_val).booleanValue(); - } else if(control_class.equals("Integer")) { - return (((Integer)control_val).intValue() != 0); - } else { - throw new IllegalArgumentException("Unrecognised type"); - } - } -} - diff --git 
a/apps/q/java/src/HTML/Tmpl/Element/Element.java b/apps/q/java/src/HTML/Tmpl/Element/Element.java deleted file mode 100644 index 2dea977fa..000000000 --- a/apps/q/java/src/HTML/Tmpl/Element/Element.java +++ /dev/null @@ -1,66 +0,0 @@ -/* -* HTML.Template: A module for using HTML Templates with java -* -* Copyright (c) 2002 Philip S Tellis (philip.tellis@iname.com) -* -* This module is free software; you can redistribute it -* and/or modify it under the terms of either: -* -* a) the GNU General Public License as published by the Free -* Software Foundation; either version 1, or (at your option) -* any later version, or -* -* b) the "Artistic License" which comes with this module. -* -* This program is distributed in the hope that it will be -* useful, but WITHOUT ANY WARRANTY; without even the implied -* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -* PURPOSE. See either the GNU General Public License or the -* Artistic License for more details. -* -* You should have received a copy of the Artistic License -* with this module, in the file ARTISTIC. If not, I'll be -* glad to provide one. 
-* -* You should have received a copy of the GNU General Public -* License along with this program; if not, write to the Free -* Software Foundation, Inc., 59 Temple Place, Suite 330, -* Boston, MA 02111-1307 USA -*/ - - -package HTML.Tmpl.Element; -import java.util.Hashtable; -import java.util.NoSuchElementException; - -public abstract class Element -{ - protected String type; - protected String name=""; - - public abstract String parse(Hashtable params); - public abstract String typeOfParam(String param) - throws NoSuchElementException; - - public void add(String data){} - public void add(Element node){} - - public boolean contains(String param) - { - try { - return (typeOfParam(param) != null?true:false); - } catch(NoSuchElementException nse) { - return false; - } - } - - public final String Type() - { - return type; - } - - public final String Name() - { - return name; - } -} diff --git a/apps/q/java/src/HTML/Tmpl/Element/If.java b/apps/q/java/src/HTML/Tmpl/Element/If.java deleted file mode 100644 index 4384e8fbd..000000000 --- a/apps/q/java/src/HTML/Tmpl/Element/If.java +++ /dev/null @@ -1,39 +0,0 @@ -/* -* HTML.Template: A module for using HTML Templates with java -* -* Copyright (c) 2002 Philip S Tellis (philip.tellis@iname.com) -* -* This module is free software; you can redistribute it -* and/or modify it under the terms of either: -* -* a) the GNU General Public License as published by the Free -* Software Foundation; either version 1, or (at your option) -* any later version, or -* -* b) the "Artistic License" which comes with this module. -* -* This program is distributed in the hope that it will be -* useful, but WITHOUT ANY WARRANTY; without even the implied -* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -* PURPOSE. See either the GNU General Public License or the -* Artistic License for more details. -* -* You should have received a copy of the Artistic License -* with this module, in the file ARTISTIC. 
If not, I'll be -* glad to provide one. -* -* You should have received a copy of the GNU General Public -* License along with this program; if not, write to the Free -* Software Foundation, Inc., 59 Temple Place, Suite 330, -* Boston, MA 02111-1307 USA -*/ - -package HTML.Tmpl.Element; - -public class If extends Conditional -{ - public If(String control_var) throws IllegalArgumentException - { - super("if", control_var); - } -} diff --git a/apps/q/java/src/HTML/Tmpl/Element/Loop.java b/apps/q/java/src/HTML/Tmpl/Element/Loop.java deleted file mode 100644 index cb1911a87..000000000 --- a/apps/q/java/src/HTML/Tmpl/Element/Loop.java +++ /dev/null @@ -1,183 +0,0 @@ -/* -* HTML.Template: A module for using HTML Templates with java -* -* Copyright (c) 2002 Philip S Tellis (philip.tellis@iname.com) -* -* This module is free software; you can redistribute it -* and/or modify it under the terms of either: -* -* a) the GNU General Public License as published by the Free -* Software Foundation; either version 1, or (at your option) -* any later version, or -* -* b) the "Artistic License" which comes with this module. -* -* This program is distributed in the hope that it will be -* useful, but WITHOUT ANY WARRANTY; without even the implied -* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -* PURPOSE. See either the GNU General Public License or the -* Artistic License for more details. -* -* You should have received a copy of the Artistic License -* with this module, in the file ARTISTIC. If not, I'll be -* glad to provide one. 
-* -* You should have received a copy of the GNU General Public -* License along with this program; if not, write to the Free -* Software Foundation, Inc., 59 Temple Place, Suite 330, -* Boston, MA 02111-1307 USA -*/ - -package HTML.Tmpl.Element; -import java.util.Enumeration; -import java.util.Hashtable; -import java.util.NoSuchElementException; -import java.util.Vector; - -public class Loop extends Element -{ - private boolean loop_context_vars=false; - private boolean global_vars=false; - - private Vector control_val = null; - private Vector data; - - public Loop(String name) - { - this.type = "loop"; - this.name = name; - this.data = new Vector(); - } - - public Loop(String name, boolean loop_context_vars) - { - this(name); - this.loop_context_vars=loop_context_vars; - } - - public Loop(String name, boolean loop_context_vars, boolean global_vars) - { - this(name); - this.loop_context_vars=loop_context_vars; - this.global_vars=global_vars; - } - - public void add(String text) - { - data.addElement(text); - } - - public void add(Element node) - { - data.addElement(node); - } - - public void setControlValue(Vector control_val) - throws IllegalArgumentException - { - this.control_val = process_var(control_val); - } - - public String parse(Hashtable p) - { - if(!p.containsKey(this.name)) - this.control_val = null; - else { - Object o = p.get(this.name); - if(!o.getClass().getName().endsWith(".Vector") && - !o.getClass().getName().endsWith(".List")) - throw new ClassCastException( - "Attempt to set with a non-list. 
tmpl_loop=" + this.name); - setControlValue((Vector)p.get(this.name)); - } - - if(control_val == null) - return ""; - - StringBuffer output = new StringBuffer(); - Enumeration iterator = control_val.elements(); - - boolean first=true; - boolean last=false; - boolean inner=false; - boolean odd=true; - int counter=1; - - while(iterator.hasMoreElements()) { - Hashtable params = (Hashtable)iterator.nextElement(); - - if(params==null) - params = new Hashtable(); - - if(global_vars) { - for(Enumeration e = p.keys(); e.hasMoreElements();) { - Object key = e.nextElement(); - if(!params.containsKey(key)) - params.put(key, p.get(key)); - } - } - - if(loop_context_vars) { - if(!iterator.hasMoreElements()) - last=true; - inner = !first && !last; - - params.put("__FIRST__", first?"1":""); - params.put("__LAST__", last?"1":""); - params.put("__ODD__", odd?"1":""); - params.put("__INNER__", inner?"1":""); - params.put("__COUNTER__", "" + (counter++)); - } - - Enumeration de = data.elements(); - while(de.hasMoreElements()) { - - Object e = de.nextElement(); - if(e.getClass().getName().indexOf("String")>-1) - output.append((String)e); - else - output.append(((Element)e).parse(params)); - } - first = false; - odd = !odd; - } - - return output.toString(); - } - - public String typeOfParam(String param) - throws NoSuchElementException - { - for(Enumeration e = data.elements(); e.hasMoreElements();) - { - Object o = e.nextElement(); - if(o.getClass().getName().endsWith(".String")) - continue; - if(((Element)o).Name().equals(param)) - return ((Element)o).Type(); - } - throw new NoSuchElementException(param); - } - - private Vector process_var(Vector control_val) - throws IllegalArgumentException - { - String control_class = ""; - - if(control_val == null) - return null; - - control_class=control_val.getClass().getName(); - - if(control_class.indexOf("Vector") > -1) { - if(control_val.isEmpty()) - return null; - } else { - throw new IllegalArgumentException("Unrecognised type"); - } - - 
return control_val; - } - -} - diff --git a/apps/q/java/src/HTML/Tmpl/Element/Unless.java b/apps/q/java/src/HTML/Tmpl/Element/Unless.java deleted file mode 100644 index 8caca00c6..000000000 --- a/apps/q/java/src/HTML/Tmpl/Element/Unless.java +++ /dev/null @@ -1,39 +0,0 @@ -/* -* HTML.Template: A module for using HTML Templates with java -* -* Copyright (c) 2002 Philip S Tellis (philip.tellis@iname.com) -* -* This module is free software; you can redistribute it -* and/or modify it under the terms of either: -* -* a) the GNU General Public License as published by the Free -* Software Foundation; either version 1, or (at your option) -* any later version, or -* -* b) the "Artistic License" which comes with this module. -* -* This program is distributed in the hope that it will be -* useful, but WITHOUT ANY WARRANTY; without even the implied -* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -* PURPOSE. See either the GNU General Public License or the -* Artistic License for more details. -* -* You should have received a copy of the Artistic License -* with this module, in the file ARTISTIC. If not, I'll be -* glad to provide one. 
-* -* You should have received a copy of the GNU General Public -* License along with this program; if not, write to the Free -* Software Foundation, Inc., 59 Temple Place, Suite 330, -* Boston, MA 02111-1307 USA -*/ - -package HTML.Tmpl.Element; - -public class Unless extends Conditional -{ - public Unless(String control_var) throws IllegalArgumentException - { - super("unless", control_var); - } -} diff --git a/apps/q/java/src/HTML/Tmpl/Element/Var.java b/apps/q/java/src/HTML/Tmpl/Element/Var.java deleted file mode 100644 index bf761b9c0..000000000 --- a/apps/q/java/src/HTML/Tmpl/Element/Var.java +++ /dev/null @@ -1,145 +0,0 @@ -/* -* HTML.Template: A module for using HTML Templates with java -* -* Copyright (c) 2002 Philip S Tellis (philip.tellis@iname.com) -* -* This module is free software; you can redistribute it -* and/or modify it under the terms of either: -* -* a) the GNU General Public License as published by the Free -* Software Foundation; either version 1, or (at your option) -* any later version, or -* -* b) the "Artistic License" which comes with this module. -* -* This program is distributed in the hope that it will be -* useful, but WITHOUT ANY WARRANTY; without even the implied -* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -* PURPOSE. See either the GNU General Public License or the -* Artistic License for more details. -* -* You should have received a copy of the Artistic License -* with this module, in the file ARTISTIC. If not, I'll be -* glad to provide one. -* -* You should have received a copy of the GNU General Public -* License along with this program; if not, write to the Free -* Software Foundation, Inc., 59 Temple Place, Suite 330, -* Boston, MA 02111-1307 USA -* -* Modified by David McNab (david@rebirthing.co.nz) to allow nesting of -* templates (ie, passing a child Template object as a value argument -* to a .setParam() invocation on a parent Template object). 
-*/ - -package HTML.Tmpl.Element; -import java.util.Hashtable; -import java.util.NoSuchElementException; - -import HTML.Template; -import HTML.Tmpl.Util; - -public class Var extends Element -{ - public static final int ESCAPE_NONE = 0; - public static final int ESCAPE_URL = 1; - public static final int ESCAPE_HTML = 2; - public static final int ESCAPE_QUOTE = 4; - - public Var(String name, int escape, Object default_value) - throws IllegalArgumentException - { - this(name, escape); - this.default_value = stringify(default_value); - } - - public Var(String name, int escape) - throws IllegalArgumentException - { - if(name == null) - throw new IllegalArgumentException("tmpl_var must have a name"); - this.type = "var"; - this.name = name; - this.escape = escape; - } - - public Var(String name, String escape) - throws IllegalArgumentException - { - this(name, escape, null); - } - - public Var(String name, String escape, Object default_value) - throws IllegalArgumentException - { - this(name, ESCAPE_NONE, default_value); - - if(escape.equalsIgnoreCase("html")) - this.escape = ESCAPE_HTML; - else if(escape.equalsIgnoreCase("url")) - this.escape = ESCAPE_URL; - else if(escape.equalsIgnoreCase("quote")) - this.escape = ESCAPE_QUOTE; - } - - public Var(String name, boolean escape) - throws IllegalArgumentException - { - this(name, escape?ESCAPE_HTML:ESCAPE_NONE); - } - - public String parse(Hashtable params) - { - String value = null; - - if(params.containsKey(this.name)) - value = stringify(params.get(this.name)); - else - value = this.default_value; - - if(value == null) - return ""; - - if(this.escape == ESCAPE_HTML) - return Util.escapeHTML(value); - else if(this.escape == ESCAPE_URL) - return Util.escapeURL(value); - else if(this.escape == ESCAPE_QUOTE) - return Util.escapeQuote(value); - else - return value; - } - - public String typeOfParam(String param) - throws NoSuchElementException - { - throw new NoSuchElementException(param); - } - - private String 
stringify(Object o) - { - if(o == null) - return null; - - String cname = o.getClass().getName(); - if(cname.endsWith(".String")) - return (String)o; - else if(cname.endsWith(".Integer")) - return ((Integer)o).toString(); - else if(cname.endsWith(".Boolean")) - return ((Boolean)o).toString(); - else if(cname.endsWith(".Date")) - return ((java.util.Date)o).toString(); - else if(cname.endsWith(".Vector")) - throw new ClassCastException("Attempt to set with a non-scalar. Var name=" + this.name); - else if(cname.endsWith(".Template")) - return ((Template)o).output(); - else - throw new ClassCastException("Unknown object type: " + cname); - } - - // Private data starts here - private int escape=ESCAPE_NONE; - private String default_value=null; - -} diff --git a/apps/q/java/src/HTML/Tmpl/Filter.java b/apps/q/java/src/HTML/Tmpl/Filter.java deleted file mode 100644 index 5d5f82112..000000000 --- a/apps/q/java/src/HTML/Tmpl/Filter.java +++ /dev/null @@ -1,145 +0,0 @@ -/* -* HTML.Template: A module for using HTML Templates with java -* -* Copyright (c) 2002 Philip S Tellis (philip.tellis@iname.com) -* -* This module is free software; you can redistribute it -* and/or modify it under the terms of either: -* -* a) the GNU General Public License as published by the Free -* Software Foundation; either version 1, or (at your option) -* any later version, or -* -* b) the "Artistic License" which comes with this module. -* -* This program is distributed in the hope that it will be -* useful, but WITHOUT ANY WARRANTY; without even the implied -* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -* PURPOSE. See either the GNU General Public License or the -* Artistic License for more details. -* -* You should have received a copy of the Artistic License -* with this module, in the file ARTISTIC. If not, I'll be -* glad to provide one. 
-* -* You should have received a copy of the GNU General Public -* License along with this program; if not, write to the Free -* Software Foundation, Inc., 59 Temple Place, Suite 330, -* Boston, MA 02111-1307 USA -*/ - - -package HTML.Tmpl; - -/** - * Pre-parse filters for HTML.Template templates. - *

- * The HTML.Tmpl.Filter interface allows you to write Filters - * for your templates. The filter is called after the template - * is read and before it is parsed. - *

- * You can use a filter to make changes in the template file before - * it is parsed by HTML.Template, so for example, use it to replace - * constants, or to translate your own tags to HTML.Template tags. - *

- * A common usage would be to do what you think you're doing when you - * do <TMPL_INCLUDE file="<TMPL_VAR name="the_file">">: - *

- * myTemplate.tmpl: - *

- *	<TMPL_INCLUDE file="<%the_file%>">
- * 
- *

- * myFilter.java: - *

- *	class myFilter implements HTML.Tmpl.Filter
- *	{
- *		private String myFile;
- *		private int type=SCALAR
- *
- *		public myFilter(String myFile) {
- *			this.myFile = myFile;
- *		}
- *
- *		public int format() {
- *			return this.type;
- *		}
- *
- *		public String parse(String t) {
- *			// replace all <%the_file%> with myFile
- *			return t;
- *		}
- *
- *		public String [] parse(String [] t) {
- *			throw new UnsupportedOperationException();
- *		}
- *	}
- * 
- *

- * myClass.java: - *

- *	Hashtable params = new Hashtable();
- *	params.put("filename", "myTemplate.tmpl");
- *	params.put("filter", new myFilter("myFile.tmpl"));
- *	Template t = new Template(params);
- * 
- * - * @author Philip S Tellis - * @version 0.0.1 - */ -public interface Filter -{ - /** - * Tells HTML.Template to call the parse(String) method of this filter. - */ - public final static int SCALAR=1; - - /** - * Tells HTML.Template to call the parse(String []) method of this - * filter. - */ - public final static int ARRAY=2; - - /** - * Tells HTML.Template what kind of filter this is. - * Should return either SCALAR or ARRAY to indicate which parse method - * must be called. - * - * @return the values SCALAR or ARRAY indicating which parse method - * is to be called - */ - public int format(); - - /** - * parses the template as a single string, and returns the parsed - * template as a single string. - *

- * Should throw an UnsupportedOperationException if it isn't implemented - * - * @param t a string containing the entire template - * - * @return a string containing the template after you've parsed it - * - * @throws UnsupportedOperationException if this method isn't - * implemented - */ - public String parse(String t); - - /** - * parses the template as an array of strings, and returns the parsed - * template as an array of strings. - *

- * Should throw an UnsupportedOperationException if it isn't implemented - * - * @param t an array of strings containing the template - one line - * at a time - * - * @return an array of strings containing the parsed template - - * one line at a time - * - * @throws UnsupportedOperationException if this method isn't - * implemented - */ - public String [] parse(String [] t); -} - diff --git a/apps/q/java/src/HTML/Tmpl/Parsers/Parser.java b/apps/q/java/src/HTML/Tmpl/Parsers/Parser.java deleted file mode 100644 index 78b9ceff9..000000000 --- a/apps/q/java/src/HTML/Tmpl/Parsers/Parser.java +++ /dev/null @@ -1,392 +0,0 @@ -/* -* HTML.Template: A module for using HTML Templates with java -* -* Copyright (c) 2002 Philip S Tellis (philip.tellis@iname.com) -* -* This module is free software; you can redistribute it -* and/or modify it under the terms of either: -* -* a) the GNU General Public License as published by the Free -* Software Foundation; either version 1, or (at your option) -* any later version, or -* -* b) the "Artistic License" which comes with this module. -* -* This program is distributed in the hope that it will be -* useful, but WITHOUT ANY WARRANTY; without even the implied -* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -* PURPOSE. See either the GNU General Public License or the -* Artistic License for more details. -* -* You should have received a copy of the Artistic License -* with this module, in the file ARTISTIC. If not, I'll be -* glad to provide one. 
-* -* You should have received a copy of the GNU General Public -* License along with this program; if not, write to the Free -* Software Foundation, Inc., 59 Temple Place, Suite 330, -* Boston, MA 02111-1307 USA -*/ - - -package HTML.Tmpl.Parsers; -import java.util.NoSuchElementException; -import java.util.Properties; -import java.util.StringTokenizer; -import java.util.Vector; - -import HTML.Tmpl.Util; -import HTML.Tmpl.Element.Element; -import HTML.Tmpl.Element.If; -import HTML.Tmpl.Element.Loop; -import HTML.Tmpl.Element.Unless; - -public class Parser -{ - private boolean case_sensitive=false; - private boolean strict=true; - private boolean loop_context_vars=false; - private boolean global_vars=false; - - public Parser() - { - } - - public Parser(String [] args) - throws ArrayIndexOutOfBoundsException, - IllegalArgumentException - { - if(args.length%2 != 0) - throw new ArrayIndexOutOfBoundsException("odd number of arguments passed"); - - for(int i=0; i is not allowed inside a template tag - // so we can be sure that if this is a - // template tag, it ends with a > - - // add the closing > as well - if(i -1) - { - do { - temp.append(tag.charAt(0)); - tag=new StringBuffer( - tag.toString().substring(1)); - } while(tag.charAt(0) != '<'); - } - - Util.debug_print("tag: " + tag); - - String test_tag = tag.toString().toLowerCase(); - // if it doesn't contain tmpl_ it is not - // a template tag - if(test_tag.indexOf("tmpl_") < 0) { - temp.append(tag); - continue; - } - - // may be a template tag - // check if it starts with tmpl_ - - test_tag = cleanTag(test_tag); - - Util.debug_print("clean: " + test_tag); - - // check if it is a closing tag - if(test_tag.startsWith("/")) - test_tag = test_tag.substring(1); - - // if it still doesn't start with tmpl_ - // then it is not a template tag - if(!test_tag.startsWith("tmpl_")) { - temp.append(tag); - continue; - } - - // now it must be a template tag - String tag_type=getTagType(test_tag); - - if(tag_type == null) { - 
if(strict) - throw new - IllegalArgumentException( - tag.toString()); - else - temp.append(tag); - } - - Util.debug_print("type: " + tag_type); - - // if this was an invalid key and we've - // reached so far, then next iteration - if(tag_type == null) - continue; - - // now, push the previous stuff - // into the Vector - if(temp.length()>0) { - parts.addElement(temp.toString()); - temp = new StringBuffer(); - } - - // it is a valid template tag - // get its properties - - Util.debug_print("Checking: " + tag); - Properties tag_props = - getTagProps(tag.toString()); - - if(tag_props.containsKey("name")) - Util.debug_print("name: " + - tag_props.getProperty("name")); - else - Util.debug_print("no name"); - - parts.addElement(tag_props); - } - } - - if(temp.length()>0) - parts.addElement(temp.toString()); - - return parts; - } - - private String cleanTag(String tag) - throws IllegalArgumentException - { - String test_tag = new String(tag); - // first remove < and > - if(test_tag.startsWith("<")) - test_tag = test_tag.substring(1); - if(test_tag.endsWith(">")) - test_tag = test_tag.substring(0, test_tag.length()-1); - else - throw new IllegalArgumentException("Tags must start " + - "and end on the same line"); - - // remove any leading !-- and trailing - // -- in case of comment style tags - if(test_tag.startsWith("!--")) { - test_tag=test_tag.substring(3); - } - if(test_tag.endsWith("--")) { - test_tag=test_tag.substring(0, test_tag.length()-2); - } - // then leading and trailing spaces - test_tag = test_tag.trim(); - - return test_tag; - } - - private String getTagType(String tag) - { - int sp = tag.indexOf(" "); - String tag_type=""; - if(sp < 0) { - tag_type = tag.toLowerCase(); - } else { - tag_type = tag.substring(0, sp).toLowerCase(); - } - if(tag_type.startsWith("tmpl_")) - tag_type=tag_type.substring(5); - - Util.debug_print("tag_type: " + tag_type); - - if(tag_type.equals("var") || - tag_type.equals("if") || - tag_type.equals("unless") || - 
tag_type.equals("loop") || - tag_type.equals("include") || - tag_type.equals("else")) { - return tag_type; - } else { - return null; - } - } - - private Properties getTagProps(String tag) - throws IllegalArgumentException, - NullPointerException - { - Properties p = new Properties(); - - tag = cleanTag(tag); - - Util.debug_print("clean: " + tag); - - if(tag.startsWith("/")) { - p.put("close", "true"); - tag=tag.substring(1); - } else { - p.put("close", ""); - } - - Util.debug_print("close: " + p.getProperty("close")); - - p.put("type", getTagType(tag)); - - Util.debug_print("type: " + p.getProperty("type")); - - if(p.getProperty("type").equals("else") || - p.getProperty("close").equals("true")) - return p; - - if(p.getProperty("type").equals("var")) - p.put("escape", ""); - - int sp = tag.indexOf(" "); - // if we've got so far, this must succeed - - tag = tag.substring(sp).trim(); - Util.debug_print("checking params: " + tag); - - // now, we should have either name=value pairs - // or name space escape in case of old style vars - - if(tag.indexOf("=") < 0) { - // no = means old style - // first will be var name - // second if any will be escape - - sp = tag.toLowerCase().indexOf(" escape"); - if(sp < 0) { - // no escape - p.put("name", tag); - p.put("escape", "0"); - } else { - tag = tag.substring(0, sp); - p.put("name", tag); - p.put("escape", "html"); - } - } else { - // = means name=value pairs. 
- // use a StringTokenizer - StringTokenizer st = new StringTokenizer(tag, " ="); - while(st.hasMoreTokens()) { - String key, value; - key = st.nextToken().toLowerCase(); - if(st.hasMoreTokens()) - value = st.nextToken(); - else if(key.equals("escape")) - value = "html"; - else - throw new NullPointerException( - "parameter " + key + " has no value"); - - if(value.startsWith("\"") && - value.endsWith("\"")) - value = value.substring(1, - value.length()-1); - else if(value.startsWith("'") && - value.endsWith("'")) - value = value.substring(1, - value.length()-1); - - if(value.length()==0) - throw new NullPointerException( - "parameter " + key + " has no value"); - - if(key.equals("escape")) - value=value.toLowerCase(); - - p.put(key, value); - } - } - - String name = p.getProperty("name"); - // if not case sensitive, and not special variable, flatten case - // never flatten case for includes - if(!case_sensitive && !p.getProperty("type").equals("include") - && !( name.startsWith("__") && name.endsWith("__") )) - { - p.put("name", name.toLowerCase()); - } - - if(!Util.isNameChar(name)) - throw new IllegalArgumentException( - "parameter name may only contain " + - "letters, digits, ., /, +, -, _"); - // __var__ is allowed in the template, but not in the - // code. 
this is so that people can reference __FIRST__, - // etc - - return p; - } -} diff --git a/apps/q/java/src/HTML/Tmpl/Util.java b/apps/q/java/src/HTML/Tmpl/Util.java deleted file mode 100644 index 46ad2568b..000000000 --- a/apps/q/java/src/HTML/Tmpl/Util.java +++ /dev/null @@ -1,130 +0,0 @@ -/* -* HTML.Template: A module for using HTML Templates with java -* -* Copyright (c) 2002 Philip S Tellis (philip.tellis@iname.com) -* -* This module is free software; you can redistribute it -* and/or modify it under the terms of either: -* -* a) the GNU General Public License as published by the Free -* Software Foundation; either version 1, or (at your option) -* any later version, or -* -* b) the "Artistic License" which comes with this module. -* -* This program is distributed in the hope that it will be -* useful, but WITHOUT ANY WARRANTY; without even the implied -* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -* PURPOSE. See either the GNU General Public License or the -* Artistic License for more details. -* -* You should have received a copy of the Artistic License -* with this module, in the file ARTISTIC. If not, I'll be -* glad to provide one. 
-* -* You should have received a copy of the GNU General Public -* License along with this program; if not, write to the Free -* Software Foundation, Inc., 59 Temple Place, Suite 330, -* Boston, MA 02111-1307 USA -*/ - - -package HTML.Tmpl; - -public class Util -{ - public static boolean debug=false; - - public static String escapeHTML(String element) - { - String s = new String(element); // don't change the original - String [] metas = {"&", "<", ">", "\""}; - String [] repls = {"&", "<", ">", """}; - for(int i = 0; i < metas.length; i++) { - int pos=0; - do { - pos = s.indexOf(metas[i], pos); - if(pos<0) - break; - - s = s.substring(0, pos) + repls[i] + s.substring(pos+1); - pos++; - } while(pos >= 0); - } - - return s; - } - - public static String escapeURL(String url) - { - StringBuffer s = new StringBuffer(); - String no_escape = "./-_"; - - for(int i=0; i= 0); - } - - return s; - } - - public static boolean isNameChar(char c) - { - return true; - } - - public static boolean isNameChar(String s) - { - String alt_valid = "./+-_"; - - for(int i=0; i"+dest.toBase64()); - - start(); - - } - - /** - * run this EchoServer - */ - public void run() - { - System.out.println("Server: listening on dest:"); - - /** - try { - System.out.println(key.toDestinationBase64()); - } catch (DataFormatException e) { - e.printStackTrace(); - } - */ - - System.out.println(dest.toBase64()); - - while (true) - { - try { - I2PSocket sessSocket = serverSocket.accept(); - - System.out.println("Server: Got connection from client"); - - InputStream socketIn = sessSocket.getInputStream(); - OutputStreamWriter socketOut = new OutputStreamWriter(sessSocket.getOutputStream()); - - System.out.println("Server: created streams"); - - // read a line from input, and echo it back - String line = DataHelper.readLine(socketIn); - - System.out.println("Server: got '" + line + "'"); - - String reply = "EchoServer: got '" + line + "'\n"; - socketOut.write(reply); - socketOut.flush(); - - 
System.out.println("Server: sent trply"); - - sessSocket.close(); - - System.out.println("Server: closed socket"); - - } catch (ConnectException e) { - e.printStackTrace(); - } catch (IOException e) { - e.printStackTrace(); - } catch (I2PException e) { - e.printStackTrace(); - } - - } - - } - - public Destination getDest() throws DataFormatException - { - // return key.toDestination(); - return dest; - } - - public String getDestBase64() throws DataFormatException - { - // return key.toDestinationBase64(); - return dest.toBase64(); - } - - /** - * runs EchoServer from the command shell - */ - public static void main(String [] args) - { - System.out.println("Constructing an EchoServer"); - - try { - EchoServer myServer = new EchoServer(); - System.out.println("Got an EchoServer"); - System.out.println("Here's the dest:"); - System.out.println(myServer.getDestBase64()); - - myServer.run(); - - } catch (I2PException e) { - e.printStackTrace(); - } catch (IOException e) { - e.printStackTrace(); - } - - } -} - diff --git a/apps/q/java/src/net/i2p/aum/EchoTest.java b/apps/q/java/src/net/i2p/aum/EchoTest.java deleted file mode 100644 index 5ba4cf56f..000000000 --- a/apps/q/java/src/net/i2p/aum/EchoTest.java +++ /dev/null @@ -1,51 +0,0 @@ -// runs EchoServer and EchoClient as threads - -package net.i2p.aum; - -import java.io.IOException; - -import net.i2p.I2PException; -import net.i2p.data.Destination; - -/** - * A simple program which runs the EchoServer and EchoClient - * demos as threads - */ - -public class EchoTest -{ - /** - * create one instance each of EchoServer and EchoClient, - * run the server as a thread, run the client in foreground, - * display detailed results - */ - public static void main(String [] args) - { - EchoServer server; - EchoClient client; - - try { - server = new EchoServer(); - Destination serverDest = server.getDest(); - - System.out.println("EchoTest: serverDest=" + serverDest.toBase64()); - - client = new EchoClient(serverDest); - - } catch 
(I2PException e) { - e.printStackTrace(); return; - } catch (IOException e) { - e.printStackTrace(); return; - } - - System.out.println("Starting server..."); - //server.start(); - - System.out.println("Starting client..."); - client.run(); - - } - -} - - diff --git a/apps/q/java/src/net/i2p/aum/EmbargoedQueue.java b/apps/q/java/src/net/i2p/aum/EmbargoedQueue.java deleted file mode 100644 index 384224f7b..000000000 --- a/apps/q/java/src/net/i2p/aum/EmbargoedQueue.java +++ /dev/null @@ -1,322 +0,0 @@ -/* - * SimpleScheduler.java - * - * Created on March 24, 2005, 11:14 PM - */ - -package net.i2p.aum; - -import java.util.Date; -import java.util.Random; -import java.util.Vector; - -/** - *

Implements a queue of objects, where each object is 'embargoed' - * against release until a given time. Threads which attempt to .get - * items from this queue will block if the queue is empty, or if the - * first item of the queue has a 'release time' which has not yet passed.

- * - *

Think of it like a news desk which receives media releases which are - * 'embargoed' till a certain time. These releases sit in a queue, and when - * their embargo expires, they are actioned and go to print or broadcast. - * The reporters at this news desk are the 'threads', which get blocked - * until the next item's embargo expires.

- * - *

Purpose of implementing this is to provide a mechanism for scheduling - * background jobs to be executed at precise times

. - */ -public class EmbargoedQueue extends Thread { - - /** - * items which are waiting for dispatch - stored as 2-element vectors, - * where elem 0 is Integer dispatch time, and elem 1 is the object; - * note that this list is kept in strict ascending order of time. - * Whenever an object becomes ready, it is removed from this queue - * and appended to readyItems - */ - public Vector waitingItems; - - /** - * items which are ready for dispatch (their time has come). - */ - public SimpleQueue readyItems; - - /** set this true to enable verbose debug messages */ - public boolean debug = false; - - /** Creates a new embargoed queue */ - public EmbargoedQueue() { - waitingItems = new Vector(); - readyItems = new SimpleQueue(); - - // fire up scheduler thread - start(); - } - - /** - * fetches the item at head of queue, blocking if queue is empty - */ - public Object get() - { - return readyItems.get(); - } - - /** - * adds a new object to queue without any embargo (or, an embargo that expires - * immediately) - * @param item the object to be added - */ - public synchronized void putNow(Object item) - { - putAfter(0, item); - } - - /** - * adds a new object to queue, embargoed until given number of milliseconds - * have elapsed - * @param delay number of milliseconds from now when embargo expires - * @param item the object to be added - */ - public synchronized void putAfter(long delay, Object item) - { - long now = new Date().getTime(); - putAt(now+delay, item); - } - - /** - * adds a new object to the queue, embargoed until given time - * @param time the unixtime in milliseconds when the object's embargo expires, - * and the object is to be made available - * @param item the object to be added - */ - public synchronized void putAt(long time, Object item) - { - Vector elem = new Vector(); - elem.addElement(new Long(time)); - elem.addElement(item); - - long now = new Date().getTime(); - long future = time - now; - //System.out.println("putAt: time="+time+" 
("+future+"ms from now), job="+item); - - // find where to insert - int i; - int nitems = waitingItems.size(); - for (i = 0; i < nitems; i++) - { - // get item i - Vector itemI = (Vector)waitingItems.get(i); - long timeI = ((Long)(itemI.get(0))).longValue(); - if (time < timeI) - { - // new item earlier than item i, insert here and bust out - waitingItems.insertElementAt(elem, i); - break; - } - } - - // did we insert? - if (i == nitems) - { - // no - gotta append - waitingItems.addElement(elem); - } - - // debugging - if (debug) { - printWaiting(); - } - - // awaken this scheduler object's thread, so it can - // see if any jobs are ready - //notify(); - interrupt(); - } - - /** - * for debugging - prints out a list of waiting items - */ - public synchronized void printWaiting() - { - int i; - long now = new Date().getTime(); - - System.out.println("EmbargoedQueue dump:"); - - System.out.println(" Waiting items:"); - int nwaiting = waitingItems.size(); - for (i = 0; i < nwaiting; i++) - { - Vector item = (Vector)waitingItems.get(i); - long when = ((Long)item.get(0)).longValue(); - Object job = item.get(1); - int delay = (int)(when - now)/1000; - System.out.println(" "+delay+"s, t="+when+", job="+job); - } - - System.out.println(" Ready items:"); - int nready = readyItems.items.size(); - for (i = 0; i < nready; i++) - { - //Vector item = (Vector)readyItems.items.get(i); - Object item = readyItems.items.get(i); - System.out.println(" job="+item); - } - - } - - /** - * scheduling thread, which wakes up every time a new job is queued, and - * if any jobs are ready, transfers them to the readyQueue and notifies - * any waiting client threads - */ - public void run() - { - // monitor the waiting queue, waiting till one becomes ready - while (true) - { - try { - if (waitingItems.size() > 0) - { - // at least 1 waiting item - Vector item = (Vector)(waitingItems.get(0)); - long now = new Date().getTime(); - long then = ((Long)item.get(0)).longValue(); - long delay = then - 
now; - - // ready? - if (delay <= 0) - { - // yep, ready, remove job and stick on waiting queue - waitingItems.remove(0); // ditch from waiting - Object elem = item.get(1); - readyItems.put(elem); // and add to ready - - if (debug) - { - System.out.println("embargo expired on "+elem); - printWaiting(); - } - } - else - { - // not ready, hang about till we get woken, or the - // job becomes ready - if (debug) - { - System.out.println("waiting for "+delay+"ms"); - } - Thread.sleep(delay); - } - } - else - { - // no items yet, hang out for an interrupt - if (debug) - { - System.out.println("queue is empty"); - } - synchronized (this) { - wait(); - } - } - } catch (Exception e) { - //System.out.println("exception"); - if (debug) - { - System.out.println("exception ("+e.getClass().getName()+") "+e.getMessage()); - } - } - } - } - - private static class TestThread extends Thread { - - String id; - - EmbargoedQueue q; - - public TestThread(String id, EmbargoedQueue q) { - this.id = id; - this.q = q; - } - - public void run() { - try { - print("waiting for queue"); - - Object item = q.get(); - - print("got item: '"+item+"'"); - - } catch (Exception e) { - e.printStackTrace(); - return; - } - } - - public void print(String msg) { - System.out.println("thread '"+id+"': "+msg); - } - - } - - /** - * @param args the command line arguments - */ - public static void main(String[] args) { - - int i; - int nthreads = 7; - - Thread [] threads = new Thread[nthreads]; - - EmbargoedQueue q = new EmbargoedQueue(); - SimpleSemaphore threadPool = new SimpleSemaphore(nthreads); - - // populate the queue with some stuff - q.putAfter(10000, "red"); - q.putAfter(3000, "orange"); - q.putAfter(6000, "yellow"); - - // populate threads array - for (i = 0; i < nthreads; i++) { - threads[i] = new TestThread("thread"+i, q); - } - - // and launch the threads - for (i = 0; i < nthreads; i++) { - threads[i].start(); - } - - // wait, presumably till all these elements are actioned - try { - 
Thread.sleep(12000); - } catch (Exception e) { - e.printStackTrace(); - return; - } - - // add some more shit to the queue, randomly scheduled - Random r = new Random(); - String [] items = {"green", "blue", "indigo", "violet", "black", "white", "brown"}; - for (i = 0; i < items.length; i++) { - String item = items[i]; - int delay = 2000 + r.nextInt(8000); - System.out.println("main: adding '"+item+"' after "+delay+"ms ..."); - q.putAfter(delay, item); - } - - // wait, presumably for all jobs to finish - try { - Thread.sleep(12000); - } catch (Exception e) { - e.printStackTrace(); - return; - } - - System.out.println("main: terminating"); - - } - -} diff --git a/apps/q/java/src/net/i2p/aum/I2PCat.java b/apps/q/java/src/net/i2p/aum/I2PCat.java deleted file mode 100644 index 35be1ad12..000000000 --- a/apps/q/java/src/net/i2p/aum/I2PCat.java +++ /dev/null @@ -1,460 +0,0 @@ - -// I2P equivalent of 'netcat' - -package net.i2p.aum; - -import java.io.IOException; -import java.io.InputStream; -import java.io.InterruptedIOException; -import java.io.OutputStream; -import java.io.OutputStreamWriter; -import java.net.ConnectException; -import java.net.NoRouteToHostException; -import java.util.Properties; - -import net.i2p.I2PAppContext; -import net.i2p.I2PException; -import net.i2p.client.naming.HostsTxtNamingService; -import net.i2p.client.streaming.I2PServerSocket; -import net.i2p.client.streaming.I2PSocket; -import net.i2p.client.streaming.I2PSocketManager; -import net.i2p.client.streaming.I2PSocketManagerFactory; -import net.i2p.data.DataFormatException; -import net.i2p.data.DataHelper; -import net.i2p.data.Destination; -import net.i2p.util.Log; - -/** - * A I2P equivalent of the much-beloved 'netcat' utility. - * This command-line utility can either connect to a remote - * destination, or listen on a private destination for incoming - * connections. 
Once a connection is established, input on stdin - * is sent to the remote peer, and anything received from the - * remote peer is printed to stdout - */ - -public class I2PCat extends Thread -{ - public I2PSocketManager socketManager; - public I2PServerSocket serverSocket; - public I2PSocket sessSocket; - - public PrivDestination key; - public Destination dest; - - public InputStream socketIn; - public OutputStream socketOutStream; - public OutputStreamWriter socketOut; - - public SockInput rxThread; - - protected static Log _log; - - public static String defaultHost = "127.0.0.1"; - public static int defaultPort = 7654; - - /** - * a thread for reading from socket and displaying on stdout - */ - private class SockInput extends Thread { - - InputStream _in; - - protected Log _log; - public SockInput(InputStream i) { - - _in = i; - } - - public void run() - { - // the thread portion, receives incoming bytes on - // the socket input stream and spits them to stdout - - byte [] ch = new byte[1]; - - print("Receiver thread listening..."); - - try { - while (true) { - - //String line = DataHelper.readLine(socketIn); - if (_in.read(ch) != 1) { - print("failed to receive from socket"); - break; - } - - //System.out.println(line); - System.out.write(ch, 0, 1); - System.out.flush(); - } - } catch (IOException e) { - e.printStackTrace(); - print("Receiver thread crashed, terminating!!"); - System.exit(1); - } - - } - - - void print(String msg) - { - System.out.println("-=- I2PCat: "+msg); - - if (_log != null) { - _log.debug(msg); - } - - } - - - } - - - public I2PCat() - { - _log = new Log("I2PCat"); - - } - - /** - * Runs I2PCat in server mode, listening on the given destination - * for one incoming connection. Once connection is established, - * copyies data between the remote peer and - * the local terminal console. 
- */ - public void runServer(String keyStr) throws IOException, DataFormatException - { - Properties props = new Properties(); - props.setProperty("inbound.length", "0"); - props.setProperty("outbound.length", "0"); - props.setProperty("inbound.lengthVariance", "0"); - props.setProperty("outbound.lengthVariance", "0"); - - // generate new key if needed - if (keyStr.equals("new")) { - - try { - key = PrivDestination.newKey(); - } catch (I2PException e) { - e.printStackTrace(); - return; - } catch (IOException e) { - e.printStackTrace(); - return; - } - - print("Creating new server dest..."); - - socketManager = I2PSocketManagerFactory.createManager(key.getInputStream(), props); - - print("Getting server socket..."); - - serverSocket = socketManager.getServerSocket(); - - print("Server socket created, ready to run..."); - - dest = socketManager.getSession().getMyDestination(); - - print("private key follows:"); - System.out.println(key.toBase64()); - - print("dest follows:"); - System.out.println(dest.toBase64()); - - } - - else { - - key = PrivDestination.fromBase64String(keyStr); - - String dest64Abbrev = key.toBase64().substring(0, 16); - - print("Creating server socket manager on dest "+dest64Abbrev+"..."); - - socketManager = I2PSocketManagerFactory.createManager(key.getInputStream(), props); - - serverSocket = socketManager.getServerSocket(); - - print("Server socket created, ready to run..."); - } - - print("Awaiting client connection..."); - - I2PSocket sessSocket; - - try { - sessSocket = serverSocket.accept(); - } catch (I2PException e) { - e.printStackTrace(); - return; - } catch (ConnectException e) { - e.printStackTrace(); - return; - } - - print("Got connection from client"); - - chat(sessSocket); - - } - - public void runClient(String destStr) - throws DataFormatException, IOException - { - runClient(destStr, defaultHost, defaultPort); - } - - /** - * runs I2PCat in client mode, connecting to a remote - * destination then copying data between the 
remote peer and - * the local terminal console - */ - public void runClient(String destStr, String host, int port) - throws DataFormatException, IOException - { - // accept 'file:' prefix - if (destStr.startsWith("file:", 0)) - { - String path = destStr.substring(5); - destStr = new SimpleFile(path, "r").read(); - } - - else if (destStr.length() < 255) { - // attempt hosts file lookup - I2PAppContext ctx = new I2PAppContext(); - HostsTxtNamingService h = new HostsTxtNamingService(ctx); - Destination dest1 = h.lookup(destStr); - if (dest1 == null) { - usage("Cannot resolve hostname: '"+destStr+"'"); - } - - // successful lookup - runClient(dest1, host, port); - } - - else { - // otherwise, bigger strings are assumed to be base64 dests - - Destination dest = new Destination(); - dest.fromBase64(destStr); - runClient(dest, host, port); - } - } - - public void runClient(Destination dest) { - runClient(dest, "127.0.0.1", 7654); - } - - /** - * An alternative constructor which accepts an I2P Destination object - */ - public void runClient(Destination dest, String host, int port) - { - this.dest = dest; - - String destAbbrev = dest.toBase64().substring(0, 16)+"..."; - - print("Connecting via i2cp "+host+":"+port+" to destination "+destAbbrev+"..."); - System.out.flush(); - - try { - // get a socket manager - socketManager = I2PSocketManagerFactory.createManager(host, port); - - // get a client socket - print("socketManager="+socketManager); - - sessSocket = socketManager.connect(dest); - - } catch (I2PException e) { - e.printStackTrace(); - return; - } catch (ConnectException e) { - e.printStackTrace(); - return; - } catch (NoRouteToHostException e) { - e.printStackTrace(); - return; - } catch (InterruptedIOException e) { - e.printStackTrace(); - return; - } - - print("Successfully connected!"); - print("(Press Control-C to quit)"); - - // Perform console interaction - chat(sessSocket); - - try { - sessSocket.close(); - - } catch (IOException e) { - e.printStackTrace(); - 
return; - } - } - - /** - * Launch the background thread to copy incoming data to stdout, then - * loop in foreground copying lines from stdin and sending them to remote peer - */ - public void chat(I2PSocket sessSocket) { - - try { - socketIn = sessSocket.getInputStream(); - socketOutStream = sessSocket.getOutputStream(); - socketOut = new OutputStreamWriter(socketOutStream); - - // launch receiver thread - start(); - //launchRx(); - - while (true) { - - String line = DataHelper.readLine(System.in); - print("sent: '"+line+"'"); - - socketOut.write(line+"\n"); - socketOut.flush(); - } - } catch (IOException e) { - e.printStackTrace(); - return; - } - - } - - /** - * executes in a thread, receiving incoming bytes on - * the socket input stream and spitting them to stdout - */ - public void run() - { - - byte [] ch = new byte[1]; - - print("Receiver thread listening..."); - - try { - while (true) { - - //String line = DataHelper.readLine(socketIn); - if (socketIn.read(ch) != 1) { - print("failed to receive from socket"); - break; - } - - //System.out.println(line); - System.out.write(ch, 0, 1); - System.out.flush(); - } - } catch (IOException e) { - e.printStackTrace(); - print("Receiver thread crashed, terminating!!"); - System.exit(1); - } - - } - - - public void launchRx() { - - rxThread = new SockInput(socketIn); - rxThread.start(); - - } - - static void print(String msg) - { - System.out.println("-=- I2PCat: "+msg); - - if (_log != null) { - _log.debug(msg); - } - - } - - public static void usage(String msg) - { - usage(msg, 1); - } - - public static void usage(String msg, int ret) - { - System.out.println(msg); - usage(ret); - } - - public static void usage(int ret) - { - System.out.print( - "This utility is an I2P equivalent of the standard *nix 'netcat' utility\n"+ - "usage:\n"+ - " net.i2p.aum.I2PCat [-h]\n"+ - " - display this help\n"+ - " net.i2p.aum.I2PCat dest [host [port]]\n"+ - " - run in client mode, 'dest' should be one of:\n"+ - " hostname.i2p - an 
I2P hostname listed in hosts.txt\n"+ - " (only works with a hosts.txt in current directory)\n"+ - " base64dest - a full base64 destination string\n"+ - " file:b64filename - filename of a file containing base64 dest\n"+ - " net.i2p.aum.I2PCat -l privkey\n"+ - " - run in server mode, 'key' should be one of:\n"+ - " base64privkey - a full base64 private key string\n"+ - " file:b64filename - filename of a file containing base64 privkey\n"+ - "\n" - ); - System.exit(ret); - } - - public static void main(String [] args) throws IOException, DataFormatException - { - int argc = args.length; - - // barf if no args - if (argc == 0) { - usage("Missing argument"); - } - - // show help on request - if (args[0].equals("-h") || args[0].equals("--help")) { - usage(0); - } - - // server or client? - if (args[0].equals("-l")) { - if (argc != 2) { - usage("Bad argument count"); - } - - new I2PCat().runServer(args[1]); - } - else { - // client mode - barf if not 1-3 args - if (argc < 1 || argc > 3) { - usage("Bad argument count"); - } - - try { - int port = defaultPort; - String host = defaultHost; - if (args.length > 1) { - host = args[1]; - if (args.length > 2) { - port = new Integer(args[2]).intValue(); - } - } - new I2PCat().runClient(args[0], host, port); - - } catch (DataFormatException e) { - e.printStackTrace(); - } - } - } - -} - - - diff --git a/apps/q/java/src/net/i2p/aum/I2PSocketHelper.java b/apps/q/java/src/net/i2p/aum/I2PSocketHelper.java deleted file mode 100644 index bc3570c5b..000000000 --- a/apps/q/java/src/net/i2p/aum/I2PSocketHelper.java +++ /dev/null @@ -1,15 +0,0 @@ - -package net.i2p.aum; - - -/** - * Class which wraps an I2PSocket object with convenient methods. - * Nothing presently implemented here. 
- */ - -public class I2PSocketHelper -{ - -} - - diff --git a/apps/q/java/src/net/i2p/aum/I2PTunnelXMLObject.java b/apps/q/java/src/net/i2p/aum/I2PTunnelXMLObject.java deleted file mode 100644 index 7fa823734..000000000 --- a/apps/q/java/src/net/i2p/aum/I2PTunnelXMLObject.java +++ /dev/null @@ -1,138 +0,0 @@ -package net.i2p.aum; - -import java.util.Hashtable; - -import net.i2p.i2ptunnel.I2PTunnelXMLWrapper; - -/** - * Defines the I2P tunnel management methods which will be - * exposed to XML-RPC clients - * Methods in this class are forwarded to an I2PTunnelXMLWrapper object - */ -public class I2PTunnelXMLObject -{ - protected I2PTunnelXMLWrapper tunmgr; - - /** - * Builds the interface object. You normally shouldn't have to - * instantiate this directly - leave it to I2PTunnelXMLServer - */ - public I2PTunnelXMLObject() - { - tunmgr = new I2PTunnelXMLWrapper(); - } - - /** - * Generates an I2P keypair, returning a dict with keys 'result' (usually 'ok'), - * priv' (private key as base64) and 'dest' (destination as base64) - */ - public Hashtable genkeys() - { - return tunmgr.xmlrpcGenkeys(); - } - - /** - * Get a list of active TCP tunnels currently being managed by this - * tunnel manager. - * @return a dict with keys 'status' (usually 'ok'), - * 'jobs' (a list of dicts representing each job, each with keys 'job' (int, job - * number), 'type' (string, 'server' or 'client'), port' (int, the port number). - * Also for server, keys 'host' (hostname, string) and 'ip' (IP address, string). - * For clients, key 'dest' (string, remote destination as base64). - */ - public Hashtable list() - { - return tunmgr.xmlrpcList(); - } - - /** - * Attempts to find I2P hostname in hosts.txt. - * @param hostname string, I2P hostname - * @return dict with keys 'status' ('ok' or 'fail'), - * and if successful lookup, 'dest' (base64 destination). 
- */ - public Hashtable lookup(String hostname) - { - return tunmgr.xmlrpcLookup(hostname); - } - - /** - * Attempt to open client tunnel - * @param port local port to listen on, int - * @param dest remote dest to tunnel to, base64 string - * @return dict with keys 'status' (string - 'ok' or 'fail'). - * If 'ok', also key 'result' with text output from tunnelmgr - */ - public Hashtable client(int port, String dest) - { - return tunmgr.xmlrpcClient(port, dest); - } - - /** - * Attempts to open server tunnel - * @param host TCP hostname of TCP server to tunnel to - * @param port number of TCP server - * @param key - base64 private key to receive I2P connections on - * @return dict with keys 'status' (string, 'ok' or 'fail'). - * if 'fail', also a key 'error' with explanatory text. - */ - public Hashtable server(String host, int port, String key) - { - return tunmgr.xmlrpcServer(host, port, key); - } - - /** - * Close an existing tunnel - * @param jobnum (int) job number of connection to close - * @return dict with keys 'status' (string, 'ok' or 'fail') - */ - public Hashtable close(int jobnum) - { - return tunmgr.xmlrpcClose(jobnum); - } - - /** - * Close an existing tunnel - * @param jobnum (string) job number of connection to close as string, - * 'all' to close all jobs. 
- * @return dict with keys 'status' (string, 'ok' or 'fail') - */ - public Hashtable close(String job) - { - return tunmgr.xmlrpcClose(job); - } - - /** - * Close zero or more tunnels matching given criteria - * @param criteria A dict containing zero or more of the keys: - * 'job' (job number), 'type' (string, 'server' or 'client'), - * 'host' (hostname), 'port' (port number), - * 'ip' (IP address), 'dest' (string, remote dest) - */ - public Hashtable close(Hashtable criteria) - { - return tunmgr.xmlrpcClose(criteria); - } - - /** - * simple method to help with debugging your client prog - * @param x an int - * @return x + 1 - */ - public int bar(int x) - { - System.out.println("foo invoked"); - return x + 1; - } - - /** - * as for bar(int), but returns zero if no arg given - */ - public int bar() - { - return bar(0); - } - -} - - diff --git a/apps/q/java/src/net/i2p/aum/I2PTunnelXMLServer.java b/apps/q/java/src/net/i2p/aum/I2PTunnelXMLServer.java deleted file mode 100644 index 7c9ebbd0d..000000000 --- a/apps/q/java/src/net/i2p/aum/I2PTunnelXMLServer.java +++ /dev/null @@ -1,63 +0,0 @@ - -package net.i2p.aum; - -import org.apache.xmlrpc.WebServer; - -/** - * Provides a means for programs in any language to dynamically manage - * their own I2P <-> TCP tunnels, via simple TCP XML-RPC function calls. - * This server is presently hardwired to listen on port 22322. 
- */ - -public class I2PTunnelXMLServer -{ - protected WebServer ws; - protected I2PTunnelXMLObject tunobj; - - public int port = 22322; - - // constructor - - public void _init() - { - ws = new WebServer(port); - tunobj = new I2PTunnelXMLObject(); - ws.addHandler("i2p.tunnel", tunobj); - - } - - - // default constructor - public I2PTunnelXMLServer() - { - super(); - _init(); - } - - // constructor which takes shell args - public I2PTunnelXMLServer(String args[]) - { - super(); - _init(); - } - - // run the server - public void run() - { - ws.start(); - System.out.println("I2PTunnel XML-RPC server listening on port "+port); - ws.run(); - - } - - public static void main(String args[]) - { - I2PTunnelXMLServer tun; - - tun = new I2PTunnelXMLServer(); - tun.run(); - } - -} - - diff --git a/apps/q/java/src/net/i2p/aum/I2PXmlRpcClient.java b/apps/q/java/src/net/i2p/aum/I2PXmlRpcClient.java deleted file mode 100644 index fdf650897..000000000 --- a/apps/q/java/src/net/i2p/aum/I2PXmlRpcClient.java +++ /dev/null @@ -1,65 +0,0 @@ - -package net.i2p.aum; - -import java.net.MalformedURLException; -import java.net.URL; - -import net.i2p.data.Destination; -import net.i2p.util.Log; - -import org.apache.xmlrpc.XmlRpcClient; - - -/** - * an object which is used to invoke methods on remote I2P XML-RPC - * servers. You should not instantiate these objects directly, but - * create them through - * {@link net.i2p.aum.I2PXmlRpcClientFactory#newClient(Destination) I2PXmlRpcClientFactory.newClient()} - * Note that this is really just a thin wrapper around XmlRpcClient, mostly for reasons - * of consistency with I2PXmlRpcServer[Factory]. - */ - -public class I2PXmlRpcClient extends XmlRpcClient -{ - public static boolean debug = false; - - protected static Log _log; - - /** - * Construct an I2P XML-RPC client with this URL. 
- * Note that you should not - * use this constructor directly - use I2PXmlRpcClientFactory.newClient() instead - */ - public I2PXmlRpcClient(URL url) - { - super(url); - _log = new Log("I2PXmlRpcClient"); - - } - - /** - * Construct a XML-RPC client for the URL represented by this String. - * Note that you should not - * use this constructor directly - use I2PXmlRpcClientFactory.newClient() instead - */ - public I2PXmlRpcClient(String url) throws MalformedURLException - { - super(url); - _log = new Log("I2PXmlRpcClientFactory"); - - } - - /** - * Construct a XML-RPC client for the specified hostname and port. - * Note that you should not - * use this constructor directly - use I2PXmlRpcClientFactory.newClient() instead - */ - public I2PXmlRpcClient(String hostname, int port) throws MalformedURLException - { - super(hostname, port); - _log = new Log("I2PXmlRpcClient"); - - } - -} - diff --git a/apps/q/java/src/net/i2p/aum/I2PXmlRpcClientFactory.java b/apps/q/java/src/net/i2p/aum/I2PXmlRpcClientFactory.java deleted file mode 100644 index 16edc157e..000000000 --- a/apps/q/java/src/net/i2p/aum/I2PXmlRpcClientFactory.java +++ /dev/null @@ -1,226 +0,0 @@ - -package net.i2p.aum; - -import java.net.MalformedURLException; -import java.net.URL; -import java.util.Properties; -import java.util.Vector; - -import net.i2p.data.DataFormatException; -import net.i2p.data.Destination; -import net.i2p.util.Log; - -import org.apache.xmlrpc.XmlRpcClient; - - -/** - * Creates I2P XML-RPC client objects, which you can use - * to issue XML-RPC function calls over I2P. - * Instantiating this class causes the vm-wide http proxy system - * properties to be set to the address of the I2P eepProxy host/port. - * I2PXmlRpcClient objects need to communicate with the I2P - * eepProxy. If your eepProxy is at the standard localhost:4444 address, - * you can use the default constructor. 
Otherwise, you can set this - * eepProxy address by either (1) passing eepProxy hostname/port to the - * constructor, or (2) running the jvm with 'eepproxy.tcp.host' and - * 'eepproxy.tcp.port' system properties set. Note that (1) takes precedence. - * Failure to set up EepProxy host/port correctly will result in an IOException - * when you invoke .execute() on your client objects. - * Invoke this class from your shell to see a demo - */ - -public class I2PXmlRpcClientFactory -{ - public static boolean debug = false; - - public static String _defaultEepHost = "127.0.0.1"; - public static int _defaultEepPort = 4444; - - protected static Log _log; - - /** - * Create an I2P XML-RPC client factory, and set it to create - * clients of a given class. - * @param clientClass a class to use when creating new clients - */ - public I2PXmlRpcClientFactory() - { - this(null, 0); - } - - /** - * Create an I2P XML-RPC client factory, and set it to create - * clients of a given class, and dispatch calls through a non-standard - * eepProxy. - * @param eepHost the eepProxy TCP hostname - * @param eepPort the eepProxy TCP port number - */ - public I2PXmlRpcClientFactory(String eepHost, int eepPort) - { - String eepPortStr; - - _log = new Log("I2PXmlRpcClientFactory"); - _log.shouldLog(Log.DEBUG); - - Properties p = System.getProperties(); - - // determine what actual eepproxy host/port we're using - if (eepHost == null) { - eepHost = p.getProperty("eepproxy.tcp.host", _defaultEepHost); - } - if (eepPort > 0) { - eepPortStr = String.valueOf(eepPort); - } - else { - eepPortStr = p.getProperty("eepproxy.tcp.port"); - if (eepPortStr == null) { - eepPortStr = String.valueOf(_defaultEepPort); - } - } - - p.put("proxySet", "true"); - p.put("http.proxyHost", eepHost); - p.put("http.proxyPort", eepPortStr); - } - - /** - * Create an I2P XML-RPC client object, which is subsequently used for - * dispatching XML-RPC requests. 
- * @param dest - an I2P destination object, comprising the - * destination of the remote - * I2P XML-RPC server. - * @return a new XmlRpcClient object (refer org.apache.xmlrpc.XmlRpcClient). - */ - public I2PXmlRpcClient newClient(Destination dest) throws MalformedURLException { - - return newClient(new URL("http", "i2p/"+dest.toBase64(), "/")); - } - - /** - * Create an I2P XML-RPC client object, which is subsequently used for - * dispatching XML-RPC requests. - * @param hostOrDest - an I2P hostname (listed in hosts.txt) or a - * destination base64 string, for the remote I2P XML-RPC server - * @return a new XmlRpcClient object (refer org.apache.xmlrpc.XmlRpcClient). - */ - public I2PXmlRpcClient newClient(String hostOrDest) - throws DataFormatException, MalformedURLException - { - String hostname; - URL u; - - try { - // try to make a dest out of the string - Destination dest = new Destination(); - dest.fromBase64(hostOrDest); - - // converted ok, treat as valid dest, form i2p/blahblah url from it - I2PXmlRpcClient client = newClient(new URL("http", "i2p/"+hostOrDest, "/")); - client.debug = debug; - return client; - - } catch (DataFormatException e) { - - if (debug) { - e.printStackTrace(); - print("hostOrDest length="+hostOrDest.length()); - } - - // failed to load up a dest, test length - if (hostOrDest.length() < 255) { - // short-ish, assume a hostname - u = new URL("http", hostOrDest, "/"); - I2PXmlRpcClient client = newClient(u); - client.debug = debug; - return client; - } - else { - // too long for a host, barf - throw new DataFormatException("Bad I2P hostname/dest:\n"+hostOrDest); - } - } - } - - /** - * Create an I2P XML-RPC client object, which is subsequently used for - * dispatching XML-RPC requests. This method is not recommended. 
- * @param u - a URL object, containing the URL of the remote - * I2P XML-RPC server, for example, "http://xmlrpc.aum.i2p" (assuming - * there's a hosts.txt entry for 'xmlrpc.aum.i2p'), or - * "http://i2p/base64destblahblah...". Note that if you use this method - * directly, the created XML-RPC client object will ONLY work if you - * instantiate the URL object as 'new URL("http", "i2p/"+host-or-dest, "/")'. - */ - protected I2PXmlRpcClient newClient(URL u) - { - Object [] args = { u }; - //return new I2PXmlRpcClient(u); - - // construct and return a client object of required class - return new I2PXmlRpcClient(u); - } - - /** - * Runs a demo of an I2P XML-RPC client. Assumes you have already - * launched an I2PXmlRpcServerFactory demo, because it gets its - * dest from the file 'demo.dest64' created by I2PXmlRpcServerFactory demo. - * - * Ensure you have first launched net.i2p.aum.I2PXmlRpcServerFactory - * from your command line. - */ - public static void main(String [] args) { - - String destStr; - - debug = true; - - try { - print("Creating client factory..."); - - I2PXmlRpcClientFactory f = new I2PXmlRpcClientFactory(); - - print("Creating new client..."); - - if (args.length == 0) { - print("Reading dest from demo.dest64"); - destStr = new SimpleFile("demo.dest64", "r").read(); - } - else { - destStr = args[0]; - } - - XmlRpcClient c = f.newClient(destStr); - - print("Invoking foo..."); - - Vector v = new Vector(); - v.add("one"); - v.add("two"); - - Object res = c.execute("foo.bar", v); - - print("Got back object: " + res); - - } catch (Exception e) { - e.printStackTrace(); - } - - } - /** - * Used for internal debugging - */ - protected static void print(String msg) - { - if (debug) { - System.out.println("I2PXmlRpcClient: " + msg); - - if (_log != null) { - System.out.println("LOGGING SOME SHIT"); - _log.debug(msg); - } - } - } -} - - - diff --git a/apps/q/java/src/net/i2p/aum/I2PXmlRpcDemoClass.java b/apps/q/java/src/net/i2p/aum/I2PXmlRpcDemoClass.java 
deleted file mode 100644 index a8de8e791..000000000 --- a/apps/q/java/src/net/i2p/aum/I2PXmlRpcDemoClass.java +++ /dev/null @@ -1,21 +0,0 @@ - -package net.i2p.aum; - - -/** - * A simple class providing callable xmlrpc server methods, gets linked in to - * the server demo. - */ -public class I2PXmlRpcDemoClass -{ - public int add1(int n) { - return n + 1; - } - - public String bar(String arg1, String arg2) { - System.out.println("Demo: got hit to bar: arg1='"+arg1+"', arg2='"+arg2+"'"); - return "I2P demo xmlrpc server(foo.bar): arg1='"+arg1+"', arg2='"+arg2+"'"; - } - -} - diff --git a/apps/q/java/src/net/i2p/aum/I2PXmlRpcServer.java b/apps/q/java/src/net/i2p/aum/I2PXmlRpcServer.java deleted file mode 100644 index 82854b00f..000000000 --- a/apps/q/java/src/net/i2p/aum/I2PXmlRpcServer.java +++ /dev/null @@ -1,433 +0,0 @@ -package net.i2p.aum; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStreamWriter; -import java.util.Date; -import java.util.Properties; - -import net.i2p.I2PAppContext; -import net.i2p.I2PException; -import net.i2p.client.streaming.I2PServerSocket; -import net.i2p.client.streaming.I2PSocket; -import net.i2p.client.streaming.I2PSocketManager; -import net.i2p.client.streaming.I2PSocketManagerFactory; -import net.i2p.data.DataFormatException; -import net.i2p.data.Destination; - -import org.apache.xmlrpc.XmlRpcServer; - - -/** - * An XML-RPC server which works completely within I2P, listening - * on a dest for requests. - * You should not instantiate this class directly, but instead create - * an I2PXmlRpcServerFactory object, and use its .newServer() method - * to create a server object. 
- */ -public class I2PXmlRpcServer extends XmlRpcServer implements Runnable -{ - public class I2PXmlRpcServerWorkerThread extends Thread { - - I2PSocket _sock; - - public I2PXmlRpcServerWorkerThread(I2PSocket sock) { - _sock = sock; - } - - public void run() { - - try { - System.out.println("I2PXmlRpcServer.run: got inbound XML-RPC I2P conn"); - - log.info("run: Got client connection, creating streams"); - - InputStream socketIn = _sock.getInputStream(); - OutputStreamWriter socketOut = new OutputStreamWriter(_sock.getOutputStream()); - - log.info("run: reading http headers"); - - // read headers, determine size of req - int size = readHttpHeaders(socketIn); - - if (size <= 0) { - // bad news - log.info("read req failed, terminating session"); - _sock.close(); - return; - } - - log.info("run: reading request body of "+size+" bytes"); - - // get raw request body - byte [] reqBody = new byte[size]; - for (int i=0; i mimetypes - */ - -public class Mimetypes -{ - public static String [][] _map = { - - { ".bz2", "application/x-bzip2" }, - { ".csm", "application/cu-seeme" }, - { ".cu", "application/cu-seeme" }, - { ".tsp", "application/dsptype" }, - { ".xls", "application/excel" }, - { ".spl", "application/futuresplash" }, - { ".hqx", "application/mac-binhex40" }, - { ".doc", "application/msword" }, - { ".dot", "application/msword" }, - { ".bin", "application/octet-stream" }, - { ".oda", "application/oda" }, - { ".pdf", "application/pdf" }, - { ".asc", "application/pgp-keys" }, - { ".pgp", "application/pgp-signature" }, - { ".ps", "application/postscript" }, - { ".ai", "application/postscript" }, - { ".eps", "application/postscript" }, - { ".ppt", "application/powerpoint" }, - { ".rtf", "application/rtf" }, - { ".wp5", "application/wordperfect5.1" }, - { ".zip", "application/zip" }, - { ".wk", "application/x-123" }, - { ".bcpio", "application/x-bcpio" }, - { ".pgn", "application/x-chess-pgn" }, - { ".cpio", "application/x-cpio" }, - { ".deb", 
"application/x-debian-package" }, - { ".dcr", "application/x-director" }, - { ".dir", "application/x-director" }, - { ".dxr", "application/x-director" }, - { ".dvi", "application/x-dvi" }, - { ".pfa", "application/x-font" }, - { ".pfb", "application/x-font" }, - { ".gsf", "application/x-font" }, - { ".pcf", "application/x-font" }, - { ".pcf.Z", "application/x-font" }, - { ".gtar", "application/x-gtar" }, - { ".tgz", "application/x-gtar" }, - { ".hdf", "application/x-hdf" }, - { ".phtml", "application/x-httpd-php" }, - { ".pht", "application/x-httpd-php" }, - { ".php", "application/x-httpd-php" }, - { ".php3", "application/x-httpd-php3" }, - { ".phps", "application/x-httpd-php3-source" }, - { ".php3p", "application/x-httpd-php3-preprocessed" }, - { ".class", "application/x-java" }, - { ".latex", "application/x-latex" }, - { ".frm", "application/x-maker" }, - { ".maker", "application/x-maker" }, - { ".frame", "application/x-maker" }, - { ".fm", "application/x-maker" }, - { ".fb", "application/x-maker" }, - { ".book", "application/x-maker" }, - { ".fbdoc", "application/x-maker" }, - { ".mif", "application/x-mif" }, - { ".nc", "application/x-netcdf" }, - { ".cdf", "application/x-netcdf" }, - { ".pac", "application/x-ns-proxy-autoconfig" }, - { ".o", "application/x-object" }, - { ".pl", "application/x-perl" }, - { ".pm", "application/x-perl" }, - { ".shar", "application/x-shar" }, - { ".swf", "application/x-shockwave-flash" }, - { ".swfl", "application/x-shockwave-flash" }, - { ".sit", "application/x-stuffit" }, - { ".sv4cpio", "application/x-sv4cpio" }, - { ".sv4crc", "application/x-sv4crc" }, - { ".tar", "application/x-tar" }, - { ".gf", "application/x-tex-gf" }, - { ".pk", "application/x-tex-pk" }, - { ".PK", "application/x-tex-pk" }, - { ".texinfo", "application/x-texinfo" }, - { ".texi", "application/x-texinfo" }, - { ".~", "application/x-trash" }, - { ".%", "application/x-trash" }, - { ".bak", "application/x-trash" }, - { ".old", "application/x-trash" }, - { 
".sik", "application/x-trash" }, - { ".t", "application/x-troff" }, - { ".tr", "application/x-troff" }, - { ".roff", "application/x-troff" }, - { ".man", "application/x-troff-man" }, - { ".me", "application/x-troff-me" }, - { ".ms", "application/x-troff-ms" }, - { ".ustar", "application/x-ustar" }, - { ".src", "application/x-wais-source" }, - { ".wz", "application/x-wingz" }, - { ".au", "audio/basic" }, - { ".snd", "audio/basic" }, - { ".mid", "audio/midi" }, - { ".midi", "audio/midi" }, - { ".mpga", "audio/mpeg" }, - { ".mpega", "audio/mpeg" }, - { ".mp2", "audio/mpeg" }, - { ".mp3", "audio/mpeg" }, - { ".m3u", "audio/mpegurl" }, - { ".aif", "audio/x-aiff" }, - { ".aiff", "audio/x-aiff" }, - { ".aifc", "audio/x-aiff" }, - { ".gsm", "audio/x-gsm" }, - { ".ra", "audio/x-pn-realaudio" }, - { ".rm", "audio/x-pn-realaudio" }, - { ".ram", "audio/x-pn-realaudio" }, - { ".rpm", "audio/x-pn-realaudio-plugin" }, - { ".wav", "audio/x-wav" }, - { ".gif", "image/gif" }, - { ".ief", "image/ief" }, - { ".jpeg", "image/jpeg" }, - { ".jpg", "image/jpeg" }, - { ".jpe", "image/jpeg" }, - { ".png", "image/png" }, - { ".tiff", "image/tiff" }, - { ".tif", "image/tiff" }, - { ".ras", "image/x-cmu-raster" }, - { ".bmp", "image/x-ms-bmp" }, - { ".pnm", "image/x-portable-anymap" }, - { ".pbm", "image/x-portable-bitmap" }, - { ".pgm", "image/x-portable-graymap" }, - { ".ppm", "image/x-portable-pixmap" }, - { ".rgb", "image/x-rgb" }, - { ".xbm", "image/x-xbitmap" }, - { ".xpm", "image/x-xpixmap" }, - { ".xwd", "image/x-xwindowdump" }, - { ".csv", "text/comma-separated-values" }, - { ".html", "text/html" }, - { ".htm", "text/html" }, - { ".mml", "text/mathml" }, - { ".txt", "text/plain" }, - { ".rtx", "text/richtext" }, - { ".tsv", "text/tab-separated-values" }, - { ".h++", "text/x-c++hdr" }, - { ".hpp", "text/x-c++hdr" }, - { ".hxx", "text/x-c++hdr" }, - { ".hh", "text/x-c++hdr" }, - { ".c++", "text/x-c++src" }, - { ".cpp", "text/x-c++src" }, - { ".cxx", "text/x-c++src" }, - { ".cc", 
"text/x-c++src" }, - { ".h", "text/x-chdr" }, - { ".csh", "text/x-csh" }, - { ".c", "text/x-csrc" }, - { ".java", "text/x-java" }, - { ".moc", "text/x-moc" }, - { ".p", "text/x-pascal" }, - { ".pas", "text/x-pascal" }, - { ".etx", "text/x-setext" }, - { ".sh", "text/x-sh" }, - { ".tcl", "text/x-tcl" }, - { ".tk", "text/x-tcl" }, - { ".tex", "text/x-tex" }, - { ".ltx", "text/x-tex" }, - { ".sty", "text/x-tex" }, - { ".cls", "text/x-tex" }, - { ".vcs", "text/x-vCalendar" }, - { ".vcf", "text/x-vCard" }, - { ".dl", "video/dl" }, - { ".fli", "video/fli" }, - { ".gl", "video/gl" }, - { ".mpeg", "video/mpeg" }, - { ".mpg", "video/mpeg" }, - { ".mpe", "video/mpeg" }, - { ".qt", "video/quicktime" }, - { ".mov", "video/quicktime" }, - { ".asf", "video/x-ms-asf" }, - { ".asx", "video/x-ms-asf" }, - { ".avi", "video/x-msvideo" }, - { ".movie", "video/x-sgi-movie" }, - { ".vrm", "x-world/x-vrml" }, - { ".vrml", "x-world/x-vrml" }, - { ".wrl", "x-world/x-vrml" }, - - }; - - /** - * Attempts to determine a mimetype - * @param path - either a file extension string (containing the - * leading '.') or a full file pathname (in which case, the extension - * will be extracted). - * @return the mimetype that corresponds to the file extension, if the - * file extension is known, or "application/octet-stream" if the - * file extension is not known. - */ - public static String guessType(String path) { - // rip the file extension from the path - // first - split 'directories', and get last part - String [] dirs = path.split("/"); - String filename = dirs[dirs.length-1]; - String [] bits = filename.split("\\."); - String extension = "." 
+ bits[bits.length-1]; - - // default mimetype applied to unknown file extensions - String type = "application/octet-stream"; - - for (int i=0; i<_map.length; i++) { - String [] rec = _map[i]; - if (rec[0].equals(extension)) { - type = rec[1]; - break; - } - } - return type; - } - - /** - * Attempts to guess the file extension corresponding to a given - * mimetype. - * @param type a mimetype string - * @return a file extension commonly used for storing files of this type, - * or defaults to ".bin" if mimetype not known - */ - public static String guessExtension(String type) { - // default extension applied to unknown mimetype - String extension = ".bin"; - for (int i=0; i<_map.length; i++) { - String [] rec = _map[i]; - if (rec[1].equals(type)) { - extension = rec[0]; - break; - } - } - return extension; - } - -} - -/** - -suffix_map = { - '.tgz': '.tar.gz', - '.taz': '.tar.gz', - '.tz': '.tar.gz', - } - -encodings_map = { - '.gz': 'gzip', - '.Z': 'compress', - } - -# Before adding new types, make sure they are either registered with IANA, at -# http://www.isi.edu/in-notes/iana/assignments/media-types -# or extensions, i.e. using the x- prefix - -# If you add to these, please keep them sorted! 
-types_map = { - '.a' : 'application/octet-stream', - '.ai' : 'application/postscript', - '.aif' : 'audio/x-aiff', - '.aifc' : 'audio/x-aiff', - '.aiff' : 'audio/x-aiff', - '.au' : 'audio/basic', - '.avi' : 'video/x-msvideo', - '.bat' : 'text/plain', - '.bcpio' : 'application/x-bcpio', - '.bin' : 'application/octet-stream', - '.bmp' : 'image/x-ms-bmp', - '.c' : 'text/plain', - # Duplicates :( - '.cdf' : 'application/x-cdf', - '.cdf' : 'application/x-netcdf', - '.cpio' : 'application/x-cpio', - '.csh' : 'application/x-csh', - '.css' : 'text/css', - '.dll' : 'application/octet-stream', - '.doc' : 'application/msword', - '.dot' : 'application/msword', - '.dvi' : 'application/x-dvi', - '.eml' : 'message/rfc822', - '.eps' : 'application/postscript', - '.etx' : 'text/x-setext', - '.exe' : 'application/octet-stream', - '.gif' : 'image/gif', - '.gtar' : 'application/x-gtar', - '.h' : 'text/plain', - '.hdf' : 'application/x-hdf', - '.htm' : 'text/html', - '.html' : 'text/html', - '.ief' : 'image/ief', - '.jpe' : 'image/jpeg', - '.jpeg' : 'image/jpeg', - '.jpg' : 'image/jpeg', - '.js' : 'application/x-javascript', - '.ksh' : 'text/plain', - '.latex' : 'application/x-latex', - '.m1v' : 'video/mpeg', - '.man' : 'application/x-troff-man', - '.me' : 'application/x-troff-me', - '.mht' : 'message/rfc822', - '.mhtml' : 'message/rfc822', - '.mif' : 'application/x-mif', - '.mov' : 'video/quicktime', - '.movie' : 'video/x-sgi-movie', - '.mp2' : 'audio/mpeg', - '.mp3' : 'audio/mpeg', - '.mpa' : 'video/mpeg', - '.mpe' : 'video/mpeg', - '.mpeg' : 'video/mpeg', - '.mpg' : 'video/mpeg', - '.ms' : 'application/x-troff-ms', - '.nc' : 'application/x-netcdf', - '.nws' : 'message/rfc822', - '.o' : 'application/octet-stream', - '.obj' : 'application/octet-stream', - '.oda' : 'application/oda', - '.p12' : 'application/x-pkcs12', - '.p7c' : 'application/pkcs7-mime', - '.pbm' : 'image/x-portable-bitmap', - '.pdf' : 'application/pdf', - '.pfx' : 'application/x-pkcs12', - '.pgm' : 
'image/x-portable-graymap', - '.pl' : 'text/plain', - '.png' : 'image/png', - '.pnm' : 'image/x-portable-anymap', - '.pot' : 'application/vnd.ms-powerpoint', - '.ppa' : 'application/vnd.ms-powerpoint', - '.ppm' : 'image/x-portable-pixmap', - '.pps' : 'application/vnd.ms-powerpoint', - '.ppt' : 'application/vnd.ms-powerpoint', - '.ps' : 'application/postscript', - '.pwz' : 'application/vnd.ms-powerpoint', - '.py' : 'text/x-python', - '.pyc' : 'application/x-python-code', - '.pyo' : 'application/x-python-code', - '.qt' : 'video/quicktime', - '.ra' : 'audio/x-pn-realaudio', - '.ram' : 'application/x-pn-realaudio', - '.ras' : 'image/x-cmu-raster', - '.rdf' : 'application/xml', - '.rgb' : 'image/x-rgb', - '.roff' : 'application/x-troff', - '.rtx' : 'text/richtext', - '.sgm' : 'text/x-sgml', - '.sgml' : 'text/x-sgml', - '.sh' : 'application/x-sh', - '.shar' : 'application/x-shar', - '.snd' : 'audio/basic', - '.so' : 'application/octet-stream', - '.src' : 'application/x-wais-source', - '.sv4cpio': 'application/x-sv4cpio', - '.sv4crc' : 'application/x-sv4crc', - '.swf' : 'application/x-shockwave-flash', - '.t' : 'application/x-troff', - '.tar' : 'application/x-tar', - '.tcl' : 'application/x-tcl', - '.tex' : 'application/x-tex', - '.texi' : 'application/x-texinfo', - '.texinfo': 'application/x-texinfo', - '.tif' : 'image/tiff', - '.tiff' : 'image/tiff', - '.tr' : 'application/x-troff', - '.tsv' : 'text/tab-separated-values', - '.txt' : 'text/plain', - '.ustar' : 'application/x-ustar', - '.vcf' : 'text/x-vcard', - '.wav' : 'audio/x-wav', - '.wiz' : 'application/msword', - '.xbm' : 'image/x-xbitmap', - '.xlb' : 'application/vnd.ms-excel', - # Duplicates :( - '.xls' : 'application/excel', - '.xls' : 'application/vnd.ms-excel', - '.xml' : 'text/xml', - '.xpm' : 'image/x-xpixmap', - '.xsl' : 'application/xml', - '.xwd' : 'image/x-xwindowdump', - '.zip' : 'application/zip', - } - -# These are non-standard types, commonly found in the wild. 
They will only -# match if strict=0 flag is given to the API methods. - -# Please sort these too -common_types = { - '.jpg' : 'image/jpg', - '.mid' : 'audio/midi', - '.midi': 'audio/midi', - '.pct' : 'image/pict', - '.pic' : 'image/pict', - '.pict': 'image/pict', - '.rtf' : 'application/rtf', - '.xul' : 'text/xul' - } -**/ - diff --git a/apps/q/java/src/net/i2p/aum/OOTest.java b/apps/q/java/src/net/i2p/aum/OOTest.java deleted file mode 100644 index f7a6fffed..000000000 --- a/apps/q/java/src/net/i2p/aum/OOTest.java +++ /dev/null @@ -1,18 +0,0 @@ -package net.i2p.aum; - - -public class OOTest -{ - public int add(int a, int b) - { - return (a + b); - } - - public static void main(String[] args) - { - OOTest mytest = new OOTest(); - System.out.println(mytest.add(3,3)); - } -} - - diff --git a/apps/q/java/src/net/i2p/aum/PrivDestination.java b/apps/q/java/src/net/i2p/aum/PrivDestination.java deleted file mode 100644 index 2d09bb0af..000000000 --- a/apps/q/java/src/net/i2p/aum/PrivDestination.java +++ /dev/null @@ -1,236 +0,0 @@ - -package net.i2p.aum; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.FileNotFoundException; -import java.io.IOException; - -import net.i2p.I2PException; -import net.i2p.client.I2PClient; -import net.i2p.client.I2PClientFactory; -import net.i2p.data.Base64; -import net.i2p.data.DataFormatException; -import net.i2p.data.DataStructureImpl; -import net.i2p.data.Destination; -import net.i2p.data.PrivateKey; -import net.i2p.data.PublicKey; -import net.i2p.data.SigningPrivateKey; -import net.i2p.data.SigningPublicKey; -import net.i2p.util.Log; - -/** - * A convenience class for encapsulating and manipulating I2P private keys - */ - -public class PrivDestination - //extends ByteArrayInputStream - extends DataStructureImpl -{ - protected byte [] _bytes; - - protected Destination _dest; - protected PrivateKey _privKey; - protected SigningPrivateKey _signingPrivKey; - - protected static Log _log; - - /** - * 
Create a PrivDestination object. - * In most cases, you'll probably want to skip this constructor, - * and create PrivDestination objects by invoking the desired static methods - * of this class. - * @param raw an array of bytes containing the raw binary private key - */ - public PrivDestination(byte [] raw) throws DataFormatException, IOException - { - //super(raw); - _log = new Log("PrivDestination"); - - _bytes = raw; - readBytes(getInputStream()); - } - - /** - * reconstitutes a PrivDestination from previously exported Base64 - */ - public PrivDestination(String b64) throws DataFormatException, IOException { - this(Base64.decode(b64)); - } - - /** - * generates a new PrivDestination with random keys - */ - public PrivDestination() throws I2PException, IOException - { - I2PClient client = I2PClientFactory.createClient(); - - ByteArrayOutputStream streamOut = new ByteArrayOutputStream(); - - // create a dest - client.createDestination(streamOut); - - _bytes = streamOut.toByteArray(); - readBytes(getInputStream()); - - // construct from the stream - //return new PrivDestination(streamOut.toByteArray()); - } - - /** return the public Destination object for this private dest */ - public Destination getDestination() { - return _dest; - } - - /** return a PublicKey (encryption public key) object for this priv dest */ - public PublicKey getPublicKey() { - return getDestination().getPublicKey(); - } - - /** return a PrivateKey (encryption private key) object for this priv dest */ - public PrivateKey getPrivateKey() { - return _privKey; - } - - /** return a SigningPublicKey object for this priv dest */ - public SigningPublicKey getSigningPublicKey() { - return getDestination().getSigningPublicKey(); - } - - /** return a SigningPrivateKey object for this priv dest */ - public SigningPrivateKey getSigningPrivateKey() { - return _signingPrivKey; - } - - // static methods returning an instance - - /** - * Creates a PrivDestination object - * @param base64 a string containing 
the base64 private key data - * @return a PrivDestination object encapsulating that key - */ - public static PrivDestination fromBase64String(String base64) - throws DataFormatException, IOException - { - return new PrivDestination(Base64.decode(base64)); - } - - /** - * Creates a PrivDestination object, from the base64 key data - * stored in a file. - * @param path the pathname of the file from which to read the base64 private key data - * @return a PrivDestination object encapsulating that key - */ - public static PrivDestination fromBase64File(String path) - throws FileNotFoundException, IOException, DataFormatException - { - return fromBase64String(new SimpleFile(path, "r").read()); - /* - File f = new File(path); - char [] rawchars = new char[(int)(f.length())]; - byte [] rawbytes = new byte[(int)(f.length())]; - FileReader fr = new FileReader(f); - fr.read(rawchars); - String raw64 = new String(rawchars); - return PrivDestination.fromBase64String(raw64); - */ - } - - /** - * Creates a PrivDestination object, from the binary key data - * stored in a file. - * @param path the pathname of the file from which to read the binary private key data - * @return a PrivDestination object encapsulating that key - */ - public static PrivDestination fromBinFile(String path) - throws FileNotFoundException, IOException, DataFormatException - { - byte [] raw = new SimpleFile(path, "r").readBytes(); - return new PrivDestination(raw); - } - - /** - * Generate a new random I2P private key - * @return a PrivDestination object encapsulating that key - */ - public static PrivDestination newKey() throws I2PException, IOException - { - return new PrivDestination(); - } - - public ByteArrayInputStream getInputStream() - { - return new ByteArrayInputStream(_bytes); - } - - /** - * Exports the key's full contents to a string - * @return A base64-format string containing the full contents - * of this private key. 
The string can be used in any subsequent - * call to the .fromBase64String static constructor method. - */ -/* - public String toBase64() - { - return Base64.encode(_bytes); - } -*/ - - /** - * Exports the key's full contents to a byte array - * @return A byte array containing the full contents - * of this private key. - */ -/* - public byte [] toBytes() - { - return _bytes; - } -*/ - - /** - * Converts this key to a public destination. - * @return a standard I2P Destination object containing the - * public portion of this private key. - */ - /* - public Destination toDestination() throws DataFormatException - { - Destination dest = new Destination(); - dest.readBytes(_bytes, 0); - return dest; - } - */ - - /** - * Converts this key to a base64 string representing a public destination - * @return a string containing a base64 representation of the destination - * corresponding to this private key. - */ - public String getDestinationBase64() throws DataFormatException - { - return getDestination().toBase64(); - } - - public void readBytes(java.io.InputStream strm) - throws net.i2p.data.DataFormatException, java.io.IOException - { - _dest = new Destination(); - _privKey = new PrivateKey(); - _signingPrivKey = new SigningPrivateKey(); - - _dest.readBytes(strm); - _privKey.readBytes(strm); - _signingPrivKey.readBytes(strm); - } - - public void writeBytes(java.io.OutputStream outputStream) - throws net.i2p.data.DataFormatException, java.io.IOException - { - _dest.writeBytes(outputStream); - _privKey.writeBytes(outputStream); - _signingPrivKey.writeBytes(outputStream); - } - -} - diff --git a/apps/q/java/src/net/i2p/aum/PropertiesFile.java b/apps/q/java/src/net/i2p/aum/PropertiesFile.java deleted file mode 100644 index 2d1891549..000000000 --- a/apps/q/java/src/net/i2p/aum/PropertiesFile.java +++ /dev/null @@ -1,217 +0,0 @@ -/* - * PropertiesFile.java - * - * Created on 20 March 2005, 19:30 - */ - -package net.i2p.aum; - -import java.io.File; -import 
java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.Enumeration; -import java.util.Hashtable; -import java.util.NoSuchElementException; -import java.util.Properties; - -/** - * builds on Properties with methods to load/save directly to/from file - */ -public class PropertiesFile extends Properties { - - public String _path; - public File _file; - public boolean _fileExists; - - /** - * Creates a new instance of PropertiesFile - * @param path Absolute pathname of file where properties are to be stored - */ - public PropertiesFile(String path) throws IOException { - super(); - _path = path; - _file = new File(path); - _fileExists = _file.isFile(); - - if (_file.canRead()) { - loadFromFile(); - } - } - - /** - * Creates new PropertiesFile, updating its content with the - * keys/values in given hashtable - * @param path absolute pathname where properties file is located in filesystem - * @param h instance of Hashtable (or subclass). 
its content - * will be written to this object (note that string representations of keys/vals - * will be used) - */ - public PropertiesFile(String path, Hashtable h) throws IOException - { - this(path); - Enumeration keys = h.keys(); - Object key; - while (true) - { - try { - key = keys.nextElement(); - } catch (NoSuchElementException e) { - break; - } - setProperty(key.toString(), h.get(key).toString()); - } - } - - /** - * Loads this object from the file - */ - public void loadFromFile() throws IOException, FileNotFoundException { - if (_file.canRead()) { - InputStream fis = new FileInputStream(_file); - load(fis); - } - } - - /** - * Saves this object to the file - */ - public void saveToFile() throws IOException, FileNotFoundException { - - if (!_fileExists) { - _file.createNewFile(); - _fileExists = true; - } - OutputStream fos = new FileOutputStream(_file); - store(fos, null); - } - - /** - * Stores attribute - */ - public Object setProperty(String key, String value) { - Object o = super.setProperty(key, value); - try { - saveToFile(); - } catch (Exception e) { - e.printStackTrace(); - } - return o; - } - - /** - * return a property as an int, fall back on default if not found or invalid - */ - public int getIntProperty(String key, int dflt) { - try { - return new Integer((String)getProperty(key)).intValue(); - } catch (Exception e) { - setIntProperty(key, dflt); - return dflt; - } - } - - /** - * return a property as an int - */ - public int getIntProperty(String key) { - return new Integer((String)getProperty(key)).intValue(); - } - - /** - * set a property as an int - */ - public void setIntProperty(String key, int value) { - setProperty(key, String.valueOf(value)); - } - - /** - * return a property as a long, fall back on default if not found or invalid - */ - public long getIntProperty(String key, long dflt) { - try { - return new Long((String)getProperty(key)).longValue(); - } catch (Exception e) { - setLongProperty(key, dflt); - return dflt; - } - } - 
- /** - * return a property as an int - */ - public long getLongProperty(String key) { - return new Long((String)getProperty(key)).longValue(); - } - - /** - * set a property as an int - */ - public void setLongProperty(String key, long value) { - setProperty(key, String.valueOf(value)); - } - - /** - * return a property as a float - */ - public double getFloatProperty(String key) { - return new Float((String)getProperty(key)).floatValue(); - } - - /** - * return a property as a float, fall back on default if not found or invalid - */ - public double getFloatProperty(String key, float dflt) { - try { - return new Float((String)getProperty(key)).floatValue(); - } catch (Exception e) { - setFloatProperty(key, dflt); - return dflt; - } - } - - /** - * set a property as a float - */ - public void setFloatProperty(String key, float value) { - setProperty(key, String.valueOf(value)); - } - - /** - * return a property as a double - */ - public double getDoubleProperty(String key) { - return new Double((String)getProperty(key)).doubleValue(); - } - - /** - * return a property as a double, fall back on default if not found - */ - public double getDoubleProperty(String key, double dflt) { - try { - return new Double((String)getProperty(key)).doubleValue(); - } catch (Exception e) { - setDoubleProperty(key, dflt); - return dflt; - } - } - - /** - * set a property as a double - */ - public void setDoubleProperty(String key, double value) { - setProperty(key, String.valueOf(value)); - } - - /** - * increment an integer property value - */ - public void incrementIntProperty(String key) { - setIntProperty(key, getIntProperty(key)+1); - } - -} - diff --git a/apps/q/java/src/net/i2p/aum/SimpleFile.java b/apps/q/java/src/net/i2p/aum/SimpleFile.java deleted file mode 100644 index a6f0438de..000000000 --- a/apps/q/java/src/net/i2p/aum/SimpleFile.java +++ /dev/null @@ -1,118 +0,0 @@ -package net.i2p.aum; - -import java.io.File; -import java.io.FileNotFoundException; -import 
java.io.IOException; -import java.io.RandomAccessFile; - -/** - * SimpleFile - subclass of File which adds some python-like - * methods. Cuts out a lot of the red tape involved with reading - * from and writing to files - */ -public class SimpleFile { - - public RandomAccessFile _file; - public String _path; - - public SimpleFile(String path, String mode) throws FileNotFoundException { - - _path = path; - _file = new RandomAccessFile(path, mode); - } - - public byte [] readBytes() throws IOException { - return readBytes((int)_file.length()); - } - - public byte[] readBytes(int n) throws IOException { - byte [] buf = new byte[n]; - _file.readFully(buf); - return buf; - } - - public char [] readChars() throws IOException { - return readChars((int)_file.length()); - } - - public char[] readChars(int n) throws IOException { - char [] buf = new char[n]; - //_file.readFully(buf); - return buf; - } - - /** - * Reads all remaining content from the file - * @return the content as a String - * @throws IOException - */ - public String read() throws IOException { - - return read((int)_file.length()); - } - - /** - * Reads one or more bytes of data from the file - * @return the content as a String - * @throws IOException - */ - public String read(int nbytes) throws IOException { - - return new String(readBytes(nbytes)); - } - - /** - * Writes one or more bytes of data to a file - * @param buf a String containing the data to write - * @return the number of bytes written, as an int - * @throws IOException - */ - public int write(String buf) throws IOException { - - return write(buf.getBytes()); - } - - public int write(byte [] buf) throws IOException { - - _file.write(buf); - return buf.length; - } - - /** - * convenient one-hit write - * @param path pathname of file to write to - * @param buf data to write - */ - public static int write(String path, String buf) throws IOException { - return new SimpleFile(path, "rws").write(buf); - } - - /** - * tests if argument refers to an 
actual file - * @param path pathname to test - * @return true if a file, false if not - */ - public boolean isFile() { - return new File(_path).isFile(); - } - - /** - * tests if argument refers to a directory - * @param path pathname to test - * @return true if a directory, false if not - */ - public boolean isDir() { - return new File(_path).isDirectory(); - } - - /** - * tests if a file or directory exists - * @param path pathname to test - * @return true if exists, or false - */ - public boolean exists() { - return new File(_path).exists(); - } - -} - diff --git a/apps/q/java/src/net/i2p/aum/SimpleFile_old.java b/apps/q/java/src/net/i2p/aum/SimpleFile_old.java deleted file mode 100644 index ac74b58ac..000000000 --- a/apps/q/java/src/net/i2p/aum/SimpleFile_old.java +++ /dev/null @@ -1,121 +0,0 @@ -package net.i2p.aum; - -import java.io.File; -import java.io.FileReader; -import java.io.FileWriter; -import java.io.IOException; - -/** - * SimpleFile - subclass of File which adds some python-like - * methods. 
Cuts out a lot of the red tape involved with reading - * from and writing to files - */ -public class SimpleFile_old extends File { - - public FileReader _reader; - public FileWriter _writer; - - public SimpleFile_old(String path) { - - super(path); - - _reader = null; - _writer = null; - } - - /** - * Reads all remaining content from the file - * @return the content as a String - * @throws IOException - */ - public String read() throws IOException { - - return read((int)length()); - } - - /** - * Reads one or more bytes of data from the file - * @return the content as a String - * @throws IOException - */ - public String read(int nbytes) throws IOException { - - // get a reader, if we don't already have one - if (_reader == null) { - _reader = new FileReader(this); - } - - char [] cbuf = new char[nbytes]; - - int nread = _reader.read(cbuf); - - if (nread == 0) { - return ""; - } - - return new String(cbuf, 0, nread); - - } - - /** - * Writes one or more bytes of data to a file - * @param buf a String containing the data to write - * @return the number of bytes written, as an int - * @throws IOException - */ - public int write(String buf) throws IOException { - - // get a reader, if we don't already have one - if (_writer == null) { - _writer = new FileWriter(this); - } - - _writer.write(buf); - _writer.flush(); - return buf.length(); - } - - public int write(byte [] buf) throws IOException { - - return write(new String(buf)); - } - - /** - * convenient one-hit write - * @param path pathname of file to write to - * @param buf data to write - */ - public static int write(String path, String buf) throws IOException { - SimpleFile_old f = new SimpleFile_old(path); - return f.write(buf); - } - - /** - * tests if argument refers to an actual file - * @param path pathname to test - * @return true if a file, false if not - */ - public static boolean isFile(String path) { - return new File(path).isFile(); - } - - /** - * tests if argument refers to a directory - * @param 
path pathname to test - * @return true if a directory, false if not - */ - public static boolean isDir(String path) { - return new File(path).isDirectory(); - } - - /** - * tests if a file or directory exists - * @param path pathname to test - * @return true if exists, or false - */ - public static boolean exists(String path) { - return new File(path).exists(); - } - -} - diff --git a/apps/q/java/src/net/i2p/aum/SimpleQueue.java b/apps/q/java/src/net/i2p/aum/SimpleQueue.java deleted file mode 100644 index 15da498ec..000000000 --- a/apps/q/java/src/net/i2p/aum/SimpleQueue.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * SimpleQueue.java - * - * Created on March 24, 2005, 11:14 PM - */ - -package net.i2p.aum; - -import java.util.Vector; - -/** - * Implements simething similar to python's 'Queue' class - */ -public class SimpleQueue { - - public Vector items; - - /** Creates a new instance of SimpleQueue */ - public SimpleQueue() { - items = new Vector(); - } - - /** - * fetches the item at head of queue, blocking if queue is empty - */ - public synchronized Object get() - { - while (true) - { - try { - if (items.size() == 0) - wait(); - - // someone has added - Object item = items.get(0); - items.remove(0); - return item; - } catch (Exception e) { - e.printStackTrace(); - } - } - } - - /** - * adds a new object to the queue - */ - public synchronized void put(Object item) - { - items.addElement(item); - notify(); - } - - private static class TestThread extends Thread { - - String id; - - SimpleQueue q; - - public TestThread(String id, SimpleQueue q) { - this.id = id; - this.q = q; - } - - public void run() { - try { - print("waiting for queue"); - - Object item = q.get(); - - print("got item: '"+item+"'"); - - } catch (Exception e) { - e.printStackTrace(); - return; - } - } - - public void print(String msg) { - System.out.println("thread '"+id+"': "+msg); - } - - } - - /** - * @param args the command line arguments - */ - public static void main(String[] args) { - - int 
i; - int nthreads = 7; - - Thread [] threads = new Thread[nthreads]; - - SimpleQueue q = new SimpleQueue(); - - // populate the queue with some stuff - q.put("red"); - q.put("orange"); - q.put("yellow"); - - // populate threads array - for (i = 0; i < nthreads; i++) { - threads[i] = new TestThread("thread"+i, q); - } - - // and launch the threads - for (i = 0; i < nthreads; i++) { - threads[i].start(); - } - - try { - Thread.sleep(3000); - } catch (Exception e) { - e.printStackTrace(); - return; - } - - // wait a bit and see what happens - String [] items = {"green", "blue", "indigo", "violet", "black", "white", "brown"}; - for (i = 0; i < items.length; i++) { - String item = items[i]; - System.out.println("main: adding '"+item+"'..."); - q.put(item); - try { - Thread.sleep(3000); - } catch (Exception e) { - e.printStackTrace(); - return; - } - } - - System.out.println("main: terminating"); - - } - -} diff --git a/apps/q/java/src/net/i2p/aum/SimpleSemaphore.java b/apps/q/java/src/net/i2p/aum/SimpleSemaphore.java deleted file mode 100644 index 83a736acd..000000000 --- a/apps/q/java/src/net/i2p/aum/SimpleSemaphore.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * SimpleSemaphore.java - * - * Created on March 24, 2005, 11:51 PM - */ - -package net.i2p.aum; - -/** - * Simple implementation of semaphores - */ -public class SimpleSemaphore { - - protected int count; - - /** Creates a new instance of SimpleSemaphore */ - public SimpleSemaphore(int size) { - count = size; - } - - public synchronized void acquire() throws InterruptedException - { - if (count == 0) - { - wait(); - } - count -= 1; - } - - public synchronized void release() - { - count += 1; - notify(); - } - - private static class TestThread extends Thread - { - String id; - SimpleSemaphore sem; - - public TestThread(String id, SimpleSemaphore sem) - { - this.id = id; - this.sem = sem; - } - - public void run() - { - try { - print("waiting for semaphore"); - sem.acquire(); - - print("got semaphore"); - - 
Thread.sleep(1000); - - print("releasing semaphore"); - - sem.release(); - - print("terminating"); - - } catch (Exception e) { - e.printStackTrace(); - return; - } - } - - public void print(String msg) { - System.out.println("thread '"+id+"': "+msg); - } - } - - /** - * @param args the command line arguments - */ - public static void main(String[] args) { - - int i; - - Thread [] threads = new Thread[10]; - - SimpleSemaphore sem = new SimpleSemaphore(3); - - // populate threads array - for (i = 0; i < 10; i++) { - threads[i] = new TestThread("thread"+i, sem); - } - - // and launch the threads - for (i = 0; i < 10; i++) { - threads[i].start(); - } - - // wait a bit and see what happens - System.out.println("main: threads launched, waiting 20 secs"); - - try { - Thread.sleep(20000); - } catch (Exception e) { - e.printStackTrace(); - } - - System.out.println("main: terminating"); - - } - -} diff --git a/apps/q/java/src/net/i2p/aum/helloworld.java b/apps/q/java/src/net/i2p/aum/helloworld.java deleted file mode 100644 index 2a8ce30e9..000000000 --- a/apps/q/java/src/net/i2p/aum/helloworld.java +++ /dev/null @@ -1,17 +0,0 @@ - -public class helloworld -{ - public static void main(String [] args) - { - helloworld h = new helloworld(); - h.greet(); - } - - public void greet() - { - System.out.println("Hi, this is your greeting"); - } -} - - - diff --git a/apps/q/java/src/net/i2p/aum/http/HtmlPage.java b/apps/q/java/src/net/i2p/aum/http/HtmlPage.java deleted file mode 100644 index 67ede7207..000000000 --- a/apps/q/java/src/net/i2p/aum/http/HtmlPage.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * HtmlPage.java - * - * Created on April 8, 2005, 8:22 PM - */ - -package net.i2p.aum.http; - -import java.util.Enumeration; - -import net.i2p.aum.DupHashtable; - -/** - * Framework for building up a page of HTML by method calls alone, breaking - * every design rule by enmeshing content, presentation and logic - */ -public class HtmlPage { - - public String dtd = ""; - - public Tag page; 
- public Tag head; - public Tag body; - DupHashtable cssSettings; - - /** Creates a new HtmlPage object */ - public HtmlPage() { - page = new Tag("html"); - head = new Tag(page, "head"); - body = new Tag(page, "body"); - cssSettings = new DupHashtable(); - } - - /** renders out the whole page into a single string */ - public String toString() { - - // embed stylesheet, if non-empty - if (cssSettings.size() > 0) { - Tag t1 = head.nest("style type=\"text/css\""); - t1.raw("\n"); - Enumeration elems = cssSettings.keys(); - while (elems.hasMoreElements()) { - String name = (String)elems.nextElement(); - cssTag.raw(name + " { "); - Enumeration items = cssSettings.get(name).elements(); - while (items.hasMoreElements()) { - String item = (String)items.nextElement(); - cssTag.raw(item+";"); - } - cssTag.raw(" }\n"); - } - } - - // now render out the whole page - return dtd + "\n" + page; - } - - /** adds a setting to the page's embedded stylesheet */ - public HtmlPage css(String tag, String item, String val) { - return css(tag, item+":"+val); - } - - /** adds a setting to the page's embedded stylesheet */ - public HtmlPage css(String tag, String setting) { - cssSettings.put(tag, setting); - return this; - } -} diff --git a/apps/q/java/src/net/i2p/aum/http/I2PHttpRequestHandler.java b/apps/q/java/src/net/i2p/aum/http/I2PHttpRequestHandler.java deleted file mode 100644 index 5ab1911be..000000000 --- a/apps/q/java/src/net/i2p/aum/http/I2PHttpRequestHandler.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * I2PHttpRequestHandler.java - * - * Created on April 8, 2005, 11:57 PM - */ - -package net.i2p.aum.http; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.net.Socket; - -import net.i2p.client.streaming.I2PSocket; - -/** - * - * @author david - */ -public abstract class I2PHttpRequestHandler extends MiniHttpRequestHandler -{ - /** Creates a new instance of I2PHttpRequestHandler */ - public I2PHttpRequestHandler(MiniHttpServer 
server, Object sock, Object arg) - throws Exception - { - super(server, sock, arg); - } - - /** Extracts a readable InputStream from own socket */ - public InputStream getInputStream() throws IOException { - try { - return ((I2PSocket)socket).getInputStream(); - } catch (Exception e) { - return ((Socket)socket).getInputStream(); - } - } - - /** Extracts a writeable OutputStream from own socket */ - public OutputStream getOutputStream() throws IOException { - try { - return ((I2PSocket)socket).getOutputStream(); - } catch (Exception e) { - return ((Socket)socket).getOutputStream(); - } - } - -} diff --git a/apps/q/java/src/net/i2p/aum/http/I2PHttpServer.java b/apps/q/java/src/net/i2p/aum/http/I2PHttpServer.java deleted file mode 100644 index 0e693dac0..000000000 --- a/apps/q/java/src/net/i2p/aum/http/I2PHttpServer.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * I2PHttpServer.java - * - * Created on April 8, 2005, 11:39 PM - */ - -package net.i2p.aum.http; - -import java.io.IOException; -import java.util.Properties; - -import net.i2p.I2PException; -import net.i2p.aum.PrivDestination; -import net.i2p.client.streaming.I2PServerSocket; -import net.i2p.client.streaming.I2PSocket; -import net.i2p.client.streaming.I2PSocketManager; -import net.i2p.client.streaming.I2PSocketManagerFactory; -import net.i2p.data.DataFormatException; - -/** - * - * @author david - */ -public class I2PHttpServer extends MiniHttpServer { - - PrivDestination privKey; - I2PSocketManager socketMgr; - - public I2PHttpServer(PrivDestination key) - throws DataFormatException, IOException, I2PException - { - this(key, I2PHttpRequestHandler.class, null, null); - } - - public I2PHttpServer(PrivDestination key, Class hdlrClass) - throws DataFormatException, IOException, I2PException - { - this(key, hdlrClass, null, null); - } - - public I2PHttpServer(PrivDestination key, Class hdlrClass, Properties props) - throws DataFormatException, IOException, I2PException - { - this(key, hdlrClass, null, props); - } - - 
/** Creates a new instance of I2PHttpServer */ - public I2PHttpServer(PrivDestination key, Class hdlrClass, Object hdlrArg, Properties props) - throws DataFormatException, IOException, I2PException - { - super(hdlrClass, hdlrArg); - - if (key != null) { - privKey = key; - } else { - privKey = new PrivDestination(); - } - - // get a socket manager - // socketManager = I2PSocketManagerFactory.createManager(key); - if (props == null) { - socketMgr = I2PSocketManagerFactory.createManager(privKey.getInputStream()); - } else { - socketMgr = I2PSocketManagerFactory.createManager(privKey.getInputStream(), props); - } - - if (socketMgr == null) { - throw new I2PException("I2PHttpServer: Failed to create socketManager"); - } - - String d = privKey.getDestination().toBase64(); - System.out.println("Server: getting server socket for dest "+d); - - // get a server socket - //serverSocket = socketManager.getServerSocket(); - } - - public void getServerSocket() throws IOException { - - I2PServerSocket sock; - sock = socketMgr.getServerSocket(); - serverSocket = sock; - System.out.println("listening on dest: "+privKey.getDestination().toBase64()); - } - - /** - * Listens on our 'serverSocket' object for an incoming connection, - * and returns a connected socket object. 
You should override this - * if you're using non-standard socket objects - */ - public Object acceptConnection() throws IOException { - - I2PSocket sock; - - try { - sock = ((I2PServerSocket)serverSocket).accept(); - } catch (I2PException e) { - throw new IOException(e.toString()); - } - - System.out.println("Got connection from: "+sock.getPeerDestination().toBase64()); - - //System.out.println("New connection accepted" + - // sock.getInetAddress() + - // ":" + sock.getPort()); - return sock; - } - - public static void main(String [] args) { - try { - System.out.println("I2PHttpServer: starting up with new random key"); - I2PHttpServer server = new I2PHttpServer((PrivDestination)null); - System.out.println("I2PHttpServer: running server"); - server.run(); - } catch (Exception e) { - e.printStackTrace(); - } - } -} - diff --git a/apps/q/java/src/net/i2p/aum/http/MiniDemoXmlRpcHandler.java b/apps/q/java/src/net/i2p/aum/http/MiniDemoXmlRpcHandler.java deleted file mode 100644 index f75dfa94e..000000000 --- a/apps/q/java/src/net/i2p/aum/http/MiniDemoXmlRpcHandler.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * MiniDemoXmlRpcHandler.java - * - * Created on April 13, 2005, 3:20 PM - */ - -package net.i2p.aum.http; - - -public class MiniDemoXmlRpcHandler { - - MiniHttpServer server; - - public MiniDemoXmlRpcHandler(MiniHttpServer server) { - this.server = server; - } - - public String bar(String arg) { - return "bar: got '"+arg+"'"; - } -} - diff --git a/apps/q/java/src/net/i2p/aum/http/MiniHttpRequestHandler.java b/apps/q/java/src/net/i2p/aum/http/MiniHttpRequestHandler.java deleted file mode 100644 index 42317a39a..000000000 --- a/apps/q/java/src/net/i2p/aum/http/MiniHttpRequestHandler.java +++ /dev/null @@ -1,574 +0,0 @@ -/* - * MiniHttpRequestHandler.java - * Adapted from pont.net's httpRequestHandler (httpServer.java) - * - * Created on April 8, 2005, 3:15 PM - */ - -package net.i2p.aum.http; - -import java.io.BufferedReader; -import java.io.ByteArrayInputStream; -import 
java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.net.Socket; -import java.net.URLDecoder; -import java.util.Enumeration; -import java.util.Vector; - -import net.i2p.aum.DupHashtable; -import net.i2p.aum.Mimetypes; - -public abstract class MiniHttpRequestHandler implements Runnable { - final static String CRLF = "\r\n"; - - /** server which created this handler */ - protected MiniHttpServer server; - - /** socket through which client is connected to us */ - protected Object socket; - - /** stored constructor arg */ - protected Object serverArg; - - /** input sent from client in request */ - protected InputStream input; - - /** we use this to read from client */ - protected BufferedReader br; - - /** output sent to client in reply */ - protected OutputStream output; - - /** http request type - GET, POST etc */ - protected String reqType; - - /** the request pathname */ - protected String reqFile; - - /** the request protocol (eg 'HTTP/1.0') */ - protected String reqProto; - - /** http headers */ - protected DupHashtable headerVars; - - /** variable settings from POST data */ - public DupHashtable postVars; - - /** variable settings from URL (?name1=val1&name2=val2...) 
*/ - public DupHashtable urlVars; - - /** consolidated variable settings from URL or POST data */ - public DupHashtable allVars; - - /** first line of response we send back to client, set this - * with 'setStatus' - */ - private String status = "HTTP/1.0 200 OK"; - private String contentType = "text/plain"; - private String reqContentType = null; - protected String serverName = "aum's MiniHttpServer"; - - protected byte [] rawContentBytes = null; - - /** - * raw data sent by client in post req - */ - protected char [] postData; - - /** if a POST, this holds the full POST data as a string */ - public String postDataStr; - - // Constructors - public MiniHttpRequestHandler(MiniHttpServer server, Object socket) throws Exception { - this(server, socket, null); - } - - public MiniHttpRequestHandler(MiniHttpServer server, Object socket, Object arg) throws Exception { - this.server = server; - this.socket = socket; - this.serverArg = arg; - this.input = getInputStream(); - this.output = getOutputStream(); - this.br = new BufferedReader(new InputStreamReader(input)); - } - - // ------------------------------------------- - // START OF OVERRIDEABLES - // ------------------------------------------- - - // override these methods in subclass if your socket-type thang is not - // a genuine Socket objct - - /** Extracts a readable InputStream from own socket */ - public InputStream getInputStream() throws IOException { - return ((Socket)socket).getInputStream(); - } - - /** Extracts a writeable OutputStream from own socket */ - public OutputStream getOutputStream() throws IOException { - return ((Socket)socket).getOutputStream(); - } - - /** closes the socket (or our socket-ish object) */ - public void closeSocket() throws IOException { - ((Socket)socket).close(); - } - - /** method which gets called upon receipt of a GET. - * You should override this - */ - public abstract void on_GET() throws Exception; - - /** method which gets called upon receipt of a POST. 
- * You should override this - */ - public abstract void on_POST() throws Exception; - - // ------------------------------------------- - // END OF OVERRIDEABLES - // ------------------------------------------- - - /** Sets the HTTP status line (default 'HTTP/1.0 200 OK') */ - public void setStatus(String status) { - this.status = status; - } - - /** Sets the Content=Type header (default "text/plain") */ - public void setContentType(String contentType) { - this.contentType = contentType; - } - - /** Sets the 'Server' header (default "aum's MiniHttpServer") */ - public void setServer(String serverType) { - this.serverName = serverType; - } - - /** Sets the full body of raw output to be written, replacing - * the generated html tags - */ - public void setRawOutput(String raw) { - setRawOutput(raw.getBytes()); - } - - /** Sets the full body of raw output to be written, replacing - * the generated html tags - */ - public void setRawOutput(byte [] raw) { - rawContentBytes = raw; - } - - /** writes a String to output - normally you shouldn't need to call - * this directly - */ - public void write(String raw) { - write(raw.getBytes()); - } - - /** writes a byte array to output - normally you shouldn't need to call - * this directly - */ - public void write(byte [] raw) { - try { - output.write(raw); - } catch (Exception e) { - System.out.print(e); - } - } - - /** processes the request, sends back response */ - public void run() { - try { - processRequest(); - } - catch(Exception e) { - e.printStackTrace(); - System.out.println(e); - } - } - - /** does all the work of processing the request */ - protected void processRequest() throws Exception { - - headerVars = new DupHashtable(); - urlVars = new DupHashtable(); - postVars = new DupHashtable(); - allVars = new DupHashtable(); - - String line; - - // basic parsing of first req line - String reqLine = br.readLine(); - printReq(reqLine); - String [] reqBits = reqLine.split("\\s+", 3); - reqType = reqBits[0]; - String [] 
reqFileBits = reqBits[1].split("[?]", 2); - reqFile = reqFileBits[0]; - - // check for URL variables - if (reqFileBits.length > 1) { - urlVars = parseVars(reqFileBits[1]); - } - - // extract the 'request protocol', default to HTTP/1.0 - try { - reqProto = reqBits[2]; - } catch (Exception e) { - // workaround eepproxy bug - reqFile = "/"; - reqProto = "HTTP/1.0"; - } - - // suck the headers - while (true) { - line = br.readLine(); - //System.out.println("Got header line: "+line); - if (line.equals("")) { - break; - } - String [] lineBits = line.split(":\\s+", 2); - headerVars.put(lineBits[0], lineBits[1]); - } - //br.close(); - - // GET is simple, all the work is already done - if (reqType.equals("GET")) { - on_GET(); - } - - // POST is more involved - need to read POST data and - // break it up into fields - else if (reqType.equals("POST")) { - int postLen; - String postLenStr; - try { - reqContentType = headerVars.get("Content-Type", 0, ""); - - try { - postLenStr = headerVars.get("Content-Length", 0); - } catch (Exception e) { - // damn opera - postLenStr = headerVars.get("Content-length", 0); - } - - postLen = new Integer(postLenStr).intValue(); - postData = new char[postLen]; - - //System.out.println("postLen="+postLen); - for (int i=0; i"; - } - - if (tagBits.length > 1) { - attribs.addElement(tagBits[1]); - } - - breakBefore = nlOnOpen.contains(open); - breakAfter = breakBefore || nlOnClose.contains(open); - } - - // ----------------------------------------------------- - // METHODS FOR ADDING SPECIFIC HTML TAGS - // ----------------------------------------------------- - - /** insert a <br> on the fly */ - public Tag br() { - return add("br/"); - } - - /** insert a <hr> on the fly */ - public Tag hr() { - return add("hr/"); - } - - public Tag center() { - return nest("center"); - } - - public Tag center(String attr) { - return nest("center "+attr); - } - - public Tag big() { - return nest("big"); - } - - public Tag big(String attr) { - return nest("big 
"+attr); - } - - public Tag small() { - return nest("small"); - } - - public Tag small(String attr) { - return nest("small "+attr); - } - - public Tag i() { - return nest("i"); - } - - public Tag i(String attr) { - return nest("i "+attr); - } - - public Tag strong() { - return nest("strong"); - } - - public Tag strong(String attr) { - return nest("big "+attr); - } - - public Tag table() { - return nest("table"); - } - - public Tag table(String attr) { - return nest("table "+attr); - } - - public Tag tr() { - return nest("tr"); - } - - public Tag tr(String attr) { - return nest("tr "+attr); - } - - public Tag td() { - return nest("td"); - } - - public Tag td(String attr) { - return nest("td "+attr); - } - - public Tag form() { - return nest("form"); - } - - public Tag form(String attr) { - return nest("form "+attr); - } - - // ----------------------------------------------------- - // METHODS FOR ADDING GENERAL CONTENT - // ----------------------------------------------------- - - /** create a new tag, embed it into this one, return this tag */ - public Tag add(String s) { - Tag t = new Tag(s); - content.addElement(t); - return this; - } - - /** add a tag to this one, returning this tag */ - public Tag add(Tag t) { - content.addElement(t); - return this; - } - - /** create a new tag, nest it into this one, return the new tag */ - public Tag nest(String opentag) { - Tag t = new Tag(this, opentag); - t.parent = this; - return t; - } - public Tag nest() { - Tag t = new Tag(this); - t.parent = this; - return t; - } - - /** insert object into this tag, return this tag */ - public Tag raw(Object o) { - content.addElement(o); - return this; - } - - /** set an attribute of this tag, return this tag */ - public Tag set(String name, String val) { - return set(name + "=\"" + val + "\""); - } - - /** set an attribute of this tag, return this tag */ - public Tag set(String setting) { - attribs.addElement(setting); - return this; - } - - public Tag style(String name, String val) 
{ - return style(name+":"+val); - } - - public Tag style(String setting) { - styles.addElement(setting); - return this; - } - - // ----------------------------------------------------- - // METHODS FOR RENDERING - // ----------------------------------------------------- - - public void render(OutputStream out) throws IOException { - - //System.out.print("{render:"+open+"}"); - //System.out.flush(); - - if (open != null) { - out.write("<".getBytes()); - out.write(open.getBytes()); - - // add in attributes, if any - for (int i=0; i 0) { - out.write((" style=\"").getBytes()); - Enumeration elems = styles.elements(); - while (elems.hasMoreElements()) { - String s = (String)elems.nextElement()+";"; - out.write(s.getBytes()); - } - out.write("\"".getBytes()); - } - - if (close.equals("")) { - out.write("/".getBytes()); - } - out.write(">".getBytes()); - - if (breakBefore) { - out.write("\n".getBytes()); - } - } - - for (int i=0; i < content.size(); i++) { - Object item = content.get(i); - if (item.getClass().isAssignableFrom(Tag.class)) { - ((Tag)item).render(out); - } else { - out.write(item.toString().getBytes()); - } - } - - if (open != null) { - out.write(close.getBytes()); - //buf.append(close); - - if (breakAfter) { - out.write("\n".getBytes()); - } - } - } - - public String render() { - ByteArrayOutputStream s = new ByteArrayOutputStream(); - try { - render(s); - } catch (Exception e) { - e.printStackTrace(); - return null; - } - return s.toString(); - } - - public String toString() { - return render(); - } -} - diff --git a/apps/q/java/src/net/i2p/aum/q/Favicon.java b/apps/q/java/src/net/i2p/aum/q/Favicon.java deleted file mode 100644 index f5f6ab6c9..000000000 --- a/apps/q/java/src/net/i2p/aum/q/Favicon.java +++ /dev/null @@ -1,61 +0,0 @@ -package net.i2p.aum.q; -public class Favicon { - public static byte [] image = { - 0, 0, 1, 0, 1, 0, 16, 16, 0, 0, 1, 0, 24, 0, 104, 3, - 0, 0, 22, 0, 0, 0, 40, 0, 0, 0, 16, 0, 0, 0, 32, 0, - 0, 0, 1, 0, 24, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 72, 0, - 0, 0, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 19, - 19, 19, -127, -127, -127, 12, 12, 12, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 2, 2, 2, -120, - -120, -120, -49, -49, -49, 116, 116, 116, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 90, 90, 90, -80, -80, -80, - -18, -18, -18, -55, -55, -55, -122, -122, -122, 68, 68, 68, 107, 107, 107, -62, - -62, -62, -20, -20, -20, -59, -59, -59, 4, 4, 4, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, -109, -109, -109, -30, -30, -30, -8, -8, -8, - -25, -25, -25, -2, -2, -2, -28, -28, -28, -49, -49, -49, -2, -2, -2, -14, - -14, -14, -36, -36, -36, 33, 33, 33, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 72, 72, 72, -1, -1, -1, -1, -1, -1, -28, -28, -28, - -34, -34, -34, 118, 118, 118, -124, -124, -124, -1, -1, -1, -1, -1, -1, -6, - -6, -6, 68, 68, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, -98, -98, -98, -1, -1, -1, -38, -38, -38, -80, -80, -80, - 13, 13, 13, 0, 0, 0, 100, 100, 100, -11, -11, -11, -9, -9, -9, -3, - -3, -3, -90, -90, -90, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, -49, -49, -49, -4, -4, -4, -57, -57, -57, 63, 63, 63, - 0, 0, 0, 26, 26, 26, -74, -74, -74, -56, -56, -56, -35, -35, -35, -29, - -29, -29, -13, -13, -13, 104, 104, 104, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, -28, -28, -28, -46, -46, -46, -22, -22, -22, 2, 2, 2, - 0, 0, 0, 2, 2, 2, 41, 41, 41, 108, 108, 108, 37, 37, 37, -32, - -32, -32, -29, -29, -29, -60, -60, -60, 1, 1, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, -60, -60, -60, -60, -60, -60, -44, -44, -44, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 8, -56, - -56, -56, -49, -49, -49, -43, -43, -43, 24, 24, 24, 0, 0, 0, 0, 0, - 0, 0, 0, 0, -117, -117, -117, -70, 
-70, -70, -48, -48, -48, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 50, 50, 50, -93, - -93, -93, -12, -12, -12, -47, -47, -47, 32, 32, 32, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 54, 54, 54, -42, -42, -42, -79, -79, -79, 28, 28, 28, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 110, 110, 110, -70, - -70, -70, -4, -4, -4, -64, -64, -64, 3, 3, 3, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 121, 121, 121, -51, -51, -51, -87, -87, -87, - 10, 10, 10, 0, 0, 0, 37, 37, 37, -119, -119, -119, -106, -106, -106, -20, - -20, -20, -33, -33, -33, 95, 95, 95, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, -124, -124, -124, -23, -23, -23, - -33, -33, -33, -107, -107, -107, -75, -75, -75, -68, -68, -68, -15, -15, -15, -16, - -16, -16, -111, -111, -111, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 55, 55, 55, - -97, -97, -97, -26, -26, -26, -29, -29, -29, -31, -31, -31, -61, -61, -61, 121, - 121, 121, 11, 11, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - }; -} diff --git a/apps/q/java/src/net/i2p/aum/q/QClientAPI.java b/apps/q/java/src/net/i2p/aum/q/QClientAPI.java deleted file mode 100644 index a431283df..000000000 --- a/apps/q/java/src/net/i2p/aum/q/QClientAPI.java +++ /dev/null @@ -1,187 +0,0 @@ -/* - * QClientAPI.java - * - * Created on March 31, 2005, 5:19 PM - */ - -package net.i2p.aum.q; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.util.Hashtable; -import java.util.Vector; - -import org.apache.xmlrpc.XmlRpcClient; -import org.apache.xmlrpc.XmlRpcException; - -/** - *

The official Java API for client applications wishing to access the Q - * network

- *

This API is just a thin wrapper that hides the XMLRPC details, and exposes - a simple set of methods.

- *

Note to app developers - I'm only implementing this API in Java - * and Python at present. If you've got some time and knowledge of other - * languages and their available XML-RPC client libs, we'd really appreciate - * it if you can port this API into other languages - such as Perl, C++, - * Ruby, OCaml, C#, etc. You can take this API implementation as the reference - * code for porting to your own language.

- */ - -public class QClientAPI { - - XmlRpcClient node; - - /** - * Creates a new instance of QClientAPI talking on given xmlrpc port - */ - public QClientAPI(int port) throws MalformedURLException { - node = new XmlRpcClient("http://127.0.0.1:"+port); - } - - /** - * Creates a new instance of QClientAPI talking on default xmlrpc port - */ - public QClientAPI() throws MalformedURLException { - node = new XmlRpcClient("http://127.0.0.1:"+QClientNode.defaultXmlRpcServerPort); - } - - /** - * Pings a Q client node, gets back a bunch of useful stats - */ - public Hashtable ping() throws XmlRpcException, IOException { - return (Hashtable)node.execute("i2p.q.ping", new Vector()); - } - - /** - * Retrieves an update of content catalog - * @param since a unixtime in seconds. The content list returned will - * be a differential update since this time. - */ - public Hashtable getUpdate(int since) - throws XmlRpcException, IOException - { - Vector args = new Vector(); - args.addElement(new Integer(since)); - args.addElement(new Integer(1)); - args.addElement(new Integer(1)); - return (Hashtable)node.execute("i2p.q.getUpdate", args); - } - - /** - * Retrieves an item of content from the network, given its key - * @param key the key to retrieve - */ - public Hashtable getItem(String key) throws XmlRpcException, IOException { - Vector args = new Vector(); - args.addElement(key); - return (Hashtable)node.execute("i2p.q.getItem", args); - } - - /** - * Inserts a single item of data, without metadata. A default metadata set - * will be generated. - * @param data a byte[] of data to insert - * @return a Hashtable containing results, including: - *
    - *
  • result - either "ok" or "error"
  • - *
  • error - (only if result != "ok") - terse error label
  • - *
  • key - the key under which this item has been inserted
  • - *
- */ - public Hashtable putItem(byte [] data) throws XmlRpcException, IOException { - Vector args = new Vector(); - args.addElement(data); - return (Hashtable)node.execute("i2p.q.putItem", args); - } - - /** - * Inserts a single item of data, with metadata - * @param metadata a Hashtable of metadata to insert - * @param data a byte[] of data to insert - * @return a Hashtable containing results, including: - *
    - *
  • result - either "ok" or "error"
  • - *
  • error - (only if result != "ok") - terse error label
  • - *
  • key - the key under which this item has been inserted
  • - *
- */ - public Hashtable putItem(Hashtable metadata, byte [] data) - throws XmlRpcException, IOException - { - Vector args = new Vector(); - args.addElement(metadata); - args.addElement(data); - return (Hashtable)node.execute("i2p.q.putItem", args); - } - - /** - * Generates a new keypair for inserting signed-space items - * @return a struct with the keys: - *
    - *
  • status - "ok"
  • - *
  • publicKey - base64-encoded signed space public key
  • - *
  • privateKey - base64-encoded signed space private key
  • - *
- * When inserting an item using the privateKey, the resulting uri - * will be Q:publicKey/path - */ - public Hashtable newKeys() throws XmlRpcException, IOException - { - Vector args = new Vector(); - return (Hashtable)node.execute("i2p.q.newKeys", args); - } - - - /** - * Adds a new noderef to node - * @param dest - the base64 i2p destination for the remote peer - * @return a Hashtable containing results, including: - *
    - *
  • result - either "ok" or "error"
  • - *
  • error - (only if result != "ok") - terse error label
  • - *
- */ - public Hashtable hello(String dest) throws XmlRpcException, IOException { - Vector args = new Vector(); - args.addElement(dest); - return (Hashtable)node.execute("i2p.q.hello", args); - } - - /** - * Shuts down a running node - * If the shutdown succeeds, then this call will fail with an exception. But - * if the call succeeds, then the shutdown has failed (sorry if this is a tad - * counter-intuitive). - * @param privKey - the base64 i2p private key for this node. - * @return a Hashtable containing results, including: - *
    - *
  • result - "error"
  • - *
  • error - terse error label
  • - *
- */ - public Hashtable shutdown(String privKey) throws XmlRpcException, IOException { - Vector args = new Vector(); - args.addElement(privKey); - return (Hashtable)node.execute("i2p.q.shutdown", args); - } - - /** - * Search the node for catalog entries matching a set of criteria - * @param criteria a Hashtable of metadata criteria to match, and whose - * values are regular expressions - * @return a Hashtable containing results, including: - *
    - *
  • result - "ok" or "error"
  • - *
  • error - if result != "ok", a terse error label
  • - *
  • items - a Vector of items found which match the given search - * criteria. If no available matching items were found, this vector - * will come back empty. - *
- */ - public Hashtable search(Hashtable criteria) throws XmlRpcException, IOException { - Vector args = new Vector(); - args.addElement(criteria); - return (Hashtable)node.execute("i2p.q.search", args); - } -} - diff --git a/apps/q/java/src/net/i2p/aum/q/QClientNode.java b/apps/q/java/src/net/i2p/aum/q/QClientNode.java deleted file mode 100644 index 4cd7b37c3..000000000 --- a/apps/q/java/src/net/i2p/aum/q/QClientNode.java +++ /dev/null @@ -1,608 +0,0 @@ -/* - * QClient.java - * - * Created on 20 March 2005, 23:22 - */ - -package net.i2p.aum.q; - -import java.io.File; -import java.io.IOException; -import java.util.Hashtable; -import java.util.Iterator; -import java.util.Properties; -import java.util.Vector; - -import net.i2p.I2PException; -import net.i2p.aum.Mimetypes; -import net.i2p.aum.http.I2PHttpServer; -import net.i2p.aum.http.MiniHttpServer; -import net.i2p.data.DataFormatException; - -/** - * Implements Q client nodes. - */ - -public class QClientNode extends QNode { - - static String defaultStoreDir = ".quartermaster_client"; - I2PHttpServer webServer; - MiniHttpServer webServerTcp; - Properties httpProps; - - public String nodeType = "Client"; - - // ------------------------------------------------------- - // CONSTRUCTORS - // ------------------------------------------------------- - - /** - * Creates a new instance of QClient, using default - * datastore location - * @throws IOException, DataFormatException, I2PException - */ - public QClientNode() throws IOException, DataFormatException, I2PException - { - super(System.getProperties().getProperty("user.home") + sep + defaultStoreDir); - log.debug("TEST CLIENT DEBUG MSG1"); - } - - /** - * Creates a new instance of QClient, using specified - * datastore location - * @param path of node's datastore directory - * @throws IOException, DataFormatException, I2PException - */ - public QClientNode(String dataDir) throws IOException, DataFormatException, I2PException - { - super(dataDir); - - log.error("TEST 
CLIENT DEBUG MSG"); - } - - // ------------------------------------------------------- - // METHODS - XML-RPC PRIMITIVE OVERRIDES - // ------------------------------------------------------- - - /** - * hello cmds to client nodes are illegal! - */ - /** - public Hashtable localHello(String destBase64) - { - Hashtable h = new Hashtable(); - h.put("status", "error"); - h.put("error", "unimplemented"); - return h; - } - **/ - - /** perform client-specific setup */ - public void setup() - { - updateCatalogFromPeers = 1; - isClient = true; - - // allow a port change for xmlrpc client app conns - String xmlPortStr = System.getProperty("q.xmlrpc.tcp.port"); - if (xmlPortStr != null) { - xmlRpcServerPort = new Integer(xmlPortStr).intValue(); - conf.setIntProperty("xmlRpcServerPort", xmlRpcServerPort); - } - - // ditto for listening host - String xmlHostStr = System.getProperty("q.xmlrpc.tcp.host"); - if (xmlHostStr != null) { - xmlRpcServerHost = xmlHostStr; - conf.setProperty("xmlRpcServerHost", xmlRpcServerHost); - } - - // --------------------------------------------------- - // now fire up the HTTP interface - // listening only within I2P on client node's dest - - // set up a properties object for short local tunnel - httpProps = new Properties(); - httpProps.setProperty("inbound.length", "0"); - httpProps.setProperty("outbound.length", "0"); - httpProps.setProperty("inbound.lengthVariance", "0"); - httpProps.setProperty("outbound.lengthVariance", "0"); - Properties sysProps = System.getProperties(); - String i2cpHost = sysProps.getProperty("i2cp.tcp.host", "127.0.0.1"); - String i2cpPort = sysProps.getProperty("i2cp.tcp.port", "7654"); - httpProps.setProperty("i2cp.tcp.host", i2cpHost); - httpProps.setProperty("i2cp.tcp.port", i2cpPort); - } - - public void run() { - - // then do all the parent stuff - super.run(); - } - - /** - *

Sets up and launches an http server for servicing requests - * to this node.

- *

For server nodes, the xml-rpc server listens within I2P on the - * node's destination.

- *

For client nodes, the xml-rpc server listens on a local TCP - * port (according to attributes xmlRpcServerHost and xmlRpcServerPort)

- */ - public void startExternalInterfaces(QServerMethods methods) throws Exception - { - System.out.println("Creating http interface..."); - try { - // create tcp http server for xmlrpc and browser access - webServerTcp = new MiniHttpServer(QClientWebInterface.class, xmlRpcServerPort, this); - webServerTcp.addXmlRpcHandler(baseXmlRpcServiceName, methods); - System.out.println("started in-i2p http/xmlrpc server listening on port:" + xmlRpcServerPort); - webServerTcp.start(); - - // create in-i2p http server for xmlrpc and browser access - webServer - = new I2PHttpServer(privKey, - QClientWebInterface.class, - this, - httpProps - ); - webServer.addXmlRpcHandler(baseXmlRpcServiceName, methods); - webServer.start(); - System.out.println("Started in-i2p http/xmlrpc server listening on dest:"); - String dest = privKey.getDestination().toBase64(); - System.out.println(dest); - - - System.out.println("web interfaces created"); - - } catch (Exception e) { - e.printStackTrace(); - System.out.println("Failed to create client web interfaces"); - System.exit(1); - } - -/** - WebServer serv = new WebServer(xmlRpcServerPort); - // if host is non-null, add as a listen host - if (xmlRpcServerHost.length() > 0) { - serv.setParanoid(true); - serv.acceptClient(xmlRpcServerHost); - } - serv.addHandler(baseXmlRpcServiceName, methods); - serv.start(); - log.info("Client XML-RPC server listening on port "+xmlRpcServerPort+" as service"+baseXmlRpcServiceName); -**/ - - } - - // ----------------------------------------------------- - // client-specific customisations of xmlRpc methods - // ----------------------------------------------------- - - /** - * Insert an item of content, with metadata. Then (since this is the client's - * override) schedules a job to insert this item to a selection of remote peers. 
- * @param metadata Hashtable of item's metadata - * @param data raw data to insert - */ - public Hashtable putItem(Hashtable metadata, byte [] data) throws QException - { - Hashtable resp = new Hashtable(); - QDataItem item; - String uri; - - // do the local insert first - try { - item = new QDataItem(metadata, data); - item.processAndValidate(true); - localPutItem(item); - uri = (String)item.get("uri"); - - } catch (QException e) { - resp.put("status", "error"); - resp.put("error", "qexception"); - resp.put("summary", e.getLocalizedMessage()); - return resp; - } - - // now schedule remote insertion - schedulePeerUploadJob(item); - - // and return success, rest will happen automatically in background - resp.put("status", "ok"); - resp.put("uri", uri); - return resp; - } - - /** - * Search datastore and catalog for a given item of content - * @param criteria Hashtable of criteria to match in metadata - */ - public Hashtable search(Hashtable criteria) - { - Hashtable result = new Hashtable(); - Vector matchingItems = new Vector(); - Iterator items; - Hashtable item; - Hashtable foundUris = new Hashtable(); - String uri; - - // get an iterator for all catalog items - try { - // test all local content - items = contentIdx.getItemsSince(0); - while (items.hasNext()) { - String uriHash = (String)items.next(); - item = getLocalMetadataForHash(uriHash); - uri = (String)item.get("uri"); - //System.out.println("search: testing "+metadata+" against "+criteria); - if (metadataMatchesCriteria(item, criteria)) { - matchingItems.addElement(item); - foundUris.put(uri, item); - } - } - - // now test remote catalog - items = catalogIdx.getItemsSince(0); - while (items.hasNext()) { - String uriHash = (String)items.next(); - item = getLocalCatalogMetadataForHash(uriHash); - uri = (String)item.get("uri"); - //System.out.println("search: testing "+metadata+" against "+criteria); - if (metadataMatchesCriteria(item, criteria)) { - if (!foundUris.containsKey("uri")) { - 
matchingItems.addElement(item); - } - } - } - - } catch (Exception e) { - e.printStackTrace(); - result.put("status", "error"); - result.put("error", e.getMessage()); - return result; - } - - result.put("status", "ok"); - result.put("items", matchingItems); - return result; - - } - - - /** - * retrieves a peers/catalog update - executes on base class, then - * adds in our catalog entries - */ - public Hashtable getUpdate(int since, int includePeers, int includeCatalog) - { - Hashtable h = localGetUpdate(since, includePeers, includeCatalog); - - if (includeCatalog != 0) { - - // must extend v with remote catalog entries - Vector vCat = (Vector)(h.get("items")); - Iterator items; - - // get an iterator for all new catalog items since given unixtime - try { - items = catalogIdx.getItemsSince(since); - - // pick through the iterator, and fetch metadata for each item - while (items.hasNext()) { - String key = (String)(items.next()); - Hashtable pf = getLocalCatalogMetadata(key); - log.error("getUpdate(client): key="+key+", pf="+pf); - System.out.println("getUpdate(client): key="+key+", pf="+pf); - if (pf != null) { - // clone this metadata, add in the key - Hashtable pf1 = (Hashtable)pf.clone(); - pf1.put("key", key); - vCat.addElement(pf1); - } - } - - - } catch (IOException e) { - e.printStackTrace(); - } - } - - return h; - } - - /** - *

Retrieve an item of content.

- *

This client override tries the local datastore first, then - * attempts to get the data from remote servers believed to have the data

- */ - public Hashtable getItem(String uri) throws IOException, QException - { - Hashtable res; - - log.info("getItem: uri='"+uri+"'"); - - if (localHasItem(uri)) { - - class Fred { - } - - Fred xxx = new Fred(); - - // got it locally, send it back - return localGetItem(uri); - } - - // ain't got it locally - try remote sources in turn till we - // either get it or fail - Vector sources = getItemLocation(uri); - - // send back an error if not in local catalog - if (sources == null || sources.size() == 0) { - Hashtable dnf = new Hashtable(); - dnf.put("status", "error"); - dnf.put("error", "notfound"); - dnf.put("comment", "uri not known locally or remotely"); - return dnf; - } - - // ok, got at least one remote source, go through them till - // we get data that checks out - int i; - int npeers = sources.size(); - int numCmdFail = 0; - int numDnf = 0; - int numBadData = 0; - for (i=0; i - *
  • status - String - either "ok" or "error"
  • - *
  • error - String - short summary of error, only present if - * status is "error"
  • - *
  • uri - the full Q URI for the top level of the site - * - */ - public Hashtable insertQSite(String privKey64, - String siteName, - String rootPath, - Hashtable metadata - ) - throws Exception - { - // for results - Hashtable result = new Hashtable(); - String uri = null; // uri under which this site will be reachable - String pubKey64; - - File dir = new File(rootPath); - - // barf if no such directory - if (!dir.isDirectory()) { - result.put("status", "error"); - result.put("error", "nosuchdir"); - result.put("detail", "Path '"+rootPath+"' is not a directory"); - return result; - } - - // barf if not readable - if (!dir.canRead()) { - result.put("status", "error"); - result.put("error", "cantread"); - result.put("detail", "Path '"+rootPath+"' is not readable"); - return result; - } - - // barf if missing or invalid site name - siteName = siteName.trim(); - if (!siteName.matches("[a-zA-Z0-9_-]+")) { - result.put("status", "error"); - result.put("error", "badsitename"); - result.put("detail", "QSite name should be only alphanumerics, '-' and '_'"); - return result; - } - - String defaultPath = rootPath + sep + "index.html"; - File defaultFile = new File(defaultPath); - - // barf if index.html not present and readable - if (!(defaultFile.isFile() && defaultFile.canRead())) { - result.put("status", "error"); - result.put("error", "noindex"); - result.put("detail", "Required file index.html missing or unreadable"); - return result; - } - - // derive public key and uri for site, barf if bad key - try { - pubKey64 = QUtil.privateToPubHash(privKey64); - } catch (Exception e) { - result.put("status", "error"); - result.put("error", "badprivkey"); - return result; - } - uri = "Q:" + pubKey64 + "/" + siteName + "/"; - - // now the fun recursive bit - insertQSiteDir(privKey64, siteName, rootPath, ""); - - // queue up an insert of default file - metadata.put("type", "qsite"); - metadata.put("path", siteName+"/"); - metadata.put("mimetype", "text/html"); - - 
//System.out.println("insertQSite: privKey='"+privKey64+"'"); - //System.out.println("insertQSite: siteName='"+siteName+"'"); - //System.out.println("insertQSite: rootDir='"+rootPath+"'"); - //System.out.println("insertQSite: metadata="+metadata); - //System.out.println("insertQSite: default="+defaultPath); - - insertQSiteFile(privKey64, siteName, defaultPath, "", metadata); - - result.put("status", "ok"); - result.put("uri", uri); - return result; - } - - /** - * recursively queues jobs for the insertion of a directory's contents, for - * a qsite. - * @param privKey64 - private 'signed space' key, base64 format - * @param siteName - short text name for the site - * @param absPath - physical pathname of the subdirectory to insert - * @param relPath - qsite-relative pathname of this item - */ - protected void insertQSiteDir(String privKey64, String siteName, String absPath, String relPath) - throws Exception - { - File dir = new File(absPath); - - // fail gracefully if not a readable directory - if (!(dir.isDirectory() && dir.canRead())) { - System.out.println("insertQSiteDir: not a readable directory "+absPath); - return; - } - - //System.out.println("insertQSiteDir: entry - abs='"+absPath+"' rel='"+relPath+"'"); - - // loop through the contents - String [] contents = dir.list(); - for (int i=0; i 0) { - node = new QClientNode(args[0]); - } - else { - node = new QClientNode(); - } - node.log.info("QClientNode: running node..."); - node.run(); - } - - public void foo1() { - System.out.println("QClientNode.foo: isClient="+isClient); - } - - -} diff --git a/apps/q/java/src/net/i2p/aum/q/QClientWebInterface.java b/apps/q/java/src/net/i2p/aum/q/QClientWebInterface.java deleted file mode 100644 index 72b5bd857..000000000 --- a/apps/q/java/src/net/i2p/aum/q/QClientWebInterface.java +++ /dev/null @@ -1,755 +0,0 @@ -/* - * QClientWebInterface.java - * - * Created on April 9, 2005, 1:10 PM - */ - -package net.i2p.aum.q; - -import java.io.ByteArrayOutputStream; -import 
java.io.PrintStream; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.net.Socket; -import java.util.Enumeration; -import java.util.Hashtable; -import java.util.Vector; - -import net.i2p.aum.http.HtmlPage; -import net.i2p.aum.http.I2PHttpRequestHandler; -import net.i2p.aum.http.MiniHttpServer; -import HTML.Template; - - -/** - * Request handler for Q Client nodes that listens within I2P - * on the client node's destination. Intended for access via - * eepProxy, and by adding a hosts.txt entry for this dest - * under the hostname 'q'. - */ -public class QClientWebInterface extends I2PHttpRequestHandler { - - /** set this to true when debugging html layout */ - public static boolean loadTemplateWithEachHit = true; - - public QNode node = null; - - // refs to main page template, and components of main page - static Template tmplt; - static Vector tabRow; - static Vector pageItems; - - /** - * for security - disables direct-uri GETs of content if running directly over TCP; - * we need to coerce users to use their eepproxy browser instead - */ - public boolean isRunningOverTcp = true; - - /** Creates a new instance of QClientWebInterface */ - public QClientWebInterface(MiniHttpServer server, Object socket, Object node) - throws Exception - { - super(server, socket, node); - this.node = (QNode)node; - isRunningOverTcp = socket.getClass() == Socket.class; - } - - static String [] tabNames = { - "home", "search", "insert", "tools", "status", "jobs", "help", "about" - }; - - /** - * Loads a template of a given name. 
Invokes method on node - * to resolve this to an absolute pathname, so 'name' -> '/path/to/html/name.html' - */ - public Template loadTemplate(String name) throws Exception { - - String fullPath = node.getResourcePath("html"+node.sep+name)+".html"; - //System.out.println("fullPath='"+fullPath+"'"); - String [] args = new String [] { - "filename", fullPath, - "case_sensitive", "true", - "max_includes", "5" - }; - return new Template(args); - } - - // ---------------------------------------------------- - // FRONT-END METHODS - // ---------------------------------------------------- - - /** GET and POST both go through .safelyHandleReq() */ - public void on_GET() { - - safelyHandleReq(); - } - - /** GET and POST both go through .safelyHandleReq() */ - public void on_POST() { - - safelyHandleReq(); - } - - public void on_RPC() { - - } - - /** - * wrap .handleReq() - on exception, call dump_error() to - * generate a 400 error page with diagnostics - */ - public void safelyHandleReq() { - try { - handleReq(); - } catch (Exception e) { - dump_error(e); - } - } - - /** - *

    Forwards hits to either a path handler method, or generic get method.

    - * - *

    Detects hits to paths for which we have a handler (ie, methods - * of this class with name 'hdlr_<somepath>', (such as 'hdlr_help' - * for handling hits to '/help').

    - * - *

    If we have a handler, forward to it, otherwise forward to standard - * getItem() method

    - */ - public void handleReq() throws Exception { - - Class [] noArgs; - Method hdlrMethod; - - // strip useless leading slash from reqFile - reqFile = reqFile.substring(1); - - // default to 'home' - if (reqFile.equals("")) { - reqFile = "home"; - } - //print("handleReq: reqFile='"+reqFile+"'"); - - // Set up the main page template - try { - tmplt = loadTemplate("main"); - pageItems = new Vector(); - tmplt.setParam("items", pageItems); - tmplt.setParam("nodeType", node.nodeType); - - } catch (Exception e) { - e.printStackTrace(); - throw e; - } - //print("handleReq: loaded template"); - - // execute if a command - if (allVars.containsKey("cmd")) { - do_cmd(); - } - - // -------------------------------------------------------- - // intercept magic paths for which we have a handler method - noArgs = new Class[0]; - try { - // extract top dir of path and make it method-name-safe - String methodName = "hdlr_"+reqFile.split("/")[0].replace('.','_'); - hdlrMethod = this.getClass().getMethod(methodName, null); - - // now dispatch the method - hdlrMethod.invoke(this, null); - - // spit out html, if no raw content inserted - sendPageIfNoContent(); - - // done - return; - - } catch (NoSuchMethodException e) { - // routinely fails if we dont' have handler, so assume it's - // a GET - } - - // if we get here, client is requesting a specific uri - allVars.put("uri", reqFile); - if (!cmd_get()) { - hdlr_home(); - } - sendPageIfNoContent(); - } - - /** - * as name implies, generates standard html page - * if setRawOutput hasnt' been called - */ - public void sendPageIfNoContent() { - - if (rawContentBytes == null) { - - // we're spitting out html - setContentType("text/html"); - - // set up tab row style vector - setupTabRow(); - - // finally, render out our filled-out template - setRawOutput(tmplt.output()); - } - } - - /** - * Inserts an item into main pane - */ - public Object addToMainPane(Object item) { - - Hashtable h = new Hashtable(); - h.put("item", item); - 
pageItems.addElement(h); - return item; - } - - /** - * Generates a set of tabs and adds these to the page, - * marking as active the tab whose name is in the current URL - */ - public void setupTabRow() - { - Hashtable h; - tabRow = new Vector(); - for (int i=0; i< tabNames.length; i++) { - String name = tabNames[i]; - h = new Hashtable(); - h.put("name", name); - h.put("label", name.substring(0,1).toUpperCase()+name.substring(1)); - if (name.equals(reqFile)) { - h.put("active", "1"); - } - tabRow.addElement(h); - tmplt.setParam("tabs", tabRow); - } - } - - // ----------------------------------------------------- - // METHODS FOR HANDLING MAGIC PATHS - // ---------------------------------------------------- - - /** Display home page */ - public void hdlr_home() throws Exception { - - // stick in 'getitem' form - addToMainPane(loadTemplate("getform")); - - } - - /** Display status page */ - public void hdlr_status() throws Exception { - - // ping the node, extract status items - Vector statusItems = new Vector(); - Hashtable h = node.ping(); - for (Enumeration e = h.keys(); e.hasMoreElements();) { - String key = (String)e.nextElement(); - String val = h.get(key).toString(); - if (val.length() > 60) { - // too big for table, stick into a readonly text field - val = ""; - } - Hashtable rec = new Hashtable(); - rec.put("key", key); - rec.put("value", val); - //print("key='"+key+"' val='"+val+"'"); - statusItems.addElement(rec); - } - - // get status form template insert the items, stick onto main pane - Template tmpltStatus = loadTemplate("status"); - tmpltStatus.setParam("items", statusItems); - addToMainPane(tmpltStatus); - - } - - /** display current node jobs list */ - public void hdlr_jobs() throws Exception { - - // get jobs list, add to jobs list template, add that to main pane - Template tmpltJobs = loadTemplate("jobs"); - tmpltJobs.setParam("items", node.getJobsList()); - addToMainPane(tmpltJobs); - } - - /** Display search form */ - public void hdlr_search() 
throws Exception { - addToMainPane(loadTemplate("searchform")); - } - - /** Display insert page */ - public void hdlr_insert() throws Exception { - - String formName = allVars.get("mode", 0, "file").equals("site") ? "putsiteform" : "putform"; - Template tmpltPut = loadTemplate(formName); - addToMainPane(tmpltPut); - } - - /** Display settings screen */ - public void hdlr_settings() throws Exception { - addToMainPane(loadTemplate("settings")); - } - - /** Display tools screen */ - public void hdlr_tools() throws Exception { - - addToMainPane(loadTemplate("tools")); - addToMainPane(loadTemplate("genkeysform")); - addToMainPane(loadTemplate("addrefform")); - } - - /** Display help screen */ - public void hdlr_help() throws Exception { - addToMainPane(loadTemplate("help")); - } - - /** Display about screen */ - public void hdlr_about() throws Exception { - addToMainPane(loadTemplate("about")); - } - - /** handle /favicon.ico hits */ - public void hdlr_favicon_ico() { - - System.out.println("Sending favicon image"); - setContentType("image/x-icon"); - setRawOutput(Favicon.image); - } - - /** dummy handler, causes an exception (for testing error dump pages */ - public void hdlr_shit() throws Exception { - throw new Exception("this method is shit"); - } - - // ---------------------------------------------------- - // METHODS FOR HANDLING COMMANDS - // ---------------------------------------------------- - - /** - * invoked if GET or POST vars contain 'cmd'. 
- * attempts to dispatch command handler method 'cmd_xxxx' - */ - public void do_cmd() throws Exception { - - // this whole method could be done in python with the statement: - // getattr(self, 'cmd_'+urlVars['cmd'], lambda:None)() - String cmd = allVars.get("cmd", 0); - try { - // extract top dir of path and make it method-name-safe - String methodName = "cmd_"+cmd; - Method hdlrMethod = this.getClass().getMethod(methodName, null); - - // now dispatch the method - hdlrMethod.invoke(this, null); - } catch (NoSuchMethodException e) {} - } - - - /** - * executes a 'get' cmd - */ - public boolean cmd_get() throws Exception { - - Hashtable result = null; - String status = null; - Hashtable metadata = null; - String mimetype = null; - - // bail if node offline - if (node == null) { - return false; - } - - // bail if no 'url' arg - if (!allVars.containsKey("uri")) { - return false; - } - - // get uri, prepend 'Q:' if needed - String uri = allVars.get("uri", 0); - if (!uri.startsWith("Q:")) { - uri = "Q:" + uri; - } - - // attempt the fetch - result = node.getItem(uri); - status = (String)result.get("status"); - - // how'd we go? - if (status.equals("ok")) { - // got it - send it back - metadata = (Hashtable)result.get("metadata"); - mimetype = (String)metadata.get("mimetype"); - - // forbid content retrieval via MSIE - boolean isIE = false; - for (Enumeration e = headerVars.get("User-Agent").elements(); e.hasMoreElements();) { - String val = ((String)e.nextElement()).toLowerCase(); - if (val.matches(".*(msie|windows|\\.net).*")) { - Template warning = loadTemplate("msiealert"); - addToMainPane(warning); - return false; - } - } - - // forbid direct delivery of text/* content via direct tcp - if (isRunningOverTcp) { - // security feature - set to application/octet-stream if req arrives via tcp. 
- // this prevents people surfing the q web interface directly over TCP and - // falling prey to anonymity attacks (eg gif bugs) - - // if user is trying to hit an html page, we can send back a warning - if (mimetype.startsWith("text")) { - Template warning = loadTemplate("anonalert"); - warning.setParam("dest", node.destStr); - addToMainPane(warning); - return false; - } - setContentType("application/octet-stream"); - } else { - // got this conn via I2P and eeproxy - safer to obey the mimetype - setContentType(mimetype); - } - - setRawOutput((byte [])result.get("data")); - return true; - } else { - // 404 - tmplt.setParam("show_404", "1"); - tmplt.setParam("404_uri", uri); - return false; - } - } - - /** executes genkeys command */ - public void cmd_genkeys() throws Exception { - - Hashtable res = node.newKeys(); - String pubKey = (String)res.get("publicKey"); - String privKey = (String)res.get("privateKey"); - Template keysWidget = loadTemplate("genkeysresult"); - keysWidget.setParam("publickey", pubKey); - keysWidget.setParam("privatekey", privKey); - addToMainPane(keysWidget); - } - - /** adds a noderef */ - public void cmd_addref() throws Exception { - - String ref = allVars.get("noderef", 0).trim(); - node.hello(ref); - } - - /** executes 'put' command */ - public void cmd_put() throws Exception { - - // barf if user posted both data and rawdata - if (allVars.containsKey("data") - && ((String)allVars.get("data", 0)).length() > 0 - && allVars.containsKey("rawdata") - && ((String)allVars.get("rawdata", 0)).length() > 0 - ) - { - Template t = loadTemplate("puterror"); - t.setParam("error", "you specified a file as well as 'rawdata'"); - addToMainPane(t); - addToMainPane(dumpVars().toString()); - return; - } - - Hashtable metadata = new Hashtable(); - byte [] data = new byte[0]; - - // stick in some defaults - String [] keys = { - "data", "rawdata", - "mimetype", "keywords", "privkey", "abstract", "type", "title", - "path" - }; - - 
//System.out.println("allVars='"+allVars+"'"); - - // extract all items from form, add to metadata ones that - // have non-zero length. Take 'data' or 'rawdata' and stick their - // bytes into data. - for (int i=0; i 0) { - data = dataval; - } - } else if (key.equals("rawdata")) { - byte [] dataval = allVars.get("rawdata", 0).getBytes(); - if (dataval.length > 0) { - data = dataval; - } - } else if (key.equals("privkey")) { - String k = allVars.get("privkey", 0); - if (k.length() > 0) { - metadata.put("privateKey", k); - } - } else { - String val = allVars.get(key, 0); - //System.out.println("'"+key+"'='"+val+"'"); - if (val.length() > 0) { - metadata.put(key, allVars.get(key, 0)); - } - } - } - } - - //System.out.println("metadata="+metadata); - - if (metadata.size() == 0) { - Template err = loadTemplate("puterror"); - err.setParam("error", "No metadata!"); - addToMainPane(err); - addToMainPane(dumpVars().toString()); - return; - } - - if (data.length == 0) { - Template err = loadTemplate("puterror"); - err.setParam("error", "No data!"); - addToMainPane(err); - addToMainPane(dumpVars().toString()); - return; - } - - // phew! ready to put - System.out.println("WEB:cmd_put: inserting"); - - Hashtable result = node.putItem(metadata, data); - - System.out.println("WEB:cmd_put: got"+result); - - String status = (String)result.get("status"); - if (!status.equals("ok")) { - String errTxt = (String)result.get("error"); - if (result.containsKey("summary")) { - errTxt = errTxt + ":" + result.get("summary").toString(); - } - Template err = loadTemplate("puterror"); - err.setParam("error", (String)result.get("error")); - addToMainPane(err); - addToMainPane(dumpVars().toString()); - return; - } - - // success, yay! 
- Template success = loadTemplate("putok"); - success.setParam("uri", (String)result.get("uri")); - addToMainPane(success); - - //System.out.println("cmd_put: debug on page??"); - //addToMainPane(dumpVars().toString()); - } - - /** executes 'putsite' command */ - public void cmd_putsite() throws Exception { - - Hashtable metadata = new Hashtable(); - String privKey = allVars.get("privkey", 0, ""); - String name = allVars.get("name", 0, ""); - String dir = allVars.get("dir", 0, ""); - - // pick up optional metadata items - String [] keys = { - "title", "keywords", "abstract", - }; - - // extract all items from form, add to metadata ones that - // have non-zero length. - for (int i=0; i 0) { - metadata.put(key, allVars.get(key, 0)); - } - } - } - - //System.out.println("metadata="+metadata); - - if (metadata.size() == 0) { - cmd_putsite_error("No metadata!"); - return; - } - - // phew! ready to put - Hashtable result = node.insertQSite(privKey, name, dir, metadata); - String status = (String)result.get("status"); - if (!status.equals("ok")) { - cmd_putsite_error((String)result.get("error")); - return; - } - - // success, yay! - Template success = loadTemplate("putok"); - success.setParam("is_site", "1"); - success.setParam("uri", (String)result.get("uri")); - addToMainPane(success); - - //System.out.println("cmd_put: debug on page??"); - //addToMainPane(dumpVars().toString()); - } - - protected void cmd_putsite_error(String msg) throws Exception { - - Template err = loadTemplate("puterror"); - err.setParam("error", msg); - err.setParam("is_site", "1"); - addToMainPane(err); - addToMainPane(dumpVars().toString()); - } - - /** performs a search */ - public void cmd_search() throws Exception { - - Hashtable criteria = new Hashtable(); - String [] fields = { - "type", "title", "path", "mimetype", "keywords", - "summary", "searchmode" - }; - - for (int i=0; iGET and POST methods. 
- * @param request servlet request - * @param response servlet response - */ - protected void processRequest(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { - - findNode(); - determineIfNodeIsRunning(); - - Hashtable vars = parseVars(request.getQueryString()); - - response.setContentType("text/html"); - PrintWriter out = response.getWriter(); - out.println(""); - out.println(""); - out.println("QConsole"); - out.println(""); - out.println(""); - - out.println("

    Q Node Manager

    "); - - //out.println("debug: vars='"+vars+"'

    "); - - if (vars.containsKey("startnode") && !nodeIsRunning) { - startNode(); - if (!nodeIsRunning) { - out.println("Failed to start node :(

    "); - } - - } else if (vars.containsKey("stopnode") && nodeIsRunning) { - stopNode(); - nodeIsRunning = false; - } - - if (nodeIsRunning) { - out.println("Q Node is running

    "); - out.print("Node Console"); - out.print(" | "); - out.println("Stop Node"); - } else { - out.println("Q Node is not running

    "); - out.println("Start Node"); - } - - out.println(""); - out.println(""); - /* */ - out.close(); - } - - /** Handles the HTTP GET method. - * @param request servlet request - * @param response servlet response - */ - protected void doGet(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { - processRequest(request, response); - } - - /** Handles the HTTP POST method. - * @param request servlet request - * @param response servlet response - */ - protected void doPost(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { - processRequest(request, response); - } - - /** Returns a short description of the servlet. - */ - public String getServletInfo() { - return "Short description"; - } - - /** try to find node */ - public void findNode() { - - try { - nodeDirStr = System.getProperties().getProperty("user.home") - + sep + ".quartermaster_client"; - - // yay, found a node (we hope), create an xmlrpc client for talking - // to that node - String propPath = nodeDirStr + sep + "node.conf"; - File propFile = new File(propPath); - FileInputStream propIn = new FileInputStream(propPath); - Properties prop = new Properties(); - prop.load(propIn); - - nodePrivKey = prop.getProperty("privKey"); - - // presence of private key indicates node exists - nodeExists = nodePrivKey != null; - - } catch (Exception e) { - // node doesn't exist - } - - } - - public void startNode() { - - int i; - String [] jars = { - "i2p", "mstreaming", "aum", - }; - - String cp = ""; - - String jarsDir = "lib"; - - for (i=0; i 0) { - cp += cpsep; - } - cp += jarsDir + sep + jars[i] + ".jar"; - } - - System.out.println("cp='"+cp+"'"); - - // build up args - int nopts = options.size(); - String args = ""; - args += "java"; - for (Enumeration e = options.propertyNames(); e.hasMoreElements();) { - String opt = (String)e.nextElement(); - String arg = "-D" + opt + "=" + options.getProperty(opt); - 
System.out.println(arg); - args += " " + arg; - } - - args += " -cp " + cp; - args += " net.i2p.aum.q.QMgr"; - args += " foreground"; - - Runtime runtime = Runtime.getRuntime(); - - // and spawn the start job - try { - //runtime.exec(startForegroundArgs, propLines); - System.out.println("args='"+args+"'"); - runtime.exec(args, null); - } catch (IOException e) { - e.printStackTrace(); - } - - // wait a bit - sleep(3); - - // try for 10s to contact node - for (i=0; i<10; i++) { - sleep(1); - determineIfNodeIsRunning(); - if (nodeIsRunning) { - break; - } - } - } - - public void stopNode() { - - Vector args = new Vector(); - args.addElement(nodePrivKey); - try { - System.out.println("stopping node..."); - nodeProxy.execute("i2p.q.shutdown", args); - } catch (Exception e) { - - } - System.out.println("node terminated"); - } - - /** returns true if node is up */ - public void determineIfNodeIsRunning() { - try { - nodeProxy.execute("i2p.q.ping", new Vector()); - nodeIsRunning = true; - } catch (Exception e) { - nodeIsRunning = false; - return; - } - } - - public void sleep(int n) { - try { - Thread.sleep(n * 1000); - } catch (Exception e) {} - } - - public Hashtable parseVars(String raw) { - Hashtable h = new Hashtable(); - - if (raw == null) { - return h; - } - - URLDecoder u = new URLDecoder(); - String [] items = raw.split("[&]"); - String dec; - for (int i=0; i 0) { - if (!_path.startsWith("/")) { - _path = "/" + _path; - put("path", _path); - } - } - - // determine file extension - String [] bits = _path.split("/"); - String name = bits[bits.length-1]; - bits = name.split("\\.", 2); - ext = "." 
+ bits[bits.length-1]; - } - else { - // path is empty - set to '/.ext' where 'ext' is the - // file extension guessed from present mimetype value, and dataHash - // is a shortened hash of the content - String mime = (String)get("mimetype"); - if (mime == null) { - mime = "application/octet-stream"; - put("mimetype", mime); - } - - // determine file extension - ext = Mimetypes.guessExtension(mime); - - // and determine final path - _path = "/" + ((String)get("dataHash")).substring(0, 10) + ext; - put("path", _path); - } - - // ----------------------------------------- - // default the mimetype - if (!containsKey("mimetype")) { - String mimetype = Mimetypes.guessType(ext); - put("mimetype", mimetype); - } - - // ------------------------------------------ - // barf if contains mutually-exclusive signed space keys - if (containsKey("privateKey") && (containsKey("publicKey") || containsKey("signature"))) { - throw new QException("Metadata must NOT contain privateKey and one of publicKey or signature"); - } - - // ------------------------------------------ - // barf if exactly one of publicKey and signature are present - if (containsKey("publicKey") ^ containsKey("signature")) { - throw new QException("Either both or neither of 'publicKey' and 'signature' must be present"); - } - - // ----------------------------------------- - // now discern between plain hash items and - // signed space items - if (containsKey("privateKey") || containsKey("publicKey")) { - - DSAEngine dsa = DSAEngine.getInstance(); - - // process/validate remaining data in signed space context - - if (containsKey("privateKey")) { - // only private key given - uplift, remove, replace with public key - _privKey = new SigningPrivateKey(); - String priv64 = get("privateKey").toString(); - try { - _privKey.fromBase64(priv64); - } catch (Exception e) { - throw new QException("Invalid privateKey", e); - } - - // ok, got valid privateKey - - // expunge privKey from metadata, replace with publicKey - 
this.remove("privateKey"); - _pubKey = _privKey.toPublic(); - put("publicKey", _pubKey.toBase64()); - - // create and insert a signature - QUtil.debug("before sig, asSortedString give:\n"+asSortedString()); - - Signature sig = dsa.sign(asSortedString().getBytes(), _privKey); - String sigBase64 = sig.toBase64(); - put("signature", sigBase64); - } - else { - // barf if not both signature and pubkey present - if (!(containsKey("publicKey") && containsKey("signature"))) { - throw new QException("need both publicKey and signature"); - } - _pubKey = new SigningPublicKey(); - String pub64 = get("publicKey").toString(); - try { - _pubKey.fromBase64(pub64); - } catch (Exception e) { - throw new QException("Invalid publicKey", e); - } - } - - // now, whether we just signed or not, validate the signature/pubkey - byte [] thisAsBytes = asSortedString().getBytes(); - - String sig64 = get("signature").toString(); - Signature sig1 = new Signature(); - try { - sig1.fromBase64(sig64); - } catch (DataFormatException e) { - throw new QException("Invalid signature string", e); - } - - if (!dsa.verifySignature(sig1, thisAsBytes, _pubKey)) { - throw new QException("Invalid signature"); - } - - // last step - determine the correct URI - String pubHash = QUtil.hashPubKey(_pubKey); - uri = "Q:"+pubHash+_path; - - } // end of 'signed space' mode processing - else { - // ----------------------------------------------------- - // process/validate remaining data in plain hash context - String thisHashed = QUtil.sha64(asSortedString()); - uri = "Q:"+ thisHashed + ext; - - } // end of plain hash mode processing - - - // ----------------------------------------------------- - // final step - add or validate uri - if (containsKey("uri")) { - if (!get("uri").toString().equals(uri)) { - throw new QException("Invalid URI"); - } - } else { - put("uri", uri); - } - - } - - /** - * returns a filename under which this item should be stored - */ - public String getStoreFilename() throws QException { - if 
(!containsKey("uri")) { - throw new QException("Missing URI"); - } - return QUtil.sha64((String)get("uri")); - } - - /** - * Hashes this set of metadata, excluding any 'signature' key - * @return Base64 hash of metadata - */ - public String hashThisAsBase64() { - - return QUtil.sha64(asSortedString()); - } - - public byte [] hashThis() { - - return QUtil.sha(asSortedString()); - } - - /** - * alphabetise thie metadata to a single string, containing one - * 'key=value' entry per line. Excludes keys 'uri' and 'signature' - */ - public String asSortedString() { - - TreeSet t = new TreeSet(keySet()); - Iterator keys = t.iterator(); - int nkeys = t.size(); - int i; - String metaStr = ""; - for (i = 0; i < nkeys; i++) - { - String metaKey = (String)keys.next(); - if (!(metaKey.equals("signature") || metaKey.equals("uri"))) { - metaStr += metaKey + "=" + get(metaKey) + "\n"; - } - } - return metaStr; - } - -} - diff --git a/apps/q/java/src/net/i2p/aum/q/QException.java b/apps/q/java/src/net/i2p/aum/q/QException.java deleted file mode 100644 index fcb58aee0..000000000 --- a/apps/q/java/src/net/i2p/aum/q/QException.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * QException.java - * - * Created on April 6, 2005, 2:05 PM - */ - -package net.i2p.aum.q; - -import java.io.PrintStream; -import java.io.PrintWriter; - -/** - * Base class of Q exceptions - * @author jrandom (shamelessly rebadged by aum) - */ - -public class QException extends Exception { - private Throwable _source; - - public QException() { - this(null, null); - } - - public QException(String msg) { - this(msg, null); - } - - public QException(String msg, Throwable source) { - super(msg); - _source = source; - } - - public void printStackTrace() { - if (_source != null) _source.printStackTrace(); - super.printStackTrace(); - } - - public void printStackTrace(PrintStream ps) { - if (_source != null) _source.printStackTrace(ps); - super.printStackTrace(ps); - } - - public void printStackTrace(PrintWriter pw) { - if 
(_source != null) _source.printStackTrace(pw); - super.printStackTrace(pw); - } -} - diff --git a/apps/q/java/src/net/i2p/aum/q/QIndexFile.java b/apps/q/java/src/net/i2p/aum/q/QIndexFile.java deleted file mode 100644 index 0df2411a4..000000000 --- a/apps/q/java/src/net/i2p/aum/q/QIndexFile.java +++ /dev/null @@ -1,230 +0,0 @@ -/* - * QIndexFile.java - * - * Created on March 24, 2005, 11:55 AM - */ - -package net.i2p.aum.q; - -import java.io.File; -import java.io.FileReader; -import java.io.FileWriter; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.util.Date; -import java.util.Iterator; - -/** - *

    Implements a binary-searchable file for storing (time, hash) records. - * This makes it faster for server nodes to determine which content entries, - * catalog entries and peer entries have appeared since time t.

    - * - *

    To ease inter-operation with other programs, as well as human troubleshooting, - * The file is implemented as a plain text file, with records in the - * following format: - *

      - *
    • time unixtime, as 10-byte decimal string
    • - *
    • = single-char delimiter
    • - *
    • hash - a 44-byte Base64 representation of an sha256 hash
    • - *
    - *

    - */ -public class QIndexFile { - - public String path; - File fileObj; - RandomAccessFile file; - public long rawLength; - public int numRecs; - FileReader reader; - FileWriter writer; - - /** length of base64 representation of sha256 hash */ - static public int hashLen = 43; - - /** length of unixtime milliseconds in decimal format */ - static public int timeLen = 13; - - /** - * length of records, allowing for time field, delimter (,), - * hash field and terminating newline - */ - static public int recordLen = hashLen + timeLen + 2; - - /** - * Create a new index file - * @param path absolute pathname on filesystem - */ - public QIndexFile(String path) throws IOException { - this.path = path; - fileObj = new File(path); - - // if file doesn't exist, ensure parent dir exists, so subsequent - // file creation will (hopefully) succeed - if (!fileObj.exists()) - { - // create parent directory if not already existing - String parentDir = fileObj.getParent(); - File parentFile = new File(parentDir); - if (!parentFile.isDirectory()) - { - parentFile.mkdirs(); - } - } - - // get a random access object, creating file if not yet existing - file = new RandomAccessFile(fileObj, "rws"); - - // barf if file's length is not a multiple of record length - rawLength = file.length(); - if (rawLength % recordLen != 0) { - throw new IOException("File size not a multiple of record length ("+recordLen+")"); - } - - // note record count - numRecs = (int)(rawLength / recordLen); - } - - /** - * fetch an iterator for items after a given time - */ - public synchronized Iterator getItemsSince(int time) throws IOException - { - //System.out.println("getItemsSince: time="+time); - - // if no records, return an empty iterator - if (numRecs == 0) - { - return new QIndexFileIterator(this, 0); - } - - // otherwise, binary search till we find an item time-stamped - // after given time - long mtime = ((long)time) * 1000; - int lo = 0; - int hi = numRecs; - int lastguess = -1; - while (hi - lo 
> 0) - { - int guess = (hi + lo) / 2; - //System.out.println("getItemsSince: lo="+lo+" guess="+guess+" hi="+hi); - if (guess == lastguess) // && hi - lo == 1) - { - break; - } - lastguess = guess; - - Object [] rec = getRecord(guess); - long t = ((Long)rec[0]).longValue(); - if (t <= mtime) - { - // guess too low, go for upper range - lo = guess; - continue; - } - else - { - // guess too high, pick lower range - hi = guess; - continue; - } - } - - // found - return new QIndexFileIterator(this, hi); - } - - /** - * adds a new base64 hash value record, saving it with current time - */ - public synchronized void add(String h) throws IOException - { - // barf if hash is incorrect length - if (h.length() != hashLen) - { - System.out.println("hash="+h); - throw new IOException("Incorrect hash length ("+h.length()+"), should be "+hashLen); - } - - // format current date/time as decimal string, pad with leading zeroes - Date d = new Date(); - String ds = String.valueOf(d.getTime()); - while (ds.length() < timeLen) - { - ds = "0" + ds; - } - - // now can construct record - String rec = ds + "," + h + "\n"; - - // append it to file - file.seek(numRecs * recordLen); - file.writeBytes(rec); - - // and update count - numRecs += 1; - rawLength += recordLen; - } - - public long getRecordTime(int n) throws IOException - { - Object [] rec = getRecord(n); - - return ((Long)rec[0]).longValue(); - } - - /** return number of records currently within file */ - public int length() - { - return numRecs; - } - - /** - * returns the hash field of record n - */ - public String getRecordHash(int n) throws IOException - { - Object [] rec = getRecord(n); - return (String)rec[1]; - } - - public synchronized Object [] getRecord(int n) throws IOException - { - Object [] rec = new Object[2]; - - String recStr = getRecordStr(n); - String [] flds = recStr.split(","); - Long t = new Long(flds[0]); - String h = flds[1]; - rec[0] = t; - rec[1] = h; - return rec; - } - - protected synchronized String 
getRecordStr(int n) throws IOException - { - // barf if over or under-reaching - if (n < 0 || n > numRecs - 1) - { - throw new IOException("Record number ("+n+") out of range"); - } - - // position to location of the record - file.seek(n * recordLen); - - // read, trim and return - return file.readLine().trim(); - } - - /** - * @param args the command line arguments - */ - public static void main(String[] args) { - try { - QIndexFile q = new QIndexFile("/home/david/.quartermaster_client/content/index.dat"); - Iterator i = q.getItemsSince((int)(new Date().getTime() / 1000)); - } catch (Exception e) { - e.printStackTrace(); - } - } -} diff --git a/apps/q/java/src/net/i2p/aum/q/QIndexFileIterator.java b/apps/q/java/src/net/i2p/aum/q/QIndexFileIterator.java deleted file mode 100644 index 95a7df845..000000000 --- a/apps/q/java/src/net/i2p/aum/q/QIndexFileIterator.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * QIndexFileIterator.java - * - * Created on March 24, 2005, 1:49 PM - */ - -package net.i2p.aum.q; - -import java.util.Iterator; -import java.util.NoSuchElementException; - -/** - * Implements an Iterator for index files - */ -public class QIndexFileIterator implements Iterator -{ - public QIndexFile file; - int recNum; - - /** Creates an iterator starting from beginning of index file */ - public QIndexFileIterator(QIndexFile qif) - { - this(qif, 0); - } - - /** Creates a new instance of QIndexFileIterator */ - public QIndexFileIterator(QIndexFile qif, int recNum) - { - file = qif; - this.recNum = recNum; - } - - public boolean hasNext() - { - return recNum < file.length(); - } - - public Object next() throws NoSuchElementException - { - String rec; - try { - rec = file.getRecordHash(recNum); - } - catch (Exception e) { - throw new NoSuchElementException("Reached end of index"); - } - recNum += 1; - return rec; - } - - public void remove() - { - } - -} - diff --git a/apps/q/java/src/net/i2p/aum/q/QKademliaComparator.java 
b/apps/q/java/src/net/i2p/aum/q/QKademliaComparator.java deleted file mode 100644 index 2b949b08d..000000000 --- a/apps/q/java/src/net/i2p/aum/q/QKademliaComparator.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * QKademliaComparator.java - * - * Created on March 30, 2005, 12:30 PM - */ - -package net.i2p.aum.q; - -import java.math.BigInteger; -import java.util.Comparator; - -/** - * implements a Comparator class which compares two QPeerRec objects - * for kademlia-closeness to a given base64 sha hash value - */ -public class QKademliaComparator implements Comparator { - - QNode node; - BigInteger hashed; - - /** - * Creates a kademlia comparator, which given a base64 sha256 hash - * of something, can compare two nodes for their kademlia-closeness to - * that hash - * @param node a QNode object - needed for access to its base64 routines - * @param base64hash - string - a base64 representation of the sha256 hash - * of anything - */ - public QKademliaComparator(QNode node, String base64hash) { - - this.node = node; - hashed = new BigInteger(node.base64Dec(base64hash).getBytes()); - } - - /** - * compares two given QPeerRec objects for how close each one's ID - * is to the stored hash - */ - public int compare(Object o1, Object o2) { - - QPeer peer1 = (QPeer)o1; - QPeer peer2 = (QPeer)o2; - - String id1 = peer1.getId(); - String id2 = peer2.getId(); - - BigInteger i1 = new BigInteger(id1.getBytes()); - BigInteger i2 = new BigInteger(id2.getBytes()); - - BigInteger xor1 = i1.xor(hashed); - BigInteger xor2 = i2.xor(hashed); - - return xor1.compareTo(xor2); - } - -} - diff --git a/apps/q/java/src/net/i2p/aum/q/QMgr.java b/apps/q/java/src/net/i2p/aum/q/QMgr.java deleted file mode 100644 index a9e048a64..000000000 --- a/apps/q/java/src/net/i2p/aum/q/QMgr.java +++ /dev/null @@ -1,927 +0,0 @@ -/* - * QLaunch.java - * - * Created on March 30, 2005, 10:09 PM - */ - -package net.i2p.aum.q; - -import java.io.BufferedReader; -import java.io.ByteArrayOutputStream; -import 
java.io.File; -import java.io.IOException; -import java.io.InputStreamReader; -import java.util.Enumeration; -import java.util.Hashtable; -import java.util.Properties; -import java.util.Vector; - -import net.i2p.aum.I2PXmlRpcClientFactory; -import net.i2p.aum.PropertiesFile; -import net.i2p.aum.SimpleFile; -import net.i2p.data.Destination; - -import org.apache.xmlrpc.XmlRpcClient; - -/** - *

    Command Line Interface (CLI) for starting/stopping Q nodes, - * and also, executing commands on Q nodes such as inserting, retrieving - * and searching for content.

    - * - *

    Commands include: - *

      - *
    • Start a server or client Node
    • - *
    • Stop a server or client Node
    • - *
    • Get status of a server or client Node
    • - *
    • Export a server node's dest
    • - *
    • Import a foreign dest to a server or client node
    • - *
    • Insert a file to a client node, with metadata
    • - *
    • Retrieve data/metadata from a client node
    • - *
    • Search a client node for content
    • - */ -public class QMgr { - - public Runtime runtime; - public XmlRpcClient node; - public String nodePrivKey; - public String nodeDest; - public String nodeDirStr; - public File nodeDir; - public boolean isServer = false; - - public String [] args; - public String cmd; - public int cmdIdx; - public int argc; - public int argi; - - public static String [] commonI2PSystemPropertyKeys = { - "i2cp.tcp.host", - "i2cp.tcp.port", - "eepproxy.tcp.host", - "eepproxy.tcp.port", - "q.xmlrpc.tcp.host", - "q.xmlrpc.tcp.port", - "inbound.length", - "outbound.length", - "inbound.lengthVariance", - "outbound.lengthVariance", - }; - - /** Creates a new instance of QLaunch */ - public QMgr() { - } - - public void notimplemented() { - usage(1, "Command '"+cmd+"' not yet implemented, sorry"); - } - - /** procures an XML-RPC client for node interaction */ - public void getXmlRpcClient() { - - - } - - public int doHelp() { - if (argi == argc) { - // output short help - System.out.println( - "I2P QMgr - Brief command summary:\n" - +"Synopsis:" - +" java net.i2p.aum.q.QMgr [-dir ] [server] [ []]\n" - +"Commands:\n" - +" help - print this help summary\n" - +" help verbose - print detailed verbose usage info\n" - +" start - start a node in background\n" - +" foreground - run a node in foreground\n" - +" stop - terminate node\n" - +" status - display node status\n" - +" getref [] - output the node's noderef (its base64 dest)\n" - +" addref [] - add one or more node refs to node\n" - +" get key [] - get key to stdout (or to file)\n" - +" put [] [-m ] - insert content\n" - +" search item1=val1 item2=val2... 
- search for content\n" - ); - } - else if (args[argi].equals("verbose")) { - System.out.println( - "----------------------------\n" - +"Welcome to the I2P Q network\n" - +"----------------------------\n" - +"\n" - +"This program, QMgr, is a command-line interface to the Q network,\n" - +"(an in-I2P distributed file store)\n" - +"and allows you to perform basic operations, including:\n" - +"\n" - +" - create, startup and shutdown Q server and client nodes\n" - +" - determine status of running Q nodes\n" - +" - import and export noderefs to/from these nodes\n" - +" - search for, insert and retrieve content\n" - +"\n" - +"Command syntax:\n" - +" java net.i2p.aum.q.QMgr [-dir ] [-port ] [server] [ []]\n" - +"\n" - +"Explanation of commands and arguments:" - +"\n" - +"* 'server'\n" - +" Specifies that we're operating on a server node (otherwise it's\n" - +" assumed we're operating on a client node)\n" - +"\n" - +"* '-dir='\n" - +" Server nodes by default reside at ~/.quartermaster_server,\n" - +" and client nodes at ~/.quartermaster_client.\n" - +" Nodes are uniquely identified by the directory at which they\n" - +" reside. Specifying this argument allows you to operate on a\n" - +" server or client node which resides at a different location\n" - +"\n" - +"* '-port='\n" - +" Applies to client nodes only. Valid only for startup command.\n" - +" Permanently changes the port on which a given client listens\n" - +" for cmmands.\n" - +"\n" - +"* Commands - the basic commands are:\n" - +"\n" - +" help\n" - +" - display a help summary\n" - +"\n" - +" help verbose\n" - +" - display this long-winded help\n" - +"\n" - +" start\n" - +" - start the node. 
If a nonexistent directory path is given,\n" - +" a whole new unique server or client node will be created\n" - +" at that path\n" - +"\n" - +" foreground\n" - +" - as for start, but run the server in foreground rather\n" - +" than as a background daemon\n" - +"\n" - +" stop\n" - +" - shutdown the node\n" - +"\n" - +" status\n" - +" - print a dump of node status and statistics to stdout\n" - +"\n" - +" newkeys\n" - +" - generate and print out a new keypair for signed-space\n" - +" data item inserts\n" - +"\n" - +" getref []\n" - +" - print the node's noderef (its base64 destination) to\n" - +" stdout. If arg is given, writes the destination\n" - +" to this file instead.\n" - +"\n" - +" addref []\n" - +" - add one or more noderefs to the node. If [] argument\n" - +" is given, the refs are read from this file, which is expected\n" - +" to contain one base64 destination per line\n" - +"\n" - +"The following commands are only valid for client nodes:\n" - +"\n" - +" get []\n" - +" - Try to retrieve a content item, (identified by ), from the\n" - +" node. If the item is retrieved, its raw data will be printed\n" - +" to stdout, or to if given. NOTE - REDIRECTING TO STDOUT\n" - +" IS PRESENTLY UNRELIABLE, SO SPECIFY AN EXPLICIT FILENAME FOR NOW\n" - +"\n" - +" put [] [-m item=val ...]\n" - +" - Inserts an item of content to the node, and prints its key to\n" - +" stdout. Reads content data from if given, or from standard\n" - +" input if not. 
Metadata arguments may be given as '-m' followed by\n" - +" a space-separated sequence of 'item=value' specifiers.\n" - +" Typical metadata items include:\n" - +" - type (one of text/html/image/audio/video/archive)\n" - +" - title - a short (<80 char) descriptive title\n" - +" - filename - a recommended filename under which to store this\n" - +" item on retrieve.\n" - +" - abstract - a longer (<256 char) summary of content\n" - +" - keywords - a comma-separated list of keywords\n" - +"\n" - +" search -m item=val [ item=val ...]\n" - +" - searches node for content matching a set of metadata criteria\n" - +" each 'item=val' specifies an 'item' of metadata, to be matched\n" - +" against regular expression 'val'. For example:\n" - +" java net.i2p.aum.q.QMgr search -m title=\"^Madonna\" type=\"music\"\n" - ); - } - else { - System.out.println( - "Unrecognised help qualifier '"+args[argi]+"'\n" - +"type 'java net.i2p.aum.q.QMgr help' for more info" - ); - } - return 0; - } - - public int doStart() { - //notimplemented(); - - String [] startForegroundArgs; - int i; - - // Detect/add any '-D' settings - // search our list of known i2p-relevant sysprops, detect - // if they've been set in System properties, and if so, copy - // them to a customProps table - Hashtable customProps = new Hashtable(); - Properties sysprops = System.getProperties(); - for (i=0; i= argc || !args[argi].equals("-m")) { - usage("Bad put command syntax"); - } - - // now skip over the '-m' - argi++; - - metadata = readMetadataSpec(); - } - - byte [] data = null; - - if (path != null) { - // easy way - suck the file or barf - try { - data = new SimpleFile(path, "r").readBytes(); - } catch (IOException e) { - e.printStackTrace(); - usage("get: Failed to read input file '"+path+"'"); - } - } - else { - // the crap option - suck it from stdin - // read lines from stdin - ByteArrayOutputStream bo = new ByteArrayOutputStream(); - int c; - try { - while (true) { - c = System.in.read(); - if (c < 0) { - 
break; - } - bo.write(c); - } - } catch (Exception e) { - e.printStackTrace(); - usage("put: error reading from input stream"); - } - - data = bo.toByteArray(); - } - - // ok, got data (and possibly metadata too) - Vector putArgs = new Vector(); - Hashtable res; - putArgs.addElement(metadata); - putArgs.addElement(data); - - System.out.println("data length="+data.length); - - try { - res = (Hashtable)node.execute("i2p.q.putItem", putArgs); - } catch (Exception e) { - e.printStackTrace(System.err); - System.err.println("Failed to put"); - return 1; - } - - // got a res - String status = (String)res.get("status"); - if (!status.equals("ok")) { - String error = (String)res.get("error"); - usage("put: failure - "+error); - } - - // success - String key = (String)res.get("key"); - System.out.print(key); - System.out.flush(); - - return 0; - } - - public int doNewKeys() { - - System.err.println("Generating new signed-space keypair..."); - - String [] keys = QUtil.newKeys(); - System.out.println("Public: "+keys[0]); - System.out.println("Private: "+keys[1]); - - return 0; - } - - public int doSearch() { - - if (argi == argc) { - usage("Missing search metadata"); - } - - // expect -m, or error - if (argi >= argc || !args[argi].equals("-m")) { - usage("Bad search command syntax"); - } - - // now skip over the '-m' - argi++; - - if (argi == argc) { - usage("Missing search metadata"); - } - - Hashtable metadata = readMetadataSpec(); - - // ok, got data (and possibly metadata too) - Vector searchArgs = new Vector(); - Hashtable res; - searchArgs.addElement(metadata); - try { - res = (Hashtable)node.execute("i2p.q.search", searchArgs); - } catch (Exception e) { - e.printStackTrace(System.err); - System.err.println("Failed to search"); - return 1; - } - - // got a res - String status = (String)res.get("status"); - if (!status.equals("ok")) { - String error = (String)res.get("error"); - usage("search: failure - "+error); - } - - // success - Vector items = 
(Vector)res.get("items"); - - //System.out.println(items); - - for (int i=0; i] [server] [cmd [args]]\n" - +"Type 'java net.i2p.aum.q.QMgr help' for help summary\n" - +"or 'java net.i2p.aum.q.QMgr help verbose' for long-winded help" - ); - System.exit(retval); - return 0; // stop silly compiler from whingeing - } - - /** - * Startup a Q server or client node, or send a command to a running node - * @param args the command line arguments - */ - public static void main(String[] args) { - QMgr mgr = new QMgr(); - int retval = mgr.execute(args); - System.exit(retval); - } - -} - diff --git a/apps/q/java/src/net/i2p/aum/q/QNode.java b/apps/q/java/src/net/i2p/aum/q/QNode.java deleted file mode 100644 index bb4a29c2a..000000000 --- a/apps/q/java/src/net/i2p/aum/q/QNode.java +++ /dev/null @@ -1,1976 +0,0 @@ -/* - * QNode.java - * - * Created on 20 March 2005, 23:27 - */ - -package net.i2p.aum.q; - -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.FileWriter; -import java.io.IOException; -import java.io.InputStream; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; -import java.io.RandomAccessFile; -import java.util.Calendar; -import java.util.Collections; -import java.util.Date; -import java.util.Enumeration; -import java.util.Hashtable; -import java.util.Iterator; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.Properties; -import java.util.Vector; -import java.util.jar.JarEntry; -import java.util.jar.JarFile; - -import net.i2p.I2PAppContext; -import net.i2p.I2PException; -import net.i2p.aum.EmbargoedQueue; -import net.i2p.aum.I2PXmlRpcClient; -import net.i2p.aum.I2PXmlRpcClientFactory; -import net.i2p.aum.I2PXmlRpcServer; -import net.i2p.aum.PrivDestination; -import net.i2p.aum.PropertiesFile; -import net.i2p.aum.SimpleFile; -import net.i2p.aum.SimpleSemaphore; -import 
net.i2p.client.I2PClient; -import net.i2p.client.I2PClientFactory; -import net.i2p.data.DataFormatException; -import net.i2p.data.Destination; - -import org.apache.xmlrpc.XmlRpcException; - -//import gnu.crypto.hash.*; - - -/** - * Base class for Quartermaster nodes. Contains mechanisms for local datastore - * and - * - */ -public abstract class QNode extends Thread -{ - - /** get an i2p context */ - public I2PAppContext i2p; - - // XML-RPC service name base - public static String baseXmlRpcServiceName = "i2p.q"; - - // generator of XML-RPC client objects - public I2PXmlRpcClientFactory peerInterfaceGen; - - // directory requirements - public static String [] coreSubDirs = { "peers", "content", "locations", "catalog", "jobs"}; - public static String [] extraSubDirs = {}; - - // thread pooling - public static int defaultMaxThreads = 3; - protected SimpleSemaphore threadPool; - protected EmbargoedQueue jobQueue; - - // directory paths of this node - - /** base path of our datastore directory */ - public String dataDir; - - /** subdirectory of peers records */ - public String peersDir; - - /** index file of peers */ - public QIndexFile peersIdx; - - /** subdirectory of catalog records */ - public String catalogDir; - - /** subdirectory of catalog location records */ - public String locationDir; - - /** index file of peers */ - public QIndexFile catalogIdx; - - /** subdirectory of content and metadata items */ - public String contentDir; - - /** directory where resources live */ - public String resourcesDir; - - /** index file of peers */ - public QIndexFile contentIdx; - - /** subdirectory of job records */ - public String jobsDir; - - /** index file of jobs */ - public QIndexFile jobsIdx; - - /** private key, as base64 string */ - public String privKeyStr; - - /** public dest, as base64 string */ - public String destStr; - - /** our own node ID - SHA1(dest) */ - public String id; - - /** our own node private key */ - public PrivDestination privKey; - - /** our own 
destination */ - public Destination dest; - - /** general node config properties */ - public PropertiesFile conf; - - /** path of node's config file */ - public String configPath; - - /** convenience */ - public static String sep = File.separator; - - public I2PXmlRpcServer xmlRpcServer; - - /** map of all known peers */ - public Hashtable peers; - - /** - * override in subclass - */ - public static String defaultStoreDir = ".quartermaster"; - - // status attributes - /** time node got online */ - public Date nodeStartTime; - - // logging file - public RandomAccessFile logFile; - public net.i2p.util.Log log; - - public static int updateCatalogFromPeers = 0; - - public boolean isClient = false; - - public double load_yPrev = 0.0; - public long load_tPrev = 0; - public double load_kRise = 10.0; - public double load_kFall = 800000.0; - - public int load_backoffMin = 180; - public int load_backoffBits = 13; - public double load_backoffBase = 3.0; - - // client only - public String xmlRpcServerHost = ""; - public int xmlRpcServerPort = 7651; - public static int defaultXmlRpcServerPort = 7651; - - /** Number of pending content uploads. You should never shut down a - * node while this value is non-zero. 
You can get the current value - * of this via a node 'ping' command - */ - public int numPendingUploads = 0; - - /** unixtime in millisecs of last incoming xml-rpc hit to this node, used - * in calculating node load - */ - - public String nodeType = "(base)"; - - public boolean isRunning; - - // ---------------------------------------------------------- - // CONSTRUCTORS - // ---------------------------------------------------------- - - /** - * Creates a new QNode instance, with store tree located - * at default location - */ - public QNode() throws IOException, DataFormatException, I2PException - { - this(System.getProperties().getProperty("user.home") + sep + defaultStoreDir); - log.info("Constructor finished"); - } - - /** - * Creates a Q node, using specified datastore directory - * @param dataDir absolute pathname where this server's datastore tree is - * located. If tree doesn't exist, it will be created along with new keys - */ - public QNode(String dataDir) throws IOException, DataFormatException, I2PException - { - // establish ourself as a thread - super(); - - setupStoreTree(dataDir); - getConfig(); - peerInterfaceGen = new I2PXmlRpcClientFactory(); - - // determine threads limit - int maxThreads = defaultMaxThreads; - String maxThreadsStr = System.getProperty("qnode.maxthreads"); - if (maxThreadsStr != null) - { - try { - maxThreads = Integer.getInteger(maxThreadsStr).intValue(); - } catch (Exception e) { - e.printStackTrace(); - log.error("Invalid qnode.maxThreads setting '"+maxThreadsStr+"'"); - } - } - - // set up thread pool and job queue - threadPool = new SimpleSemaphore(maxThreads); - jobQueue = new EmbargoedQueue(); - - // load all known peers into peers table - loadPeers(); - - // for benefit of subclasses - //System.out.println("Invoking setup, isClient="+isClient); - setup(); - System.out.println("after setup, isClient="+isClient); - - // queue up the first lot of jobs - scheduleStartupJobs(); - - // now launch our background - 
//log.info("launching background engine"); - //start(); - - } - - public void loadPeers() throws IOException - { - // populate job queue with jobs for all known servers - // man, doesn't it feel good to eat up memory by the gigabyte!! :P - Iterator peerIds = peersIdx.getItemsSince(0); - QPeer peerRec; - peers = new Hashtable(); - while (peerIds.hasNext()) - { - String peerId = (String)peerIds.next(); - try { - peerRec = getPeerRecord(peerId); - } catch (Exception e) { - log.error("Failed to load peer '"+peerId+"'", e); - continue; - } - peers.put(peerId, peerRec); - } - } - - // -------------------------------------------- - // XML-RPC FRONT-END - // -------------------------------------------- - - /** - *

      Sets up and launches an xml-rpc server for servicing requests - * to this node.

      - *

      For server nodes, the xml-rpc server listens within I2P on the - * node's destination.

      - *

      For client nodes, the xml-rpc server listens on a local TCP - * port (according to attributes xmlRpcServerHost and xmlRpcServerPort)

      - */ - public abstract void startExternalInterfaces(QServerMethods methods) - throws Exception; - - - // -------------------------------------------- - // XML-RPC BACKEND - // -------------------------------------------- - - /** - * Dispatches a XML-RPC call to remote peer - */ - public Hashtable peerExecute(String peerId, String name, Vector args) - throws XmlRpcException, IOException, DataFormatException - { - // get peer record - QPeer peerRec = getPeerRecord(peerId); - - // need peer's dest - String dest64 = peerRec.destStr; - - // need xmlrpc client obj - log.debug("peerExecute: name="+name+", id="+peerId+", dest="+dest64); - - I2PXmlRpcClient client = peerInterfaceGen.newClient(dest64); - - // execute the request - Object result = client.execute(baseXmlRpcServiceName+"."+name, args); - - // ensure it's a hashtable - if (!result.getClass().isAssignableFrom(Hashtable.class)) { - throw new XmlRpcException(0, "Expected Hashtable in peer reply"); - } - - // all ok - return (Hashtable)result; - } - - // -------------------------------------- - // METHODS - initialisation - // -------------------------------------- - - /** perform mode-specific setup - overridden in subclasses */ - public void setup() throws DataFormatException, I2PException - { - } - - /** - * Checks the store directory tree, creating any missing - * directories - */ - public void setupStoreTree(String dataDir) throws IOException - { - this.dataDir = dataDir; - int i; - File rootDir = new File(dataDir); - - // ensure parent exists - if (!rootDir.isDirectory()) { - rootDir.mkdirs(); - } - String logPath = dataDir + sep + "node.log"; - - // set up node-specific logger - Properties envProps = new Properties(); - envProps.setProperty("loggerFilenameOverride", logPath); - - //i2p = new I2PAppContext(envProps); - i2p = I2PAppContext.getGlobalContext(); - - log = i2p.logManager().getLog(this.getClass()); - - //System.out.println("HASHTEST1: "+sha256Base64("hello, one, two three".getBytes())); - 
//System.out.println("BASE64TEST1: "+base64Enc("hello, one two three")); - //byte [] shit = {39,-20,54,-93,-19,-33,-61,65,-91,-85, - // -19,25,-31,-81,20,-125,26,92,-51,-100,83,43,38,58,77,72,3,40,-78,-62,79,0, - //}; - //System.out.println("BASE64TEST2: "+base64Enc(shit)); - - log.setMinimumPriority(log.DEBUG); - - log.info("creating server at directory "+dataDir); - - /** - if (!logFileObj.isFile()) { - logFileObj.createNewFile(); - } - System.out.println("Created logfile at "+logPath); - logFile = new RandomAccessFile(logFileObj, "rws"); - */ - - // create core subdirectories - for (i = 0; i < coreSubDirs.length; i++) - { - String subdir = dataDir + sep + coreSubDirs[i]; - File d = new File(subdir); - if (!d.isDirectory()) - { - log.info("Creating datastore subdirectory '"+subdir+"'"); - if (!d.mkdirs()) - { - throw new IOException("Failed to create directory "+subdir); - } - } - } - - // create supplementary subdirectories - for (i = 0; i < extraSubDirs.length; i++) - { - String subdir = dataDir + sep + extraSubDirs[i]; - File d = new File(subdir); - if (!d.isDirectory()) - { - log.info("Creating supplementary datastore subdir '"+subdir+"'"); - if (!d.mkdirs()) - { - throw new IOException("Failed to create directory "+subdir); - } - } - } - - // store pathnames of core subdirectories - peersDir = dataDir + sep + "peers"; - peersIdx = new QIndexFile(peersDir + sep + "index.dat"); - - catalogDir = dataDir + sep + "catalog"; - catalogIdx = new QIndexFile(catalogDir + sep + "index.dat"); - locationDir = dataDir + sep + "locations"; - - contentDir = dataDir + sep + "content"; - contentIdx = new QIndexFile(contentDir + sep + "index.dat"); - - jobsDir = dataDir + sep + "jobs"; - jobsIdx = new QIndexFile(jobsDir + sep + "index.dat"); - - // extract resources directory from jarfile (or wherever) - getResources(); - - } - - public void getConfig() throws IOException, DataFormatException, I2PException - { - // create a config object, and stick in any missing defaults - 
String confPath = dataDir + sep + "node.conf"; - conf = new PropertiesFile(confPath); - - // generate a new dest, if one doesn't already exist - privKeyStr = conf.getProperty("privKey"); - if (privKeyStr == null) - { - // need to generate whole new config - log.info("No private key found, generating new one"); - - ByteArrayOutputStream privBytes = new ByteArrayOutputStream(); - I2PClient client = I2PClientFactory.createClient(); - - // save attributes - dest = client.createDestination(privBytes); - privKey = new PrivDestination(privBytes.toByteArray()); - privKeyStr = privKey.toBase64(); - destStr = dest.toBase64(); - - // save out keys to files - String privKeyPath = dataDir + sep + "nodeKey.priv"; - SimpleFile.write(privKeyPath, privKey.toBase64()); - String destPath = dataDir + sep + "nodeKey.pub"; - SimpleFile.write(destPath, dest.toBase64()); - - // now we can figure out our own node ID - id = destToId(dest); - - // and populate our stored config - conf.setProperty("dest", dest.toBase64()); - conf.setProperty("privKey", privKey.toBase64()); - conf.setProperty("id", id); - conf.setProperty("numPeers", "0"); - conf.setDoubleProperty("loadDampRise", load_kRise); - conf.setDoubleProperty("loadDampFall", load_kFall); - conf.setIntProperty("loadBackoffMin", load_backoffMin); - conf.setIntProperty("loadBackoffBits", load_backoffBits); - - // these items only relevant to client nodes - conf.setIntProperty("xmlRpcServerPort", xmlRpcServerPort); - - log.info("Saved new keys, and nodeID " + id); - } - else - { - // already got a config, load it - //System.out.println("loading config"); - dest = new Destination(); - dest.fromBase64(conf.getProperty("dest")); - destStr = dest.toBase64(); - privKey = PrivDestination.fromBase64String(conf.getProperty("privKey")); - privKeyStr = privKey.toBase64(); - id = conf.getProperty("id"); - load_kRise = conf.getDoubleProperty("loadDampRise", load_kRise); - load_kFall = conf.getDoubleProperty("loadDampFall", load_kFall); - 
load_backoffMin = conf.getIntProperty("loadBackoffMin", load_backoffMin); - load_backoffBits = conf.getIntProperty("loadBackoffBits", load_backoffBits); - - // these items only relevant to client nodes - xmlRpcServerPort = conf.getIntProperty("xmlRpcServerPort", xmlRpcServerPort); - - //System.out.println("our privkey="+privKeyStr); - if (privKeyStr == null) { - privKeyStr = conf.getProperty("privKey"); - //System.out.println("our privkey="+privKeyStr); - } - } - } - - /** - * Copies resources from jarfile (or wherever) into datastore dir. - * Somwhat of a kludge which determines if the needed resources - * reside within a jarfile or on the host filesystem. - * If the resources live in a jarfile, we extract them and - * copy them into the 'resources' subdirectory of our datastore - * directory. If they live in a directory on the host filesystem, - * we configure the node to access the resources directly from that - * directory instead. - */ - public void getResources() throws IOException { - - String resPath = dataDir + sep + "resources"; - File resDir = new File(resPath); - ClassLoader cl = this.getClass().getClassLoader(); - String jarPath = cl.getResource("qresources").getPath(); - System.out.println("jarPath='"+jarPath+"'"); - if (jarPath.startsWith("jar:")) { - jarPath = jarPath.split("jar:")[1]; - } - - if (jarPath.startsWith("file:")) { - jarPath = jarPath.split("file:")[1]; - } - int bangIdx = jarPath.indexOf("!"); - //System.out.println("jarPath='"+jarPath+"' bangIdx="+bangIdx); - if (bangIdx > 0) { - jarPath = jarPath.substring(0, bangIdx); - } - - if (!jarPath.endsWith(".jar")) { - - // easy - found a directory with our resources - resourcesDir = jarPath; - System.out.println("Found physical resources dir: '"+resourcesDir+"'"); - return; - } - System.out.println("jarPath='"+jarPath+"'"); - - // harder case - create resources dir, copy across resources - if (!resDir.isDirectory()) { - resDir.mkdirs(); - } - resourcesDir = resDir.getPath(); - - JarFile jf 
= new JarFile(jarPath); - Enumeration jfe = jf.entries(); - Vector entlist = new Vector(); - while (jfe.hasMoreElements()) { - JarEntry ent = (JarEntry)jfe.nextElement(); - String name = ent.getName(); - if (name.startsWith("qresources") && !ent.isDirectory()) { - entlist.addElement(name); - System.out.println("Need to extract resource: "+name); - String absPath = resDir.getPath() + sep + name.split("qresources/")[1]; - File absFile = new File(absPath); - File parent = absFile.getParentFile(); - if (!parent.isDirectory()) { - parent.mkdirs(); - } - // finally, can create and copy the file - FileWriter fw = new FileWriter(absFile); - InputStream is = cl.getResourceAsStream(name); - int c; - while ((c = is.read()) >= 0) { - fw.write(c); - } - fw.close(); - } - } - } - - /** - * given a 'logical resource path', such as 'html/page.html', - * returns an absolute pathname on the host filesystem of - * the needed file - */ - public String getResourcePath(String name) { - return resourcesDir + sep + name; - } - - // -------------------------------------- - // METHODS - scheduling and traffic control - // - // Background processing depends on node type: - // - all nodes: - // - peer list synchronisation - // - client nodes - // - catalog synchronisation - // - content insertion, triggered by local - // insertion - // - server nodes - // - content insertion, triggered by above-threshold - // demand from clients - // - // All background jobs are scheduled on a queue of - // timed jobs (using an EmbargoedQueue), and picked off - // and passed to background threads. 
- // -------------------------------------- - - // -------------------------------------------- - // HIGH-LEVEL TASK-SPECIFIC JOB SCHEDULING METHODS - // -------------------------------------------- - - public void scheduleStartupJobs() - { - Iterator peerRecs = peers.values().iterator(); - while (peerRecs.hasNext()) { - QPeer peerRec = (QPeer)peerRecs.next(); - - // also, while we're here, schedule a 'getUpdate' update job - schedulePeerUpdateJob(peerRec); - } - - System.out.println("scheduleStartupJobs: cRetrieve an item of content.

      - *

      On server nodes this only retrieves from the local datastore.

      - *

      On client nodes, this tries the local datastore first, then - * attempts to get the data from remote servers believed to have the data

      - */ - public Hashtable getItem(String uri) throws IOException, QException - { - log.info("getItem: uri='"+uri+"'"); - return localGetItem(uri); - } - - /** - * retrieves an item of content from remote peer - */ - public Hashtable peerGetItem(String peerId, String uri) - throws XmlRpcException, IOException, DataFormatException - { - Vector v = new Vector(); - v.add(uri); - - return peerExecute(peerId, "getItem", v); - } - - - /** returns true if this node possesses given key, false if not */ - public boolean localHasItem(String uri) { - if (getLocalMetadata(uri) == null) { - return false; - } - else { - return true; - } - } - - /** returns true if this node possesses given key, false if not */ - public boolean localHasCatalogItem(String uri) { - if (getLocalCatalogMetadata(uri) == null) { - return false; - } - else { - return true; - } - } - - /** - * returns the data stored under given key - */ - public Hashtable localGetItem(String uri) throws IOException - { - log.info("localGetItem: uri='"+uri+"'"); - Hashtable h = new Hashtable(); - - QDataItem item = getLocalMetadata(uri); - if (item == null) - { - // Honest, officer, we don't have it, we were just - // holding it for a friend! 
- System.out.println("localGetItem: no metadata for uri "+uri); - h.put("status", "error"); - h.put("error", "notfound"); - return h; - } - - // locate the content - String dataHash = (String)item.get("dataHash"); - String dataPath = makeDataPath(dataHash); - SimpleFile dataFile = new SimpleFile(dataPath, "r"); - - // barf if content missing - if (!dataFile.isFile()) - { - System.out.println("localGetItem: no data for uri "+uri); - h.put("status", "error"); - h.put("error", "missingdata"); - return h; - } - - // get data, hand it back with metadata - byte [] dataImage = dataFile.readBytes(); - h.put("status", "ok"); - h.put("metadata", item); - h.put("data", dataImage); - System.out.println("localGetItem: successful get: uri "+uri); - System.out.println("localGetItem: data hash="+sha256Base64(dataImage)); - return h; - } - - // --------------------------------------- - // PRIMITIVE - putItem - // --------------------------------------- - - /** - * Insert an item of content, with no metadata - * @param raw data to insert - */ - public Hashtable putItem(byte [] data) throws IOException, QException - { - return putItem(new Hashtable(), data); - } - - /** - * Insert an item of content, with metadata - * overridden in client nodes - * @param metadata Hashtable of item's metadata - * @param data raw data to insert - */ - public Hashtable putItem(Hashtable metadata, byte [] data) throws QException - { - Hashtable resp = new Hashtable(); - QDataItem item; - try { - item = new QDataItem(metadata, data); - item.processAndValidate(false); - localPutItem(item); - } catch (QException e) { - resp.put("status", "error"); - resp.put("error", "qexception"); - resp.put("summary", e.getLocalizedMessage()); - return resp; - } - - // success, it seems - resp.put("status", "ok"); - resp.put("uri", (String)item.get("uri")); - return resp; - } - - /** - * inserts an item of content to remote peer - */ - public Hashtable peerPutItem(String peerId, byte [] data) - throws XmlRpcException, 
IOException, DataFormatException - { - Vector v = new Vector(); - v.add(data); - - return peerExecute(peerId, "putItem", v); - } - - /** - * inserts an item of content to remote peer - */ - public Hashtable peerPutItem(String peerId, Hashtable metadata, byte [] data) - throws XmlRpcException, IOException, DataFormatException - { - Vector v = new Vector(); - v.add(metadata); - v.add(data); - - return peerExecute(peerId, "putItem", v); - } - - /** - * adds a new item of content to our local store, with given metadata - */ - public void localPutItem(QDataItem item) throws QException - { - /** - // 1) hash the data, add to metadata - String dataHash = sha256Base64(data); - metadata.put("dataHash", dataHash); - System.out.println("localPutItem: dataHash="+dataHash); - - // 2) if metadata has no key 'title', use hash as data - if (!metadata.containsKey("title")) - { - metadata.put("title", dataHash); - } - - // 3) add size field to metadata - metadata.put("size", new Integer(data.length)); - - // 4) get deterministic hash of final metadata - TreeSet t = new TreeSet(metadata.keySet()); - Iterator keys = t.iterator(); - int nkeys = t.size(); - int i; - String metaStr = ""; - for (i = 0; i < nkeys; i++) - { - String metaKey = (String)keys.next(); - metaStr += metaKey + "=" + metadata.get(metaKey) + "\n"; - } - - // store the metadata and data - String metaPath = makeDataPath(metaHash+".meta"); - String dataPath = makeDataPath(dataHash+".data"); - new SimpleFile(dataPath, "rws").write(data); - - PropertiesFile pf = new PropertiesFile(metaPath, metadata); - - // update index - contentIdx.add(metaHash); - - Hashtable h = new Hashtable(); - h.put("status", "ok"); - h.put("key", metaHash); - return h; - - */ - - // work out where to store metadata and data - String metaFilename = item.getStoreFilename(); - String metaPath = makeDataPath(metaFilename); - String dataPath = makeDataPath((String)item.get("dataHash")); - - // store the data, if not already present - if (!(new 
File(dataPath).isFile())) { - byte [] data = item._data; - try { - new SimpleFile(dataPath, "rws").write(data); - } catch (Exception e) { - throw new QException("Error storing metadata", e); - } - } - - // store metadata and add to index, if not already present - if (!(new File(metaPath).isFile())) { - try { - // store the metadata - PropertiesFile pf = new PropertiesFile(metaPath, item); - } catch (Exception e) { - throw new QException("Error storing data", e); - } - - try { - // enter the metadata hash into our index - contentIdx.add(metaFilename); - } catch (Exception e) { - throw new QException("Error adding metadata to index", e); - } - } - } - - // --------------------------------------- - // PRIMITIVE - newKeys - // --------------------------------------- - - /** - * Generates a new keypair for signed-space insertions - * @return a struct with the keys: - *
        - *
      • status - "ok"
      • - *
      • publicKey - base64-encoded signed space public key
      • - *
      • privateKey - base64-encoded signed space private key
      • - *
      - * When inserting an item using the privateKey, the resulting uri - * will be Q:publicKey/path - */ - public Hashtable newKeys() { - - String [] keys = QUtil.newKeys(); - Hashtable res = new Hashtable(); - res.put("status", "ok"); - res.put("publicKey", keys[0]); - res.put("privateKey", keys[1]); - return res; - } - - // --------------------------------------- - // PRIMITIVE - search - // --------------------------------------- - - /** - * Search datastore and catalog for a given item of content - * @param criteria - */ - public Hashtable search(Hashtable criteria) - { - return localSearch(criteria); - } - - public Hashtable localSearch(Hashtable criteria) - { - Hashtable result = new Hashtable(); - result.put("status", "error"); - result.put("error", "notimplemented"); - return result; - } - - public Hashtable insertQSite(String privKey64, - String siteName, - String rootPath, - Hashtable metadata - ) - throws Exception - { - Hashtable result = new Hashtable(); - result.put("status", "error"); - result.put("error", "notimplemented"); - return result; - } - - /** - * returns true if all values in a given metadata set match their respective - * regexps in criteria. - * @param metadata a Hashtable of metadata to test. Set the 'magic' key 'searchmode' - * to 'or' to make this an or-based test, otherwise defaults to and-based test. 
- * @param criteria a Hashbable containing zero or more matching criteria - */ - public boolean metadataMatchesCriteria(Hashtable metadata, Hashtable criteria) - { - boolean is_OrMode = false; - - // search mode defaults to AND unless explicitly set to OR - if (criteria.containsKey("searchmode")) { - if (((String)criteria.get("searchmode")).toLowerCase().equals("or")) { - is_OrMode = true; - } - } - - // test all keys and regexp values in criteria against metadata - Enumeration cKeys = criteria.keys(); - while (cKeys.hasMoreElements()) { - - String key = (String)cKeys.nextElement(); - if (key.equals("searchmode")) { - // this is a meta-key - skip - continue; - } - - String cval = (String)criteria.get(key); - String mval = (String)metadata.get(key); - if (mval == null) { - mval = ""; - } - - //System.out.println("metadataMatchesCriteria: key='"+key+"'" - // +" cval='"+cval+"'" - // +" mval='"+mval+"'"); - - // reduced xor-based comparison - if (!(mval.matches(cval) ^ is_OrMode)) { - return is_OrMode; - } - } - - // completed all - return !is_OrMode; - } - - // ---------------------------------------------------------- - // METHODS - datastore - // ---------------------------------------------------------- - - /** - * returns the number of known remote catalog entries - */ - public int remoteCatalogSize() - { - return this.catalogIdx.numRecs; - } - - /** - * returns the number of locally stored items - */ - public int localCatalogSize() - { - return this.contentIdx.numRecs; - } - - /** return a list of nodeIds containing a key, or null if none */ - public Vector getItemLocation(String key) throws IOException { - - String dir1 = key.substring(0, 1); - String dir2 = key.substring(0, 2); - String fullPath = locationDir + sep + dir1 + sep + dir2 + sep + key; - File fullFile = new File(fullPath); - File parent = fullFile.getParentFile(); - if (!parent.isDirectory()) { - parent.mkdirs(); - } - - if (!fullFile.exists()) { - return null; - } - - String p = new 
SimpleFile(fullPath, "r").read().trim(); - - String [] locs = p.split("\\s+"); - Vector v = new Vector(); - int i, nlocs=locs.length; - if (p.length() > 0) { - for (i=0; i 0) { - for (i=0; idetermines an absolute pathname for storing an item of a - * given name. Uses multi-level directories in sourceforge style

      - *

      For instance, if name is 'blah', and node's data dir lives - * at /home/qserver/content, then the path will be /home/qserver/content/b/bl/blah.

      - *

      Note that directories are created as needed

      - * @param name the filename to store - * @return the full pathname to write to - */ - public String makeDataPath(String name) - { - String dir1 = name.substring(0, 1); - String dir2 = name.substring(0, 2); - String fullPath = contentDir + sep + dir1 + sep + dir2 + sep + name; - File fullFile = new File(fullPath); - File parent = fullFile.getParentFile(); - if (!parent.isDirectory()) { - parent.mkdirs(); - } - - // all done, parent dir now exists - return fullPath; - } - - /** - *

      determines an absolute pathname for cataloging an item of a - * given name. Uses multi-level directories in sourceforge style

      - *

      For instance, if name is 'blah', and node's data dir lives - * at /home/qserver/content, then the path will be /home/qserver/content/b/bl/blah.

      - *

      Note that directories are created as needed

      - * @param name the filename to store - * @return the full pathname to write to - */ - public String makeCatalogPath(String name) - { - String dir1 = name.substring(0, 1); - String dir2 = name.substring(0, 2); - String fullPath = catalogDir + sep + dir1 + sep + dir2 + sep + name; - File fullFile = new File(fullPath); - File parent = fullFile.getParentFile(); - if (!parent.isDirectory()) { - parent.mkdirs(); - } - - // all done, parent dir now exists - return fullPath; - } - - - /** - * returns a PropertiesFile object for given peer - * @param peerId - * @return PropertiesFile object representing that peer's data - */ - public QPeer getPeerRecord(String peerId) throws IOException, DataFormatException - { - // return peer's property object - return new QPeer(this, peerId); - } - - /** - * Creates new peer record in our datastore - * @param dest64 String - destination in base64 format - */ - public void newPeer(String dest64) throws IOException, DataFormatException - { - Destination d = new Destination(); - d.fromBase64(dest64); - newPeer(d); - } - - /** - * Fetches/Creates new peer record in our datastore - */ - public void newPeer(Destination peerDest) throws IOException - { - String peerDest64 = peerDest.toBase64(); - - // bail if this new peer is self - if (peerDest64.equals(destStr)) { - return; - } - - // determine peerID - String peerId = destToId(peerDest); - - // bail if peer is already known - if (peers.containsKey(peerId)) { - log.debug("newPeer: already know peer "+peerId+" ("+peerDest64.substring(0, 12)+"...)"); - return; - } - - // where does the peer file live? 
- String peerPath = peersDir + sep + peerId; - - // get the record - QPeer peerRec = new QPeer(this, peerDest); - - // and write it into index - peersIdx.add(peerId); - - // and stick into our global peers map - peers.put(peerId, peerRec); - - // note that we've got a new peer - conf.incrementIntProperty("numPeers"); - - // and, finally, schedule in a greeting to this peer - if (isClient) { - schedulePeerUpdateJob(peerRec); - } else { - schedulePeerGreetingJob(peerRec); - } - } - - /** - * Get a list of peers, in order of their kademlia-closeness to - * a given uri - */ - public Vector peersClosestTo(String uri, int max) { - - String itemHash = sha256Base64(uri); - - // get our peer list as a vector - Vector allPeers = new Vector(); - Iterator peerRecs = peers.values().iterator(); - while (peerRecs.hasNext()) { - allPeers.addElement(peerRecs.next()); - } - - // create a comparator to find peers closest to URI - QKademliaComparator comp = new QKademliaComparator(this, itemHash); - - // sort the peerlist according to k-closeness of uri - Collections.sort(allPeers, comp); - - // get the closest (up to) n peers - int npeers = Math.min(max, allPeers.size()); - List closestPeers = allPeers.subList(0, npeers); - - return new Vector(closestPeers); - } - - // ---------------------------------------------------------- - // METHODS - node status indicators - // ---------------------------------------------------------- - - /** return uptime of this node, in seconds */ - public int nodeUptime() - { - Date now = new Date(); - return (int)((now.getTime() - nodeStartTime.getTime()) / 1000); - } - - /** return node load, as float */ - public float nodeLoad() - { - long now = new Date().getTime(); - long dt = now - load_tPrev; - load_tPrev = now; - - //System.out.println("nodeLoad: dt="+dt+" load_yPrev="+load_yPrev); - - load_yPrev = load_yPrev * Math.exp(-((double)dt) / load_kFall); - - //System.out.println("nodeLoad: y="+load_yPrev); - - return (float)load_yPrev; - } - - public 
float nodeLoadAfterHit() - { - //System.out.println("nodeLoadAfterHit: "+load_yPrev+" before recalc"); - // update decay phase - nodeLoad(); - - //System.out.println("nodeLoadAfterHit: "+load_yPrev+" after recalc"); - - // and add spike - load_yPrev += (1.0 - load_yPrev) / load_kRise; - - //System.out.println("nodeLoadAfterHit: "+load_yPrev+" after hit"); - //System.out.println("-----------------------------------------"); - - return (float)load_yPrev; - } - - /** - * Determine an advised time for next contact from a peer node. - * This is based on the node's current load - */ - public int getAdvisedNextContactTime() - { - //long now = new Date().getTime() / 1000; - // fudge 30 secs from now - //return (int)(now + 30); - - // formula here is to advise a backup delay of: - // loadBackoffMin + 2 ** (loadBackoffBits * currentLoad) - return nowSecs() - + load_backoffMin - + (int)(Math.pow(load_backoffBase, load_backoffBits * load_yPrev)); - } - - - // ---------------------------------------------------------- - // METHODS - general - // ---------------------------------------------------------- - - public String base64Enc(String raw) - { - return base64Enc(raw.getBytes()); - } - - public String base64Enc(byte[] raw) - { - return net.i2p.data.Base64.encode(raw); - } - - public String base64Dec(String enc) - { - return new String(net.i2p.data.Base64.decode(enc)); - } - - public String sha256Base64(String raw) - { - return sha256Base64(raw.getBytes()); - } - - public String sha256Base64(byte [] raw) - { - //return base64Enc(sha256(raw)); - return base64Enc(i2p.sha().calculateHash(raw).getData()).replaceAll("[=]+", ""); - } - - /** - * simple interface for sha256 hashing - * @param raw a String to be hashed - * @return the sha256 hash, as binary - */ - public String sha256(String raw) - { - return sha256(raw.getBytes()); - } - - public String sha256(byte [] raw) - { - return new String(i2p.sha().calculateHash(raw).getData()); - - //SHA256Generator shagen = new 
SHA256Generator(i2p); - //return new String(shagen.calculateHash(raw).getData()); - //Sha256 s = new Sha256(); - //s.update(raw, 0, raw.length); - //byte [] d = s.digest(); - //for (int i=0; i= 0 - * @param width minimum width of string, which will get padded - * with leading zeroes to make up the desired width - */ - public String intFmt(int n, int width) - { - String nS = String.valueOf(n); - while (nS.length() < width) - { - nS = "0" + nS; - } - return nS; - } - - public void log__(String msg) - { - System.out.println("QNode: " + msg); - - // bail if logFile not yet created, can help in avoiding npe - if (logFile == null) { - return; - } - - try { - Calendar now = Calendar.getInstance(); - String timestamp - = intFmt(now.YEAR, 4) - + "-" - + intFmt(now.MONTH, 2) - + "-" - + intFmt(now.DAY_OF_MONTH, 2) - + "-" - + intFmt(now.HOUR_OF_DAY, 2) - + ":" - + intFmt(now.MINUTE, 2) - + ":" - + intFmt(now.SECOND, 2) - + " "; - - synchronized (logFile) { - logFile.seek(logFile.length()); - logFile.write((timestamp + msg + "\n").getBytes()); - } - } catch (IOException e) { - e.printStackTrace(); - } - - } - - public void dj() { - dumpjobs(); - } - - public void dumpjobs() { - - jobQueue.printWaiting(); - } - - public void foo() { - System.out.println("QNode.foo: isClient="+isClient); - } - -} - diff --git a/apps/q/java/src/net/i2p/aum/q/QPeer.java b/apps/q/java/src/net/i2p/aum/q/QPeer.java deleted file mode 100644 index 83fe2311a..000000000 --- a/apps/q/java/src/net/i2p/aum/q/QPeer.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * QPeer.java - * - * Created on March 28, 2005, 2:13 PM - */ - -package net.i2p.aum.q; - -import java.io.IOException; -import java.io.Serializable; - -import net.i2p.aum.PropertiesFile; -import net.i2p.data.DataFormatException; -import net.i2p.data.Destination; - -/** - * Wrapper for a peer record file. 
- * Implements a bunch of accessor methods for getting/setting numerical attribs - */ -public class QPeer implements Serializable { - - QNode node; - protected Destination dest; - protected String peerId; - protected String destStr; - - public PropertiesFile file; - - /** Creates a whole new peer */ - public QPeer(QNode node, Destination dest) throws IOException { - - file = new PropertiesFile(node.peersDir + node.sep + node.destToId(dest)); - - this.dest = dest; - destStr = dest.toBase64(); - peerId = node.destToId(dest); - - file.setProperty("id", peerId); - file.setProperty("dest", destStr); - file.setProperty("timeLastUpdate", "0"); - file.setProperty("timeLastContact", "0"); - file.setProperty("timeNextContact", "0"); - } - - /** Loads an existing peer, barfs if nonexistent */ - public QPeer(QNode node, String destId) throws IOException, DataFormatException { - - file = new PropertiesFile(node.peersDir + node.sep + destId); - - // barf if file doesn't exist - if (!file._fileExists) { - throw new IOException("Missing peer record file"); - } - - destStr = file.getProperty("dest"); - dest = new Destination(); - dest.fromBase64(destStr); - peerId = destId; - } - - public Destination getDestination() { - return dest; - } - - public String getDestStr() { - return destStr; - } - - public String getId() { - return peerId; - } - - public int getTimeLastUpdate() { - return new Integer(file.getProperty("timeLastUpdate")).intValue(); - } - - public void setTimeLastUpdate(long when) { - file.setProperty("timeLastUpdate", String.valueOf(when)); - } - - public int getTimeLastContact() { - return new Integer(file.getProperty("timeLastContact")).intValue(); - } - - public void setTimeLastContact(int when) { - file.setProperty("timeLastContact", String.valueOf(when)); - } - - public int getTimeNextContact() { - return new Integer(file.getProperty("timeNextContact")).intValue(); - } - - public void setTimeNextContact(int when) { - file.setProperty("timeNextContact", 
String.valueOf(when)); - } - - public boolean hasBeenGreeted() { - return file.containsKey("sentHello"); - } - - public void markAsGreeted() { - file.setProperty("sentHello", "1"); - } -} - diff --git a/apps/q/java/src/net/i2p/aum/q/QServerMethods.java b/apps/q/java/src/net/i2p/aum/q/QServerMethods.java deleted file mode 100644 index b06c22817..000000000 --- a/apps/q/java/src/net/i2p/aum/q/QServerMethods.java +++ /dev/null @@ -1,388 +0,0 @@ -/* - * QServerMethods.java - * - * Created on 20 March 2005, 23:23 - */ - -package net.i2p.aum.q; - -import java.io.IOException; -import java.util.Hashtable; -import java.util.Vector; - - -/** - * Defines the methods which will be exposed in the server's - * XML-RPC interface. On the xml-rpc client side, these methods are invoked - * through the 'peerXXXX' methods. - * This class is just a shim, which invokes methods of the same name on - * the QServerNode. It's separated off as a shim because the XML-RPC implementation - * we're using (org.apache.xmlrpc) can only add entire objects and all their - * methods as handlers, and doesn't support adding a-la-carte methods. - */ -public class QServerMethods { - - private QNode node; - - /** - * Creates a new instance of QServerMethods, - * with a ref to the server - */ - public QServerMethods(QNode node) { - this.node = node; - } - - /** - * pings this peer node - */ - public Hashtable ping() { - node.nodeLoadAfterHit(); - System.out.println("XMLRPC: ping"); - return node.ping(); - } - - /** - * pings this peer node - * @param args a Hashtable (dict, struct, assoc array) of args, all of which are - * completely ignored - */ - public Hashtable ping(Hashtable args) { - return ping(); - } - - /** - * introduces ourself to this remote peer. 
From then on, caller will be expected - * to maintain reasonable uptime - * @param destStr our own base64 destination - */ - public Hashtable hello(String destStr) { - node.nodeLoadAfterHit(); - System.out.println("XMLRPC: hello"); - return node.hello(destStr); - } - - /** - * introduces ourself to this remote peer. From then on, caller will be expected - * to maintain reasonable uptime - * @param args a Hashtable/dict/struct/assoc-array containing: - *
        - *
      • dest - base64 destination (noderef) for the remote peer to add
      • - *
      - */ - public Hashtable hello(Hashtable args) { - String destStr; - System.out.println("XMLRPC: hello"); - try { - destStr = (String)args.get("dest"); - } catch (Exception e) { - destStr = null; - } - if (destStr == null) { - Hashtable res = new Hashtable(); - res.put("status", "error"); - res.put("error", "baddest"); - res.put("summary", "Bad or missing destination"); - node.nodeLoadAfterHit(); - return res; - } - return hello(destStr); - } - - /** - * Searches node for all data items whose metadata keys match the keys - * of the given mapping. - * @param criteria a Hashtable (or python dict, etc) of search criteria. Each - * 'key' is a metadata item to match, and corresponding value is a regular expression - * to match. - */ - public Hashtable search(Hashtable criteria) { - node.nodeLoadAfterHit(); - System.out.println("XMLRPC: search"); - System.out.println("XMLRPC: search: "+criteria); - return node.search(criteria); - } - - /** - * returns a list of new content and/or peers which have - * been stored on the server since a given time - * @param since (int) unixtime in seconds - * @param includePeers (int) set to 1 to include 'peers' list in update, 0 to omit - * @param includeCatalog (int) set to 1 to include 'items' (catalog) list in - * update, 0 to omit - */ - public Hashtable getUpdate(int since, int includePeers, int includeCatalog) { - node.nodeLoadAfterHit(); - System.out.println("XMLRPC: getUpdate: "+since+" "+includePeers+" "+includeCatalog); - return node.getUpdate(since, includePeers, includeCatalog); - } - - /** - * returns a list of new content and/or peers which have - * been stored on the server since a given time - * Wparam args a Hashtable/struct/dict/assoc-array of arguments, including: - *
        - *
      • since - (int) unixtime in seconds
      • - *
      • includePeers - (int) set to nonzero to include 'peers' list in update, 0 to omit, - * default 0
      • - *
      • includeCatalog - (int) set to nonzero to include 'items' (catalog) list in - * update, 0 to omit (default 0)
      • - *
      - */ - public Hashtable getUpdate(Hashtable args) { - int since; - int includePeers = 0; - int includeCatalog = 0; - - // uplift 'since' key from args, or barf if invalid - try { - since = ((Integer)(args.get("since"))).intValue(); - } catch (Exception e) { - Hashtable res = new Hashtable(); - res.put("status", "error"); - res.put("error", "badargument"); - res.put("summary", "Invalid value for 'since'"); - node.nodeLoadAfterHit(); - return res; - } - - // uplift 'includePeers' key from args, silently fall back - // on default if invalid - if (args.containsKey("includePeers")) { - try { - includePeers = ((Integer)(args.get("includePeers"))).intValue(); - } catch (Exception e) {} - } - - // uplift 'includeCatalog' key from args, silently fall back - // on default if invalid - if (args.containsKey("includeCatalog")) { - try { - includeCatalog = ((Integer)(args.get("includeCatalog"))).intValue(); - } catch (Exception e) {} - } - return getUpdate(since, includePeers, includeCatalog); - } - - public Vector getJobsList() throws Exception { - return node.getJobsList(); - } - - /** - * attempt to retrieve a data item from remote peer - * @param key - the key under which the content item is assumedly stored in Q - */ - public Hashtable getItem(String uri) throws IOException, QException { - node.nodeLoadAfterHit(); - System.out.println("XMLRPC: getItem: "+uri); - return node.getItem(uri); - } - - /** - * attempt to retrieve a data item from remote peer - * @param args - a Hashtable/struct/dict/assoc-array, containing: - *
        - *
      • key - (string) the key under which the content item is assumedly stored in Q
      • - *
      - */ - public Hashtable getItem(Hashtable args) throws IOException, QException { - String key; - try { - key = (String)args.get("key"); - } catch (Exception e) { - Hashtable res = new Hashtable(); - res.put("status", "error"); - res.put("error", "badargs"); - node.nodeLoadAfterHit(); - return res; - } - - return getItem(key); - } - - /** - * puts an item of content to remote peer - * @param args - a Hashtable/struct/dict/assoc-array, containing at least: - *
        - *
      • data - binary - the raw data to insert
      • - *
      - * Any other key/value pairs in this struct will be taken as metadata, and - * inserted into the datastore as such. - * @return the assigned key for the item, under which the item - * can be subsequently retrieved. This key will be inserted into - * the metadata - */ - public Hashtable putItem(Hashtable args) - throws IOException, QException - { - byte [] data; - try { - data = (byte [])args.get("data"); - args.remove("data"); - } catch (Exception e) { - Hashtable res = new Hashtable(); - res.put("status", "error"); - res.put("error", "baddata"); - node.nodeLoadAfterHit(); - return res; - } - return putItem(args, data); - } - - /** - * alternative wrapper method which allows data to be a String. - * DO NOT USE if the string contains any control chars or bit-7-set chars - */ - public Hashtable putItem(Hashtable metadata, String data) - throws IOException, QException - { - return putItem(metadata, data.getBytes()); - } - - /** - * alternative wrapper method which allows data to be a String. - * DO NOT USE if the string contains any control chars or bit-7-set chars - */ - public Hashtable putItem(String data) - throws IOException, QException - { - return putItem(data.getBytes()); - } - - - /** - * puts an item of content to remote peer - * Wparam metadata a mapping object containing metadata - * @param data raw data to insert - * @return the assigned key for the item, under which the item - * can be subsequently retrieved. 
This key will be inserted into - * the metadata - */ - public Hashtable putItem(Hashtable metadata, byte [] data) - throws IOException, QException - { - node.nodeLoadAfterHit(); - System.out.println("XMLRPC: putItem: "+metadata); - return node.putItem(metadata, data); - } - - /** - * puts an item of data, without metadata, into the network - * @param data - binary - the raw data to insert - * @return the assigned key for the item - */ - public Hashtable putItem(byte [] data) - throws IOException, QException - { - node.nodeLoadAfterHit(); - System.out.println("XMLRPC: putItem (no metadata)"); - return node.putItem(data); - } - - /** - * Schedules the insertion of a qsite. Valid for client nodes only - * @param privKey64 base64 representation of a signed space private key - * @param siteName short text name of the qsite, whose URI will end up - * as 'Q:pubKey64/siteName/'. - * @param rootPath physical absolute pathname of the qsite's root directory - * on the host filesystem. - * Note that this directory must have a file called 'index.html' at its top - * level, which will be used as the qsite's default document. - * @param metadata A set of metadata to associate with the qsite - * @return Hashtable containing results, as the keys: - *
        - *
      • status - String - either "ok" or "error"
      • - *
      • error - String - short summary of error, only present if - * status is "error"
      • - *
      • uri - the full Q URI for the top level of the site - *
      - */ - public Hashtable insertQSite(String privKey64, - String siteName, - String rootPath, - Hashtable metadata - ) - throws Exception - { - node.nodeLoadAfterHit(); - System.out.println("XMLRPC: insertQSite("+privKey64+", "+siteName+", "+rootPath+", "+metadata+")"); - return node.insertQSite(privKey64, siteName, rootPath, metadata); - } - - /** - * Generates a new keypair for signed-space insertions - * @return a struct with the keys: - *
        - *
      • status - "ok"
      • - *
      • publicKey - base64-encoded signed space public key
      • - *
      • privateKey - base64-encoded signed space private key
      • - *
      - * When inserting an item using the privateKey, the resulting uri - * will be Q:publicKey/path - */ - public Hashtable newKeys() { - - return node.newKeys(); - } - - /** - * shuts down the node - * for the purpose of security, the caller must quote the node's full - * base64 private key - * @param nodePrivKey the node's full base64 I2P private key - * @return if shutdown succeeds, an XML-RPC error will result, because - * the node will fail to send a reply. If an invalid key is given, - * the reply Hashtable will contain {"status":"error", "error":"invalidkey"} - */ - public Hashtable shutdown(String nodePrivKey) { - - Hashtable res = new Hashtable(); - - // sekkret h4x - kill the VM if key is the node's I2P base64 privkey - //System.out.println("shutdown: our privkey="+node.privKeyStr); - //System.out.println("shutdown: nodePrivKey="+nodePrivKey); - if (nodePrivKey.equals(node.privKeyStr)) { - - res.put("status", "ok"); - //node.scheduleShutdown(); - // get a runtime - //System.out.println("Node at "+node.dataDir+" shutting down"); - Runtime r = Runtime.getRuntime(); - // and terminate the vm - //r.exit(0); - r.halt(0); - } - else { - res.put("status", "error"); - res.put("error", "invalidkey"); - } - - return res; - } - - /** - * shuts down the node - * for the purpose of security, the caller must quote the node's full - * base64 private key - * @param args - a Hashtable/struct/dict/assoc-array, containing: - *
        - *
      • privKey - string - the node's full base64 I2P private key
      • - *
      - * @return if shutdown succeeds, an XML-RPC error will result, because - * the node will fail to send a reply. If an invalid key is given, - * the reply Hashtable will contain {"status":"error", "error":"invalidkey"} - */ - public Hashtable shutdown(Hashtable args) { - String privKey; - try { - privKey = (String)args.get("privKey"); - } catch (Exception e) { - Hashtable res = new Hashtable(); - res.put("status", "error"); - res.put("error", "badkey"); - node.nodeLoadAfterHit(); - return res; - } - return shutdown(privKey); - } -} - diff --git a/apps/q/java/src/net/i2p/aum/q/QServerNode.java b/apps/q/java/src/net/i2p/aum/q/QServerNode.java deleted file mode 100644 index 1b91e652d..000000000 --- a/apps/q/java/src/net/i2p/aum/q/QServerNode.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * QServer.java - * - * Created on 20 March 2005, 23:23 - */ - -package net.i2p.aum.q; - -import java.io.IOException; -import java.util.Properties; - -import net.i2p.I2PException; -import net.i2p.aum.I2PXmlRpcServerFactory; -import net.i2p.aum.http.I2PHttpServer; -import net.i2p.aum.http.MiniHttpServer; -import net.i2p.data.DataFormatException; - -/** - * - * Implements Q Server nodes. - */ -public class QServerNode extends QNode { - - /** - * default datastore directory - */ - public static String defaultStoreDir = ".quartermaster_server"; - - /** - * can set this to 0 before instantiating servers, to set tunnel length - * for debugging purposes - **/ - public static int tunLength = 2; - - public I2PXmlRpcServerFactory xmlRpcServerFactory; - - public String nodeType = "Server"; - - /** Creates a new instance of QServer */ - public QServerNode() throws IOException, DataFormatException, I2PException - { - super(System.getProperties().getProperty("user.home") + sep + defaultStoreDir); - } - - /** - * Creates a Q node in server mode, using specified datastore directory - * @param dataDir absolute pathname where this server's datastore tree is - * located. 
If tree doesn't exist, it will be created along with new keys - */ - public QServerNode(String dataDir) throws IOException, DataFormatException, I2PException - { - super(dataDir); - } - - /** - * performs mode-specific node setup - */ - public void setup() throws DataFormatException, I2PException - { - } - - /** - *

      Sets up and launches an xml-rpc server for servicing requests - * to this node.

      - *

      For server nodes, the xml-rpc server listens within I2P on the - * node's destination.

      - *

      For client nodes, the xml-rpc server listens on a local TCP - * port (according to attributes xmlRpcServerHost and xmlRpcServerPort)

      - */ - public void startExternalInterfaces(QServerMethods methods) throws Exception { - /** - * // get a server factory if none already existing - * if (xmlRpcServerFactory == null) { - * getTunnelLength(); - * log.info("Creating an xml-rpc server factory with tunnel length "+tunLength); - * xmlRpcServerFactory = new I2PXmlRpcServerFactory( - * tunLength, tunLength, tunLength, tunLength, i2p); - * } - * - * log.info("Creating XML-RPC server listening within i2p"); - * xmlRpcServer = xmlRpcServerFactory.newServer(privKey); - * - * // bind in our interface class - * log.info("Binding XML-RPC interface object"); - * xmlRpcServer.addHandler(baseXmlRpcServiceName, methods); - * - * // and fire it up - * log.info("Launching XML-RPC server"); - * xmlRpcServer.start(); - **/ - - Properties httpProps = new Properties(); - - httpProps = new Properties(); - Properties sysProps = System.getProperties(); - String i2cpHost = sysProps.getProperty("i2cp.tcp.host", "127.0.0.1"); - String i2cpPort = sysProps.getProperty("i2cp.tcp.port", "7654"); - httpProps.setProperty("i2cp.tcp.host", i2cpHost); - httpProps.setProperty("i2cp.tcp.port", i2cpPort); - - // create in-i2p http server for xmlrpc and browser access - MiniHttpServer webServer = new I2PHttpServer(privKey, QClientWebInterface.class, this, httpProps); - webServer.addXmlRpcHandler(baseXmlRpcServiceName, methods); - webServer.start(); - System.out.println("Started in-i2p http/xmlrpc server listening on dest:"); - String dest = privKey.getDestination().toBase64(); - System.out.println(dest); - - } - - public void getTunnelLength() - { - String tunLenStr = System.getProperty("quartermaster.tunnelLength"); - if (tunLenStr == null) - { - return; - } - - tunLength = new Integer(tunLenStr).intValue(); - } - - /** - * @param args the command line arguments - */ - public static void main(String[] args) { - - QServerNode node; - - try { - if (args.length > 0) { - node = new QServerNode(args[0]); - } - else { - node = new 
QServerNode(); - } - node.log.info("QServerNode: entering endless loop..."); - while (true) { - Thread.sleep(1000); - } - } catch (Exception e) { - e.printStackTrace(); - System.exit(1); - } - } -} - diff --git a/apps/q/java/src/net/i2p/aum/q/QTest.java b/apps/q/java/src/net/i2p/aum/q/QTest.java deleted file mode 100644 index 0c5dbfd63..000000000 --- a/apps/q/java/src/net/i2p/aum/q/QTest.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * QTest.java - * - * Created on March 23, 2005, 11:34 PM - */ - -package net.i2p.aum.q; - -import java.io.IOException; -import java.util.Hashtable; - -import net.i2p.I2PException; -import net.i2p.data.DataFormatException; - - -/** - * - * @author david - */ -public class QTest { - - QServerNode server; - - QClientNode client; - - /** Creates a new instance of QTest */ - public QTest() { - } - - /** - * performs a series of tests on client node - */ - public void testClientNode() - throws IOException, DataFormatException, I2PException, QException - { - print("Creating new client node"); - QClientNode node = new QClientNode(); - - print("Starting node background stuff"); - node.start(); - - print("Inserting new plain hash data item"); - byte [] data = "Hello, world".getBytes(); - Hashtable meta = new Hashtable(); - meta.put("title", "simple test"); - meta.put("type", "text"); - meta.put("path", "/test.txt"); - Hashtable res = node.putItem(meta, data); - print("putItem result="+res); - if (!res.get("status").equals("ok")) { - print("putItem fail: error="+res.get("error")); - node.interrupt(); - return; - } - - String uri = (String)res.get("uri"); - print("putItem successful: uri="+uri); - - print("now attempting to retrieve"); - Hashtable res1 = node.getItem(uri); - print("getItem: result="+res1); - if (!res1.get("status").equals("ok")) { - print("getItem fail: error="+res.get("error")); - node.interrupt(); - return; - } - byte [] data1 = (byte [])res1.get("data"); - String dataStr = new String(data1); - print("getItem: success, 
data="+dataStr); - - print("now searching for what we just inserted"); - Hashtable crit = new Hashtable(); - crit.put("type", "text"); - Hashtable res1a = node.search(crit); - print("After search: res="+res1a); - - print("now creating a keypair"); - Hashtable keys = node.newKeys(); - String pub = (String)keys.get("publicKey"); - String priv = (String)keys.get("privateKey"); - print("public="+pub); - print("private="+priv); - - print("Inserting new secure space data item"); - byte [] data2 = "The quick brown fox".getBytes(); - Hashtable meta2 = new Hashtable(); - meta2.put("title", "simple test 2"); - meta2.put("type", "text"); - meta2.put("path", "/test.txt"); - meta2.put("privateKey", priv); - Hashtable res2 = node.putItem(meta2, data2); - print("putItem result="+res2); - if (!res2.get("status").equals("ok")) { - print("putItem fail: error="+res2.get("error")); - node.interrupt(); - return; - } - - String uri2 = (String)res2.get("uri"); - print("putItem successful: uri="+uri2); - - print("now attempting to retrieve"); - Hashtable res2a = node.getItem(uri2); - print("getItem: result="+res2a); - if (!res2a.get("status").equals("ok")) { - print("getItem fail: error="+res.get("error")); - node.interrupt(); - return; - } - byte [] data2a = (byte [])res2a.get("data"); - String dataStr2a = new String(data2a); - print("getItem: success, data="+dataStr2a); - - } - - public void print(String msg) { - System.out.println(msg); - } - - /** - * @param args the command line arguments - */ - public static void main(String[] args) { - QTest test = new QTest(); - try { - test.testClientNode(); - } catch (Exception e) { - e.printStackTrace(); - } - } -} - diff --git a/apps/q/java/src/net/i2p/aum/q/QUtil.java b/apps/q/java/src/net/i2p/aum/q/QUtil.java deleted file mode 100644 index 5b1ccec2c..000000000 --- a/apps/q/java/src/net/i2p/aum/q/QUtil.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * QUtil.java - * - * Created on April 6, 2005, 2:11 PM - */ - -package net.i2p.aum.q; - -import 
net.i2p.I2PAppContext; -import net.i2p.data.Base64; -import net.i2p.data.DataFormatException; -import net.i2p.data.SigningPrivateKey; -import net.i2p.data.SigningPublicKey; - -/** - * A general collection of static utility methods - */ -public class QUtil { - - public static boolean debugEnabled = true; - - /** - * Generates a new secure space public/private keypair - * @return an array of 2 strings, first one is SSK Public Key, second one - * is SSK Private Key. - */ - public static String [] newKeys() { - Object [] keypair = I2PAppContext.getGlobalContext().keyGenerator().generateSigningKeypair(); - SigningPublicKey pub = (SigningPublicKey)keypair[0]; - SigningPrivateKey priv = (SigningPrivateKey)keypair[1]; - String [] sskKeypair = new String[2]; - sskKeypair[0] = hashPubKey(pub); - sskKeypair[1] = priv.toBase64(); - return sskKeypair; - } - - /** - * converts a signed space private key (in base64) - * to its base64 ssk public equivalent - * @param priv64 SSK private key string as base64 - * @return public key, base64-encoded - */ - public static String privateToPubHash(String priv) - throws DataFormatException - { - return hashPubKey(new SigningPrivateKey(priv).toPublic()); - } - - public static SigningPublicKey privateToPublic(String priv64) - throws DataFormatException - { - SigningPrivateKey priv = new SigningPrivateKey(priv64); - SigningPublicKey pub = priv.toPublic(); - return pub; - } - - public static String hashPubKey(String pub64) - throws DataFormatException - { - return hashPubKey(new SigningPublicKey(pub64)); - } - - /** - * hashes a public key for use in signed space keypairs - * possibly shorten this - */ - public static String hashPubKey(SigningPublicKey pub) { - String hashed = sha64(pub.toByteArray()); - String abbrev = hashed.substring(0, 24); - return abbrev; - } - - /** - * returns base64 of sha hash of a string - */ - public static String sha64(String raw) { - return sha64(raw.getBytes()); - } - - public static String sha64(byte [] raw) { - 
//return stripEquals(Base64.encode(sha(raw))); - return Base64.encode(sha(raw)).replaceAll("[=]", ""); - } - - public static byte [] sha(String raw) { - return sha(raw.getBytes()); - } - - public static byte [] sha(byte [] raw) { - return I2PAppContext.getGlobalContext().sha().calculateHash(raw).getData(); - } - - public static void debug(String s) { - if (debugEnabled) { - System.out.println("QSSL:"+s); - } - } - -} diff --git a/apps/q/java/src/net/i2p/aum/q/QWorkerThread.java b/apps/q/java/src/net/i2p/aum/q/QWorkerThread.java deleted file mode 100644 index 502de342e..000000000 --- a/apps/q/java/src/net/i2p/aum/q/QWorkerThread.java +++ /dev/null @@ -1,347 +0,0 @@ -/* - * QWorkerThread.java - * - * Created on April 17, 2005, 2:44 PM - */ - -package net.i2p.aum.q; - -import java.io.File; -import java.util.Enumeration; -import java.util.Hashtable; -import java.util.Vector; - -import net.i2p.aum.SimpleFile; - -/** - * Thread which performs a single background job for a nod - */ - -class QWorkerThread extends Thread { - - QNode node; - Hashtable job; - String jobTime; - String peerId; - String jobDesc; - - /* - * Creates this thread for executing a background job for the node - * @param node the node for which this job is to run - * @param jobTime unixtime-milliseconds at which job is to run, - * represented as string because it denotes a file in the node's jobs dir - */ - public QWorkerThread(QNode node, String jobTime) { - this.node = node; - this.jobTime = jobTime; - } - - public void run() { - try { - node.log.info("worker: executing job: "+jobTime); - - // reconstitute the job from its serialisation in jobs directory - job = node.loadJob(jobTime); - jobDesc = node.loadJobDescription(jobTime); - - // a couple of details - String cmd = (String)job.get("cmd"); - peerId = (String)job.get("peerId"); - - // dispatch off to required handler routine - if (cmd.equals("getUpdate")) { - doGetUpdate(); - } - else if (cmd.equals("hello")) { - doHello(); - } - else if 
(cmd.equals("localPutItem")) { - doLocalPutItem(); - } - else if (cmd.equals("uploadItem")) { - doUploadItem(); - } - else if (cmd.equals("test")) { - doTest(); - } - else if (cmd.equals("shutdown")) { - doShutdown(); - } - else { - node.log.error("workerthread.run: unrecognised command '"+cmd+"'"); - System.out.println("workerthread.run: unrecognised command '"+cmd+"'"); - } - - } catch (Exception e) { - e.printStackTrace(); - node.log.warn("worker thread crashed"); - } - - // finished (or failed), so replenish the jobs pool - node.threadPool.release(); - - // and remove the job record and description - try { - new File(node.jobsDir + node.sep + jobTime).delete(); - new File(node.jobsDir + node.sep + jobTime + ".desc").delete(); - } catch (Exception e) { - e.printStackTrace(); - } - } - - public void doTest() throws Exception { - - String msg = (String)job.get("msg"); - System.out.println("TESTJOB: msg='"+msg+"'"); - } - - public void doShutdown() throws Exception { - - try { - new File(node.jobsDir + node.sep + jobTime).delete(); - new File(node.jobsDir + node.sep + jobTime + ".desc").delete(); - } catch (Exception e) { - e.printStackTrace(); - } - - SimpleFile f = new SimpleFile("/tmp/eeee", "rws"); - f.write("xxx"); - node.isRunning = false; - Runtime.getRuntime().halt(0); - } - - public void doLocalPutItem() throws Exception { - Hashtable metadata = (Hashtable)job.get("metadata"); - String path = (String)job.get("localDataFilePath"); - SimpleFile f = new SimpleFile(path, "r"); - byte [] data = f.readBytes(); - - System.out.println("doLocalPutItem: path='"+path+"' dataLen="+data.length+" metadata="+metadata); - node.putItem(metadata, data); - } - - /** - *

      Upload a locally-inserted data item to n remote hubs.

      - *

      This is one intricate algorithm. The aim is to upload the content - * item to the 3 peers which are closest (Kademlia-wise) to the item's URI. - * Some requirements include: - *

        - *
      • If we discover new peers over time, we have to consider these peers - * as upload targets
      • - *
      • If upload to an individual peer fails, we have to retry a few times
      • - *
      • If there aren't enough viable peers yet, we need to keep rescheduling this - * job till enough peers come online
      • - *
      • Don't hog a thread slot on the jobs queue, give other jobs a chance to run
      • - *
      - *

      - * - */ - public void doUploadItem() throws QException { - QDataItem item = (QDataItem)job.get("item"); - String uri = (String)item.get("uri"); - String desc = "uploadItem:uri="+uri; - byte [] data = item._data; - - Hashtable peersUploaded = (Hashtable)job.get("peersUploaded"); - Hashtable peersPending = (Hashtable)job.get("peersPending"); - Hashtable peersFailed = (Hashtable)job.get("peersFailed"); - Hashtable peersNumTries = (Hashtable)job.get("peersNumTries"); - - String itemHash = item.getStoreFilename(); - QPeer peerRec; - - // get current list of up to 100 closest peers to item's URI - Vector cPeers = node.peersClosestTo(uri, 100); - - // loop on this list, try to upload item to n of them - for (Enumeration en = cPeers.elements(); en.hasMoreElements();) { - QPeer peer = (QPeer)en.nextElement(); - String peerId = peer.getId(); - - // skip this peer if we've already succeeded or failed with it - if (peersFailed.containsKey(peerId) || peersUploaded.containsKey(peerId)) { - continue; - } - - // if there are less than 3 or more pending peers, add this peer to - // pending list, otherwise skip it - if (!peersPending.containsKey(peerId)) { - if (peersPending.size() < 3) { - peersPending.put(peerId, ""); - } else { - continue; - } - } - - // try to insert item to this peer - boolean uploadedOk; - try { - Hashtable res = node.peerPutItem(peerId, item, item._data); - if (res.containsKey("status") && ((String)res.get("status")).equals("ok")) { - // successful upload - uploadedOk = true; - } else { - // upload failed for some reason - uploadedOk = false; - System.out.println("upload failure:"+res); - } - } catch (Exception e) { - // possibly because peer is offline or presently unreachable - uploadedOk = false; - e.printStackTrace(); - System.out.println("upload failure"); - } - - // how'd the upload go? 
- if (uploadedOk) { - // successful - remove from pending list, add to success list - peersPending.remove(peerId); - peersNumTries.remove(peerId); - peersUploaded.put(peerId, ""); - - // have we successfully uploaded to 3 or more peers yet? - if (peersUploaded.size() >= 3) { - // yep, this job has now run its course and can expire - return; - } else { - // bust out so we don't hog a scheduler slot - node.runAfter(5000, job, desc); - return; - } - - } else { - // insert failed - // increment retry count, fail this peer if retries maxed out - int numTries = ((Integer)peersNumTries.get(peerId)).intValue() + 1; - if (numTries > 4) { - // move peer from pending list to failed list - peersPending.remove(peerId); - peersNumTries.remove(peerId); - peersFailed.put(peerId, ""); - } - - // bust out so we don't hog a scheduler slot - node.runAfter(30000, job, desc); - return; - } - } - - // we'return out of peers, reschedule this job to retry in an hour's time - node.runAfter(3600000, job, desc); - } - - public void doHello() { - QPeer peerRec = (QPeer)node.peers.get(peerId); - - node.log.debug("doHello: "+node.id+" -> "+peerId); - - try { - // execute peers list req on peer - Hashtable result = node.peerHello(peerId, node.destStr); - - // see what happened - String status = (String)result.get("status"); - if (status.equals("ok")) { - peerRec.markAsGreeted(); - - // and, schedule in regular peersList updates - node.schedulePeerUpdateJob(peerRec); - } - } catch (Exception e) { - node.log.warn("Got an xmlrpc client failure, trying again in 1 hour", e); - - // schedule another attempt in 2 hours - Hashtable job = new Hashtable(); - job.put("cmd", "hello"); - job.put("peerId", peerId); - node.runAfter(3600000, job, "hello:peerId="+peerId); - } - } - - public void doGetUpdate() { - QPeer peerRec = (QPeer)node.peers.get(peerId); - int timeLastPeersUpdate = peerRec.getTimeLastUpdate(); - int timeNextContact; - int doCatalog = ((Integer)(job.get("includeCatalog"))).intValue(); - int 
doPeers = ((Integer)(job.get("includePeers"))).intValue(); - Vector peers; - Vector items; - - node.log.info("doGetUpdate: "+node.id+" -> "+peerId); - - try { - // execute peers list req on peer - Hashtable result = node.peerGetUpdate( - peerId, timeLastPeersUpdate, doPeers, doCatalog); - - // see what happened - String status = (String)result.get("status"); - if (status.equals("ok")) { - - node.log.debug("doGetUpdate: successful, result="+result); - - int i; - - // success - add all new peers - peers = (Vector)result.get("peers"); - int npeers = peers.size(); - for (i=0; i - - - - QConsole - - - QConsole - net.i2p.aum.q.QConsole - 1 - - - - QConsole - /* - - - - diff --git a/apps/q/java/xmlrpc.jar b/apps/q/java/xmlrpc.jar deleted file mode 100644 index 6e3d8bdee..000000000 Binary files a/apps/q/java/xmlrpc.jar and /dev/null differ diff --git a/apps/rome/readme.txt b/apps/rome/readme.txt deleted file mode 100644 index 412710a2d..000000000 --- a/apps/rome/readme.txt +++ /dev/null @@ -1 +0,0 @@ -This is ROME 0.8 from http://rome.dev.java.net/, released under a BSD license diff --git a/apps/rome/rome-0.8.jar b/apps/rome/rome-0.8.jar deleted file mode 100644 index 27d2ad329..000000000 Binary files a/apps/rome/rome-0.8.jar and /dev/null differ diff --git a/apps/routerconsole/java/src/net/i2p/router/web/ConfigAdvancedHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/ConfigAdvancedHelper.java index c90194860..3ab6354a6 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/ConfigAdvancedHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/ConfigAdvancedHelper.java @@ -6,22 +6,7 @@ import java.util.TreeSet; import net.i2p.router.RouterContext; -public class ConfigAdvancedHelper { - private RouterContext _context; - /** - * Configure this bean to query a particular router context - * - * @param contextId begging few characters of the routerHash, or null to pick - * the first one we come across. 
- */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } - +public class ConfigAdvancedHelper extends HelperBase { public ConfigAdvancedHelper() {} public String getSettings() { diff --git a/apps/routerconsole/java/src/net/i2p/router/web/ConfigClientsHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/ConfigClientsHelper.java index 18e746644..2bee43533 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/ConfigClientsHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/ConfigClientsHelper.java @@ -9,22 +9,7 @@ import java.util.TreeSet; import net.i2p.router.RouterContext; import net.i2p.router.startup.ClientAppConfig; -public class ConfigClientsHelper { - private RouterContext _context; - /** - * Configure this bean to query a particular router context - * - * @param contextId begging few characters of the routerHash, or null to pick - * the first one we come across. 
- */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } - +public class ConfigClientsHelper extends HelperBase { public ConfigClientsHelper() {} public String getForm1() { @@ -79,7 +64,7 @@ public class ConfigClientsHelper { } buf.append("/> "); if (!enabled) { - buf.append(""); + buf.append(""); } buf.append(" ").append(desc).append("\n"); } diff --git a/apps/routerconsole/java/src/net/i2p/router/web/ConfigKeyringHandler.java b/apps/routerconsole/java/src/net/i2p/router/web/ConfigKeyringHandler.java new file mode 100644 index 000000000..b43bc4d1f --- /dev/null +++ b/apps/routerconsole/java/src/net/i2p/router/web/ConfigKeyringHandler.java @@ -0,0 +1,40 @@ +package net.i2p.router.web; + +import net.i2p.I2PAppContext; +import net.i2p.data.DataFormatException; +import net.i2p.data.Hash; +import net.i2p.data.SessionKey; +import net.i2p.util.ConvertToHash; + +/** + * Support additions via B64 Destkey, B64 Desthash, or blahblah.i2p + */ +public class ConfigKeyringHandler extends FormHandler { + private String _peer; + private String _key; + + protected void processForm() { + if ("Add key".equals(_action)) { + if (_peer == null || _key == null) { + addFormError("You must enter a destination and a key"); + return; + } + Hash h = ConvertToHash.getHash(_peer); + SessionKey sk = new SessionKey(); + try { + sk.fromBase64(_key); + } catch (DataFormatException dfe) {} + if (h != null && h.getData() != null && sk.getData() != null) { + _context.keyRing().put(h, sk); + addFormNotice("Key for " + h.toBase64() + " added to keyring"); + } else { + addFormError("Invalid destination or key"); + } + } else { + addFormError("Unsupported"); + } + } + + public void setPeer(String peer) { _peer = peer; } + public void setKey(String peer) { _key = peer; } +} diff --git a/apps/routerconsole/java/src/net/i2p/router/web/ConfigKeyringHelper.java 
b/apps/routerconsole/java/src/net/i2p/router/web/ConfigKeyringHelper.java new file mode 100644 index 000000000..85c8ee423 --- /dev/null +++ b/apps/routerconsole/java/src/net/i2p/router/web/ConfigKeyringHelper.java @@ -0,0 +1,21 @@ +package net.i2p.router.web; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; + +import net.i2p.router.RouterContext; + +public class ConfigKeyringHelper extends HelperBase { + public ConfigKeyringHelper() {} + + public String getSummary() { + ByteArrayOutputStream baos = new ByteArrayOutputStream(4*1024); + try { + _context.keyRing().renderStatusHTML(new OutputStreamWriter(baos)); + } catch (IOException ioe) { + ioe.printStackTrace(); + } + return new String(baos.toByteArray()); + } +} diff --git a/apps/routerconsole/java/src/net/i2p/router/web/ConfigLoggingHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/ConfigLoggingHelper.java index 07acb0849..635d2e544 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/ConfigLoggingHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/ConfigLoggingHelper.java @@ -6,22 +6,7 @@ import java.util.TreeSet; import net.i2p.router.RouterContext; -public class ConfigLoggingHelper { - private RouterContext _context; - /** - * Configure this bean to query a particular router context - * - * @param contextId begging few characters of the routerHash, or null to pick - * the first one we come across. 
- */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } - +public class ConfigLoggingHelper extends HelperBase { public ConfigLoggingHelper() {} public String getLogFilePattern() { diff --git a/apps/routerconsole/java/src/net/i2p/router/web/ConfigNetHandler.java b/apps/routerconsole/java/src/net/i2p/router/web/ConfigNetHandler.java index 0ddcd58a9..a4fe7483e 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/ConfigNetHandler.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/ConfigNetHandler.java @@ -237,7 +237,7 @@ public class ConfigNetHandler extends FormHandler { private void hiddenSwitch() { // Full restart required to generate new keys - _context.router().addShutdownTask(new UpdateWrapperManagerAndRekeyTask(Router.EXIT_GRACEFUL_RESTART)); + _context.addShutdownTask(new UpdateWrapperManagerAndRekeyTask(Router.EXIT_GRACEFUL_RESTART)); _context.router().shutdownGracefully(Router.EXIT_GRACEFUL_RESTART); } diff --git a/apps/routerconsole/java/src/net/i2p/router/web/ConfigNetHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/ConfigNetHelper.java index b648d9607..9beeb33cf 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/ConfigNetHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/ConfigNetHelper.java @@ -10,22 +10,7 @@ import net.i2p.router.transport.udp.UDPAddress; import net.i2p.router.transport.udp.UDPTransport; import net.i2p.time.Timestamper; -public class ConfigNetHelper { - private RouterContext _context; - /** - * Configure this bean to query a particular router context - * - * @param contextId begging few characters of the routerHash, or null to pick - * the first one we come across. 
- */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } - +public class ConfigNetHelper extends HelperBase { public ConfigNetHelper() {} /** copied from various private components */ @@ -156,7 +141,7 @@ public class ConfigNetHelper { public String getInboundBurstFactorBox() { int numSeconds = 1; int rateKBps = _context.bandwidthLimiter().getInboundBurstKBytesPerSecond(); - int burstKB = _context.bandwidthLimiter().getInboundBurstBytes() * 1024; + int burstKB = _context.bandwidthLimiter().getInboundBurstBytes() / 1024; if ( (rateKBps > 0) && (burstKB > 0) ) numSeconds = burstKB / rateKBps; return getBurstFactor(numSeconds, "inboundburstfactor"); @@ -165,7 +150,7 @@ public class ConfigNetHelper { public String getOutboundBurstFactorBox() { int numSeconds = 1; int rateKBps = _context.bandwidthLimiter().getOutboundBurstKBytesPerSecond(); - int burstKB = _context.bandwidthLimiter().getOutboundBurstBytes() * 1024; + int burstKB = _context.bandwidthLimiter().getOutboundBurstBytes() / 1024; if ( (rateKBps > 0) && (burstKB > 0) ) numSeconds = burstKB / rateKBps; return getBurstFactor(numSeconds, "outboundburstfactor"); @@ -175,16 +160,21 @@ public class ConfigNetHelper { StringBuffer buf = new StringBuffer(256); buf.append("\n"); return buf.toString(); diff --git a/apps/routerconsole/java/src/net/i2p/router/web/ConfigPeerHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/ConfigPeerHelper.java index 63fc1f5e5..662a078b8 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/ConfigPeerHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/ConfigPeerHelper.java @@ -4,25 +4,9 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.OutputStreamWriter; -import net.i2p.data.DataHelper; import net.i2p.router.RouterContext; -public class ConfigPeerHelper { - private RouterContext _context; - /** - * Configure this bean to 
query a particular router context - * - * @param contextId begging few characters of the routerHash, or null to pick - * the first one we come across. - */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } - +public class ConfigPeerHelper extends HelperBase { public ConfigPeerHelper() {} public String getBlocklistSummary() { diff --git a/apps/routerconsole/java/src/net/i2p/router/web/ConfigRestartBean.java b/apps/routerconsole/java/src/net/i2p/router/web/ConfigRestartBean.java index 2b8817a31..e8eb6b26d 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/ConfigRestartBean.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/ConfigRestartBean.java @@ -1,5 +1,7 @@ package net.i2p.router.web; +import java.util.StringTokenizer; + import net.i2p.data.DataHelper; import net.i2p.router.Router; import net.i2p.router.RouterContext; @@ -23,18 +25,20 @@ public class ConfigRestartBean { String systemNonce = getNonce(); if ( (nonce != null) && (systemNonce.equals(nonce)) && (action != null) ) { if ("shutdownImmediate".equals(action)) { - ctx.router().addShutdownTask(new ConfigServiceHandler.UpdateWrapperManagerTask(Router.EXIT_HARD)); - ctx.router().shutdown(Router.EXIT_HARD); // never returns + ctx.addShutdownTask(new ConfigServiceHandler.UpdateWrapperManagerTask(Router.EXIT_HARD)); + //ctx.router().shutdown(Router.EXIT_HARD); // never returns + ctx.router().shutdownGracefully(Router.EXIT_HARD); // give the UI time to respond } else if ("cancelShutdown".equals(action)) { ctx.router().cancelGracefulShutdown(); } else if ("restartImmediate".equals(action)) { - ctx.router().addShutdownTask(new ConfigServiceHandler.UpdateWrapperManagerTask(Router.EXIT_HARD_RESTART)); - ctx.router().shutdown(Router.EXIT_HARD_RESTART); // never returns + ctx.addShutdownTask(new ConfigServiceHandler.UpdateWrapperManagerTask(Router.EXIT_HARD_RESTART)); + 
//ctx.router().shutdown(Router.EXIT_HARD_RESTART); // never returns + ctx.router().shutdownGracefully(Router.EXIT_HARD_RESTART); // give the UI time to respond } else if ("restart".equals(action)) { - ctx.router().addShutdownTask(new ConfigServiceHandler.UpdateWrapperManagerTask(Router.EXIT_GRACEFUL_RESTART)); + ctx.addShutdownTask(new ConfigServiceHandler.UpdateWrapperManagerTask(Router.EXIT_GRACEFUL_RESTART)); ctx.router().shutdownGracefully(Router.EXIT_GRACEFUL_RESTART); } else if ("shutdown".equals(action)) { - ctx.router().addShutdownTask(new ConfigServiceHandler.UpdateWrapperManagerTask(Router.EXIT_GRACEFUL)); + ctx.addShutdownTask(new ConfigServiceHandler.UpdateWrapperManagerTask(Router.EXIT_GRACEFUL)); ctx.router().shutdownGracefully(); } } @@ -47,31 +51,48 @@ public class ConfigRestartBean { return "Shutdown imminent"; } else { return "Shutdown in " + DataHelper.formatDuration(timeRemaining) + "
      " - + "Shutdown immediately
      " - + "Cancel shutdown "; + + buttons(urlBase, systemNonce, "shutdownImmediate,Shutdown immediately,cancelShutdown,Cancel shutdown"); } } else if (restarting) { if (timeRemaining <= 0) { return "Restart imminent"; } else { return "Restart in " + DataHelper.formatDuration(timeRemaining) + "
      " - + "Restart immediately
      " - + "Cancel restart "; + + buttons(urlBase, systemNonce, "restartImmediate,Restart immediately,cancelShutdown,Cancel restart"); } } else { - String shutdown = "Shutdown"; if (System.getProperty("wrapper.version") != null) - return "Restart " - + shutdown; + return buttons(urlBase, systemNonce, "restart,Restart,shutdown,Shutdown"); else - return shutdown; + return buttons(urlBase, systemNonce, "shutdown,Shutdown"); } } + /** @param s value,label,... pairs */ + private static String buttons(String url, String nonce, String s) { + StringBuffer buf = new StringBuffer(128); + StringTokenizer tok = new StringTokenizer(s, ","); + buf.append("
      \n"); + buf.append("\n"); + while (tok.hasMoreTokens()) + buf.append("\n"); + buf.append("
      \n"); + return buf.toString(); + } + private static boolean isShuttingDown(RouterContext ctx) { - return Router.EXIT_GRACEFUL == ctx.router().scheduledGracefulExitCode(); + return Router.EXIT_GRACEFUL == ctx.router().scheduledGracefulExitCode() || + Router.EXIT_HARD == ctx.router().scheduledGracefulExitCode(); } private static boolean isRestarting(RouterContext ctx) { - return Router.EXIT_GRACEFUL_RESTART == ctx.router().scheduledGracefulExitCode(); + return Router.EXIT_GRACEFUL_RESTART == ctx.router().scheduledGracefulExitCode() || + Router.EXIT_HARD_RESTART == ctx.router().scheduledGracefulExitCode(); + } + /** this is for summaryframe.jsp */ + public static long getRestartTimeRemaining() { + RouterContext ctx = ContextHelper.getContext(null); + if (ctx.router().gracefulShutdownInProgress()) + return ctx.router().getShutdownTimeRemaining(); + return Long.MAX_VALUE/2; // summaryframe.jsp adds a safety factor so we don't want to overflow... } } diff --git a/apps/routerconsole/java/src/net/i2p/router/web/ConfigServiceHandler.java b/apps/routerconsole/java/src/net/i2p/router/web/ConfigServiceHandler.java index a72ac9286..8d3e5725c 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/ConfigServiceHandler.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/ConfigServiceHandler.java @@ -3,14 +3,13 @@ package net.i2p.router.web; import java.io.File; import java.io.FileWriter; import java.io.IOException; -import java.util.Iterator; -import java.util.Properties; -import java.util.TreeMap; +import java.util.List; import net.i2p.apps.systray.SysTray; import net.i2p.apps.systray.UrlLauncher; import net.i2p.data.DataHelper; import net.i2p.router.Router; +import net.i2p.router.startup.ClientAppConfig; import org.tanukisoftware.wrapper.WrapperManager; @@ -54,31 +53,31 @@ public class ConfigServiceHandler extends FormHandler { if (_action == null) return; if ("Shutdown gracefully".equals(_action)) { - _context.router().addShutdownTask(new 
UpdateWrapperManagerTask(Router.EXIT_GRACEFUL)); + _context.addShutdownTask(new UpdateWrapperManagerTask(Router.EXIT_GRACEFUL)); _context.router().shutdownGracefully(); addFormNotice("Graceful shutdown initiated"); } else if ("Shutdown immediately".equals(_action)) { - _context.router().addShutdownTask(new UpdateWrapperManagerTask(Router.EXIT_HARD)); + _context.addShutdownTask(new UpdateWrapperManagerTask(Router.EXIT_HARD)); _context.router().shutdown(Router.EXIT_HARD); addFormNotice("Shutdown immediately! boom bye bye bad bwoy"); } else if ("Cancel graceful shutdown".equals(_action)) { _context.router().cancelGracefulShutdown(); addFormNotice("Graceful shutdown cancelled"); } else if ("Graceful restart".equals(_action)) { - _context.router().addShutdownTask(new UpdateWrapperManagerTask(Router.EXIT_GRACEFUL_RESTART)); + _context.addShutdownTask(new UpdateWrapperManagerTask(Router.EXIT_GRACEFUL_RESTART)); _context.router().shutdownGracefully(Router.EXIT_GRACEFUL_RESTART); addFormNotice("Graceful restart requested"); } else if ("Hard restart".equals(_action)) { - _context.router().addShutdownTask(new UpdateWrapperManagerTask(Router.EXIT_HARD_RESTART)); + _context.addShutdownTask(new UpdateWrapperManagerTask(Router.EXIT_HARD_RESTART)); _context.router().shutdown(Router.EXIT_HARD_RESTART); addFormNotice("Hard restart requested"); } else if ("Rekey and Restart".equals(_action)) { addFormNotice("Rekeying after graceful restart"); - _context.router().addShutdownTask(new UpdateWrapperManagerAndRekeyTask(Router.EXIT_GRACEFUL_RESTART)); + _context.addShutdownTask(new UpdateWrapperManagerAndRekeyTask(Router.EXIT_GRACEFUL_RESTART)); _context.router().shutdownGracefully(Router.EXIT_GRACEFUL_RESTART); } else if ("Rekey and Shutdown".equals(_action)) { addFormNotice("Rekeying after graceful shutdown"); - _context.router().addShutdownTask(new UpdateWrapperManagerAndRekeyTask(Router.EXIT_GRACEFUL)); + _context.addShutdownTask(new 
UpdateWrapperManagerAndRekeyTask(Router.EXIT_GRACEFUL)); _context.router().shutdownGracefully(Router.EXIT_GRACEFUL); } else if ("Run I2P on startup".equals(_action)) { installService(); @@ -143,80 +142,22 @@ public class ConfigServiceHandler extends FormHandler { } } - private final static String NL = System.getProperty("line.separator"); private void browseOnStartup(boolean shouldLaunchBrowser) { - File f = new File("clients.config"); - Properties p = new Properties(); - try { - DataHelper.loadProps(p, f); - - int i = 0; - int launchIndex = -1; - while (true) { - String className = p.getProperty("clientApp." + i + ".main"); - if (className == null) break; - if (UrlLauncher.class.getName().equals(className)) { - launchIndex = i; - break; - } - i++; + List clients = ClientAppConfig.getClientApps(_context); + boolean found = false; + for (int cur = 0; cur < clients.size(); cur++) { + ClientAppConfig ca = (ClientAppConfig) clients.get(cur); + if (UrlLauncher.class.getName().equals(ca.className)) { + ca.disabled = !shouldLaunchBrowser; + found = true; + break; } - - if ((launchIndex >= 0) && shouldLaunchBrowser) - return; - if ((launchIndex < 0) && !shouldLaunchBrowser) - return; - - if (shouldLaunchBrowser) { - p.setProperty("clientApp." + i + ".main", UrlLauncher.class.getName()); - p.setProperty("clientApp." + i + ".name", "BrowserLauncher"); - p.setProperty("clientApp." + i + ".args", "http://localhost:7657/index.jsp"); - p.setProperty("clientApp." + i + ".delay", "5"); - } else { - p.remove("clientApp." + launchIndex + ".main"); - p.remove("clientApp." + launchIndex + ".name"); - p.remove("clientApp." + launchIndex + ".args"); - p.remove("clientApp." + launchIndex + ".onBoot"); - p.remove("clientApp." + launchIndex + ".delay"); - - i = launchIndex + 1; - while (true) { - String main = p.getProperty("clientApp." + i + ".main"); - String name = p.getProperty("clientApp." + i + ".name"); - String args = p.getProperty("clientApp." 
+ i + ".args"); - String boot = p.getProperty("clientApp." + i + ".onBoot"); - String delay= p.getProperty("clientApp." + i + ".delay"); - - if (main == null) break; - - p.setProperty("clientApp." + (i-1) + ".main", main); - p.setProperty("clientApp." + (i-1) + ".name", name); - p.setProperty("clientApp." + (i-1) + ".args", args); - if (boot != null) - p.setProperty("clientApp." + (i-1) + ".onBoot", boot); - if (delay != null) - p.setProperty("clientApp." + (i-1) + ".delay", delay); - - p.remove("clientApp." + i + ".main"); - p.remove("clientApp." + i + ".name"); - p.remove("clientApp." + i + ".args"); - p.remove("clientApp." + i + ".onBoot"); - p.remove("clientApp." + i + ".delay"); - - i++; - } - } - - TreeMap sorted = new TreeMap(p); - FileWriter out = new FileWriter(f); - for (Iterator iter = sorted.keySet().iterator(); iter.hasNext(); ) { - String name = (String)iter.next(); - String val = (String)sorted.get(name); - out.write(name + "=" + val + NL); - } - out.close(); - } catch (IOException ioe) { - addFormError("Error updating the client config"); } + // releases <= 0.6.5 deleted the entry completely + if (shouldLaunchBrowser && !found) { + ClientAppConfig ca = new ClientAppConfig(UrlLauncher.class.getName(), "consoleBrowser", "http://localhost:7657", 5, false); + clients.add(ca); + } + ClientAppConfig.writeClientAppConfig(_context, clients); } } diff --git a/apps/routerconsole/java/src/net/i2p/router/web/ConfigStatsHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/ConfigStatsHelper.java index 925fce79a..3af4ffafb 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/ConfigStatsHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/ConfigStatsHelper.java @@ -15,8 +15,7 @@ import net.i2p.stat.RateStat; import net.i2p.stat.StatManager; import net.i2p.util.Log; -public class ConfigStatsHelper { - private RouterContext _context; +public class ConfigStatsHelper extends HelperBase { private Log _log; private String _filter; private Set 
_filters; diff --git a/apps/routerconsole/java/src/net/i2p/router/web/ConfigTunnelsHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/ConfigTunnelsHelper.java index fe17164d3..8b8b2fb16 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/ConfigTunnelsHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/ConfigTunnelsHelper.java @@ -8,22 +8,7 @@ import net.i2p.data.Destination; import net.i2p.router.RouterContext; import net.i2p.router.TunnelPoolSettings; -public class ConfigTunnelsHelper { - private RouterContext _context; - /** - * Configure this bean to query a particular router context - * - * @param contextId begging few characters of the routerHash, or null to pick - * the first one we come across. - */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } - +public class ConfigTunnelsHelper extends HelperBase { public ConfigTunnelsHelper() {} @@ -64,7 +49,9 @@ public class ConfigTunnelsHelper { private static final int WARN_LENGTH = 4; private static final int MAX_LENGTH = 4; - private static final int MAX_QUANTITY = 3; + private static final int WARN_QUANTITY = 5; + private static final int MAX_QUANTITY = 6; + private static final int MAX_BACKUP_QUANTITY = 3; private static final int MAX_VARIANCE = 2; private static final int MIN_NEG_VARIANCE = -1; private void renderForm(StringBuffer buf, int index, String prefix, String name, TunnelPoolSettings in, TunnelPoolSettings out) { @@ -79,6 +66,9 @@ public class ConfigTunnelsHelper { if (in.getLength() + Math.abs(in.getLengthVariance()) >= WARN_LENGTH || out.getLength() + Math.abs(out.getLengthVariance()) >= WARN_LENGTH) buf.append("PERFORMANCE WARNING - Settings include very long tunnels"); + if (in.getQuantity() + in.getBackupQuantity() >= WARN_QUANTITY || + out.getQuantity() + out.getBackupQuantity() >= WARN_QUANTITY) + buf.append("PERFORMANCE WARNING - Settings include high 
tunnel quantities"); buf.append("InboundOutbound\n"); @@ -145,15 +135,15 @@ public class ConfigTunnelsHelper { buf.append("Backup quantity\n"); buf.append("\n"); buf.append("\n"); buf.append("\n"); diff --git a/apps/routerconsole/java/src/net/i2p/router/web/ConfigUpdateHandler.java b/apps/routerconsole/java/src/net/i2p/router/web/ConfigUpdateHandler.java index 65b81ed87..818b748a7 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/ConfigUpdateHandler.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/ConfigUpdateHandler.java @@ -51,7 +51,7 @@ public class ConfigUpdateHandler extends FormHandler { if ( (_updatePolicy == null) || (!_updatePolicy.equals("notify")) ) addFormNotice("Update available, attempting to download now"); else - addFormNotice("Update available, click link on left to download"); + addFormNotice("Update available, click button on left to download"); } else addFormNotice("No update available"); } @@ -104,6 +104,7 @@ public class ConfigUpdateHandler extends FormHandler { } if ( (_updateURL != null) && (_updateURL.length() > 0) ) { + _updateURL = _updateURL.replaceAll("\r\n", ",").replaceAll("\n", ","); String oldURL = _context.router().getConfigSetting(PROP_UPDATE_URL); if ( (oldURL == null) || (!_updateURL.equals(oldURL)) ) { _context.router().setConfigSetting(PROP_UPDATE_URL, _updateURL); diff --git a/apps/routerconsole/java/src/net/i2p/router/web/ConfigUpdateHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/ConfigUpdateHelper.java index 94b8eef61..d0d243799 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/ConfigUpdateHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/ConfigUpdateHelper.java @@ -4,22 +4,7 @@ import net.i2p.crypto.TrustedUpdate; import net.i2p.data.DataHelper; import net.i2p.router.RouterContext; -public class ConfigUpdateHelper { - private RouterContext _context; - /** - * Configure this bean to query a particular router context - * - * @param contextId begging few characters of 
the routerHash, or null to pick - * the first one we come across. - */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } - +public class ConfigUpdateHelper extends HelperBase { public ConfigUpdateHelper() {} public boolean updateAvailable() { @@ -36,7 +21,7 @@ public class ConfigUpdateHelper { public String getUpdateURL() { String url = _context.getProperty(ConfigUpdateHandler.PROP_UPDATE_URL); if (url != null) - return url; + return url.replaceAll(",", "\n"); else return ConfigUpdateHandler.DEFAULT_UPDATE_URL; } diff --git a/apps/routerconsole/java/src/net/i2p/router/web/ContentHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/ContentHelper.java index edacfaa41..ce29250b9 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/ContentHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/ContentHelper.java @@ -6,25 +6,11 @@ import java.util.Locale; import net.i2p.router.RouterContext; import net.i2p.util.FileUtil; -public class ContentHelper { +public class ContentHelper extends HelperBase { private String _page; private int _maxLines; private boolean _startAtBeginning; private String _lang; - private RouterContext _context; - /** - * Configure this bean to query a particular router context - * - * @param contextId begging few characters of the routerHash, or null to pick - * the first one we come across. 
- */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } public ContentHelper() {} diff --git a/apps/routerconsole/java/src/net/i2p/router/web/GraphHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/GraphHelper.java index 658caa1a3..16ce7337d 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/GraphHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/GraphHelper.java @@ -11,27 +11,12 @@ import net.i2p.data.DataHelper; import net.i2p.router.RouterContext; import net.i2p.stat.Rate; -public class GraphHelper { - private RouterContext _context; - private Writer _out; +public class GraphHelper extends HelperBase { private int _periodCount; private boolean _showEvents; private int _width; private int _height; private int _refreshDelaySeconds; - /** - * Configure this bean to query a particular router context - * - * @param contextId begging few characters of the routerHash, or null to pick - * the first one we come across. 
- */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } public GraphHelper() { _periodCount = 60; // SummaryListener.PERIODS; @@ -41,7 +26,6 @@ public class GraphHelper { _refreshDelaySeconds = 60; } - public void setOut(Writer out) { _out = out; } public void setPeriodCount(String str) { try { _periodCount = Integer.parseInt(str); } catch (NumberFormatException nfe) {} } diff --git a/apps/routerconsole/java/src/net/i2p/router/web/HelperBase.java b/apps/routerconsole/java/src/net/i2p/router/web/HelperBase.java new file mode 100644 index 000000000..db5aa9ba2 --- /dev/null +++ b/apps/routerconsole/java/src/net/i2p/router/web/HelperBase.java @@ -0,0 +1,29 @@ +package net.i2p.router.web; + +import java.io.Writer; + +import net.i2p.router.RouterContext; + +/** + * Base helper + */ +public abstract class HelperBase { + protected RouterContext _context; + protected Writer _out; + + /** + * Configure this bean to query a particular router context + * + * @param contextId begging few characters of the routerHash, or null to pick + * the first one we come across. 
+ */ + public void setContextId(String contextId) { + try { + _context = ContextHelper.getContext(contextId); + } catch (Throwable t) { + t.printStackTrace(); + } + } + + public void setWriter(Writer out) { _out = out; } +} diff --git a/apps/routerconsole/java/src/net/i2p/router/web/JobQueueHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/JobQueueHelper.java index a56cce19a..cf8ed2352 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/JobQueueHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/JobQueueHelper.java @@ -7,27 +7,9 @@ import java.io.Writer; import net.i2p.router.RouterContext; -public class JobQueueHelper { - private RouterContext _context; - private Writer _out; - /** - * Configure this bean to query a particular router context - * - * @param contextId begging few characters of the routerHash, or null to pick - * the first one we come across. - */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } - +public class JobQueueHelper extends HelperBase { public JobQueueHelper() {} - public void setWriter(Writer writer) { _out = writer; } - public String getJobQueueSummary() { try { if (_out != null) { diff --git a/apps/routerconsole/java/src/net/i2p/router/web/LogsHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/LogsHelper.java index 67d2fc38c..e1fce8f3e 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/LogsHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/LogsHelper.java @@ -5,22 +5,7 @@ import java.util.List; import net.i2p.router.RouterContext; import net.i2p.util.FileUtil; -public class LogsHelper { - private RouterContext _context; - /** - * Configure this bean to query a particular router context - * - * @param contextId begging few characters of the routerHash, or null to pick - * the first one we come across. 
- */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } - +public class LogsHelper extends HelperBase { public LogsHelper() {} public String getLogs() { diff --git a/apps/routerconsole/java/src/net/i2p/router/web/NavHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/NavHelper.java index a4b2125e3..2d50379f3 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/NavHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/NavHelper.java @@ -6,22 +6,8 @@ import java.util.Map; import net.i2p.router.RouterContext; -public class NavHelper { +public class NavHelper extends HelperBase { private static Map _apps = new HashMap(); - private RouterContext _context; - /** - * Configure this bean to query a particular router context - * - * @param contextId begging few characters of the routerHash, or null to pick - * the first one we come across. - */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } public NavHelper() {} diff --git a/apps/routerconsole/java/src/net/i2p/router/web/NetDbHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/NetDbHelper.java index 41f0ad90c..a3280ac44 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/NetDbHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/NetDbHelper.java @@ -7,29 +7,14 @@ import java.io.Writer; import net.i2p.router.RouterContext; -public class NetDbHelper { - private RouterContext _context; - private Writer _out; +public class NetDbHelper extends HelperBase { private String _routerPrefix; - - /** - * Configure this bean to query a particular router context - * - * @param contextId begging few characters of the routerHash, or null to pick - * the first one we come across. 
- */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } + private boolean _full = false; public NetDbHelper() {} - public void setWriter(Writer writer) { _out = writer; } public void setRouter(String r) { _routerPrefix = r; } + public void setFull(String f) { _full = "1".equals(f); }; public String getNetDbSummary() { try { @@ -37,14 +22,14 @@ public class NetDbHelper { if (_routerPrefix != null) _context.netDb().renderRouterInfoHTML(_out, _routerPrefix); else - _context.netDb().renderStatusHTML(_out); + _context.netDb().renderStatusHTML(_out, _full); return ""; } else { ByteArrayOutputStream baos = new ByteArrayOutputStream(32*1024); if (_routerPrefix != null) _context.netDb().renderRouterInfoHTML(new OutputStreamWriter(baos), _routerPrefix); else - _context.netDb().renderStatusHTML(new OutputStreamWriter(baos)); + _context.netDb().renderStatusHTML(new OutputStreamWriter(baos), _full); return new String(baos.toByteArray()); } } catch (IOException ioe) { diff --git a/apps/routerconsole/java/src/net/i2p/router/web/NoticeHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/NoticeHelper.java index cd656e9e9..d5ce2b0d9 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/NoticeHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/NoticeHelper.java @@ -7,22 +7,7 @@ import net.i2p.router.RouterContext; * Simple helper to query the appropriate router for data necessary to render * any emergency notices */ -public class NoticeHelper { - private RouterContext _context; - /** - * Configure this bean to query a particular router context - * - * @param contextId begging few characters of the routerHash, or null to pick - * the first one we come across. 
- */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } - +public class NoticeHelper extends HelperBase { public String getSystemNotice() { if (true) return ""; // moved to the left hand nav if (_context.router().gracefulShutdownInProgress()) { @@ -35,4 +20,4 @@ public class NoticeHelper { return ""; } } -} \ No newline at end of file +} diff --git a/apps/routerconsole/java/src/net/i2p/router/web/OldConsoleHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/OldConsoleHelper.java index 556367e27..6237183ab 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/OldConsoleHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/OldConsoleHelper.java @@ -8,29 +8,9 @@ import java.io.Writer; import net.i2p.router.RouterContext; import net.i2p.router.admin.StatsGenerator; -public class OldConsoleHelper { - private RouterContext _context; - private Writer _out; - /** - * Configure this bean to query a particular router context - * - * @param contextId begging few characters of the routerHash, or null to pick - * the first one we come across. 
- */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } - +public class OldConsoleHelper extends HelperBase { public OldConsoleHelper() {} - public void setWriter(Writer writer) { - _out = writer; - } - public String getConsole() { try { if (_out != null) { diff --git a/apps/routerconsole/java/src/net/i2p/router/web/PeerHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/PeerHelper.java index e5561fe1f..2504067ac 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/PeerHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/PeerHelper.java @@ -5,28 +5,12 @@ import java.io.Writer; import net.i2p.router.RouterContext; -public class PeerHelper { - private RouterContext _context; - private Writer _out; +public class PeerHelper extends HelperBase { private int _sortFlags; private String _urlBase; - /** - * Configure this bean to query a particular router context - * - * @param contextId begging few characters of the routerHash, or null to pick - * the first one we come across. 
- */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } public PeerHelper() {} - public void setOut(Writer out) { _out = out; } public void setSort(String flags) { if (flags != null) { try { diff --git a/apps/routerconsole/java/src/net/i2p/router/web/ProfilesHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/ProfilesHelper.java index 4db1010a5..702a63e50 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/ProfilesHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/ProfilesHelper.java @@ -6,22 +6,7 @@ import java.io.OutputStreamWriter; import net.i2p.router.RouterContext; -public class ProfilesHelper { - private RouterContext _context; - /** - * Configure this bean to query a particular router context - * - * @param contextId begging few characters of the routerHash, or null to pick - * the first one we come across. - */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } - +public class ProfilesHelper extends HelperBase { public ProfilesHelper() {} public String getProfileSummary() { diff --git a/apps/routerconsole/java/src/net/i2p/router/web/StatHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/StatHelper.java index 8b67d2622..ce6fefd5d 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/StatHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/StatHelper.java @@ -11,12 +11,10 @@ import net.i2p.router.RouterContext; * uuuugly. dump the peer profile data if given a peer. 
* */ -public class StatHelper { +public class StatHelper extends HelperBase { private String _peer; - private Writer _writer; public void setPeer(String peer) { _peer = peer; } - public void setWriter(Writer writer) { _writer = writer; } public String getProfile() { RouterContext ctx = (RouterContext)net.i2p.router.RouterContext.listContexts().get(0); @@ -25,10 +23,10 @@ public class StatHelper { Hash peer = (Hash)iter.next(); if (peer.toBase64().startsWith(_peer)) { try { - WriterOutputStream wos = new WriterOutputStream(_writer); + WriterOutputStream wos = new WriterOutputStream(_out); ctx.profileOrganizer().exportProfile(peer, wos); wos.flush(); - _writer.flush(); + _out.flush(); return ""; } catch (Exception e) { e.printStackTrace(); diff --git a/apps/routerconsole/java/src/net/i2p/router/web/StatSummarizer.java b/apps/routerconsole/java/src/net/i2p/router/web/StatSummarizer.java index 6fc982386..331f42d89 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/StatSummarizer.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/StatSummarizer.java @@ -54,6 +54,7 @@ public class StatSummarizer implements Runnable { // ",udp.receivePacketSkew.60000" + // ",udp.sendConfirmTime.60000" + // ",udp.sendPacketSize.60000" + + ",router.memoryUsed.60000" + ",router.activePeers.60000"; // ",router.activeSendPeers.60000" + // ",tunnel.acceptLoad.60000" + diff --git a/apps/routerconsole/java/src/net/i2p/router/web/SummaryHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/SummaryHelper.java index 279650ebb..1b302ff0b 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/SummaryHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/SummaryHelper.java @@ -1,11 +1,15 @@ package net.i2p.router.web; +import java.text.Collator; import java.text.DateFormat; import java.text.DecimalFormat; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; import java.util.Date; import java.util.Iterator; +import 
java.util.List; import java.util.Locale; -import java.util.Set; import net.i2p.data.DataHelper; import net.i2p.data.Destination; @@ -25,22 +29,7 @@ import net.i2p.stat.RateStat; * Simple helper to query the appropriate router for data necessary to render * the summary sections on the router console. */ -public class SummaryHelper { - private RouterContext _context; - /** - * Configure this bean to query a particular router context - * - * @param contextId begging few characters of the routerHash, or null to pick - * the first one we come across. - */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } - +public class SummaryHelper extends HelperBase { /** * Retrieve the shortened 4 character ident for the router located within * the current JVM at the given context. @@ -117,6 +106,9 @@ public class SummaryHelper { public int getAllPeers() { return _context.netDb().getKnownRouters(); } public String getReachability() { + if (_context.router().getUptime() > 60*1000 && (!_context.router().gracefulShutdownInProgress()) && + !_context.clientManager().isAlive()) + return "ERR-Client Manager I2CP Error - check logs"; // not a router problem but the user should know if (!_context.clock().getUpdatedSuccessfully()) return "ERR-ClockSkew"; if (_context.router().isHidden()) @@ -144,7 +136,7 @@ public class SummaryHelper { case CommSystemFacade.STATUS_UNKNOWN: // fallthrough default: ra = _context.router().getRouterInfo().getTargetAddress("UDP"); - if (ra == null) { + if (ra == null && _context.router().getUptime() > 5*60*1000) { if (_context.getProperty(ConfigNetHelper.PROP_I2NP_NTCP_HOSTNAME) == null || _context.getProperty(ConfigNetHelper.PROP_I2NP_NTCP_PORT) == null) return "ERR-UDP Disabled and Inbound TCP host/port not set"; @@ -244,7 +236,7 @@ public class SummaryHelper { */ public String getInboundSecondKBps() { if (_context == null) - return "0.0"; + return 
"0"; double kbps = _context.bandwidthLimiter().getReceiveBps()/1024d; DecimalFormat fmt = new DecimalFormat("##0.00"); return fmt.format(kbps); @@ -256,7 +248,7 @@ public class SummaryHelper { */ public String getOutboundSecondKBps() { if (_context == null) - return "0.0"; + return "0"; double kbps = _context.bandwidthLimiter().getSendBps()/1024d; DecimalFormat fmt = new DecimalFormat("##0.00"); return fmt.format(kbps); @@ -269,10 +261,10 @@ public class SummaryHelper { */ public String getInboundFiveMinuteKBps() { if (_context == null) - return "0.0"; + return "0"; RateStat receiveRate = _context.statManager().getRate("bw.recvRate"); - if (receiveRate == null) return "0.0"; + if (receiveRate == null) return "0"; Rate rate = receiveRate.getRate(5*60*1000); double kbps = rate.getAverageValue()/1024; DecimalFormat fmt = new DecimalFormat("##0.00"); @@ -286,10 +278,10 @@ public class SummaryHelper { */ public String getOutboundFiveMinuteKBps() { if (_context == null) - return "0.0"; + return "0"; RateStat receiveRate = _context.statManager().getRate("bw.sendRate"); - if (receiveRate == null) return "0.0"; + if (receiveRate == null) return "0"; Rate rate = receiveRate.getRate(5*60*1000); double kbps = rate.getAverageValue()/1024; DecimalFormat fmt = new DecimalFormat("##0.00"); @@ -303,10 +295,10 @@ public class SummaryHelper { */ public String getInboundLifetimeKBps() { if (_context == null) - return "0.0"; + return "0"; RateStat receiveRate = _context.statManager().getRate("bw.recvRate"); - if (receiveRate == null) return "0.0"; + if (receiveRate == null) return "0"; double kbps = receiveRate.getLifetimeAverageValue()/1024; DecimalFormat fmt = new DecimalFormat("##0.00"); return fmt.format(kbps); @@ -319,10 +311,10 @@ public class SummaryHelper { */ public String getOutboundLifetimeKBps() { if (_context == null) - return "0.0"; + return "0"; RateStat sendRate = _context.statManager().getRate("bw.sendRate"); - if (sendRate == null) return "0.0"; + if (sendRate == 
null) return "0"; double kbps = sendRate.getLifetimeAverageValue()/1024; DecimalFormat fmt = new DecimalFormat("##0.00"); return fmt.format(kbps); @@ -335,11 +327,11 @@ public class SummaryHelper { */ public String getInboundTransferred() { if (_context == null) - return "0.0"; + return "0"; long received = _context.bandwidthLimiter().getTotalAllocatedInboundBytes(); - return getTransferred(received); + return DataHelper.formatSize(received) + 'B'; } /** @@ -349,40 +341,10 @@ public class SummaryHelper { */ public String getOutboundTransferred() { if (_context == null) - return "0.0"; + return "0"; long sent = _context.bandwidthLimiter().getTotalAllocatedOutboundBytes(); - return getTransferred(sent); - } - - private static String getTransferred(long bytes) { - double val = bytes; - int scale = 0; - if (bytes > 1024*1024*1024) { - // gigs transferred - scale = 3; - val /= (double)(1024*1024*1024); - } else if (bytes > 1024*1024) { - // megs transferred - scale = 2; - val /= (double)(1024*1024); - } else if (bytes > 1024) { - // kbytes transferred - scale = 1; - val /= (double)1024; - } else { - scale = 0; - } - - DecimalFormat fmt = new DecimalFormat("##0.00"); - - String str = fmt.format(val); - switch (scale) { - case 1: return str + "KB"; - case 2: return str + "MB"; - case 3: return str + "GB"; - default: return bytes + "bytes"; - } + return DataHelper.formatSize(sent) + 'B'; } /** @@ -391,20 +353,16 @@ public class SummaryHelper { * @return html section summary */ public String getDestinations() { - Set clients = _context.clientManager().listClients(); + // covert the set to a list so we can sort by name and not lose duplicates + List clients = new ArrayList(_context.clientManager().listClients()); + Collections.sort(clients, new AlphaComparator()); StringBuffer buf = new StringBuffer(512); buf.append("Local destinations
      "); for (Iterator iter = clients.iterator(); iter.hasNext(); ) { Destination client = (Destination)iter.next(); - TunnelPoolSettings in = _context.tunnelManager().getInboundSettings(client.calculateHash()); - TunnelPoolSettings out = _context.tunnelManager().getOutboundSettings(client.calculateHash()); - String name = (in != null ? in.getDestinationNickname() : null); - if (name == null) - name = (out != null ? out.getDestinationNickname() : null); - if (name == null) - name = client.calculateHash().toBase64().substring(0,6); + String name = getName(client); buf.append("* ").append(name).append("
      \n"); LeaseSet ls = _context.netDb().lookupLeaseSetLocally(client.calculateHash()); @@ -418,14 +376,38 @@ public class SummaryHelper { buf.append("No leases
      \n"); } buf.append("Details "); + buf.append("\" target=\"_top\">Details "); buf.append("Config
      \n"); + buf.append("\" target=\"_top\">Config
      \n"); } buf.append("
      \n"); return buf.toString(); } + private class AlphaComparator implements Comparator { + public int compare(Object lhs, Object rhs) { + String lname = getName((Destination)lhs); + String rname = getName((Destination)rhs); + if (lname.equals("shared clients")) + return -1; + if (rname.equals("shared clients")) + return 1; + return Collator.getInstance().compare(lname, rname); + } + } + + private String getName(Destination d) { + TunnelPoolSettings in = _context.tunnelManager().getInboundSettings(d.calculateHash()); + String name = (in != null ? in.getDestinationNickname() : null); + if (name == null) { + TunnelPoolSettings out = _context.tunnelManager().getOutboundSettings(d.calculateHash()); + name = (out != null ? out.getDestinationNickname() : null); + if (name == null) + name = d.calculateHash().toBase64().substring(0,6); + } + return name; + } + /** * How many free inbound tunnels we have. * @@ -556,4 +538,5 @@ public class SummaryHelper { public boolean updateAvailable() { return NewsFetcher.getInstance(_context).updateAvailable(); } + } diff --git a/apps/routerconsole/java/src/net/i2p/router/web/SummaryListener.java b/apps/routerconsole/java/src/net/i2p/router/web/SummaryListener.java index ef2274512..6ef9df9db 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/SummaryListener.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/SummaryListener.java @@ -184,7 +184,7 @@ class SummaryRenderer { def.setTimePeriod(start/1000, 0); String name = _listener.getRate().getRateStat().getName(); // heuristic to set K=1024 - if ((name.startsWith("bw.") || name.indexOf("Size") >= 0 || name.indexOf("Bps") >= 0) + if ((name.startsWith("bw.") || name.indexOf("Size") >= 0 || name.indexOf("Bps") >= 0 || name.indexOf("memory") >= 0) && !showEvents) def.setBaseValue(1024); String title = name; diff --git a/apps/routerconsole/java/src/net/i2p/router/web/TunnelHelper.java b/apps/routerconsole/java/src/net/i2p/router/web/TunnelHelper.java index 
4d4eba76b..3cd8a96e3 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/TunnelHelper.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/TunnelHelper.java @@ -7,27 +7,9 @@ import java.io.Writer; import net.i2p.router.RouterContext; -public class TunnelHelper { - private RouterContext _context; - private Writer _out; - /** - * Configure this bean to query a particular router context - * - * @param contextId begging few characters of the routerHash, or null to pick - * the first one we come across. - */ - public void setContextId(String contextId) { - try { - _context = ContextHelper.getContext(contextId); - } catch (Throwable t) { - t.printStackTrace(); - } - } - +public class TunnelHelper extends HelperBase { public TunnelHelper() {} - public void setWriter(Writer writer) { _out = writer; } - public String getTunnelSummary() { try { if (_out != null) { diff --git a/apps/routerconsole/java/src/net/i2p/router/web/UpdateHandler.java b/apps/routerconsole/java/src/net/i2p/router/web/UpdateHandler.java index 56c291c43..83495f33e 100644 --- a/apps/routerconsole/java/src/net/i2p/router/web/UpdateHandler.java +++ b/apps/routerconsole/java/src/net/i2p/router/web/UpdateHandler.java @@ -27,13 +27,13 @@ import net.i2p.util.Log; *

      */ public class UpdateHandler { - private static UpdateRunner _updateRunner; - private RouterContext _context; - private Log _log; - private DecimalFormat _pct = new DecimalFormat("00.0%"); + protected static UpdateRunner _updateRunner; + protected RouterContext _context; + protected Log _log; + protected DecimalFormat _pct = new DecimalFormat("00.0%"); - private static final String SIGNED_UPDATE_FILE = "i2pupdate.sud"; - private static final String PROP_UPDATE_IN_PROGRESS = "net.i2p.router.web.UpdateHandler.updateInProgress"; + protected static final String SIGNED_UPDATE_FILE = "i2pupdate.sud"; + protected static final String PROP_UPDATE_IN_PROGRESS = "net.i2p.router.web.UpdateHandler.updateInProgress"; public UpdateHandler() { this(ContextHelper.getContext(null)); @@ -93,9 +93,8 @@ public class UpdateHandler { } public class UpdateRunner implements Runnable, EepGet.StatusListener { - private boolean _isRunning; - private String _status; - private long _startedOn; + protected boolean _isRunning; + protected String _status; public UpdateRunner() { _isRunning = false; _status = "Updating"; @@ -108,8 +107,7 @@ public class UpdateHandler { System.setProperty(PROP_UPDATE_IN_PROGRESS, "false"); _isRunning = false; } - private void update() { - _startedOn = -1; + protected void update() { _status = "Updating"; String updateURL = selectUpdateURL(); if (_log.shouldLog(Log.DEBUG)) @@ -130,7 +128,6 @@ public class UpdateHandler { else get = new EepGet(_context, 1, SIGNED_UPDATE_FILE, updateURL, false); get.addStatusListener(UpdateRunner.this); - _startedOn = _context.clock().now(); get.fetch(); } catch (Throwable t) { _context.logManager().getLog(UpdateHandler.class).error("Error updating", t); @@ -188,7 +185,7 @@ public class UpdateHandler { } private void restart() { - _context.router().addShutdownTask(new ConfigServiceHandler.UpdateWrapperManagerTask(Router.EXIT_GRACEFUL_RESTART)); + _context.addShutdownTask(new 
ConfigServiceHandler.UpdateWrapperManagerTask(Router.EXIT_GRACEFUL_RESTART)); _context.router().shutdownGracefully(Router.EXIT_GRACEFUL_RESTART); } diff --git a/apps/routerconsole/jsp/config.jsp b/apps/routerconsole/jsp/config.jsp index 0d9573fe6..ae357d69f 100644 --- a/apps/routerconsole/jsp/config.jsp +++ b/apps/routerconsole/jsp/config.jsp @@ -75,7 +75,7 @@ with "SSU introductions" - peers who will relay a request from someone you don't know to your router for your router so that you can make an outbound connection to them. I2P will use these introductions automatically if it detects that the port is not forwarded (as shown by - the Status: OK (NAT) line), or you can manually require them here. + the Status: Firewalled line), or you can manually require them here. Users behind symmetric NATs, such as OpenBSD's pf, are not currently supported.


      @@ -103,6 +103,8 @@ substantially. When in doubt, leave the hostname and port number blank.

      Note: changing any of these settings will terminate all of your connections and effectively restart your router. +

      +

      Reachability Help:

      @@ -110,6 +112,7 @@ if you open up your port (generally 8887) to both UDP and TCP, and enable inbound TCP above. If you think you have opened up your firewall and I2P still thinks you are firewalled, remember that you may have multiple firewalls, for example both software packages and external hardware routers. + If there is an error, the logs may also help diagnose the problem.

      • OK - Your UDP port does not appear to be firewalled.
      • Firewalled - Your UDP port appears to be firewalled. @@ -149,6 +152,9 @@ You have not configured inbound TCP with a hostname and port above, however you have disabled UDP. Therefore your router cannot accept inbound connections. Please configure a TCP host and port above or enable UDP. +
      • ERR - Client Manager I2CP Error - check logs - + This is usually due to a port 7654 conflict. Check the logs to verify. Do you have another I2P instance running? + Stop the conflicting program and restart I2P.


      @@ -171,7 +177,6 @@


      --> -
  • diff --git a/apps/routerconsole/jsp/configkeyring.jsp b/apps/routerconsole/jsp/configkeyring.jsp new file mode 100644 index 000000000..7dd8bf178 --- /dev/null +++ b/apps/routerconsole/jsp/configkeyring.jsp @@ -0,0 +1,58 @@ +<%@page contentType="text/html"%> +<%@page pageEncoding="UTF-8"%> + + + +I2P Router Console - config keyring + + + +<%@include file="nav.jsp" %> +<%@include file="summary.jsp" %> + +
    + <%@include file="confignav.jsp" %> + + + + " /> + + + + + + + " /> + +

    +

    Keyring

    + The router keyring is used to decrypt encrypted leaseSets. + The keyring may contain keys for local or remote encrypted destinations. +

    +

    + +
    + +
    + <% String prev = System.getProperty("net.i2p.router.web.ConfigKeyringHandler.nonce"); + if (prev != null) System.setProperty("net.i2p.router.web.ConfigKeyringHandler.noncePrev", prev); + System.setProperty("net.i2p.router.web.ConfigKeyringHandler.nonce", new java.util.Random().nextLong()+""); %> + " /> +

    Manual Keyring Addition

    + Enter keys for encrypted remote destinations here. + Keys for local destinations must be entered on the I2PTunnel page. +

    + +
    Dest. name, hash, or full key: + +
    Session Key: + +
    +
    +

    + + +
    + + + diff --git a/apps/routerconsole/jsp/confignav.jsp b/apps/routerconsole/jsp/confignav.jsp index b6a5ce6df..851ab79b5 100644 --- a/apps/routerconsole/jsp/confignav.jsp +++ b/apps/routerconsole/jsp/confignav.jsp @@ -10,6 +10,8 @@ %>Clients | <% } else { %>Clients | <% } if (request.getRequestURI().indexOf("configpeer.jsp") != -1) { %>Peers | <% } else { %>Peers | <% } + if (request.getRequestURI().indexOf("configkeyring.jsp") != -1) { + %>Keyring | <% } else { %>Keyring | <% } if (request.getRequestURI().indexOf("configlogging.jsp") != -1) { %>Logging | <% } else { %>Logging | <% } if (request.getRequestURI().indexOf("configstats.jsp") != -1) { diff --git a/apps/routerconsole/jsp/configstats.jsp b/apps/routerconsole/jsp/configstats.jsp index 651636176..c7ec3090c 100644 --- a/apps/routerconsole/jsp/configstats.jsp +++ b/apps/routerconsole/jsp/configstats.jsp @@ -88,6 +88,7 @@ function toggleAll(category) LogGraph<% } // end iterating over required groups for the current stat %> + checked="true" <% } %>/> diff --git a/apps/routerconsole/jsp/default.css b/apps/routerconsole/jsp/default.css index b5a63a1ad..527a7c9be 100644 --- a/apps/routerconsole/jsp/default.css +++ b/apps/routerconsole/jsp/default.css @@ -22,11 +22,12 @@ div.logo { top: 1em; margin: 0em; padding: .5em; - text-align: left; + text-align: center; } div.toolbar { - font-weight: bold + margin: 0em 0em 2em 0em; + font-weight: bold; } div.routersummary { @@ -70,7 +71,7 @@ div.news { margin: 0em 1em 1em 224px; padding: .5em 1em; background-color: #ffffc0; - border: medium solid #ffffd0; + border: medium solid #ffffa0; text-align: left; color: inherit; } diff --git a/apps/routerconsole/jsp/graphs.jsp b/apps/routerconsole/jsp/graphs.jsp index 422bf19d6..06807f397 100644 --- a/apps/routerconsole/jsp/graphs.jsp +++ b/apps/routerconsole/jsp/graphs.jsp @@ -14,7 +14,7 @@ " /> - +
    diff --git a/apps/routerconsole/jsp/help.jsp b/apps/routerconsole/jsp/help.jsp index 8580f6e65..68f1b6245 100644 --- a/apps/routerconsole/jsp/help.jsp +++ b/apps/routerconsole/jsp/help.jsp @@ -12,12 +12,139 @@

    Help

    -Sorry, there's no help text here yet, so check out the +Sorry, there's not much help text here yet, so also check out the FAQ on www.i2p2.i2p or the Deutsch FAQ. +You may also try the +forum +or IRC.
    +

    Summary Bar Information

    +Many of the stats on the summary bar may be +configured to be +graphed for further analysis. + +

    General

    +
      +
    • Ident: +The first four characters (24 bits) of your 44-character (256-bit) Base64 router hash. +The full hash is shown on your router info page. +Never reveal this to anyone, as your router info contains your IP. +
    • Version: +The version of the I2P software you are running. +
    • Now: +The current time (UTC) and the skew, if any. I2P requires your computer's time be accurate. +If the skew is more than a few seconds, please correct the problem by adjusting +your computer's time. +
    • Reachability: +The router's view of whether it can be contacted by other routers. +Further information is on the configuration page. +
    + +

    Peers

    +
      +
    • Active: +The first number is the number of peers you've sent or received a message from in the last few minutes. +This may range from 8-10 to several hundred, depending on your total bandwidth, +shared bandwidth, and locally-generated traffic. +The second number is the number of peers seen in the last hour or so. +Do not be concerned if these numbers vary widely. +Enable graphing +
    • Fast: +This is the number of peers you use for building client tunnels. It is generally in the +range 8-15. Your fast peers are shown on the profiles page. +Enable graphing +
    • High Capacity: +This is the number of peers you use for building some of your exploratory tunnels. It is generally in the +range 8-25. The fast peers are included in the high capacity tier. +Your high capacity peers are shown on the profiles page. +Enable graphing +
    • Well Integrated: +This is the number of peers you use for network database inquiries. +These are usually the "floodfill" peers. +Your well integrated peers are shown on the bottom of the profiles page. +
    • Known: +This is the total number of routers you know about. +They are listed on the network database page. +This may range from under 100 to 1000 or more. +This number is not the total size of the network; +it may vary widely depending on your total bandwidth, +shared bandwidth, and locally-generated traffic. +I2P does not require a router to know every other router. +
    + +

    Bandwidth in/out

    +Should be self-explanatory. All values are in bytes per second, not bits per second. +Change your bandwidth limits on the configuration page. +Bandwidth is graphed by default. + +

    Local destinations

    +The local applications connecting through your router. +These may be clients started through I2PTunnel +or external programs connecting through SAM, BOB, or directly to I2CP. + +

    Tunnels in/out

    +The actual tunnels are shown on the the tunnels page. +
      +
    • Exploratory: +Tunnels built by your router and used for communication with the floodfill peers, +building new tunnels, and testing existing tunnels. +
    • Client: +Tunnels built by your router for each client's use. +
    • Participating: +Tunnels built by other routers through your router. +This may vary widely depending on network demand, your +shared bandwidth, and amount of locally-generated traffic. +The recommended method for limiting participating tunnels is +to change your share percentage on the configuration page. +You may also limit the total number by setting router.maxParticipatingTunnels=nnn on +the advanced configuration page. +Enable graphing +
    + +

    Congestion

    +Some basic indications of router overload. +
      +
    • Job lag: +How long jobs are waiting before execution. The job queue is listed on the jobs page. +Unfortunately, there are several other job queues in the router that may be congested, +and their status is not available in the router console. +The job lag should generally be zero. +If it is consistently higher than 500ms, your computer is very slow, or the +router has serious problems. +Enable graphing +
    • Message delay: +How long an outbound message waits in the queue. +This should generally be a few hundred milliseconds or less. +If it is consistently higher than 1000ms, your computer is very slow, +or you should adjust your bandwidth limits, or your (bittorrent?) clients +may be sending too much data and should have their transmit bandwidth limit reduced. +Enable graphing (transport.sendProcessingTime) +
    • Tunnel lag: +This is the round trip time for a tunnel test, which sends a single message +out a client tunnel and in an exploratory tunnel, or vice versa. +It should usually be less than 5 seconds. +If it is consistently higher than that, your computer is very slow, +or you should adjust your bandwidth limits, or there are network problems. +Enable graphing (tunnel.testSuccessTime) +
    • Handle backlog: +This is the number of pending requests from other routers to build a +participating tunnel through your router. +It should usually be close to zero. +If it is consistently high, your computer is too slow, +and you should reduce your share bandwidth limits. +
    • Accepting/Rejecting: +Your routers' status on accepting or rejecting +requests from other routers to build a +participating tunnel through your router. +Your router may accept all requests, accept or reject a percentage of requests, +or reject all requests for a number of reasons, to control +the bandwidth and CPU demands and maintain capacity for +local clients. +
    +

    Legal stuff

    The I2P router (router.jar) and SDK (i2p.jar) are almost entirely public domain, with a few notable exceptions:
      @@ -34,9 +161,8 @@ licenses and dependencies. This webpage is being served as part of the I2P rout client application, which is built off a trimmed down Jetty instance (trimmed down, as in, we do not include the demo apps or other add-ons, and we simplify configuration), allowing you to deploy standard JSP/Servlet web applications into your router. Jetty in turn makes use of -Apache's javax.servlet (javax.servlet.jar) implementation, as well as their xerces-j XML parser (xerces.jar). -Their XML parser requires the Sun XML APIs (JAXP) which is included in binary form (xml-apis.jar) as required -by their binary code license. This product includes software developed by the Apache Software Foundation +Apache's javax.servlet (javax.servlet.jar) implementation. +This product includes software developed by the Apache Software Foundation (http://www.apache.org/).

      Another application you can see on this webpage is I2PTunnel diff --git a/apps/routerconsole/jsp/nav.jsp b/apps/routerconsole/jsp/nav.jsp index a0c6076f8..22bb8ec24 100644 --- a/apps/routerconsole/jsp/nav.jsp +++ b/apps/routerconsole/jsp/nav.jsp @@ -12,7 +12,6 @@

      <% if (new File("docs/toolbar.html").exists()) { %> diff --git a/apps/routerconsole/jsp/netdb.jsp b/apps/routerconsole/jsp/netdb.jsp index 89c2bdec2..08a1377d3 100644 --- a/apps/routerconsole/jsp/netdb.jsp +++ b/apps/routerconsole/jsp/netdb.jsp @@ -14,6 +14,7 @@ " /> + " /> " />
      diff --git a/apps/routerconsole/jsp/peers.jsp b/apps/routerconsole/jsp/peers.jsp index a537af634..d3b941a34 100644 --- a/apps/routerconsole/jsp/peers.jsp +++ b/apps/routerconsole/jsp/peers.jsp @@ -13,7 +13,7 @@
      " /> - + " /> diff --git a/apps/routerconsole/jsp/summary.jsp b/apps/routerconsole/jsp/summary.jsp index 308088914..48f3b4fef 100644 --- a/apps/routerconsole/jsp/summary.jsp +++ b/apps/routerconsole/jsp/summary.jsp @@ -7,8 +7,14 @@ " /> + + +" />
      +
      Configuration  Help
      +
      + General
      Ident: (, never reveal it to anyone" href="netdb.jsp?r=.">view)
      Version:
      @@ -25,15 +31,15 @@ if (prev != null) System.setProperty("net.i2p.router.web.UpdateHandler.noncePrev", prev); System.setProperty("net.i2p.router.web.UpdateHandler.nonce", nonce+""); String uri = request.getRequestURI(); - if (uri.indexOf('?') > 0) - uri = uri + "&updateNonce=" + nonce; - else - uri = uri + "?updateNonce=" + nonce; - out.print("
      Update available"); + out.print("

      \n"); + out.print("\n"); + out.print("

      \n"); } } %> -
      <%=net.i2p.router.web.ConfigRestartBean.renderStatus(request.getRequestURI(), request.getParameter("action"), request.getParameter("consoleNonce"))%> +

      + <%=net.i2p.router.web.ConfigRestartBean.renderStatus(request.getRequestURI(), request.getParameter("action"), request.getParameter("consoleNonce"))%> +


      Peers
      @@ -57,11 +63,9 @@ if (prev != null) System.setProperty("net.i2p.router.web.ReseedHandler.noncePrev", prev); System.setProperty("net.i2p.router.web.ReseedHandler.nonce", nonce+""); String uri = request.getRequestURI(); - if (uri.indexOf('?') > 0) - uri = uri + "&reseedNonce=" + nonce; - else - uri = uri + "?reseedNonce=" + nonce; - out.print(" reseed
      "); + out.print("

      \n"); + out.print("\n"); + out.print("

      \n"); } } // If a new reseed ain't running, and the last reseed had errors, show error message @@ -94,6 +98,5 @@ Tunnel lag:
      Handle backlog:

      -
      diff --git a/apps/sam/java/build.xml b/apps/sam/java/build.xml index 5f48bff98..bb692f2db 100644 --- a/apps/sam/java/build.xml +++ b/apps/sam/java/build.xml @@ -25,7 +25,14 @@ + + + @@ -38,6 +45,9 @@ + + + diff --git a/apps/stasher/python/README.txt b/apps/stasher/python/README.txt deleted file mode 100644 index 7c2a4d0ab..000000000 --- a/apps/stasher/python/README.txt +++ /dev/null @@ -1,77 +0,0 @@ -STASHER README - ------------------------ -INSTALLING STASHER - -Prerequisite: - -Before you can install/run Stasher, you will first need to have installed -the I2P Python modules - available in cvs at i2p/apps/sam/python. - -To install stasher, just make sure you've got the latest cvs, then type - python setup.py install -as root. - -This installs the stasher engine, plus a wrapper client script called -'stasher', which setup.py will install into your execution path. - -If you don't like the thought of becoming root, you could just put stasher.py -on your execution path, and/or create a symlink called 'stasher'. - -Test your installation by typing 'stasher -h' - this should display -a help message. - ------------------------- -DOZE USERS PLEASE NOTE - -You'll need to watch and see where the stasher.py -wrapper script gets installed. On my box, it ends up on d:\python23\scripts, -but on your box it'll likely go into c:\python23\scripts. - -You may either update your system PATH environment variable to include your -python scripts directory, OR, you can copy stasher.py to anywhere that's -on your path. - -In the explanations below, note that wherever I say to type 'stasher', you'll -need to type 'stasher.py' instead. - ------------------------- -WARNING - -This is a very early pre-alpha test version of stasher. -It is only capable of storing or retrieving files of -less than 29k in size. - -Also, files are totally insecure - anyone can overwrite any keys you -insert, and vice versa. - -I'll be adding support for CHK-like and SSK-like keys in due course. 
- ------------------------- -USING STASHER - -To see stasher's options, type: - - stasher -h - -This should dump out a verbose help message. - -To start a stasher node, type: - - stasher start - -To shut down a stasher node, type: - - stasher stop - -To insert a file into stasher, type: - - stasher put mykey myfile - -Note, if you don't supply a filename, stasher can read -the file from standard input. - -To retrieve a file from stasher, type: - - stasher get mykey - diff --git a/apps/stasher/python/noderefs/aum.stasher b/apps/stasher/python/noderefs/aum.stasher deleted file mode 100644 index 5692d6019..000000000 --- a/apps/stasher/python/noderefs/aum.stasher +++ /dev/null @@ -1 +0,0 @@ -qeu89U8BXS8~jlsGvr-wjMvSIcXpYZ6wX2iEmGFI2Lm9eMV-yZfoZcAAF1Ll8Ck3FvIkH3~N0OobuGjcVZTiZ5PC2~h-zGHVaBPbsnOdVjYEeCGUwxwlNW6cxZZ6SfWbjTxXrpbSjLYZtlnGTBm5cd2Qaj61~A4lcoI72kj-v9GNXD5zeCQ9PeqKJHRN5p29VR8lTh9eqoIIHlGnsllQeZieeFJEAdnydOTi1ERzm4Hftq0P47lWu2FYh3aMtxI7HLeklWrmnQ--rW0XJ~xndWl45e~DEDaIL0k2FfMEmWYPtF-8l-xBX9IwIx8uZ2tcsexxLvJCY8-RiI4wgBqSf1CxPTGJ4TYUIqTUcMv2Sku8WslAdrWSPJofHWmeAmTJdSgCe8ZwvgMLkNZVeGgEccwtCDJbe5AeyJaQDFOTLlgwhHu5ExbyUPZtNZ4nSg-~qiGnpFTdgonqEYoJF9LvSaAgSfhOS3kdtZ6kKqgMFFY8InpGgCBuc6A6c5hsfCVjAAAA \ No newline at end of file diff --git a/apps/stasher/python/scripts/stasher b/apps/stasher/python/scripts/stasher deleted file mode 100644 index 601cfee4e..000000000 --- a/apps/stasher/python/scripts/stasher +++ /dev/null @@ -1,10 +0,0 @@ -#! 
/usr/bin/env python -# wrapper script to run stasher node - -# set this to the directory where you've installed stasher -stasherDir = "/path/to/my/stasher/dir" - -import sys -sys.path.append(stasherDir) -import stasher -stasher.main() diff --git a/apps/stasher/python/scripts/stasher.py b/apps/stasher/python/scripts/stasher.py deleted file mode 100644 index 3f404e511..000000000 --- a/apps/stasher/python/scripts/stasher.py +++ /dev/null @@ -1,9 +0,0 @@ -# wrapper script to run stasher node - -# set this to the directory where you've installed stasher -stasherDir = "/path/to/my/stasher/dir" - -import sys -sys.path.append(stasherDir) -import stasher -stasher.main() diff --git a/apps/stasher/python/setup.py b/apps/stasher/python/setup.py deleted file mode 100644 index c81520c1f..000000000 --- a/apps/stasher/python/setup.py +++ /dev/null @@ -1,46 +0,0 @@ -#! /usr/bin/env python -#@+leo-ver=4 -#@+node:@file setup-stasher.py -#@@first -""" -This is the installation script for Stasher, a distributed -file storage framework for I2P. -""" - -import sys, os -from distutils.core import setup - -oldcwd = os.getcwd() -os.chdir("src") - -if sys.platform == 'win32': - stasherScript = "..\\scripts\\stasher.py" -else: - stasherScript = "../scripts/stasher" - - -try: - import i2p - import i2p.socket - import i2p.select -except: - print "Sorry, but you don't seem to have the core I2P" - print "python library modules installed." - print "If you're installing from cvs, please go to" - print "i2p/apps/sam/python, become root, and type:" - print " python setup.py install" - print "Then, retry this installation." 
- sys.exit(1) - -setup(name="Stasher", - version="0.0", - description="Kademlia-based P2P distributed file storage app for I2P", - author="aum", - author_email="aum_i2p@hotmail.com", - url="http://stasher.i2p", - py_modules = ['stasher', 'bencode'], - scripts = [stasherScript], - ) -#@nonl -#@-node:@file setup-stasher.py -#@-leo diff --git a/apps/stasher/python/src/bencode.py b/apps/stasher/python/src/bencode.py deleted file mode 100644 index 93af40744..000000000 --- a/apps/stasher/python/src/bencode.py +++ /dev/null @@ -1,254 +0,0 @@ -# Written by Petru Paler -# see LICENSE.txt for license information - -from types import IntType, LongType, StringType, ListType, TupleType, DictType -import re -from cStringIO import StringIO - -int_filter = re.compile('(0|-?[1-9][0-9]*)e') - -def decode_int(x, f): - m = int_filter.match(x, f) - if m is None: - raise ValueError - return (long(m.group(1)), m.end()) - -string_filter = re.compile('(0|[1-9][0-9]*):') - -def decode_string(x, f): - m = string_filter.match(x, f) - if m is None: - raise ValueError - l = int(m.group(1)) - s = m.end() - return (x[s:s+l], s + l) - -def decode_list(x, f): - r = [] - while x[f] != 'e': - v, f = bdecode_rec(x, f) - r.append(v) - return (r, f + 1) - -def decode_dict(x, f): - r = {} - lastkey = None - while x[f] != 'e': - k, f = decode_string(x, f) - if lastkey is not None and lastkey >= k: - raise ValueError - lastkey = k - v, f = bdecode_rec(x, f) - r[k] = v - return (r, f + 1) - -def bdecode_rec(x, f): - t = x[f] - if t == 'i': - return decode_int(x, f + 1) - elif t == 'l': - return decode_list(x, f + 1) - elif t == 'd': - return decode_dict(x, f + 1) - else: - return decode_string(x, f) - -def bdecode(x): - try: - r, l = bdecode_rec(x, 0) - except IndexError: - raise ValueError - if l != len(x): - raise ValueError - return r - -def test_bdecode(): - try: - bdecode('0:0:') - assert 0 - except ValueError: - pass - try: - bdecode('ie') - assert 0 - except ValueError: - pass - try: - 
bdecode('i341foo382e') - assert 0 - except ValueError: - pass - assert bdecode('i4e') == 4L - assert bdecode('i0e') == 0L - assert bdecode('i123456789e') == 123456789L - assert bdecode('i-10e') == -10L - try: - bdecode('i-0e') - assert 0 - except ValueError: - pass - try: - bdecode('i123') - assert 0 - except ValueError: - pass - try: - bdecode('') - assert 0 - except ValueError: - pass - try: - bdecode('i6easd') - assert 0 - except ValueError: - pass - try: - bdecode('35208734823ljdahflajhdf') - assert 0 - except ValueError: - pass - try: - bdecode('2:abfdjslhfld') - assert 0 - except ValueError: - pass - assert bdecode('0:') == '' - assert bdecode('3:abc') == 'abc' - assert bdecode('10:1234567890') == '1234567890' - try: - bdecode('02:xy') - assert 0 - except ValueError: - pass - try: - bdecode('l') - assert 0 - except ValueError: - pass - assert bdecode('le') == [] - try: - bdecode('leanfdldjfh') - assert 0 - except ValueError: - pass - assert bdecode('l0:0:0:e') == ['', '', ''] - try: - bdecode('relwjhrlewjh') - assert 0 - except ValueError: - pass - assert bdecode('li1ei2ei3ee') == [1, 2, 3] - assert bdecode('l3:asd2:xye') == ['asd', 'xy'] - assert bdecode('ll5:Alice3:Bobeli2ei3eee') == [['Alice', 'Bob'], [2, 3]] - try: - bdecode('d') - assert 0 - except ValueError: - pass - try: - bdecode('defoobar') - assert 0 - except ValueError: - pass - assert bdecode('de') == {} - assert bdecode('d3:agei25e4:eyes4:bluee') == {'age': 25, 'eyes': 'blue'} - assert bdecode('d8:spam.mp3d6:author5:Alice6:lengthi100000eee') == {'spam.mp3': {'author': 'Alice', 'length': 100000}} - try: - bdecode('d3:fooe') - assert 0 - except ValueError: - pass - try: - bdecode('di1e0:e') - assert 0 - except ValueError: - pass - try: - bdecode('d1:b0:1:a0:e') - assert 0 - except ValueError: - pass - try: - bdecode('d1:a0:1:a0:e') - assert 0 - except ValueError: - pass - try: - bdecode('i03e') - assert 0 - except ValueError: - pass - try: - bdecode('l01:ae') - assert 0 - except ValueError: - pass 
- try: - bdecode('9999:x') - assert 0 - except ValueError: - pass - try: - bdecode('l0:') - assert 0 - except ValueError: - pass - try: - bdecode('d0:0:') - assert 0 - except ValueError: - pass - try: - bdecode('d0:') - assert 0 - except ValueError: - pass - -def bencode_rec(x, b): - t = type(x) - if t in (IntType, LongType): - b.write('i%de' % x) - elif t is StringType: - b.write('%d:%s' % (len(x), x)) - elif t in (ListType, TupleType): - b.write('l') - for e in x: - bencode_rec(e, b) - b.write('e') - elif t is DictType: - b.write('d') - keylist = x.keys() - keylist.sort() - for k in keylist: - assert type(k) is StringType - bencode_rec(k, b) - bencode_rec(x[k], b) - b.write('e') - else: - assert 0 - -def bencode(x): - b = StringIO() - bencode_rec(x, b) - return b.getvalue() - -def test_bencode(): - assert bencode(4) == 'i4e' - assert bencode(0) == 'i0e' - assert bencode(-10) == 'i-10e' - assert bencode(12345678901234567890L) == 'i12345678901234567890e' - assert bencode('') == '0:' - assert bencode('abc') == '3:abc' - assert bencode('1234567890') == '10:1234567890' - assert bencode([]) == 'le' - assert bencode([1, 2, 3]) == 'li1ei2ei3ee' - assert bencode([['Alice', 'Bob'], [2, 3]]) == 'll5:Alice3:Bobeli2ei3eee' - assert bencode({}) == 'de' - assert bencode({'age': 25, 'eyes': 'blue'}) == 'd3:agei25e4:eyes4:bluee' - assert bencode({'spam.mp3': {'author': 'Alice', 'length': 100000}}) == 'd8:spam.mp3d6:author5:Alice6:lengthi100000eee' - try: - bencode({1: 'foo'}) - assert 0 - except AssertionError: - pass - diff --git a/apps/stasher/python/src/code.leo b/apps/stasher/python/src/code.leo deleted file mode 100644 index 4ae647bd2..000000000 --- a/apps/stasher/python/src/code.leo +++ /dev/null @@ -1,6339 +0,0 @@ - - - - - - - - - - - - - - -Kademlia -@file stasher.py -explanatory comments -imports -constants -globals -Exceptions -Mixins -class KBase - -Main Engine -class KCore -attributes -__init__ -subscribe -unsubscribe -threadRxPackets -threadHousekeeping 
-nodeWhichOwnsSock -cycle -run -stop -runClient -select - -create instance - -Basic Classes -Node-local Storage -class KStorageBase -__init__ -putRefs -getRefs -putKey -getKey -private methods -_expandRefsList - - -class KStorageFile -__init__ -putRefs -getRefs -putKey -getKey - - -class KHash -__init__ -__str__ -asHex -distance -rawdistance -operators - -class KBucket -__init__ -justSeenPeer -__iter__ - -class KPeer -__init__ -send_ping -send_store -send_findNode -send_findData -send_reply -send_raw -justSeen -lowlevel -__str__ -__repr__ -__eq__ -__ne__ - - - -RPC Classes -class KRpc -attribs -__init__ -__del__ -__str__ -__repr__ -bindPeerReply -unbindPeerReply -unbindAll -start -execute -terminate -returnValue -on_reply -on_tick - -PING -class KRpcPing -attribs -__init__ -start -on_reply -on_tick - - -FIND_NODE -class KPeerQueryRecord -__init__ -hasTimedOut -__cmp__ -__lt__ etc -isCloserThanAllOf -isCloserThanOneOf - -class KPeerQueryTable -__init__ -setlist -getExpired -purgeExpired -sort -select -count -changeState -filter -purge -chooseN -__str__ -newtable -dump -list-like methods -extend -append -remove -__getitem__ -__len__ -__getslice__ -__iter__ -__add__ -__contains__ - - -class KRpcFindNode -spec info comments -attribs -__init__ -start -sendSomeQueries -sendOneQuery -findClosestPeersInitial -addPeerIfCloser -isCloserThanQueried -on_reply -on_tick -checkEndOfRound -gotAnyCloser -returnTheBestWeGot -returnValue -reportStats - - -FIND_DATA -class KRpcFindData -attribs -start -on_reply -on_gotValue -on_gotChunk -returnValue - - -STORE -class KRpcStore -attribs -__init__ -start -storeSplit -on_doneChunkManifest -on_doneChunk -returnValue -on_doneFindNode -on_reply -on_tick - - -PINGALL -class KRpcPingAll -attribs -__init__ -start -on_reply -on_tick -returnValue - - - -Node Socket Server -class KNodeServer -__init__ -serve_forever - -class KNodeReqHandler -handle -finish - -class KNodeClient -__init__ -hello -connect -close -get -put -addref -getref -pingall 
-kill -__getitem__ -__setitem__ - - -NODE -class KNode -attributes -__init__ -__del__ -application-level -start -stop -get -put -addref -__getitem__ -__setitem__ - -peer/rpc methods -_ping -_pingall -_findnode -_finddata -_store -_findPeer - -comms methods -_sendRaw - -engine -_threadRx -_doChug -_doRx -_doHousekeeping - -event handling -_on_ping -_on_findNode -_on_findData -_on_store -_on_reply -_on_unknown - -Socket Client Server -serve - -lowlevel stuff -__str__ -__repr__ -_msgIdAlloc -_normalisePeer -__del__ - - - -funcs -userI2PDir -nodePidfile -messageEncode -messageDecode -shahash -log -logexc -spawnproc -killproc -i2psocket -usage -err -main - -MAINLINE -mainline - - -Deployment -@file-nosent stasher -contents - -@file-nosent stasher-launch.py -contents - -@file release.sh -@file setup-stasher.py -@file-nosent README.txt -installing-from-cvs -doze warning -alpha warning -using - -@file-nosent README-tarball.txt -installing as tarball -alpha warning -using - - -Testing -@file ktest.py -imports -constants -TEST NETWORK -class KTestSocket -class KTestMap -class KCore -class KNode -class KTestNetwork -attribs -__init__ -__del__ -__getitem__ -__len__ -connect -start -stop -dump -dumplong -findpath -testconnectivity -purge -whohas -whocanfind -findnode -closestto -getPeer -getPeerIdx -getPeerName -dumpids -__str__ -__repr__ - - -Funcs -test -test1 -debug -doput - -MAINLINE -mainline - - -@file node1.sh -@file node2.sh - -Utility -@file pybloom.py -imports -globals -mixarray_init -class Bloom -attribs -__init__ -_make_array -_hashfunc -insert -__contains__ - -class CountedBloom -__init__ -insert -__contains__ -__delitem__ - -mainline - -@file-nosent hashcash.py -imports -globals -class HashCash -attributes -__init__ -generate -verify -_checkBase64 -_enc64 -_dec64 - -generate -verify -binify -intify -_randomString -psyco -test -ctest -ntest -mainline - - -JUNK -Findnode RPC on_reply -on_reply - -class KPendingResultBase -__init__ -append -wait -check -destroySelf 
-on_tick -on_packet -__cmp__ - -class KPendingResultPing -__init__ -on_tick -on_packet - - - - - - -@first #! /usr/bin/env python -""" -A simple implementation of the -U{Kademlia<http://www.infoanarchy.org/wiki/wiki.pl?Kademlia>} -P2P distributed storage and retrieval protocol, designed to -utilise the U{I2P<http://www.i2p.net>} stealth network as its transport. - -Most application developers will only need to know about the L{KNode} class -""" - -# I strongly recommend that when editing this file, you use the Leo -# outlining and literate programming editor - http://leo.sf.net -# If Leo doesn't agree with your religion, please try to leave the markups intact - -@others - - -# define our exceptions - -class KValueTooLarge(Exception): - """ - Trying to insert a value of excessive size into the network. - Maximum key size is L{maxValueSize} - """ - -class KBadHash(Exception): - """ - Invalid hash string - """ - -class KNotImplemented(Exception): - """ - A required method was not implemented - """ - -class KBadNode(Exception): - """ - Invalid Node object - """ - -class KBadPeer(Exception): - """ - Invalid Peer object - should be a KPeer - """ - -class KBadDest(Exception): - """Invalid I2P Node Dest""" - - -class KHash(KBase): - """ - Wraps 160-bit hashes as abstract objects, on which - operations such as xor, <, >, etc can be performed. - - Kademlia node ids and keys are held as objects - of this class. - - Internally, hashes are stored as python long ints - """ - @others - -def __init__(self, val=None, **kw): - """ - Create a new hash object. - - val can be one of the following: - - None (default) - a random value will be created - - long int - this will be used as the raw hash - - string - the string will be hashed and stored - - another KHash object - its value will be taken - - a KNode or KPeer object - its hash will be taken - - If val is not given, a raw hash value can be passed in - with the keyword 'raw'. 
Such value must be a python long int - or a 20-char string - """ - self.value = 0L - if val: - if isinstance(val, KHash): - self.value = val.value - elif type(val) in [type(0), type(0L)]: - self.value = long(val) - elif isinstance(val, KNode) or isinstance(val, KPeer): - self.value = val.id.value - else: - raw = self.raw = shahash(val, bin=1) - for c in raw: - self.value = self.value * 256 + ord(c) - else: - rawval = kw.get('raw', None) - if rawval == None: - # generate random - random.seed() - for i in range(20): - self.value = self.value * 256 + random.randint(0, 256) - elif type(rawval) in [type(0), type(0L)]: - self.value = long(rawval) - elif type(rawval) == type(""): - if len(rawval) == 20: - for i in rawval: - self.value = self.value * 256 + ord(i) - elif len(rawval) == 40: - try: - self.value = long(rawval, 16) - except: - raise KBadHash(rawval) - else: - raise KBadHash(rawval) - else: - print "rawval=%s %s %s" % (type(rawval), rawval.__class__, repr(rawval)) - raise KBadHash(rawval) - - -import sys, os, types, sha, random, threading, thread, traceback, Queue -import time, math, random, pickle, getopt, re -import signal - -# some windows-specifics (yggghh) -if sys.platform == 'win32': - try: - import win32api - import win32process - import _winreg - except: - print "Python win32 extensions not installed." - print "Please go to http://sourceforge.net/project/showfiles.php?group_id=78018" - print "and download/install the file pywin32-202.win32-py%s.%s.exe" % \ - sys.version_info[:2] - sys.exit(1) - -from StringIO import StringIO -from pdb import set_trace - -try: - import bencode -except: - print "The bencode module is missing from your python installation." - print "Are you sure you installed Stasher correctly?" - sys.exit(1) - -try: - import i2p.socket - import i2p.select - import i2p.pylib - SocketServer = i2p.pylib.SocketServer - socket = i2p.pylib.socket -except: - print "You don't appear to have the I2P Python modules installed." - print "Not good. 
Stasher totally needs them." - print "Please to to i2p/apps/sam/python in your I2P cvs tree, and" - print "install the core I2P python modules first" - sys.exit(1) - - -class KPeer(KBase): - """ - Encapsulates a peer node of a L{KNode}, - storing its ID and contact info - """ - @others - -def __init__(self, node, dest): - """ - Create a ref to a kademlia peer node - - Arguments: - - node - reference to node which has the relationship - to this peer - - dest - the peer's I2P destination, as base64 - """ - if not isinstance(node, KNode): - raise KBadNode(node) - if not isinstance(dest, str): - raise KBadDest(dest) - - self.node = node - self.dest = dest - self.id = KHash(dest) - - self.justSeen() - - -def send_ping(self, **kw): - """ - Sends a ping to remote peer - """ - self.send_raw(type="ping", **kw) - -def send_store(self, **kw): - """ - sends a store command to peer - """ - self.log(4, "\npeer %s\ndest %s...\nsending store cmd: %s" % (self, self.dest[:12], repr(kw))) - - self.send_raw(type="store", **kw) - - -def send_findNode(self, hash, **kw): - """ - sends a findNode command to peer - """ - if not isinstance(hash, KHash): - raise KBadHash - - self.log(5, "\nquerying peer %s\ntarget hash %s" % (self, hash)) - - self.send_raw(type="findNode", hash=hash.value, **kw) - - -def send_findData(self, hash, **kw): - """ - sends a findData command to peer - """ - if not isinstance(hash, KHash): - raise KBadHash - - self.log(5, "\nquerying peer %s\ntarget hash %s" % (self, hash)) - - self.send_raw(type="findData", hash=hash.value, **kw) - - -class KNode(KBase): - """ - B{Public API to this Kademlia implementation} - - You should not normally need to use, or even be aware of, - any of the other classes - - And in this class, the only methods you need to worry about are: - - L{start} - starts the node running - - L{stop} - stops the node - - L{get} - retrieve a key value - - L{put} - stores a key value - - L{addref} - imports a noderef - - This class implements a single 
kademlia node. - Within a single process, you can create as many nodes as you like. - """ - @others - -def start(self, doPings=True): - """ - Starts the node running - """ - # barf if already running - if self.isRunning: - self.log(3, "node %s is already running!" % self.name) - return - - self.log(3, "starting node %s" % self.name) - - # first step - ping all our peers - if doPings: - for peer in self.peers: - self.log(3, "doing initial ping\n%s\n%s" % (self, peer)) - KRpcPing(self, peer=peer) - - # first step - do a findNode against our own node id, and ping our - # neighbours - if greetPeersOnStartup: - neighbours = KRpcFindNode(self, hash=self.id).execute() - self.log(3, "neighbours=%s" % repr([n[:10] for n in neighbours])) - for n in neighbours: - n = self._normalisePeer(n) - KRpcPing(self, peer=n) - - # note now that we're running - self.isRunning = True - - # and enlist with the core - if runCore: - core.subscribe(self) - else: - # central core disabled, run our own receiver thread instead - thread.start_new_thread(self._threadRx, ()) - -def stop(self): - """ - Shuts down the node - """ - self.isRunning = 0 - if runCore: - try: - core.unsubscribe(self) - except: - pass - -def get(self, item, callback=None, **kw): - """ - Attempts to retrieve data from the network - - Arguments: - - item - the key we desire - - callback - optional - if given, the get will be performed - asynchronously, and callback will be invoked upon completion, with - the result as first argument - Keywords: - - local - optional - if True, limits this search to this local node - default is False - - Returns: - - if no callback - the item value if the item was found, or None if not - - if callback, None is returned - """ - def processResult(r): - if isinstance(r, str): - return r - return None - - if callback: - # create a func to process callback result - def onCallback(res): - callback(processResult(res)) - - self._finddata(item, onCallback, **kw) - else: - return 
processResult(self._finddata(item, **kw)) - - -def put(self, key, value, callback=None, **kw): - """ - Inserts a named key into the network - - Arguments: - - key - one of: - - None - a secure key will be generated and used - - a KHash object - - a raw string which will be hashed into a KHash object - - val - a string, the value associated with the key - - Keywords: - - local - default False - if True, limits the insert to the - local node - - If the value is larger than L{maxValueSize}, a L{KValueTooLarge} - exception will occur. - """ - return self._store(key, value, callback, **kw) - - -def __getitem__(self, item): - """ - Allows dict-like accesses on the node object - """ - return self.get(item) - -def __setitem__(self, item, val): - """ - Allows dict-like key setting on the node object - """ - self.put(item, val) - - -def __str__(self): - return "<KHash: 0x%x>" % self.value - -def __repr__(self): - return str(self) - - -def __eq__(self, other): - #log(2, "KHash: comparing %s to %s" % (self, other)) - res = self.value == getattr(other, 'value', None) - #self.log(2, "KHash: res = %s" % repr(res)) - return res - -def __ne__(self, other): - return not (self == other) - -def __lt__(self, other): - return self.value < other.value - -def __gt__(self, other): - return self.value > other.value - -def __le__(self, other): - return self.value <= other.value - -def __ge__(self, other): - return self.value >= other.value - -def __ne__(self, other): - return self.value != other.value - -def __xor__(self, other): - return self.value ^ other.value - - -def send_raw(self, **kw): - """ - Sends a raw datagram to peer - - No arguments - just keywords, all of which must be strings or - other objects which can be bencoded - """ - self.node._sendRaw(self, **kw) - -def __init__(self, name, **kw): - """ - Creates a kademlia node of name 'name'. 
- - Name is mandatory, because each name is permanently written - to the SAM bridge's store - - I thought of supporting random name generation, but went off this - idea because names get permanently stored to SAM bridge's file - - Arguments: - - name - mandatory - a short text name for the node, should - be alphanumerics, '-', '.', '_' - This name is used for the SAM socket session. - - Keywords: - - storage - optional - an instance of L{KStorageBase} or one of - its subclasses. If not given, default action is to instantiate - a L{KStorageFile} object against the given node name - """ - # remember who we are - self.name = name - - # not running yet, will launch when explicitly started, or implicitly - # when the first operation gets done - self.isRunning = False - - # create socket and get its dest, and determine our node id - self.id = KHash("<NONE>") - self.log(5, "creating socket for node %s" % name) - self.log(5, "socket for node %s created" % name) - if self.SocketFactory == None: - self.SocketFactory = i2p.socket.socket - self.sock = self.SocketFactory( - "stashernode-"+name, - i2p.socket.SOCK_DGRAM, - samaddr=samAddr, - **kw) - #self.sockLock = threading.Lock() # prevents socket API reentrance - self.sock.setblocking(0) - self.dest = self.sock.dest - self.id = KHash(self.dest) - - # create our buckets - self.buckets = [] - for i in range(160): - self.buckets.append(KBucket()) - - # create our storage object, default to new instance of KStorageFile - self.storage = kw.get('storage', KStorageFile(self)) - - # dig out all previously known nodes - self.peers = self.storage.getRefs() - - # set up dict of callers awaiting replies - # keys are (peerobj, msgId) tuples, values are Queue.Queue objects - self.pendingPings = {} - - # mapping of (peer, msgId) to RPC object, so when RPC replies come in, - # they can be passed directly to the RPC object concerned - self.rpcBindings = {} - - # KRpc objects waiting for peer replies - used for checking for timeouts - 
self.rpcPending = [] - - # miscellaneous shit - self._msgIdNext = 0 - #self._msgIdLock = threading.Lock() - - # register in global map - _nodes[name] = self - - - -class KStorageBase(KBase): - """ - Base class for node storage objects - - This needs to be overridden by implementation-specific - solutions. - """ - @others - -def __init__(self, node, *args, **kw): - """ - Override this method - - First argument should be a node instance - """ - raise KNotImplemented - - -def putRefs(self, *refs): - """ - Saves one or more noderefs - - Arguments: - - zero or more KPeer objects, or lists or tuples of objects - """ - raise KNotImplemented - -def getRefs(self): - """ - Returns a list of KPeer objects, comprising refs - of peers known to this node - """ - raise KNotImplemented - - -def putKey(self, key, value): - """ - Stores value, a string, into the local storage - under key 'key' - """ - raise KNotImplemented - - -def getKey(self, key): - """ - Attempts to retrieve item from node's local, which was - stored with key 'key'. - - Returns value as a string if found, or None if not present - """ - raise KNotImplemented - -class KStorageFile(KStorageBase): - """ - Implements node-local storage, using the local filesystem, - with the following hierarchy: - - - HOME ( ~ in linux, some other shit for windows) - - .i2pkademlia - - <nodename> - - noderefs - - <node1 base64 hash> - - contains node dest, and other shit - - ... - - keys - - <keyname1> - - contains raw key value - - ... - - This is one ugly sukka, perhaps a db4, mysql etc implementation - would be better. 
- """ - @others - -def __init__(self, node, storeDir=None): - """ - Creates a persistent storage object for node - 'nodeName', based at directory 'storeDir' (default - is nodeDir - """ - self.node = node - self.nodeName = node.name - - if storeDir == None: - # work out local directory - self.topDir = userI2PDir() - - # add node dir and subdirs - self.nodeDir = userI2PDir(self.nodeName) - - self.refsDir = os.path.join(self.nodeDir, "noderefs") - if not os.path.isdir(self.refsDir): - os.makedirs(self.refsDir) - - self.keysDir = os.path.join(self.nodeDir, "keys") - if not os.path.isdir(self.keysDir): - os.makedirs(self.keysDir) - - -class KBucket(KBase): - """ - Implements the 'k-bucket' object as required in Kademlia spec - """ - @others - -def __init__(self): - """ - Creates a single k-bucket - """ - # list of known nodes - # order is least recently seen at head, most recently seen at tail - self.nodes = [] - - # list of death-row records - # refer spec section 2.1, paragraph 2 - # we deviate a little: - # when we hear from a new peer, and the bucket is full, - # we temporarily displace the old peer, and stick the new - # peer at end of list, then send out a ping - # If we hear from the old peer within a reasonable time, - # the new peer gets evicted and replaced with the old peer - # - # this list holds 2-tuples (oldpeer, newpeer), where - # oldpeer is the least-recently-seen peer that we displaced, and - # newpeer is the new peer we just heard from. 
- self.deathrow = [] - - - -# -------------------------------------------- -# START USER-CONFIGURABLE CONSTANTS -# -------------------------------------------- - -# host:port to connect to I2P SAM Bridge -samAddr = i2p.socket.samaddr - -# host:port to listen on for command line client -clientAddr = "127.0.0.1:7659" - -defaultNodename = "0" # will be prefixed by 'stashernode' - -# maximum size of each stored item -maxValueSize = 30000 - -# maximum number of noderefs that can be stored in a bucket -# (refer spec section 2.1, first paragraph) -maxBucketSize = 20 - -# number of peers to return from a search -numSearchPeers = 3 - -# maximum number of concurrent queries per findnode/finddata rpc -maxConcurrentQueries = 10 - -# number of peers to store onto -numStorePeers = 10 - -# Logger settings -logFile = None -logVerbosity = 2 - -# data directory location - set to a path to override the default -# which is the user's home dir -dataDir = None - -# whether a node, on startup, should do a findnode on itself to -# locate its closest neighbours -greetPeersOnStartup = False -#greetPeersOnStartup = True - -# multi-purpose testing flag -testing = False -#testing = True - -tunnelDepth = 0 - -# set to True to enable single handler thread that manages all nodes, -# or False to make each node run its own handler thread -#runCore = False -runCore = True - -# timeouts - calibrate as needed -timeout = { - 'ping' : 120, - 'findNode' : 120, - 'findData' : 120, - 'store' : 120, - } - -logToSocket = None - -desperatelyDebugging = False - -if desperatelyDebugging: - runCoreInBackground = False -else: - runCoreInBackground = True - -# -------------------------------------------- -# END OF USER-CONFIGURABLE CONSTANTS -# -------------------------------------------- - -# ---------------------------------------------- -# hack anything below this line at your own risk - - -def justSeenPeer(self, peer): - """ - Tells the bucket that we've just seen a given node - """ - nodes = self.nodes - - if 
not isinstance(peer, KPeer): - raise KBadNode - - try: - idx = nodes.index(peer) - except: - idx = -1 - if idx >= 0: - del nodes[idx] - nodes.append(peer) - else: - nodes.append(peer) - - # might at some time need to implement death-row logic - # when we set a bucket size limit - refer __init__ - -def addref(self, peer, doPing=False): - """ - Given a peer node's destination, add it to our - buckets and internal data store - - Arguments: - - peer - one of: - - the I2P destination of the peer node, as - a base64 string - - a KNode object - - a KPeer object - - doPing - ping this node automatically (default False) - """ - peer = self._normalisePeer(peer) - - # remember peer if not already known - if peer.dest == self.dest: - self.log(3, "node %s, trying to add ref to ourself???" % self.name) - return peer - elif not self._findPeer(peer.dest): - self.peers.append(peer) - self.storage.putRefs(peer) - else: - self.log(4, "node %s, trying to add duplicate noderef %s" % ( - self.name, peer)) - return peer - - # update our KBucket - dist = self.id.distance(peer.id) - self.buckets[dist].justSeenPeer(peer) - - if doPing: - self.log(4, "doing initial ping\n%s\n%s" % (self, peer)) - KRpcPing(self, peer=peer) - - return peer - - - - -def _sendRaw(self, peer, **kw): - """ - Serialises keywords passed, and sends this as a datagram - to node 'peer' - """ - # update our KBucket - dist = self.id.distance(peer.id) - self.buckets[dist].justSeenPeer(peer) - - # save ref to this peer - self.addref(peer) - - params = dict(kw) - msgId = params.get('msgId', None) - if msgId == None: - msgId = params['msgId'] = self._msgIdAlloc() - - objenc = messageEncode(params) - self.log(5, "node %s waiting for send lock" % self.name) - #self.sockLock.acquire() - self.log(5, "node %s got send lock" % self.name) - try: - self.sock.sendto(objenc, 0, peer.dest) - except: - traceback.print_exc() - #self.sockLock.release() - self.log(5, "node %s released send lock" % self.name) - - self.log(4, "node %s sent 
%s to peer %s" % (self.name, params, peer.dest)) - return msgId - - - -def _threadRx(self): - """ - Thread which listens for incoming datagrams and actions - accordingly - """ - self.log(3, "starting receiver thread for node %s" % self.name) - - try: - # loop to drive the node - while self.isRunning: - self._doChug() - except: - traceback.print_exc() - self.log(3, "node %s - THREAD CRASH!" % self.name) - - self.log(3, "receiver thread for node %s terminated" % self.name) - - -@others - -# keep a dict of existing nodes, so we can prevent -# client progs from creating 2 nodes of the same name -_nodes = {} - -version = "0.0.1" - - -logLock = threading.Lock() - -def log(verbosity, msg, nPrev=0, clsname=None): - - global logToSocket, logFile - - # create logfile if not exists - if logFile == None: - logFile = os.path.join(userI2PDir(), "stasher.log") - - # rip the stack - caller = traceback.extract_stack()[-(2+nPrev)] - path, line, func = caller[:3] - path = os.path.split(path)[1] - - #print "func is type %s, val %s" % (type(func), repr(func)) - - #if hasattr(func, "im_class"): - # func = - - if clsname: - func = clsname + "." + func - - #msg = "%s:%s:%s(): %s" % ( - # path, - # line, - # func, - # msg.replace("\n", "\n + ")) - - msg = "%s():%s: %s" % ( - func, - line, - msg.replace("\n", "\n + ")) - - # do better logging later - if verbosity > logVerbosity: - return - - if logToSocket: - try: - if isinstance(logToSocket, int): - portnum = logToSocket - logToSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - connected = 0 - while 1: - try: - logToSocket.connect(("localhost", portnum)) - break - except socket.error: - print "Please open an xterm/nc listening on %s" % logToSocket - time.sleep(1) - - logToSocket.send(msg+"\n") - except: - traceback.print_exc() - else: - print msg - - logLock.acquire() - file(logFile, "a+").write(msg + "\n") - logLock.release() - -def _findPeer(self, dest): - """ - Look up our table of current peers for a given dest. 
- - If dest is found, return its object, otherwise return None - """ - for peerObj in self.peers: - if peerObj.dest == dest: - return peerObj - return None - - -def putRefs(self, *args): - """ - Saves one or more noderefs into filesystem - - Arguments: - - zero or more KPeer objects, or lists or tuples of objects - """ - lst = self._expandRefsList(args) - for item in lst: - b64hash = shahash(item.dest) - itemPath = os.path.join(self.refsDir, b64hash) - itemDict = {'dest':item.dest} # might need to expand later - itemPickle = bencode.bencode(itemDict) - file(itemPath, "wb").write(itemPickle) - pass - -@others - -def _expandRefsList(self, args, lst=None): - """ - Takes a sequence of args, each of which can be a KPeer - object, or a list or tuple of KPeer objects, and expands - this into a flat list - """ - if lst == None: - lst = [] - for item in args: - if type(item) in [type(()), type([])]: - self._expandRefsList(item, lst) - else: - lst.append(item) - return lst - - -def getRefs(self): - """ - Returns a list of KPeer objects, comprising refs - of peers known to this node - - These are read from the directory self.refsDir. 
- Any that can't be unpickled and instantiated are dropped, but logged - """ - peers = [] - for f in os.listdir(self.refsDir): - - path = os.path.join(self.refsDir, f) - pickled = file(path, "rb").read() - try: - d = bencode.bdecode(pickled) - except: - self.log(3, "node %s, bad pickle ref file %s" % ( - self.nodeName, f)) - continue - - # instantiate a peer object - try: - peer = KPeer(self.node, d['dest']) - except: - self.log(3, "node %s, bad unpickled ref file %s" % ( - self.nodeName, f)) - continue - - # success - peers.append(peer) - - return peers - - -@others - -def test(numnodes=10): - - global n, n0, res - - stasher.logVerbosity = 3 - stasher.core = KCore(1) - - os.system("rm -rf ~/.i2pkademlia") - - n = KTestNetwork(10, purge=1) - n.connect() - n.start() - n0 = n[0] - - return - - if 0: - core.fg = True - n = KTestNetwork(10, purge=1) - n.connect() - n.start() - core.cycle() - - print "about to insert" - set_trace() - n[0].put('roses', 'red') - print "insert completed" - #set_trace() - - return - - #set_trace() - #n[0].put('roses', 'red') - - #print n[0].get('roses') - - if 0: - successes = [] - failures = [] - for idx in range(numnodes): - - # build test network of known topology - print "Building test network" - n = KTestNetwork(numnodes, purge=1) - n.connect() - n.start() - core.n = n - n.trigger = 0 - - if 0: - print n[0]._findnode('roses') - break - - if 1: - # store something - print "storing something" - - n[0].put('roses', 'red') - - # try to retrieve it from node i - print "trying to retrieve it from node %s" % idx - if n[idx].get('roses') != None: - print "Node %s retrieved ok" % idx - successes.append(idx) - else: - print "Node %s failed to retrieve" % idx - failures.append(idx) - - del n - - print "Successful nodes: %s" % " ".join([str(x) for x in successes]) - print "Failed nodes: %s" % " ".join([str(x) for x in failures]) - - if 0 and desperatelyDebugging: - - while not n.trigger: - time.sleep(1) - - n.trigger = 0 - n[0].put('roses', 'red') 
- - while not n.trigger: - time.sleep(1) - - print "retrieving 'roses'" - print n[0].get('roses') - - -@others - -def __str__(self): - return "<KNode:%s=0x%s...>" % ( - self.name, - ("%x" % self.id.value)[:8], - ) - -def __repr__(self): - return str(self) - - -@others - -def __str__(self): - - return "<KPeer:%s=>0x%s... dest %s...>" % ( - self.node.name, ("%x" % self.id.value)[:8], self.dest[:8]) - - -def __repr__(self): - - return str(self) - - -@others - -def _on_ping(self, peer, msgId, **kw): - """ - Handler for ping received events - """ - KRpcPing(self, (peer, msgId), local=True, **kw) - return - - # old stuff - - self.log(3, "\nnode %s\nfrom %s\nreceived:\n%s" % (self.name, peer, kw)) - - # randomly cause ping timeouts if testing - if testing: - howlong = random.randint(0, 5) - self.log(3, "deliberately pausing for %s seconds" % howlong) - time.sleep(howlong) - - # pong back to node - peer.send_reply(msgId=msgId) - - -def _on_unknown(self, peer, msgId, **kw): - """ - Handler for unknown events - """ - self.log(3, "node %s from %s received msgId=%s:\n%s" % ( - self.name, peer, msgId, kw)) - - -def _msgIdAlloc(self): - """ - issue a new and unique message id - """ - #self._msgIdLock.acquire() - msgId = self._msgIdNext - self._msgIdNext += 1 - #self._msgIdLock.release() - return msgId - -def _normalisePeer(self, peer): - """ - Takes either a b64 dest string, a KPeer object or a KNode object, - and returns a KPeer object - """ - # act according to whatever type we're given - if isinstance(peer, KPeer): - return peer # already desired format - elif isinstance(peer, KNode): - return KPeer(self, peer.dest) - elif isinstance(peer, str) and len(peer) > 256: - return KPeer(self, peer) - else: - self.log(3, "node %s, trying to add invalid noderef %s" % ( - self.name, peer)) - raise KBadNode(peer) - - -def _ping(self, peer=None, callback=None, **kw): - """ - Sends a ping to remote peer, and awaits response - - Not of much real use to application level, except - perhaps 
for testing - - If the argument 'peer' is not given, the effect is to 'ping the - local node', which I guess might be a bit silly - - The second argument 'callback' is a callable, which if given, makes this - an asynchronous (non-blocking) call, in which case the callback will be - invoked upon completion (or timeout). - - If the keyword 'cbArgs' is given in addition to the callback, the callback - will fire with the results as first argument and this value as second arg - """ - if callback: - KRpcPing(self, callback, peer=peer, **kw) - else: - return KRpcPing(self, peer=peer).execute() - - -def distance(self, other): - """ - calculates the 'distance' between this hash and another hash, - and returns it as i (where distance = 2^i, and 0 <= i < 160) - """ - - #log(4, "comparing: %s\nwith %s" % (self.value, other.value)) - - rawdistance = self.value ^ other.value - if not rawdistance: - return 0 - - return int(math.log(rawdistance, 2)) - - -def _on_findNode(self, peer, msgId, **kw): - """ - Handles incoming findNode command - """ - KRpcFindNode(self, (peer, msgId), local=True, **kw) - - -def _on_findData(self, peer, msgId, **kw): - """ - Handles incoming findData command - """ - KRpcFindData(self, (peer, msgId), local=True, **kw) - - -def justSeen(self): - self.timeLastSeen = time.time() - - -class KCore(KBase): - """ - Singleton class which performs all the needed background processing. - - By scheduling all processing through this object, we eliminate the - need to create threads on a per-node basis, and also make this thing - far easier to debug. 
- - The core launches only two background threads: - - L{threadRxPackets} - listen for incoming packets bound for - any node running within a single process - - L{threadHousekeeping} - periodically invoke maintenance methods - of each node, so the node can check for timeout conditions and - other untoward happenings - - These threads start up when the first node in this process is created, - and stop when the last node ceases to exist. - - Upon first import, the L{stasher} module creates one instance of this - class. Upon creation, L{KNode} objects register themselves with this core. - """ - @others - - -def __init__(self, bg=True): - """ - Creates the I2P Kademlia core object - """ - self.bg = bg - self.fg = False - - # subscribed nodes - self.nodes = [] - #self.nodesLock = threading.Lock() - - self.isRunning = False - self.isRunning_rx = False - - -def subscribe(self, node): - """ - Called by a node to 'subscribe' for background processing - If this is the first node, starts the handler thread - """ - #self.nodesLock.acquire() - try: - nodes = self.nodes - - if node in nodes: - self.log(2, "duhhh! node already subscribed" % repr(node)) - return - - nodes.append(node) - - if not self.isRunning: - self.isRunning = True - if self.bg and not self.fg: - self.log(3, "First node subscribing, launching threads") - thread.start_new_thread(self.threadRxPackets, ()) - thread.start_new_thread(self.threadHousekeeping, ()) - except: - traceback.print_exc() - self.log(2, "exception") - - #self.nodesLock.release() - - -def unsubscribe(self, node): - """ - Unsubscribes a node from the core - - If this was the last node, stops the handler thread - """ - #self.nodesLock.acquire() - try: - nodes = self.nodes - - if node not in nodes: - self.log(4, "duhhh! 
node %s was not subscribed" % repr(node)) - return - - self.log(2, "trying to unsubscribe node %s" % node.name) - nodes.remove(node) - - if len(nodes) == 0: - self.isRunning = False - except: - traceback.print_exc() - self.log(2, "exception") - - #self.nodesLock.release() - - -def threadRxPackets(self): - """ - Sits on a select() loop, processing incoming datagrams - and actioning them appropriately. - """ - self.isRunning_rx = True - self.log(3, "KCore packet receiver thread running") - try: - while self.isRunning: - socks = [node.sock for node in self.nodes] - if desperatelyDebugging: - set_trace() - try: - inlist, outlist, errlist = self.select(socks, [], [], 1) - except KeyboardInterrupt: - self.isRunning = 0 - return - - self.log(5, "\ninlist=%s" % repr(inlist)) - if inlist: - self.log(5, "got one or more sockets with inbound data") - #self.nodesLock.acquire() - for sock in inlist: - node = self.nodeWhichOwnsSock(sock) - if node != None: - node._doRx() - #self.nodesLock.release() - - elif self.fg: - return - - else: - time.sleep(0.1) - except: - #self.nodesLock.release() - traceback.print_exc() - self.log(1, "core handler thread crashed") - self.isRunning_rx = False - self.log(3, "core handler thread terminated") - - -# create an instance of _KCore -core = KCore() - - -def __del__(self): - """ - Clean up on delete - """ - self.log(3, "node dying: %s" % self.name) - - try: - del _nodes[self.name] - except: - pass - - self.stop() - - -def _doRx(self): - """ - Receives and handles one incoming packet - - Returns True if a packet got handled, or False if timeout - """ - # get next packet - self.log(5, "%s seeking socket lock" % self.name) - #self.sockLock.acquire() - self.log(5, "%s got socket lock" % self.name) - try: - item = self.sock.recvfrom(-1) - except i2p.socket.BlockError: - #self.sockLock.release() - self.log(5, "%s released socket lock after timeout" % self.name) - if not runCore: - time.sleep(0.1) - return False - except: - traceback.print_exc() - 
self.log(5, "%s released socket lock after exception" % self.name) - #self.sockLock.release() - return True - #self.sockLock.release() - self.log(5, "%s released socket lock normally" % self.name) - - try: - (data, dest) = item - except ValueError: - self.log(3, "node %s: recvfrom returned no dest, possible spoof" \ - % self.name) - data = item[0] - dest = None - - # try to decode - try: - d = messageDecode(data) - except: - traceback.print_exc() - self.log(3, "failed to unpickle incoming data for node %s" % \ - self.name) - return True - - # ditch if not a dict - if type(d) != type({}): - self.log(3, "node %s: decoded packet is not a dict" % self.name) - return True - - # temporary workaround for sam socket bug - if dest == None: - if hasattr(d, 'has_key') and d.has_key('dest'): - dest = d['dest'] - - # try to find it in our store - peerObj = self._findPeer(dest) - if peerObj == None: - # previously unknown peer - add it to our store - peerObj = self.addref(dest) - else: - peerObj.justSeen() # already exists - refresh its timestamp - self.addref(peerObj.dest) - - # drop packet if no msgId - msgId = d.get('msgId', None) - if msgId == None: - self.log(3, "no msgId, dropping") - return True - del d['msgId'] - - msgType = d.get('type', 'unknown') - - if desperatelyDebugging: - pass - #set_trace() - - # if a local RPC is awaiting this message, fire its callback - item = self.rpcBindings.get((peerObj.dest, msgId), None) - if item: - rpc, peer = item - try: - rpc.unbindPeerReply(peerObj, msgId) - if desperatelyDebugging: - set_trace() - rpc.on_reply(peerObj, msgId, **d) - - except: - traceback.print_exc() - self.log(2, "unhandled exception in RPC on_reply") - else: - # find a handler, fallback on 'unknown' - self.log(5, "\nnode %s\ngot msg id %s type %s:\n%s" % ( - self.name, msgId, msgType, d)) - hdlrName = d.get('type', 'unknown') - hdlr = getattr(self, "_on_"+hdlrName) - try: - if desperatelyDebugging: - set_trace() - hdlr(peerObj, msgId, **d) - except: - 
traceback.print_exc() - self.log(2, "unhandled exception in unbound packet handler %s" % hdlrName) - - return True - - -def nodeWhichOwnsSock(self, sock): - """ - returns ref to node which owns a socket - """ - for node in self.nodes: - if node.sock == sock: - return node - return None - -class KTestNetwork(stasher.KBase): - """ - Builds and runs a variable-sized test network - """ - @others - -def __init__(self, numnodes=numTestNodes, doPings=True, purge=False): - """ - Builds the test network - """ - global Socket, select - - self.trigger = 0 - - stasher.core.n = self # for convenience while debugging - - if purge: - self.purge() - - if 0: - if KTestNetwork.aNetworkExists: - raise Exception("A test network already exists, may not create another") - KTestNetwork.aNetworkExists = True - - self.nodes = [] - - for i in range(numnodes): - nodeName = "kademlia-testnode-%s" % i - self.log(3, "Creating test node %s" % nodeName) - node = KNode(nodeName, in_depth=0, out_depth=0) - node.idx = i - node.n = self - self.nodes.append(node) - #print node.peers - - self.log(3, "test network successfully created") - - -def __getitem__(self, num): - """ - Allows test network to be treated as array, returns the 'num'th node - """ - return self.nodes[num] - - -def __len__(self): - return len(self.nodes) - - -def start(self, doPings=True): - """ - Starts up the test network - """ - for node in self.nodes: - self.log(3, "starting node %s" % node.name) - node.start(doPings) - - -def stop(self): - """ - Stops (or tries to stop) the test network - """ - for node in self.nodes: - self.log(3, "stopping node %s" % node.name) - node.stop() - - -aNetworkExists = False - -def _doHousekeeping(self): - """ - Performs periodical housekeeping on this node. 
- - Activities include: - - checking pending records for timeouts - """ - now = time.time() - - # DEPRECATED - SWITCH TO RPC-based - # check for expired pings - for msgId, (dest, q, pingDeadline) in self.pendingPings.items(): - - if pingDeadline > now: - # not timed out, leave in pending - continue - - # ping has timed out - del self.pendingPings[msgId] - q.put(False) - - # check for timed-out RPCs - for rpc in self.rpcPending[:]: - if rpc.nextTickTime != None and now >= rpc.nextTickTime: - try: - rpc.on_tick() - except: - traceback.print_exc() - self.log(2, "unhandled exception in RPC on_tick") - - -class KPendingResultBase: - """ - Class which holds the details of an RPC sent to another node. - """ - @others - - -def __init__(self, node, typ, **kw): - """ - Creates a pending result object - - Arguments: - - node - node which is waiting for this result - - typ - operation type on which we're awaiting a response, - one of 'ping', 'findNode', 'findData', 'store' - - Keywords: - - gotta think about this - """ - self.node = node - self.typ = type - - # dict of msgId, peer pairs - self.msgIds = {} - - # indicates when to timeout and return the best available result - self.deadline = time.time() + timeout[typ] - - # add ourself to node - self.node.pendingResults.append(self) - - -def _doChug(self): - """ - Do what's needed to drive the node. 
- Handle incoming packets - Check on and action timeouts - """ - # handle all available packets - while self._doRx(): - pass - - # do maintenance - eg processing timeouts - self._doHousekeeping() - - -def __cmp__(self, other): - - # for sorting pending results by deadline - return cmp(self.deadline, other.deadline) - - -def append(self, peer, msgId): - """ - Adds a (peer, msgId) pair, to watch out for - """ - peer = self.node._normalisePeer(peer) - - self.msgIds[msgId] = peer - self.node.awaitingMsgIds[msgId] = self - - -def wait(self): - """ - Called by application-level routines to wait on and return some kind of result - """ - return self.queue.get() - -def check(self, now=None): - """ - Checks for a timeout condition, which if one occurs, sticks the best - available result onto the queue to be picked up by the caller - """ - if now == None: - now = time.time() - if now > self.deadline: - self.queue.put(self.bestAvailableResult()) - - -def on_tick(self): - """ - Override this in subclasses. 
- If a timeout occurs, this routine gets called, and should return - the 'best available result' to be delivered back to synchronous caller - """ - raise KNotImplemented - -def on_packet(self, msgId, **details): - """ - Called when a packet of id msgId arrives - - Should return True if this packet was for us, or False if not - """ - raise KNotImplemented - - -class KPendingResultPing(KPendingResultBase): - """ - for managing synchronous pings - """ - @others - -def __init__(self, node): - - KPendingResultBase.__init__(self, node, 'ping') - - -def on_tick(self): - """ - Handle synchronous ping timeouts - """ - return False - - -def on_packet(self, msgId, **details): - """ - Must have got back a ping reply - """ - self.destroySelf() - self.queue.put(True) - - return True - -def destroySelf(self): - """ - Remove ourself from node - """ - self.node.pendingResults.remove(self) - - - -def __eq__(self, other): - - #self.log(2, "KPeer: comparing %s to %s (%s to %s)" % (self, other, self.__class__, other.__class__)) - res = self.id == getattr(other, 'id', None) - #self.log(2, "KPeer: res=%s" % res) - return res - - -def __ne__(self, other): - return not (self == other) - - -def execute(self): - """ - Only for synchronous (application-level) execution. 
- Wait for the RPC to complete (or time out) and return - whatever it came up with - """ - if core.fg: - print "servicing background thread" - while self.queue.empty(): - core.cycle() - - return self.queue.get() - - - -def bindPeerReply(self, peer, msgId): - """ - Sets up the node to give us a callback when a reply - comes in from downstream peer 'peer' with msg id 'msgId' - """ - self.localNode.rpcBindings[(peer.dest, msgId)] = (self, peer) - - -def unbindPeerReply(self, peer, msgId): - """ - Disables the callback from node for replies - from peer 'peer' with msgId 'msgId' - """ - bindings = self.localNode.rpcBindings - peerdest = peer.dest - if bindings.has_key((peerdest, msgId)): - del bindings[(peerdest, msgId)] - - -def unbindAll(self): - """ - Remove all reply bindings - """ - bindings = self.localNode.rpcBindings - self.log(5, "node bindings before: %s" % bindings) - for k,v in bindings.items(): - if v[0] == self: - del bindings[k] - self.log(5, "node bindings after: %s" % bindings) - - -class KRpc(KBase): - """ - Base class for RPCs between nodes. 
- Refer subclasses - """ - @others - -def __init__(self, localNode, client=None, **kw): - """ - Holds all the information for an RPC - - Arguments: - - localNode - the node from which this RPC is being driven - - client - a representation of who is initiating this rpc, one of: - - None - an API caller, which is to be blocked until the RPC completes - or times out - - (upstreamPeer, upstreamMsgId) - an upstream peer - - callable object - something which requires a callback upon completion - in which case the callable will be invoked with the RPC results as the - first argument - - Keywords: - - cbArgs - optional - if given, and if client is a callback, the callback - will be invoked with the results as first argument, and this object as - second argument - """ - self.localNode = localNode - - if client == None: - # an api client - self.isLocal = True - self.queue = Queue.Queue() - self.callback = None - elif callable(client): - self.isLocal = False - self.callback = client - elif isinstance(client, tuple): - # we're doing the RPC on behalf of an upstream peer - upstreamPeer, upstreamMsgId = client - upstreamPeer = localNode._normalisePeer(upstreamPeer) - self.isLocal = False - self.upstreamPeer = upstreamPeer - self.upstreamMsgId = upstreamMsgId - self.callback = None - - # save keywords - self.__dict__.update(kw) - - # set time for receiving a tick. - # if this is set to an int absolute time value, the on_tick method - # will be called as soon as possible after that time - self.nextTickTime = None - - # and register with node as a pending command - self.localNode.rpcPending.append(self) - - # now start up the request - self.start() - - -def start(self): - """ - Start the RPC running. 
- Override this in subclasses - """ - raise KNotImplemented - - -def on_reply(self, peer, msgId, **details): - """ - Callback which fires when a downstream peer replies - - Override this in subclasses - """ - raise KNotImplemented - - -def on_tick(self): - """ - Callback which fires if the whole RPC times out, in which - case the RPC should return whatever it can - - Override in subclasses - """ - self.localNode.rpcPending.remove(self) - - -class KRpcPing(KRpc): - """ - Implements the PING rpc as per Kademlia spec - """ - @others - -def __init__(self, localNode, client=None, **kw): - """ - Creates and performs a PING RPC - - Arguments: - - localNode - the node performing this RPC - - upstreamPeer - if given, the peer wanting a reply - - upstreamMsgId - if upstreamPeer is given, this is the msgId - of the RPC message from the upstream peer - - Keywords: - - peer - the peer to ping - default is local node - """ - peer = kw.get('peer', None) - if peer != None: - peer = localNode._normalisePeer(peer) - self.peerToPing = peer - - if kw.has_key('cbArgs'): - KRpc.__init__(self, localNode, client, cbArgs=kw['cbArgs']) - else: - KRpc.__init__(self, localNode, client) - - -def start(self): - """ - Sends out the ping - """ - peer = self.peerToPing - - # are we ourselves being pinged? - if peer == None: - # yes, just reply - self.returnValue(True) - return - - # no - we need to ping a peer - thisNode = self.localNode - - msgId = thisNode.msgId = thisNode._msgIdAlloc() - - # bind for peer response - self.bindPeerReply(peer, msgId) - - # and send it off - self.log(3, "node %s sending ping" % self.localNode.name) - peer.send_ping(msgId=msgId) - - # and set a reply timeout - self.nextTickTime = time.time() + timeout['ping'] - - -def on_reply(self, peer, msgId, **details): - """ - Callback for PING reply - """ - self.log(3, "got ping reply from %s" % peer) - self.returnValue(True) - - -def terminate(self): - """ - Clean up after ourselves. 
- Mainly involves removing ourself from local node - """ - self.unbindAll() - try: - self.localNode.rpcPending.remove(self) - except: - #traceback.print_exc() - pass - - -def messageEncode(params): - """ - Serialise the dict 'params' for sending - - Temporarily using bencode - replace later with a more - efficient struct-based impl. - """ - try: - return bencode.bencode(params) - except: - log(1, "encoder failed to encode: %s" % repr(params)) - raise - - -def messageDecode(raw): - return bencode.bdecode(raw) - -def on_tick(self): - """ - 'tick' handler. - - For PING RPC, the only time we should get a tick is when the ping - has timed out - """ - self.log(3, "timeout awaiting ping reply from %s" % self.peerToPing) - self.returnValue(False) - - -def _on_reply(self, peer, msgId, **kw): - """ - This should never happen - """ - self.log(4, "got unhandled reply:\npeer=%s\nmsgId=%s\nkw=%s" % ( - peer, msgId, kw)) - - -def send_reply(self, **kw): - """ - Sends an RPC reply back to upstream peer - """ - self.log(5, "\nnode %s\nreplying to peer %s:\n%s" % ( - self.node, self, kw)) - self.send_raw(type="reply", **kw) - - -def threadHousekeeping(self): - """ - Periodically invoke nodes' housekeeping - """ - self.log(3, "\nnode housekeeping thread running") - try: - while self.isRunning: - #self.log(4, "calling nodes' housekeeping methods") - #self.nodesLock.acquire() - for node in self.nodes: - node._doHousekeeping() - #self.nodesLock.release() - time.sleep(1) - self.log(3, "\nnode housekeeping thread terminated") - except: - #self.nodesLock.release() - traceback.print_exc() - self.log(1, "\nnode housekeeping thread crashed") - - -class KRpcFindNode(KRpc): - """ - Implements the FIND_NODE rpc as per Kademlia spec - """ - @others - -def __init__(self, localNode, client=None, **kw): - """ - Creates and launches the findNode rpc - - Arguments: - - localNode - the node performing this RPC - - client - see KRpc.__init__ - - Keywords: - - hash - a string, long int or KHash object 
representing - what we're looking for. treatment depends on type: - - KHash object - used as is - - string - gets wrapped into a KHash object - - long int - wrapped into a KHash object - refer KHash.__init__ - - raw - whether 'hash' is already a hash, default True - - local - True/False - whether to only search local store, - or pass on the query to the network, default True - """ - kw = dict(kw) - if kw.get('raw', False): - h = kw['hash'] - del kw['hash'] - kw['raw'] = h - self.hashWanted = KHash(**kw) - else: - self.hashWanted = KHash(kw['hash'], **kw) - self.isLocalOnly = kw.get('local', True) - - self.numQueriesPending = 0 - - self.numRounds = 0 # count number of rounds - self.numReplies = 0 # number of query replies received - self.numQueriesSent = 0 - self.numPeersRecommended = 0 - - # whichever mode we're called from, we gotta find the k closest peers - self.localNode = localNode - self.peerTab = self.findClosestPeersInitial() - - self.log(4, "KRpcFindNode: isLocalOnly=%s" % self.isLocalOnly) - - if kw.has_key('cbArgs'): - KRpc.__init__(self, localNode, client, cbArgs=kw['cbArgs']) - else: - KRpc.__init__(self, localNode, client) - - -@ -Tech overview: - - this implementation creates each Node ID as an SHA1 hash of - the node's 'destination' - the string which constitutes its - address as an I2P endpoint. - -Datagram formats: - - each datagram sent from one node to another is a python dict object, - encoded and decoded with the 'bencode' object serialisation module. - - we use bencode because regular Python pickle is highly insecure, - allowing crackers to create malformed pickles which can have all - manner of detrimental effects, including execution of arbitrary code. - - the possible messages are listed below, along with their consituent - dictionary keys: - 1. ping: - - msgId - a message identifier guaranteed to be unique - with respect to the sending node - 2. 
findNode: - - msgId - unique message identifier - - hash - the hash we're looking for - - initiator - True/False, according to whether this node - should initiate/perform the findNode, or whether this - rpc is coming from another seeking node - 3. findData: - - msgId - unique message identifier - - hash - the exact key hash of the data we want to retrieve - - initiator - True/False, according to whether this node - should initiate/perform the findNode, or whether this - rpc is coming from another seeking node - 4. store: - - msgId - unique message identifier - - hash - the exact key hash of the data we want to store - - data - the data we want to store - 5. reply: - - msgId - the original msgId we're replying to - The other items in a reply message depend on what kind - of message we're replying to, listed below: - 1. ping - no additional data - 2. findNode: - - nodes - a list of dests nearest the given hash - 3. findData: - - nodes - as for findNode, OR - - data - the retrieved data, or None if not found - 4. store: - - status - True or False according to whether - the store operation was successful - -def _findnode(self, something=None, callback=None, **kw): - """ - Mainly for testing - does a findNode query on the network - - Arguments: - - something - one of: - - plain string - the string gets hashed and used for the search - - int or long int - this gets used as the raw hash - - a KHash object - that's what gets used - - None - the value of the 'raw' keyword will be used instead - - callback - optional - if given, a callable object which will be - called upon completion, with the result as argument - - Keywords: - - local - optional - if True, only returns the closest peers known to - node. if False, causes node to query other nodes. 
- default is False - - raw - one of: - - 20-byte string - this gets used as a binary hash - - 40-byte string - this gets used as a hex hash - """ - if not kw.has_key('local'): - kw = dict(kw) - kw['local'] = False - - self.log(3, "about to instantiate findnode rpc") - if callback: - KRpcFindNode(self, callback, hash=something, **kw) - self.log(3, "asynchronously invoked findnode, expecting callback") - else: - lst = KRpcFindNode(self, hash=something, **kw).execute() - self.log(3, "back from findnode rpc") - res = [self._normalisePeer(p) for p in lst] # wrap in KPeer objects - return res - - -def start(self): - """ - Kicks off this RPC - """ - # if we're being called by an upstream initiator, just return the peer list - if self.isLocalOnly: - peerDests = [peer.dest for peer in self.peerTab] - self.log(5, "findNode: local only: returning to upstream with %s" % repr(peerDests)) - self.returnValue(peerDests) - return - - # just return nothing if we don't have any peers - if len(self.peerTab) == 0: - self.returnValue([]) - return - - # send off first round of queries - self.sendSomeQueries() - - return - - -def on_reply(self, peer, msgId, **details): - """ - Callback for FIND_NODE reply - """ - # shorthand - peerTab = self.peerTab - - self.numReplies += 1 - - # ------------------------------------------------------------ - # determine who replied, and get the raw dests sent back - try: - peerRec = peerTab[peer] - except: - traceback.print_exc() - self.log(3, "discarding findNode reply from unknown peer %s %s, discarding" % ( - peer, details)) - return - - # one less query to wait for - self.numQueriesPending -= 1 - - # ---------------------------------------------------------- - # peerRec is the peer that replied - # peers is a list of raw dests - - # save ref to this peer, it's seemingly good - self.localNode.addref(peerRec.peer) - - # mark it as having replied - if peerRec.state != 'queried': - self.log(2, "too weird - got a reply from a peer we didn't query") - 
peerRec.state = 'replied' - - # wrap the returned peers as KPeer objects - peersReturned = details.get('result', []) - peersReturned = [self.localNode._normalisePeer(p) for p in peersReturned] - - self.numPeersRecommended += len(peersReturned) - - # and add them to table in state 'recommended' - for p in peersReturned: - peerTab.append(p, 'recommended') - - # try to fire off more queries - self.sendSomeQueries() - - # and check for and action possible end of query round - self.checkEndOfRound() - - - -def on_tick(self): - """ - Callback for FIND_NODE reply timeout - """ - # check for timeouts, and update offending peers - now = time.time() - for peerRec in self.peerTab: - if peerRec.hasTimedOut(now): - peerRec.state = 'timeout' - - # makes room for more queries - self.sendSomeQueries() - - # possible end of round - self.checkEndOfRound() - - # schedule next tick - self.nextTickTime = time.time() + 5 - - -@ -Verbatim extract from original Kademlia paper follows: - -The lookup initiator starts by picking x nodes from its closest -non-empty k-bucket (or, if that bucket has fewer than x -entries, it just takes the closest x nodes it knows of). - -The initiator then sends parallel, asynchronous -FIND NODE RPCs to the x nodes it has chosen. -x is a system-wide concurrency parameter, such as 3. - -In the recursive step, the initiator resends the -FIND NODE to nodes it has learned about from previous RPCs. - -[Paraphrased - in the recursive step, the initiator sends a FIND_NODE to -each of the nodes that were returned as results of these previous -FIND_NODE RPCs.] - -(This recursion can begin before all of the previous RPCs have -returned). - -Of the k nodes the initiator has heard of closest to -the target, it picks x that it has not yet queried and resends -the FIND_NODE RPC to them. - -Nodes that fail to respond quickly are removed from consideration -until and unless they do respond. 
- -If a round of FIND_NODEs fails to return a node any closer -than the closest already seen, the initiator resends -the FIND NODE to all of the k closest nodes it has -not already queried. - -The lookup terminates when the initiator has queried and gotten -responses from the k closest nodes it has seen. - -class KRpcFindData(KRpcFindNode): - """ - variant of KRpcFindNode which returns key value if found - """ - @others - - -class KRpcStore(KRpc): - """ - Implements key storage - """ - @others - -def _finddata(self, something=None, callback=None, **kw): - """ - As for findnode, but if data is found, return the data instead - """ - if not kw.has_key('local'): - kw = dict(kw) - kw['local'] = False - - self.log(3, "about to instantiate finddata rpc") - if callback: - KRpcFindData(self, callback, hash=something, **kw) - self.log(3, "asynchronously invoked finddata, expecting callback") - else: - res = KRpcFindData(self, hash=something, **kw).execute() - self.log(3, "back from finddata rpc") - if not isinstance(res, str): - self.log(4, "findData RPC returned %s" % repr(res)) - res = [self._normalisePeer(p) for p in res] # wrap in KPeer objects - return res - - -def _store(self, key, value, callback=None, **kw): - """ - Performs a STORE rpc - - Arguments: - - key - string - text name of key - - value - string - value to store - - Keywords: - - local - if given and true, only store value onto local store - """ - if not kw.has_key('local'): - kw = dict(kw) - kw['local'] = False - - key = shahash(key) - if callback: - KRpcStore(self, callback, key=key, value=value, **kw) - self.log(3, "asynchronously invoked findnode, expecting callback") - else: - res = KRpcStore(self, key=key, value=value, **kw).execute() - return res - - -type = 'unknown' # override in subclass - - -type = 'ping' - - -type = 'findNode' - -type = 'findData' - -type = 'store' - -@ignore -@language python -""" -Bloom filters in Python -Adam Langley <agl@imperialviolet.org> -""" -@others - -import array 
-import struct - - -__all__ = ['Bloom'] - -mixarray = array.array ('B', '\x00' * 256) -# The mixarray is based on RC4 and used as diffusion in the hashing function - - -def mixarray_init (mixarray): - for i in range (256): - mixarray[i] = i - k = 7 - for j in range (4): - for i in range (256): - s = mixarray[i] - k = (k + s) % 256 - mixarray[i] = mixarray[k] - mixarray[k] = s - -mixarray_init(mixarray) - - -class Bloom (object): - """ - Bloom filters provide a fast and compact way of checking set membership. - They do this by introducing a risk of a false positive (but there are - no false negatives). - - For more information see: - - http://www.cs.wisc.edu/~cao/papers/summary-cache/node8.html - """ - @others - - -def __init__ (self, bytes, hashes): - ''' - bytes is the size of the bloom filter in 8-bit bytes and - hashes is the number of hash functions to use. - Consult the web page linked above for values to use. - If in doubt, bytes = num_elements and hashes = 4 - ''' - self.hashes = hashes - self.bytes = bytes - - self.a = self._make_array (bytes) - - -def _make_array (self, size): - - a = array.array ('B') - # stupidly, there's no good way that I can see of - # resizing an array without allocing a huge string to do so - # thus I use this, slightly odd, method: - blocklen = 256 - arrayblock = array.array ('B', '\x00' * blocklen) - todo = size - while (todo >= blocklen): - a.extend (arrayblock) - todo -= blocklen - if todo: - a.extend (array.array ('B', '\x00' * todo)) - - # now a is of the right length - return a - - -def _hashfunc (self, n, val): - '''Apply the nth hash function''' - - global mixarray - - b = [ord(x) for x in struct.pack ('I', val)] - c = array.array ('B', [0, 0, 0, 0]) - for i in range (4): - c[i] = mixarray[(b[i] + n) % 256] - - return struct.unpack ('I', c.tostring())[0] - - -def insert(self, val): - - for i in range(self.hashes): - n = self._hashfunc(i, val) % (self.bytes * 8) - self.a[n // 8] |= self.bitmask[n % 8] - - -def __contains__ 
(self, val): - - for i in range (self.hashes): - n = self._hashfunc (i, val) % (self.bytes * 8) - if not self.a[n // 8] & self.bitmask[n % 8]: - return 0 - - return 1 - - -class CountedBloom (Bloom): - """ - Just like a Bloom filter, but provides counting (e.g. you can delete as well). - This uses 4 bits per bucket, so is generally four times larger - than the same non-counted bloom filter. - """ - @others - -def __init__ (self, buckets, hashes): - ''' - Please note that @buckets must be even. - Also note that with a Bloom object you give the - number of *bytes* and each byte is 8 buckets. - Here you're giving the number of buckets. - ''' - assert buckets % 2 == 0 - - self.hashes = hashes - self.buckets = buckets - - self.a = self._make_array (buckets // 2) - - -def insert (self, val): - - masks = [(0x0f, 0xf0), (0xf0, 0x0f)] - shifts = [4, 0 ] - - for i in range (self.hashes): - n = self._hashfunc (i, val) % self.buckets - byte = n // 2 - bucket = n % 2 - (notmask, mask) = masks[bucket] - shift = shifts[bucket] - bval = ((self.a[byte] & mask) >> shift) - if bval < 15: - # we shouldn't increment it if it's at the maximum - bval += 1 - self.a[byte] = (self.a[byte] & notmask) | (bval << shift) - - -def __contains__ (self, val): - - masks = [(0x0f, 0xf0), (0xf0, 0x0f)] - shifts = [4, 0 ] - - for i in range (self.hashes): - n = self._hashfunc (i, val) % self.buckets - byte = n // 2 - bucket = n % 2 - (notmask, mask) = masks[bucket] - shift = shifts[bucket] - bval = ((self.a[byte] & mask) >> shift) - - if bval == 0: - return 0 - - return 1 - -def __delitem__ (self, val): - - masks = [(0x0f, 0xf0), (0xf0, 0x0f)] - shifts = [4, 0 ] - - for i in range (self.hashes): - n = self._hashfunc (i, val) % self.buckets - byte = n // 2 - bucket = n % 2 - (notmask, mask) = masks[bucket] - shift = shifts[bucket] - bval = ((self.a[byte] & mask) >> shift) - - if bval < 15: # we shouldn't decrement it if it's at the maximum - bval -= 1 - - self.a[byte] = (self.a[byte] & notmask) | (bval 
<< shift) - - -if __name__ == '__main__': - - print 'Testing bloom filter: there should be no assertion failures' - a = Bloom (3, 4) - - a.insert (45) - print a.a - a.insert (17) - print a.a - a.insert (12) - print a.a - assert 45 in a - - assert 45 in a - assert not 33 in a - assert 45 in a - assert 17 in a - assert 12 in a - - c = 0 - for x in range (255): - if x in a: - c += 1 - print c - print float(c)/255 - - - a = CountedBloom (24, 4) - a.insert (45) - print a.a - a.insert (17) - print a.a - a.insert (12) - print a.a - assert 45 in a - - assert 45 in a - assert not 33 in a - assert 45 in a - assert 17 in a - assert 12 in a - - c = 0 - for x in range (255): - if x in a: - c += 1 - print c - print float(c)/255 - - del a[45] - assert not 45 in a - - -bitmask = [0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01] - - -def __iter__(self): - return iter(self.nodes) - -def findClosestPeersInitial(self): - """ - Searches our k-buckets, and returns a table of k of - peers closest to wanted hash into self.closestPeersInitial - """ - hashobj = self.hashWanted - - lst = [] - buckets = self.localNode.buckets - for bucket in buckets: - for peer in bucket: - lst.append(peer) - - table = KPeerQueryTable(lst, self.hashWanted, 'start') - table.sort() - - return table[:maxBucketSize] - - -def returnValue(self, res=None, **kw): - """ - Passes a return value back to the original caller, be it - the local application, or an upstream peer - - Arguments: - - just one - a result object to pass back, if this RPC - was instigated by a local application call. - Note that if this RPC was instigated by an upstream - peer, this will be ignored. - - Keywords: - - the items to return, in the case that this RPC was - instigated by an upstream peer. Ignored if this - RPC was instigated by a local application call. - Note - the RPC invocation/reply dict keys are - listed at the top of this source file. 
- """ - self.terminate() - if self.callback: - if hasattr(self, 'cbArgs'): - self.callback(res, self.cbArgs) - else: - self.callback(res) - elif self.isLocal: - self.queue.put(res) - else: - self.upstreamPeer.send_reply(msgId=self.upstreamMsgId, - **kw) - -def addPeerIfCloser(self, peer): - """ - Maintains the private .peersToQuery array. - If the array is not yet maxed (ie, length < maxBucketSize), - the peer is simply added. - However, if the array is maxed, it finds the least-close peer, - and replaces it with the given peer if closer. - """ - -def isCloserThanQueried(self, peer): - """ - Test function which returns True if argument 'peer' - is closer than all the peers in self.peersAlreadyQueried, - or False if not - """ - for p in self.peersAlreadyQueried: - if p.id.rawdistance(self.hashWanted) < peer.id.rawdistance(self.hashWanted): - return False - return True - - -def __del__(self): - - #self.log(4, "\nRPC %s getting the chop" % (str(self))) - pass - - -def __str__(self): - - return "<%s on node %s>" % (self.__class__.__name__, self.localNode.name) - - -def __repr__(self): - return str(self) - -class KPeerQueryRecord(KBase): - """ - Keeps state information regarding a peer we're quering - """ - @others - -class KPeerQueryTable(KBase): - """ - Holds zero or more instances of KPeerQuery and - presents/sorts table in different forms - """ - @others - -def __str__(self): - return "<KTestNetwork: %d nodes>" % len(self.nodes) - -def __repr__(self): - return str(self) - - -def sendSomeQueries(self, **kw): - """ - First step of findNode - - Select alpha nodes that we haven't yet queried, and send them queries - """ - # bail if too busy - if self.numQueriesPending >= maxConcurrentQueries: - return - - # shorthand - localNode = self.localNode - hashWanted = self.hashWanted - - # randomly choose some peers - #somePeerRecs = self.peerTab.chooseN(numSearchPeers) - somePeerRecs = self.peerTab.select('start') - - # start our ticker - self.nextTickTime = time.time() + 
timeout['findNode'] - - numQueriesSent = 0 - - # and send them findNode queries - if len(somePeerRecs) > 0: - for peerRec in somePeerRecs: - self.log(3, "querying %s" % peerRec) - if self.numQueriesPending < maxConcurrentQueries: - self.sendOneQuery(peerRec) - numQueriesSent += 1 - else: - break - self.log(3, "%s queries sent, awaiting reply" % numQueriesSent) - else: - self.log(3, "no peer recs???") - for peerRec in self.peerTab: - self.log(4, "%s state=%s, dest=%s..." % (peerRec, peerRec.state, peerRec.dest[:12])) - - -def returnTheBestWeGot(self): - """ - Returns the k closest nodes to the wanted hash that we have - actually heard from - """ - # pick the peers which have replied to us - closest = self.peerTab.select('closest') - - self.peerTab.dump() - - # add ourself to the list - we could easily be one of the best - localNode = self.localNode - selfDest = localNode._normalisePeer(localNode.dest) - closest.append(selfDest, state='closest') - - # sort in order of least distance first - closest.sort() - - # pick the best k of these - #peersHeardFrom = peersHeardFrom[:maxBucketSize] - #peersHeardFrom = peersHeardFrom[:numSearchPeers] - - # extract their dest strings - peers = [p.peer.dest for p in closest] - - # pass these back - self.returnValue(peers) - - # and we're done - return - - -def __init__(self, lst=None, sorthash=None, state=None, **kw): - self.peers = [] - if lst == None: - lst = [] - else: - self.setlist(lst, state, **kw) - self.sorthash = sorthash - - -def setlist(self, lst, state=None, **kw): - for item in lst: - self.append(item, state, **kw) - - -def extend(self, items, state, **kw): - for item in items: - self.append(item, state, **kw) - - -def append(self, item, state=None, **kw): - - if isinstance(item, KPeerQueryRecord): - self.log(5, "adding a KPeerQueryRecord, state=%s" % state) - if state != None: - item.state = state - item.__dict__.update(kw) - peerRec = item - - elif isinstance(item, KPeer): - self.log(5, "adding a KPeer") - peerRec = 
KPeerQueryRecord(item, self, state, **kw) - - else: - self.log(2, "bad peer %s" % repr(item)) - raise KBadPeer - - if peerRec not in self: - self.log(5, "peerRec=%s list=%s" % (peerRec, self.peers)) - self.peers.append(peerRec) - else: - self.log(5, "trying to append duplicate peer???") - - -def remove(self, item): - self.peers.remove(item) - - -def getExpired(self): - """ - return a list of peers which have expired - """ - return KPeerQueryTable( - filter(lambda item: item.hasTimedOut(), self.peers), - self.sorthash - ) - - -def purgeExpired(self): - """ - Eliminate peers which have expired - """ - for peer in self.peers: - if peer.hasTimedOut(): - self.peers.remove(peer) - - -def __getitem__(self, idx): - """ - Allow the table to be indexed by any of: - - KPeerQueryRecord - - integer index - - long string - treated as dest - - short string - treated as peer id hash string - - KHash - finds peer with that id - - KPeer - returns peer with that peer - """ - if type(idx) == type(0): - return self.peers[idx] - elif isinstance(idx, KPeer): - for peer in self.peers: - if peer.peer == idx: - return peer - raise IndexError("Query table has no peer %s" % idx) - elif isinstance(idx, str): - if len(str) > 512: - for peer in self.peers: - if peer.peer.dest == idx: - return peer - raise IndexError("No peer with dest %s" % idx) - else: - for peer in self.peers: - if peer.peer.id.value == idx: - return peer - raise IndexError("No peer with dest hash %s" % idx) - elif isinstance(idx, KHash): - for peer in self.peers: - if peer.peer.id == idx: - return peer - raise IndexError("No peer with id %s" % idx) - else: - raise IndexError("Invalid selector %s" % repr(idx)) - - -def __len__(self): - return len(self.peers) - - -def __getslice__(self, fromidx, toidx): - return KPeerQueryTable(self.peers[fromidx:toidx], self.sorthash) - - -def __iter__(self): - return iter(self.peers) - - -def sort(self): - """ - Sort the table in order of increasing distance from self.sorthash - """ - 
self.peers.sort() - - -def select(self, criterion): - """ - Returns a table of items for which criterion(item) returns True - Otherwise, if 'criterion' is a string, returns the items whose - state == criterion. - Otherwise, if 'criterion' is a list or tuple, return the items - whose state is one of the elements in criterion - """ - if callable(criterion): - func = criterion - elif type(criterion) in [type(()), type([])]: - func = lambda p: p.state in criterion - else: - func = lambda p: p.state == criterion - - recs = [] - for peerRec in self.peers: - if func(peerRec): - recs.append(peerRec) - return self.newtable(recs) - - -def filter(self, func): - """ - Eliminate, in place, all items where func(item) returns False - """ - for peerRec in self.peers: - if not func(peerRec): - self.peers.remove(peerRec) - - -def purge(self, func): - """ - Eliminate, in place, all items where func(item) returns True - """ - if 0 and desperatelyDebugging: - set_trace() - for peerRec in self.peers: - if func(peerRec): - self.peers.remove(peerRec) - - -def chooseN(self, n): - """ - Randomly select n peer query records - """ - candidates = self.peers[:] - - self.log(3, "candidates = %s" % repr(candidates)) - - chosen = [] - i = 0 - - if len(candidates) <= n: - chosen = candidates - else: - while i < n: - try: - peer = random.choice(candidates) - except: - self.log(2, "failed to choose one of %s" % repr(candidates)) - raise - chosen.append(peer) - candidates.remove(peer) - i += 1 - - return self.newtable(chosen) - - -def __str__(self): - return "<KPeerQueryTable: %d peers>" % len(self) #.peers) - -def __repr__(self): - return str(self) - - - - - - - - - - - - -def returnValue(self, items): - """ - override with a nicer call sig - """ - # a hack for testing - save this RPC object into the node - # so we can introspect it - self.localNode.lastrpc = self - - items = items[:maxBucketSize] - - self.reportStats() - - KRpc.returnValue(self, items, result=items) - - - -def newtable(self, items, 
state=None, **kw): - """ - Returns a new KPeerQueryTable object, based on this - one, but containing 'items' - """ - tab = KPeerQueryTable(items, sorthash=self.sorthash, state=state, **kw) - return tab - - - -def __add__(self, other): - self.extend(other) - - -def __init__(self, peer, table, state=None, **kw): - - self.peer = peer - self.dest = peer.dest - self.deadline = time.time() + timeout['findNode'] - self.table = table - - # state is always one of: - # - 'start' - have not yet sent query to peer - # - 'recommended' - peer was recommended by another peer, no query sent - # - 'queried' - sent query, awaiting reply or timeout - # - 'replied' - this peer has replied to our query - # - 'timeout' - timed out waiting for peer reply - # - 'toofar' - too far away to be of interest - # - 'closest' - this peer is one of the closest so far - - if state == None: - state = 'start' - if not isinstance(state, str): - raise Exception("Invalid state %s" % state) - - self.state = state - - self.__dict__.update(kw) - - -def hasTimedOut(self, now=None): - if now == None: - now = time.time() - return self.state == 'queried' and now > self.deadline - - -def __cmp__(self, other): - - return cmp(self.peer.id.rawdistance(self.table.sorthash), - other.peer.id.rawdistance(self.table.sorthash)) - - -def __lt__(self, other): - return (cmp(self, other) < 0) - -def __le__(self, other): - return (cmp(self, other) <= 0) - -def __gt__(self, other): - return (cmp(self, other) > 0) - -def __ge__(self, other): - return (cmp(self, other) <= 0) - - -def isCloserThanAllOf(self, tab): - """ - returns True if this peerRec is closer to the desired hash - than all of the peerRecs in table 'tab' - """ - if not isinstance(tab, KPeerQueryTable): - self.log(2, "invalid qtable %s" % repr(tab)) - raise Exception("invalid qtable %s" % repr(tab)) - - for rec in tab: - if self > rec: - return False - return True - - -def isCloserThanOneOf(self, tab): - """ - returns True if this peerRec is closer to the desired 
hash - than one or more of of the peerRecs in table 'tab' - """ - if not isinstance(tab, KPeerQueryTable): - self.log(2, "invalid qtable %s" % repr(tab)) - raise Exception("invalid qtable %s" % repr(tab)) - - for rec in tab: - if self < rec: - return True - return False - - -def __contains__(self, other): - self.log(5, "testing if %s is in %s" % (other, self.peers)) - for peerRec in self.peers: - if peerRec.peer.dest == other.peer.dest: - return True - return False - - -def sendOneQuery(self, peerRec): - """ - Sends off a query to a single peer - """ - if peerRec.state != 'start': - self.log(2, "duh!! peer state %s:\n%s" % (peerRec.state, peerRec)) - return - - msgId = self.localNode._msgIdAlloc() - self.bindPeerReply(peerRec.peer, msgId) - peerRec.msgId = msgId - - if self.type == 'findData': - peerRec.peer.send_findData(hash=self.hashWanted, msgId=msgId) - else: - peerRec.peer.send_findNode(hash=self.hashWanted, msgId=msgId) - - peerRec.state = 'queried' - - self.numQueriesPending += 1 - - self.numQueriesSent += 1 - - -def run(self, func=None): - """ - Runs the core in foreground, with the client func in background - """ - if func==None: - func = test - - self.bg = False - - thread.start_new_thread(self.runClient, (func,)) - - set_trace() - - self.threadRxPackets() - - -def stop(self): - self.isRunning = False - - -def runClient(self, func): - - self.log(3, "Core: running client func") - try: - func() - except: - traceback.print_exc() - self.log(3, "Core: client func exited") - self.stop() - -def putKey(self, key, val, keyIsHashed=False): - """ - Stores a string into this storage under the key 'key' - - Returns True if key was saved successfully, False if not - """ - try: - if keyIsHashed: - keyHashed = key - else: - keyHashed = shahash(key) - keyHashed = keyHashed.lower() - keyPath = os.path.join(self.keysDir, keyHashed) - file(keyPath, "wb").write(val) - self.log(4, "stored key: '%s'\nunder hash '%s'\n(keyIsHashed=%s)" % ( - key, keyHashed, keyIsHashed)) - 
return True - except: - traceback.print_exc() - self.log(3, "failed to store key") - return False - - -def shahash(somestr, bin=False): - shaobj = sha.new(somestr) - if bin: - return shaobj.digest() - else: - return shaobj.hexdigest() - - -def getKey(self, key, keyIsHashed=False): - """ - Attempts to retrieve item from node's local file storage, which was - stored with key 'key'. - - Returns value as a string if found, or None if not present - """ - try: - if keyIsHashed: - keyHashed = key - else: - keyHashed = shahash(key) - - keyHashed = keyHashed.lower() - self.log(4, "key=%s, keyHashed=%s, keyIsHashed=%s" % (key, keyHashed, keyIsHashed)) - - keyPath = os.path.join(self.keysDir, keyHashed) - - if os.path.isfile(keyPath): - return file(keyPath, "rb").read() - else: - return None - except: - traceback.print_exc() - self.log(3, "error retrieving key '%s'" % key) - return None - - -def __init__(self, localNode, client=None, **kw): - """ - Creates and launches a STORE rpc - - Arguments: - - localNode - the node performing this RPC - - client - see KRpc.__init__ - - Keywords: - - key - the key under which we wish to save the data - - value - the value we wish to save - - local - True/False: - - if True, only save in local store - - if False, do a findNode to find the nodes to save the - key to, and tell them to save it - default is True - """ - self.key = kw['key'] - #self.keyHashed = shahash(self.key) - self.keyHashed = self.key - self.value = kw['value'] - self.isLocalOnly = kw.get('local', True) - - # set 'splitting' flag to indicate if we need to insert as splitfiles - self.splitting = len(self.value) > maxValueSize - - self.log(4, "isLocalOnly=%s" % self.isLocalOnly) - - if kw.has_key('cbArgs'): - KRpc.__init__(self, localNode, client, cbArgs=kw['cbArgs']) - else: - KRpc.__init__(self, localNode, client) - - -def start(self): - """ - Kicks off this RPC - """ - # if too big, then break up into <30k chunks - if self.splitting: - self.storeSplit() - return - - # not 
too big - prefix a 0 chunk count, and go ahead as a single entity - self.value = "chunks:0\n" + self.value - - # if local only, or no peers, just save locally - if self.isLocalOnly or len(self.localNode.peers) == 0: - result = self.localNode.storage.putKey(self.keyHashed, self.value, keyIsHashed=True) - if result: - result = 1 - else: - result = 0 - self.returnValue(result) - return - - # no - se have to find peers to store the key to, and tell them to - # store the key - - # launch a findNode rpc, continue in our callback - KRpcFindNode(self.localNode, self.on_doneFindNode, - hash=self.keyHashed, raw=True, local=False) - return - - - -def on_doneFindNode(self, lst): - """ - Receive a callback from findNode - - Send STORE command to each node that comes back - """ - localNode = self.localNode - - # normalise results - normalisePeer = localNode._normalisePeer - peers = [normalisePeer(p) for p in lst] # wrap in KPeer objects - - self.log(2, "STORE RPC findNode - got peers %s" % repr(peers)) - - i = 0 - - self.numPeersSucceeded = 0 - self.numPeersFailed = 0 - self.numPeersFinished = 0 - - # and fire off store messages for each peer - for peer in peers: - - if peer.dest == localNode.dest: - self.log(3, "storing to ourself") - localNode.storage.putKey(self.keyHashed, self.value, keyIsHashed=True) - self.numPeersSucceeded += 1 - self.numPeersFinished += 1 - else: - msgId = self.localNode._msgIdAlloc() - self.log(4, "forwarding store cmd to peer:\npeer=%s\nmsgId=%s" % (peer, msgId)) - self.bindPeerReply(peer, msgId) - peer.send_store(key=self.keyHashed, value=self.value, msgId=msgId) - i += 1 - if i >= numStorePeers: - break - - self.nextTickTime = time.time() + timeout['store'] - - self.log(2, "Sent store cmd to %s peers, awaiting responses" % i) - - self.numPeersToStore = i - - - -def returnValue(self, result): - """ - an override with a nicer call sig - """ - # a hack for testing - save this RPC object into the node - # so we can introspect it - self.localNode.lastrpc 
= self - - try: - KRpc.returnValue(self, result, status=result) - except: - traceback.print_exc() - self.log(3, "Failed to return %s" % repr(result)) - KRpc.returnValue(self, 0, status=0) - - -def on_reply(self, peer, msgId, **details): - """ - callback which fires when we get a reply from a STORE we sent to a - peer - """ - self.numPeersSucceeded += 1 - self.numPeersFinished += 1 - - if self.numPeersFinished == self.numPeersToStore: - # rpc is finished - self.returnValue(True) - - -def on_tick(self): - - self.log(3, "Timeout awaiting store reply from %d out of %d peers" % ( - self.numPeersToStore - self.numPeersSucceeded, self.numPeersToStore)) - - if self.numPeersSucceeded == 0: - self.log(3, "Store timeout - no peers replied, storing locally") - self.localNode.storage.putKey(self.keyHashed, self.value, keyIsHashed=True) - - self.returnValue(True) - - -def _on_store(self, peer, msgId, **kw): - """ - Handles incoming STORE command - """ - self.log(4, "got STORE rpc from upstream:\npeer=%s\nmsgId=%s\nkw=%s" % (peer, msgId, kw)) - - KRpcStore(self, (peer, msgId), local=True, **kw) - - -def start(self): - """ - Kicks off the RPC. - If requested key is stored locally, simply returns it. 
- Otherwise, falls back on parent method - """ - # if we posses the data, just return the data - value = self.localNode.storage.getKey(self.hashWanted.asHex(), keyIsHashed=True) - if value != None: - self.log(4, "Found required value in local storage") - self.log(4, "VALUE='%s'" % value) - self.on_gotValue(value, self.hashWanted.asHex()) - return - - # no such luck - pass on to parent - KRpcFindNode.start(self) - - -def asHex(self): - return ("%040x" % self.value).lower() - - -def on_reply(self, peer, msgId, **details): - """ - Callback for FIND_NODE reply - """ - res = details.get('result', None) - if isinstance(res, str): - self.on_gotValue(res, self.hashWanted.asHex()) - else: - KRpcFindNode.on_reply(self, peer, msgId, **details) - - -def __del__(self): - """ - Cleanup - """ - - -@first #! /usr/bin/env python - -""" -A simple Hashcash implementation - -Visit U{http://www.hashcash.org} for more info about -the theory and usage of hashcash. - -Run this module through epydoc to get pretty doco. 
- -Overview: - - implements a class L{HashCash}, with very configurable parameters - - offers two convenience wrapper functions, L{generate} and L{verify}, - for those who can't be bothered instantiating a class - - given a string s, genToken produces a hashcash token - string t, as binary or base64 - - generating t consumes a lot of cpu time - - verifying t against s is almost instantaneous - - this implementation produces clusters of tokens, to even out - the token generation time - -Performance: - - this implementation is vulnerable to: - - people with lots of computers, especially big ones - - people writing bruteforcers in C (python is way slow) - - even with the smoothing effect of creating token clusters, - the time taken to create a token can vary by a factor of 7 - -Theory of this implementation: - - - a hashcash token is created by a brute-force algorithm - of finding an n-bit partial hash collision - - - given a string s, and a quality level q, - generate a 20-byte string h, such that: - - 1. h != s - 2. len(h) == 20 - 3. ((sha(s) xor sha(h)) and (2 ^ q - 1)) == 0 - - - in other words, hash(h) and hash(s) have q least - significant bits in common - -If you come up with a faster, but PURE PYTHON implementation, -using only modules included in standard python distribution, -please let me know so I can upgrade mine or link to yours. - -Written by David McNab, August 2004 -Released to the public domain. -""" -@others - - -import sha, array, random, base64, math -from random import randint - -shanew = sha.new - - -def generate(value, quality, b64=False): - """ - Generates a hashcash token - - This is a convenience wrapper function which saves you from having to - instantiate a HashCash object. 
- - Arguments: - - value - a string against which to generate token - - quality - an int from 1 to 160 - typically values are 16 to 30 - - b64 - if True, return the token as base64 (suitable for email, - news, and other text-based contexts), otherwise return a binary string - - Quality values for desktop PC usage should typically be between 16 and 30. - Too low, and it makes an attacker's life easy. - Too high, and it makes life painful for the user. - """ - if b64: - format = 'base64' - else: - format = 'binary' - - h = HashCash(quality=quality, format=format) - - return h.generate(value) - - -def binify(L): - """ - Convert a python long int into a binary string - """ - res = [] - while L: - res.append(chr(L & 0xFF)) - L >>= 8 - res.reverse() - return "".join(res) - - -def intify(s): - """ - Convert a binary string to a python long int - """ - n = 0L - for c in s: - n = (n << 8) | ord(c) - return n - - -# your own config settings - set these to get a good trade-off between -# token size and uniformity of time taken to generate tokens -# -# the final token size will be tokenSize * chunksPerToken for binary -# tokens, or ceil(4/3 * tokenSize * chunksPerToken) for base64 tokens -# -# the reason for building a token out of multiple token chunks is to -# try to even out the time taken for token generation -# -# without this, token generation time is very random, with some tokens -# generating almost instantaneously, and other tokens taking ages - -defaultChunkSize = 3 # size of each chunk in a token -defaultNumChunks = 12 # number of chunks in each token -defaultQuality = 12 # number of partial hash collision bits required -defaultFormat = 'base64' # by default, return tokens in base64 format -defaultVerbosity = 0 # increase this to get more verbose output - - -def verify(value, quality, token): - """ - Verifies a hashcash token. - - This is a convenience wrapper function which saves you from having to - instantiate a HashCash object. 
- - Arguments: - - value - the string against which to check the hashcash token - - quality - the number of bits of token quality we require - - token - a hashcash token string - """ - h = HashCash(quality=quality) - - return h.verify(value, token) - - -def test(nbits=14): - """ - Basic test function - perform encoding and decoding, - in plain and base64 formats, using the wrapper functions - """ - print "Test, using wrapper functions" - - value = _randomString() - print "Generated random string\n%s" % value - print - - print "Generating plain binary %s-bit token for:\n%s" % (nbits, value) - tok = generate(value, nbits) - - print "Got token %s, now verifying" % repr(tok) - result = verify(value, nbits, tok) - - print "Verify = %s" % repr(result) - print - - print "Now generating base64 %s-bit token for:\n%s" % (nbits, value) - tok = generate(value, nbits, True) - - print "Got base64 token %s, now verifying" % repr(tok) - result = verify(value, nbits, tok) - - print "Verify = %s" % repr(result) - - -# get a boost of speed if psyco is available on target machine -try: - import psyco - psyco.bind(genToken) - psyco.bind(binify) - psyco.bind(intify) -except: - pass - - -class HashCash: - """ - Class for creating/verifying hashcash tokens - - Feel free to subclass this, overriding the default attributes: - - chunksize - - numchunks - - quality - - format - - verbosity - """ - @others - -def __init__(self, **kw): - """ - Create a HashCash object - - Keywords: - - chunksize - size of each token chunk - - numchunks - number of chunks per token - - quality - strength of token, in bits: - - legal values are 1 to 160 - - typical values are 10 to 30, larger values taking much - longer to generate - - format - 'base64' to output tokens in base64 format; any other - value causes tokens to be generated in binary string format - - verbosity - verbosity of output messages: - - 0 = silent - - 1 = critical only - - 2 = noisy - """ - for key in ['chunksize', 'numchunks', 'quality', 
'format', 'verbosity']: - if kw.has_key(key): - setattr(self, key, kw[key]) - - self.b64ChunkLen = int(math.ceil(self.chunksize * 4.0 / 3)) - - -def generate(self, value): - """ - Generate a hashcash token against string 'value' - """ - quality = self.quality - mask = 2 ** quality - 1 - hV = sha.new(value).digest() - nHV = intify(hV) - - maxTokInt = 2 ** (self.chunksize * 8) - - tokenChunks = [] - chunksPerToken = self.numchunks - - # loop around generating random strings until we get one which, - # when xor'ed with value, produces a hash with the first n bits - # set to zero - while 1: - nTok = randint(0, maxTokInt) - sNTok = binify(nTok) - hSNTok = shanew(sNTok).digest() - nHSNTok = intify(hSNTok) - if (nHV ^ nHSNTok) & mask == 0: - # got a good token - if self.format == 'base64': - if not self._checkBase64(sNTok): - # chunk fails to encode/decode base64 - if self.verbosity >= 2: - print "Ditching bad candidate token" - continue - bSNTok = self._enc64(sNTok) - if self.verbosity >= 2: - print "encoded %s to %s, expect chunklen %s" % ( - repr(sNTok), repr(bSNTok), self.b64ChunkLen) - sNTok = bSNTok - # got something that works, add it to chunks, return if we got enough chunks - if sNTok in tokenChunks: - continue # already got this one - tokenChunks.append(sNTok) - if len(tokenChunks) == chunksPerToken: - return "".join(tokenChunks) - - -def verify(self, value, token): - """ - Verifies a hashcash token against string 'value' - """ - if self.verbosity >= 2: - print "Verify: checking token %s (len %s) against %s" % (token, len(token), value) - # mask is an int with least-significant 'q' bits set to 1 - mask = 2 ** self.quality - 1 - - # breaking up token into its constituent chunks - chunks = [] - - # verify token size - if len(token) != self.chunksize * self.numchunks: - # try base64 - decoded = False - try: - for i in range(0, self.numchunks): - b64chunk = token[(i * self.b64ChunkLen) : ((i + 1) * self.b64ChunkLen)] - chunk = self._dec64(b64chunk) - if len(chunk) 
!= self.chunksize: - if self.verbosity >= 2: - print "Bad chunk length in decoded base64, wanted %s, got %s" % ( - self.chunksize, len(chunk)) - return False - chunks.append(chunk) - except: - if self.verbosity >= 2: - if decoded: - print "Bad token length" - else: - print "Base64 decode failed" - return False - else: - # break up token into its chunks - for i in range(0, self.numchunks): - chunks.append(token[(i * self.chunksize) : ((i + 1) * self.chunksize)]) - - # produce hash string and hash int for input string - hV = sha.new(value).digest() - nHv = intify(hV) - - # test each chunk - if self.verbosity >= 2: - print "chunks = %s" % repr(chunks) - - while chunks: - chunk = chunks.pop() - - # defeat duplicate chunks - if chunk in chunks: - if self.verbosity >= 2: - print "Rejecting token chunk - duplicate exists" - return False - - # hash the string and the token - hTok = sha.new(chunk).digest() - - # defeat the obvious attack - if hTok == hV: - if self.verbosity >= 2: - print "Rejecting token chunk - equal to token" - return False - - # test if these hashes have the least significant n bits in common - nHTok = intify(hTok) - if (nHTok ^ nHv) & mask != 0: - # chunk failed - if self.verbosity >= 2: - print "Rejecting token chunk %s - hash test failed" % repr(chunk) - return False - - # pass - return True - - -def ctest(quality=14): - """ - Basic test function - perform token generation and verify, against - a random string. Instantiate a HashCash class instead of just using the - wrapper funcs. 
- """ - print "Test using HashCash class" - - value = _randomString() - print "Generated random string\n%s" % value - print - - hc = HashCash(quality=quality, format='base64') - - print "Generating plain binary %s-bit token for:\n%s" % (quality, value) - tok = hc.generate(value) - - print "Got token %s, now verifying" % repr(tok) - result = hc.verify(value, tok) - - print "Verify = %s" % repr(result) - print - - -if __name__ == '__main__': - - test() - - -def ntest(): - """ - This function does 256 key generations in a row, and dumps - some statistical results - """ - # adjust these as desired - chunksize=3 - numchunks=32 - quality=6 - numIterations = 256 - - import time - try: - import stats - except: - print "This test requires the stats module" - print "Get it (and its dependencies) from:" - print "http://www.nmr.mgh.harvard.edu/Neural_Systems_Group/gary/python.html" - return - - print "Thrash test" - - times = [] - - # create a hashcash token generator object - hc = HashCash( - chunksize=chunksize, - numchunks=numchunks, - quality=quality - ) - - # 256 times, create a random string and a matching hashcash token - for i in range(numIterations): - - value = _randomString() - - # measure time for a single token generation - then = time.time() - tok = hc.generate(value) - now = time.time() - times.append(now - then) - - # sanity check, make sure it's valid - result = hc.verify(value, tok) - if not result: - print "Verify failed, token length=%s" % len(tok) - return - - print "Generated %s of %s tokens" % (i, numIterations) - - print "---------------------------------" - print "Thrash test performance results" - print "Token quality: %s bits" % quality - print "Min=%.3f max=%.3f max/min=%.3f mean=%.3f, median=%.3f, stdev=%.3f" % ( - min(times), - max(times), - max(times)/min(times), - stats.lmean(times), - stats.lmedian(times), - stats.lstdev(times) - ) - - -def _checkBase64(self, item): - """ - Ensures the item correctly encodes then decodes to/from base64 - """ - 
#if self.verbose: - # print "Checking candidate token" - enc = self._enc64(item) - if len(enc) != self.b64ChunkLen: - if self.verbosity >= 1: - print "Bad candidate token" - return False - return self._dec64(enc) == item - - -def _enc64(self, item): - """ - Base64-encode a string, remove padding - """ - enc = base64.encodestring(item).strip() - while enc[-1] == '=': - enc = enc[:-1] - return enc - - -def _dec64(self, item): - """ - Base64-decode a string - """ - dec = base64.decodestring(item+"====") - return dec - - -def _randomString(): - """ - For our tests below. - Generates a random-length human-readable random string, - between 16 and 80 chars - """ - chars = [] - slen = randint(16, 80) - for i in range(slen): - chars.append(chr(randint(32, 128))) - value = "".join(chars) - return value - - -# override these at your pleasure - -chunksize = defaultChunkSize -numchunks = defaultNumChunks -quality = defaultQuality -format = defaultFormat -verbosity = defaultVerbosity - - -def dump(self, detailed=0): - """ - Outputs a list of nodes and their connections - """ - if detailed: - self.dumplong() - return - - for node in self.nodes: - print node.name + ":" - print " " + ", ".join([self.getPeerName(peer) for peer in node.peers]) - - -def getPeerName(self, peer): - for n in self.nodes: - if n.dest == peer.dest: - return n.name - return "<??%s>" % n.dest[:8] - - -def whohas(self, key): - print "Nodes having key %s:" % key - - h = KHash(key) - - def hcmp(n1, n2): - res = cmp(h.rawdistance(n1.id), h.rawdistance(n2.id)) - #print "compared: %s %s = %s" % (n1.idx, n2.idx, res) - return res - - i = 0 - - nodes = self.nodes[:] - nodes.sort(hcmp) - - for node in nodes: - if node.storage.getKey(key) != None: - i += 1 - print "%3s" % node.idx, - if i % 16 == 0: - print - - -def whocanfind(self, key): - """ - Produces a list of nodes which can find key 'key' - """ - successes = [] - failures = [] - print "Nodes which can find key %s" % key - for i in range(len(self.nodes)): - node 
= self.nodes[i] - if node.get(key) != None: - print " %s found it" % node.name - successes.append(i) - else: - print " %s failed" % node.name - failures.append(i) - - print "Successful finds: %s" % repr(successes) - print "Failed finds: %s" % repr(failures) - - -def closestto(self, key): - """ - Outputs a list of node names, in order of increasing 'distance' - from key - """ - key = KHash(key) - def nodecmp(node1, node2): - #self.log(3, "comparing node %s with %s" % (node1, node2)) - return cmp(node1.id.rawdistance(key), node2.id.rawdistance(key)) - - nodes = self.nodes[:] - nodes.sort(nodecmp) - print "Nodes, in order of increasing distance from '%s'" % key - - i = 0 - for node in nodes: - i += 1 - print "%3s" % node.idx, - if i % 16 == 0: - print - - -def findnode(self, idx, key): - """ - does a findnode on peer 'idx' against key 'key' - """ - peers = self.nodes[idx]._findnode(key) - print "Findnode on node %s (%s) returned:" % (self.nodes[idx].name, key) - - peers = [self.getPeerIdx(p) for p in peers] - - i = 0 - for p in peers: - print "%3d" % p, - i += 1 - if i % 16 == 0: - print - - -def dumpids(self): - print "Nodes listed by name and id" - for i in range(len(self.nodes)): - node = self.nodes[i] - print "%s: %s (%s...)" % (i, node.name, node.id.asHex()[:10]) - -def dumplong(self): - """ - Outputs a list of nodes and their connections - """ - for node in self.nodes: - print "%s: id=%s dest=%s" % (node.name, node.id.asHex()[:8], node.dest[:8]) - for peer in node.peers: - npeer = self.getPeer(peer) - print " %s: id=%s dest=%s" % (npeer.name, npeer.id.asHex()[:8], npeer.dest[:8]) - -def getPeer(self, peer): - for n in self.nodes: - if n.dest == peer.dest: - return n - return None - - -def logexc(verbosity, msg, nPrev=0, clsname=None): - - fd = StringIO("%s\n" % msg) - traceback.print_exc(file=fd) - log(verbosity, fd.getvalue(), nPrev, clsname) - - -class KTestSocket(stasher.KBase): - """ - Emulates an I2P Socket for testing - """ - # class-global mapping of b64 
dests to sockets - opensocks = {} - totalQueuedItems = 0 - - def __init__(self, sessname, *args, **kw): - - self.log(4, "creating simulated i2p socket %s" % sessname) - - # that'll do for pseudo-random dests - self.dest = stasher.shahash(sessname) + "0" * 256 - - # set up our inbound queue - self.queue = Queue.Queue() - - # register ourself - self.opensocks[self.dest] = self - - def __del__(self): - - # deregister ourself - del self.opensocks[self.dest] - - def sendto(self, data, flags, dest): - - self.opensocks[dest].queue.put((data, self.dest)) - KTestSocket.totalQueuedItems += 1 - - def recvfrom(self, *args): - - KTestSocket.totalQueuedItems -= 1 - return self.queue.get() - - def select(inlist, outlist, errlist, timeout=0): - - log = stasher.log - log(5, "fake select called") - deadline = time.time() + timeout - while (time.time() < deadline) and KTestSocket.totalQueuedItems == 0: - time.sleep(0.1) - if KTestSocket.totalQueuedItems == 0: - return [], [], [] - socksWithData = [] - for sock in inlist: - if not sock.queue.empty(): - socksWithData.append(sock) - log(5, "fake select returning %s" % repr(socksWithData)) - return socksWithData, [], [] - - select = staticmethod(select) - - def setblocking(self, val): - - self.blocking = val - - -class KBase: - """ - A mixin which adds a class-specific logger - """ - def log(self, verbosity, msg): - - log(verbosity, msg, 1, self.__class__.__name__) - - def logexc(self, verbosity, msg): - - logexc(verbosity, msg, 1, self.__class__.__name__) - - -def debug(): - """ - Alternative testing entry point which runs the test() function - in background, and the engine in foreground, allowing the engine - to be stepped through with a debugger - """ - global desperatelyDebugging - desperatelyDebugging = True - core.run() - - - -def on_reply(self, peer, msgId, **details): - """ - Callback for FIND_NODE reply - """ - # shorthand - peerRecs = self.peerRecs - - # who replied? 
- try: - peerRec = peerRecs[peer] - except: - traceback.print_exc() - self.log(3, "discarding findNode reply from unknown peer %s %s, discarding" % ( - peer, details)) - return - - if logVerbosity >= 3: - try: - dests = "\n".join([d[:6] for d in details['nodes']]) - except: - logexc(4, "*** find-node rpc reply = %s" % details) - - self.log(3, "got findNode reply from %s:\n%s" % (peer, details)) - self.unbindPeerReply(peer, msgId) - - # save ref to this peer, it's seemingly good - self.localNode.addref(peerRec.peer) - - # mark it as having replied - peerRec.state = 'replied' - - # one less query to wait for - self.numQueriesPending -= 1 - - # save these as 'fromReply' peers - peersReturned = details.get('nodes', []) - peersReturned = [self.localNode._normalisePeer(p) for p in peersReturned] - peerRecsReturned = peerRecs.newtable(peersReturned, 'fromReply') - peerRecsReturned.sort() - peerRecsReturned.purge(lambda p:p in peerRecs or p.peer.dest == self.localNode.dest) - - # update our node's KBucket - for peerObj in peersReturned: - dist = self.localNode.id.distance(peerObj.id) - self.localNode.buckets[dist].justSeenPeer(peerObj) - - self.log(5, "peerRecsReturned = %s" % repr(peerRecsReturned)) - - if len(peerRecsReturned) > 0: - peerRecs.extend(peerRecsReturned, 'fromReply') - - if desperatelyDebugging: - print "TRACING???" - set_trace() - - # are there any peers we're still waiting to hear from? 
- if self.numQueriesPending == 0 and len(peerRecs.select(('idle', 'fromQuery'))) == 0: - - # query round is finished - see how good the results are - repliedPeers = peerRecs.select('replied') - self.log(3, "====== query round finished, got %s" % repr(repliedPeers)) - - # if this round returned any peers better than the ones we've already - # heard from, then launch another round of queries - candidates = peerRecs.select('candidate') - ncandidates = len(candidates) - - # test all returned peers, and see if one or more is nearer than - # our candidates - closerPeers = [] - gotNearer = 0 - for rec in peerRecs.select('fromReply'): - if ncandidates == 0 or rec.isCloserThanOneOf(candidates): - self.log(3, "Got a closer peer (or no candiates yet)") - gotNearer = 1 - - if gotNearer: - # mark replied peers as candidates - for rec in peerRecs.select('replied'): - rec.state = 'candidate' - pass - else: - # no - all queries are exhausted - it's the end of this round - self.log(3, "Query round returned no closer peers") - self.returnTheBestWeGot() - - self.sendSomeQueries() - - -def count(self, *args): - """ - returns the number of records whose state is one of args - """ - count = 0 - for rec in self.peers: - if rec.state in args: - count += 1 - return count - - -def changeState(self, oldstate, newstate): - """ - for all recs of state 'oldstate', change their - state to 'newstate' - """ - for p in self.peers: - if p.state == oldstate: - p.state = newstate - -def checkEndOfRound(self): - """ - Checks if we've hit the end of a query round. - If so, and if either: - - we've got some closer peers, OR - - we've heard from less than maxBucketSize peers, - fire off more queries - - Otherwise, return the best available - """ - peerTab = self.peerTab - - if core.fg: - set_trace() - - # has this query round ended? 
- if peerTab.count('start', 'queried') > 0: - # not yet - return - - self.log(2, "********** query round ended") - - # ------------------------------------ - # end of round processing - - self.numRounds += 1 - - # did we get any closer to required hash? - if self.type == 'findData' \ - or self.gotAnyCloser() \ - or peerTab.count('closest') < maxBucketSize: - - # yes - save these query results - self.log(4, "starting another round") - peerTab.changeState('replied', 'closest') - peerTab.changeState('recommended', 'start') - - # cull the shortlist - self.log(2, "culling to k peers") - if peerTab.count('closest') > maxBucketSize: - peerTab.sort() - excess = peerTab.select('closest')[maxBucketSize:] - excess.changeState('closest', 'toofar') - pass - - # and start up another round - self.sendSomeQueries() - - # did anything launch? - if peerTab.count('start', 'queried') == 0: - # no - we're screwed - self.returnTheBestWeGot() - - # done for now - return - - -def gotAnyCloser(self): - """ - Tests if any peer records in state 'recommended' or 'replied' - are nearer than the records in state 'closest' - """ - peerTab = self.peerTab - - # get current closest peers - closest = peerTab.select('closest') - - # if none yet, then this was just end of first round - if len(closest) == 0: - return True - - # get the peers we're considering - #candidates = peerTab.select(('recommended', 'replied')) - candidates = peerTab.select('recommended') - - # now test them - gotOneCloser = False - for c in candidates: - #if c.isCloserThanOneOf(closest): - if c.isCloserThanAllOf(closest): - return True - - # none were closer - return False - - -def doput(): - - n[0].put('roses', 'red') - - -def dump(self): - - c = self.count - self.log(2, - "PeerQueryTable stats:\n" - "start: %s\n" - "recommended: %s\n" - "queried: %s\n" - "replied: %s\n" - "timeout: %s\n" - "closest: %s\n" - "toofar: %s\n" - "TOTAL: %s\n" % ( - c('start'), - c('recommended'), - c('queried'), - c('replied'), - c('timeout'), - 
c('closest'), - c('toofar'), - len(self.peers))) - - #states = [p.state for p in self.peers] - #self.log(3, "PeerQueryTable states:\n%s" % states) - - -class KTestMap(stasher.KBase): - """ - Creates a random set of interconnections between nodes - in a test network - """ - path = "testnet.topology-%s" - - def __init__(self, numnodes): - - path = self.path % numnodes - if os.path.isfile(path): - self.load(numnodes) - return - - self.log(2, "Creating new test topology of %s nodes" % numnodes) - - self.refs = {} # key is nodenum, val is list of ref nodenums - self.numnodes = numnodes - i = 0 - for i in range(1, numnodes): - - # get a random number not equal to i - while 1: - ref = random.randrange(0, i) - if ref != i: - break - - # add it - self.refs[i] = ref - - # now point node 0 somewhere - self.refs[0] = random.randrange(1, i) - - # and save it - self.save() - - def __getitem__(self, idx): - """ - Returns the ref num for node index idx - """ - return self.refs[idx] - - def dump(self): - nodes = self.refs.keys() - nodes.sort() - for n in nodes: - print ("%2s -> %2s" % (n, self.refs[n])), - if (n + 1) % 8 == 0: - print - else: - print "|", - - def load(self, numnodes): - path = self.path % numnodes - encoded = file(path, "rb").read() - decoded = pickle.loads(encoded) - self.numnodes, self.refs = decoded - self.log(2, "Restored existing topology of %s nodes" % numnodes) - - def save(self): - path = self.path % self.numnodes - encoded = pickle.dumps((self.numnodes, self.refs)) - file(path, "wb").write(encoded) - - -def connect(self, topology=None): - """ - Connect these nodes together - """ - if topology: - self.map = topology - else: - self.map = KTestMap(len(self.nodes)) - - nodeIdxs = self.map.refs.keys() - nodeIdxs.sort() - - for idx in nodeIdxs: - #print "Node %s, adding ref to node %s" % (idx, self.map[idx]) - self[idx].addref(self[self.map[idx]]) - - -def purge(self): - - os.system("rm -rf ~/.i2pkademlia") - - -if __name__ == '__main__': - - main() - - -def 
__del__(self): - - pass - #KTestNetwork.aNetworkExists = False - - -def cycle(self): - - self.fg = True - self.threadRxPackets() - - -def findpath(self, i0, i1): - """ - Tries to find a path from idx0 to idx1, printing out - the nodes along the way - """ - def _findpath(self, idx0, idx1, tried): - #print "seeking path from %s to %s" % (idx0, idx1) - n0 = self.nodes[idx0] - n1 = self.nodes[idx1] - - n0peers = [self.getPeer(p) for p in n0.peers] - - n0peerIdxs = [self.nodes.index(p) for p in n0peers] - - possibles = [] - for idx in n0peerIdxs: - if idx == idx1: - # success - return [idx1] - if idx not in tried: - possibles.append(idx) - - if possibles == []: - return None - - #print " possibles = %s" % repr(possibles) - - for idx in possibles: - tried.append(idx) - res = _findpath(self, idx, idx1, tried) - if isinstance(res, list): - res.insert(0, idx) - return res - - return None - - res = _findpath(self, i0, i1, [i0]) - - if isinstance(res, list): - res.insert(0, i0) - - return res - -def testconnectivity(self): - """ - Ensures that every peer can reach every other peer - """ - nNodes = len(self.nodes) - - for i in range(nNodes): - for j in range(nNodes): - if i != j and not self.findpath(i, j): - print "No route from node %s to node %s" % (i, j) - return False - print "Every node can reach every other node" - return True - - -def getPeerIdx(self, peer): - - for i in range(len(self.nodes)): - n = self.nodes[i] - if n.dest == peer.dest: - return i - return None - - -def rawdistance(self, other): - """ - calculates the 'distance' between this hash and another hash, - and returns it raw as this xor other - """ - return self.value ^ other.value - - -def reportStats(self): - """ - Logs a stat dump of query outcome - """ - if self.isLocalOnly: - return - self.log(2, - "query terminated after %s rounds, %s queries, %s replies, %s recommendations" % ( - (self.numRounds+1), - self.numQueriesSent, - (self.numReplies+1), - self.numPeersRecommended - ) - ) - -@first #! 
/usr/bin/env python - -""" -Implements a simulated kademlia test network -""" - -@others - - - -@others - -if __name__ == '__main__': - - print "starting test" - pass - test() - - -import sys, os, Queue, pickle, time -from pdb import set_trace - -import stasher - - - -class KCore(stasher.KCore): - """ - Override kademlia core to use simulated I2P sockets - """ - def select(self, inlist, outlist, errlist, timeout): - - #print "dummy select" - return KTestSocket.select(inlist, outlist, errlist, timeout) - - -class KNode(stasher.KNode): - """ - Override kademlia node class to use simulated test socket - """ - SocketFactory = KTestSocket - - -# number of nodes to build in test network -numTestNodes = 100 - -stasher.logToSocket = 19199 - - -SocketFactory = None # defaults to I2P socket - - -def select(self, inlist, outlist, errlist, timeout): - - return i2p.select.select(inlist, outlist, errlist, timeout) - - -def main(): - """ - Command line interface - """ - global samAddr, clientAddr, logVerbosity, dataDir - - argv = sys.argv - argc = len(argv) - - try: - opts, args = getopt.getopt(sys.argv[1:], - "h?vV:S:C:sd:fl", - ['help', 'version', 'samaddr=', 'clientaddr=', - 'verbosity=', 'status', 'datadir=', 'foreground', - 'shortversion', 'localonly', - ]) - except: - traceback.print_exc(file=sys.stdout) - usage("You entered an invalid option") - - daemonise = True - verbosity = 2 - debug = False - foreground = False - localOnly = False - - for opt, val in opts: - - if opt in ['-h', '-?', '--help']: - usage(True) - - elif opt in ['-v', '--version']: - print "Stasher version %s" % version - sys.exit(0) - - elif opt in ['-V', '--verbosity']: - logVerbosity = int(val) - - elif opt in ['-f', '--foreground']: - foreground = True - - elif opt in ['-S', '--samaddr']: - samAddr = val - - elif opt in ['-C', '--clientaddr']: - clientAddr = val - - elif opt in ['-s', '--status']: - dumpStatus() - - elif opt in ['-d', '--datadir']: - dataDir = val - - elif opt == '--shortversion': - 
sys.stdout.write("%s" % version) - sys.stdout.flush() - sys.exit(0) - - elif opt in ['-l', '--localonly']: - localOnly = True - - #print "Debug - bailing" - #print repr(opts) - #print repr(args) - #sys.exit(0) - - # Barf if no command given - if len(args) == 0: - err("No command given") - usage(0, 1) - - cmd = args.pop(0) - argc = len(args) - - #print "cmd=%s, args=%s" % (repr(cmd), repr(args)) - - if cmd not in ['help', '_start', 'start', 'stop', - 'hello', 'get', 'put', 'addref', 'getref', - 'pingall']: - err("Illegal command '%s'" % cmd) - usage(0, 1) - - if cmd == 'help': - usage() - - # dirty hack - if foreground and cmd == 'start': - cmd = '_start' - - # magic undocumented command name - starts node, launches its client server, - # this should only happen if we're spawned from a 'start' command - if cmd == '_start': - if argc not in [0, 1]: - err("start: bad argument count") - usage() - if argc == 0: - nodeName = defaultNodename - else: - nodeName = args[0] - - # create and serve a node - #set_trace() - node = KNode(nodeName) - node.start() - log(3, "Node %s launched, dest = %s" % (node.name, node.dest)) - node.serve() - sys.exit(0) - - if cmd == 'start': - if argc not in [0, 1]: - err("start: bad argument count") - usage() - if argc == 0: - nodeName = defaultNodename - else: - nodeName = args[0] - pidFile = nodePidfile(nodeName) - - if os.path.exists(pidFile): - err(("Stasher node '%s' seems to be already running. 
If you are\n" % nodeName) - +"absolutely sure it's not running, please remove its pidfile:\n" - +pidFile+"\n") - sys.exit(1) - - # spawn off a node - import stasher - pid = spawnproc(sys.argv[0], "-S", samAddr, "-C", clientAddr, "_start", nodeName) - file(pidFile, "wb").write("%s" % pid) - print "Launched stasher node as pid %s" % pid - print "Pidfile is %s" % pidFile - sys.exit(0) - - if cmd == 'stop': - if argc not in [0, 1]: - err("stop: bad argument count") - usage() - if argc == 0: - nodeName = defaultNodename - else: - nodename = args[0] - - pidFile = nodePidfile(nodeName) - - if not os.path.isfile(pidFile): - err("Stasher node '%s' is not running - cannot kill\n" % nodeName) - sys.exit(1) - - pid = int(file(pidFile, "rb").read()) - try: - killproc(pid) - print "Killed stasher node (pid %s)" % pid - except: - print "Failed to kill node (pid %s)" % pid - os.unlink(pidFile) - sys.exit(0) - - try: - client = KNodeClient() - except: - traceback.print_exc() - err("Node doesn't seem to be up, or reachable on %s" % clientAddr) - return - - - if cmd == 'hello': - err("Node seems fine") - sys.exit(0) - - elif cmd == 'get': - if argc not in [1, 2]: - err("get: bad argument count") - usage() - - key = args[0] - - if argc == 2: - # try to open output file - path = args[1] - try: - outfile = file(path, "wb") - except: - err("Cannot open output file %s" % repr(path)) - usage(0, 1) - else: - outfile = sys.stdout - - if logVerbosity >= 3: - sys.stderr.write("Searching for key - may take up to %s seconds or more\n" % ( - timeout['findData'])) - res = client.get(key, local=localOnly) - if res == None: - err("Failed to retrieve '%s'" % key) - sys.exit(1) - else: - outfile.write(res) - outfile.flush() - outfile.close() - sys.exit(0) - - elif cmd == 'put': - if argc not in [1, 2]: - err("put: bad argument count") - usage() - - key = args[0] - - if argc == 2: - # try to open input file - path = args[1] - try: - infile = file(path, "rb") - except: - err("Cannot open input file %s" 
% repr(path)) - usage(0, 1) - else: - infile = sys.stdin - - val = infile.read() - if len(val) > maxValueSize: - err("File is too big - please trim to %s" % maxValueSize) - - if logVerbosity >= 3: - sys.stderr.write("Inserting key - may take up to %s seconds\n" % ( - timeout['findNode'] + timeout['store'])) - res = client.put(key, val, local=localOnly) - if res == None: - err("Failed to insert '%s'" % key) - sys.exit(1) - else: - sys.exit(0) - - elif cmd == 'addref': - if argc not in [0, 1]: - err("addref: bad argument count") - usage() - - if argc == 1: - # try to open input file - path = args[0] - try: - infile = file(path, "rb") - except: - err("Cannot open input file %s" % repr(path)) - usage(0, 1) - else: - infile = sys.stdin - - ref = infile.read() - - res = client.addref(ref) - if res == None: - err("Failed to add ref") - sys.exit(1) - else: - sys.exit(0) - - elif cmd == 'getref': - if argc not in [0, 1]: - err("getref: bad argument count") - usage() - - res = client.getref() - - if argc == 1: - # try to open output file - path = args[0] - try: - outfile = file(path, "wb") - except: - err("Cannot open output file %s" % repr(path)) - usage(0, 1) - else: - outfile = sys.stdout - - if res == None: - err("Failed to retrieve node ref") - sys.exit(1) - else: - outfile.write(res) - outfile.flush() - outfile.close() - sys.exit(0) - - elif cmd == 'pingall': - if logVerbosity > 2: - print "Pinging all peers, waiting %s seconds for results" % timeout['ping'] - res = client.pingall() - print res - sys.exit(0) - - -def test1(): - """ - A torturous test - """ - -def usage(detailed=False, ret=0): - - print "Usage: %s <options> [<command> [<ars>...]]" % sys.argv[0] - if not detailed: - print "Type %s -h for help" % sys.argv[0] - sys.exit(ret) - - print "This is stasher, distributed file storage network that runs" - print "atop the anonymising I2P network (http://www.i2p.net)" - print "Written by aum - August 2004" - print - print "Options:" - print " -h, --help - display 
this help" - print " -v, --version - print program version" - print " -V, --verbosity=n - verbosity, default 1, 1=quiet ... 4=noisy" - print " -S, --samaddr=host:port - host:port of I2P SAM port, " - print " default %s" % i2p.socket.samaddr - print " -C, --clientaddr=host:port - host:port for socket interface to listen on" - print " for clients, default %s" % clientAddr - print " -d, --datadir=dir - directory in which stasher files get written" - print " default is ~/.stasher" - print " -f, --foreground - only valid for 'start' cmd - runs the node" - print " in foreground without spawning - for debugging" - print " -l, --localonly - only valid for get/put - restricts the get/put" - print " operation to the local node only" - print - print "Commands:" - print " start [<nodename>]" - print " - launches a single node, which forks off and runs in background" - print " nodename is a short unique nodename, default is '%s'" % defaultNodename - print " stop [<nodename>]" - print " - terminates running node <nodename>" - print " get <keyname> [<file>]" - print " - attempts to retrieve key <keyname> from the network, saving" - print " to file <file> if given, or to stdout if not" - print " put <keyname> [<file>]" - print " - inserts key <keyname> into the network, taking its content" - print " from file <file> if given, otherwise reads content from stdin" - print " addref <file>" - print " - adds a new noderef to the node, taking the base64 noderef" - print " from file <file> if given, or from stdin" - print " (if you don't have any refs, visit http://stasher.i2p, or use" - print " the dest in the file aum.stasher in cvs)" - print " getref <file>" - print " - uplifts the running node's dest as base64, writing it to file" - print " <file> if given, or to stdout" - print " hello" - print " - checks that local node is running" - print " pingall" - print " - diagnostic tool - pings all peers, waits for replies or timeouts," - print " reports results" - print " help" - print " - 
display this help" - print - - sys.exit(0) - - - -class KNodeServer(KBase, SocketServer.ThreadingMixIn, SocketServer.TCPServer): - """ - Listens for incoming socket connections - """ - @others - -class KNodeReqHandler(KBase, SocketServer.StreamRequestHandler): - """ - Manages a single client connection - """ - @others - -def handle(self): - """ - Conducts all conversation for a single req - """ - req = self.request - client = self.client_address - server = self.server - node = self.server.node - - read = self.rfile.read - readline = self.rfile.readline - write = self.wfile.write - flush = self.wfile.flush - - finish = self.finish - - # start with a greeting - write("Stasher version %s ready\n" % version) - - # get the command - line = readline().strip() - - try: - cmd, args = re.split("\\s+", line, 1) - except: - cmd = line - args = '' - - self.log(3, "cmd=%s args=%s" % (repr(cmd), repr(args))) - - if cmd in ["get", "getlocal"]: - isLocal = cmd == "getlocal" - value = node.get(args, local=isLocal) - if value == None: - write("notfound\n") - else: - write("ok\n%s\n%s" % (len(value), value)) - flush() - time.sleep(2) - finish() - return - - elif cmd in ["put", "putlocal"]: - isLocal = cmd == "putlocal" - try: - size = int(readline()) - value = read(size) - res = node.put(args, value, local=isLocal) - if res: - write("ok\n") - else: - write("failed\n") - flush() - except: - traceback.print_exc() - write("exception\n") - finish() - return - - elif cmd == 'addref': - try: - res = node.addref(args, True) - if res: - write("ok\n") - else: - write("failed\n") - flush() - except: - traceback.print_exc() - write("exception\n") - finish() - return - - elif cmd == 'getref': - res = node.dest - write("ok\n") - write("%s\n" % res) - flush() - time.sleep(1) - finish() - return - - elif cmd == 'pingall': - res = node._pingall() - write(res+"\n") - finish() - return - - elif cmd == "die": - server.isRunning = False - write("server terminated\n") - finish() - - else: - 
write("unrecognisedcommand\n") - finish() - return - - -def __init__(self, node, addr=None): - - if addr == None: - addr = clientAddr - - self.isRunning = True - - self.node = node - - listenHost, listenPort = addr.split(":") - listenPort = int(listenPort) - self.listenPort = listenPort - SocketServer.TCPServer.__init__(self, (listenHost, listenPort), KNodeReqHandler) - - - -def serve(self): - """ - makes this node listen on socket for incoming client - connections, and services these connections - """ - server = KNodeServer(self) - server.serve_forever() - - -def serve_forever(self): - - print "awaiting client connections on port %s" % self.listenPort - while self.isRunning: - self.handle_request() - - -class KNodeClient(KBase): - """ - Talks to a KNodeServer over a socket - - Subclass this to implement Stasher clients in Python - """ - @others - - -def __init__(self, address=clientAddr): - - if type(address) in [type(()), type([])]: - self.host, self.port = clientAddr - else: - self.host, self.port = clientAddr.split(":") - self.port = int(self.port) - - self.hello() - - -def connect(self): - - self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.sock.connect((self.host, self.port)) - - self.rfile = self.sock.makefile("rb") - self.read = self.rfile.read - self.readline = self.rfile.readline - self.wfile = self.sock.makefile("wb") - self.write = self.wfile.write - self.flush = self.wfile.flush - - # read greeting - greeting = self.readline() - parts = re.split("\\s+", greeting) - if parts[0] != "Stasher": - self.close() - raise Exception("Not connected to valid stasher interface") - - -def get(self, key, **kw): - """ - sends a get command to stasher socket, and retrieves - and interprets result - - Arguments: - - key - key to retrieve - - Keywords: - - local - default False - if True, only looks in local storage - - Returns key's value if found, or None if key not found - """ - if kw.get('local', False): - cmd = 'getlocal' - else: - cmd = 'get' - - 
self.connect() - - self.write("%s %s\n" % (cmd, key)) - self.flush() - - #print "waiting for resp line" - res = self.readline().strip() - - if res == "ok": - size = int(self.readline()) - val = self.read(size) - self.close() - return val - else: - self.close() - return None - - -def close(self): - - self.rfile.close() - #self.wfile.close() - self.sock.close() - - -def hello(self): - - self.connect() - self.close() - -def put(self, key, val, **kw): - """ - Tells remote stasher port to insert a file into the network - - Arguments: - - key - key to insert under - - val - value to insert under this key - - Keywords: - - local - default False - if True, only looks in local storage - - """ - if kw.get('local', False): - cmd = 'putlocal' - else: - cmd = 'put' - - self.connect() - self.write("%s %s\n" % (cmd, key)) - self.write("%s\n" % len(val)) - self.write(val) - self.flush() - - res = self.readline().strip() - - self.close() - - if res == "ok": - return True - else: - print repr(res) - return False - - -def __getitem__(self, item): - - return self.get(item) - - -def __setitem__(self, item, val): - - if not self.put(item, val): - raise Exception("Failed to insert") - - -def kill(self): - """ - Tells remote server to fall on its sword - """ - try: - while 1: - self.connect() - self.write("die\n") - self.flush() - self.close() - except: - pass - - - -def finish(self): - - SocketServer.StreamRequestHandler.finish(self) - - -def err(msg): - sys.stderr.write(msg+"\n") - -def addref(self, ref): - """ - Passes a new noderef to node - """ - self.connect() - self.write("addref %s\n" % ref) - self.flush() - - res = self.readline().strip() - - self.close() - - if res == "ok": - return True - else: - print repr(res) - return False - - -def userI2PDir(nodeName=None): - """ - Returns a directory under user's home dir into which - stasher files can be written - - If nodename is given, a subdirectory will be found/created - - Return value is toplevel storage dir if nodename not given, 
- otherwise absolute path including node dir - """ - if dataDir != None: - if not os.path.isdir(dataDir): - os.makedirs(dataDir) - return dataDir - - if sys.platform == 'win32': - home = os.getenv("APPDATA") - if home: - topDir = os.path.join(home, "stasher") - else: - topDir = os.path.join(os.getcwd(), "stasher") - else: - #return os.path.dirname(__file__) - topDir = os.path.join(os.path.expanduser('~'), ".stasher") - - if not os.path.isdir(topDir): - os.makedirs(topDir) - if nodeName == None: - return topDir - else: - nodeDir = os.path.join(topDir, nodeName) - if not os.path.isdir(nodeDir): - os.makedirs(nodeDir) - return nodeDir - - -def nodePidfile(nodename): - return os.path.join(userI2PDir(nodename), "node.pid") - - -def spawnproc(*args, **kw): - """ - Spawns a process and returns its PID - - VOMIT! - - I have to do a pile of odious for the win32 side - - Returns a usable PID - - Keywords: - - priority - priority at which to spawn - default 20 (highest) - """ - # get priority, convert to a unix 'nice' value - priority = 20 - kw.get('priority', 20) - - if sys.platform != 'win32': - # *nix - easy - #print "spawnproc: launching %s" % repr(args) - - # insert nice invocation - args = ['/usr/bin/nice', '-n', str(priority)] + list(args) - return os.spawnv(os.P_NOWAIT, args[0], args) - - else: - # just close your eyes here and pretend this abomination isn't happening! 
:(( - args = list(args) - args.insert(0, sys.executable) - cmd = " ".join(args) - #print "spawnproc: launching %s" % repr(cmd) - - if 0: - try: - c = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) - c1 = _winreg.OpenKey(c, "SOFTWARE") - c2 = _winreg.OpenKey(c1, "Microsoft") - c3 = _winreg.OpenKey(c2, "Windows NT") - c4 = _winreg.OpenKey(c3, "CurrentVersion") - supportsBelowNormalPriority = 1 - except: - supportsBelowNormalPriority = 0 - else: - if sys.getwindowsversion()[3] != 2: - supportsBelowNormalPriority = 0 - else: - supportsBelowNormalPriority = 1 - - # frig the priority into a windows value - if supportsBelowNormalPriority: - if priority < 7: - pri = win32process.IDLE_PRIORITY_CLASS - elif priority < 14: - pri = 0x4000 - else: - pri = win32process.NORMAL_PRIORITY_CLASS - else: - if priority < 11: - pri = win32process.IDLE_PRIORITY_CLASS - else: - pri = win32process.NORMAL_PRIORITY_CLASS - - print "spawnproc: launching %s" % repr(args) - si = win32process.STARTUPINFO() - hdl = win32process.CreateProcess( - sys.executable, # lpApplicationName - cmd, # lpCommandLine - None, # lpProcessAttributes - None, # lpThreadAttributes - 0, # bInheritHandles - 0, # dwCreationFlags - None, # lpEnvironment - None, # lpCurrentDirectory - si, # lpStartupInfo - ) - pid = hdl[2] - #print "spawnproc: pid=%s" % pid - return pid - -def killproc(pid): - if sys.platform == 'win32': - print repr(pid) - handle = win32api.OpenProcess(1, 0, pid) - print "pid %s -> %s" % (pid, repr(handle)) - #return (0 != win32api.TerminateProcess(handle, 0)) - win32process.TerminateProcess(handle, 0) - else: - return os.kill(pid, signal.SIGKILL) - -def i2psocket(self, *args, **kw): - return i2p.socket.socket(*args, **kw) - - -def getref(self): - """ - Uplifts node's own ref - """ - self.connect() - self.write("getref\n") - self.flush() - - res = self.readline().strip() - - if res == "ok": - ref = self.readline().strip() - self.close() - return ref - else: - self.close() - return "failed" - - 
-@first #! /usr/bin/env python -@others - -@first #! /bin/sh - -export I2PPYDIR=/main/i2p/cvs/i2p/apps/stasher/python -export WEBDIR=/main/i2p/services/stasher.i2p -export CVSDIR=/main/i2p/cvs/i2p/apps/stasher/python - -cp README.txt $I2PPYDIR -cp stasher.py $I2PPYDIR/src -cp bencode.py $I2PPYDIR/src -cp code.leo $I2PPYDIR/src -cp *.stasher $I2PPYDIR/noderefs -cp *.stasher $WEBDIR/noderefs - -cp stasher $I2PPYDIR/scripts -cp stasher-launch.py $I2PPYDIR/scripts/stasher.py - -cp setup-stasher.py $I2PPYDIR/setup.py - -# generate API dco -epydoc -n "Stasher Python API" -o api stasher.py - -# make a release tarball - -rm -rf release/* -export STVERSION=`./stasher.py --shortversion` -export TARDIR=release/stasher-$STVERSION -export DIRNAME=stasher-$STVERSION -export TARBALLNAME=stasher.tar.gz -mkdir $TARDIR - -cp -a /main/i2p/cvs/i2p/apps/sam/python/i2p $TARDIR -cp -a code.leo stasher stasher.py bencode.py api $TARDIR -cp README-tarball.txt $TARDIR/README.txt - -mkdir $TARDIR/noderefs -cp *.stasher $TARDIR/noderefs - -cd release - -tar cfz $TARBALLNAME $DIRNAME - -# copy tarball and doco to websites -cp $TARBALLNAME $WEBDIR -cd .. -cp -a api $WEBDIR - -# last but not least, commit to cvs -cp stasher.py $CVSDIR/src -cp *.stasher $CVSDIR/noderefs -cd $CVSDIR -cvs commit - -@others - -@first #! /usr/bin/env python -# wrapper script to run stasher node - -# set this to the directory where you've installed stasher -stasherDir = "/path/to/my/stasher/dir" - -import sys -sys.path.append(stasherDir) -import stasher -stasher.main() - -@first #! /bin/sh -rm -rf /tmp/node1 -stasher -V4 -Slocalhost:7656 -Clocalhost:7659 -d/tmp/node1 _start node1 - - -@first #! /bin/sh -rm -rf /tmp/node2 -stasher -V4 -Slocalhost:17656 -Clocalhost:17659 -d/tmp/node2 _start node2 - - -@first #! /usr/bin/env python -""" -This is the installation script for Stasher, a distributed -file storage framework for I2P. 
-""" - -import sys, os -from distutils.core import setup - -oldcwd = os.getcwd() -os.chdir("src") - -if sys.platform == 'win32': - stasherScript = "..\\scripts\\stasher.py" -else: - stasherScript = "../scripts/stasher" - - -try: - import i2p - import i2p.socket - import i2p.select -except: - print "Sorry, but you don't seem to have the core I2P" - print "python library modules installed." - print "If you're installing from cvs, please go to" - print "i2p/apps/sam/python, become root, and type:" - print " python setup.py install" - print "Then, retry this installation." - sys.exit(1) - -setup(name="Stasher", - version="0.0", - description="Kademlia-based P2P distributed file storage app for I2P", - author="aum", - author_email="aum_i2p@hotmail.com", - url="http://stasher.i2p", - py_modules = ['stasher', 'bencode'], - scripts = [stasherScript], - ) - - - -@nocolor -STASHER README - -@others - -@nocolor -STASHER README - -@others - ------------------------ -INSTALLING STASHER - -Prerequisite: - -Before you can install/run Stasher, you will first need to have installed -the I2P Python modules - available in cvs at i2p/apps/sam/python. - -To install stasher, just make sure you've got the latest cvs, then type - python setup.py install -as root. - -This installs the stasher engine, plus a wrapper client script called -'stasher', which setup.py will install into your execution path. - -If you don't like the thought of becoming root, you could just put stasher.py -on your execution path, and/or create a symlink called 'stasher'. - -Test your installation by typing 'stasher -h' - this should display -a help message. - - ------------------------- -DOZE USERS PLEASE NOTE - -You'll need to watch and see where the stasher.py -wrapper script gets installed. On my box, it ends up on d:\python23\scripts, -but on your box it'll likely go into c:\python23\scripts. 
- -You may either update your system PATH environment variable to include your -python scripts directory, OR, you can copy stasher.py to anywhere that's -on your path. - -In the explanations below, note that wherever I say to type 'stasher', you'll -need to type 'stasher.py' instead. - - ------------------------- -WARNING - -This is a very early pre-alpha test version of stasher. -It is only capable of storing or retrieving files of -less than 29k in size. - -Also, files are totally insecure - anyone can overwrite any keys you -insert, and vice versa. - -I'll be adding support for CHK-like and SSK-like keys in due course. - - ------------------------- -USING STASHER - -To see stasher's options, type: - - stasher -h - -This should dump out a verbose help message. - -To start a stasher node, type: - - stasher start - -To shut down a stasher node, type: - - stasher stop - -To insert a file into stasher, type: - - stasher put mykey myfile - -Note, if you don't supply a filename, stasher can read -the file from standard input. - -To retrieve a file from stasher, type: - - stasher get mykey - - -INSTALLING STASHER FROM THE TARBALL - -For regular users: - - 1. Crack this tarball, and copy the 'stasher-n.n.n' directory - somewhere safe, such as your home directory - - 2. Edit the small 'stasher' script therein, and set the variable - 'stasherDir' as directed - - 3. Either put this directory onto your PATH, or create a symlink - called 'stasher' within any of your PATH dirs, pointing to - the stasher script - - 3. Test your installation by typing: - stasher -v - -For windows users: - - 1. Make sure you have python2.3 or later installed - - 2. Untar this directory, and copy the directory into - C:\Program Files, or wherever you like to put your appz - - 3. Wherever you put the directory, add that to your system-wide - PATH environment variable - - 4. Test your installation by opening up an ugly black DOS window, - and typing: - stasher.py -v - - 5. 
Note - in the USAGE directions below, instead of typing 'stasher', - you'll need to type 'stasher.py' - - - -class KRpcPingAll(KRpc): - """ - Pings all peers - """ - @others - -type = 'pingall' - -def __init__(self, localNode, client=None, **kw): - """ - Creates and launches a PINGALL rpc - - Arguments: - - localNode - the node performing this RPC - - client - see KRpc.__init__ - - Keywords: none - """ - if kw.has_key('cbArgs'): - KRpc.__init__(self, localNode, client, cbArgs=kw['cbArgs']) - else: - KRpc.__init__(self, localNode, client) - - -def start(self): - """ - Kicks off this RPC - """ - # launch a findNode rpc against each of our peers - peers = self.localNode.peers - self.numSent = self.numPending = len(peers) - self.numReplied = self.numFailed = 0 - for peer in peers: - KRpcPing(self.localNode, self.on_reply, peer=peer) - return - - -def returnValue(self, result): - """ - an override with a nicer call sig - """ - # a hack for testing - save this RPC object into the node - # so we can introspect it - self.localNode.lastrpc = self - - try: - KRpc.returnValue(self, result, status=result) - except: - traceback.print_exc() - self.log(3, "Failed to return %s" % repr(result)) - KRpc.returnValue(self, 0, status=0) - - -def on_reply(self, result): - """ - callback which fires when we get a reply from a STORE we sent to a - peer - """ - log(3, "got %s" % repr(result)) - - if result: - self.numReplied += 1 - else: - self.numFailed += 1 - self.numPending -= 1 - - if self.numPending <= 0: - res = "pinged:%s replied:%s timeout:%s" % ( - self.numSent, self.numReplied, self.numFailed) - self.log(3, res) - self.returnValue(res) - - -def on_tick(self): - - self.log(3, "this shouldn't have happened") - self.returnValue(False) - - -def _pingall(self, callback=None): - """ - Sends a ping to all peers, returns text string on replies/failures - """ - if callback: - KRpcPingAll(self, callback, **kw) - else: - return KRpcPingAll(self).execute() - - - -def pingall(self): - """ - 
Uplifts node's own ref - """ - self.connect() - self.write("pingall\n") - self.flush() - - res = self.readline().strip() - - self.close() - - return res - - - -def returnValue(self, items): - """ - override with a nicer call sig - """ - # a hack for testing - save this RPC object into the node - # so we can introspect it - self.localNode.lastrpc = self - - # another debugging hack - self.reportStats() - - KRpc.returnValue(self, items, result=items) - - -def storeSplit(self): - """ - Gets called if we're splitting a big file into smaller chunks - - Here, we: - - break the file up into chunks - - build a manifest - - launch store RPCs to store each chunk, where the key is SHA(chunk) - - launch a store RPC to store the 'manifest' (noting that if the manifest - is too big, it'll get recursively inserted as a splitfile as well - """ - # break up into chunks - chunks = [] - hashes = [] - size = len(self.value) - i = 0 - self.nchunks = 0 - while i < size: - chunks.append(self.value[i:i+maxValueSize]) - hashes.append(shahash(chunks[-1])) - i += maxValueSize - self.nchunks += 1 - - # build the manifest - manifest = "chunks:%s\n%s\n" % (self.nchunks, "\n".join(hashes)) - - # set progress attributes - self.chunkManifestInserted = False - self.chunksInserted = 0 - - # launch nested Store RPCs for manifest, and each chunk - KRpcStore(self.localNode, self.on_doneChunkManifest, - local=self.isLocalOnly, - key=self.key, - value=manifest) - i = 0 - while i < self.nchunks: - KRpcStore(self.localNode, self.on_doneChunk, - local=self.isLocalOnly, - key=hashes[i], - value=chunks[i]) - i += 1 - - # now sit back and wait for the callbacks - -def on_doneChunkManifest(self, result): - """ - Callback which fires when a manifest insert succeeds/fails - """ - # the chunk callback handles all - self.on_doneChunk(result, isManifest=True) - -def on_doneChunk(self, result, isManifest=False): - """ - Callback which fires when a single chunk insert succeeds/fails - """ - # a failure either way 
means the whole RPC has failed - if not result: - # one huge fuck-up - self.returnValue(False) - return - - # update our tally - if isManifest: - self.chunkManifestInserted = True - else: - self.chunksInserted += 1 - - # finished? - if self.chunkManifestInserted and (self.chunksInserted == self.nchunks): - # yep = success - self.returnValue(True) - - -def on_gotValue(self, value, hash=None): - """ - Callback which fires when we get the value stored under a key - - Value is either the real value, or a splitfile manifest - If a real value, just return it. - If a splitfile manifest, launch nested findValue RPCs to get each chunk - """ - nchunks = 0 - try: - firstline, rest = value.split("\n", 1) - firstline = firstline.strip() - kwd, str_nchunks = firstline.split(":") - if kwd != 'chunks': - raise hell - nchunks = int(nchunks) - value = rest - except: - pass # in this case, hell hath no fury at all - - if nchunks == 0: - self.returnValue(value) - return - - # now we get to the hard bit - we have to set up nested findData RPCs to - # get all the chunks and reassemble them - hashes = rest.strip().split("\n") - - # do sanity checks - hashesAllValid = [len(h) == 40 for h in hashes] - if len(hashes) != nchunks: - self.log( - 2, - "Splitfile retrieval failure\nmanifest contains %s hashes, should have been %s" % ( - len(hashes), nchunks)) - self.returnValue(None) - if False in hashesAllValid: - self.log(2, "Splitfile retrieval failure - one or more invalid hashes") - - # now this is a bit weird - we need to bind each chunk to its hash, so we create a - # class which produces callables which fire our on_gotChunk callback - class ChunkNotifier: - def __init__(me, h, cb): - me.h = h - me.cb = cb - def __call__(me, val): - me.cb(me.h, val) - - # now launch the chunk retrieval RPCs - # result is that for each retrieved chunk, our on_gotChunk callback will - # be invoked with the arguments (hash, value), so we can tick them off - self.numChunks = nchunks - self.numChunksReceived = 
0 - self.chunkHashes = hashes - self.chunks = dict.fromkeys(hashes) - for h in hashes: - KRpcFindData(self.localNode, h, ChunkNotifier(h, self.on_gotChunk)) - - # now, we can sit back and receive the chunks - - -def on_gotChunk(self, hexhash, value): - """ - Callback which fires when a nested chunk findNode returns - """ - if value == None: - self.log(2, "Chunk retrieval failed, fatal to this findData") - self.returnValue(None) - return - - # got a value - vet it against hash - if shahash(value) != hexhash: - self.log(2, "Got a chunk, but it doesn't hash right - fatal to this findData") - self.returnValue(None) - return - - # it's valid - stash it - self.chunks[hexhash] = value - self.numChunksReceived += 1 - - # have we finished yet? - if self.numChunksReceived <= self.numChunks: - # no - self.log(4, "Received chunk %s of %s" % (self.numChunksReceived, self.numChunks)) - return - - # maybe we have - self.log(4, "We appear to have all chunks, checking further") - - # sanity check - if None in self.chunks.values(): - self.log(2, "Fatal - reached chunk count, but chunks still missing") - self.returnValue(None) - return - - # finally done - got all chunks, hashes are valid, reassemble in order - allChunks = [self.chunks[h] for h in self.chunkHashes] - reassembled = "".join(allChunks) - self.log(4, "Reassembled all %s chunks, SUCCESS" % self.numChunks) - self.returnValue(reassembled) - - - - diff --git a/apps/stasher/python/src/stasher.py b/apps/stasher/python/src/stasher.py deleted file mode 100644 index d007282a2..000000000 --- a/apps/stasher/python/src/stasher.py +++ /dev/null @@ -1,4416 +0,0 @@ -#! /usr/bin/env python -#@+leo-ver=4 -#@+node:@file stasher.py -#@@first -""" -A simple implementation of the -U{Kademlia} -P2P distributed storage and retrieval protocol, designed to -utilise the U{I2P} stealth network as its transport. 
- -Most application developers will only need to know about the L{KNode} class -""" - -# I strongly recommend that when editing this file, you use the Leo -# outlining and literate programming editor - http://leo.sf.net -# If Leo doesn't agree with your religion, please try to leave the markups intact - -#@+others -#@+node:explanatory comments -#@+at -# Tech overview: -# - this implementation creates each Node ID as an SHA1 hash of -# the node's 'destination' - the string which constitutes its -# address as an I2P endpoint. -# -# Datagram formats: -# - each datagram sent from one node to another is a python dict object, -# encoded and decoded with the 'bencode' object serialisation module. -# - we use bencode because regular Python pickle is highly insecure, -# allowing crackers to create malformed pickles which can have all -# manner of detrimental effects, including execution of arbitrary code. -# - the possible messages are listed below, along with their consituent -# dictionary keys: -# 1. ping: -# - msgId - a message identifier guaranteed to be unique -# with respect to the sending node -# 2. findNode: -# - msgId - unique message identifier -# - hash - the hash we're looking for -# - initiator - True/False, according to whether this node -# should initiate/perform the findNode, or whether this -# rpc is coming from another seeking node -# 3. findData: -# - msgId - unique message identifier -# - hash - the exact key hash of the data we want to retrieve -# - initiator - True/False, according to whether this node -# should initiate/perform the findNode, or whether this -# rpc is coming from another seeking node -# 4. store: -# - msgId - unique message identifier -# - hash - the exact key hash of the data we want to store -# - data - the data we want to store -# 5. reply: -# - msgId - the original msgId we're replying to -# The other items in a reply message depend on what kind -# of message we're replying to, listed below: -# 1. ping - no additional data -# 2. 
findNode: -# - nodes - a list of dests nearest the given hash -# 3. findData: -# - nodes - as for findNode, OR -# - data - the retrieved data, or None if not found -# 4. store: -# - status - True or False according to whether -# the store operation was successful -#@-at -#@-node:explanatory comments -#@+node:imports -import sys, os, types, sha, random, threading, thread, traceback, Queue -import time, math, random, pickle, getopt, re -import signal - -# some windows-specifics (yggghh) -if sys.platform == 'win32': - try: - import win32api - import win32process - import _winreg - except: - print "Python win32 extensions not installed." - print "Please go to http://sourceforge.net/project/showfiles.php?group_id=78018" - print "and download/install the file pywin32-202.win32-py%s.%s.exe" % \ - sys.version_info[:2] - sys.exit(1) - -from StringIO import StringIO -from pdb import set_trace - -try: - import bencode -except: - print "The bencode module is missing from your python installation." - print "Are you sure you installed Stasher correctly?" - sys.exit(1) - -try: - import i2p.socket - import i2p.select - import i2p.pylib - SocketServer = i2p.pylib.SocketServer - socket = i2p.pylib.socket -except: - print "You don't appear to have the I2P Python modules installed." - print "Not good. Stasher totally needs them." 
- print "Please to to i2p/apps/sam/python in your I2P cvs tree, and" - print "install the core I2P python modules first" - sys.exit(1) - -#@-node:imports -#@+node:constants - -# -------------------------------------------- -# START USER-CONFIGURABLE CONSTANTS -# -------------------------------------------- - -# host:port to connect to I2P SAM Bridge -samAddr = i2p.socket.samaddr - -# host:port to listen on for command line client -clientAddr = "127.0.0.1:7659" - -defaultNodename = "0" # will be prefixed by 'stashernode' - -# maximum size of each stored item -maxValueSize = 30000 - -# maximum number of noderefs that can be stored in a bucket -# (refer spec section 2.1, first paragraph) -maxBucketSize = 20 - -# number of peers to return from a search -numSearchPeers = 3 - -# maximum number of concurrent queries per findnode/finddata rpc -maxConcurrentQueries = 10 - -# number of peers to store onto -numStorePeers = 10 - -# Logger settings -logFile = None -logVerbosity = 2 - -# data directory location - set to a path to override the default -# which is the user's home dir -dataDir = None - -# whether a node, on startup, should do a findnode on itself to -# locate its closest neighbours -greetPeersOnStartup = False -#greetPeersOnStartup = True - -# multi-purpose testing flag -testing = False -#testing = True - -tunnelDepth = 0 - -# set to True to enable single handler thread that manages all nodes, -# or False to make each node run its own handler thread -#runCore = False -runCore = True - -# timeouts - calibrate as needed -timeout = { - 'ping' : 120, - 'findNode' : 120, - 'findData' : 120, - 'store' : 120, - } - -logToSocket = None - -desperatelyDebugging = False - -if desperatelyDebugging: - runCoreInBackground = False -else: - runCoreInBackground = True - -# -------------------------------------------- -# END OF USER-CONFIGURABLE CONSTANTS -# -------------------------------------------- - -# ---------------------------------------------- -# hack anything below this 
line at your own risk - -#@-node:constants -#@+node:globals -# keep a dict of existing nodes, so we can prevent -# client progs from creating 2 nodes of the same name -_nodes = {} - -version = "0.0.1" - -#@-node:globals -#@+node:Exceptions -# define our exceptions - -class KValueTooLarge(Exception): - """ - Trying to insert a value of excessive size into the network. - Maximum key size is L{maxValueSize} - """ - -class KBadHash(Exception): - """ - Invalid hash string - """ - -class KNotImplemented(Exception): - """ - A required method was not implemented - """ - -class KBadNode(Exception): - """ - Invalid Node object - """ - -class KBadPeer(Exception): - """ - Invalid Peer object - should be a KPeer - """ - -class KBadDest(Exception): - """Invalid I2P Node Dest""" - -#@-node:Exceptions -#@+node:Mixins -#@+node:class KBase -class KBase: - """ - A mixin which adds a class-specific logger - """ - def log(self, verbosity, msg): - - log(verbosity, msg, 1, self.__class__.__name__) - - def logexc(self, verbosity, msg): - - logexc(verbosity, msg, 1, self.__class__.__name__) - -#@-node:class KBase -#@-node:Mixins -#@+node:Main Engine -#@+node:class KCore -class KCore(KBase): - """ - Singleton class which performs all the needed background processing. - - By scheduling all processing through this object, we eliminate the - need to create threads on a per-node basis, and also make this thing - far easier to debug. - - The core launches only two background threads: - - L{threadRxPackets} - listen for incoming packets bound for - any node running within a single process - - L{threadHousekeeping} - periodically invoke maintenance methods - of each node, so the node can check for timeout conditions and - other untoward happenings - - These threads start up when the first node in this process is created, - and stop when the last node ceases to exist. - - Upon first import, the L{stasher} module creates one instance of this - class. 
Upon creation, L{KNode} objects register themselves with this core. - """ - #@ @+others - #@+node:attributes - #@-node:attributes - #@+node:__init__ - def __init__(self, bg=True): - """ - Creates the I2P Kademlia core object - """ - self.bg = bg - self.fg = False - - # subscribed nodes - self.nodes = [] - #self.nodesLock = threading.Lock() - - self.isRunning = False - self.isRunning_rx = False - - #@-node:__init__ - #@+node:subscribe - def subscribe(self, node): - """ - Called by a node to 'subscribe' for background processing - If this is the first node, starts the handler thread - """ - #self.nodesLock.acquire() - try: - nodes = self.nodes - - if node in nodes: - self.log(2, "duhhh! node already subscribed" % repr(node)) - return - - nodes.append(node) - - if not self.isRunning: - self.isRunning = True - if self.bg and not self.fg: - self.log(3, "First node subscribing, launching threads") - thread.start_new_thread(self.threadRxPackets, ()) - thread.start_new_thread(self.threadHousekeeping, ()) - except: - traceback.print_exc() - self.log(2, "exception") - - #self.nodesLock.release() - - #@-node:subscribe - #@+node:unsubscribe - def unsubscribe(self, node): - """ - Unsubscribes a node from the core - - If this was the last node, stops the handler thread - """ - #self.nodesLock.acquire() - try: - nodes = self.nodes - - if node not in nodes: - self.log(4, "duhhh! node %s was not subscribed" % repr(node)) - return - - self.log(2, "trying to unsubscribe node %s" % node.name) - nodes.remove(node) - - if len(nodes) == 0: - self.isRunning = False - except: - traceback.print_exc() - self.log(2, "exception") - - #self.nodesLock.release() - - #@-node:unsubscribe - #@+node:threadRxPackets - def threadRxPackets(self): - """ - Sits on a select() loop, processing incoming datagrams - and actioning them appropriately. 
- """ - self.isRunning_rx = True - self.log(3, "KCore packet receiver thread running") - try: - while self.isRunning: - socks = [node.sock for node in self.nodes] - if desperatelyDebugging: - set_trace() - try: - inlist, outlist, errlist = self.select(socks, [], [], 1) - except KeyboardInterrupt: - self.isRunning = 0 - return - - self.log(5, "\ninlist=%s" % repr(inlist)) - if inlist: - self.log(5, "got one or more sockets with inbound data") - #self.nodesLock.acquire() - for sock in inlist: - node = self.nodeWhichOwnsSock(sock) - if node != None: - node._doRx() - #self.nodesLock.release() - - elif self.fg: - return - - else: - time.sleep(0.1) - except: - #self.nodesLock.release() - traceback.print_exc() - self.log(1, "core handler thread crashed") - self.isRunning_rx = False - self.log(3, "core handler thread terminated") - - #@-node:threadRxPackets - #@+node:threadHousekeeping - def threadHousekeeping(self): - """ - Periodically invoke nodes' housekeeping - """ - self.log(3, "\nnode housekeeping thread running") - try: - while self.isRunning: - #self.log(4, "calling nodes' housekeeping methods") - #self.nodesLock.acquire() - for node in self.nodes: - node._doHousekeeping() - #self.nodesLock.release() - time.sleep(1) - self.log(3, "\nnode housekeeping thread terminated") - except: - #self.nodesLock.release() - traceback.print_exc() - self.log(1, "\nnode housekeeping thread crashed") - - #@-node:threadHousekeeping - #@+node:nodeWhichOwnsSock - def nodeWhichOwnsSock(self, sock): - """ - returns ref to node which owns a socket - """ - for node in self.nodes: - if node.sock == sock: - return node - return None - #@-node:nodeWhichOwnsSock - #@+node:cycle - def cycle(self): - - self.fg = True - self.threadRxPackets() - - #@-node:cycle - #@+node:run - def run(self, func=None): - """ - Runs the core in foreground, with the client func in background - """ - if func==None: - func = test - - self.bg = False - - thread.start_new_thread(self.runClient, (func,)) - - set_trace() 
- - self.threadRxPackets() - - #@-node:run - #@+node:stop - def stop(self): - self.isRunning = False - - #@-node:stop - #@+node:runClient - def runClient(self, func): - - self.log(3, "Core: running client func") - try: - func() - except: - traceback.print_exc() - self.log(3, "Core: client func exited") - self.stop() - #@-node:runClient - #@+node:select - def select(self, inlist, outlist, errlist, timeout): - - return i2p.select.select(inlist, outlist, errlist, timeout) - - #@-node:select - #@-others - -#@-node:class KCore -#@+node:create instance -# create an instance of _KCore -core = KCore() - -#@-node:create instance -#@-node:Main Engine -#@+node:Basic Classes -#@+node:Node-local Storage -#@+node:class KStorageBase -class KStorageBase(KBase): - """ - Base class for node storage objects - - This needs to be overridden by implementation-specific - solutions. - """ - #@ @+others - #@+node:__init__ - def __init__(self, node, *args, **kw): - """ - Override this method - - First argument should be a node instance - """ - raise KNotImplemented - - #@-node:__init__ - #@+node:putRefs - def putRefs(self, *refs): - """ - Saves one or more noderefs - - Arguments: - - zero or more KPeer objects, or lists or tuples of objects - """ - raise KNotImplemented - #@-node:putRefs - #@+node:getRefs - def getRefs(self): - """ - Returns a list of KPeer objects, comprising refs - of peers known to this node - """ - raise KNotImplemented - - #@-node:getRefs - #@+node:putKey - def putKey(self, key, value): - """ - Stores value, a string, into the local storage - under key 'key' - """ - raise KNotImplemented - - #@-node:putKey - #@+node:getKey - def getKey(self, key): - """ - Attempts to retrieve item from node's local, which was - stored with key 'key'. 
- - Returns value as a string if found, or None if not present - """ - raise KNotImplemented - #@-node:getKey - #@+node:private methods - #@+others - #@+node:_expandRefsList - def _expandRefsList(self, args, lst=None): - """ - Takes a sequence of args, each of which can be a KPeer - object, or a list or tuple of KPeer objects, and expands - this into a flat list - """ - if lst == None: - lst = [] - for item in args: - if type(item) in [type(()), type([])]: - self._expandRefsList(item, lst) - else: - lst.append(item) - return lst - - #@-node:_expandRefsList - #@-others - #@-node:private methods - #@-others -#@-node:class KStorageBase -#@+node:class KStorageFile -class KStorageFile(KStorageBase): - """ - Implements node-local storage, using the local filesystem, - with the following hierarchy: - - - HOME ( ~ in linux, some other shit for windows) - - .i2pkademlia - - - - noderefs - - - - contains node dest, and other shit - - ... - - keys - - - - contains raw key value - - ... - - This is one ugly sukka, perhaps a db4, mysql etc implementation - would be better. 
- """ - #@ @+others - #@+node:__init__ - def __init__(self, node, storeDir=None): - """ - Creates a persistent storage object for node - 'nodeName', based at directory 'storeDir' (default - is nodeDir - """ - self.node = node - self.nodeName = node.name - - if storeDir == None: - # work out local directory - self.topDir = userI2PDir() - - # add node dir and subdirs - self.nodeDir = userI2PDir(self.nodeName) - - self.refsDir = os.path.join(self.nodeDir, "noderefs") - if not os.path.isdir(self.refsDir): - os.makedirs(self.refsDir) - - self.keysDir = os.path.join(self.nodeDir, "keys") - if not os.path.isdir(self.keysDir): - os.makedirs(self.keysDir) - - #@-node:__init__ - #@+node:putRefs - def putRefs(self, *args): - """ - Saves one or more noderefs into filesystem - - Arguments: - - zero or more KPeer objects, or lists or tuples of objects - """ - lst = self._expandRefsList(args) - for item in lst: - b64hash = shahash(item.dest) - itemPath = os.path.join(self.refsDir, b64hash) - itemDict = {'dest':item.dest} # might need to expand later - itemPickle = bencode.bencode(itemDict) - file(itemPath, "wb").write(itemPickle) - pass - #@-node:putRefs - #@+node:getRefs - def getRefs(self): - """ - Returns a list of KPeer objects, comprising refs - of peers known to this node - - These are read from the directory self.refsDir. 
- Any that can't be unpickled and instantiated are dropped, but logged - """ - peers = [] - for f in os.listdir(self.refsDir): - - path = os.path.join(self.refsDir, f) - pickled = file(path, "rb").read() - try: - d = bencode.bdecode(pickled) - except: - self.log(3, "node %s, bad pickle ref file %s" % ( - self.nodeName, f)) - continue - - # instantiate a peer object - try: - peer = KPeer(self.node, d['dest']) - except: - self.log(3, "node %s, bad unpickled ref file %s" % ( - self.nodeName, f)) - continue - - # success - peers.append(peer) - - return peers - - #@-node:getRefs - #@+node:putKey - def putKey(self, key, val, keyIsHashed=False): - """ - Stores a string into this storage under the key 'key' - - Returns True if key was saved successfully, False if not - """ - try: - if keyIsHashed: - keyHashed = key - else: - keyHashed = shahash(key) - keyHashed = keyHashed.lower() - keyPath = os.path.join(self.keysDir, keyHashed) - file(keyPath, "wb").write(val) - self.log(4, "stored key: '%s'\nunder hash '%s'\n(keyIsHashed=%s)" % ( - key, keyHashed, keyIsHashed)) - return True - except: - traceback.print_exc() - self.log(3, "failed to store key") - return False - - #@-node:putKey - #@+node:getKey - def getKey(self, key, keyIsHashed=False): - """ - Attempts to retrieve item from node's local file storage, which was - stored with key 'key'. 
- - Returns value as a string if found, or None if not present - """ - try: - if keyIsHashed: - keyHashed = key - else: - keyHashed = shahash(key) - - keyHashed = keyHashed.lower() - self.log(4, "key=%s, keyHashed=%s, keyIsHashed=%s" % (key, keyHashed, keyIsHashed)) - - keyPath = os.path.join(self.keysDir, keyHashed) - - if os.path.isfile(keyPath): - return file(keyPath, "rb").read() - else: - return None - except: - traceback.print_exc() - self.log(3, "error retrieving key '%s'" % key) - return None - - #@-node:getKey - #@-others -#@-node:class KStorageFile -#@-node:Node-local Storage -#@+node:class KHash -class KHash(KBase): - """ - Wraps 160-bit hashes as abstract objects, on which - operations such as xor, <, >, etc can be performed. - - Kademlia node ids and keys are held as objects - of this class. - - Internally, hashes are stored as python long ints - """ - #@ @+others - #@+node:__init__ - def __init__(self, val=None, **kw): - """ - Create a new hash object. - - val can be one of the following: - - None (default) - a random value will be created - - long int - this will be used as the raw hash - - string - the string will be hashed and stored - - another KHash object - its value will be taken - - a KNode or KPeer object - its hash will be taken - - If val is not given, a raw hash value can be passed in - with the keyword 'raw'. 
Such value must be a python long int - or a 20-char string - """ - self.value = 0L - if val: - if isinstance(val, KHash): - self.value = val.value - elif type(val) in [type(0), type(0L)]: - self.value = long(val) - elif isinstance(val, KNode) or isinstance(val, KPeer): - self.value = val.id.value - else: - raw = self.raw = shahash(val, bin=1) - for c in raw: - self.value = self.value * 256 + ord(c) - else: - rawval = kw.get('raw', None) - if rawval == None: - # generate random - random.seed() - for i in range(20): - self.value = self.value * 256 + random.randint(0, 256) - elif type(rawval) in [type(0), type(0L)]: - self.value = long(rawval) - elif type(rawval) == type(""): - if len(rawval) == 20: - for i in rawval: - self.value = self.value * 256 + ord(i) - elif len(rawval) == 40: - try: - self.value = long(rawval, 16) - except: - raise KBadHash(rawval) - else: - raise KBadHash(rawval) - else: - print "rawval=%s %s %s" % (type(rawval), rawval.__class__, repr(rawval)) - raise KBadHash(rawval) - - #@-node:__init__ - #@+node:__str__ - def __str__(self): - return "" % self.value - - def __repr__(self): - return str(self) - - #@-node:__str__ - #@+node:asHex - def asHex(self): - return ("%040x" % self.value).lower() - - #@-node:asHex - #@+node:distance - def distance(self, other): - """ - calculates the 'distance' between this hash and another hash, - and returns it as i (where distance = 2^i, and 0 <= i < 160) - """ - - #log(4, "comparing: %s\nwith %s" % (self.value, other.value)) - - rawdistance = self.value ^ other.value - if not rawdistance: - return 0 - - return int(math.log(rawdistance, 2)) - - #@-node:distance - #@+node:rawdistance - def rawdistance(self, other): - """ - calculates the 'distance' between this hash and another hash, - and returns it raw as this xor other - """ - return self.value ^ other.value - - #@-node:rawdistance - #@+node:operators - def __eq__(self, other): - #log(2, "KHash: comparing %s to %s" % (self, other)) - res = self.value == 
getattr(other, 'value', None) - #self.log(2, "KHash: res = %s" % repr(res)) - return res - - def __ne__(self, other): - return not (self == other) - - def __lt__(self, other): - return self.value < other.value - - def __gt__(self, other): - return self.value > other.value - - def __le__(self, other): - return self.value <= other.value - - def __ge__(self, other): - return self.value >= other.value - - def __ne__(self, other): - return self.value != other.value - - def __xor__(self, other): - return self.value ^ other.value - - #@-node:operators - #@-others -#@-node:class KHash -#@+node:class KBucket -class KBucket(KBase): - """ - Implements the 'k-bucket' object as required in Kademlia spec - """ - #@ @+others - #@+node:__init__ - def __init__(self): - """ - Creates a single k-bucket - """ - # list of known nodes - # order is least recently seen at head, most recently seen at tail - self.nodes = [] - - # list of death-row records - # refer spec section 2.1, paragraph 2 - # we deviate a little: - # when we hear from a new peer, and the bucket is full, - # we temporarily displace the old peer, and stick the new - # peer at end of list, then send out a ping - # If we hear from the old peer within a reasonable time, - # the new peer gets evicted and replaced with the old peer - # - # this list holds 2-tuples (oldpeer, newpeer), where - # oldpeer is the least-recently-seen peer that we displaced, and - # newpeer is the new peer we just heard from. 
- self.deathrow = [] - - #@-node:__init__ - #@+node:justSeenPeer - def justSeenPeer(self, peer): - """ - Tells the bucket that we've just seen a given node - """ - nodes = self.nodes - - if not isinstance(peer, KPeer): - raise KBadNode - - try: - idx = nodes.index(peer) - except: - idx = -1 - if idx >= 0: - del nodes[idx] - nodes.append(peer) - else: - nodes.append(peer) - - # might at some time need to implement death-row logic - # when we set a bucket size limit - refer __init__ - #@-node:justSeenPeer - #@+node:__iter__ - def __iter__(self): - return iter(self.nodes) - #@-node:__iter__ - #@-others -#@-node:class KBucket -#@+node:class KPeer -class KPeer(KBase): - """ - Encapsulates a peer node of a L{KNode}, - storing its ID and contact info - """ - #@ @+others - #@+node:__init__ - def __init__(self, node, dest): - """ - Create a ref to a kademlia peer node - - Arguments: - - node - reference to node which has the relationship - to this peer - - dest - the peer's I2P destination, as base64 - """ - if not isinstance(node, KNode): - raise KBadNode(node) - if not isinstance(dest, str): - raise KBadDest(dest) - - self.node = node - self.dest = dest - self.id = KHash(dest) - - self.justSeen() - - #@-node:__init__ - #@+node:send_ping - def send_ping(self, **kw): - """ - Sends a ping to remote peer - """ - self.send_raw(type="ping", **kw) - #@-node:send_ping - #@+node:send_store - def send_store(self, **kw): - """ - sends a store command to peer - """ - self.log(4, "\npeer %s\ndest %s...\nsending store cmd: %s" % (self, self.dest[:12], repr(kw))) - - self.send_raw(type="store", **kw) - - #@-node:send_store - #@+node:send_findNode - def send_findNode(self, hash, **kw): - """ - sends a findNode command to peer - """ - if not isinstance(hash, KHash): - raise KBadHash - - self.log(5, "\nquerying peer %s\ntarget hash %s" % (self, hash)) - - self.send_raw(type="findNode", hash=hash.value, **kw) - - #@-node:send_findNode - #@+node:send_findData - def send_findData(self, hash, 
**kw): - """ - sends a findData command to peer - """ - if not isinstance(hash, KHash): - raise KBadHash - - self.log(5, "\nquerying peer %s\ntarget hash %s" % (self, hash)) - - self.send_raw(type="findData", hash=hash.value, **kw) - - #@-node:send_findData - #@+node:send_reply - def send_reply(self, **kw): - """ - Sends an RPC reply back to upstream peer - """ - self.log(5, "\nnode %s\nreplying to peer %s:\n%s" % ( - self.node, self, kw)) - self.send_raw(type="reply", **kw) - - #@-node:send_reply - #@+node:send_raw - def send_raw(self, **kw): - """ - Sends a raw datagram to peer - - No arguments - just keywords, all of which must be strings or - other objects which can be bencoded - """ - self.node._sendRaw(self, **kw) - #@-node:send_raw - #@+node:justSeen - def justSeen(self): - self.timeLastSeen = time.time() - - #@-node:justSeen - #@+node:lowlevel - #@+others - #@+node:__str__ - def __str__(self): - - return "0x%s... dest %s...>" % ( - self.node.name, ("%x" % self.id.value)[:8], self.dest[:8]) - - #@-node:__str__ - #@+node:__repr__ - def __repr__(self): - - return str(self) - - #@-node:__repr__ - #@+node:__eq__ - def __eq__(self, other): - - #self.log(2, "KPeer: comparing %s to %s (%s to %s)" % (self, other, self.__class__, other.__class__)) - res = self.id == getattr(other, 'id', None) - #self.log(2, "KPeer: res=%s" % res) - return res - - #@-node:__eq__ - #@+node:__ne__ - def __ne__(self, other): - return not (self == other) - #@-node:__ne__ - #@-others - #@-node:lowlevel - #@-others -#@-node:class KPeer -#@-node:Basic Classes -#@+node:RPC Classes -#@+node:class KRpc -class KRpc(KBase): - """ - Base class for RPCs between nodes. 
- Refer subclasses - """ - #@ @+others - #@+node:attribs - type = 'unknown' # override in subclass - - #@-node:attribs - #@+node:__init__ - def __init__(self, localNode, client=None, **kw): - """ - Holds all the information for an RPC - - Arguments: - - localNode - the node from which this RPC is being driven - - client - a representation of who is initiating this rpc, one of: - - None - an API caller, which is to be blocked until the RPC completes - or times out - - (upstreamPeer, upstreamMsgId) - an upstream peer - - callable object - something which requires a callback upon completion - in which case the callable will be invoked with the RPC results as the - first argument - - Keywords: - - cbArgs - optional - if given, and if client is a callback, the callback - will be invoked with the results as first argument, and this object as - second argument - """ - self.localNode = localNode - - if client == None: - # an api client - self.isLocal = True - self.queue = Queue.Queue() - self.callback = None - elif callable(client): - self.isLocal = False - self.callback = client - elif isinstance(client, tuple): - # we're doing the RPC on behalf of an upstream peer - upstreamPeer, upstreamMsgId = client - upstreamPeer = localNode._normalisePeer(upstreamPeer) - self.isLocal = False - self.upstreamPeer = upstreamPeer - self.upstreamMsgId = upstreamMsgId - self.callback = None - - # save keywords - self.__dict__.update(kw) - - # set time for receiving a tick. 
- # if this is set to an int absolute time value, the on_tick method - # will be called as soon as possible after that time - self.nextTickTime = None - - # and register with node as a pending command - self.localNode.rpcPending.append(self) - - # now start up the request - self.start() - - #@-node:__init__ - #@+node:__del__ - def __del__(self): - - #self.log(4, "\nRPC %s getting the chop" % (str(self))) - pass - - #@-node:__del__ - #@+node:__str__ - def __str__(self): - - return "<%s on node %s>" % (self.__class__.__name__, self.localNode.name) - - #@-node:__str__ - #@+node:__repr__ - def __repr__(self): - return str(self) - #@-node:__repr__ - #@+node:bindPeerReply - def bindPeerReply(self, peer, msgId): - """ - Sets up the node to give us a callback when a reply - comes in from downstream peer 'peer' with msg id 'msgId' - """ - self.localNode.rpcBindings[(peer.dest, msgId)] = (self, peer) - - #@-node:bindPeerReply - #@+node:unbindPeerReply - def unbindPeerReply(self, peer, msgId): - """ - Disables the callback from node for replies - from peer 'peer' with msgId 'msgId' - """ - bindings = self.localNode.rpcBindings - peerdest = peer.dest - if bindings.has_key((peerdest, msgId)): - del bindings[(peerdest, msgId)] - - #@-node:unbindPeerReply - #@+node:unbindAll - def unbindAll(self): - """ - Remove all reply bindings - """ - bindings = self.localNode.rpcBindings - self.log(5, "node bindings before: %s" % bindings) - for k,v in bindings.items(): - if v[0] == self: - del bindings[k] - self.log(5, "node bindings after: %s" % bindings) - - #@-node:unbindAll - #@+node:start - def start(self): - """ - Start the RPC running. - Override this in subclasses - """ - raise KNotImplemented - - #@-node:start - #@+node:execute - def execute(self): - """ - Only for synchronous (application-level) execution. 
- Wait for the RPC to complete (or time out) and return - whatever it came up with - """ - if core.fg: - print "servicing background thread" - while self.queue.empty(): - core.cycle() - - return self.queue.get() - - #@-node:execute - #@+node:terminate - def terminate(self): - """ - Clean up after ourselves. - Mainly involves removing ourself from local node - """ - self.unbindAll() - try: - self.localNode.rpcPending.remove(self) - except: - #traceback.print_exc() - pass - - #@-node:terminate - #@+node:returnValue - def returnValue(self, res=None, **kw): - """ - Passes a return value back to the original caller, be it - the local application, or an upstream peer - - Arguments: - - just one - a result object to pass back, if this RPC - was instigated by a local application call. - Note that if this RPC was instigated by an upstream - peer, this will be ignored. - - Keywords: - - the items to return, in the case that this RPC was - instigated by an upstream peer. Ignored if this - RPC was instigated by a local application call. - Note - the RPC invocation/reply dict keys are - listed at the top of this source file. 
- """ - self.terminate() - if self.callback: - if hasattr(self, 'cbArgs'): - self.callback(res, self.cbArgs) - else: - self.callback(res) - elif self.isLocal: - self.queue.put(res) - else: - self.upstreamPeer.send_reply(msgId=self.upstreamMsgId, - **kw) - #@-node:returnValue - #@+node:on_reply - def on_reply(self, peer, msgId, **details): - """ - Callback which fires when a downstream peer replies - - Override this in subclasses - """ - raise KNotImplemented - - #@-node:on_reply - #@+node:on_tick - def on_tick(self): - """ - Callback which fires if the whole RPC times out, in which - case the RPC should return whatever it can - - Override in subclasses - """ - self.localNode.rpcPending.remove(self) - - #@-node:on_tick - #@-others -#@-node:class KRpc -#@+node:PING -#@+node:class KRpcPing -class KRpcPing(KRpc): - """ - Implements the PING rpc as per Kademlia spec - """ - #@ @+others - #@+node:attribs - type = 'ping' - - #@-node:attribs - #@+node:__init__ - def __init__(self, localNode, client=None, **kw): - """ - Creates and performs a PING RPC - - Arguments: - - localNode - the node performing this RPC - - upstreamPeer - if given, the peer wanting a reply - - upstreamMsgId - if upstreamPeer is given, this is the msgId - of the RPC message from the upstream peer - - Keywords: - - peer - the peer to ping - default is local node - """ - peer = kw.get('peer', None) - if peer != None: - peer = localNode._normalisePeer(peer) - self.peerToPing = peer - - if kw.has_key('cbArgs'): - KRpc.__init__(self, localNode, client, cbArgs=kw['cbArgs']) - else: - KRpc.__init__(self, localNode, client) - - #@-node:__init__ - #@+node:start - def start(self): - """ - Sends out the ping - """ - peer = self.peerToPing - - # are we ourselves being pinged? 
- if peer == None: - # yes, just reply - self.returnValue(True) - return - - # no - we need to ping a peer - thisNode = self.localNode - - msgId = thisNode.msgId = thisNode._msgIdAlloc() - - # bind for peer response - self.bindPeerReply(peer, msgId) - - # and send it off - self.log(3, "node %s sending ping" % self.localNode.name) - peer.send_ping(msgId=msgId) - - # and set a reply timeout - self.nextTickTime = time.time() + timeout['ping'] - - #@-node:start - #@+node:on_reply - def on_reply(self, peer, msgId, **details): - """ - Callback for PING reply - """ - self.log(3, "got ping reply from %s" % peer) - self.returnValue(True) - - #@-node:on_reply - #@+node:on_tick - def on_tick(self): - """ - 'tick' handler. - - For PING RPC, the only time we should get a tick is when the ping - has timed out - """ - self.log(3, "timeout awaiting ping reply from %s" % self.peerToPing) - self.returnValue(False) - - #@-node:on_tick - #@-others -#@-node:class KRpcPing -#@-node:PING -#@+node:FIND_NODE -#@+node:class KPeerQueryRecord -class KPeerQueryRecord(KBase): - """ - Keeps state information regarding a peer we're quering - """ - #@ @+others - #@+node:__init__ - def __init__(self, peer, table, state=None, **kw): - - self.peer = peer - self.dest = peer.dest - self.deadline = time.time() + timeout['findNode'] - self.table = table - - # state is always one of: - # - 'start' - have not yet sent query to peer - # - 'recommended' - peer was recommended by another peer, no query sent - # - 'queried' - sent query, awaiting reply or timeout - # - 'replied' - this peer has replied to our query - # - 'timeout' - timed out waiting for peer reply - # - 'toofar' - too far away to be of interest - # - 'closest' - this peer is one of the closest so far - - if state == None: - state = 'start' - if not isinstance(state, str): - raise Exception("Invalid state %s" % state) - - self.state = state - - self.__dict__.update(kw) - - #@-node:__init__ - #@+node:hasTimedOut - def hasTimedOut(self, 
now=None): - if now == None: - now = time.time() - return self.state == 'queried' and now > self.deadline - - #@-node:hasTimedOut - #@+node:__cmp__ - def __cmp__(self, other): - - return cmp(self.peer.id.rawdistance(self.table.sorthash), - other.peer.id.rawdistance(self.table.sorthash)) - - #@-node:__cmp__ - #@+node:__lt__ etc - def __lt__(self, other): - return (cmp(self, other) < 0) - - def __le__(self, other): - return (cmp(self, other) <= 0) - - def __gt__(self, other): - return (cmp(self, other) > 0) - - def __ge__(self, other): - return (cmp(self, other) <= 0) - - #@-node:__lt__ etc - #@+node:isCloserThanAllOf - def isCloserThanAllOf(self, tab): - """ - returns True if this peerRec is closer to the desired hash - than all of the peerRecs in table 'tab' - """ - if not isinstance(tab, KPeerQueryTable): - self.log(2, "invalid qtable %s" % repr(tab)) - raise Exception("invalid qtable %s" % repr(tab)) - - for rec in tab: - if self > rec: - return False - return True - - #@-node:isCloserThanAllOf - #@+node:isCloserThanOneOf - def isCloserThanOneOf(self, tab): - """ - returns True if this peerRec is closer to the desired hash - than one or more of of the peerRecs in table 'tab' - """ - if not isinstance(tab, KPeerQueryTable): - self.log(2, "invalid qtable %s" % repr(tab)) - raise Exception("invalid qtable %s" % repr(tab)) - - for rec in tab: - if self < rec: - return True - return False - - #@-node:isCloserThanOneOf - #@-others -#@-node:class KPeerQueryRecord -#@+node:class KPeerQueryTable -class KPeerQueryTable(KBase): - """ - Holds zero or more instances of KPeerQuery and - presents/sorts table in different forms - """ - #@ @+others - #@+node:__init__ - def __init__(self, lst=None, sorthash=None, state=None, **kw): - self.peers = [] - if lst == None: - lst = [] - else: - self.setlist(lst, state, **kw) - self.sorthash = sorthash - - #@-node:__init__ - #@+node:setlist - def setlist(self, lst, state=None, **kw): - for item in lst: - self.append(item, state, **kw) - - 
#@-node:setlist - #@+node:getExpired - def getExpired(self): - """ - return a list of peers which have expired - """ - return KPeerQueryTable( - filter(lambda item: item.hasTimedOut(), self.peers), - self.sorthash - ) - - #@-node:getExpired - #@+node:purgeExpired - def purgeExpired(self): - """ - Eliminate peers which have expired - """ - for peer in self.peers: - if peer.hasTimedOut(): - self.peers.remove(peer) - - #@-node:purgeExpired - #@+node:sort - def sort(self): - """ - Sort the table in order of increasing distance from self.sorthash - """ - self.peers.sort() - - #@-node:sort - #@+node:select - def select(self, criterion): - """ - Returns a table of items for which criterion(item) returns True - Otherwise, if 'criterion' is a string, returns the items whose - state == criterion. - Otherwise, if 'criterion' is a list or tuple, return the items - whose state is one of the elements in criterion - """ - if callable(criterion): - func = criterion - elif type(criterion) in [type(()), type([])]: - func = lambda p: p.state in criterion - else: - func = lambda p: p.state == criterion - - recs = [] - for peerRec in self.peers: - if func(peerRec): - recs.append(peerRec) - return self.newtable(recs) - - #@-node:select - #@+node:count - def count(self, *args): - """ - returns the number of records whose state is one of args - """ - count = 0 - for rec in self.peers: - if rec.state in args: - count += 1 - return count - - #@-node:count - #@+node:changeState - def changeState(self, oldstate, newstate): - """ - for all recs of state 'oldstate', change their - state to 'newstate' - """ - for p in self.peers: - if p.state == oldstate: - p.state = newstate - #@-node:changeState - #@+node:filter - def filter(self, func): - """ - Eliminate, in place, all items where func(item) returns False - """ - for peerRec in self.peers: - if not func(peerRec): - self.peers.remove(peerRec) - - #@-node:filter - #@+node:purge - def purge(self, func): - """ - Eliminate, in place, all items 
where func(item) returns True - """ - if 0 and desperatelyDebugging: - set_trace() - for peerRec in self.peers: - if func(peerRec): - self.peers.remove(peerRec) - - #@-node:purge - #@+node:chooseN - def chooseN(self, n): - """ - Randomly select n peer query records - """ - candidates = self.peers[:] - - self.log(3, "candidates = %s" % repr(candidates)) - - chosen = [] - i = 0 - - if len(candidates) <= n: - chosen = candidates - else: - while i < n: - try: - peer = random.choice(candidates) - except: - self.log(2, "failed to choose one of %s" % repr(candidates)) - raise - chosen.append(peer) - candidates.remove(peer) - i += 1 - - return self.newtable(chosen) - - #@-node:chooseN - #@+node:__str__ - def __str__(self): - return "" % len(self) #.peers) - - def __repr__(self): - return str(self) - - #@-node:__str__ - #@+node:newtable - def newtable(self, items, state=None, **kw): - """ - Returns a new KPeerQueryTable object, based on this - one, but containing 'items' - """ - tab = KPeerQueryTable(items, sorthash=self.sorthash, state=state, **kw) - return tab - - #@-node:newtable - #@+node:dump - def dump(self): - - c = self.count - self.log(2, - "PeerQueryTable stats:\n" - "start: %s\n" - "recommended: %s\n" - "queried: %s\n" - "replied: %s\n" - "timeout: %s\n" - "closest: %s\n" - "toofar: %s\n" - "TOTAL: %s\n" % ( - c('start'), - c('recommended'), - c('queried'), - c('replied'), - c('timeout'), - c('closest'), - c('toofar'), - len(self.peers))) - - #states = [p.state for p in self.peers] - #self.log(3, "PeerQueryTable states:\n%s" % states) - - #@-node:dump - #@+node:list-like methods - #@+node:extend - def extend(self, items, state, **kw): - for item in items: - self.append(item, state, **kw) - - #@-node:extend - #@+node:append - def append(self, item, state=None, **kw): - - if isinstance(item, KPeerQueryRecord): - self.log(5, "adding a KPeerQueryRecord, state=%s" % state) - if state != None: - item.state = state - item.__dict__.update(kw) - peerRec = item - - elif 
isinstance(item, KPeer): - self.log(5, "adding a KPeer") - peerRec = KPeerQueryRecord(item, self, state, **kw) - - else: - self.log(2, "bad peer %s" % repr(item)) - raise KBadPeer - - if peerRec not in self: - self.log(5, "peerRec=%s list=%s" % (peerRec, self.peers)) - self.peers.append(peerRec) - else: - self.log(5, "trying to append duplicate peer???") - - #@-node:append - #@+node:remove - def remove(self, item): - self.peers.remove(item) - - #@-node:remove - #@+node:__getitem__ - def __getitem__(self, idx): - """ - Allow the table to be indexed by any of: - - KPeerQueryRecord - - integer index - - long string - treated as dest - - short string - treated as peer id hash string - - KHash - finds peer with that id - - KPeer - returns peer with that peer - """ - if type(idx) == type(0): - return self.peers[idx] - elif isinstance(idx, KPeer): - for peer in self.peers: - if peer.peer == idx: - return peer - raise IndexError("Query table has no peer %s" % idx) - elif isinstance(idx, str): - if len(str) > 512: - for peer in self.peers: - if peer.peer.dest == idx: - return peer - raise IndexError("No peer with dest %s" % idx) - else: - for peer in self.peers: - if peer.peer.id.value == idx: - return peer - raise IndexError("No peer with dest hash %s" % idx) - elif isinstance(idx, KHash): - for peer in self.peers: - if peer.peer.id == idx: - return peer - raise IndexError("No peer with id %s" % idx) - else: - raise IndexError("Invalid selector %s" % repr(idx)) - - #@-node:__getitem__ - #@+node:__len__ - def __len__(self): - return len(self.peers) - - #@-node:__len__ - #@+node:__getslice__ - def __getslice__(self, fromidx, toidx): - return KPeerQueryTable(self.peers[fromidx:toidx], self.sorthash) - - #@-node:__getslice__ - #@+node:__iter__ - def __iter__(self): - return iter(self.peers) - - #@-node:__iter__ - #@+node:__add__ - def __add__(self, other): - self.extend(other) - - #@-node:__add__ - #@+node:__contains__ - def __contains__(self, other): - self.log(5, "testing if 
%s is in %s" % (other, self.peers)) - for peerRec in self.peers: - if peerRec.peer.dest == other.peer.dest: - return True - return False - - #@-node:__contains__ - #@-node:list-like methods - #@-others -#@-node:class KPeerQueryTable -#@+node:class KRpcFindNode -class KRpcFindNode(KRpc): - """ - Implements the FIND_NODE rpc as per Kademlia spec - """ - #@ @+others - #@+node:spec info comments - #@+at - # Verbatim extract from original Kademlia paper follows: - # - # The lookup initiator starts by picking x nodes from its closest - # non-empty k-bucket (or, if that bucket has fewer than x - # entries, it just takes the closest x nodes it knows of). - # - # The initiator then sends parallel, asynchronous - # FIND NODE RPCs to the x nodes it has chosen. - # x is a system-wide concurrency parameter, such as 3. - # - # In the recursive step, the initiator resends the - # FIND NODE to nodes it has learned about from previous RPCs. - # - # [Paraphrased - in the recursive step, the initiator sends a FIND_NODE to - # each of the nodes that were returned as results of these previous - # FIND_NODE RPCs.] - # - # (This recursion can begin before all of the previous RPCs have - # returned). - # - # Of the k nodes the initiator has heard of closest to - # the target, it picks x that it has not yet queried and resends - # the FIND_NODE RPC to them. - # - # Nodes that fail to respond quickly are removed from consideration - # until and unless they do respond. - # - # If a round of FIND_NODEs fails to return a node any closer - # than the closest already seen, the initiator resends - # the FIND NODE to all of the k closest nodes it has - # not already queried. - # - # The lookup terminates when the initiator has queried and gotten - # responses from the k closest nodes it has seen. 
- #@-at - #@-node:spec info comments - #@+node:attribs - type = 'findNode' - #@-node:attribs - #@+node:__init__ - def __init__(self, localNode, client=None, **kw): - """ - Creates and launches the findNode rpc - - Arguments: - - localNode - the node performing this RPC - - client - see KRpc.__init__ - - Keywords: - - hash - a string, long int or KHash object representing - what we're looking for. treatment depends on type: - - KHash object - used as is - - string - gets wrapped into a KHash object - - long int - wrapped into a KHash object - refer KHash.__init__ - - raw - whether 'hash' is already a hash, default True - - local - True/False - whether to only search local store, - or pass on the query to the network, default True - """ - kw = dict(kw) - if kw.get('raw', False): - h = kw['hash'] - del kw['hash'] - kw['raw'] = h - self.hashWanted = KHash(**kw) - else: - self.hashWanted = KHash(kw['hash'], **kw) - self.isLocalOnly = kw.get('local', True) - - self.numQueriesPending = 0 - - self.numRounds = 0 # count number of rounds - self.numReplies = 0 # number of query replies received - self.numQueriesSent = 0 - self.numPeersRecommended = 0 - - # whichever mode we're called from, we gotta find the k closest peers - self.localNode = localNode - self.peerTab = self.findClosestPeersInitial() - - self.log(4, "KRpcFindNode: isLocalOnly=%s" % self.isLocalOnly) - - if kw.has_key('cbArgs'): - KRpc.__init__(self, localNode, client, cbArgs=kw['cbArgs']) - else: - KRpc.__init__(self, localNode, client) - - #@-node:__init__ - #@+node:start - def start(self): - """ - Kicks off this RPC - """ - # if we're being called by an upstream initiator, just return the peer list - if self.isLocalOnly: - peerDests = [peer.dest for peer in self.peerTab] - self.log(5, "findNode: local only: returning to upstream with %s" % repr(peerDests)) - self.returnValue(peerDests) - return - - # just return nothing if we don't have any peers - if len(self.peerTab) == 0: - self.returnValue([]) - return - 
- # send off first round of queries - self.sendSomeQueries() - - return - - #@-node:start - #@+node:sendSomeQueries - def sendSomeQueries(self, **kw): - """ - First step of findNode - - Select alpha nodes that we haven't yet queried, and send them queries - """ - # bail if too busy - if self.numQueriesPending >= maxConcurrentQueries: - return - - # shorthand - localNode = self.localNode - hashWanted = self.hashWanted - - # randomly choose some peers - #somePeerRecs = self.peerTab.chooseN(numSearchPeers) - somePeerRecs = self.peerTab.select('start') - - # start our ticker - self.nextTickTime = time.time() + timeout['findNode'] - - numQueriesSent = 0 - - # and send them findNode queries - if len(somePeerRecs) > 0: - for peerRec in somePeerRecs: - self.log(3, "querying %s" % peerRec) - if self.numQueriesPending < maxConcurrentQueries: - self.sendOneQuery(peerRec) - numQueriesSent += 1 - else: - break - self.log(3, "%s queries sent, awaiting reply" % numQueriesSent) - else: - self.log(3, "no peer recs???") - for peerRec in self.peerTab: - self.log(4, "%s state=%s, dest=%s..." % (peerRec, peerRec.state, peerRec.dest[:12])) - - #@-node:sendSomeQueries - #@+node:sendOneQuery - def sendOneQuery(self, peerRec): - """ - Sends off a query to a single peer - """ - if peerRec.state != 'start': - self.log(2, "duh!! 
peer state %s:\n%s" % (peerRec.state, peerRec)) - return - - msgId = self.localNode._msgIdAlloc() - self.bindPeerReply(peerRec.peer, msgId) - peerRec.msgId = msgId - - if self.type == 'findData': - peerRec.peer.send_findData(hash=self.hashWanted, msgId=msgId) - else: - peerRec.peer.send_findNode(hash=self.hashWanted, msgId=msgId) - - peerRec.state = 'queried' - - self.numQueriesPending += 1 - - self.numQueriesSent += 1 - - #@-node:sendOneQuery - #@+node:findClosestPeersInitial - def findClosestPeersInitial(self): - """ - Searches our k-buckets, and returns a table of k of - peers closest to wanted hash into self.closestPeersInitial - """ - hashobj = self.hashWanted - - lst = [] - buckets = self.localNode.buckets - for bucket in buckets: - for peer in bucket: - lst.append(peer) - - table = KPeerQueryTable(lst, self.hashWanted, 'start') - table.sort() - - return table[:maxBucketSize] - - #@-node:findClosestPeersInitial - #@+node:addPeerIfCloser - def addPeerIfCloser(self, peer): - """ - Maintains the private .peersToQuery array. - If the array is not yet maxed (ie, length < maxBucketSize), - the peer is simply added. - However, if the array is maxed, it finds the least-close peer, - and replaces it with the given peer if closer. 
- """ - #@-node:addPeerIfCloser - #@+node:isCloserThanQueried - def isCloserThanQueried(self, peer): - """ - Test function which returns True if argument 'peer' - is closer than all the peers in self.peersAlreadyQueried, - or False if not - """ - for p in self.peersAlreadyQueried: - if p.id.rawdistance(self.hashWanted) < peer.id.rawdistance(self.hashWanted): - return False - return True - - #@-node:isCloserThanQueried - #@+node:on_reply - def on_reply(self, peer, msgId, **details): - """ - Callback for FIND_NODE reply - """ - # shorthand - peerTab = self.peerTab - - self.numReplies += 1 - - # ------------------------------------------------------------ - # determine who replied, and get the raw dests sent back - try: - peerRec = peerTab[peer] - except: - traceback.print_exc() - self.log(3, "discarding findNode reply from unknown peer %s %s, discarding" % ( - peer, details)) - return - - # one less query to wait for - self.numQueriesPending -= 1 - - # ---------------------------------------------------------- - # peerRec is the peer that replied - # peers is a list of raw dests - - # save ref to this peer, it's seemingly good - self.localNode.addref(peerRec.peer) - - # mark it as having replied - if peerRec.state != 'queried': - self.log(2, "too weird - got a reply from a peer we didn't query") - peerRec.state = 'replied' - - # wrap the returned peers as KPeer objects - peersReturned = details.get('result', []) - peersReturned = [self.localNode._normalisePeer(p) for p in peersReturned] - - self.numPeersRecommended += len(peersReturned) - - # and add them to table in state 'recommended' - for p in peersReturned: - peerTab.append(p, 'recommended') - - # try to fire off more queries - self.sendSomeQueries() - - # and check for and action possible end of query round - self.checkEndOfRound() - - - #@-node:on_reply - #@+node:on_tick - def on_tick(self): - """ - Callback for FIND_NODE reply timeout - """ - # check for timeouts, and update offending peers - now = 
time.time() - for peerRec in self.peerTab: - if peerRec.hasTimedOut(now): - peerRec.state = 'timeout' - - # makes room for more queries - self.sendSomeQueries() - - # possible end of round - self.checkEndOfRound() - - # schedule next tick - self.nextTickTime = time.time() + 5 - - #@-node:on_tick - #@+node:checkEndOfRound - def checkEndOfRound(self): - """ - Checks if we've hit the end of a query round. - If so, and if either: - - we've got some closer peers, OR - - we've heard from less than maxBucketSize peers, - fire off more queries - - Otherwise, return the best available - """ - peerTab = self.peerTab - - if core.fg: - set_trace() - - # has this query round ended? - if peerTab.count('start', 'queried') > 0: - # not yet - return - - self.log(2, "********** query round ended") - - # ------------------------------------ - # end of round processing - - self.numRounds += 1 - - # did we get any closer to required hash? - if self.type == 'findData' \ - or self.gotAnyCloser() \ - or peerTab.count('closest') < maxBucketSize: - - # yes - save these query results - self.log(4, "starting another round") - peerTab.changeState('replied', 'closest') - peerTab.changeState('recommended', 'start') - - # cull the shortlist - self.log(2, "culling to k peers") - if peerTab.count('closest') > maxBucketSize: - peerTab.sort() - excess = peerTab.select('closest')[maxBucketSize:] - excess.changeState('closest', 'toofar') - pass - - # and start up another round - self.sendSomeQueries() - - # did anything launch? 
- if peerTab.count('start', 'queried') == 0: - # no - we're screwed - self.returnTheBestWeGot() - - # done for now - return - - #@-node:checkEndOfRound - #@+node:gotAnyCloser - def gotAnyCloser(self): - """ - Tests if any peer records in state 'recommended' or 'replied' - are nearer than the records in state 'closest' - """ - peerTab = self.peerTab - - # get current closest peers - closest = peerTab.select('closest') - - # if none yet, then this was just end of first round - if len(closest) == 0: - return True - - # get the peers we're considering - #candidates = peerTab.select(('recommended', 'replied')) - candidates = peerTab.select('recommended') - - # now test them - gotOneCloser = False - for c in candidates: - #if c.isCloserThanOneOf(closest): - if c.isCloserThanAllOf(closest): - return True - - # none were closer - return False - - #@-node:gotAnyCloser - #@+node:returnTheBestWeGot - def returnTheBestWeGot(self): - """ - Returns the k closest nodes to the wanted hash that we have - actually heard from - """ - # pick the peers which have replied to us - closest = self.peerTab.select('closest') - - self.peerTab.dump() - - # add ourself to the list - we could easily be one of the best - localNode = self.localNode - selfDest = localNode._normalisePeer(localNode.dest) - closest.append(selfDest, state='closest') - - # sort in order of least distance first - closest.sort() - - # pick the best k of these - #peersHeardFrom = peersHeardFrom[:maxBucketSize] - #peersHeardFrom = peersHeardFrom[:numSearchPeers] - - # extract their dest strings - peers = [p.peer.dest for p in closest] - - # pass these back - self.returnValue(peers) - - # and we're done - return - - #@-node:returnTheBestWeGot - #@+node:returnValue - def returnValue(self, items): - """ - override with a nicer call sig - """ - # a hack for testing - save this RPC object into the node - # so we can introspect it - self.localNode.lastrpc = self - - items = items[:maxBucketSize] - - self.reportStats() - - 
KRpc.returnValue(self, items, result=items) - - - #@-node:returnValue - #@+node:reportStats - def reportStats(self): - """ - Logs a stat dump of query outcome - """ - if self.isLocalOnly: - return - self.log(2, - "query terminated after %s rounds, %s queries, %s replies, %s recommendations" % ( - (self.numRounds+1), - self.numQueriesSent, - (self.numReplies+1), - self.numPeersRecommended - ) - ) - #@-node:reportStats - #@-others -#@-node:class KRpcFindNode -#@-node:FIND_NODE -#@+node:FIND_DATA -#@+node:class KRpcFindData -class KRpcFindData(KRpcFindNode): - """ - variant of KRpcFindNode which returns key value if found - """ - #@ @+others - #@+node:attribs - type = 'findData' - #@-node:attribs - #@+node:start - def start(self): - """ - Kicks off the RPC. - If requested key is stored locally, simply returns it. - Otherwise, falls back on parent method - """ - # if we posses the data, just return the data - value = self.localNode.storage.getKey(self.hashWanted.asHex(), keyIsHashed=True) - if value != None: - self.log(4, "Found required value in local storage") - self.log(4, "VALUE='%s'" % value) - self.on_gotValue(value, self.hashWanted.asHex()) - return - - # no such luck - pass on to parent - KRpcFindNode.start(self) - - #@-node:start - #@+node:on_reply - def on_reply(self, peer, msgId, **details): - """ - Callback for FIND_NODE reply - """ - res = details.get('result', None) - if isinstance(res, str): - self.on_gotValue(res, self.hashWanted.asHex()) - else: - KRpcFindNode.on_reply(self, peer, msgId, **details) - - #@-node:on_reply - #@+node:on_gotValue - def on_gotValue(self, value, hash=None): - """ - Callback which fires when we get the value stored under a key - - Value is either the real value, or a splitfile manifest - If a real value, just return it. 
- If a splitfile manifest, launch nested findValue RPCs to get each chunk - """ - nchunks = 0 - try: - firstline, rest = value.split("\n", 1) - firstline = firstline.strip() - kwd, str_nchunks = firstline.split(":") - if kwd != 'chunks': - raise hell - nchunks = int(nchunks) - value = rest - except: - pass # in this case, hell hath no fury at all - - if nchunks == 0: - self.returnValue(value) - return - - # now we get to the hard bit - we have to set up nested findData RPCs to - # get all the chunks and reassemble them - hashes = rest.strip().split("\n") - - # do sanity checks - hashesAllValid = [len(h) == 40 for h in hashes] - if len(hashes) != nchunks: - self.log( - 2, - "Splitfile retrieval failure\nmanifest contains %s hashes, should have been %s" % ( - len(hashes), nchunks)) - self.returnValue(None) - if False in hashesAllValid: - self.log(2, "Splitfile retrieval failure - one or more invalid hashes") - - # now this is a bit weird - we need to bind each chunk to its hash, so we create a - # class which produces callables which fire our on_gotChunk callback - class ChunkNotifier: - def __init__(me, h, cb): - me.h = h - me.cb = cb - def __call__(me, val): - me.cb(me.h, val) - - # now launch the chunk retrieval RPCs - # result is that for each retrieved chunk, our on_gotChunk callback will - # be invoked with the arguments (hash, value), so we can tick them off - self.numChunks = nchunks - self.numChunksReceived = 0 - self.chunkHashes = hashes - self.chunks = dict.fromkeys(hashes) - for h in hashes: - KRpcFindData(self.localNode, h, ChunkNotifier(h, self.on_gotChunk)) - - # now, we can sit back and receive the chunks - - #@-node:on_gotValue - #@+node:on_gotChunk - def on_gotChunk(self, hexhash, value): - """ - Callback which fires when a nested chunk findNode returns - """ - if value == None: - self.log(2, "Chunk retrieval failed, fatal to this findData") - self.returnValue(None) - return - - # got a value - vet it against hash - if shahash(value) != hexhash: - 
self.log(2, "Got a chunk, but it doesn't hash right - fatal to this findData") - self.returnValue(None) - return - - # it's valid - stash it - self.chunks[hexhash] = value - self.numChunksReceived += 1 - - # have we finished yet? - if self.numChunksReceived <= self.numChunks: - # no - self.log(4, "Received chunk %s of %s" % (self.numChunksReceived, self.numChunks)) - return - - # maybe we have - self.log(4, "We appear to have all chunks, checking further") - - # sanity check - if None in self.chunks.values(): - self.log(2, "Fatal - reached chunk count, but chunks still missing") - self.returnValue(None) - return - - # finally done - got all chunks, hashes are valid, reassemble in order - allChunks = [self.chunks[h] for h in self.chunkHashes] - reassembled = "".join(allChunks) - self.log(4, "Reassembled all %s chunks, SUCCESS" % self.numChunks) - self.returnValue(reassembled) - - #@-node:on_gotChunk - #@+node:returnValue - def returnValue(self, items): - """ - override with a nicer call sig - """ - # a hack for testing - save this RPC object into the node - # so we can introspect it - self.localNode.lastrpc = self - - # another debugging hack - self.reportStats() - - KRpc.returnValue(self, items, result=items) - - #@-node:returnValue - #@-others - -#@-node:class KRpcFindData -#@-node:FIND_DATA -#@+node:STORE -#@+node:class KRpcStore -class KRpcStore(KRpc): - """ - Implements key storage - """ - #@ @+others - #@+node:attribs - type = 'store' - #@-node:attribs - #@+node:__init__ - def __init__(self, localNode, client=None, **kw): - """ - Creates and launches a STORE rpc - - Arguments: - - localNode - the node performing this RPC - - client - see KRpc.__init__ - - Keywords: - - key - the key under which we wish to save the data - - value - the value we wish to save - - local - True/False: - - if True, only save in local store - - if False, do a findNode to find the nodes to save the - key to, and tell them to save it - default is True - """ - self.key = kw['key'] - 
#self.keyHashed = shahash(self.key) - self.keyHashed = self.key - self.value = kw['value'] - self.isLocalOnly = kw.get('local', True) - - # set 'splitting' flag to indicate if we need to insert as splitfiles - self.splitting = len(self.value) > maxValueSize - - self.log(4, "isLocalOnly=%s" % self.isLocalOnly) - - if kw.has_key('cbArgs'): - KRpc.__init__(self, localNode, client, cbArgs=kw['cbArgs']) - else: - KRpc.__init__(self, localNode, client) - - #@-node:__init__ - #@+node:start - def start(self): - """ - Kicks off this RPC - """ - # if too big, then break up into <30k chunks - if self.splitting: - self.storeSplit() - return - - # not too big - prefix a 0 chunk count, and go ahead as a single entity - self.value = "chunks:0\n" + self.value - - # if local only, or no peers, just save locally - if self.isLocalOnly or len(self.localNode.peers) == 0: - result = self.localNode.storage.putKey(self.keyHashed, self.value, keyIsHashed=True) - if result: - result = 1 - else: - result = 0 - self.returnValue(result) - return - - # no - se have to find peers to store the key to, and tell them to - # store the key - - # launch a findNode rpc, continue in our callback - KRpcFindNode(self.localNode, self.on_doneFindNode, - hash=self.keyHashed, raw=True, local=False) - return - - - #@-node:start - #@+node:storeSplit - def storeSplit(self): - """ - Gets called if we're splitting a big file into smaller chunks - - Here, we: - - break the file up into chunks - - build a manifest - - launch store RPCs to store each chunk, where the key is SHA(chunk) - - launch a store RPC to store the 'manifest' (noting that if the manifest - is too big, it'll get recursively inserted as a splitfile as well - """ - # break up into chunks - chunks = [] - hashes = [] - size = len(self.value) - i = 0 - self.nchunks = 0 - while i < size: - chunks.append(self.value[i:i+maxValueSize]) - hashes.append(shahash(chunks[-1])) - i += maxValueSize - self.nchunks += 1 - - # build the manifest - manifest = 
"chunks:%s\n%s\n" % (self.nchunks, "\n".join(hashes)) - - # set progress attributes - self.chunkManifestInserted = False - self.chunksInserted = 0 - - # launch nested Store RPCs for manifest, and each chunk - KRpcStore(self.localNode, self.on_doneChunkManifest, - local=self.isLocalOnly, - key=self.key, - value=manifest) - i = 0 - while i < self.nchunks: - KRpcStore(self.localNode, self.on_doneChunk, - local=self.isLocalOnly, - key=hashes[i], - value=chunks[i]) - i += 1 - - # now sit back and wait for the callbacks - #@-node:storeSplit - #@+node:on_doneChunkManifest - def on_doneChunkManifest(self, result): - """ - Callback which fires when a manifest insert succeeds/fails - """ - # the chunk callback handles all - self.on_doneChunk(result, isManifest=True) - #@-node:on_doneChunkManifest - #@+node:on_doneChunk - def on_doneChunk(self, result, isManifest=False): - """ - Callback which fires when a single chunk insert succeeds/fails - """ - # a failure either way means the whole RPC has failed - if not result: - # one huge fuck-up - self.returnValue(False) - return - - # update our tally - if isManifest: - self.chunkManifestInserted = True - else: - self.chunksInserted += 1 - - # finished? 
- if self.chunkManifestInserted and (self.chunksInserted == self.nchunks): - # yep = success - self.returnValue(True) - - #@-node:on_doneChunk - #@+node:returnValue - def returnValue(self, result): - """ - an override with a nicer call sig - """ - # a hack for testing - save this RPC object into the node - # so we can introspect it - self.localNode.lastrpc = self - - try: - KRpc.returnValue(self, result, status=result) - except: - traceback.print_exc() - self.log(3, "Failed to return %s" % repr(result)) - KRpc.returnValue(self, 0, status=0) - - #@-node:returnValue - #@+node:on_doneFindNode - def on_doneFindNode(self, lst): - """ - Receive a callback from findNode - - Send STORE command to each node that comes back - """ - localNode = self.localNode - - # normalise results - normalisePeer = localNode._normalisePeer - peers = [normalisePeer(p) for p in lst] # wrap in KPeer objects - - self.log(2, "STORE RPC findNode - got peers %s" % repr(peers)) - - i = 0 - - self.numPeersSucceeded = 0 - self.numPeersFailed = 0 - self.numPeersFinished = 0 - - # and fire off store messages for each peer - for peer in peers: - - if peer.dest == localNode.dest: - self.log(3, "storing to ourself") - localNode.storage.putKey(self.keyHashed, self.value, keyIsHashed=True) - self.numPeersSucceeded += 1 - self.numPeersFinished += 1 - else: - msgId = self.localNode._msgIdAlloc() - self.log(4, "forwarding store cmd to peer:\npeer=%s\nmsgId=%s" % (peer, msgId)) - self.bindPeerReply(peer, msgId) - peer.send_store(key=self.keyHashed, value=self.value, msgId=msgId) - i += 1 - if i >= numStorePeers: - break - - self.nextTickTime = time.time() + timeout['store'] - - self.log(2, "Sent store cmd to %s peers, awaiting responses" % i) - - self.numPeersToStore = i - - - #@-node:on_doneFindNode - #@+node:on_reply - def on_reply(self, peer, msgId, **details): - """ - callback which fires when we get a reply from a STORE we sent to a - peer - """ - self.numPeersSucceeded += 1 - self.numPeersFinished += 1 - 
- if self.numPeersFinished == self.numPeersToStore: - # rpc is finished - self.returnValue(True) - - #@-node:on_reply - #@+node:on_tick - def on_tick(self): - - self.log(3, "Timeout awaiting store reply from %d out of %d peers" % ( - self.numPeersToStore - self.numPeersSucceeded, self.numPeersToStore)) - - if self.numPeersSucceeded == 0: - self.log(3, "Store timeout - no peers replied, storing locally") - self.localNode.storage.putKey(self.keyHashed, self.value, keyIsHashed=True) - - self.returnValue(True) - - #@-node:on_tick - #@-others -#@-node:class KRpcStore -#@-node:STORE -#@+node:PINGALL -#@+node:class KRpcPingAll -class KRpcPingAll(KRpc): - """ - Pings all peers - """ - #@ @+others - #@+node:attribs - type = 'pingall' - #@-node:attribs - #@+node:__init__ - def __init__(self, localNode, client=None, **kw): - """ - Creates and launches a PINGALL rpc - - Arguments: - - localNode - the node performing this RPC - - client - see KRpc.__init__ - - Keywords: none - """ - if kw.has_key('cbArgs'): - KRpc.__init__(self, localNode, client, cbArgs=kw['cbArgs']) - else: - KRpc.__init__(self, localNode, client) - - #@-node:__init__ - #@+node:start - def start(self): - """ - Kicks off this RPC - """ - # launch a findNode rpc against each of our peers - peers = self.localNode.peers - self.numSent = self.numPending = len(peers) - self.numReplied = self.numFailed = 0 - for peer in peers: - KRpcPing(self.localNode, self.on_reply, peer=peer) - return - - #@-node:start - #@+node:on_reply - def on_reply(self, result): - """ - callback which fires when we get a reply from a STORE we sent to a - peer - """ - log(3, "got %s" % repr(result)) - - if result: - self.numReplied += 1 - else: - self.numFailed += 1 - self.numPending -= 1 - - if self.numPending <= 0: - res = "pinged:%s replied:%s timeout:%s" % ( - self.numSent, self.numReplied, self.numFailed) - self.log(3, res) - self.returnValue(res) - - #@-node:on_reply - #@+node:on_tick - def on_tick(self): - - self.log(3, "this shouldn't 
have happened") - self.returnValue(False) - - #@-node:on_tick - #@+node:returnValue - def returnValue(self, result): - """ - an override with a nicer call sig - """ - # a hack for testing - save this RPC object into the node - # so we can introspect it - self.localNode.lastrpc = self - - try: - KRpc.returnValue(self, result, status=result) - except: - traceback.print_exc() - self.log(3, "Failed to return %s" % repr(result)) - KRpc.returnValue(self, 0, status=0) - - #@-node:returnValue - #@-others -#@-node:class KRpcPingAll -#@-node:PINGALL -#@-node:RPC Classes -#@+node:Node Socket Server -#@+node:class KNodeServer -class KNodeServer(KBase, SocketServer.ThreadingMixIn, SocketServer.TCPServer): - """ - Listens for incoming socket connections - """ - #@ @+others - #@+node:__init__ - def __init__(self, node, addr=None): - - if addr == None: - addr = clientAddr - - self.isRunning = True - - self.node = node - - listenHost, listenPort = addr.split(":") - listenPort = int(listenPort) - self.listenPort = listenPort - SocketServer.TCPServer.__init__(self, (listenHost, listenPort), KNodeReqHandler) - - #@-node:__init__ - #@+node:serve_forever - def serve_forever(self): - - print "awaiting client connections on port %s" % self.listenPort - while self.isRunning: - self.handle_request() - - #@-node:serve_forever - #@-others -#@-node:class KNodeServer -#@+node:class KNodeReqHandler -class KNodeReqHandler(KBase, SocketServer.StreamRequestHandler): - """ - Manages a single client connection - """ - #@ @+others - #@+node:handle - def handle(self): - """ - Conducts all conversation for a single req - """ - req = self.request - client = self.client_address - server = self.server - node = self.server.node - - read = self.rfile.read - readline = self.rfile.readline - write = self.wfile.write - flush = self.wfile.flush - - finish = self.finish - - # start with a greeting - write("Stasher version %s ready\n" % version) - - # get the command - line = readline().strip() - - try: - cmd, 
args = re.split("\\s+", line, 1) - except: - cmd = line - args = '' - - self.log(3, "cmd=%s args=%s" % (repr(cmd), repr(args))) - - if cmd in ["get", "getlocal"]: - isLocal = cmd == "getlocal" - value = node.get(args, local=isLocal) - if value == None: - write("notfound\n") - else: - write("ok\n%s\n%s" % (len(value), value)) - flush() - time.sleep(2) - finish() - return - - elif cmd in ["put", "putlocal"]: - isLocal = cmd == "putlocal" - try: - size = int(readline()) - value = read(size) - res = node.put(args, value, local=isLocal) - if res: - write("ok\n") - else: - write("failed\n") - flush() - except: - traceback.print_exc() - write("exception\n") - finish() - return - - elif cmd == 'addref': - try: - res = node.addref(args, True) - if res: - write("ok\n") - else: - write("failed\n") - flush() - except: - traceback.print_exc() - write("exception\n") - finish() - return - - elif cmd == 'getref': - res = node.dest - write("ok\n") - write("%s\n" % res) - flush() - time.sleep(1) - finish() - return - - elif cmd == 'pingall': - res = node._pingall() - write(res+"\n") - finish() - return - - elif cmd == "die": - server.isRunning = False - write("server terminated\n") - finish() - - else: - write("unrecognisedcommand\n") - finish() - return - - #@-node:handle - #@+node:finish - def finish(self): - - SocketServer.StreamRequestHandler.finish(self) - - #@-node:finish - #@-others -#@-node:class KNodeReqHandler -#@+node:class KNodeClient -class KNodeClient(KBase): - """ - Talks to a KNodeServer over a socket - - Subclass this to implement Stasher clients in Python - """ - #@ @+others - #@+node:__init__ - def __init__(self, address=clientAddr): - - if type(address) in [type(()), type([])]: - self.host, self.port = clientAddr - else: - self.host, self.port = clientAddr.split(":") - self.port = int(self.port) - - self.hello() - - #@-node:__init__ - #@+node:hello - def hello(self): - - self.connect() - self.close() - #@-node:hello - #@+node:connect - def connect(self): - - 
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.sock.connect((self.host, self.port)) - - self.rfile = self.sock.makefile("rb") - self.read = self.rfile.read - self.readline = self.rfile.readline - self.wfile = self.sock.makefile("wb") - self.write = self.wfile.write - self.flush = self.wfile.flush - - # read greeting - greeting = self.readline() - parts = re.split("\\s+", greeting) - if parts[0] != "Stasher": - self.close() - raise Exception("Not connected to valid stasher interface") - - #@-node:connect - #@+node:close - def close(self): - - self.rfile.close() - #self.wfile.close() - self.sock.close() - - #@-node:close - #@+node:get - def get(self, key, **kw): - """ - sends a get command to stasher socket, and retrieves - and interprets result - - Arguments: - - key - key to retrieve - - Keywords: - - local - default False - if True, only looks in local storage - - Returns key's value if found, or None if key not found - """ - if kw.get('local', False): - cmd = 'getlocal' - else: - cmd = 'get' - - self.connect() - - self.write("%s %s\n" % (cmd, key)) - self.flush() - - #print "waiting for resp line" - res = self.readline().strip() - - if res == "ok": - size = int(self.readline()) - val = self.read(size) - self.close() - return val - else: - self.close() - return None - - #@-node:get - #@+node:put - def put(self, key, val, **kw): - """ - Tells remote stasher port to insert a file into the network - - Arguments: - - key - key to insert under - - val - value to insert under this key - - Keywords: - - local - default False - if True, only looks in local storage - - """ - if kw.get('local', False): - cmd = 'putlocal' - else: - cmd = 'put' - - self.connect() - self.write("%s %s\n" % (cmd, key)) - self.write("%s\n" % len(val)) - self.write(val) - self.flush() - - res = self.readline().strip() - - self.close() - - if res == "ok": - return True - else: - print repr(res) - return False - - #@-node:put - #@+node:addref - def addref(self, ref): - """ - 
Passes a new noderef to node - """ - self.connect() - self.write("addref %s\n" % ref) - self.flush() - - res = self.readline().strip() - - self.close() - - if res == "ok": - return True - else: - print repr(res) - return False - - #@-node:addref - #@+node:getref - def getref(self): - """ - Uplifts node's own ref - """ - self.connect() - self.write("getref\n") - self.flush() - - res = self.readline().strip() - - if res == "ok": - ref = self.readline().strip() - self.close() - return ref - else: - self.close() - return "failed" - - #@-node:getref - #@+node:pingall - def pingall(self): - """ - Uplifts node's own ref - """ - self.connect() - self.write("pingall\n") - self.flush() - - res = self.readline().strip() - - self.close() - - return res - - - #@-node:pingall - #@+node:kill - def kill(self): - """ - Tells remote server to fall on its sword - """ - try: - while 1: - self.connect() - self.write("die\n") - self.flush() - self.close() - except: - pass - - - #@-node:kill - #@+node:__getitem__ - def __getitem__(self, item): - - return self.get(item) - - #@-node:__getitem__ - #@+node:__setitem__ - def __setitem__(self, item, val): - - if not self.put(item, val): - raise Exception("Failed to insert") - - #@-node:__setitem__ - #@-others - -#@-node:class KNodeClient -#@-node:Node Socket Server -#@+node:NODE -#@+node:class KNode -class KNode(KBase): - """ - B{Public API to this Kademlia implementation} - - You should not normally need to use, or even be aware of, - any of the other classes - - And in this class, the only methods you need to worry about are: - - L{start} - starts the node running - - L{stop} - stops the node - - L{get} - retrieve a key value - - L{put} - stores a key value - - L{addref} - imports a noderef - - This class implements a single kademlia node. - Within a single process, you can create as many nodes as you like. 
- """ - #@ @+others - #@+node:attributes - SocketFactory = None # defaults to I2P socket - - #@-node:attributes - #@+node:__init__ - def __init__(self, name, **kw): - """ - Creates a kademlia node of name 'name'. - - Name is mandatory, because each name is permanently written - to the SAM bridge's store - - I thought of supporting random name generation, but went off this - idea because names get permanently stored to SAM bridge's file - - Arguments: - - name - mandatory - a short text name for the node, should - be alphanumerics, '-', '.', '_' - This name is used for the SAM socket session. - - Keywords: - - storage - optional - an instance of L{KStorageBase} or one of - its subclasses. If not given, default action is to instantiate - a L{KStorageFile} object against the given node name - """ - # remember who we are - self.name = name - - # not running yet, will launch when explicitly started, or implicitly - # when the first operation gets done - self.isRunning = False - - # create socket and get its dest, and determine our node id - self.id = KHash("") - self.log(5, "creating socket for node %s" % name) - self.log(5, "socket for node %s created" % name) - if self.SocketFactory == None: - self.SocketFactory = i2p.socket.socket - self.sock = self.SocketFactory( - "stashernode-"+name, - i2p.socket.SOCK_DGRAM, - samaddr=samAddr, - **kw) - #self.sockLock = threading.Lock() # prevents socket API reentrance - self.sock.setblocking(0) - self.dest = self.sock.dest - self.id = KHash(self.dest) - - # create our buckets - self.buckets = [] - for i in range(160): - self.buckets.append(KBucket()) - - # create our storage object, default to new instance of KStorageFile - self.storage = kw.get('storage', KStorageFile(self)) - - # dig out all previously known nodes - self.peers = self.storage.getRefs() - - # set up dict of callers awaiting replies - # keys are (peerobj, msgId) tuples, values are Queue.Queue objects - self.pendingPings = {} - - # mapping of (peer, msgId) to RPC 
object, so when RPC replies come in, - # they can be passed directly to the RPC object concerned - self.rpcBindings = {} - - # KRpc objects waiting for peer replies - used for checking for timeouts - self.rpcPending = [] - - # miscellaneous shit - self._msgIdNext = 0 - #self._msgIdLock = threading.Lock() - - # register in global map - _nodes[name] = self - - - #@-node:__init__ - #@+node:__del__ - def __del__(self): - """ - Cleanup - """ - - #@-node:__del__ - #@+node:application-level - #@+node:start - def start(self, doPings=True): - """ - Starts the node running - """ - # barf if already running - if self.isRunning: - self.log(3, "node %s is already running!" % self.name) - return - - self.log(3, "starting node %s" % self.name) - - # first step - ping all our peers - if doPings: - for peer in self.peers: - self.log(3, "doing initial ping\n%s\n%s" % (self, peer)) - KRpcPing(self, peer=peer) - - # first step - do a findNode against our own node id, and ping our - # neighbours - if greetPeersOnStartup: - neighbours = KRpcFindNode(self, hash=self.id).execute() - self.log(3, "neighbours=%s" % repr([n[:10] for n in neighbours])) - for n in neighbours: - n = self._normalisePeer(n) - KRpcPing(self, peer=n) - - # note now that we're running - self.isRunning = True - - # and enlist with the core - if runCore: - core.subscribe(self) - else: - # central core disabled, run our own receiver thread instead - thread.start_new_thread(self._threadRx, ()) - #@-node:start - #@+node:stop - def stop(self): - """ - Shuts down the node - """ - self.isRunning = 0 - if runCore: - try: - core.unsubscribe(self) - except: - pass - #@-node:stop - #@+node:get - def get(self, item, callback=None, **kw): - """ - Attempts to retrieve data from the network - - Arguments: - - item - the key we desire - - callback - optional - if given, the get will be performed - asynchronously, and callback will be invoked upon completion, with - the result as first argument - Keywords: - - local - optional - if 
True, limits this search to this local node - default is False - - Returns: - - if no callback - the item value if the item was found, or None if not - - if callback, None is returned - """ - def processResult(r): - if isinstance(r, str): - return r - return None - - if callback: - # create a func to process callback result - def onCallback(res): - callback(processResult(res)) - - self._finddata(item, onCallback, **kw) - else: - return processResult(self._finddata(item, **kw)) - - #@-node:get - #@+node:put - def put(self, key, value, callback=None, **kw): - """ - Inserts a named key into the network - - Arguments: - - key - one of: - - None - a secure key will be generated and used - - a KHash object - - a raw string which will be hashed into a KHash object - - val - a string, the value associated with the key - - Keywords: - - local - default False - if True, limits the insert to the - local node - - If the value is larger than L{maxValueSize}, a L{KValueTooLarge} - exception will occur. - """ - return self._store(key, value, callback, **kw) - - #@-node:put - #@+node:addref - def addref(self, peer, doPing=False): - """ - Given a peer node's destination, add it to our - buckets and internal data store - - Arguments: - - peer - one of: - - the I2P destination of the peer node, as - a base64 string - - a KNode object - - a KPeer object - - doPing - ping this node automatically (default False) - """ - peer = self._normalisePeer(peer) - - # remember peer if not already known - if peer.dest == self.dest: - self.log(3, "node %s, trying to add ref to ourself???" 
% self.name) - return peer - elif not self._findPeer(peer.dest): - self.peers.append(peer) - self.storage.putRefs(peer) - else: - self.log(4, "node %s, trying to add duplicate noderef %s" % ( - self.name, peer)) - return peer - - # update our KBucket - dist = self.id.distance(peer.id) - self.buckets[dist].justSeenPeer(peer) - - if doPing: - self.log(4, "doing initial ping\n%s\n%s" % (self, peer)) - KRpcPing(self, peer=peer) - - return peer - - #@-node:addref - #@+node:__getitem__ - def __getitem__(self, item): - """ - Allows dict-like accesses on the node object - """ - return self.get(item) - #@-node:__getitem__ - #@+node:__setitem__ - def __setitem__(self, item, val): - """ - Allows dict-like key setting on the node object - """ - self.put(item, val) - - #@-node:__setitem__ - #@-node:application-level - #@+node:peer/rpc methods - #@+node:_ping - def _ping(self, peer=None, callback=None, **kw): - """ - Sends a ping to remote peer, and awaits response - - Not of much real use to application level, except - perhaps for testing - - If the argument 'peer' is not given, the effect is to 'ping the - local node', which I guess might be a bit silly - - The second argument 'callback' is a callable, which if given, makes this - an asynchronous (non-blocking) call, in which case the callback will be - invoked upon completion (or timeout). 
- - If the keyword 'cbArgs' is given in addition to the callback, the callback - will fire with the results as first argument and this value as second arg - """ - if callback: - KRpcPing(self, callback, peer=peer, **kw) - else: - return KRpcPing(self, peer=peer).execute() - - #@-node:_ping - #@+node:_pingall - def _pingall(self, callback=None): - """ - Sends a ping to all peers, returns text string on replies/failures - """ - if callback: - KRpcPingAll(self, callback, **kw) - else: - return KRpcPingAll(self).execute() - - - #@-node:_pingall - #@+node:_findnode - def _findnode(self, something=None, callback=None, **kw): - """ - Mainly for testing - does a findNode query on the network - - Arguments: - - something - one of: - - plain string - the string gets hashed and used for the search - - int or long int - this gets used as the raw hash - - a KHash object - that's what gets used - - None - the value of the 'raw' keyword will be used instead - - callback - optional - if given, a callable object which will be - called upon completion, with the result as argument - - Keywords: - - local - optional - if True, only returns the closest peers known to - node. if False, causes node to query other nodes. 
- default is False - - raw - one of: - - 20-byte string - this gets used as a binary hash - - 40-byte string - this gets used as a hex hash - """ - if not kw.has_key('local'): - kw = dict(kw) - kw['local'] = False - - self.log(3, "about to instantiate findnode rpc") - if callback: - KRpcFindNode(self, callback, hash=something, **kw) - self.log(3, "asynchronously invoked findnode, expecting callback") - else: - lst = KRpcFindNode(self, hash=something, **kw).execute() - self.log(3, "back from findnode rpc") - res = [self._normalisePeer(p) for p in lst] # wrap in KPeer objects - return res - - #@-node:_findnode - #@+node:_finddata - def _finddata(self, something=None, callback=None, **kw): - """ - As for findnode, but if data is found, return the data instead - """ - if not kw.has_key('local'): - kw = dict(kw) - kw['local'] = False - - self.log(3, "about to instantiate finddata rpc") - if callback: - KRpcFindData(self, callback, hash=something, **kw) - self.log(3, "asynchronously invoked finddata, expecting callback") - else: - res = KRpcFindData(self, hash=something, **kw).execute() - self.log(3, "back from finddata rpc") - if not isinstance(res, str): - self.log(4, "findData RPC returned %s" % repr(res)) - res = [self._normalisePeer(p) for p in res] # wrap in KPeer objects - return res - - #@-node:_finddata - #@+node:_store - def _store(self, key, value, callback=None, **kw): - """ - Performs a STORE rpc - - Arguments: - - key - string - text name of key - - value - string - value to store - - Keywords: - - local - if given and true, only store value onto local store - """ - if not kw.has_key('local'): - kw = dict(kw) - kw['local'] = False - - key = shahash(key) - if callback: - KRpcStore(self, callback, key=key, value=value, **kw) - self.log(3, "asynchronously invoked findnode, expecting callback") - else: - res = KRpcStore(self, key=key, value=value, **kw).execute() - return res - - #@-node:_store - #@+node:_findPeer - def _findPeer(self, dest): - """ - Look up 
our table of current peers for a given dest. - - If dest is found, return its object, otherwise return None - """ - for peerObj in self.peers: - if peerObj.dest == dest: - return peerObj - return None - - #@-node:_findPeer - #@-node:peer/rpc methods - #@+node:comms methods - #@+node:_sendRaw - def _sendRaw(self, peer, **kw): - """ - Serialises keywords passed, and sends this as a datagram - to node 'peer' - """ - # update our KBucket - dist = self.id.distance(peer.id) - self.buckets[dist].justSeenPeer(peer) - - # save ref to this peer - self.addref(peer) - - params = dict(kw) - msgId = params.get('msgId', None) - if msgId == None: - msgId = params['msgId'] = self._msgIdAlloc() - - objenc = messageEncode(params) - self.log(5, "node %s waiting for send lock" % self.name) - #self.sockLock.acquire() - self.log(5, "node %s got send lock" % self.name) - try: - self.sock.sendto(objenc, 0, peer.dest) - except: - traceback.print_exc() - #self.sockLock.release() - self.log(5, "node %s released send lock" % self.name) - - self.log(4, "node %s sent %s to peer %s" % (self.name, params, peer.dest)) - return msgId - - #@-node:_sendRaw - #@-node:comms methods - #@+node:engine - #@+node:_threadRx - def _threadRx(self): - """ - Thread which listens for incoming datagrams and actions - accordingly - """ - self.log(3, "starting receiver thread for node %s" % self.name) - - try: - # loop to drive the node - while self.isRunning: - self._doChug() - except: - traceback.print_exc() - self.log(3, "node %s - THREAD CRASH!" % self.name) - - self.log(3, "receiver thread for node %s terminated" % self.name) - - #@-node:_threadRx - #@+node:_doChug - def _doChug(self): - """ - Do what's needed to drive the node. 
- Handle incoming packets - Check on and action timeouts - """ - # handle all available packets - while self._doRx(): - pass - - # do maintenance - eg processing timeouts - self._doHousekeeping() - - #@-node:_doChug - #@+node:_doRx - def _doRx(self): - """ - Receives and handles one incoming packet - - Returns True if a packet got handled, or False if timeout - """ - # get next packet - self.log(5, "%s seeking socket lock" % self.name) - #self.sockLock.acquire() - self.log(5, "%s got socket lock" % self.name) - try: - item = self.sock.recvfrom(-1) - except i2p.socket.BlockError: - #self.sockLock.release() - self.log(5, "%s released socket lock after timeout" % self.name) - if not runCore: - time.sleep(0.1) - return False - except: - traceback.print_exc() - self.log(5, "%s released socket lock after exception" % self.name) - #self.sockLock.release() - return True - #self.sockLock.release() - self.log(5, "%s released socket lock normally" % self.name) - - try: - (data, dest) = item - except ValueError: - self.log(3, "node %s: recvfrom returned no dest, possible spoof" \ - % self.name) - data = item[0] - dest = None - - # try to decode - try: - d = messageDecode(data) - except: - traceback.print_exc() - self.log(3, "failed to unpickle incoming data for node %s" % \ - self.name) - return True - - # ditch if not a dict - if type(d) != type({}): - self.log(3, "node %s: decoded packet is not a dict" % self.name) - return True - - # temporary workaround for sam socket bug - if dest == None: - if hasattr(d, 'has_key') and d.has_key('dest'): - dest = d['dest'] - - # try to find it in our store - peerObj = self._findPeer(dest) - if peerObj == None: - # previously unknown peer - add it to our store - peerObj = self.addref(dest) - else: - peerObj.justSeen() # already exists - refresh its timestamp - self.addref(peerObj.dest) - - # drop packet if no msgId - msgId = d.get('msgId', None) - if msgId == None: - self.log(3, "no msgId, dropping") - return True - del d['msgId'] - - 
msgType = d.get('type', 'unknown') - - if desperatelyDebugging: - pass - #set_trace() - - # if a local RPC is awaiting this message, fire its callback - item = self.rpcBindings.get((peerObj.dest, msgId), None) - if item: - rpc, peer = item - try: - rpc.unbindPeerReply(peerObj, msgId) - if desperatelyDebugging: - set_trace() - rpc.on_reply(peerObj, msgId, **d) - - except: - traceback.print_exc() - self.log(2, "unhandled exception in RPC on_reply") - else: - # find a handler, fallback on 'unknown' - self.log(5, "\nnode %s\ngot msg id %s type %s:\n%s" % ( - self.name, msgId, msgType, d)) - hdlrName = d.get('type', 'unknown') - hdlr = getattr(self, "_on_"+hdlrName) - try: - if desperatelyDebugging: - set_trace() - hdlr(peerObj, msgId, **d) - except: - traceback.print_exc() - self.log(2, "unhandled exception in unbound packet handler %s" % hdlrName) - - return True - - #@-node:_doRx - #@+node:_doHousekeeping - def _doHousekeeping(self): - """ - Performs periodical housekeeping on this node. - - Activities include: - - checking pending records for timeouts - """ - now = time.time() - - # DEPRECATED - SWITCH TO RPC-based - # check for expired pings - for msgId, (dest, q, pingDeadline) in self.pendingPings.items(): - - if pingDeadline > now: - # not timed out, leave in pending - continue - - # ping has timed out - del self.pendingPings[msgId] - q.put(False) - - # check for timed-out RPCs - for rpc in self.rpcPending[:]: - if rpc.nextTickTime != None and now >= rpc.nextTickTime: - try: - rpc.on_tick() - except: - traceback.print_exc() - self.log(2, "unhandled exception in RPC on_tick") - - #@-node:_doHousekeeping - #@-node:engine - #@+node:event handling - #@+others - #@+node:_on_ping - def _on_ping(self, peer, msgId, **kw): - """ - Handler for ping received events - """ - KRpcPing(self, (peer, msgId), local=True, **kw) - return - - # old stuff - - self.log(3, "\nnode %s\nfrom %s\nreceived:\n%s" % (self.name, peer, kw)) - - # randomly cause ping timeouts if testing - if 
testing: - howlong = random.randint(0, 5) - self.log(3, "deliberately pausing for %s seconds" % howlong) - time.sleep(howlong) - - # pong back to node - peer.send_reply(msgId=msgId) - - - #@nonl - #@-node:_on_ping - #@+node:_on_findNode - def _on_findNode(self, peer, msgId, **kw): - """ - Handles incoming findNode command - """ - KRpcFindNode(self, (peer, msgId), local=True, **kw) - - #@-node:_on_findNode - #@+node:_on_findData - def _on_findData(self, peer, msgId, **kw): - """ - Handles incoming findData command - """ - KRpcFindData(self, (peer, msgId), local=True, **kw) - - #@-node:_on_findData - #@+node:_on_store - def _on_store(self, peer, msgId, **kw): - """ - Handles incoming STORE command - """ - self.log(4, "got STORE rpc from upstream:\npeer=%s\nmsgId=%s\nkw=%s" % (peer, msgId, kw)) - - KRpcStore(self, (peer, msgId), local=True, **kw) - - #@-node:_on_store - #@+node:_on_reply - def _on_reply(self, peer, msgId, **kw): - """ - This should never happen - """ - self.log(4, "got unhandled reply:\npeer=%s\nmsgId=%s\nkw=%s" % ( - peer, msgId, kw)) - - #@-node:_on_reply - #@+node:_on_unknown - def _on_unknown(self, peer, msgId, **kw): - """ - Handler for unknown events - """ - self.log(3, "node %s from %s received msgId=%s:\n%s" % ( - self.name, peer, msgId, kw)) - - #@-node:_on_unknown - #@-others - #@-node:event handling - #@+node:Socket Client Server - #@+node:serve - def serve(self): - """ - makes this node listen on socket for incoming client - connections, and services these connections - """ - server = KNodeServer(self) - server.serve_forever() - - #@-node:serve - #@-node:Socket Client Server - #@+node:lowlevel stuff - #@+others - #@+node:__str__ - def __str__(self): - return "" % ( - self.name, - ("%x" % self.id.value)[:8], - ) - #@-node:__str__ - #@+node:__repr__ - def __repr__(self): - return str(self) - - #@-node:__repr__ - #@+node:_msgIdAlloc - def _msgIdAlloc(self): - """ - issue a new and unique message id - """ - #self._msgIdLock.acquire() - msgId = 
self._msgIdNext - self._msgIdNext += 1 - #self._msgIdLock.release() - return msgId - #@-node:_msgIdAlloc - #@+node:_normalisePeer - def _normalisePeer(self, peer): - """ - Takes either a b64 dest string, a KPeer object or a KNode object, - and returns a KPeer object - """ - # act according to whatever type we're given - if isinstance(peer, KPeer): - return peer # already desired format - elif isinstance(peer, KNode): - return KPeer(self, peer.dest) - elif isinstance(peer, str) and len(peer) > 256: - return KPeer(self, peer) - else: - self.log(3, "node %s, trying to add invalid noderef %s" % ( - self.name, peer)) - raise KBadNode(peer) - - #@-node:_normalisePeer - #@+node:__del__ - def __del__(self): - """ - Clean up on delete - """ - self.log(3, "node dying: %s" % self.name) - - try: - del _nodes[self.name] - except: - pass - - self.stop() - - #@-node:__del__ - #@-others - #@-node:lowlevel stuff - #@-others -#@-node:class KNode -#@-node:NODE -#@+node:funcs -#@+others -#@+node:userI2PDir -def userI2PDir(nodeName=None): - """ - Returns a directory under user's home dir into which - stasher files can be written - - If nodename is given, a subdirectory will be found/created - - Return value is toplevel storage dir if nodename not given, - otherwise absolute path including node dir - """ - if dataDir != None: - if not os.path.isdir(dataDir): - os.makedirs(dataDir) - return dataDir - - if sys.platform == 'win32': - home = os.getenv("APPDATA") - if home: - topDir = os.path.join(home, "stasher") - else: - topDir = os.path.join(os.getcwd(), "stasher") - else: - #return os.path.dirname(__file__) - topDir = os.path.join(os.path.expanduser('~'), ".stasher") - - if not os.path.isdir(topDir): - os.makedirs(topDir) - if nodeName == None: - return topDir - else: - nodeDir = os.path.join(topDir, nodeName) - if not os.path.isdir(nodeDir): - os.makedirs(nodeDir) - return nodeDir - -#@-node:userI2PDir -#@+node:nodePidfile -def nodePidfile(nodename): - return 
os.path.join(userI2PDir(nodename), "node.pid") - -#@-node:nodePidfile -#@+node:messageEncode -def messageEncode(params): - """ - Serialise the dict 'params' for sending - - Temporarily using bencode - replace later with a more - efficient struct-based impl. - """ - try: - return bencode.bencode(params) - except: - log(1, "encoder failed to encode: %s" % repr(params)) - raise - -#@-node:messageEncode -#@+node:messageDecode -def messageDecode(raw): - return bencode.bdecode(raw) -#@-node:messageDecode -#@+node:shahash -def shahash(somestr, bin=False): - shaobj = sha.new(somestr) - if bin: - return shaobj.digest() - else: - return shaobj.hexdigest() - -#@-node:shahash -#@+node:log -logLock = threading.Lock() - -def log(verbosity, msg, nPrev=0, clsname=None): - - global logToSocket, logFile - - # create logfile if not exists - if logFile == None: - logFile = os.path.join(userI2PDir(), "stasher.log") - - # rip the stack - caller = traceback.extract_stack()[-(2+nPrev)] - path, line, func = caller[:3] - path = os.path.split(path)[1] - - #print "func is type %s, val %s" % (type(func), repr(func)) - - #if hasattr(func, "im_class"): - # func = - - if clsname: - func = clsname + "." 
+ func - - #msg = "%s:%s:%s(): %s" % ( - # path, - # line, - # func, - # msg.replace("\n", "\n + ")) - - msg = "%s():%s: %s" % ( - func, - line, - msg.replace("\n", "\n + ")) - - # do better logging later - if verbosity > logVerbosity: - return - - if logToSocket: - try: - if isinstance(logToSocket, int): - portnum = logToSocket - logToSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - connected = 0 - while 1: - try: - logToSocket.connect(("localhost", portnum)) - break - except socket.error: - print "Please open an xterm/nc listening on %s" % logToSocket - time.sleep(1) - - logToSocket.send(msg+"\n") - except: - traceback.print_exc() - else: - print msg - - logLock.acquire() - file(logFile, "a+").write(msg + "\n") - logLock.release() -#@-node:log -#@+node:logexc -def logexc(verbosity, msg, nPrev=0, clsname=None): - - fd = StringIO("%s\n" % msg) - traceback.print_exc(file=fd) - log(verbosity, fd.getvalue(), nPrev, clsname) - -#@-node:logexc -#@+node:spawnproc -def spawnproc(*args, **kw): - """ - Spawns a process and returns its PID - - VOMIT! - - I have to do a pile of odious for the win32 side - - Returns a usable PID - - Keywords: - - priority - priority at which to spawn - default 20 (highest) - """ - # get priority, convert to a unix 'nice' value - priority = 20 - kw.get('priority', 20) - - if sys.platform != 'win32': - # *nix - easy - #print "spawnproc: launching %s" % repr(args) - - # insert nice invocation - args = ['/usr/bin/nice', '-n', str(priority)] + list(args) - return os.spawnv(os.P_NOWAIT, args[0], args) - - else: - # just close your eyes here and pretend this abomination isn't happening! 
:(( - args = list(args) - args.insert(0, sys.executable) - cmd = " ".join(args) - #print "spawnproc: launching %s" % repr(cmd) - - if 0: - try: - c = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) - c1 = _winreg.OpenKey(c, "SOFTWARE") - c2 = _winreg.OpenKey(c1, "Microsoft") - c3 = _winreg.OpenKey(c2, "Windows NT") - c4 = _winreg.OpenKey(c3, "CurrentVersion") - supportsBelowNormalPriority = 1 - except: - supportsBelowNormalPriority = 0 - else: - if sys.getwindowsversion()[3] != 2: - supportsBelowNormalPriority = 0 - else: - supportsBelowNormalPriority = 1 - - # frig the priority into a windows value - if supportsBelowNormalPriority: - if priority < 7: - pri = win32process.IDLE_PRIORITY_CLASS - elif priority < 14: - pri = 0x4000 - else: - pri = win32process.NORMAL_PRIORITY_CLASS - else: - if priority < 11: - pri = win32process.IDLE_PRIORITY_CLASS - else: - pri = win32process.NORMAL_PRIORITY_CLASS - - print "spawnproc: launching %s" % repr(args) - si = win32process.STARTUPINFO() - hdl = win32process.CreateProcess( - sys.executable, # lpApplicationName - cmd, # lpCommandLine - None, # lpProcessAttributes - None, # lpThreadAttributes - 0, # bInheritHandles - 0, # dwCreationFlags - None, # lpEnvironment - None, # lpCurrentDirectory - si, # lpStartupInfo - ) - pid = hdl[2] - #print "spawnproc: pid=%s" % pid - return pid -#@-node:spawnproc -#@+node:killproc -def killproc(pid): - if sys.platform == 'win32': - print repr(pid) - handle = win32api.OpenProcess(1, 0, pid) - print "pid %s -> %s" % (pid, repr(handle)) - #return (0 != win32api.TerminateProcess(handle, 0)) - win32process.TerminateProcess(handle, 0) - else: - return os.kill(pid, signal.SIGKILL) -#@-node:killproc -#@+node:i2psocket -def i2psocket(self, *args, **kw): - return i2p.socket.socket(*args, **kw) - -#@-node:i2psocket -#@+node:usage -def usage(detailed=False, ret=0): - - print "Usage: %s [ [...]]" % sys.argv[0] - if not detailed: - print "Type %s -h for help" % sys.argv[0] - sys.exit(ret) - - print 
"This is stasher, distributed file storage network that runs" - print "atop the anonymising I2P network (http://www.i2p.net)" - print "Written by aum - August 2004" - print - print "Options:" - print " -h, --help - display this help" - print " -v, --version - print program version" - print " -V, --verbosity=n - verbosity, default 1, 1=quiet ... 4=noisy" - print " -S, --samaddr=host:port - host:port of I2P SAM port, " - print " default %s" % i2p.socket.samaddr - print " -C, --clientaddr=host:port - host:port for socket interface to listen on" - print " for clients, default %s" % clientAddr - print " -d, --datadir=dir - directory in which stasher files get written" - print " default is ~/.stasher" - print " -f, --foreground - only valid for 'start' cmd - runs the node" - print " in foreground without spawning - for debugging" - print " -l, --localonly - only valid for get/put - restricts the get/put" - print " operation to the local node only" - print - print "Commands:" - print " start []" - print " - launches a single node, which forks off and runs in background" - print " nodename is a short unique nodename, default is '%s'" % defaultNodename - print " stop []" - print " - terminates running node " - print " get []" - print " - attempts to retrieve key from the network, saving" - print " to file if given, or to stdout if not" - print " put []" - print " - inserts key into the network, taking its content" - print " from file if given, otherwise reads content from stdin" - print " addref " - print " - adds a new noderef to the node, taking the base64 noderef" - print " from file if given, or from stdin" - print " (if you don't have any refs, visit http://stasher.i2p, or use" - print " the dest in the file aum.stasher in cvs)" - print " getref " - print " - uplifts the running node's dest as base64, writing it to file" - print " if given, or to stdout" - print " hello" - print " - checks that local node is running" - print " pingall" - print " - diagnostic tool - 
pings all peers, waits for replies or timeouts," - print " reports results" - print " help" - print " - display this help" - print - - sys.exit(0) - -#@-node:usage -#@+node:err -def err(msg): - sys.stderr.write(msg+"\n") -#@-node:err -#@+node:main -def main(): - """ - Command line interface - """ - global samAddr, clientAddr, logVerbosity, dataDir - - argv = sys.argv - argc = len(argv) - - try: - opts, args = getopt.getopt(sys.argv[1:], - "h?vV:S:C:sd:fl", - ['help', 'version', 'samaddr=', 'clientaddr=', - 'verbosity=', 'status', 'datadir=', 'foreground', - 'shortversion', 'localonly', - ]) - except: - traceback.print_exc(file=sys.stdout) - usage("You entered an invalid option") - - daemonise = True - verbosity = 2 - debug = False - foreground = False - localOnly = False - - for opt, val in opts: - - if opt in ['-h', '-?', '--help']: - usage(True) - - elif opt in ['-v', '--version']: - print "Stasher version %s" % version - sys.exit(0) - - elif opt in ['-V', '--verbosity']: - logVerbosity = int(val) - - elif opt in ['-f', '--foreground']: - foreground = True - - elif opt in ['-S', '--samaddr']: - samAddr = val - - elif opt in ['-C', '--clientaddr']: - clientAddr = val - - elif opt in ['-s', '--status']: - dumpStatus() - - elif opt in ['-d', '--datadir']: - dataDir = val - - elif opt == '--shortversion': - sys.stdout.write("%s" % version) - sys.stdout.flush() - sys.exit(0) - - elif opt in ['-l', '--localonly']: - localOnly = True - - #print "Debug - bailing" - #print repr(opts) - #print repr(args) - #sys.exit(0) - - # Barf if no command given - if len(args) == 0: - err("No command given") - usage(0, 1) - - cmd = args.pop(0) - argc = len(args) - - #print "cmd=%s, args=%s" % (repr(cmd), repr(args)) - - if cmd not in ['help', '_start', 'start', 'stop', - 'hello', 'get', 'put', 'addref', 'getref', - 'pingall']: - err("Illegal command '%s'" % cmd) - usage(0, 1) - - if cmd == 'help': - usage() - - # dirty hack - if foreground and cmd == 'start': - cmd = '_start' - - # 
magic undocumented command name - starts node, launches its client server, - # this should only happen if we're spawned from a 'start' command - if cmd == '_start': - if argc not in [0, 1]: - err("start: bad argument count") - usage() - if argc == 0: - nodeName = defaultNodename - else: - nodeName = args[0] - - # create and serve a node - #set_trace() - node = KNode(nodeName) - node.start() - log(3, "Node %s launched, dest = %s" % (node.name, node.dest)) - node.serve() - sys.exit(0) - - if cmd == 'start': - if argc not in [0, 1]: - err("start: bad argument count") - usage() - if argc == 0: - nodeName = defaultNodename - else: - nodeName = args[0] - pidFile = nodePidfile(nodeName) - - if os.path.exists(pidFile): - err(("Stasher node '%s' seems to be already running. If you are\n" % nodeName) - +"absolutely sure it's not running, please remove its pidfile:\n" - +pidFile+"\n") - sys.exit(1) - - # spawn off a node - import stasher - pid = spawnproc(sys.argv[0], "-S", samAddr, "-C", clientAddr, "_start", nodeName) - file(pidFile, "wb").write("%s" % pid) - print "Launched stasher node as pid %s" % pid - print "Pidfile is %s" % pidFile - sys.exit(0) - - if cmd == 'stop': - if argc not in [0, 1]: - err("stop: bad argument count") - usage() - if argc == 0: - nodeName = defaultNodename - else: - nodename = args[0] - - pidFile = nodePidfile(nodeName) - - if not os.path.isfile(pidFile): - err("Stasher node '%s' is not running - cannot kill\n" % nodeName) - sys.exit(1) - - pid = int(file(pidFile, "rb").read()) - try: - killproc(pid) - print "Killed stasher node (pid %s)" % pid - except: - print "Failed to kill node (pid %s)" % pid - os.unlink(pidFile) - sys.exit(0) - - try: - client = KNodeClient() - except: - traceback.print_exc() - err("Node doesn't seem to be up, or reachable on %s" % clientAddr) - return - - - if cmd == 'hello': - err("Node seems fine") - sys.exit(0) - - elif cmd == 'get': - if argc not in [1, 2]: - err("get: bad argument count") - usage() - - key = args[0] 
- - if argc == 2: - # try to open output file - path = args[1] - try: - outfile = file(path, "wb") - except: - err("Cannot open output file %s" % repr(path)) - usage(0, 1) - else: - outfile = sys.stdout - - if logVerbosity >= 3: - sys.stderr.write("Searching for key - may take up to %s seconds or more\n" % ( - timeout['findData'])) - res = client.get(key, local=localOnly) - if res == None: - err("Failed to retrieve '%s'" % key) - sys.exit(1) - else: - outfile.write(res) - outfile.flush() - outfile.close() - sys.exit(0) - - elif cmd == 'put': - if argc not in [1, 2]: - err("put: bad argument count") - usage() - - key = args[0] - - if argc == 2: - # try to open input file - path = args[1] - try: - infile = file(path, "rb") - except: - err("Cannot open input file %s" % repr(path)) - usage(0, 1) - else: - infile = sys.stdin - - val = infile.read() - if len(val) > maxValueSize: - err("File is too big - please trim to %s" % maxValueSize) - - if logVerbosity >= 3: - sys.stderr.write("Inserting key - may take up to %s seconds\n" % ( - timeout['findNode'] + timeout['store'])) - res = client.put(key, val, local=localOnly) - if res == None: - err("Failed to insert '%s'" % key) - sys.exit(1) - else: - sys.exit(0) - - elif cmd == 'addref': - if argc not in [0, 1]: - err("addref: bad argument count") - usage() - - if argc == 1: - # try to open input file - path = args[0] - try: - infile = file(path, "rb") - except: - err("Cannot open input file %s" % repr(path)) - usage(0, 1) - else: - infile = sys.stdin - - ref = infile.read() - - res = client.addref(ref) - if res == None: - err("Failed to add ref") - sys.exit(1) - else: - sys.exit(0) - - elif cmd == 'getref': - if argc not in [0, 1]: - err("getref: bad argument count") - usage() - - res = client.getref() - - if argc == 1: - # try to open output file - path = args[0] - try: - outfile = file(path, "wb") - except: - err("Cannot open output file %s" % repr(path)) - usage(0, 1) - else: - outfile = sys.stdout - - if res == None: - 
err("Failed to retrieve node ref") - sys.exit(1) - else: - outfile.write(res) - outfile.flush() - outfile.close() - sys.exit(0) - - elif cmd == 'pingall': - if logVerbosity > 2: - print "Pinging all peers, waiting %s seconds for results" % timeout['ping'] - res = client.pingall() - print res - sys.exit(0) - -#@-node:main -#@-others -#@-node:funcs -#@+node:MAINLINE -#@+others -#@+node:mainline -if __name__ == '__main__': - - main() - -#@-node:mainline -#@-others -#@-node:MAINLINE -#@-others - -#@-node:@file stasher.py -#@-leo diff --git a/apps/streaming/java/build.xml b/apps/streaming/java/build.xml index 568367033..a32ca077c 100644 --- a/apps/streaming/java/build.xml +++ b/apps/streaming/java/build.xml @@ -25,7 +25,14 @@ + + + @@ -33,6 +40,9 @@ + + + diff --git a/apps/streaming/java/src/net/i2p/client/streaming/Connection.java b/apps/streaming/java/src/net/i2p/client/streaming/Connection.java index 100f59606..6b99fdc00 100644 --- a/apps/streaming/java/src/net/i2p/client/streaming/Connection.java +++ b/apps/streaming/java/src/net/i2p/client/streaming/Connection.java @@ -12,7 +12,9 @@ import net.i2p.client.I2PSession; import net.i2p.data.DataHelper; import net.i2p.data.Destination; import net.i2p.util.Log; +import net.i2p.util.SimpleScheduler; import net.i2p.util.SimpleTimer; +import net.i2p.util.SimpleTimer2; /** * Maintain the state controlling a streaming connection between two @@ -45,6 +47,7 @@ public class Connection { private long _congestionWindowEnd; private long _highestAckedThrough; private boolean _isInbound; + private boolean _updatedShareOpts; /** Packet ID (Long) to PacketLocal for sent but unacked packets */ private Map _outboundPackets; private PacketQueue _outboundQueue; @@ -67,6 +70,7 @@ public class Connection { /** how many messages have been resent and not yet ACKed? 
*/ private int _activeResends; private ConEvent _connectionEvent; + private int _randomWait; private long _lifetimeBytesSent; private long _lifetimeBytesReceived; @@ -120,8 +124,10 @@ public class Connection { _activeResends = 0; _resetSentOn = -1; _isInbound = false; + _updatedShareOpts = false; _connectionEvent = new ConEvent(); _hardDisconnected = false; + _randomWait = _context.random().nextInt(10*1000); // just do this once to reduce usage _context.statManager().createRateStat("stream.con.windowSizeAtCongestion", "How large was our send window when we send a dup?", "Stream", new long[] { 60*1000, 10*60*1000, 60*60*1000 }); _context.statManager().createRateStat("stream.chokeSizeBegin", "How many messages were outstanding when we started to choke?", "Stream", new long[] { 60*1000, 10*60*1000, 60*60*1000 }); _context.statManager().createRateStat("stream.chokeSizeEnd", "How many messages were outstanding when we stopped being choked?", "Stream", new long[] { 60*1000, 10*60*1000, 60*60*1000 }); @@ -245,7 +251,7 @@ public class Connection { void sendReset() { if (_disconnectScheduledOn < 0) { _disconnectScheduledOn = _context.clock().now(); - SimpleTimer.getInstance().addEvent(new DisconnectEvent(), DISCONNECT_TIMEOUT); + SimpleScheduler.getInstance().addEvent(new DisconnectEvent(), DISCONNECT_TIMEOUT); } long now = _context.clock().now(); if (_resetSentOn + 10*1000 > now) return; // don't send resets too fast @@ -323,7 +329,8 @@ public class Connection { if (_log.shouldLog(Log.DEBUG)) _log.debug("Resend in " + timeout + " for " + packet, new Exception("Sent by")); - RetransmissionTimer.getInstance().addEvent(new ResendPacketEvent(packet, timeout + _context.clock().now()), timeout); + // schedules itself + ResendPacketEvent rpe = new ResendPacketEvent(packet, timeout); } _context.statManager().getStatLog().addData(Packet.toId(_sendStreamId), "stream.rtt", _options.getRTT(), _options.getWindowSize()); @@ -459,7 +466,7 @@ public class Connection { void resetReceived() 
{ if (_disconnectScheduledOn < 0) { _disconnectScheduledOn = _context.clock().now(); - SimpleTimer.getInstance().addEvent(new DisconnectEvent(), DISCONNECT_TIMEOUT); + SimpleScheduler.getInstance().addEvent(new DisconnectEvent(), DISCONNECT_TIMEOUT); } _resetReceived = true; MessageOutputStream mos = _outputStream; @@ -507,7 +514,7 @@ public class Connection { if (removeFromConMgr) { if (_disconnectScheduledOn < 0) { _disconnectScheduledOn = _context.clock().now(); - SimpleTimer.getInstance().addEvent(new DisconnectEvent(), DISCONNECT_TIMEOUT); + SimpleScheduler.getInstance().addEvent(new DisconnectEvent(), DISCONNECT_TIMEOUT); } } _connected = false; @@ -523,7 +530,7 @@ public class Connection { if (_receiver != null) _receiver.destroy(); if (_activityTimer != null) - SimpleTimer.getInstance().removeEvent(_activityTimer); + _activityTimer.cancel(); //_activityTimer = null; if (_inputStream != null) _inputStream.streamErrorOccurred(new IOException("disconnected!")); @@ -586,6 +593,8 @@ public class Connection { if (_remotePeerSet) throw new RuntimeException("Remote peer already set [" + _remotePeer + ", " + peer + "]"); _remotePeerSet = true; _remotePeer = peer; + // now that we know who the other end is, get the rtt etc. 
from the cache + _connectionManager.updateOptsFromShare(this); } private boolean _sendStreamIdSet = false; @@ -704,12 +713,18 @@ public class Connection { _closeSentOn = when; if (_disconnectScheduledOn < 0) { _disconnectScheduledOn = _context.clock().now(); - SimpleTimer.getInstance().addEvent(new DisconnectEvent(), DISCONNECT_TIMEOUT); + SimpleScheduler.getInstance().addEvent(new DisconnectEvent(), DISCONNECT_TIMEOUT); } } public long getCloseReceivedOn() { return _closeReceivedOn; } public void setCloseReceivedOn(long when) { _closeReceivedOn = when; } - + + public void updateShareOpts() { + if (_closeSentOn > 0 && !_updatedShareOpts) { + _connectionManager.updateShareOpts(this); + _updatedShareOpts = true; + } + } public void incrementUnackedPacketsReceived() { _unackedPacketsReceived++; } public int getUnackedPacketsReceived() { return _unackedPacketsReceived; } /** how many packets have we sent but not yet received an ACK for? @@ -811,16 +826,21 @@ public class Connection { return; } long howLong = _options.getInactivityTimeout(); - howLong += _context.random().nextInt(30*1000); // randomize it a bit, so both sides don't do it at once + howLong += _randomWait; // randomize it a bit, so both sides don't do it at once if (_log.shouldLog(Log.DEBUG)) _log.debug("Resetting the inactivity timer to " + howLong, new Exception(toString())); // this will get rescheduled, and rescheduled, and rescheduled... 
- RetransmissionTimer.getInstance().removeEvent(_activityTimer); - RetransmissionTimer.getInstance().addEvent(_activityTimer, howLong); + _activityTimer.reschedule(howLong, false); // use the later of current and previous timeout } - private class ActivityTimer implements SimpleTimer.TimedEvent { + private class ActivityTimer extends SimpleTimer2.TimedEvent { + public ActivityTimer() { + super(RetransmissionTimer.getInstance()); + setFuzz(5*1000); // sloppy timer, don't reschedule unless at least 5s later + } public void timeReached() { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Fire inactivity timer on " + Connection.this.toString()); // uh, nothing more to do... if (!_connected) { if (_log.shouldLog(Log.DEBUG)) _log.debug("Inactivity timeout reached, but we are already closed"); @@ -830,7 +850,7 @@ public class Connection { long left = getTimeLeft(); if (left > 0) { if (_log.shouldLog(Log.DEBUG)) _log.debug("Inactivity timeout reached, but there is time left (" + left + ")"); - RetransmissionTimer.getInstance().addEvent(ActivityTimer.this, left); + schedule(left); return; } // these are either going to time out or cause further rescheduling @@ -844,24 +864,31 @@ public class Connection { return; } // if one of us can't talk... - if ( (_closeSentOn > 0) || (_closeReceivedOn > 0) ) { - if (_log.shouldLog(Log.DEBUG)) _log.debug("Inactivity timeout reached, but we are closing"); - return; - } + // No - not true - data and acks are still going back and forth. + // Prevent zombie connections by keeping the inactivity timer. + // Not sure why... receiving a close but never sending one? + // If so we can probably re-enable this for _closeSentOn. + // For further investigation... 
+ //if ( (_closeSentOn > 0) || (_closeReceivedOn > 0) ) { + // if (_log.shouldLog(Log.DEBUG)) _log.debug("Inactivity timeout reached, but we are closing"); + // return; + //} if (_log.shouldLog(Log.DEBUG)) _log.debug("Inactivity timeout reached, with action=" + _options.getInactivityAction()); // bugger it, might as well do the hard work now switch (_options.getInactivityAction()) { - case ConnectionOptions.INACTIVITY_ACTION_SEND: - if (_log.shouldLog(Log.WARN)) - _log.warn("Sending some data due to inactivity"); - _receiver.send(null, 0, 0, true); - break; case ConnectionOptions.INACTIVITY_ACTION_NOOP: if (_log.shouldLog(Log.WARN)) _log.warn("Inactivity timer expired, but we aint doin' shit"); break; + case ConnectionOptions.INACTIVITY_ACTION_SEND: + if (_closeSentOn <= 0 && _closeReceivedOn <= 0) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Sending some data due to inactivity"); + _receiver.send(null, 0, 0, true); + break; + } // else fall through case ConnectionOptions.INACTIVITY_ACTION_DISCONNECT: // fall through default: @@ -877,7 +904,9 @@ public class Connection { _inputStream.streamErrorOccurred(new IOException("Inactivity timeout")); _outputStream.streamErrorOccurred(new IOException("Inactivity timeout")); - disconnect(false); + // Clean disconnect if we have already scheduled one + // (generally because we already sent a close) + disconnect(_disconnectScheduledOn >= 0); break; } } @@ -948,9 +977,9 @@ public class Connection { } if (getResetSent()) - buf.append(" reset sent"); + buf.append(" reset sent ").append(DataHelper.formatDuration(_context.clock().now() - getResetSentOn())).append(" ago"); if (getResetReceived()) - buf.append(" reset received"); + buf.append(" reset received ").append(DataHelper.formatDuration(_context.clock().now() - getDisconnectScheduledOn())).append(" ago"); if (getCloseSentOn() > 0) { buf.append(" close sent "); long timeSinceClose = _context.clock().now() - getCloseSentOn(); @@ -958,7 +987,7 @@ public class Connection { 
buf.append(" ago"); } if (getCloseReceivedOn() > 0) - buf.append(" close received"); + buf.append(" close received ").append(DataHelper.formatDuration(_context.clock().now() - getCloseReceivedOn())).append(" ago"); buf.append(" sent: ").append(1 + _lastSendId); if (_inputStream != null) buf.append(" rcvd: ").append(1 + _inputStream.getHighestBlockId() - missing); @@ -990,21 +1019,23 @@ public class Connection { /** * If we have been explicitly NACKed three times, retransmit the packet even if - * there are other packets in flight. + * there are other packets in flight. 3 takes forever, let's try 2. * */ - static final int FAST_RETRANSMIT_THRESHOLD = 3; + static final int FAST_RETRANSMIT_THRESHOLD = 2; /** * Coordinate the resends of a given packet */ - private class ResendPacketEvent implements SimpleTimer.TimedEvent { + public class ResendPacketEvent extends SimpleTimer2.TimedEvent { private PacketLocal _packet; private long _nextSendTime; - public ResendPacketEvent(PacketLocal packet, long sendTime) { + public ResendPacketEvent(PacketLocal packet, long delay) { + super(RetransmissionTimer.getInstance()); _packet = packet; - _nextSendTime = sendTime; + _nextSendTime = delay + _context.clock().now(); packet.setResendPacketEvent(ResendPacketEvent.this); + schedule(delay); } public long getNextSendTime() { return _nextSendTime; } @@ -1012,6 +1043,10 @@ public class Connection { /** * Retransmit the packet if we need to. * + * ackImmediately() above calls directly in here, so + * we have to use forceReschedule() instead of schedule() below, + * to prevent duplicates in the timer queue. 
+ * * @param penalize true if this retransmission is caused by a timeout, false if we * are just sending this packet instead of an ACK * @return true if the packet was sent, false if it was not @@ -1020,7 +1055,9 @@ public class Connection { if (_packet.getAckTime() > 0) return false; - if (_resetSent || _resetReceived) { + if (_resetSent || _resetReceived || !_connected) { + if(_log.shouldLog(Log.WARN) && (!_resetSent) && (!_resetReceived)) + _log.warn("??? no resets but not connected: " + _packet); // don't think this is possible _packet.cancelled(); return false; } @@ -1044,7 +1081,7 @@ public class Connection { if (_log.shouldLog(Log.INFO)) _log.info("Delaying resend of " + _packet + " as there are " + _activeResends + " active resends already in play"); - RetransmissionTimer.getInstance().addEvent(ResendPacketEvent.this, 1000); + forceReschedule(1000); _nextSendTime = 1000 + _context.clock().now(); return false; } @@ -1104,26 +1141,6 @@ public class Connection { _context.sessionKeyManager().failTags(_remotePeer.getPublicKey()); } - if (numSends - 1 <= _options.getMaxResends()) { - if (_log.shouldLog(Log.INFO)) - _log.info("Resend packet " + _packet + " time " + numSends + - " activeResends: " + _activeResends + - " (wsize " - + newWindowSize + " lifetime " - + (_context.clock().now() - _packet.getCreatedOn()) + "ms)"); - _outboundQueue.enqueue(_packet); - _lastSendTime = _context.clock().now(); - } - - // acked during resending (... 
or somethin') - if ( (_packet.getAckTime() > 0) && (_packet.getNumSends() > 1) ) { - _activeResends--; - synchronized (_outboundPackets) { - _outboundPackets.notifyAll(); - } - return true; - } - if (numSends - 1 > _options.getMaxResends()) { if (_log.shouldLog(Log.DEBUG)) _log.debug("Too many resends"); @@ -1137,11 +1154,32 @@ public class Connection { long timeout = rto << (numSends-1); if ( (timeout > MAX_RESEND_DELAY) || (timeout <= 0) ) timeout = MAX_RESEND_DELAY; + // set this before enqueue() as it passes it on to the router + _nextSendTime = timeout + _context.clock().now(); + + if (_log.shouldLog(Log.INFO)) + _log.info("Resend packet " + _packet + " time " + numSends + + " activeResends: " + _activeResends + + " (wsize " + + newWindowSize + " lifetime " + + (_context.clock().now() - _packet.getCreatedOn()) + "ms)"); + _outboundQueue.enqueue(_packet); + _lastSendTime = _context.clock().now(); + if (_log.shouldLog(Log.DEBUG)) _log.debug("Scheduling resend in " + timeout + "ms for " + _packet); - RetransmissionTimer.getInstance().addEvent(ResendPacketEvent.this, timeout); - _nextSendTime = timeout + _context.clock().now(); + forceReschedule(timeout); } + + // acked during resending (... 
or somethin') + if ( (_packet.getAckTime() > 0) && (_packet.getNumSends() > 1) ) { + _activeResends--; + synchronized (_outboundPackets) { + _outboundPackets.notifyAll(); + } + return true; + } + return true; } else { //if (_log.shouldLog(Log.DEBUG)) diff --git a/apps/streaming/java/src/net/i2p/client/streaming/ConnectionHandler.java b/apps/streaming/java/src/net/i2p/client/streaming/ConnectionHandler.java index a189d9a30..37a1f0d7c 100644 --- a/apps/streaming/java/src/net/i2p/client/streaming/ConnectionHandler.java +++ b/apps/streaming/java/src/net/i2p/client/streaming/ConnectionHandler.java @@ -1,34 +1,47 @@ package net.i2p.client.streaming; -import java.net.ConnectException; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; import java.util.ArrayList; import java.util.List; import net.i2p.I2PAppContext; -import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.util.SimpleScheduler; import net.i2p.util.SimpleTimer; /** * Receive new connection attempts + * + * Use a bounded queue to limit the damage from SYN floods, + * router overload, or a slow client + * + * @author zzz modded to use concurrent and bound queue size */ class ConnectionHandler { private I2PAppContext _context; private Log _log; private ConnectionManager _manager; - private List _synQueue; + private LinkedBlockingQueue _synQueue; private boolean _active; private int _acceptTimeout; /** max time after receiveNewSyn() and before the matched accept() */ private static final int DEFAULT_ACCEPT_TIMEOUT = 3*1000; + + /** + * This is both SYNs and subsequent packets, and with an initial window size of 12, + * this is a backlog of 5 to 64 Syns, which seems like plenty for now + * Don't make this too big because the removal by all the TimeoutSyns is O(n**2) - sortof. 
+ */ + private static final int MAX_QUEUE_SIZE = 64; /** Creates a new instance of ConnectionHandler */ public ConnectionHandler(I2PAppContext context, ConnectionManager mgr) { _context = context; _log = context.logManager().getLog(ConnectionHandler.class); _manager = mgr; - _synQueue = new ArrayList(5); + _synQueue = new LinkedBlockingQueue(MAX_QUEUE_SIZE); _active = false; _acceptTimeout = DEFAULT_ACCEPT_TIMEOUT; } @@ -36,9 +49,11 @@ class ConnectionHandler { public void setActive(boolean active) { if (_log.shouldLog(Log.DEBUG)) _log.debug("setActive(" + active + ") called"); - synchronized (_synQueue) { - _active = active; - _synQueue.notifyAll(); // so we break from the accept() + _active = active; + if (!active) { + try { + _synQueue.put(new PoisonPacket()); // so we break from the accept() - waits until space is available + } catch (InterruptedException ie) {} } } public boolean getActive() { return _active; } @@ -46,20 +61,31 @@ class ConnectionHandler { /** * Non-SYN packets with a zero SendStreamID may also be queued here so * that they don't get thrown away while the SYN packet before it is queued. + * + * Additional overload protection may be required here... + * We don't have a 3-way handshake, so the SYN fully opens a connection. + * Does that make us more or less vulnerable to SYN flooding? + * */ public void receiveNewSyn(Packet packet) { if (!_active) { if (_log.shouldLog(Log.WARN)) _log.warn("Dropping new SYN request, as we're not listening"); - sendReset(packet); + if (packet.isFlagSet(Packet.FLAG_SYNCHRONIZE)) + sendReset(packet); return; } if (_log.shouldLog(Log.DEBUG)) _log.debug("Receive new SYN: " + packet + ": timeout in " + _acceptTimeout); - RetransmissionTimer.getInstance().addEvent(new TimeoutSyn(packet), _acceptTimeout); - synchronized (_synQueue) { - _synQueue.add(packet); - _synQueue.notifyAll(); + // also check if expiration of the head is long past for overload detection with peek() ? 
+ boolean success = _synQueue.offer(packet); // fail immediately if full + if (success) { + SimpleScheduler.getInstance().addEvent(new TimeoutSyn(packet), _acceptTimeout); + } else { + if (_log.shouldLog(Log.WARN)) + _log.warn("Dropping new SYN request, as the queue is full"); + if (packet.isFlagSet(Packet.FLAG_SYNCHRONIZE)) + sendReset(packet); } } @@ -85,41 +111,44 @@ class ConnectionHandler { while (true) { if (!_active) { // fail all the ones we had queued up - synchronized (_synQueue) { - for (int i = 0; i < _synQueue.size(); i++) { - Packet packet = (Packet)_synQueue.get(i); - sendReset(packet); - } - _synQueue.clear(); + while(true) { + Packet packet = _synQueue.poll(); // fails immediately if empty + if (packet == null || packet.getOptionalDelay() == PoisonPacket.MAX_DELAY_REQUEST) + break; + sendReset(packet); } return null; } Packet syn = null; - synchronized (_synQueue) { - while ( _active && (_synQueue.size() <= 0) ) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Accept("+ timeoutMs+"): active=" + _active + " queue: " - + _synQueue.size()); - if (timeoutMs < 0) { - try { _synQueue.wait(); } catch (InterruptedException ie) {} - } else { - long remaining = expiration - _context.clock().now(); -// BUGFIX -// The specified amount of real time has elapsed, more or less. -// If timeout is zero, however, then real time is not taken into consideration -// and the thread simply waits until notified. 
- if (remaining < 1) - break; - try { _synQueue.wait(remaining); } catch (InterruptedException ie) {} - } - } - if (_active && _synQueue.size() > 0) { - syn = (Packet)_synQueue.remove(0); + while ( _active && syn == null) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Accept("+ timeoutMs+"): active=" + _active + " queue: " + + _synQueue.size()); + if (timeoutMs <= 0) { + try { + syn = _synQueue.take(); // waits forever + } catch (InterruptedException ie) {} + } else { + long remaining = expiration - _context.clock().now(); + // (dont think this applies anymore for LinkedBlockingQueue) + // BUGFIX + // The specified amount of real time has elapsed, more or less. + // If timeout is zero, however, then real time is not taken into consideration + // and the thread simply waits until notified. + if (remaining < 1) + break; + try { + syn = _synQueue.poll(remaining, TimeUnit.MILLISECONDS); // waits the specified time max + } catch (InterruptedException ie) {} + break; } } if (syn != null) { + if (syn.getOptionalDelay() == PoisonPacket.MAX_DELAY_REQUEST) + return null; + // deal with forged / invalid syn packets // Handle both SYN and non-SYN packets in the queue @@ -184,10 +213,7 @@ class ConnectionHandler { } public void timeReached() { - boolean removed = false; - synchronized (_synQueue) { - removed = _synQueue.remove(_synPacket); - } + boolean removed = _synQueue.remove(_synPacket); if (removed) { if (_synPacket.isFlagSet(Packet.FLAG_SYNCHRONIZE)) @@ -201,4 +227,17 @@ class ConnectionHandler { } } } + + /** + * Simple end-of-queue marker. 
+ * The standard class limits the delay to MAX_DELAY_REQUEST so + * an evil user can't use this to shut us down + */ + private static class PoisonPacket extends Packet { + public static final int MAX_DELAY_REQUEST = Packet.MAX_DELAY_REQUEST + 1; + + public PoisonPacket() { + setOptionalDelay(MAX_DELAY_REQUEST); + } + } } diff --git a/apps/streaming/java/src/net/i2p/client/streaming/ConnectionManager.java b/apps/streaming/java/src/net/i2p/client/streaming/ConnectionManager.java index da2b1ab12..7826ba2a8 100644 --- a/apps/streaming/java/src/net/i2p/client/streaming/ConnectionManager.java +++ b/apps/streaming/java/src/net/i2p/client/streaming/ConnectionManager.java @@ -30,6 +30,7 @@ public class ConnectionManager { private PacketQueue _outboundQueue; private SchedulerChooser _schedulerChooser; private ConnectionPacketHandler _conPacketHandler; + private TCBShare _tcbShare; /** Inbound stream ID (Long) to Connection map */ private Map _connectionByInboundId; /** Ping ID (Long) to PingRequest */ @@ -52,6 +53,7 @@ public class ConnectionManager { _connectionHandler = new ConnectionHandler(context, this); _schedulerChooser = new SchedulerChooser(context); _conPacketHandler = new ConnectionPacketHandler(context); + _tcbShare = new TCBShare(context); _session = session; session.setSessionListener(_messageHandler); _outboundQueue = new PacketQueue(context, session, this); @@ -127,6 +129,7 @@ public class ConnectionManager { */ public Connection receiveConnection(Packet synPacket) { Connection con = new Connection(_context, this, _schedulerChooser, _outboundQueue, _conPacketHandler, new ConnectionOptions(_defaultOptions)); + _tcbShare.updateOptsFromShare(con); con.setInbound(); long receiveId = _context.random().nextLong(Packet.MAX_STREAM_ID-1)+1; boolean reject = false; @@ -277,6 +280,8 @@ public class ConnectionManager { public ConnectionHandler getConnectionHandler() { return _connectionHandler; } public I2PSession getSession() { return _session; } public PacketQueue 
getPacketQueue() { return _outboundQueue; } + public void updateOptsFromShare(Connection con) { _tcbShare.updateOptsFromShare(con); } + public void updateShareOpts(Connection con) { _tcbShare.updateShareOpts(con); } /** * Something b0rked hard, so kill all of our connections without mercy. @@ -292,6 +297,7 @@ public class ConnectionManager { _connectionByInboundId.clear(); _connectionLock.notifyAll(); } + _tcbShare.stop(); } /** diff --git a/apps/streaming/java/src/net/i2p/client/streaming/ConnectionOptions.java b/apps/streaming/java/src/net/i2p/client/streaming/ConnectionOptions.java index 3cc159f98..4363e3f49 100644 --- a/apps/streaming/java/src/net/i2p/client/streaming/ConnectionOptions.java +++ b/apps/streaming/java/src/net/i2p/client/streaming/ConnectionOptions.java @@ -54,9 +54,9 @@ public class ConnectionOptions extends I2PSocketOptionsImpl { public static final String PROP_SLOW_START_GROWTH_RATE_FACTOR = "i2p.streaming.slowStartGrowthRateFactor"; private static final int TREND_COUNT = 3; - static final int INITIAL_WINDOW_SIZE = 12; + static final int INITIAL_WINDOW_SIZE = 6; static final int DEFAULT_MAX_SENDS = 8; - public static final int DEFAULT_INITIAL_RTT = 10*1000; + public static final int DEFAULT_INITIAL_RTT = 8*1000; static final int MIN_WINDOW_SIZE = 1; /** diff --git a/apps/streaming/java/src/net/i2p/client/streaming/ConnectionPacketHandler.java b/apps/streaming/java/src/net/i2p/client/streaming/ConnectionPacketHandler.java index 6a062d4a6..f7b245cb8 100644 --- a/apps/streaming/java/src/net/i2p/client/streaming/ConnectionPacketHandler.java +++ b/apps/streaming/java/src/net/i2p/client/streaming/ConnectionPacketHandler.java @@ -7,6 +7,7 @@ import net.i2p.I2PException; import net.i2p.data.DataHelper; import net.i2p.data.Destination; import net.i2p.util.Log; +import net.i2p.util.SimpleScheduler; import net.i2p.util.SimpleTimer; /** @@ -168,7 +169,7 @@ public class ConnectionPacketHandler { // take note of congestion if (_log.shouldLog(Log.WARN)) 
_log.warn("congestion.. dup " + packet); - RetransmissionTimer.getInstance().addEvent(new AckDup(con), con.getOptions().getSendAckDelay()); + SimpleScheduler.getInstance().addEvent(new AckDup(con), con.getOptions().getSendAckDelay()); //con.setNextSendTime(_context.clock().now() + con.getOptions().getSendAckDelay()); //fastAck = true; } else { @@ -213,6 +214,10 @@ public class ConnectionPacketHandler { packet.releasePayload(); } + // update the TCB Cache now that we've processed the acks and updated our rtt etc. + if (isNew && packet.isFlagSet(Packet.FLAG_CLOSE) && packet.isFlagSet(Packet.FLAG_SIGNATURE_INCLUDED)) + con.updateShareOpts(); + //if (choke) // con.fastRetransmit(); } diff --git a/apps/streaming/java/src/net/i2p/client/streaming/MessageOutputStream.java b/apps/streaming/java/src/net/i2p/client/streaming/MessageOutputStream.java index 3819f8999..4a810d565 100644 --- a/apps/streaming/java/src/net/i2p/client/streaming/MessageOutputStream.java +++ b/apps/streaming/java/src/net/i2p/client/streaming/MessageOutputStream.java @@ -8,7 +8,7 @@ import net.i2p.I2PAppContext; import net.i2p.data.ByteArray; import net.i2p.util.ByteCache; import net.i2p.util.Log; -import net.i2p.util.SimpleTimer; +import net.i2p.util.SimpleTimer2; /** * A stream that we can shove data into that fires off those bytes @@ -201,13 +201,20 @@ public class MessageOutputStream extends OutputStream { * Flush data that has been enqued but not flushed after a certain * period of inactivity */ - private class Flusher implements SimpleTimer.TimedEvent { + private class Flusher extends SimpleTimer2.TimedEvent { private boolean _enqueued; + public Flusher() { + super(RetransmissionTimer.getInstance()); + } public void enqueue() { // no need to be overly worried about duplicates - it would just // push it further out if (!_enqueued) { - RetransmissionTimer.getInstance().addEvent(_flusher, _passiveFlushDelay); + // Maybe we could just use schedule() here - or even SimpleScheduler - not sure... 
+ // To be safe, use forceReschedule() so we don't get lots of duplicates + // We've seen the queue blow up before, maybe it was this before the rewrite... + // So perhaps it IS wise to be "overly worried" ... + forceReschedule(_passiveFlushDelay); if (_log.shouldLog(Log.DEBUG)) _log.debug("Enqueueing the flusher for " + _passiveFlushDelay + "ms out"); } else { diff --git a/apps/streaming/java/src/net/i2p/client/streaming/Packet.java b/apps/streaming/java/src/net/i2p/client/streaming/Packet.java index 61a7de96d..8fc0f02fa 100644 --- a/apps/streaming/java/src/net/i2p/client/streaming/Packet.java +++ b/apps/streaming/java/src/net/i2p/client/streaming/Packet.java @@ -41,6 +41,8 @@ import net.i2p.util.Log; *
    • {@link #FLAG_DELAY_REQUESTED}: 2 byte integer
    • *
    • {@link #FLAG_MAX_PACKET_SIZE_INCLUDED}: 2 byte integer
    • *
    • {@link #FLAG_PROFILE_INTERACTIVE}: no option data
    • + *
    • {@link #FLAG_ECHO}: no option data
    • + *
    • {@link #FLAG_NO_ACK}: no option data
    • * * *

      If the signature is included, it uses the Destination's DSA key @@ -144,7 +146,7 @@ public class Packet { public static final int FLAG_NO_ACK = (1 << 10); public static final int DEFAULT_MAX_SIZE = 32*1024; - private static final int MAX_DELAY_REQUEST = 65535; + protected static final int MAX_DELAY_REQUEST = 65535; public Packet() { } @@ -530,6 +532,8 @@ public class Packet { public boolean verifySignature(I2PAppContext ctx, Destination from, byte buffer[]) { if (!isFlagSet(FLAG_SIGNATURE_INCLUDED)) return false; if (_optionSignature == null) return false; + // prevent receiveNewSyn() ... !active ... sendReset() ... verifySignature ... NPE + if (from == null) return false; int size = writtenSize(); diff --git a/apps/streaming/java/src/net/i2p/client/streaming/PacketLocal.java b/apps/streaming/java/src/net/i2p/client/streaming/PacketLocal.java index f0288df5f..cbb89e79e 100644 --- a/apps/streaming/java/src/net/i2p/client/streaming/PacketLocal.java +++ b/apps/streaming/java/src/net/i2p/client/streaming/PacketLocal.java @@ -6,7 +6,7 @@ import net.i2p.I2PAppContext; import net.i2p.data.Destination; import net.i2p.data.SessionKey; import net.i2p.util.Log; -import net.i2p.util.SimpleTimer; +import net.i2p.util.SimpleTimer2; /** * coordinate local attributes about a packet - send time, ack time, number of @@ -27,7 +27,7 @@ public class PacketLocal extends Packet implements MessageOutputStream.WriteStat private long _cancelledOn; private volatile int _nackCount; private volatile boolean _retransmitted; - private SimpleTimer.TimedEvent _resendEvent; + private SimpleTimer2.TimedEvent _resendEvent; public PacketLocal(I2PAppContext ctx, Destination to) { this(ctx, to, null); @@ -93,7 +93,7 @@ public class PacketLocal extends Packet implements MessageOutputStream.WriteStat releasePayload(); notifyAll(); } - SimpleTimer.getInstance().removeEvent(_resendEvent); + _resendEvent.cancel(); } public void cancelled() { synchronized (this) { @@ -101,11 +101,11 @@ public class 
PacketLocal extends Packet implements MessageOutputStream.WriteStat releasePayload(); notifyAll(); } - SimpleTimer.getInstance().removeEvent(_resendEvent); + _resendEvent.cancel(); if (_log.shouldLog(Log.DEBUG)) _log.debug("Cancelled! " + toString(), new Exception("cancelled")); } - public SimpleTimer.TimedEvent getResendEvent() { return _resendEvent; } + public SimpleTimer2.TimedEvent getResendEvent() { return _resendEvent; } /** how long after packet creation was it acked? * @return how long after packet creation the packet was ACKed in ms @@ -122,15 +122,15 @@ public class PacketLocal extends Packet implements MessageOutputStream.WriteStat public void incrementNACKs() { int cnt = ++_nackCount; - SimpleTimer.TimedEvent evt = _resendEvent; + SimpleTimer2.TimedEvent evt = _resendEvent; if ( (cnt >= Connection.FAST_RETRANSMIT_THRESHOLD) && (evt != null) && (!_retransmitted)) { _retransmitted = true; - RetransmissionTimer.getInstance().addEvent(evt, 0); + evt.reschedule(0); } } public int getNACKs() { return _nackCount; } - public void setResendPacketEvent(SimpleTimer.TimedEvent evt) { _resendEvent = evt; } + public void setResendPacketEvent(SimpleTimer2.TimedEvent evt) { _resendEvent = evt; } @Override public StringBuffer formatAsString() { diff --git a/apps/streaming/java/src/net/i2p/client/streaming/PacketQueue.java b/apps/streaming/java/src/net/i2p/client/streaming/PacketQueue.java index 2d22226d3..e91cbdb7d 100644 --- a/apps/streaming/java/src/net/i2p/client/streaming/PacketQueue.java +++ b/apps/streaming/java/src/net/i2p/client/streaming/PacketQueue.java @@ -82,7 +82,24 @@ class PacketQueue { // this should not block! 
begin = _context.clock().now(); - sent = _session.sendMessage(packet.getTo(), buf, 0, size, keyUsed, tagsSent); + long expires = 0; + Connection.ResendPacketEvent rpe = (Connection.ResendPacketEvent) packet.getResendEvent(); + if (rpe != null) + // we want the router to expire it a little before we do, + // so if we retransmit it will use a new tunnel/lease combo + expires = rpe.getNextSendTime() - 500; + if (expires > 0) + // I2PSessionImpl2 + //sent = _session.sendMessage(packet.getTo(), buf, 0, size, keyUsed, tagsSent, expires); + // I2PSessionMuxedImpl + sent = _session.sendMessage(packet.getTo(), buf, 0, size, keyUsed, tagsSent, expires, + I2PSession.PROTO_STREAMING, I2PSession.PORT_UNSPECIFIED, I2PSession.PORT_UNSPECIFIED); + else + // I2PSessionImpl2 + //sent = _session.sendMessage(packet.getTo(), buf, 0, size, keyUsed, tagsSent, 0); + // I2PSessionMuxedImpl + sent = _session.sendMessage(packet.getTo(), buf, 0, size, keyUsed, tagsSent, + I2PSession.PROTO_STREAMING, I2PSession.PORT_UNSPECIFIED, I2PSession.PORT_UNSPECIFIED); end = _context.clock().now(); if ( (end-begin > 1000) && (_log.shouldLog(Log.WARN)) ) diff --git a/apps/streaming/java/src/net/i2p/client/streaming/RetransmissionTimer.java b/apps/streaming/java/src/net/i2p/client/streaming/RetransmissionTimer.java index c52c373b1..92c4cf1c2 100644 --- a/apps/streaming/java/src/net/i2p/client/streaming/RetransmissionTimer.java +++ b/apps/streaming/java/src/net/i2p/client/streaming/RetransmissionTimer.java @@ -1,12 +1,12 @@ package net.i2p.client.streaming; -import net.i2p.util.SimpleTimer; +import net.i2p.util.SimpleTimer2; /** * */ -public class RetransmissionTimer extends SimpleTimer { +public class RetransmissionTimer extends SimpleTimer2 { private static final RetransmissionTimer _instance = new RetransmissionTimer(); - public static final SimpleTimer getInstance() { return _instance; } + public static final RetransmissionTimer getInstance() { return _instance; } protected RetransmissionTimer() { 
super("StreamingTimer"); } } diff --git a/apps/streaming/java/src/net/i2p/client/streaming/SchedulerImpl.java b/apps/streaming/java/src/net/i2p/client/streaming/SchedulerImpl.java index 3d29880f0..e02a1b413 100644 --- a/apps/streaming/java/src/net/i2p/client/streaming/SchedulerImpl.java +++ b/apps/streaming/java/src/net/i2p/client/streaming/SchedulerImpl.java @@ -2,7 +2,7 @@ package net.i2p.client.streaming; import net.i2p.I2PAppContext; import net.i2p.util.Log; -import net.i2p.util.SimpleTimer; +import net.i2p.util.SimpleScheduler; /** * Base scheduler @@ -17,6 +17,6 @@ abstract class SchedulerImpl implements TaskScheduler { } protected void reschedule(long msToWait, Connection con) { - SimpleTimer.getInstance().addEvent(con.getConnectionEvent(), msToWait); + SimpleScheduler.getInstance().addEvent(con.getConnectionEvent(), msToWait); } } diff --git a/apps/streaming/java/src/net/i2p/client/streaming/TCBShare.java b/apps/streaming/java/src/net/i2p/client/streaming/TCBShare.java new file mode 100644 index 000000000..7c8df3e3e --- /dev/null +++ b/apps/streaming/java/src/net/i2p/client/streaming/TCBShare.java @@ -0,0 +1,139 @@ +package net.i2p.client.streaming; + +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +import net.i2p.I2PAppContext; +import net.i2p.data.Destination; +import net.i2p.util.Log; +import net.i2p.util.SimpleTimer2; + +/** + * Share important TCP Control Block parameters across Connections + * to the same remote peer. + * This is intended for "temporal" sharing at connection open/close time, + * not "ensemble" sharing during a connection. Ref. RFC 2140. + * + * There is a TCB share per ConnectionManager (i.e. per local Destination) + * so that there is no information leakage to other Destinations on the + * same router. 
+ * + */ +public class TCBShare { + private I2PAppContext _context; + private Log _log; + private Map _cache; + private CleanEvent _cleaner; + + private static final long EXPIRE_TIME = 30*60*1000; + private static final long CLEAN_TIME = 10*60*1000; + private static final double RTT_DAMPENING = 0.75; + private static final double WDW_DAMPENING = 0.75; + private static final int MAX_RTT = ((int) Connection.MAX_RESEND_DELAY) / 2; + private static final int MAX_WINDOW_SIZE = Connection.MAX_WINDOW_SIZE / 4; + + public TCBShare(I2PAppContext ctx) { + _context = ctx; + _log = ctx.logManager().getLog(TCBShare.class); + _cache = new ConcurrentHashMap(4); + _cleaner = new CleanEvent(); + _cleaner.schedule(CLEAN_TIME); + } + + public void stop() { + _cleaner.cancel(); + } + + public void updateOptsFromShare(Connection con) { + Destination dest = con.getRemotePeer(); + if (dest == null) + return; + ConnectionOptions opts = con.getOptions(); + if (opts == null) + return; + Entry e = _cache.get(dest); + if (e == null || e.isExpired()) + return; + if (_log.shouldLog(Log.DEBUG)) + _log.debug("From cache: " + + con.getSession().getMyDestination().calculateHash().toBase64().substring(0, 4) + + '-' + + dest.calculateHash().toBase64().substring(0, 4) + + " RTT: " + e.getRTT() + " wdw: " + e.getWindowSize()); + opts.setRTT(e.getRTT()); + opts.setWindowSize(e.getWindowSize()); + } + + public void updateShareOpts(Connection con) { + Destination dest = con.getRemotePeer(); + if (dest == null) + return; + if (con.getAckedPackets() <= 0) + return; + ConnectionOptions opts = con.getOptions(); + if (opts == null) + return; + int old = -1; + int oldw = -1; + Entry e = _cache.get(dest); + if (e == null || e.isExpired()) { + e = new Entry(opts.getRTT(), opts.getWindowSize()); + _cache.put(dest, e); + } else { + old = e.getRTT(); + oldw = e.getWindowSize(); + e.setRTT(opts.getRTT()); + e.setWindowSize(opts.getWindowSize()); + } + if (_log.shouldLog(Log.DEBUG)) + _log.debug("To cache: " + + 
con.getSession().getMyDestination().calculateHash().toBase64().substring(0, 4) + + '-' + + dest.calculateHash().toBase64().substring(0, 4) + + " old: " + old + " con: " + opts.getRTT() + " new: " + e.getRTT() + + " oldw: " + oldw + " conw: " + opts.getWindowSize() + " neww: " + e.getWindowSize()); + } + + private class Entry { + int _rtt; + int _wdw; + long _updated; + + public Entry(int ms, int wdw) { + _rtt = ms; + _wdw = wdw; + _updated = _context.clock().now(); + } + public int getRTT() { return _rtt; } + public void setRTT(int ms) { + _rtt = (int)(RTT_DAMPENING*_rtt + (1-RTT_DAMPENING)*ms); + if (_rtt > MAX_RTT) + _rtt = MAX_RTT; + _updated = _context.clock().now(); + } + public int getWindowSize() { return _wdw; } + public void setWindowSize(int wdw) { + _wdw = (int)(0.5 + WDW_DAMPENING*_wdw + (1-WDW_DAMPENING)*wdw); + if (_wdw > MAX_WINDOW_SIZE) + _wdw = MAX_WINDOW_SIZE; + _updated = _context.clock().now(); + } + public boolean isExpired() { + return _updated < _context.clock().now() - EXPIRE_TIME; + } + } + + private class CleanEvent extends SimpleTimer2.TimedEvent { + public CleanEvent() { + super(RetransmissionTimer.getInstance()); + } + public void timeReached() { + for (Iterator iter = _cache.keySet().iterator(); iter.hasNext(); ) { + if (_cache.get(iter.next()).isExpired()) + iter.remove(); + } + schedule(CLEAN_TIME); + } + } +} diff --git a/apps/susidns/src/build.xml b/apps/susidns/src/build.xml index f31340954..d3f5f1662 100644 --- a/apps/susidns/src/build.xml +++ b/apps/susidns/src/build.xml @@ -63,12 +63,10 @@ - - diff --git a/apps/susidns/src/jsp/addressbook.jsp b/apps/susidns/src/jsp/addressbook.jsp index 435ce2ec5..d36b91354 100644 --- a/apps/susidns/src/jsp/addressbook.jsp +++ b/apps/susidns/src/jsp/addressbook.jsp @@ -160,7 +160,8 @@

      Add new destination:

      -Hostname: Destination:
      +Hostname: +Destination:

      diff --git a/apps/susimail/build.xml b/apps/susimail/build.xml index 2bdc4f164..abf2a88cf 100644 --- a/apps/susimail/build.xml +++ b/apps/susimail/build.xml @@ -19,7 +19,7 @@ + basedir="src/" excludes="WEB-INF/web.xml LICENSE src/**/*"> diff --git a/apps/syndie/doc/intro.sml b/apps/syndie/doc/intro.sml deleted file mode 100644 index 45f6c8f89..000000000 --- a/apps/syndie/doc/intro.sml +++ /dev/null @@ -1,31 +0,0 @@ -Syndie is a new effort to build a user friendly secure blogging tool, exploiting the capabilities offered by anonymity and security systems such as [link schema="web" location="http://www.i2p.net/"]I2P[/link], [link schema="web" location="http://tor.eff.org/"]TOR[/link], [link schema="web" location="http://www.freenetproject.org/"]Freenet[/link], [link schema="web" location="http://www.mnetproject.org/"]MNet[/link], and others. Abstracting away the content distribution side, Syndie allows people to [b]build content and communities[/b] that span technologies rather than tying oneself down to the ups and downs of any particular network. - -[cut][/cut]Syndie is working to take the technologies of the security, anonymity, and cryptography worlds and merge them with the simplicity and user focus of the blogging world. From the user's standpoint, you could perhaps view Syndie as a distributed [link schema="web" location="http://www.livejournal.com"]LiveJournal[/link], while technically Syndie is much, much simpler. - -[b]How Syndie works[/b][hr][/hr]The [i]magic[/i] behind Syndie's abstraction is to ignore any content distribution issues and merely assume data moves around as necessary. Each Syndie instance runs agains the filesystem, verifying and indexing blogs and offering up what it knows to the user through a web interface. The core idea in Syndie, therefore, is the [b]archive[/b]- a collection of blogs categorized and ready for consumption. - -Whenever someone reads or posts to a Syndie instance, it is working with the [b]local archive[/b]. 
However, as Syndie's development progresses, people will be able to read [b]remote archives[/b] - pulling the archive summary from an I2P [i]eepsite[/i], TOR [i]hosted service[/i], Freenet [i]Freesite[/i], MNet [i]key[/i], or (with a little less glamor) usenet, filesharing apps, or the web. The first thing Syndie needs to use a remote archive is the archive's index - a plain text file summarizing what the archive contains ([attachment id="0"]an example[/attachment]). From that, Syndie will let the user browse through the blogs, pulling the individual blog posts into the local archive when necessary. - -[b]Posting[/b][hr][/hr]Creating and posting to blogs with Syndie is trivial - simply log in to Syndie, click on the [i]Post[/i] button, and fill out the form offered. Syndie handles all of the encryption and formatting details - packaging up the post with any attached files into a single signed, compressed, and potentially encrypted bundle, storing it in the local archive and capable of being shared with other Syndie users. Every blog is identified by its public key behind the scenes, so there is no need for a central authority to require that your blogs are all named uniquely or any other such thing. - -While each blog is run by a single author, they can in turn allow other authors to post to the blog while still letting readers know that the post is authorized (though created by a different author). Of course, if multiple people wanted to run a single blog and make it look like only one person wrote it, they could share the blog's private keys. - -[b]Tags[/b][hr][/hr]Following the lessons from the last few years, every Syndie entry has any number of tags associated with it by the author, allowing trivial categorization and filtering. 
- -[b]Hosting[/b][hr][/hr]While in many scenarios it is best for people to run Syndie locally on their machine, Syndie is a fully multiuser system so anyone can be a Syndie hosting provider by simply exposing the web interface to the public. The Syndie host's operator can password protect the blog registration interface so only authorized people can create a blog, and the operator can technically go through and delete blog posts or even entire blogs from their local archive. A public Syndie host can be a general purpose blog repository, letting anyone sign up (following the blogger and geocities path), be a more community oriented blog repository, requiring people to introduce you to the host to sign up (following the livejournal/orkut path), be a more focused blog repository, requiring posts to stay within certain guidelines (following the indymedia path), or even to fit specialized needs by picking and choosing among the best blogs and posts out there, offering the operator's editorial flare into a comprehensive collection. - -[b]Syndication[/b][hr][/hr]By itself, Syndie is a nice blogging community system, but its real strength as a tool for individual and community empowerment comes when blogs are shared. While Syndie does not aim to be a content distribution network, it does want to exploit them to allow those who require their message to get out to do so. By design, syndicating Syndie can be done with some of the most basic tools - simply pass around the self authenticating files written to the archive and you're done. The archive itself is organized so that you can expose it as an indexed directory in some webserver and let people wget against it, picking to pull individual posts, all posts within a blog, all posts since a given date, or all posts in all blogs. With a very small shell script, you could parse the plain text archive summary to pull posts by size and tag as well. 
People could offer up their archives as rsync repositories or package up tarballs/zipfiles of blogs or entries - simply grabbing them and extracting them into your local Syndie archive would instantly give you access to all of the content therein. - -Of course, manual syndication as described above has... limits. When appropriate, Syndie will tie in to content syndication systems such as [link schema="eep" location="http://feedspace.i2p/"]Feedspace[/link] (or even good ol' Usenet) to automatically import (and export) posts. Integration with content distribution networks like Freenet and MNet will allow the user to periodically grab a published archive index and pull down blogs as necessary. Posting archives and blogs to those networks will be done trivially as well, though they do still depend upon a polling paradigm. - -[b]SML[/b][hr][/hr]Syndie is meant to work securely with any browser regardless of the browser's security. Blog entries are written in [b]SML[/b] [i](Syndie or Secure Markup Language)[/i] with a bbcode-linke syntax, extended to exploit some of Syndie's capabilities and context. In addition to the SML content in a blog entry, there can be any number of attachments, references to other blogs/posts/tags, nym<->public key mappings (useful for I2P host distribution), references to archives of blogs (on eepsites, freesites, etc), links to various resources, and more. - -[b]Future[/b][hr][/hr]Down the road, there are lots of things to improve with Syndie. The interface, of course, is critical, as are tools for SML authoring and improvements to SML itself to offer a more engaging user experience. Integration with a search engine like Lucene would allow full text search through entire archives, and Atom/RSS interfaces would allow trivial import and export to existing clients. Even further, blogs could be transparently encrypted, allowing only authorized users (those with the key) to read entries posted to them (or even know what attachments are included). 
Integration with existing blogging services (such as [link schema="web" location="http://www.anonyblog.com"]anonyblog[/link], [link schema="web" location="http://blo.gs"]blo.gs[/link], and [link schema="web" location="http://livejournal.com"]livejournal[/link]) may also be explored. Of course, bundling with I2P and other anonymity, security, and community systems will be pursued. - -[b]Who/where/when/why[/b][hr][/hr]The base Syndie system was written in a few days by [blog name="jrandom" bloghash="ovpBy2mpO1CQ7deYhQ1cDGAwI6pQzLbWOm1Sdd0W06c=" archive0="eep://dev.i2p/~jrandom" archive1="http://dev.i2p.net/~jrandom" archive2="mailto://jrandom@i2p.net"][/blog], though comes out of discussions with [link schema="eep" location="http://frosk.i2p"]Frosk[/link] and many others in the I2P community. Yes, this is an incarnation of [b]MyI2P[/b] (or for those who remember jrand0m's flog, [b]Flogger[/b]). - -All of the Syndie code is of course open source and released into the public domain (the [i]real[/i] "free as in freedom"), though it does use some BSD licensed cryptographic routines and an Apache licensed file upload component. Contributions of code are very much welcome - the source is located within the [link schema="web" location="http://www.i2p.net/cvs"]I2P codebase[/link]. Of course, those who cannot or choose not to contribute code are encouraged to [b]use[/b] Syndie - create a blog, create some content, read some content! For those who really want to though, financial contributions to the Syndie development effort can be channeled through the [link schema="web" location="http://www.i2p.net/donate"]I2P fund[/link] (donations for Syndie are distributed to Syndie developers from time to time). - -The "why" of Syndie is a much bigger question, though is hopefully self-evident. We need kickass anonymity-aware client applications so that we can get better anonymity (since without kickass clients, we don't have many users). 
We also need kickass tools for safe blogging, since there are limits to the strength offered by low latency anonymity systems like I2P and TOR - Syndie goes beyond them to offer an interface to mid and high latency anonymous systems while exploiting their capabilities for fast and efficient syndication. - -Oh, and jrandom also lost his blog's private key, so needed something to blog with again. diff --git a/apps/syndie/doc/readme-standalone.txt b/apps/syndie/doc/readme-standalone.txt deleted file mode 100644 index 534612e4d..000000000 --- a/apps/syndie/doc/readme-standalone.txt +++ /dev/null @@ -1,3 +0,0 @@ -To run Syndie, fire it up with "java -jar launch-syndie.jar" (or, on windows, -to run without the dos box, "javaw -jar launch-syndie.jar"). For further -information, swing to http://localhost:8001/ \ No newline at end of file diff --git a/apps/syndie/doc/readme.txt b/apps/syndie/doc/readme.txt deleted file mode 100644 index 9472dae5d..000000000 --- a/apps/syndie/doc/readme.txt +++ /dev/null @@ -1,27 +0,0 @@ -To install this base instance: - - mkdir lib - cp ../lib/i2p.jar lib/ - cp ../lib/commons-el.jar lib/ - cp ../lib/commons-logging.jar lib/ - cp ../lib/jasper-compiler.jar lib/ - cp ../lib/jasper-runtime.jar lib/ - cp ../lib/javax.servlet.jar lib/ - cp ../lib/jbigi.jar lib/ - cp ../lib/org.mortbay.jetty.jar lib/ - cp ../lib/xercesImpl.jar lib/ - -To run it: - sh run.sh - firefox http://localhost:7653/syndie/ - -You can share your archive at http://localhost:7653/ so -that people can syndicate off you via - cd archive ; wget -m -nH http://yourmachine:7653/ - -You may want to add a password on the registration form -so that you have control over who can create blogs via /syndie/. -To do so, set the password in the run.sh script. - -Windows users: -write your own instructions. 
We're alpha, here ;) diff --git a/apps/syndie/doc/sml.sml b/apps/syndie/doc/sml.sml deleted file mode 100644 index 327d321fc..000000000 --- a/apps/syndie/doc/sml.sml +++ /dev/null @@ -1,41 +0,0 @@ -[cut]A brief glance at SML[/cut] -[b]General rules[/b] - -Newlines are newlines are newlines. If you include a newline in your SML, you'll get a newline in the rendered HTML. - -All < and > characters are replaced by their HTML entity counterparts. - -All SML tags are enclosed with [[ and ]] (e.g. [[b]]bold stuff[[/b]]). ([[ and ]] characters are quoted by [[[[ and ]]]], respectively) - -Nesting SML tags is [b]not[/b] currently supported (though will be at a later date). - -All SML tags must have a beginning and end tag (even for ones without any 'body', such as [[hr]][[/hr]]). This restriction may be removed later. - -Simple formatting tags behave as expected: [[b]], [[i]], [[u]], [[h1]] through [[h5]], [[hr]], [[pre]]. -[hr][/hr] -[b]Tag details[/b] - -* To cut an entry so that the summary is before while the details are afterwards: -[[cut]]more inside...[[/cut]] - -* To load an attachment as an image with "syndie's logo" as the alternate text: -[[img attachment="0"]]syndie's logo[[/img]] - -* To add a download link to an attachment: -[[attachment id="0"]]anchor text[[/img]] - -* To quote someone: -[[quote author="who you are quoting" location="blog://ovpBy2mpO1CQ7deYhQ1cDGAwI6pQzLbWOm1Sdd0W06c=/1234567890"]]stuff they said[[/quote]] - -* To sample some code: -[[code location="eep://dev.i2p/cgi-bin/cvsweb.cgi/i2p/index.html"]][[/code]] - -* To link to a [blog name="jrandom" bloghash="ovpBy2mpO1CQ7deYhQ1cDGAwI6pQzLbWOm1Sdd0W06c=" blogentry="1124402137773" archive0="eep://dev.i2p/~jrandom/archive" archive1="irc2p://jrandom@irc.postman.i2p/#i2p"]bitchin' blog[/blog]: -[[blog name="the blogs name" bloghash="ovpBy2mpO1CQ7deYhQ1cDGAwI6pQzLbWOm1Sdd0W06c=" blogtag="tag" blogentry="123456789" archive0="eep://dev.i2p/~jrandom/archive/" 
archive1="freenet://SSK@blah/archive//"]]description of the blog[[/blog]]. blogentry and blogtag are optional and there can be any number of archiveN locations specified. - -* To link to an [link schema="eep" location="http://dev.i2p/"]external resource[/link]: -[[link schema="eep" location="http://dev.i2p/"]]link to it[[/link]]. -[i]The schema should be a network selection tool, such as "eep" for an eepsite, "tor" for a tor hidden service, "web" for a normal website, "freenet" for a freenet key, etc. The local user's Syndie configuration should include information necessary for the user to access the content referenced through the given schemas.[/i] - -* To pass an [address name="dev.i2p" schema="eep" location="NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA"]addressbook entry[/address]: -[[address name="dev.i2p" schema="eep" location="NF2...AAAA"]]add it[[/address]]. 
diff --git a/apps/syndie/java/build.xml b/apps/syndie/java/build.xml deleted file mode 100644 index 7d0bdc456..000000000 --- a/apps/syndie/java/build.xml +++ /dev/null @@ -1,172 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/apps/syndie/java/src/net/i2p/syndie/Archive.java b/apps/syndie/java/src/net/i2p/syndie/Archive.java deleted file mode 100644 index d44d3dfd0..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/Archive.java +++ /dev/null @@ -1,495 +0,0 @@ -package net.i2p.syndie; - -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.FilenameFilter; -import java.io.IOException; -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; - -import net.i2p.I2PAppContext; -import net.i2p.data.Base64; -import net.i2p.data.DataHelper; -import net.i2p.data.Hash; -import net.i2p.data.SessionKey; -import net.i2p.syndie.data.ArchiveIndex; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.EntryContainer; -import net.i2p.util.FileUtil; -import net.i2p.util.Log; - -/** - * Store blog info in the local filesystem. - * - * Entries are stored under: - * $rootDir/$h(blogKey)/$entryId.snd (the index lists them as YYYYMMDD_n_jKB) - * Blog info is stored under: - * $rootDir/$h(blogKey)/meta.snm - * Archive summary is stored under - * $rootDir/archive.txt - * Any key=value pairs in - * $rootDir/archiveHeaders.txt - * are injected into the archive.txt on regeneration. 
- * - * When entries are loaded for extraction/verification/etc, their contents are written to - * $cacheDir/$h(blogKey)/$entryId/ (e.g. $cacheDir/$h(blogKey)/$entryId/entry.sml) - */ -public class Archive { - private I2PAppContext _context; - private Log _log; - private BlogManager _mgr; - private File _rootDir; - private File _cacheDir; - private Map _blogInfo; - private ArchiveIndex _index; - private EntryExtractor _extractor; - private String _defaultSelector; - - public static final String METADATA_FILE = "meta.snm"; - public static final String INDEX_FILE = "archive.txt"; - public static final String HEADER_FILE = "archiveHeaders.txt"; - - private static final FilenameFilter _entryFilenameFilter = new FilenameFilter() { - public boolean accept(File dir, String name) { return name.endsWith(".snd"); } - }; - - public Archive(I2PAppContext ctx, String rootDir, String cacheDir, BlogManager mgr) { - _context = ctx; - _log = ctx.logManager().getLog(Archive.class); - _mgr = mgr; - _rootDir = new File(rootDir); - if (!_rootDir.exists()) - _rootDir.mkdirs(); - _cacheDir = new File(cacheDir); - if (!_cacheDir.exists()) - _cacheDir.mkdirs(); - _blogInfo = new HashMap(); - _index = null; - _extractor = new EntryExtractor(ctx); - _defaultSelector = ctx.getProperty("syndie.defaultSelector"); - if (_defaultSelector == null) _defaultSelector = ""; - reloadInfo(); - } - - public void reloadInfo() { - File f[] = _rootDir.listFiles(); - List info = new ArrayList(); - for (int i = 0; i < f.length; i++) { - if (f[i].isDirectory()) { - File meta = new File(f[i], METADATA_FILE); - if (meta.exists()) { - BlogInfo bi = new BlogInfo(); - FileInputStream fi = null; - try { - fi = new FileInputStream(meta); - bi.load(fi); - if (_mgr.isBanned(bi.getKey().calculateHash())) { - fi.close(); - fi = null; - _log.error("Deleting banned blog " + bi.getKey().calculateHash().toBase64()); - delete(bi.getKey().calculateHash()); - continue; - } - if (bi.verify(_context)) { - info.add(bi); - } else { 
- _log.error("BlogInfo is invalid: " + bi); - meta.delete(); - } - } catch (IOException ioe) { - _log.error("Error loading the blog", ioe); - } finally { - if (fi != null) try { fi.close(); } catch (IOException ioe) {} - } - } - } - } - - synchronized (_blogInfo) { - _blogInfo.clear(); - for (int i = 0; i < info.size(); i++) { - BlogInfo bi = (BlogInfo)info.get(i); - _blogInfo.put(bi.getKey().calculateHash(), bi); - } - } - } - - public String getDefaultSelector() { return _defaultSelector; } - public void setDefaultSelector(String sel) { - if (sel == null) - _defaultSelector = ""; - else - _defaultSelector = sel; - } - - public BlogInfo getBlogInfo(BlogURI uri) { - if (uri == null) return null; - synchronized (_blogInfo) { - return (BlogInfo)_blogInfo.get(uri.getKeyHash()); - } - } - public BlogInfo getBlogInfo(Hash key) { - synchronized (_blogInfo) { - return (BlogInfo)_blogInfo.get(key); - } - } - public boolean storeBlogInfo(BlogInfo info) { - if (!info.verify(_context)) { - _log.warn("Not storing invalid blog " + info); - return false; - } - - if (_mgr.isBanned(info.getKey().calculateHash())) { - _log.error("Not storing banned blog " + info.getKey().calculateHash().toBase64(), new Exception("Stored by")); - return false; - } - - boolean isNew = true; - synchronized (_blogInfo) { - BlogInfo old = (BlogInfo)_blogInfo.get(info.getKey().calculateHash()); - if ( (old == null) || (old.getEdition() < info.getEdition()) ) - _blogInfo.put(info.getKey().calculateHash(), info); - else - isNew = false; - } - if (!isNew) return true; // valid entry, but not stored, since its old - try { - File blogDir = new File(_rootDir, info.getKey().calculateHash().toBase64()); - blogDir.mkdirs(); - File blogFile = new File(blogDir, "meta.snm"); - FileOutputStream out = new FileOutputStream(blogFile); - info.write(out); - out.close(); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Blog info written to " + blogFile.getPath()); - return true; - } catch (IOException ioe) { - 
_log.error("Error writing out info", ioe); - return false; - } - } - - public List listBlogs() { - synchronized (_blogInfo) { - return new ArrayList(_blogInfo.values()); - } - } - - private File getEntryDir(File entryFile) { - String name = entryFile.getName(); - if (!name.endsWith(".snd")) throw new RuntimeException("hmm, why are we trying to get an entry dir for " + entryFile.getAbsolutePath()); - String blog = entryFile.getParentFile().getName(); - File blogDir = new File(_cacheDir, blog); - return new File(blogDir, name.substring(0, name.length()-4)); - //return new File(entryFile.getParentFile(), "." + name.substring(0, name.length()-4)); - } - - /** - * Expensive operation, reading all entries within the blog and parsing out the tags. - * Whenever possible, query the index instead of the archive - * - */ - public List listTags(Hash blogKeyHash) { - List rv = new ArrayList(); - BlogInfo info = getBlogInfo(blogKeyHash); - if (info == null) - return rv; - - File blogDir = new File(_rootDir, Base64.encode(blogKeyHash.getData())); - File entries[] = blogDir.listFiles(_entryFilenameFilter); - for (int j = 0; j < entries.length; j++) { - try { - File entryDir = getEntryDir(entries[j]); - EntryContainer entry = null; - if (entryDir.exists()) - entry = getCachedEntry(entryDir); - if ( (entry == null) || (!entryDir.exists()) ) { - if (!extractEntry(entries[j], entryDir, info)) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Entry " + entries[j].getPath() + " is not valid"); - entries[j].delete(); - continue; - } - entry = getCachedEntry(entryDir); - } - String tags[] = entry.getTags(); - for (int t = 0; t < tags.length; t++) { - if (!rv.contains(tags[t])) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Found a new tag in cached " + entry.getURI() + ": " + tags[t]); - rv.add(tags[t]); - } - } - } catch (IOException ioe) { - _log.error("Error listing tags", ioe); - } - } // end iterating over the entries - - return rv; - } - - /** - * Extract the entry to the given 
dir, returning true if it was verified properly - * - */ - private boolean extractEntry(File entryFile, File entryDir, BlogInfo info) throws IOException { - if (!entryDir.exists()) - entryDir.mkdirs(); - - boolean ok = true; - try { - ok = _extractor.extract(entryFile, entryDir, null, info); - } catch (IOException ioe) { - ok = false; - _log.error("Error extracting " + entryFile.getPath() + ", deleting it", ioe); - } - if (!ok) { - File files[] = entryDir.listFiles(); - for (int i = 0; i < files.length; i++) - files[i].delete(); - entryDir.delete(); - } - return ok; - } - - private EntryContainer getCachedEntry(File entryDir) { - try { - CachedEntry ce = new CachedEntry(entryDir); - if (ce.isValid()) - return ce; - return null; - } catch (IOException ioe) { - _log.warn("Error reading cached entry... deleting cache elements"); - } - - File files[] = entryDir.listFiles(); - for (int i = 0; i < files.length; i++) - files[i].delete(); - entryDir.delete(); - return null; - } - - public EntryContainer getEntry(BlogURI uri) { return getEntry(uri, null); } - public EntryContainer getEntry(BlogURI uri, SessionKey blogKey) { - List entries = listEntries(uri, null, blogKey); - if (entries.size() > 0) - return (EntryContainer)entries.get(0); - else - return null; - } - - public List listEntries(BlogURI uri, String tag, SessionKey blogKey) { - if (uri == null) return new ArrayList(); - return listEntries(uri.getKeyHash(), uri.getEntryId(), tag, blogKey); - } - public List listEntries(Hash blog, long entryId, String tag, SessionKey blogKey) { - List rv = new ArrayList(); - BlogInfo info = getBlogInfo(blog); - if (info == null) - return rv; - - File blogDir = new File(_rootDir, blog.toBase64()); - File entries[] = blogDir.listFiles(_entryFilenameFilter); - if (entries == null) - return rv; - for (int i = 0; i < entries.length; i++) { - try { - EntryContainer entry = null; - if (blogKey == null) { - // no key, cache. 
- File entryDir = getEntryDir(entries[i]); - if (entryDir.exists()) { - entry = getCachedEntry(entryDir); - } - if ((entry == null) || !entryDir.exists()) { - if (!extractEntry(entries[i], entryDir, info)) { - _log.error("Entry " + entries[i].getPath() + " is not valid"); - entries[i].delete(); - continue; - } - entry = getCachedEntry(entryDir); - } - } else { - // we have an explicit key - no caching - entry = new EntryContainer(); - FileInputStream fi = null; - try { - fi = new FileInputStream(entries[i]); - entry.load(fi); - } finally { - if (fi != null) try { fi.close(); } catch (IOException ioe) {} - } - boolean ok = entry.verifySignature(_context, info); - if (!ok) { - _log.error("Keyed entry " + entries[i].getPath() + " is not valid"); - entries[i].delete(); - continue; - } - - entry.parseRawData(_context, blogKey); - - entry.setCompleteSize((int)entries[i].length()); - } - - if (entryId >= 0) { - if (entry.getURI().getEntryId() == entryId) { - rv.add(entry); - return rv; - } - } else if (tag != null) { - String tags[] = entry.getTags(); - for (int j = 0; j < tags.length; j++) { - if (tags[j].equals(tag)) { - rv.add(entry); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("cached entry matched requested tag [" + tag + "]: " + entry.getURI()); - break; - } - } - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("cached entry is ok and no id or tag was requested: " + entry.getURI()); - rv.add(entry); - } - } catch (IOException ioe) { - _log.error("Error listing entries", ioe); - } - } - return rv; - } - - public synchronized void delete(Hash blog) { - if (blog == null) return; - File blogDir = new File(_rootDir, blog.toBase64()); - boolean deleted = FileUtil.rmdir(blogDir, false); - File cacheDir = new File(_cacheDir, blog.toBase64()); - deleted = FileUtil.rmdir(cacheDir, false) && deleted; - _log.info("Deleted blog " + blog.toBase64() + " completely? 
" + deleted); - } - - public boolean storeEntry(EntryContainer container) { - if (container == null) return false; - BlogURI uri = container.getURI(); - if (uri == null) return false; - - File blogDir = new File(_rootDir, uri.getKeyHash().toBase64()); - blogDir.mkdirs(); - File entryFile = new File(blogDir, getEntryFilename(uri.getEntryId())); - if (entryFile.exists()) return true; - - - BlogInfo info = getBlogInfo(uri); - if (info == null) { - _log.error("no blog metadata for the uri " + uri); - return false; - } - if (!container.verifySignature(_context, info)) { - _log.error("Not storing the invalid blog entry at " + uri); - return false; - } else { - //System.out.println("Signature is valid: " + container.getSignature() + " for info " + info); - } - try { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - container.write(baos, true); - byte data[] = baos.toByteArray(); - FileOutputStream out = new FileOutputStream(entryFile); - out.write(data); - out.close(); - container.setCompleteSize(data.length); - return true; - } catch (IOException ioe) { - _log.error("Error storing", ioe); - return false; - } - } - - public static String getEntryFilename(long entryId) { return entryId + ".snd"; } - - private static SimpleDateFormat _dateFmt = new SimpleDateFormat("yyyyMMdd", Locale.UK); - public static String getIndexName(long entryId, int numBytes) { - try { - synchronized (_dateFmt) { - String yy = _dateFmt.format(new Date(entryId)); - long begin = _dateFmt.parse(yy).getTime(); - long n = entryId - begin; - int kb = numBytes / 1024; - return yy + '_' + n + '_' + kb + "KB"; - } - } catch (NumberFormatException nfe) { - nfe.printStackTrace(); - return "UNKNOWN"; - } catch (ParseException pe) { - pe.printStackTrace(); - return "UNKNOWN"; - } - } - - public static long getEntryIdFromIndexName(String entryIndexName) { - if (entryIndexName == null) return -1; - if (entryIndexName.endsWith(".snd")) - entryIndexName = entryIndexName.substring(0, 
entryIndexName.length() - 4); - int endYY = entryIndexName.indexOf('_'); - if (endYY <= 0) return -1; - int endN = entryIndexName.indexOf('_', endYY+1); - if (endN <= 0) return -1; - String yy = entryIndexName.substring(0, endYY); - String n = entryIndexName.substring(endYY+1, endN); - try { - synchronized (_dateFmt) { - long dayBegin = _dateFmt.parse(yy).getTime(); - long dayEntry = Long.parseLong(n); - return dayBegin + dayEntry; - } - } catch (NumberFormatException nfe) { - nfe.printStackTrace(); - } catch (ParseException pe) { - pe.printStackTrace(); - } - return -1; - } - public static int getSizeFromIndexName(String entryIndexName) { - if (entryIndexName == null) return -1; - if (entryIndexName.endsWith(".snd")) - entryIndexName = entryIndexName.substring(0, entryIndexName.length() - 4); - int beginSize = entryIndexName.lastIndexOf('_'); - if ( (beginSize <= 0) || (beginSize >= entryIndexName.length()-3) ) - return -1; - try { - String sz = entryIndexName.substring(beginSize+1, entryIndexName.length()-2); - return Integer.parseInt(sz); - } catch (NumberFormatException nfe) { - nfe.printStackTrace(); - } - return -1; - } - - public ArchiveIndex getIndex() { - if (_index == null) - regenerateIndex(); - return _index; - } - - public File getArchiveDir() { return _rootDir; } - public File getIndexFile() { return new File(_rootDir, INDEX_FILE); } - public void regenerateIndex() { - reloadInfo(); - _index = ArchiveIndexer.index(_context, this); - try { - FileOutputStream out = new FileOutputStream(new File(_rootDir, INDEX_FILE)); - out.write(DataHelper.getUTF8(_index.toString())); - out.flush(); - } catch (IOException ioe) { - _log.error("Error writing out the index"); - } - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/ArchiveIndexer.java b/apps/syndie/java/src/net/i2p/syndie/ArchiveIndexer.java deleted file mode 100644 index 9a39f70a0..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/ArchiveIndexer.java +++ /dev/null @@ -1,228 +0,0 @@ -package 
net.i2p.syndie; - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStreamReader; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.StringTokenizer; -import java.util.TreeMap; - -import net.i2p.I2PAppContext; -import net.i2p.data.Base64; -import net.i2p.data.Hash; -import net.i2p.syndie.data.ArchiveIndex; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.EntryContainer; -import net.i2p.syndie.data.LocalArchiveIndex; -import net.i2p.syndie.sml.HTMLRenderer; -import net.i2p.syndie.sml.SMLParser; -import net.i2p.util.Log; - -/** - * Dig through the archive to build an index - */ -class ArchiveIndexer { - private static final int RECENT_BLOG_COUNT = 10; - private static final int RECENT_ENTRY_COUNT = 10; - - public static ArchiveIndex index(I2PAppContext ctx, Archive source) { - Log log = ctx.logManager().getLog(ArchiveIndexer.class); - LocalArchiveIndex rv = new LocalArchiveIndex(ctx); - WritableThreadIndex threads = new WritableThreadIndex(); - rv.setGeneratedOn(ctx.clock().now()); - - File rootDir = source.getArchiveDir(); - - File headerFile = new File(rootDir, Archive.HEADER_FILE); - if (headerFile.exists()) { - BufferedReader in = null; - try { - in = new BufferedReader(new InputStreamReader(new FileInputStream(headerFile), "UTF-8")); - String line = null; - while ( (line = in.readLine()) != null) { - StringTokenizer tok = new StringTokenizer(line, ":"); - if (tok.countTokens() == 2) - rv.setHeader(tok.nextToken(), tok.nextToken()); - } - } catch (IOException ioe) { - log.error("Error reading header file", ioe); - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - } - } - - // things are new if we just received them in the last day - long newSince = ctx.clock().now() - 24*60*60*1000; - - rv.setVersion(Version.INDEX_VERSION); - 
- /** 0-lowestEntryId --> blog Hash */ - Map blogsByAge = new TreeMap(); - /** 0-entryId --> BlogURI */ - Map entriesByAge = new TreeMap(); - List blogs = source.listBlogs(); - rv.setAllBlogs(blogs.size()); - - int newEntries = 0; - int allEntries = 0; - long newSize = 0; - long totalSize = 0; - int newBlogs = 0; - - SMLParser parser = new SMLParser(ctx); - - for (int i = 0; i < blogs.size(); i++) { - BlogInfo cur = (BlogInfo)blogs.get(i); - Hash key = cur.getKey().calculateHash(); - String keyStr = Base64.encode(key.getData()); - File blogDir = new File(rootDir, Base64.encode(key.getData())); - - File metaFile = new File(blogDir, Archive.METADATA_FILE); - long metadate = metaFile.lastModified(); - - List entries = source.listEntries(key, -1, null, null); - if (log.shouldLog(Log.DEBUG)) - log.debug("Entries under " + key + ": " + entries); - /** tag name --> ordered map of entryId to EntryContainer */ - Map tags = new TreeMap(); - - for (int j = 0; j < entries.size(); j++) { - EntryContainer entry = (EntryContainer)entries.get(j); - entriesByAge.put(new Long(0-entry.getURI().getEntryId()), entry.getURI()); - allEntries++; - totalSize += entry.getCompleteSize(); - String entryTags[] = entry.getTags(); - threads.addEntry(entry.getURI(), entryTags); - log.debug("Adding entry " + entry.getURI() + " to the threads, with tag count " + (entryTags != null ? 
entryTags.length : 0)); - for (int t = 0; t < entryTags.length; t++) { - if (!tags.containsKey(entryTags[t])) { - tags.put(entryTags[t], new TreeMap()); - //System.err.println("New tag [" + entryTags[t] + "]"); - } - Map entriesByTag = (Map)tags.get(entryTags[t]); - entriesByTag.put(new Long(0-entry.getURI().getEntryId()), entry); - if (log.shouldLog(Log.DEBUG)) - log.debug("Entries under tag " + entryTags[t] + ":" + entriesByTag.values()); - } - - if (entry.getURI().getEntryId() >= newSince) { - newEntries++; - newSize += entry.getCompleteSize(); - } - HeaderReceiver rec = new HeaderReceiver(); - parser.parse(entry.getEntry().getText(), rec); - String reply = rec.getHeader(HTMLRenderer.HEADER_IN_REPLY_TO); - if (reply != null) { - String forceNewThread = rec.getHeader(HTMLRenderer.HEADER_FORCE_NEW_THREAD); - if ( (forceNewThread != null) && (Boolean.valueOf(forceNewThread).booleanValue()) ) { - // ignore the parent - log.warn("Ignore the parent of " + entry.getURI() + ": " + reply); - } else { - BlogURI parent = new BlogURI(reply.trim()); - if ( (parent.getKeyHash() != null) && (parent.getEntryId() >= 0) ) { - rv.addReply(parent, entry.getURI()); - threads.addParent(parent, entry.getURI()); - } else if (log.shouldLog(Log.WARN)) { - log.warn("Parent of " + entry.getURI() + " is not valid: [" + reply.trim() + "]"); - } - } - } - } - - long lowestEntryId = -1; - for (Iterator iter = tags.keySet().iterator(); iter.hasNext(); ) { - String tagName = (String)iter.next(); - Map tagEntries = (Map)tags.get(tagName); - long highestId = -1; - if (tagEntries.size() <= 0) break; - Long id = (Long)tagEntries.keySet().iterator().next(); - highestId = 0 - id.longValue(); - - rv.addBlog(key, tagName, highestId); - for (Iterator entryIter = tagEntries.values().iterator(); entryIter.hasNext(); ) { - EntryContainer entry = (EntryContainer)entryIter.next(); - String indexName = Archive.getIndexName(entry.getURI().getEntryId(), entry.getCompleteSize()); - rv.addBlogEntry(key, tagName, 
indexName); - if (!entryIter.hasNext()) - lowestEntryId = entry.getURI().getEntryId(); - } - } - - if (lowestEntryId > newSince) - newBlogs++; - - blogsByAge.put(new Long(0-lowestEntryId), key); - } - - rv.setAllEntries(allEntries); - rv.setNewBlogs(newBlogs); - rv.setNewEntries(newEntries); - rv.setTotalSize(totalSize); - rv.setNewSize(newSize); - - int i = 0; - for (Iterator iter = blogsByAge.keySet().iterator(); iter.hasNext() && i < RECENT_BLOG_COUNT; i++) { - Long when = (Long)iter.next(); - Hash key = (Hash)blogsByAge.get(when); - rv.addNewestBlog(key); - } - i = 0; - for (Iterator iter = entriesByAge.keySet().iterator(); iter.hasNext() && i < RECENT_ENTRY_COUNT; i++) { - Long when = (Long)iter.next(); - BlogURI uri = (BlogURI)entriesByAge.get(when); - rv.addNewestEntry(uri); - } - - threads.organizeTree(); - if (log.shouldLog(Log.DEBUG)) - log.debug("Tree: \n" + threads.toString()); - rv.setThreadedIndex(threads); - - return rv; - } - - private static class HeaderReceiver implements SMLParser.EventReceiver { - private Properties _headers; - public HeaderReceiver() { _headers = null; } - public String getHeader(String name) { return (_headers != null ? 
_headers.getProperty(name) : null); } - public void receiveHeader(String header, String value) { - if (_headers == null) _headers = new Properties(); - _headers.setProperty(header, value); - } - - public void receiveAddress(String name, String schema, String protocol, String location, String anchorText) {} - public void receiveArchive(String name, String description, String locationSchema, String location, String postingKey, String anchorText) {} - public void receiveAttachment(int id, int thumbnail, String anchorText) {} - public void receiveBegin() {} - public void receiveBlog(String name, String blogKeyHash, String blogPath, long blogEntryId, List blogArchiveLocations, String anchorText) {} - public void receiveBold(String text) {} - public void receiveCode(String text, String codeLocationSchema, String codeLocation) {} - public void receiveCut(String summaryText) {} - public void receiveEnd() {} - public void receiveGT() {} - public void receiveH1(String text) {} - public void receiveH2(String text) {} - public void receiveH3(String text) {} - public void receiveH4(String text) {} - public void receiveH5(String text) {} - public void receiveHR() {} - public void receiveHeaderEnd() {} - public void receiveImage(String alternateText, int attachmentId) {} - public void receiveItalic(String text) {} - public void receiveLT() {} - public void receiveLeftBracket() {} - public void receiveLink(String schema, String location, String text) {} - public void receiveNewline() {} - public void receivePlain(String text) {} - public void receivePre(String text) {} - public void receiveQuote(String text, String whoQuoted, String quoteLocationSchema, String quoteLocation) {} - public void receiveRightBracket() {} - public void receiveUnderline(String text) {} - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/BlogManager.java b/apps/syndie/java/src/net/i2p/syndie/BlogManager.java deleted file mode 100644 index 2eefba784..000000000 --- 
a/apps/syndie/java/src/net/i2p/syndie/BlogManager.java +++ /dev/null @@ -1,1142 +0,0 @@ -package net.i2p.syndie; - -import java.io.BufferedReader; -import java.io.BufferedWriter; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.OutputStreamWriter; -import java.io.UnsupportedEncodingException; -import java.io.Writer; -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Date; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Locale; -import java.util.Properties; -import java.util.StringTokenizer; -import java.util.TimeZone; - -import net.i2p.I2PAppContext; -import net.i2p.client.naming.PetName; -import net.i2p.client.naming.PetNameDB; -import net.i2p.data.Base64; -import net.i2p.data.DataFormatException; -import net.i2p.data.DataHelper; -import net.i2p.data.Destination; -import net.i2p.data.Hash; -import net.i2p.data.SessionKey; -import net.i2p.data.SigningPrivateKey; -import net.i2p.data.SigningPublicKey; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.EntryContainer; -import net.i2p.syndie.sml.HTMLRenderer; -import net.i2p.util.Log; - -/** - * - */ -public class BlogManager { - private I2PAppContext _context; - private Log _log; - private static BlogManager _instance; - private File _blogKeyDir; - private File _privKeyDir; - private File _archiveDir; - private File _userDir; - private File _cacheDir; - private File _tempDir; - private File _rootDir; - private Archive _archive; - - static { - TimeZone.setDefault(TimeZone.getTimeZone("GMT")); - } - - public static BlogManager instance() { - synchronized (BlogManager.class) { - if (_instance == null) { - String rootDir = 
I2PAppContext.getGlobalContext().getProperty("syndie.rootDir"); - if (false) { - if (rootDir == null) - rootDir = System.getProperty("user.home"); - rootDir = rootDir + File.separatorChar + ".syndie"; - } else { - if (rootDir == null) - rootDir = "./syndie"; - } - _instance = new BlogManager(I2PAppContext.getGlobalContext(), rootDir, false); - _instance.getArchive().regenerateIndex(); - } - return _instance; - } - } - - public BlogManager(I2PAppContext ctx, String rootDir) { this(ctx, rootDir, true); } - public BlogManager(I2PAppContext ctx, String rootDir, boolean regenIndex) { - _context = ctx; - _log = ctx.logManager().getLog(BlogManager.class); - _rootDir = new File(rootDir); - _rootDir.mkdirs(); - readConfig(); - _blogKeyDir = new File(_rootDir, "blogkeys"); - _privKeyDir = new File(_rootDir, "privkeys"); - String archiveDir = _context.getProperty("syndie.archiveDir"); - if (archiveDir != null) - _archiveDir = new File(archiveDir); - else - _archiveDir = new File(_rootDir, "archive"); - _userDir = new File(_rootDir, "users"); - _cacheDir = new File(_rootDir, "cache"); - _tempDir = new File(_rootDir, "temp"); - _blogKeyDir.mkdirs(); - _privKeyDir.mkdirs(); - _archiveDir.mkdirs(); - _cacheDir.mkdirs(); - _userDir.mkdirs(); - _tempDir.mkdirs(); - _archive = new Archive(ctx, _archiveDir.getAbsolutePath(), _cacheDir.getAbsolutePath(), this); - if (regenIndex) - _archive.regenerateIndex(); - } - - private File getConfigFile() { return new File(_rootDir, "syndie.config"); } - private void readConfig() { - File config = getConfigFile(); - if (config.exists()) { - try { - Properties p = new Properties(); - DataHelper.loadProps(p, config); - for (Iterator iter = p.keySet().iterator(); iter.hasNext(); ) { - String key = (String)iter.next(); - System.setProperty(key, p.getProperty(key)); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Read config prop [" + key + "] = [" + p.getProperty(key) + "]"); - } - } catch (IOException ioe) { - if (_log.shouldLog(Log.DEBUG)) - 
_log.debug("Err reading", ioe); - } - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Config doesn't exist: " + config.getPath()); - } - } - - public void writeConfig() { - File config = new File(_rootDir, "syndie.config"); - FileOutputStream out = null; - try { - out = new FileOutputStream(config); - for (Iterator iter = _context.getPropertyNames().iterator(); iter.hasNext(); ) { - String name = (String)iter.next(); - if (name.startsWith("syndie.")) - out.write(DataHelper.getUTF8(name + '=' + _context.getProperty(name) + '\n')); - } - } catch (IOException ioe) { - _log.error("Error writing the config", ioe); - } finally { - if (out != null) try { out.close(); } catch (IOException ioe) {} - } - } - - public BlogInfo createBlog(String name, String description, String contactURL, String archives[]) { - return createBlog(name, null, description, contactURL, archives); - } - public BlogInfo createBlog(String name, SigningPublicKey posters[], String description, String contactURL, String archives[]) { - Object keys[] = _context.keyGenerator().generateSigningKeypair(); - SigningPublicKey pub = (SigningPublicKey)keys[0]; - SigningPrivateKey priv = (SigningPrivateKey)keys[1]; - - try { - FileOutputStream out = new FileOutputStream(new File(_privKeyDir, Base64.encode(pub.calculateHash().getData()) + ".priv")); - pub.writeBytes(out); - priv.writeBytes(out); - } catch (DataFormatException dfe) { - _log.error("Error creating the blog", dfe); - return null; - } catch (IOException ioe) { - _log.error("Error creating the blog", ioe); - return null; - } - - return createInfo(pub, priv, name, posters, description, contactURL, archives, 0); - } - - public BlogInfo createInfo(SigningPublicKey pub, SigningPrivateKey priv, String name, SigningPublicKey posters[], - String description, String contactURL, String archives[], int edition) { - Properties opts = new Properties(); - if (name == null) name = ""; - opts.setProperty("Name", name); - if (description == null) description = 
""; - opts.setProperty("Description", description); - opts.setProperty("Edition", Integer.toString(edition)); - if (contactURL == null) contactURL = ""; - opts.setProperty("ContactURL", contactURL); - for (int i = 0; archives != null && i < archives.length; i++) - opts.setProperty("Archive." + i, archives[i]); - - BlogInfo info = new BlogInfo(pub, posters, opts); - info.sign(_context, priv); - - _archive.storeBlogInfo(info); - - return info; - } - - public boolean updateMetadata(User user, Hash blog, Properties opts) { - if (!user.getAuthenticated()) return false; - BlogInfo oldInfo = getArchive().getBlogInfo(blog); - if (oldInfo == null) return false; - if (!user.getBlog().equals(oldInfo.getKey().calculateHash())) return false; - int oldEdition = 0; - try { - String ed = oldInfo.getProperty("Edition"); - if (ed != null) - oldEdition = Integer.parseInt(ed); - } catch (NumberFormatException nfe) {} - opts.setProperty("Edition", oldEdition + 1 + ""); - BlogInfo info = new BlogInfo(oldInfo.getKey(), oldInfo.getPosters(), opts); - SigningPrivateKey key = getMyPrivateKey(oldInfo); - info.sign(_context, key); - getArchive().storeBlogInfo(info); - user.setLastMetaEntry(oldEdition+1); - saveUser(user); - return true; - } - - public Archive getArchive() { return _archive; } - public File getTempDir() { return _tempDir; } - public File getRootDir() { return _rootDir; } - - public List listMyBlogs() { - File files[] = _privKeyDir.listFiles(); - List rv = new ArrayList(); - for (int i = 0; i < files.length; i++) { - if (files[i].isFile() && !files[i].isHidden()) { - FileInputStream in = null; - try { - SigningPublicKey pub = new SigningPublicKey(); - in = new FileInputStream(files[i]); - pub.readBytes(in); - BlogInfo info = _archive.getBlogInfo(pub.calculateHash()); - if (info != null) - rv.add(info); - } catch (IOException ioe) { - _log.error("Error listing the blog", ioe); - } catch (DataFormatException dfe) { - _log.error("Error listing the blog", dfe); - } finally { - if 
(in != null) try { in.close(); } catch (IOException ioe) {} - } - } - } - return rv; - } - - public SigningPrivateKey getMyPrivateKey(BlogInfo blog) { - if (blog == null) return null; - File keyFile = new File(_privKeyDir, Base64.encode(blog.getKey().calculateHash().getData()) + ".priv"); - FileInputStream in = null; - try { - in = new FileInputStream(keyFile); - SigningPublicKey pub = new SigningPublicKey(); - pub.readBytes(in); - SigningPrivateKey priv = new SigningPrivateKey(); - priv.readBytes(in); - return priv; - } catch (IOException ioe) { - _log.error("Error reading the blog key", ioe); - return null; - } catch (DataFormatException dfe) { - _log.error("Error reading the blog key", dfe); - return null; - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - } - } - - public User getUser(Hash blog) { - File files[] = _userDir.listFiles(); - for (int i = 0; i < files.length; i++) { - if (files[i].isFile() && !files[i].isHidden()) { - Properties userProps = loadUserProps(files[i]); - if (userProps == null) - continue; - User user = new User(_context); - user.load(userProps); - if (blog.equals(user.getBlog())) - return user; - } - } - return null; - } - - /** - * List of User instances - */ - public List listUsers() { - File files[] = _userDir.listFiles(); - List rv = new ArrayList(); - for (int i = 0; i < files.length; i++) { - if (files[i].isFile() && !files[i].isHidden()) { - Properties userProps = loadUserProps(files[i]); - if (userProps == null) - continue; - User user = new User(_context); - user.load(userProps); - rv.add(user); - } - } - return rv; - } - - private Properties loadUserProps(File userFile) { - BufferedReader in = null; - try { - Properties props = new Properties(); - FileInputStream fin = new FileInputStream(userFile); - in = new BufferedReader(new InputStreamReader(fin, "UTF-8")); - String line = null; - while ( (line = in.readLine()) != null) { - int split = line.indexOf('='); - if (split <= 0) continue; - String 
key = line.substring(0, split); - String val = line.substring(split+1); - props.setProperty(key.trim(), val.trim()); - } - String userHash = userFile.getName(); - props.setProperty(User.PROP_USERHASH, userHash); - return props; - } catch (IOException ioe) { - return null; - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - } - } - - public boolean changePasswrd(User user, String oldPass, String pass0, String pass1) { - boolean ok = user.changePassword(oldPass, pass0, pass1); - if (ok) - saveUser(user); - return ok; - } - - - public User login(String login, String pass) { - User u = new User(_context); - String ok = login(u, login, pass); - if (User.LOGIN_OK.equals(ok)) - return u; - else - return new User(_context); - } - - public String login(User user, String login, String pass) { - if ( (login == null) || (pass == null) ) return "Login not specified"; - Hash userHash = _context.sha().calculateHash(DataHelper.getUTF8(login)); - Hash passHash = _context.sha().calculateHash(DataHelper.getUTF8(pass)); - File userFile = new File(_userDir, Base64.encode(userHash.getData())); - if (_log.shouldLog(Log.INFO)) - _log.info("Attempting to login to " + login + " w/ pass = " + pass - + ": file = " + userFile.getAbsolutePath() + " passHash = " - + Base64.encode(passHash.getData())); - if (userFile.exists()) { - try { - Properties props = loadUserProps(userFile); - if (props == null) throw new IOException("Error reading " + userFile); - String rv = user.login(login, pass, props); - if (User.LOGIN_OK.equals(rv)) - _log.info("Login successful"); - else - _log.info("Login failed: [" + rv + "]"); - return rv; - } catch (IOException ioe) { - _log.error("Error logging in", ioe); - return "Error logging in - corrupt userfile"; - } - } else { - if (_log.shouldLog(Log.INFO)) - _log.info("User does not exist"); - return "User does not exist"; - } - } - - /** hash of the password required to register and create a new blog (null means no password required) */ - 
public String getRegistrationPasswordHash() { - String pass = _context.getProperty("syndie.registrationPassword"); - if ( (pass == null) || (pass.trim().length() <= 0) ) return null; - return pass; - } - - /** Password required to access the remote syndication functinoality (null means no password required) */ - public String getRemotePasswordHash() { - String pass = _context.getProperty("syndie.remotePassword"); - - if ( (pass == null) || (pass.trim().length() <= 0) ) return null; - return pass; - } - public String getAdminPasswordHash() { - String pass = _context.getProperty("syndie.adminPassword"); - if ( (pass == null) || (pass.trim().length() <= 0) ) return ""; - return pass; - } - - public boolean isConfigured() { - String p = _context.getProperty("syndie.singleUser"); - if(p==null) - return false; - return true; - } - - private static final boolean DEFAULT_IS_SINGLEUSER = true; - - /** - * If true, this syndie instance is meant for just one local user, so we don't need - * to password protect registration, remote.jsp, or admin.jsp - * - */ - public boolean isSingleUser() { - if (!isConfigured()) return DEFAULT_IS_SINGLEUSER; - String isSingle = _context.getProperty("syndie.singleUser"); - return ( (isSingle != null) && (Boolean.valueOf(isSingle).booleanValue()) ); - } - - public String getDefaultProxyHost() { return _context.getProperty("syndie.defaultProxyHost", "localhost"); } - public String getDefaultProxyPort() { return _context.getProperty("syndie.defaultProxyPort", "4444"); } - public String[] getUpdateArchives() { - String str = _context.getProperty("syndie.updateArchives", ""); - if ( (str != null) && (str.trim().length() > 0) ) - return str.split(","); - else - return new String[0]; - } - public boolean getImportAddresses() { return _context.getProperty("syndie.importAddresses", "false").equals("true"); } - public int getUpdateDelay() { - int delay = Integer.parseInt(_context.getProperty("syndie.updateDelay", "12")); - if (delay < 1) delay = 1; - 
return delay; - } - - public List getRssFeeds() { - List feedList = new ArrayList(); - int i=0; - while(true) { - String url = _context.getProperty("syndie.rssFeed."+i+".url"); - String blog = _context.getProperty("syndie.rssFeed."+i+".blog"); - String tagPrefix = _context.getProperty("syndie.rssFeed."+i+".tagPrefix"); - if(url==null || blog==null || tagPrefix==null) - break; - String feed[] = new String[3]; - feed[0]=url.trim(); - feed[1]=blog.trim(); - feed[2]=tagPrefix.trim(); - feedList.add(feed); - i++; - } - return feedList; - } - public boolean addRssFeed(String url, String blog, String tagPrefix) { - - List feedList = getRssFeeds(); - int nextIdx=feedList.size(); - - String baseFeedProp="syndie.rssFeed."+nextIdx; - System.setProperty(baseFeedProp+".url",url); - System.setProperty(baseFeedProp+".blog",blog); - System.setProperty(baseFeedProp+".tagPrefix",tagPrefix); - _log.info("addRssFeed("+nextIdx+"): "+url); - writeConfig(); - Updater.wakeup(); - return true; - } - public boolean deleteRssFeed(String url, String blog, String tagPrefix) { - List feedList = getRssFeeds(); - Iterator iter = feedList.iterator(); - int idx=0; - while(iter.hasNext()) { - String fields[] = (String[])iter.next(); - if(fields[0].equals(url) && - fields[1].equals(blog) && - fields[2].equals(tagPrefix)) { - break; - } - idx++; - } - - // copy any remaining to idx-1 - while(iter.hasNext()) { - String fields[] = (String[])iter.next(); - String baseFeedProp="syndie.rssFeed."+idx; - System.setProperty(baseFeedProp+".url",fields[0]); - System.setProperty(baseFeedProp+".blog",fields[1]); - System.setProperty(baseFeedProp+".tagPrefix",fields[2]); - idx++; - } - - // Delete last idx from properties - String baseFeedProp="syndie.rssFeed."+idx; - System.getProperties().remove(baseFeedProp+".url"); - System.getProperties().remove(baseFeedProp+".blog"); - System.getProperties().remove(baseFeedProp+".tagPrefix"); - _log.info("deleteRssFeed("+idx+"): "+url); - writeConfig(); - return true; - } - 
- private static final String DEFAULT_LOGIN = "default"; - private static final String DEFAULT_PASS = ""; - - private static final String PROP_DEFAULT_LOGIN = "syndie.defaultSingleUserLogin"; - private static final String PROP_DEFAULT_PASS = "syndie.defaultSingleUserPass"; - - public String getDefaultLogin() { - String login = _context.getProperty(PROP_DEFAULT_LOGIN); - if ( (login == null) || (login.trim().length() <= 0) ) - login = DEFAULT_LOGIN; - return login; - } - public String getDefaultPass() { - String pass = _context.getProperty(PROP_DEFAULT_PASS); - if ( (pass == null) || (pass.trim().length() <= 0) ) - pass = DEFAULT_PASS; - return pass; - } - - /** - * If we are a single user instance, when we create the default user, give them - * addressbook entries for each of the following, *and* schedule them for syndication - * - */ - private static final String DEFAULT_SINGLE_USER_ARCHIVES[] = new String[] { - "http://syndiemedia.i2p/archive/archive.txt" - , "http://gloinsblog.i2p/archive/archive.txt" - , "http://glog.i2p/archive/archive.txt" - }; - - public User getDefaultUser() { - User user = new User(_context); - getDefaultUser(user); - return user; - } - public void getDefaultUser(User user) { - if (isSingleUser()) { - Hash userHash = _context.sha().calculateHash(DataHelper.getUTF8(getDefaultLogin())); - File userFile = new File(_userDir, Base64.encode(userHash.getData())); - if (_log.shouldLog(Log.INFO)) - _log.info("Attempting to login to the default user: " + userFile.getAbsolutePath()); - - if (userFile.exists()) { - Properties props = loadUserProps(userFile); - if (props == null) { - user.invalidate(); - _log.error("Error reading the default user file: " + userFile); - return; - } - String ok = user.login(getDefaultLogin(), getDefaultPass(), props); - if (User.LOGIN_OK.equals(ok)) { - return; - } else { - user.invalidate(); - _log.error("Error logging into the default user: " + ok); - return; - } - } else { - String ok = register(user, 
getDefaultLogin(), getDefaultPass(), "", "default", "Default Syndie blog", ""); - if (User.LOGIN_OK.equals(ok)) { - _log.info("Default user created: " + user); - String altArchives = _context.getProperty("syndie.defaultSingleUserArchives"); - String archives[] = DEFAULT_SINGLE_USER_ARCHIVES; - if ( (altArchives != null) && (altArchives.trim().length() > 0) ) { - ArrayList list = new ArrayList(); - StringTokenizer tok = new StringTokenizer(altArchives, ",\t "); - while (tok.hasMoreTokens()) - list.add(tok.nextToken()); - if (list.size() > 0) { - archives = new String[list.size()]; - for (int i = 0; i < list.size(); i++) - archives[i] = (String)list.get(i); - } - } - for (int i = 0; i < archives.length; i++) - user.getPetNameDB().add(new PetName("DefaultArchive" + i, "syndie", "syndiearchive", archives[i])); - scheduleSyndication(archives); - saveUser(user); - return; - } else { - user.invalidate(); - _log.error("Error registering the default user: " + ok); - return; - } - } - } else { - return; - } - } - - public boolean authorizeAdmin(String pass) { - if (isSingleUser()) return true; - String admin = getAdminPasswordHash(); - if ( (admin == null) || (admin.trim().length() <= 0) ) - return false; - String hash = Base64.encode(_context.sha().calculateHash(DataHelper.getUTF8(pass.trim())).getData()); - return (hash.equals(admin)); - } - public boolean authorizeRemote(String pass) { - if (isSingleUser()) return true; - String hash = Base64.encode(_context.sha().calculateHash(DataHelper.getUTF8(pass.trim())).getData()); - String rem = getRemotePasswordHash(); - boolean ok = false; - if ( (rem != null) && (rem.trim().length() > 0) ) - ok = hash.equals(rem); - if (!ok) { - rem = getAdminPasswordHash(); - if ( (rem != null) && (rem.trim().length() > 0) ) - ok = hash.equals(rem); - } - return ok; - } - public boolean authorizeRemote(User user) { - if (isSingleUser()) return true; - return (user.getAuthenticated() && user.getAllowAccessRemote()); - } - - private boolean 
isOkDefaultUser(String defaultUser, String defaultPass) { - User t = new User(_context); - Hash userHash = _context.sha().calculateHash(DataHelper.getUTF8(defaultUser)); - File userFile = new File(_userDir, Base64.encode(userHash.getData())); - if (userFile.exists()) { - Properties props = loadUserProps(userFile); - if (props == null) { - _log.error("Error reading the default user file: " + userFile); - return false; - } - String ok = t.login(defaultUser, defaultPass, props); - if (User.LOGIN_OK.equals(ok)) { - // ok, good enough - return true; - } else { - _log.error("Error logging into the default user, so configuration change rejected: " + ok); - return false; - } - } else { - _log.error("Not setting the default user to a nonexistant user [" + defaultUser + "]"); - return false; - } - } - - public void configure(String registrationPassword, String remotePassword, String adminPass, String defaultSelector, - String defaultProxyHost, int defaultProxyPort, boolean isSingleUser, Properties opts, - String defaultUser, String defaultPass) { - if ( (defaultUser == null) || (defaultUser.length() <= 0) ) - defaultUser = getDefaultLogin(); - if (defaultPass == null) - defaultPass = getDefaultPass(); - // first make sure the default user is valid, if its single user - if (isSingleUser) { - if (!isOkDefaultUser(defaultUser, defaultPass)) - return; - } - File cfg = getConfigFile(); - Writer out = null; - try { - out = new OutputStreamWriter(new FileOutputStream(cfg), "UTF-8"); - if (registrationPassword != null) - out.write("syndie.registrationPassword="+Base64.encode(_context.sha().calculateHash(DataHelper.getUTF8(registrationPassword.trim())).getData()) + "\n"); - if (remotePassword != null) - out.write("syndie.remotePassword="+Base64.encode(_context.sha().calculateHash(DataHelper.getUTF8(remotePassword.trim())).getData()) + "\n"); - if (adminPass != null) - 
out.write("syndie.adminPassword="+Base64.encode(_context.sha().calculateHash(DataHelper.getUTF8(adminPass.trim())).getData()) + "\n"); - if (defaultSelector != null) - out.write("syndie.defaultSelector="+defaultSelector.trim() + "\n"); - if (defaultProxyHost != null) - out.write("syndie.defaultProxyHost="+defaultProxyHost.trim() + "\n"); - if (defaultProxyPort > 0) - out.write("syndie.defaultProxyPort="+defaultProxyPort + "\n"); - - out.write("syndie.defaultSingleUserLogin="+defaultUser+"\n"); - out.write("syndie.defaultSingleUserPass="+defaultPass+"\n"); - - out.write("syndie.singleUser=" + isSingleUser + "\n"); // Used also in isConfigured() - if (opts != null) { - for (Iterator iter = opts.keySet().iterator(); iter.hasNext(); ) { - String key = (String)iter.next(); - String val = opts.getProperty(key); - out.write(key.trim() + "=" + val.trim() + "\n"); - } - } - _archive.setDefaultSelector(defaultSelector); - } catch (IOException ioe) { - _log.error("Error writing out the config", ioe); - } finally { - if (out != null) try { out.close(); } catch (IOException ioe) {} - readConfig(); - } - } - - public String authorizeRemoteAccess(User user, String password) { - if (!user.getAuthenticated()) return "Not logged in"; - String remPass = getRemotePasswordHash(); - if (remPass == null) - return "Remote access password not configured - please specify a remote " + - "archive password"; - - if (authorizeRemote(password)) { - user.setAllowAccessRemote(true); - saveUser(user); - return "Remote access authorized"; - } else { - return "Remote access denied"; - } - } - - /** - * Store user info, regardless of whether they're logged in. This lets you update a - * different user's info! 
- */ - void storeUser(User user) { - String userHash = user.getUserHash(); - File userFile = new File(_userDir, userHash); - if (!userFile.exists()) return; - FileOutputStream out = null; - try { - out = new FileOutputStream(userFile); - out.write(DataHelper.getUTF8(user.export())); - user.getPetNameDB().store(user.getAddressbookLocation()); - } catch (IOException ioe) { - _log.error("Error writing out the user", ioe); - } finally { - if (out != null) try { out.close(); } catch (IOException ioe){} - } - } - - public void saveUser(User user) { - if (!user.getAuthenticated()) return; - storeUser(user); - } - public User register(String login, String password, String registrationPassword, String blogName, String blogDescription, String contactURL) { - User user = new User(_context); - if (User.LOGIN_OK.equals(register(user, login, password, registrationPassword, blogName, blogDescription, contactURL))) - return user; - else - return null; - } - public String register(User user, String login, String password, String registrationPassword, String blogName, String blogDescription, String contactURL) { - System.err.println("Register [" + login + "] pass [" + password + "] name [" + blogName + "] descr [" + blogDescription + "] contact [" + contactURL + "] regPass [" + registrationPassword + "]"); - String hashedRegistrationPassword = getRegistrationPasswordHash(); - if ( (hashedRegistrationPassword != null) && (!isSingleUser()) ) { - try { - if (!hashedRegistrationPassword.equals(Base64.encode(_context.sha().calculateHash(registrationPassword.getBytes("UTF-8")).getData()))) - return "Invalid registration password"; - } catch (UnsupportedEncodingException uee) { - return "Error registering"; - } - } - String userHash = Base64.encode(_context.sha().calculateHash(DataHelper.getUTF8(login)).getData()); - File userFile = new File(_userDir, userHash); - if (userFile.exists()) { - return "Cannot register the login " + login + ": it already exists"; - } else { - BlogInfo info = 
createBlog(blogName, blogDescription, contactURL, null); - String hashedPassword = Base64.encode(_context.sha().calculateHash(DataHelper.getUTF8(password)).getData()); - FileOutputStream out = null; - try { - out = new FileOutputStream(userFile); - BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(out, "UTF-8")); - bw.write("password=" + hashedPassword + "\n"); - bw.write("blog=" + Base64.encode(info.getKey().calculateHash().getData()) + "\n"); - bw.write("lastid=-1\n"); - bw.write("lastmetaedition=0\n"); - bw.write("addressbook=userhosts-"+userHash + ".txt\n"); - bw.write("showimages=false\n"); - bw.write("showexpanded=false\n"); - bw.flush(); - } catch (IOException ioe) { - _log.error("Error registering the user", ioe); - return "Internal error registering - " + ioe.getMessage() + ""; - } finally { - if (out != null) try { out.close(); } catch (IOException ioe) {} - } - String loginResult = login(user, login, password); - _archive.regenerateIndex(); - return loginResult; - } - } - - public String exportHosts(User user) { - if (!user.getAuthenticated() || !user.getAllowAccessRemote()) - return "Not authorized to export the hosts"; - PetNameDB userDb = user.getPetNameDB(); - PetNameDB routerDb = _context.petnameDb(); - // horribly inefficient... - for (Iterator iter = userDb.iterator(); iter.hasNext();) { - PetName pn = (PetName)iter.next(); - if (pn == null) continue; - Destination existing = _context.namingService().lookup(pn.getName()); - if (existing == null && pn.getNetwork().equalsIgnoreCase("i2p")) { - routerDb.add(pn); - try { - routerDb.store(); - } catch (IOException ioe) { - _log.error("Error exporting the hosts", ioe); - return "Error exporting the hosts: " + ioe.getMessage() + ""; - } - } - } - return "Hosts exported"; - } - - /** - * Guess what the next entry ID should be for the given user. Rounds down to - * midnight of the current day + 1 for each post in that day. 
- */ - public long getNextBlogEntry(User user) { - long entryId = -1; - long now = _context.clock().now(); - long dayBegin = getDayBegin(now); - if (user.getMostRecentEntry() >= dayBegin) - entryId = user.getMostRecentEntry() + 1; - else - entryId = dayBegin; - return entryId; - } - - public BlogURI createBlogEntry(User user, String subject, String tags, String entryHeaders, String sml) { - return createBlogEntry(user, true, subject, tags, entryHeaders, sml, null, null, null); - } - public BlogURI createBlogEntry(User user, String subject, String tags, String entryHeaders, String sml, List fileNames, List fileStreams, List fileTypes) { - return createBlogEntry(user, true, subject, tags, entryHeaders, sml, fileNames, fileStreams, fileTypes); - } - public BlogURI createBlogEntry(User user, boolean shouldAuthenticate, String subject, String tags, String entryHeaders, String sml, List fileNames, List fileStreams, List fileTypes) { - if (shouldAuthenticate && !user.getAuthenticated()) return null; - BlogInfo info = getArchive().getBlogInfo(user.getBlog()); - if (info == null) return null; - SigningPrivateKey privkey = getMyPrivateKey(info); - if (privkey == null) return null; - - long entryId = getNextBlogEntry(user); - - _log.debug("Next blog entry ID = " + entryId + " for user " + user.getUsername()); - - StringTokenizer tok = new StringTokenizer(tags, " ,\n\t"); - String tagList[] = new String[tok.countTokens()]; - for (int i = 0; i < tagList.length; i++) - tagList[i] = tok.nextToken().trim(); - - BlogURI uri = new BlogURI(user.getBlog(), entryId); - - try { - StringBuffer raw = new StringBuffer(sml.length() + 128); - raw.append("Subject: ").append(subject).append('\n'); - raw.append("Tags: "); - for (int i = 0; i < tagList.length; i++) - raw.append(tagList[i]).append('\t'); - raw.append('\n'); - if ( (entryHeaders != null) && (entryHeaders.trim().length() > 0) ) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Creating entry with headers: " + entryHeaders); - 
BufferedReader userHeaders = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(DataHelper.getUTF8(entryHeaders)), "UTF-8")); - String line = null; - while ( (line = userHeaders.readLine()) != null) { - line = line.trim(); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("header line: " + line); - if (line.length() <= 0) continue; - int split = line.indexOf('='); - int split2 = line.indexOf(':'); - if ( (split < 0) || ( (split2 > 0) && (split2 < split) ) ) split = split2; - if ( (split < 0) && (split2 < 0) ) - continue; - String key = line.substring(0,split).trim(); - String val = line.substring(split+1).trim(); - raw.append(key).append(": ").append(val).append('\n'); - } - } - raw.append('\n'); - raw.append(sml); - - EntryContainer c = new EntryContainer(uri, tagList, DataHelper.getUTF8(raw)); - if ((fileNames != null) && (fileStreams != null) && (fileNames.size() == fileStreams.size()) ) { - for (int i = 0; i < fileNames.size(); i++) { - String name = (String)fileNames.get(i); - InputStream in = (InputStream)fileStreams.get(i); - String fileType = (fileTypes != null ? 
(String)fileTypes.get(i) : "application/octet-stream"); - ByteArrayOutputStream baos = new ByteArrayOutputStream(1024); - byte buf[] = new byte[1024]; - while (true) { - int read = in.read(buf); - if (read == -1) break; - baos.write(buf, 0, read); - } - byte att[] = baos.toByteArray(); - if ( (att != null) && (att.length > 0) ) - c.addAttachment(att, new File(name).getName(), null, fileType); - } - } - //for (int i = 7; i < args.length; i++) { - // c.addAttachment(read(args[i]), new File(args[i]).getName(), - // "Attached file", "application/octet-stream"); - //} - SessionKey entryKey = null; - //if (!"NONE".equals(args[5])) - // entryKey = new SessionKey(Base64.decode(args[5])); - c.seal(_context, privkey, null); - boolean ok = getArchive().storeEntry(c); - if (ok) { - getArchive().regenerateIndex(); - long prevEntryId = user.getMostRecentEntry(); - user.setMostRecentEntry(entryId); - if(shouldAuthenticate) { - saveUser(user); - } else { - storeUser(user); - } - _log.debug("New entry posted, entryId=" + entryId + " prev=" + prevEntryId); - return uri; - } else { - return null; - } - } catch (IOException ioe) { - _log.error("Error creating post", ioe); - return null; - } - } - - /** - * read in the syndie blog metadata file from the stream, verifying it and adding it to - * the archive if necessary - * - */ - public boolean importBlogMetadata(InputStream metadataStream) throws IOException { - try { - BlogInfo info = new BlogInfo(); - info.load(metadataStream); - if (isBanned(info.getKey().calculateHash())) - return false; - return _archive.storeBlogInfo(info); - } catch (IOException ioe) { - _log.error("Error importing meta", ioe); - return false; - } - } - - /** - * read in the syndie entry file from the stream, verifying it and adding it to - * the archive if necessary - * - */ - public boolean importBlogEntry(InputStream entryStream) throws IOException { - try { - EntryContainer c = new EntryContainer(); - c.load(entryStream); - if 
(isBanned(c.getURI().getKeyHash())) - return false; - return _archive.storeEntry(c); - } catch (IOException ioe) { - _log.error("Error importing entry", ioe); - return false; - } - } - - public String addAddress(User user, String name, String protocol, String location, String schema) { - if (!user.getAuthenticated()) return "Not logged in"; - boolean ok = validateAddressName(name); - if (!ok) return "Invalid name: " + HTMLRenderer.sanitizeString(name) + ""; - ok = validateAddressLocation(location); - if (!ok) return "Invalid location: " + HTMLRenderer.sanitizeString(location) + ""; - if (!validateAddressSchema(schema)) return "Unsupported schema: " + HTMLRenderer.sanitizeString(schema) + ""; - // no need to quote user/location further, as they've been sanitized - - PetNameDB names = user.getPetNameDB(); - if (names.containsName(name)) - return "Name is already in use"; - PetName pn = new PetName(name, schema, protocol, location); - names.add(pn); - - try { - names.store(user.getAddressbookLocation()); - return "Address " + name + " written to your addressbook"; - } catch (IOException ioe) { - return "Error writing out the name: " + ioe.getMessage() + ""; - } - } - - public Properties getKnownHosts(User user, boolean includePublic) throws IOException { - Properties rv = new Properties(); - if ( (user != null) && (user.getAuthenticated()) ) { - File userHostsFile = new File(user.getAddressbookLocation()); - rv.putAll(getKnownHosts(userHostsFile)); - } - if (includePublic) { - rv.putAll(getKnownHosts(new File("hosts.txt"))); - } - return rv; - } - private Properties getKnownHosts(File filename) throws IOException { - Properties rv = new Properties(); - if (filename.exists()) { - FileInputStream in = null; - try { - in = new FileInputStream(filename); - rv.load(in); - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - } - } - return rv; - } - - private boolean validateAddressName(String name) { - if ( (name == null) || (name.trim().length() 
<= 0) ) return false; - for (int i = 0; i < name.length(); i++) { - char c = name.charAt(i); - if (!Character.isLetterOrDigit(c) && ('.' != c) && ('-' != c) && ('_' != c) ) - return false; - } - return true; - } - - private boolean validateAddressLocation(String location) { - if ( (location == null) || (location.trim().length() <= 0) ) return false; - if (false) { - try { - Destination d = new Destination(location); - return (d.getPublicKey() != null); - } catch (DataFormatException dfe) { - _log.error("Error validating address location", dfe); - return false; - } - } else { - // not everything is an i2p destination... - return true; - } - } - - private boolean validateAddressSchema(String schema) { - if ( (schema == null) || (schema.trim().length() <= 0) ) return false; - if (true) { - return true; - } else { - return "eep".equals(schema) || "i2p".equals(schema); - } - } - - private final SimpleDateFormat _dateFormat = new SimpleDateFormat("yyyy/MM/dd", Locale.UK); - public final long getDayBegin() { return getDayBegin(_context.clock().now()); } - public final long getDayBegin(long now) { - synchronized (_dateFormat) { - try { - String str = _dateFormat.format(new Date(now)); - return _dateFormat.parse(str).getTime(); - } catch (ParseException pe) { - pe.printStackTrace(); - // wtf - return -1; - } - } - } - - public void scheduleSyndication(String location) { - String archives[] = getUpdateArchives(); - StringBuffer buf = new StringBuffer(64); - if ( (archives != null) && (archives.length > 0) ) { - for (int i = 0; i < archives.length; i++) - if ( (!archives[i].equals(location)) && (archives[i].trim().length() > 0) ) - buf.append(archives[i]).append(","); - } - if ( (location != null) && (location.trim().length() > 0) ) - buf.append(location.trim()); - System.setProperty("syndie.updateArchives", buf.toString()); - writeConfig(); - Updater.wakeup(); - } - public void scheduleSyndication(String locations[]) { - String archives[] = getUpdateArchives(); - HashSet 
locs = new HashSet(); - for (int i = 0; (archives != null) && (i < archives.length); i++) - locs.add(archives[i]); - for (int i = 0; (locations != null) && (i < locations.length); i++) - locs.add(locations[i]); - - StringBuffer buf = new StringBuffer(64); - for (Iterator iter = locs.iterator(); iter.hasNext(); ) - buf.append(iter.next().toString().trim()).append(','); - System.setProperty("syndie.updateArchives", buf.toString()); - writeConfig(); - Updater.wakeup(); - } - public void unscheduleSyndication(String location) { - String archives[] = getUpdateArchives(); - if ( (archives != null) && (archives.length > 0) ) { - StringBuffer buf = new StringBuffer(64); - for (int i = 0; i < archives.length; i++) - if ( (!archives[i].equals(location)) && (archives[i].trim().length() > 0) ) - buf.append(archives[i]).append(","); - System.setProperty("syndie.updateArchives", buf.toString()); - } - writeConfig(); - } - public boolean syndicationScheduled(String location) { - String archives[] = getUpdateArchives(); - if ( (location == null) || (archives == null) || (archives.length <= 0) ) - return false; - for (int i = 0; i < archives.length; i++) - if (location.equals(archives[i])) - return true; - return false; - } - - public boolean isBanned(Hash blog) { - if ( (blog == null) || (blog.getData() == null) || (blog.getData().length <= 0) ) return false; - String str = blog.toBase64(); - String banned = System.getProperty("syndie.bannedBlogs", ""); - return (banned.indexOf(str) >= 0); - } - - public String[] getBannedBlogs() { - List blogs = new ArrayList(); - String str = System.getProperty("syndie.bannedBlogs", ""); - StringTokenizer tok = new StringTokenizer(str, ","); - while (tok.hasMoreTokens()) { - String blog = tok.nextToken(); - try { - Hash h = new Hash(); - h.fromBase64(blog); - blogs.add(blog); // the base64 string, but verified - } catch (DataFormatException dfe) { - // ignored - } - } - String rv[] = new String[blogs.size()]; - for (int i = 0; i < blogs.size(); 
i++) - rv[i] = (String)blogs.get(i); - return rv; - } - - /** - * Delete the blog from the archive completely, and ban them from ever being added again - */ - public void purgeAndBan(Hash blog) { - String banned[] = getBannedBlogs(); - StringBuffer buf = new StringBuffer(); - String str = blog.toBase64(); - buf.append(str); - for (int i = 0; banned != null && i < banned.length; i++) { - if (!banned[i].equals(str)) - buf.append(",").append(banned[i]); - } - System.setProperty("syndie.bannedBlogs", buf.toString()); - writeConfig(); - _archive.delete(blog); - _archive.regenerateIndex(); - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/CLI.java b/apps/syndie/java/src/net/i2p/syndie/CLI.java deleted file mode 100644 index 556886d89..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/CLI.java +++ /dev/null @@ -1,270 +0,0 @@ -package net.i2p.syndie; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStreamWriter; -import java.io.Writer; -import java.util.ArrayList; -import java.util.List; -import java.util.StringTokenizer; - -import net.i2p.I2PAppContext; -import net.i2p.data.Base64; -import net.i2p.data.DataHelper; -import net.i2p.data.Hash; -import net.i2p.data.SessionKey; -import net.i2p.data.SigningPrivateKey; -import net.i2p.syndie.data.Attachment; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.EntryContainer; -import net.i2p.syndie.sml.HTMLRenderer; - -/** - */ -public class CLI { - public static final String USAGE = "Usage: \n" + - "rootDir regenerateIndex\n" + - "rootDir createBlog name description contactURL[ archiveURL]*\n" + - "rootDir createEntry blogPublicKeyHash tag[,tag]* (NEXT|NOW|entryId) (NONE|entryKeyBase64) smlFile[ attachmentFile attachmentName attachmentDescription mimeType]*\n" + - "rootDir listMyBlogs\n" + - "rootDir listTags blogPublicKeyHash\n" + - "rootDir listEntries blogPublicKeyHash 
blogTag\n" + - "rootDir renderEntry blogPublicKeyHash entryId (NONE|entryKeyBase64) summaryOnly includeImages\n"; - - public static void main(String args[]) { - //args = new String[] { "~/.syndie/", "listEntries", "9qXCJUyUBCCaiIShURo02ckxjrMvrtiDYENv2ATL3-Y=", "/" }; - //args = new String[] { "~/.syndie/", "renderEntry", "Vq~AlW-r7OM763okVUFIDvVFzxOjpNNsAx0rFb2yaE8=", "/", "20050811001", "NONE", "true", "false" }; - if (args.length < 2) { - System.err.print(USAGE); - return; - } - String command = args[1]; - if ("createBlog".equals(command)) - createBlog(args); - else if ("listMyBlogs".equals(command)) - listMyBlogs(args); - else if ("createEntry".equals(command)) - createEntry(args); - else if ("listTags".equals(command)) - listPaths(args); - else if ("listEntries".equals(command)) - listEntries(args); - else if ("regenerateIndex".equals(command)) - regenerateIndex(args); - else if ("renderEntry".equals(command)) - renderEntry(args); - else - System.out.print(USAGE); - } - - private static void createBlog(String args[]) { - BlogManager mgr = new BlogManager(I2PAppContext.getGlobalContext(), args[0]); - String archives[] = new String[args.length - 5]; - System.arraycopy(args, 5, archives, 0, archives.length); - BlogInfo info = mgr.createBlog(args[2], args[3], args[4], archives); - System.out.println("Blog created: " + info); - mgr.getArchive().regenerateIndex(); - } - private static void listMyBlogs(String args[]) { - BlogManager mgr = new BlogManager(I2PAppContext.getGlobalContext(), args[0]); - List info = mgr.listMyBlogs(); - for (int i = 0; i < info.size(); i++) - System.out.println(info.get(i).toString()); - } - - private static void listPaths(String args[]) { - // "rootDir listTags blogPublicKeyHash\n"; - BlogManager mgr = new BlogManager(I2PAppContext.getGlobalContext(), args[0]); - List tags = mgr.getArchive().listTags(new Hash(Base64.decode(args[2]))); - System.out.println("tag count: " + tags.size()); - for (int i = 0; i < tags.size(); i++) - 
System.out.println("Tag " + i + ": " + tags.get(i).toString()); - } - - private static void regenerateIndex(String args[]) { - // "rootDir regenerateIndex\n"; - BlogManager mgr = new BlogManager(I2PAppContext.getGlobalContext(), args[0]); - mgr.getArchive().regenerateIndex(); - System.out.println("Index regenerated"); - } - - private static void listEntries(String args[]) { - // "rootDir listEntries blogPublicKeyHash tag\n"; - BlogManager mgr = new BlogManager(I2PAppContext.getGlobalContext(), args[0]); - List entries = mgr.getArchive().listEntries(new Hash(Base64.decode(args[2])), -1, args[3], null); - System.out.println("Entry count: " + entries.size()); - for (int i = 0; i < entries.size(); i++) { - EntryContainer entry = (EntryContainer)entries.get(i); - System.out.println("***************************************************"); - System.out.println("Entry " + i + ": " + entry.getURI().toString()); - System.out.println("==================================================="); - System.out.println(entry.getEntry().getText()); - System.out.println("==================================================="); - Attachment attachments[] = entry.getAttachments(); - for (int j = 0; j < attachments.length; j++) { - System.out.println("Attachment " + j + ": " + attachments[j]); - } - System.out.println("==================================================="); - } - } - - private static void renderEntry(String args[]) { - //"rootDir renderEntry blogPublicKeyHash entryId (NONE|entryKeyBase64) summaryOnly includeImages\n"; - BlogManager mgr = new BlogManager(I2PAppContext.getGlobalContext(), args[0]); - long id = -1; - try { - id = Long.parseLong(args[3]); - } catch (NumberFormatException nfe) { - nfe.printStackTrace(); - return; - } - SessionKey entryKey = null; - if (!("NONE".equals(args[4]))) - entryKey = new SessionKey(Base64.decode(args[5])); - EntryContainer entry = mgr.getArchive().getEntry(new BlogURI(new Hash(Base64.decode(args[2])), id), entryKey); - if (entry != null) { - 
HTMLRenderer renderer = new HTMLRenderer(I2PAppContext.getGlobalContext()); - boolean summaryOnly = "true".equalsIgnoreCase(args[5]); - boolean showImages = "true".equalsIgnoreCase(args[6]); - try { - File f = File.createTempFile("syndie", ".html"); - Writer out = new OutputStreamWriter(new FileOutputStream(f), "UTF-8"); - renderer.render(null, mgr.getArchive(), entry, out, summaryOnly, showImages); - out.flush(); - out.close(); - System.out.println("Rendered to " + f.getAbsolutePath() + ": " + f.length()); - } catch (IOException ioe) { - ioe.printStackTrace(); - } - } else { - System.err.println("Entry does not exist"); - } - } - - private static void createEntry(String args[]) { - // "rootDir createEntry blogPublicKeyHash tag[,tag]* (NEXT|NOW|entryId) (NONE|entryKeyBase64) " - // smlFile[ attachmentFile attachmentName attachmentDescription mimeType]*\n" - String rootDir = args[0]; - String hashStr = args[2]; - List tags = new ArrayList(); - StringTokenizer tok = new StringTokenizer(args[3], ","); - while (tok.hasMoreTokens()) - tags.add(tok.nextToken().trim()); - String entryIdDef = args[4]; - String entryKeyDef = args[5]; - String smlFile = args[6]; - List attachmentFilenames = new ArrayList(); - List attachmentNames = new ArrayList(); - List attachmentDescriptions = new ArrayList(); - List attachmentMimeTypes = new ArrayList(); - for (int i = 7; i + 3 < args.length; i += 4) { - attachmentFilenames.add(args[i].trim()); - attachmentNames.add(args[i+1].trim()); - attachmentDescriptions.add(args[i+2].trim()); - attachmentMimeTypes.add(args[i+3].trim()); - } - I2PAppContext ctx = I2PAppContext.getGlobalContext(); - BlogManager mgr = new BlogManager(ctx, rootDir); - EntryContainer entry = createEntry(ctx, mgr, hashStr, tags, entryIdDef, entryKeyDef, smlFile, true, - attachmentFilenames, attachmentNames, attachmentDescriptions, - attachmentMimeTypes); - if (entry != null) - mgr.getArchive().regenerateIndex(); - } - - /** - * Create a new entry, storing it into the 
blogManager's archive and incrementing the - * blog's "most recent id" setting. This does not however regenerate the manager's index. - * - * @param blogHashStr base64(SHA256(blog public key)) - * @param tags list of tags/categories to post under (String elements - * @param entryIdDef NEXT (for next entry id for the blog, or midnight of the current day), - * NOW (current time), or an explicit entry id - * @param entryKeyDef session key under which the entry should be encrypted - * @param smlFilename file in which the sml entry is to be found - * @param storeLocal if true, should this entry be stored in the mgr.getArchive() - * @param attachmentFilenames list of filenames for attachments to load - * @param attachmentNames list of names to use for the given attachments - * @param attachmentDescriptions list of descriptions for the given attachments - * @param attachmentMimeTypes list of mime types to use for the given attachments - * @return blog URI posted, or null - */ - public static EntryContainer createEntry(I2PAppContext ctx, BlogManager mgr, String blogHashStr, List tags, - String entryIdDef, String entryKeyDef, String smlFilename, boolean storeLocal, - List attachmentFilenames, List attachmentNames, - List attachmentDescriptions, List attachmentMimeTypes) { - Hash blogHash = new Hash(Base64.decode(blogHashStr)); - User user = mgr.getUser(blogHash); - long entryId = -1; - if ("NOW".equalsIgnoreCase(entryIdDef)) { - entryId = ctx.clock().now(); - } else if ("NEXT".equalsIgnoreCase(entryIdDef) || (entryIdDef == null)) { - entryId = mgr.getNextBlogEntry(user); - } else { - try { - entryId = Long.parseLong(entryIdDef); - } catch (NumberFormatException nfe) { - nfe.printStackTrace(); - return null; - } - } - String tagVals[] = new String[(tags != null ? 
tags.size() : 0)]; - if (tags != null) - for (int i = 0; i < tags.size(); i++) - tagVals[i] = ((String)tags.get(i)).trim(); - BlogURI uri = new BlogURI(blogHash, entryId); - BlogInfo blog = mgr.getArchive().getBlogInfo(uri); - if (blog == null) { - System.err.println("Blog does not exist: " + uri); - return null; - } - SigningPrivateKey key = mgr.getMyPrivateKey(blog); - - try { - byte smlData[] = read(smlFilename); - EntryContainer c = new EntryContainer(uri, tagVals, smlData); - if ( (attachmentFilenames != null) && - (attachmentFilenames.size() == attachmentNames.size()) && - (attachmentFilenames.size() == attachmentDescriptions.size()) && - (attachmentFilenames.size() == attachmentMimeTypes.size()) ) { - for (int i = 0; i < attachmentFilenames.size(); i++) { - File attachmentFile = new File((String)attachmentFilenames.get(i)); - String name = (String)attachmentNames.get(i); - String descr = (String)attachmentDescriptions.get(i); - String mimetype = (String)attachmentMimeTypes.get(i); - c.addAttachment(read(attachmentFile.getAbsolutePath()), name, descr, mimetype); - } - } - SessionKey entryKey = null; - if ( (entryKeyDef != null) && (entryKeyDef.trim().length() > 0) && (!"NONE".equalsIgnoreCase(entryKeyDef)) ) - entryKey = new SessionKey(Base64.decode(entryKeyDef)); - c.seal(ctx, key, entryKey); - if (storeLocal) { - boolean ok = mgr.getArchive().storeEntry(c); - //System.out.println("Blog entry created: " + c+ "? 
" + ok); - if (!ok) { - System.err.println("Error: store failed"); - return null; - } - } - user.setMostRecentEntry(uri.getEntryId()); - mgr.storeUser(user); // saves even if !user.getAuthenticated() - return c; - } catch (IOException ioe) { - ioe.printStackTrace(); - return null; - } - } - - private static final byte[] read(String file) throws IOException { - File f = new File(file); - FileInputStream in = new FileInputStream(f); - byte rv[] = new byte[(int)f.length()]; - if (rv.length != DataHelper.read(in, rv)) - throw new IOException("File not read completely"); - return rv; - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/CLIPost.java b/apps/syndie/java/src/net/i2p/syndie/CLIPost.java deleted file mode 100644 index 59f0bb6a7..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/CLIPost.java +++ /dev/null @@ -1,218 +0,0 @@ -package net.i2p.syndie; - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStreamReader; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.StringTokenizer; - -import net.i2p.I2PAppContext; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.EntryContainer; -import net.i2p.util.EepPost; - -/** - * Simple CLI to post an entry. 
- * - */ -public class CLIPost { - public static final String USAGE = "Usage: \"" + CLIPost.class.getName() + " [args]\", where args are:" - + "\n --syndieDir $syndieRootDir // syndie root dir, under which syndie.config exists" - + "\n --blog $blogHash // base64 of the blog's key" - + "\n --sml $smlFile // file with the SML entry" - + "\n [--importurl ($url|none)] // defaults to http://localhost:7657/syndie/import.jsp" - + "\n [--proxyhost $hostname] // HTTP proxy host for sending the data to the import URL" - + "\n [--proxyport $portnum] // HTTP proxy port for sending the data to the import URL" - + "\n [--storelocal (true|false)] // should it be stored directly with the file system" - + "\n // (false by default, since its stored locally via importurl)" - + "\n [--entryId ($num|next|now)] // entryId to use: explicit, the blog's next (default), or timestamp" - + "\n [--attachment$N $file $name $desc $type]" - + "\n // Nth file / suggested name / description / mime type"; - - public static void main(String args[]) { - String rootDir = getArg(args, "syndieDir"); - String hashStr = getArg(args, "blog"); - String smlFile = getArg(args, "sml"); - if ( (rootDir == null) || (hashStr == null) || (smlFile == null) ) { - System.err.println(USAGE); - return; - } - - String url = getArg(args, "importurl"); - String entryIdDef = getArg(args, "entryId"); - - List attachmentFilenames = new ArrayList(); - List attachmentNames = new ArrayList(); - List attachmentDescriptions = new ArrayList(); - List attachmentMimeTypes = new ArrayList(); - while (true) { - // --attachment$N $file $name $desc $type] - String file = getAttachmentParam(args, attachmentFilenames.size(), 0); - String name = getAttachmentParam(args, attachmentFilenames.size(), 1); - String desc = getAttachmentParam(args, attachmentFilenames.size(), 2); - String type = getAttachmentParam(args, attachmentFilenames.size(), 3); - if ( (file != null) && (name != null) && (desc != null) && (type != null) ) { - 
attachmentFilenames.add(file); - attachmentNames.add(name); - attachmentDescriptions.add(desc); - attachmentMimeTypes.add(type); - } else { - break; - } - } - - List tags = readTags(smlFile); - - // don't support the entry key stuff yet... - String entryKeyDef = null; //args[5]; - - String loc = getArg(args, "storelocal"); - boolean storeLocal = false; - if (loc != null) - storeLocal = Boolean.valueOf(loc).booleanValue(); - - if (!storeLocal && "none".equalsIgnoreCase(url)) { - System.err.println("You need to post it somewhere, so either specify \"--storelocal true\""); - System.err.println("or don't specify \"--importurl none\""); - return; - } - - I2PAppContext ctx = I2PAppContext.getGlobalContext(); - BlogManager mgr = new BlogManager(ctx, rootDir, false); - EntryContainer entry = CLI.createEntry(ctx, mgr, hashStr, tags, entryIdDef, entryKeyDef, smlFile, storeLocal, - attachmentFilenames, attachmentNames, attachmentDescriptions, - attachmentMimeTypes); - if (entry != null) { - if (storeLocal) - mgr.getArchive().regenerateIndex(); - if (!("none".equalsIgnoreCase(url))) { - if ( (url == null) || (url.trim().length() <= 0) ) - url = "http://localhost:7657/syndie/import.jsp"; - - // now send it to the import URL - BlogInfo info = mgr.getArchive().getBlogInfo(entry.getURI().getKeyHash()); - File fMeta = null; - File fData = null; - - try { - fMeta = File.createTempFile("cli", ".snm", mgr.getTempDir()); - fData = File.createTempFile("cli", ".snd", mgr.getTempDir()); - FileOutputStream out = new FileOutputStream(fMeta); - info.write(out); - out.close(); - out = new FileOutputStream(fData); - entry.write(out, true); - out.close(); - fMeta.deleteOnExit(); - fData.deleteOnExit(); - } catch (IOException ioe) { - System.err.println("Error writing temp files: " + ioe.getMessage()); - return; - } - - Map uploads = new HashMap(2); - uploads.put("blogmeta0", fMeta); - uploads.put("blogpost0", fData); - - String proxyHost = getArg(args, "proxyhost"); - String proxyPortStr = 
getArg(args, "proxyport"); - int proxyPort = -1; - if (proxyPortStr != null) - try { proxyPort = Integer.parseInt(proxyPortStr); } catch (NumberFormatException nfe) { } - - OnCompletion job = new OnCompletion(); - EepPost post = new EepPost(); - post.postFiles(url, (proxyPort > 0 ? proxyHost : null), proxyPort, uploads, job); - boolean posted = job.waitForCompletion(30*1000); - if (posted) - System.out.println("Posted successfully: " + entry.getURI().toString()); - else - System.out.println("Posting failed"); - } else if (storeLocal) { - System.out.println("Store local successfully: " + entry.getURI().toString()); - } else { - // foo - } - } else { - System.err.println("Error creating the blog entry"); - } - } - - private static class OnCompletion implements Runnable { - private boolean _complete; - public OnCompletion() { _complete = false; } - public void run() { - _complete = true; - synchronized (OnCompletion.this) { - OnCompletion.this.notifyAll(); - } - } - public boolean waitForCompletion(long max) { - long end = max + System.currentTimeMillis(); - while (!_complete) { - long now = System.currentTimeMillis(); - if (now >= end) - return false; - try { - synchronized (OnCompletion.this) { - OnCompletion.this.wait(end-now); - } - } catch (InterruptedException ie) {} - } - return true; - } - } - - private static String getArg(String args[], String param) { - if (args != null) - for (int i = 0; i + 1< args.length; i++) - if (args[i].equalsIgnoreCase("--"+param)) - return args[i+1]; - return null; - } - private static String getAttachmentParam(String args[], int attachmentNum, int paramNum) { - if (args != null) - for (int i = 0; i + 4 < args.length; i++) - if (args[i].equalsIgnoreCase("--attachment"+attachmentNum)) - return args[i+1+paramNum]; - return null; - } - - private static List readTags(String smlFile) { - BufferedReader in = null; - try { - in = new BufferedReader(new InputStreamReader(new FileInputStream(smlFile), "UTF-8")); - String line = null; - 
while ( (line = in.readLine()) != null) { - if (line.length() <= 0) - return new ArrayList(); - else if (line.startsWith("Tags:")) - return parseTags(line.substring("Tags:".length())); - } - return null; - } catch (IOException ioe) { - return new ArrayList(); - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - } - } - - private static List parseTags(String tags) { - if (tags == null) - return new ArrayList(); - StringTokenizer tok = new StringTokenizer(tags, " ,\t\n"); - List rv = new ArrayList(); - while (tok.hasMoreTokens()) { - String cur = tok.nextToken().trim(); - if (cur.length() > 0) - rv.add(cur); - } - return rv; - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/CachedEntry.java b/apps/syndie/java/src/net/i2p/syndie/CachedEntry.java deleted file mode 100644 index b1fdaa811..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/CachedEntry.java +++ /dev/null @@ -1,268 +0,0 @@ -package net.i2p.syndie; - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Properties; - -import net.i2p.I2PAppContext; -import net.i2p.data.Base64; -import net.i2p.data.DataHelper; -import net.i2p.data.Hash; -import net.i2p.data.SessionKey; -import net.i2p.data.Signature; -import net.i2p.syndie.data.Attachment; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.Entry; -import net.i2p.syndie.data.EntryContainer; - -/** - * Lazy loading wrapper for an entry, pulling data out of a cached & extracted dir, - * rather than dealing with the crypto, zip, etc. 
- * - */ -class CachedEntry extends EntryContainer { - private File _entryDir; - - private int _format; - private int _size; - private BlogURI _blog; - private Properties _headers; - private Entry _entry; - private Attachment _attachments[]; - - public CachedEntry(File entryDir) throws IOException { - _entryDir = entryDir; - importMeta(); - _entry = new CachedEntryDetails(); - _attachments = null; - } - - public boolean isValid() { - return (_entry != null) && (_blog != null); - } - - // always available, loaded from meta - public int getFormat() { return _format; } - public BlogURI getURI() { return _blog; } - public int getCompleteSize() { return _size; } - - // dont need to override it, as it works off getHeader - //public String[] getTags() { return super.getTags(); } - - public Entry getEntry() { return _entry; } - public Attachment[] getAttachments() { - importAttachments(); - return _attachments; - } - public String getHeader(String key) { - importHeaders(); - return _headers.getProperty(key); - } - - public String toString() { return getURI().toString(); } - public boolean verifySignature(I2PAppContext ctx, BlogInfo info) { return true; } - - // not supported... 
- public void parseRawData(I2PAppContext ctx) throws IOException { - throw new IllegalStateException("Not supported on cached entries"); - } - public void parseRawData(I2PAppContext ctx, SessionKey zipKey) throws IOException { - throw new IllegalStateException("Not supported on cached entries"); - } - public void setHeader(String name, String val) { - throw new IllegalStateException("Not supported on cached entries"); - } - public void addAttachment(byte data[], String name, String description, String mimeType) { - throw new IllegalStateException("Not supported on cached entries"); - } - public void write(OutputStream out, boolean includeRealSignature) throws IOException { - throw new IllegalStateException("Not supported on cached entries"); - } - public Signature getSignature() { - throw new IllegalStateException("Not supported on cached entries"); - } - - // now the actual lazy loading code - private void importMeta() throws IOException { - Properties meta = readProps(new File(_entryDir, EntryExtractor.META)); - _format = getInt(meta, "format"); - _size = getInt(meta, "size"); - _blog = new BlogURI(new Hash(Base64.decode(meta.getProperty("blog"))), getLong(meta, "entry")); - } - - private Properties importHeaders() { - if (_headers == null) { - try { - _headers = readProps(new File(_entryDir, EntryExtractor.HEADERS)); - } catch (IOException ioe) { - ioe.printStackTrace(); - _headers = new Properties(); - } - } - return _headers; - } - - private void importAttachments() { - if (_attachments == null) { - List attachments = new ArrayList(); - int i = 0; - while (true) { - File meta = new File(_entryDir, EntryExtractor.ATTACHMENT_PREFIX + i + EntryExtractor.ATTACHMENT_META_SUFFIX); - if (meta.exists()) - attachments.add(new CachedAttachment(i, meta)); - else - break; - i++; - } - Attachment a[] = new Attachment[attachments.size()]; - for (i = 0; i < a.length; i++) - a[i] = (Attachment)attachments.get(i); - _attachments = a; - } - return; - } - - private static 
Properties readProps(File propsFile) throws IOException { - Properties rv = new Properties(); - BufferedReader in = null; - try { - in = new BufferedReader(new InputStreamReader(new FileInputStream(propsFile), "UTF-8")); - String line = null; - while ( (line = in.readLine()) != null) { - int split = line.indexOf('='); - if ( (split <= 0) || (split >= line.length()) ) continue; - rv.setProperty(line.substring(0, split).trim(), line.substring(split+1).trim()); - } - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - } - return rv; - } - - private static final int getInt(Properties props, String key) { - String val = props.getProperty(key); - try { return Integer.parseInt(val); } catch (NumberFormatException nfe) {} - return -1; - } - private static final long getLong(Properties props, String key) { - String val = props.getProperty(key); - try { return Long.parseLong(val); } catch (NumberFormatException nfe) {} - return -1l; - } - - - private class CachedEntryDetails extends Entry { - private String _text; - public CachedEntryDetails() { - super(null); - } - public String getText() { - importText(); - return _text; - } - private void importText() { - if (_text == null) { - InputStream in = null; - try { - File f = new File(_entryDir, EntryExtractor.ENTRY); - byte buf[] = new byte[(int)f.length()]; // hmm - in = new FileInputStream(f); - int read = DataHelper.read(in, buf); - if (read != buf.length) throw new IOException("read: " + read + " file size: " + buf.length + " for " + f.getPath()); - _text = DataHelper.getUTF8(buf); - } catch (IOException ioe) { - ioe.printStackTrace(); - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - } - } - } - } - - private class CachedAttachment extends Attachment { - private int _attachmentId; - private File _metaFile; - private Properties _attachmentHeaders; - private int _dataSize; - - public CachedAttachment(int id, File meta) { - super(null, null); - _attachmentId = id; - 
_metaFile = meta; - _attachmentHeaders = null; - } - - public int getDataLength() { - importAttachmentHeaders(); - return _dataSize; - } - - public byte[] getData() { - throw new IllegalStateException("Not supported on cached entries"); - } - public InputStream getDataStream() throws IOException { - String name = EntryExtractor.ATTACHMENT_PREFIX + _attachmentId + EntryExtractor.ATTACHMENT_DATA_SUFFIX; - File f = new File(_entryDir, name); - return new FileInputStream(f); - } - - public byte[] getRawMetadata() { - throw new IllegalStateException("Not supported on cached entries"); - } - - public String getMeta(String key) { - importAttachmentHeaders(); - return _attachmentHeaders.getProperty(key); - } - - //public String getName() { return getMeta(NAME); } - //public String getDescription() { return getMeta(DESCRIPTION); } - //public String getMimeType() { return getMeta(MIMETYPE); } - - public void setMeta(String key, String val) { - throw new IllegalStateException("Not supported on cached entries"); - } - - public Map getMeta() { - importAttachmentHeaders(); - return _attachmentHeaders; - } - - public String toString() { - importAttachmentHeaders(); - int len = _dataSize; - return getName() - + (getDescription() != null ? ": " + getDescription() : "") - + (getMimeType() != null ? 
", type: " + getMimeType() : "") - + ", size: " + len; - } - - private void importAttachmentHeaders() { - if (_attachmentHeaders == null) { - try { - Properties props = readProps(_metaFile); - String sz = (String)props.remove(EntryExtractor.ATTACHMENT_DATA_SIZE); - if (sz != null) { - try { - _dataSize = Integer.parseInt(sz); - } catch (NumberFormatException nfe) {} - } - - _attachmentHeaders = props; - } catch (IOException ioe) { - ioe.printStackTrace(); - _attachmentHeaders = new Properties(); - } - } - } - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/EntryExtractor.java b/apps/syndie/java/src/net/i2p/syndie/EntryExtractor.java deleted file mode 100644 index 179a35f9e..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/EntryExtractor.java +++ /dev/null @@ -1,150 +0,0 @@ -package net.i2p.syndie; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.util.Iterator; -import java.util.Map; - -import net.i2p.I2PAppContext; -import net.i2p.data.DataHelper; -import net.i2p.data.SessionKey; -import net.i2p.syndie.data.Attachment; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.Entry; -import net.i2p.syndie.data.EntryContainer; - -/** - * To cut down on unnecessary IO/cpu load, extract entries onto the disk for - * faster access later. 
Individual entries are stored in subdirectories based on - * their name - $archiveDir/$blogDir/$entryId.snd extracts its files into various - * files under $cacheDir/$blogDir/$entryId/: - * headers.txt: name=value pairs for attributes of the entry container itself - * info.txt: name=value pairs for implicit attributes of the container (blog, id, format, size) - * entry.sml: raw sml file - * attachmentN_data.dat: raw binary data for attachment N - * attachmentN_meta.dat: name=value pairs for attributes of attachment N - * - */ -public class EntryExtractor { - private I2PAppContext _context; - - static final String HEADERS = "headers.txt"; - static final String META = "meta.txt"; - static final String ENTRY = "entry.sml"; - static final String ATTACHMENT_PREFIX = "attachment"; - static final String ATTACHMENT_DATA_SUFFIX = "_data.dat"; - static final String ATTACHMENT_META_SUFFIX = "_meta.txt"; - static final String ATTACHMENT_DATA_SIZE = "EntryExtractor__dataSize"; - - public EntryExtractor(I2PAppContext context) { - _context = context; - } - - public boolean extract(File entryFile, File entryDir, SessionKey entryKey, BlogInfo info) throws IOException { - EntryContainer entry = new EntryContainer(); - FileInputStream in = null; - try { - in = new FileInputStream(entryFile); - entry.load(in); - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - } - boolean ok = entry.verifySignature(_context, info); - if (!ok) { - return false; - } else { - entry.setCompleteSize((int)entryFile.length()); - if (entryKey != null) - entry.parseRawData(_context, entryKey); - else - entry.parseRawData(_context); - extract(entry, entryDir); - return true; - } - } - - public void extract(EntryContainer entry, File entryDir) throws IOException { - extractEntry(entry, entryDir); - extractHeaders(entry, entryDir); - extractMeta(entry, entryDir); - Attachment attachments[] = entry.getAttachments(); - if (attachments != null) { - for (int i = 0; i < 
attachments.length; i++) { - extractAttachmentData(i, attachments[i], entryDir); - extractAttachmentMetadata(i, attachments[i], entryDir); - } - } - } - private void extractHeaders(EntryContainer entry, File entryDir) throws IOException { - FileOutputStream out = null; - try { - out = new FileOutputStream(new File(entryDir, HEADERS)); - Map headers = entry.getHeaders(); - for (Iterator iter = headers.keySet().iterator(); iter.hasNext(); ) { - String k = (String)iter.next(); - String v = (String)headers.get(k); - out.write(DataHelper.getUTF8(k.trim() + '=' + v.trim() + '\n')); - } - } finally { - out.close(); - } - } - private void extractMeta(EntryContainer entry, File entryDir) throws IOException { - FileOutputStream out = null; - try { - out = new FileOutputStream(new File(entryDir, META)); - out.write(DataHelper.getUTF8("format=" + entry.getFormat() + '\n')); - out.write(DataHelper.getUTF8("size=" + entry.getCompleteSize() + '\n')); - out.write(DataHelper.getUTF8("blog=" + entry.getURI().getKeyHash().toBase64() + '\n')); - out.write(DataHelper.getUTF8("entry=" + entry.getURI().getEntryId() + '\n')); - } finally { - out.close(); - } - } - private void extractEntry(EntryContainer entry, File entryDir) throws IOException { - Entry e = entry.getEntry(); - if (e == null) throw new IOException("Entry is null"); - String text = e.getText(); - if (text == null) throw new IOException("Entry text is null"); - FileOutputStream out = null; - try { - out = new FileOutputStream(new File(entryDir, ENTRY)); - out.write(DataHelper.getUTF8(text)); - } finally { - out.close(); - } - } - private void extractAttachmentData(int num, Attachment attachment, File entryDir) throws IOException { - FileOutputStream out = null; - try { - out = new FileOutputStream(new File(entryDir, ATTACHMENT_PREFIX + num + ATTACHMENT_DATA_SUFFIX)); - //out.write(attachment.getData()); - InputStream data = attachment.getDataStream(); - byte buf[] = new byte[1024]; - int read = 0; - while ( (read = 
data.read(buf)) != -1) - out.write(buf, 0, read); - data.close(); - } finally { - out.close(); - } - } - private void extractAttachmentMetadata(int num, Attachment attachment, File entryDir) throws IOException { - FileOutputStream out = null; - try { - out = new FileOutputStream(new File(entryDir, ATTACHMENT_PREFIX + num + ATTACHMENT_META_SUFFIX)); - Map meta = attachment.getMeta(); - for (Iterator iter = meta.keySet().iterator(); iter.hasNext(); ) { - String k = (String)iter.next(); - String v = (String)meta.get(k); - out.write(DataHelper.getUTF8(k + '=' + v + '\n')); - } - out.write(DataHelper.getUTF8(ATTACHMENT_DATA_SIZE + '=' + attachment.getDataLength())); - } finally { - out.close(); - } - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/HeaderReceiver.java b/apps/syndie/java/src/net/i2p/syndie/HeaderReceiver.java deleted file mode 100644 index 69c1ce1f8..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/HeaderReceiver.java +++ /dev/null @@ -1,45 +0,0 @@ -package net.i2p.syndie; - -import java.util.List; -import java.util.Properties; - -import net.i2p.syndie.sml.SMLParser; - -public class HeaderReceiver implements SMLParser.EventReceiver { - private Properties _headers; - public HeaderReceiver() { _headers = null; } - public String getHeader(String name) { return (_headers != null ? 
_headers.getProperty(name) : null); } - public void receiveHeader(String header, String value) { - if (_headers == null) _headers = new Properties(); - _headers.setProperty(header, value); - } - - public void receiveAddress(String name, String schema, String protocol, String location, String anchorText) {} - public void receiveArchive(String name, String description, String locationSchema, String location, String postingKey, String anchorText) {} - public void receiveAttachment(int id, int thumbnail, String anchorText) {} - public void receiveBegin() {} - public void receiveBlog(String name, String blogKeyHash, String blogPath, long blogEntryId, List blogArchiveLocations, String anchorText) {} - public void receiveBold(String text) {} - public void receiveCode(String text, String codeLocationSchema, String codeLocation) {} - public void receiveCut(String summaryText) {} - public void receiveEnd() {} - public void receiveGT() {} - public void receiveH1(String text) {} - public void receiveH2(String text) {} - public void receiveH3(String text) {} - public void receiveH4(String text) {} - public void receiveH5(String text) {} - public void receiveHR() {} - public void receiveHeaderEnd() {} - public void receiveImage(String alternateText, int attachmentId) {} - public void receiveItalic(String text) {} - public void receiveLT() {} - public void receiveLeftBracket() {} - public void receiveLink(String schema, String location, String text) {} - public void receiveNewline() {} - public void receivePlain(String text) {} - public void receivePre(String text) {} - public void receiveQuote(String text, String whoQuoted, String quoteLocationSchema, String quoteLocation) {} - public void receiveRightBracket() {} - public void receiveUnderline(String text) {} -} diff --git a/apps/syndie/java/src/net/i2p/syndie/NewestEntryFirstComparator.java b/apps/syndie/java/src/net/i2p/syndie/NewestEntryFirstComparator.java deleted file mode 100644 index aeb7ccf45..000000000 --- 
a/apps/syndie/java/src/net/i2p/syndie/NewestEntryFirstComparator.java +++ /dev/null @@ -1,21 +0,0 @@ -package net.i2p.syndie; - -import java.util.Comparator; - -import net.i2p.data.DataHelper; -import net.i2p.syndie.data.BlogURI; - -/** sort BlogURI instances with the highest entryId first */ -public class NewestEntryFirstComparator implements Comparator { - public int compare(Object lhs, Object rhs) { - BlogURI left = (BlogURI)lhs; - BlogURI right = (BlogURI)rhs; - if (left.getEntryId() > right.getEntryId()) { - return -1; - } else if (left.getEntryId() == right.getEntryId()) { - return DataHelper.compareTo(left.getKeyHash().getData(), right.getKeyHash().getData()); - } else { - return 1; - } - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/NewestNodeFirstComparator.java b/apps/syndie/java/src/net/i2p/syndie/NewestNodeFirstComparator.java deleted file mode 100644 index 50ba5bac0..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/NewestNodeFirstComparator.java +++ /dev/null @@ -1,31 +0,0 @@ -package net.i2p.syndie; - -import java.util.Comparator; - -import net.i2p.data.DataHelper; - -/** sort ThreadNodeImpl instances with the highest entryId first */ -public class NewestNodeFirstComparator implements Comparator { - public int compare(Object lhs, Object rhs) { - ThreadNodeImpl left = (ThreadNodeImpl)lhs; - ThreadNodeImpl right = (ThreadNodeImpl)rhs; - long l = left.getMostRecentPostDate(); - long r = right.getMostRecentPostDate(); - if (l > r) { - return -1; - } else if (l == r) { - // ok, the newest responses match, so lets fall back and compare the roots themselves - l = left.getEntry().getEntryId(); - r = right.getEntry().getEntryId(); - if (l > r) { - return -1; - } else if (l == r) { - return DataHelper.compareTo(left.getEntry().getKeyHash().getData(), right.getEntry().getKeyHash().getData()); - } else { - return 1; - } - } else { - return 1; - } - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/Sucker.java 
b/apps/syndie/java/src/net/i2p/syndie/Sucker.java deleted file mode 100644 index e399392a8..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/Sucker.java +++ /dev/null @@ -1,995 +0,0 @@ -package net.i2p.syndie; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.net.MalformedURLException; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; -import java.util.Date; -import java.util.Iterator; -import java.util.List; - -import net.i2p.I2PAppContext; -import net.i2p.data.Base64; -import net.i2p.data.DataFormatException; -import net.i2p.data.DataHelper; -import net.i2p.data.Hash; -import net.i2p.syndie.data.BlogURI; -import net.i2p.util.EepGet; -import net.i2p.util.Log; - -import com.sun.syndication.feed.synd.SyndCategory; -import com.sun.syndication.feed.synd.SyndContent; -import com.sun.syndication.feed.synd.SyndEnclosure; -import com.sun.syndication.feed.synd.SyndEntry; -import com.sun.syndication.feed.synd.SyndFeed; -import com.sun.syndication.io.FeedException; -import com.sun.syndication.io.SyndFeedInput; -import com.sun.syndication.io.XmlReader; - -/** - * - * todo: - * - factor out the parsing / formatting / posting to let the sucker pull in arbitrary HTML pages - * (importing the images and SMLizing some stuff) - * - push the posts out to a remote syndie instance too - */ -public class Sucker { - private static final Log _log = I2PAppContext.getGlobalContext().logManager().getLog(Sucker.class); - private SuckerState _state; - - public Sucker() {} - - public Sucker(String[] strings) throws IllegalArgumentException { - SuckerState state = new SuckerState(); - state.pushToSyndie=true; - state.urlToLoad = strings[0]; - state.blog = strings[1]; - state.feedTag = strings[2]; - state.outputDir = "blog-"+state.blog; - try { - 
state.historyPath=BlogManager.instance().getRootDir().getCanonicalPath()+"/rss.history"; - } catch (IOException e) { - e.printStackTrace(); - } - state.proxyPort = BlogManager.instance().getDefaultProxyPort(); - state.proxyHost = BlogManager.instance().getDefaultProxyHost(); - - state.bm = BlogManager.instance(); - Hash blogHash = new Hash(); - try { - blogHash.fromBase64(state.blog); - } catch (DataFormatException e1) { - throw new IllegalArgumentException("ooh, bad $blog"); - } - - state.user = state.bm.getUser(blogHash); - if(state.user==null) - throw new IllegalArgumentException("wtf, user==null? hash:"+blogHash); - state.history = new ArrayList(); - _state = state; - } - - public boolean parseArgs(String args[]) { - for (int i = 0; i < args.length; i++) { - if ("--load".equals(args[i])) - _state.urlToLoad = args[++i]; - if ("--outputdir".equals(args[i])) - _state.outputDir = args[++i]; - if ("--history".equals(args[i])) - _state.historyPath = args[++i]; - if ("--tag".equals(args[i])) - _state.feedTag = args[++i]; - if ("--proxyhost".equals(args[i])) - _state.proxyHost = args[++i]; - if ("--proxyport".equals(args[i])) - _state.proxyPort = args[++i]; - if ("--exec".equals(args[i])) - _state.pushScript = args[++i]; - if ("--importenclosures".equals(args[i])) - _state.importEnclosures= args[++i].equals("true"); - if ("--importenrefs".equals(args[i])) - _state.importRefs= args[++i].equals("true"); - } - - // Cut ending '/' from outputDir - if (_state.outputDir.endsWith("/")) - _state.outputDir = _state.outputDir.substring(0, _state.outputDir.length() - 1); - - if (_state.urlToLoad == null) - return false; - - return true; - } - - /** - * Fetch urlToLoad and call convertToHtml() on any new entries. 
- * @return list of BlogURI entries posted, if any - */ - public List suck() { - _state.entriesPosted = new ArrayList(); - SyndFeed feed; - File fetched=null; - - _state.tempFiles = new ArrayList(); - - // Find base url - int idx=_state.urlToLoad.lastIndexOf('/'); - if(idx>0) - _state.baseUrl=_state.urlToLoad.substring(0,idx); - else - _state.baseUrl=_state.urlToLoad; - - infoLog("Processing: "+_state.urlToLoad); - debugLog("Base url: "+_state.baseUrl); - - // - try { - File lastIdFile=null; - - // Get next message number to use (for messageId in history only) - if(!_state.pushToSyndie) { - - lastIdFile = new File(_state.historyPath + ".lastId"); - if (!lastIdFile.exists()) - lastIdFile.createNewFile(); - - FileInputStream fis = null; - try { - fis = new FileInputStream(lastIdFile); - String number = readLine(fis); - _state.messageNumber = Integer.parseInt(number); - } catch (NumberFormatException e) { - _state.messageNumber = 0; - } finally { - if (fis != null) try { fis.close(); } catch (IOException ioe) {} - } - - // Create outputDir if missing - File f = new File(_state.outputDir); - f.mkdirs(); - } else { - _state.messageNumber=_state.bm.getNextBlogEntry(_state.user); - } - - _log.debug("message number: " + _state.messageNumber); - - _state.shouldProxy = false; - _state.proxyPortNum = -1; - if ( (_state.proxyHost != null) && (_state.proxyPort != null) ) { - try { - _state.proxyPortNum = Integer.parseInt(_state.proxyPort); - if (_state.proxyPortNum > 0) - _state.shouldProxy = true; - } catch (NumberFormatException nfe) { - nfe.printStackTrace(); - } - } - - // fetch - int numRetries = 2; - fetched = File.createTempFile("sucker", ".fetch"); - EepGet get = new EepGet(I2PAppContext.getGlobalContext(), _state.shouldProxy, _state.proxyHost, _state.proxyPortNum, - numRetries, fetched.getAbsolutePath(), _state.urlToLoad); - SuckerFetchListener lsnr = new SuckerFetchListener(); - get.addStatusListener(lsnr); - - _log.debug("fetching [" + _state.urlToLoad + "] / " + 
_state.shouldProxy + "/" + _state.proxyHost + "/" + _state.proxyHost); - - get.fetch(); - _log.debug("fetched: " + get.getNotModified() + "/" + get.getETag()); - boolean ok = lsnr.waitForSuccess(); - if (!ok) { - _log.debug("success? " + ok); - System.err.println("Unable to retrieve the url [" + _state.urlToLoad + "] after " + numRetries + " tries."); - fetched.delete(); - return _state.entriesPosted; - } - _log.debug("fetched successfully? " + ok); - if(get.getNotModified()) { - debugLog("not modified, saving network bytes from useless fetch"); - fetched.delete(); - return _state.entriesPosted; - } - - // Build entry list from fetched rss file - SyndFeedInput input = new SyndFeedInput(); - feed = input.build(new XmlReader(fetched)); - - List entries = feed.getEntries(); - - _log.debug("entries: " + entries.size()); - - loadHistory(); - - // Process list backwards to get syndie to display the - // entries in the right order. (most recent at top) - List feedMessageIds = new ArrayList(); - for (int i = entries.size()-1; i >= 0; i--) { - SyndEntry e = (SyndEntry) entries.get(i); - - _state.attachmentCounter=0; - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Syndicate entry: " + e.getLink()); - - // Calculate messageId, and check if we have got the message already - String feedHash = sha1(_state.urlToLoad); - String itemHash = sha1(e.getTitle() + e.getDescription()); - Date d = e.getPublishedDate(); - String time; - if(d!=null) - time = "" + d.getTime(); - else - time = "" + new Date().getTime(); - String outputFileName = _state.outputDir + "/" + _state.messageNumber; - String messageId = feedHash + ":" + itemHash + ":" + time + ":" + outputFileName; - - // Make sure these messageIds get into the history file - feedMessageIds.add(messageId); - - // Check if we already have this - if (existsInHistory(_state, messageId)) - continue; - - infoLog("new: " + messageId); - - // process the new entry - processEntry(_state, e, time); - } - - // update history - 
pruneHistory(_state.urlToLoad, 42*10); // could use 0 if we were sure old entries never re-appear - Iterator iter = feedMessageIds.iterator(); - while(iter.hasNext()) - { - String newMessageId = (String)iter.next(); - if(!existsInHistory(_state, newMessageId)) - addHistory(newMessageId); // add new message ids from current feed to history - } - storeHistory(); - - // call script if we don't just feed syndie - if(!_state.pushToSyndie) { - FileOutputStream fos = null; - try { - fos = new FileOutputStream(lastIdFile); - fos.write(("" + _state.messageNumber).getBytes()); - } finally { - if (fos != null) try { fos.close(); } catch (IOException ioe) {} - } - } - - _log.debug("done fetching"); - } catch (MalformedURLException e) { - e.printStackTrace(); - } catch (IllegalArgumentException e) { - e.printStackTrace(); - } catch (FeedException e) { - e.printStackTrace(); - } catch (IOException e) { - e.printStackTrace(); - } - if(fetched!=null) - fetched.delete(); - debugLog("Done."); - return _state.entriesPosted; - } - - private void loadHistory() { - try { - // Create historyFile if missing - _state.historyFile = new File(_state.historyPath); - if (!_state.historyFile.exists()) - _state.historyFile.createNewFile(); - - FileInputStream is = new FileInputStream(_state.historyFile); - String s; - while((s=readLine(is))!=null) - { - addHistory(s); - } - } catch (FileNotFoundException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } catch (IOException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - - } - - private boolean existsInHistory(SuckerState state, String messageId) { - int idx; - idx = messageId.lastIndexOf(":"); - String lineToCompare = messageId.substring(0, idx-1); - idx = lineToCompare.lastIndexOf(":"); - lineToCompare = lineToCompare.substring(0, idx-1); - Iterator iter = _state.history.iterator(); - while(iter.hasNext()) - { - String line = (String)iter.next(); - idx = line.lastIndexOf(":"); - if (idx < 0) - return 
false; - line = line.substring(0, idx-1); - idx = line.lastIndexOf(":"); - if (idx < 0) - return false; - line = line.substring(0, idx-1); - if (line.equals(lineToCompare)) - return true; - } - return false; - } - - private void addHistory(String messageId) { - _state.history.add(messageId); - } - - private void pruneHistory(String url, int nrToKeep) { - int i=0; - String urlHash=sha1(url); - - // Count nr of entries containing url hash - Iterator iter = _state.history.iterator(); - while(iter.hasNext()) - { - String historyLine = (String) iter.next(); - if(historyLine.startsWith(urlHash)) - { - i++; - } - } - - // keep first nrToKeep entries - i = i - nrToKeep; - if(i>0) - { - iter = _state.history.iterator(); - while(i>0 && iter.hasNext()) - { - String historyLine = (String) iter.next(); - if(historyLine.startsWith(urlHash)) - { - iter.remove(); - i--; - } - } - } - } - - private void storeHistory() { - FileOutputStream hos = null; - try { - hos = new FileOutputStream(_state.historyFile, false); - Iterator iter = _state.history.iterator(); - while(iter.hasNext()) - { - String historyLine = (String) iter.next(); - hos.write(historyLine.getBytes()); - hos.write("\n".getBytes()); - } - } catch (FileNotFoundException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } catch (IOException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } finally { - if (hos != null) try { hos.close(); } catch (IOException ioe) {} - } - } - - public static void main(String[] args) { - Sucker sucker = new Sucker(); - boolean ok = sucker.parseArgs(args); - if (!ok) { - System.out.println("sucker --load $urlToFeed \n" - + "--proxyhost \n" - + "--proxyport \n" - + "--importenclosures true \n" - + "--importrefs true \n" - + "--tag feed \n" - + "--outputdir ./sucker_out \n" - + "--exec pushscript.sh OUTPUTDIR UNIQUEID ENTRYTIMESTAMP \n" - + "--history ./sucker.history"); - System.exit(1); - } - - sucker.suck(); - } - - /** - * Call the specified script with 
"$outputDir $id and $time". - */ - private static boolean execPushScript(SuckerState state, String id, String time) { - try { - String cli = state.pushScript + " " + state.outputDir + " " + id + " " + time; - Process pushScript_proc = Runtime.getRuntime().exec(cli); - - // get its output (your input) stream - - InputStream ls_in = pushScript_proc.getInputStream(); - - try { - StringBuffer buf = new StringBuffer(); - while (true) { - boolean eof = DataHelper.readLine(ls_in, buf); - if (buf.length() > 0) - infoLog(state.pushScript + ": " + buf.toString()); - buf.setLength(0); - if (eof) - break; - } - } catch (IOException e) { - return false; - } - try { - pushScript_proc.waitFor(); - if(pushScript_proc.exitValue()==0) - return true; - } catch (InterruptedException e) { - e.printStackTrace(); - } - return false; - } catch (IOException e1) { - System.err.println(e1); - return false; - } - } - - /** - * Converts the SyndEntry e to sml and fetches any images as attachments - */ - private static boolean processEntry(SuckerState state, SyndEntry e, String time) { - String subject; - - state.stripNewlines=false; - - try { - - String sml=""; - subject=e.getTitle(); - List cats = e.getCategories(); - Iterator iter = cats.iterator(); - String tags = state.feedTag; - while (iter.hasNext()) { - SyndCategory c = (SyndCategory) iter.next(); - debugLog("Name: "+c.getName()); - debugLog("uri:"+c.getTaxonomyUri()); - String tag=c.getName(); - tag=tag.replaceAll("[^a-zA-z.-_:]","_"); - tags += "\t" + state.feedTag + "." 
+ tag; - } - - SyndContent content; - - List l = e.getContents(); - if(l!=null) - { - iter = l.iterator(); - while(iter.hasNext()) - { - content = (SyndContent)iter.next(); - String c = content.getValue(); - debugLog("Content: "+c); - sml += htmlToSml(state, c); - sml += "\n"; - } - } - - List enclosures = e.getEnclosures(); - debugLog("Enclosures: " + enclosures.size()); - for (int i = 0; i < enclosures.size(); i++) { - SyndEnclosure enc = (SyndEnclosure)enclosures.get(i); - String enclosureURL = enc.getUrl(); - if (enclosureURL != null) { - if (!enclosureURL.startsWith("http://")) { - // e.g. postman's rss feed @ http://tracker.postman.i2p/rss.jsp has - // baseUrl = http://tracker.postman.i2p - // and enclosure URLs are /download.php?id=123&file=blah - if (enclosureURL.startsWith("/") || state.baseUrl.endsWith("/")) - enclosureURL = state.baseUrl + enclosureURL; - else - enclosureURL = state.baseUrl + '/' + enclosureURL; - } - fetchAttachment(state, enclosureURL, enc.getType()); // fetches and adds to our streams - } - } - - String source=e.getLink(); //Uri(); - if(!source.startsWith("http://")) - source=state.baseUrl+source; - sml += "[link schema=\"web\" location=\""+source+"\"]source[/link]\n"; - - if(state.pushToSyndie) { - debugLog("user.blog: "+state.user.getBlogStr()); - debugLog("user.id: "+state.bm.getNextBlogEntry(state.user)); - debugLog("subject: "+subject); - debugLog("tags: "+tags); - debugLog("sml: "+sml); - debugLog(""); - BlogURI uri = state.bm.createBlogEntry( - state.user, - false, - subject, - tags, - null, - sml, - state.fileNames, - state.fileStreams, - state.fileTypes); - - if(uri==null) { - errorLog("pushToSyndie failure."); - return false; - } else { - state.entriesPosted.add(uri); - infoLog("pushToSyndie success, uri: "+uri.toString()); - } - } - else - { - FileOutputStream fos; - fos = new FileOutputStream(state.messagePath); - sml=subject + "\nTags: " + tags + "\n\n" + sml; - fos.write(sml.getBytes()); - if (state.pushScript != null) { 
- if (!execPushScript(state, ""+state.messageNumber, time)) { - errorLog("push script failed"); - } else { - infoLog("push script success: nr "+state.messageNumber); - } - } - } - state.messageNumber++; - deleteTempFiles(state); - return true; - } catch (FileNotFoundException e1) { - e1.printStackTrace(); - } catch (IOException e2) { - e2.printStackTrace(); - } - deleteTempFiles(state); - return false; - } - - private static void deleteTempFiles(SuckerState state) { - Iterator iter = state.tempFiles.iterator(); - while(iter.hasNext()) { - File tempFile = (File)iter.next(); - tempFile.delete(); - } - } - - private static String htmlToSml(SuckerState state, String html) { - - String sml=""; - int i=0; - - state.pendingEndLink=false; - - while(i -> [link][/link][img][/img] - ret="[/link]"; - state.pendingEndLink=false; - } - - ret += "[img attachment=\""+""+ state.attachmentCounter +"\"]"; - - a=htmlTagLowerCase.indexOf("alt=\"")+5; - if(a>=5) - { - b=a; - if(htmlTagLowerCase.charAt(b)!='\"') { - while(htmlTagLowerCase.charAt(b)!='\"') - b++; - String altText=htmlTag.substring(a,b); - ret+=altText; - } - } - - ret+="[/img]"; - - if(!imageLink.startsWith("http://")) - imageLink=state.baseUrl+"/"+imageLink; - - fetchAttachment(state, imageLink); - - debugLog("Converted to: "+ret); - - return ret; - - } - if(htmlTagLowerCase.startsWith("= htmlTagLowerCase.length()) - return null; // abort the b0rked tag - String link=htmlTag.substring(a,b); - if(!link.startsWith("http://")) - link=state.baseUrl+"/"+link; - - String schema="web"; - - ret += "[link schema=\""+schema+"\" location=\""+link+"\"]"; - if(htmlTagLowerCase.endsWith("/>")) - ret += "[/link]"; - else - state.pendingEndLink=true; - - debugLog("Converted to: "+ret); - - return ret; - } - - if ("".equals(htmlTagLowerCase)) { - if (state.pendingEndLink) { - state.pendingEndLink=false; - return "[/link]"; - } - return ""; - } - - if("".equals(htmlTagLowerCase)) - return "[b]"; - if("".equals(htmlTagLowerCase)) - return 
"[/b]"; - if("".equals(htmlTagLowerCase)) - return "[i]"; - if("".equals(htmlTagLowerCase)) - return "[/i]"; - if("".equals(htmlTagLowerCase)) - return "[i]"; - if("".equals(htmlTagLowerCase)) - return "[/i]"; - if("".equals(htmlTagLowerCase)) - return "[b]"; - if("".equals(htmlTagLowerCase)) - return "[/b]"; - if(htmlTagLowerCase.startsWith("".equals(htmlTagLowerCase)) - return "\n\n"; - if("

      ".equals(htmlTagLowerCase)) - return ""; - if("
    • ".equals(htmlTagLowerCase)) - return "\n * "; - if("
    • ".equals(htmlTagLowerCase)) - return ""; - if("
      ".equals(htmlTagLowerCase)) - return ""; - if(htmlTagLowerCase.startsWith("".equals(htmlTagLowerCase)) - return ""; - if("".equals(htmlTagLowerCase)) - return ""; - if("
      ".equals(htmlTagLowerCase)) - return "[quote]"; - if("
      ".equals(htmlTagLowerCase)) - return "[/quote]"; - if(htmlTagLowerCase.startsWith("".equals(htmlTagLowerCase)) // emulate table with hr :) - return "[hr][/hr]"; - if(htmlTagLowerCase.startsWith("') - return i+1; - if(s.charAt(i)=='"') - { - i++; - while(i= so we can give reasonable order when a child is a reply to a parent - // (since the child must have been posted after the parent) - if (node.getMostRecentPostDate() >= _mostRecentPostDate) { - _mostRecentPostDate = node.getMostRecentPostDate(); - _mostRecentPostAuthor = node.getMostRecentPostAuthor(); - } - _recursiveTags.addAll(node.getRecursiveTags()); - _recursiveAuthors.addAll(node.getRecursiveAuthors()); - _recursiveEntries.addAll(node.getRecursiveEntries()); - } - - if (_mostRecentPostDate < 0) { - _mostRecentPostDate = _entry.getEntryId(); - _mostRecentPostAuthor = _entry.getKeyHash(); - } - - // now reorder the children - TreeSet ordered = new TreeSet(new NewestNodeFirstComparator()); - for (int i = 0; i < _children.size(); i++) { - ThreadNodeImpl kid = (ThreadNodeImpl)_children.get(i); - ordered.add(kid); - } - List kids = new ArrayList(ordered.size()); - for (Iterator iter = ordered.iterator(); iter.hasNext(); ) - kids.add(iter.next()); - _children = kids; - } - - public String toString() { - StringBuffer buf = new StringBuffer(); - buf.append("").append(getEntry().toString()).append("\n"); - buf.append("").append(getTags()).append("\n"); - buf.append("").append(getMostRecentPostDate()).append("\n"); - buf.append("").append(getRecursiveTags()).append("\n"); - buf.append("\n"); - for (int i = 0; i < _children.size(); i++) - buf.append(_children.get(i).toString()); - buf.append("\n"); - buf.append("\n"); - return buf.toString(); - } - - private Collection getRecursiveAuthors() { return _recursiveAuthors; } - private Collection getRecursiveEntries() { return _recursiveEntries; } - - // interface-specified methods doing what one would expect... 
- public boolean containsAuthor(Hash author) { return _recursiveAuthors.contains(author); } - public boolean containsEntry(BlogURI uri) { return _recursiveEntries.contains(uri); } - public ThreadNode getChild(int index) { return (ThreadNode)_children.get(index); } - public int getChildCount() { return _children.size(); } - public BlogURI getEntry() { return _entry; } - public ThreadNode getParent() { return _parent; } - public BlogURI getParentEntry() { return _parentEntry; } - public boolean containsTag(String tag) { return _tags.contains(tag); } - public Collection getTags() { return _tags; } - public Collection getRecursiveTags() { return _recursiveTags; } - public long getMostRecentPostDate() { return _mostRecentPostDate; } - public Hash getMostRecentPostAuthor() { return _mostRecentPostAuthor; } - public Iterator getRecursiveAuthorIterator() { return _recursiveAuthors.iterator(); } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/Updater.java b/apps/syndie/java/src/net/i2p/syndie/Updater.java deleted file mode 100644 index 72a13e603..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/Updater.java +++ /dev/null @@ -1,141 +0,0 @@ -package net.i2p.syndie; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; - -import net.i2p.I2PAppContext; -import net.i2p.client.naming.PetName; -import net.i2p.client.naming.PetNameDB; -import net.i2p.syndie.web.RemoteArchiveBean; -import net.i2p.util.Log; - -public class Updater { - public static final String VERSION = "1.0"; - private static final Log _log = I2PAppContext.getGlobalContext().logManager().getLog(Updater.class); - private static final Updater _instance = new Updater(); - private long _lastUpdate; - private static boolean _woken; - - private static boolean ALLOW_REMOTE_PUSH = false; - - public void update() { - BlogManager bm = BlogManager.instance(); - if (_lastUpdate + bm.getUpdateDelay()*60*60*1000 > System.currentTimeMillis()) { - if (!_woken) - 
return; - } - _lastUpdate = System.currentTimeMillis(); - _log.debug("Update started."); - String[] archives = bm.getUpdateArchives(); - for (int i = 0; i < archives.length; i++) { - _log.debug("Fetching [" + archives[i] + "]"); - fetchArchive(archives[i]); - _log.debug("Done fetching " + archives[i]); - } - _log.debug("Done fetching archives"); - List rssFeeds = bm.getRssFeeds(); - List allEntries = new ArrayList(); - Iterator iter = rssFeeds.iterator(); - while(iter.hasNext()) { - String args[] = (String[])iter.next(); - _log.debug("rss feed begin: " + args[0]); - Sucker sucker = new Sucker(args); - allEntries.addAll(sucker.suck()); - _log.debug("rss feed end: " + args[0]); - } - - if (ALLOW_REMOTE_PUSH && (allEntries.size() > 0) ) { - String pushedRemoteArchive = getAutomaticallyPushedArchive(); - if (pushedRemoteArchive != null) { - _log.debug("Pushing all of the new entries to " + pushedRemoteArchive + ": " + allEntries); - // push all of the new entries to the configured default archive - User user = new User(); - RemoteArchiveBean rab = new RemoteArchiveBean(); - - rab.fetchIndex(user, "web", pushedRemoteArchive, bm.getDefaultProxyHost(), bm.getDefaultProxyPort(), true); - if (rab.getRemoteIndex() != null) { - rab.postSelectedEntries(user, allEntries, pushedRemoteArchive); - _log.debug(rab.getStatus()); - } - } - } - _log.debug("Done with all updating"); - } - - /** - * Pick the archive to which any posts imported from a feed should be pushed to, - * beyond the local archive. This currently pushes it to the first (alphabetically) - * syndie archive in the default user's addressbook that is marked as 'public'. 
- * - * @return archive location, or null if no archive should be used - */ - private String getAutomaticallyPushedArchive() { - BlogManager bm = BlogManager.instance(); - User user = bm.getDefaultUser(); - PetNameDB db = user.getPetNameDB(); - for (Iterator iter = db.getNames().iterator(); iter.hasNext(); ) { - String name = (String)iter.next(); - PetName pn = db.getByName(name); - String proto = pn.getProtocol(); - if ( (proto != null) && ("syndiearchive".equals(proto)) ) - if (pn.getIsPublic()) - return pn.getLocation(); - } - return null; - } - - public void fetchArchive(String archive) { - if ( (archive == null) || (archive.trim().length() <= 0) ) { - _log.error("Fetch a null archive?" + new Exception("source")); - return; - } - BlogManager bm = BlogManager.instance(); - User user = new User(); - RemoteArchiveBean rab = new RemoteArchiveBean(); - - rab.fetchIndex(user, "web", archive, bm.getDefaultProxyHost(), bm.getDefaultProxyPort(), true); - if (rab.getRemoteIndex() != null) { - HashMap parameters = new HashMap(); - parameters.put("action", new String[] {"Fetch all new entries"}); - rab.fetchSelectedBulk(user, parameters, true); - } - _log.debug(rab.getStatus()); - } - - public static void main() { - _woken = false; - _instance.run(); - } - - public void run() { - - // wait - try { - Thread.currentThread().sleep(5*60*1000); - } catch (InterruptedException ie) {} - - // creates the default user if necessary - BlogManager.instance().getDefaultUser(); - while (true) { - int delay = BlogManager.instance().getUpdateDelay(); - update(); - try { - synchronized (this) { - _woken = false; - wait(delay * 60 * 60 * 1000); - } - } catch (InterruptedException exp) { - } - - } - } - - public static void wakeup() { - synchronized (_instance) { - _woken = true; - _instance.notifyAll(); - } - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/UpdaterServlet.java b/apps/syndie/java/src/net/i2p/syndie/UpdaterServlet.java deleted file mode 100644 index 8ade5e9cb..000000000 
--- a/apps/syndie/java/src/net/i2p/syndie/UpdaterServlet.java +++ /dev/null @@ -1,40 +0,0 @@ -package net.i2p.syndie; - -import javax.servlet.GenericServlet; -import javax.servlet.ServletConfig; -import javax.servlet.ServletException; -import javax.servlet.ServletRequest; -import javax.servlet.ServletResponse; - -/** - * A wrapper for syndie updater to allow it to be started as a web application. - * - * @author Ragnarok - * - */ -public class UpdaterServlet extends GenericServlet { - - /* (non-Javadoc) - * @see javax.servlet.Servlet#service(javax.servlet.ServletRequest, javax.servlet.ServletResponse) - */ - public void service(ServletRequest request, ServletResponse response) { - } - - /* (non-Javadoc) - * @see javax.servlet.Servlet#init(javax.servlet.ServletConfig) - */ - public void init(ServletConfig config) { - try { - super.init(config); - } catch (ServletException exp) { - } - /* - UpdaterThread thread = new UpdaterThread(); - thread.setDaemon(true); - thread.start(); - System.out.println("INFO: Starting Syndie Updater " + Updater.VERSION); - */ - System.out.println("INFO: Syndie Updater DISABLED. Use the new Syndie from http://syndie.i2p.net/"); - } - -} \ No newline at end of file diff --git a/apps/syndie/java/src/net/i2p/syndie/UpdaterThread.java b/apps/syndie/java/src/net/i2p/syndie/UpdaterThread.java deleted file mode 100644 index fda8a09a5..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/UpdaterThread.java +++ /dev/null @@ -1,27 +0,0 @@ -package net.i2p.syndie; - -/** - * A thread that runs the updater. - * - * @author Ragnarok - * - */ -public class UpdaterThread extends Thread { - - /** - * Construct an UpdaterThread. 
- */ - public UpdaterThread() { - } - - /* (non-Javadoc) - * @see java.lang.Runnable#run() - */ - public void run() { - //try { - // Thread.sleep(5 * 60 * 1000); - //} catch (InterruptedException exp) { - //} - Updater.main(); - } -} \ No newline at end of file diff --git a/apps/syndie/java/src/net/i2p/syndie/User.java b/apps/syndie/java/src/net/i2p/syndie/User.java deleted file mode 100644 index fd386685a..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/User.java +++ /dev/null @@ -1,350 +0,0 @@ -package net.i2p.syndie; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.StringTokenizer; - -import net.i2p.I2PAppContext; -import net.i2p.client.naming.PetName; -import net.i2p.client.naming.PetNameDB; -import net.i2p.data.Base64; -import net.i2p.data.DataHelper; -import net.i2p.data.Hash; -import net.i2p.syndie.web.AddressesServlet; - -/** - * User session state and preferences. 
- * - */ -public class User { - private I2PAppContext _context; - private String _username; - private String _hashedPassword; - private Hash _blog; - private String _userHash; - private long _mostRecentEntry; - /** Group name to List of blog selectors, where the selectors are of the form - * blog://$key, entry://$key/$entryId, blogtag://$key/$tag, tag://$tag - */ - private Map _blogGroups; - /** list of blogs (Hash) we never want to see entries from */ - private List _shitlistedBlogs; - /** where our userhosts.txt is */ - private String _addressbookLocation; - private boolean _showImagesByDefault; - private boolean _showExpandedByDefault; - private String _defaultSelector; - private long _lastLogin; - private long _lastMetaEntry; - private boolean _allowAccessRemote; - private boolean _authenticated; - private String _eepProxyHost; - private int _eepProxyPort; - private String _webProxyHost; - private int _webProxyPort; - private String _torProxyHost; - private int _torProxyPort; - private PetNameDB _petnames; - private boolean _importAddresses; - private boolean _dataImported; - - static final String PROP_USERHASH = "__userHash"; - - private static final String DEFAULT_FAVORITE_TAGS[] = { - "syndie", "syndie.tech", "syndie.intro", "syndie.bugs", "syndie.featurerequest", "syndie.announce", - "i2p", "i2p.tech", "i2p.bugs", "i2p.i2phex", "i2p.susimail", "i2p.irc", - "bt.i2psnark", "bt.i2prufus", "bt.i2p-bt", "bt.azureus", "bt.misc", - "security.misc", - "chat", - "test" - }; - - /** - * Ugly hack to fetch the default User instance - this is the default - * constructor so it can be used as a bean on the web interface. If - * the Syndie instance isn't in single user mode, the default User - * is an empty unauthenticated User. If the instance IS in single user - * mode, this will contain the logged in 'default' user (creating a new - * one as necessary). If you just want to create a User object, use the - * new User(I2PAppContext ctx) constructor. 
- * - */ - public User() { - this(I2PAppContext.getGlobalContext()); - BlogManager.instance().getDefaultUser(this); - } - - public User(I2PAppContext ctx) { - _context = ctx; - init(); - } - private void init() { - _authenticated = false; - _username = null; - _userHash = null; - _hashedPassword = null; - _blog = null; - _mostRecentEntry = -1; - _blogGroups = new HashMap(); - _shitlistedBlogs = new ArrayList(); - _defaultSelector = null; - _addressbookLocation = "userhosts.txt"; - _showImagesByDefault = true; - _showExpandedByDefault = false; - _allowAccessRemote = false; - _eepProxyHost = null; - _webProxyHost = null; - _torProxyHost = null; - _eepProxyPort = -1; - _webProxyPort = -1; - _torProxyPort = -1; - _lastLogin = -1; - _lastMetaEntry = 0; - _petnames = new PetNameDB(); - _importAddresses = false; - _dataImported = false; - } - - public boolean getAuthenticated() { return _authenticated; } - public String getUsername() { return _username; } - public String getUserHash() { return _userHash; } - public Hash getBlog() { return _blog; } - public String getBlogStr() { - if (_blog != null) - return Base64.encode(_blog.getData()); - else - return null; - } - public long getMostRecentEntry() { return _mostRecentEntry; } - public Map getBlogGroups() { return _blogGroups; } - public List getShitlistedBlogs() { return _shitlistedBlogs; } - public List getFavoriteTags() { - List rv = new ArrayList(); - for (Iterator iter = _petnames.getNames().iterator(); iter.hasNext(); ) { - String name = (String)iter.next(); - PetName pn = _petnames.getByName(name); - if (pn == null) continue; - String proto = pn.getProtocol(); - String loc = pn.getLocation(); - if ( (proto != null) && (AddressesServlet.PROTO_TAG.equals(proto)) && (loc != null) ) - rv.add(loc); - } - if (rv.size() <= 0) { - for (int i = 0; i < DEFAULT_FAVORITE_TAGS.length; i++) { - if (!_petnames.containsName(DEFAULT_FAVORITE_TAGS[i])) { - _petnames.add(new PetName(DEFAULT_FAVORITE_TAGS[i], 
AddressesServlet.NET_SYNDIE, - AddressesServlet.PROTO_TAG, DEFAULT_FAVORITE_TAGS[i])); - } - } - } - return rv; - } - public String getAddressbookLocation() { return _addressbookLocation; } - public boolean getShowImages() { return _showImagesByDefault; } - public boolean getShowExpanded() { return _showExpandedByDefault; } - public long getLastLogin() { return _lastLogin; } - public String getHashedPassword() { return _hashedPassword; } - public long getLastMetaEntry() { return _lastMetaEntry; } - public String getDefaultSelector() { return _defaultSelector; } - public void setDefaultSelector(String sel) { _defaultSelector = sel; } - public boolean getAllowAccessRemote() { return _allowAccessRemote; } - public void setAllowAccessRemote(boolean allow) { _allowAccessRemote = true; } - - public void setMostRecentEntry(long id) { _mostRecentEntry = id; } - public void setLastMetaEntry(long id) { _lastMetaEntry = id; } - - public String getEepProxyHost() { return _eepProxyHost; } - public int getEepProxyPort() { return _eepProxyPort; } - public String getWebProxyHost() { return _webProxyHost; } - public int getWebProxyPort() { return _webProxyPort; } - public String getTorProxyHost() { return _torProxyHost; } - public int getTorProxyPort() { return _torProxyPort; } - - public PetNameDB getPetNameDB() { return _petnames; } - public boolean getImportAddresses() { return _importAddresses; } - - public void invalidate() { - if (_authenticated) - BlogManager.instance().saveUser(this); - init(); - } - - public void dataImported() { _dataImported = true; } - public boolean resetDataImported() { - boolean rv = _dataImported; - _dataImported = false; - return rv; - } - - public boolean changePassword(String oldPass, String pass0, String pass1) { - String curPass = _hashedPassword; - Hash curPassHash = _context.sha().calculateHash(DataHelper.getUTF8(oldPass)); - Hash newPassHash = _context.sha().calculateHash(DataHelper.getUTF8(pass0)); - if 
(curPassHash.toBase64().equals(curPass)) { - if ( (pass0 != null) && (pass1 != null) && (pass0.equals(pass1)) ) { - _hashedPassword = newPassHash.toBase64(); - return true; - } else { - return false; - } - } else { - return false; - } - } - - public String login(String login, String pass, Properties props) { - _username = login; - load(props); - String hpass = Base64.encode(_context.sha().calculateHash(DataHelper.getUTF8(pass)).getData()); - if (!hpass.equals(_hashedPassword)) { - return "Incorrect password"; - } - _lastLogin = _context.clock().now(); - _authenticated = true; - return LOGIN_OK; - } - - - public void load(Properties props) { - _authenticated = false; - _hashedPassword = props.getProperty("password"); - _userHash = props.getProperty(PROP_USERHASH); - - // blog=luS9d3uaf....HwAE= - String b = props.getProperty("blog"); - if (b != null) _blog = new Hash(Base64.decode(b)); - // lastid=12345 - String id = props.getProperty("lastid"); - if (id != null) try { _mostRecentEntry = Long.parseLong(id); } catch (NumberFormatException nfe) {} - // lastmetaedition=12345 - id = props.getProperty("lastmetaedition"); - if (id != null) try { _lastMetaEntry = Long.parseLong(id); } catch (NumberFormatException nfe) {} - // groups=abc:selector,selector,selector,selector def:selector,selector,selector - StringTokenizer tok = new StringTokenizer(props.getProperty("groups", ""), " "); - while (tok.hasMoreTokens()) { - String group = tok.nextToken(); - int endName = group.indexOf(':'); - if (endName <= 0) - continue; - String groupName = group.substring(0, endName); - String sel = group.substring(endName+1); - List selectors = new ArrayList(); - while ( (sel != null) && (sel.length() > 0) ) { - int end = sel.indexOf(','); - if (end < 0) { - selectors.add(sel); - sel = null; - } else { - if (end + 1 >= sel.length()) { - selectors.add(sel.substring(0,end)); - sel = null; - } else if (end == 0) { - sel = sel.substring(1); - } else { - selectors.add(sel.substring(0, end)); - sel 
= sel.substring(end+1); - } - } - } - _blogGroups.put(groupName.trim(), selectors); - } - // shitlist=hash,hash,hash - tok = new StringTokenizer(props.getProperty("shitlistedblogs", ""), ","); - while (tok.hasMoreTokens()) { - String blog = tok.nextToken(); - byte bl[] = Base64.decode(blog); - if ( (bl != null) && (bl.length == Hash.HASH_LENGTH) ) - _shitlistedBlogs.add(new Hash(bl)); - } - - String addr = props.getProperty("addressbook", "userhosts.txt"); - if (addr != null) { - _addressbookLocation = addr; - try { - _petnames.load(addr); - } catch (IOException ioe) { - ioe.printStackTrace(); - } - } - - String show = props.getProperty("showimages", "true"); - _showImagesByDefault = (show != null) && (show.equals("true")); - show = props.getProperty("showexpanded", "false"); - _showExpandedByDefault = (show != null) && (show.equals("true")); - _defaultSelector = props.getProperty("defaultselector"); - String allow = props.getProperty("allowaccessremote", "false"); - _allowAccessRemote = (allow != null) && (allow.equals("true")); - _eepProxyPort = getInt(props.getProperty("eepproxyport")); - _webProxyPort = getInt(props.getProperty("webproxyport")); - _torProxyPort = getInt(props.getProperty("torproxyport")); - _eepProxyHost = props.getProperty("eepproxyhost"); - _webProxyHost = props.getProperty("webproxyhost"); - _torProxyHost = props.getProperty("torproxyhost"); - String importadr = props.getProperty("importaddresses", "false"); - _importAddresses = (importadr != null) && (importadr.equals("true")); - } - - private int getInt(String val) { - if (val == null) return -1; - try { return Integer.parseInt(val); } catch (NumberFormatException nfe) { return -1; } - } - - public static final String LOGIN_OK = "Logged in"; - - public String export() { - StringBuffer buf = new StringBuffer(512); - buf.append("password=" + getHashedPassword() + "\n"); - buf.append("blog=" + getBlog().toBase64() + "\n"); - buf.append("lastid=" + getMostRecentEntry() + "\n"); - 
buf.append("lastmetaedition=" + getLastMetaEntry() + "\n"); - buf.append("lastlogin=" + getLastLogin() + "\n"); - buf.append("addressbook=" + getAddressbookLocation() + "\n"); - buf.append("showimages=" + getShowImages() + "\n"); - buf.append("showexpanded=" + getShowExpanded() + "\n"); - buf.append("defaultselector=" + getDefaultSelector() + "\n"); - buf.append("allowaccessremote=" + _allowAccessRemote + "\n"); - buf.append("importaddresses=" + getImportAddresses() + "\n"); - buf.append("groups="); - Map groups = getBlogGroups(); - for (Iterator iter = groups.keySet().iterator(); iter.hasNext(); ) { - String name = (String)iter.next(); - List selectors = (List)groups.get(name); - buf.append(name).append(':'); - for (int i = 0; i < selectors.size(); i++) { - buf.append(selectors.get(i)); - if (i + 1 < selectors.size()) - buf.append(","); - } - if (iter.hasNext()) - buf.append(' '); - } - buf.append('\n'); - // shitlist=hash,hash,hash - List shitlistedBlogs = getShitlistedBlogs(); - if (shitlistedBlogs.size() > 0) { - //buf.setLength(0); - buf.append("shitlistedblogs="); - for (int i = 0; i < shitlistedBlogs.size(); i++) { - Hash blog = (Hash)shitlistedBlogs.get(i); - buf.append(blog.toBase64()); - if (i + 1 < shitlistedBlogs.size()) - buf.append(','); - } - buf.append('\n'); - } - List favoriteTags = getFavoriteTags(); - if (favoriteTags.size() > 0) { - buf.append("favoritetags="); - for (int i = 0; i < favoriteTags.size(); i++) - buf.append(((String)favoriteTags.get(i)).trim()).append(" "); - buf.append('\n'); - } - - return buf.toString(); - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/Version.java b/apps/syndie/java/src/net/i2p/syndie/Version.java deleted file mode 100644 index 4500e5d56..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/Version.java +++ /dev/null @@ -1,11 +0,0 @@ -package net.i2p.syndie; - -/** - * - */ -public class Version { - public static final String VERSION = "0-alpha"; - public static final String BUILD = "0"; - public static 
final String INDEX_VERSION = "1.0"; - public static final String ID = "$Id$"; -} diff --git a/apps/syndie/java/src/net/i2p/syndie/WritableThreadIndex.java b/apps/syndie/java/src/net/i2p/syndie/WritableThreadIndex.java deleted file mode 100644 index 996148bdf..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/WritableThreadIndex.java +++ /dev/null @@ -1,145 +0,0 @@ -package net.i2p.syndie; - -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.util.TreeMap; -import java.util.TreeSet; - -import net.i2p.I2PAppContext; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.EntryContainer; -import net.i2p.syndie.data.ThreadIndex; -import net.i2p.syndie.data.ThreadNode; -import net.i2p.syndie.sml.HTMLRenderer; -import net.i2p.syndie.sml.SMLParser; - -/** - * - */ -class WritableThreadIndex extends ThreadIndex { - /** map of child (BlogURI) to parent (BlogURI) */ - private Map _parents; - /** map of entry (BlogURI) to tags (String[]) */ - private Map _tags; - private static final String[] NO_TAGS = new String[0]; - /** b0rk if the thread seems to go too deep */ - private static final int MAX_THREAD_DEPTH = 64; - - WritableThreadIndex() { - super(); - _parents = new HashMap(); - _tags = new TreeMap(new NewestEntryFirstComparator()); - } - - void addParent(BlogURI parent, BlogURI child) { _parents.put(child, parent); } - void addEntry(BlogURI entry, String tags[]) { - if (tags == null) tags = NO_TAGS; - Object old = _tags.get(entry); - if (old != null) { - System.err.println("Old value: " + old + " new tags: " + tags + " entry: " + entry); - } else { - _tags.put(entry, tags); - } - } - - /** - * pull the data added together into threads, and stash them in the - * roots, organized chronologically - * - */ - void organizeTree() { - Map nodes = new HashMap(_tags.size()); - for (Iterator iter = _tags.keySet().iterator(); iter.hasNext(); ) { - BlogURI entry = (BlogURI)iter.next(); - String tags[] = (String[])_tags.get(entry); - 
BlogURI parent = (BlogURI)_parents.get(entry); - ThreadNodeImpl node = new ThreadNodeImpl(); - node.setEntry(entry); - if (tags != null) - for (int i = 0; i < tags.length; i++) - node.addTag(tags[i]); - if (parent != null) - node.setParentEntry(parent); - addEntry(entry, node); - nodes.put(entry, node); - } - - SMLParser parser = new SMLParser(I2PAppContext.getGlobalContext()); - HeaderReceiver rec = new HeaderReceiver(); - Archive archive = BlogManager.instance().getArchive(); - - for (Iterator iter = nodes.keySet().iterator(); iter.hasNext(); ) { - BlogURI entry = (BlogURI)iter.next(); - ThreadNodeImpl node = (ThreadNodeImpl)nodes.get(entry); - int depth = 0; - // climb the tree - while (node.getParentEntry() != null) { - ThreadNodeImpl parent = (ThreadNodeImpl)nodes.get(node.getParentEntry()); - if (parent == null) break; - - // if the parent doesn't want replies, only include replies under the tree - // if they're written by the same author - BlogURI parentURI = parent.getEntry(); - EntryContainer parentEntry = archive.getEntry(parentURI); - if (parentEntry != null) { - parser.parse(parentEntry.getEntry().getText(), rec); - String refuse = rec.getHeader(HTMLRenderer.HEADER_REFUSE_REPLIES); - if ( (refuse != null) && (Boolean.valueOf(refuse).booleanValue()) ) { - if (parent.getEntry().getKeyHash().equals(entry.getKeyHash())) { - // same author, allow the reply - } else { - // different author, refuse - parent = null; - break; - } - } - } - - node.setParent(parent); - parent.addChild(node); - node = parent; - depth++; - if (depth > MAX_THREAD_DEPTH) - break; - } - - node.summarizeThread(); - } - - // we do this in a second pass, since we need the data built by the - // summarizeThread() of a fully constructed tree - - TreeSet roots = new TreeSet(new NewestNodeFirstComparator()); - for (Iterator iter = nodes.keySet().iterator(); iter.hasNext(); ) { - BlogURI entry = (BlogURI)iter.next(); - ThreadNode node = (ThreadNode)nodes.get(entry); - int depth = 0; - // climb 
the tree - while (node.getParent() != null) - node = node.getParent(); - - if (!roots.contains(node)) { - roots.add(node); - } - } - - // store them, sorted by most recently updated thread first - for (Iterator iter = roots.iterator(); iter.hasNext(); ) - addRoot((ThreadNode)iter.next()); - - _parents.clear(); - _tags.clear(); - } - - public String toString() { - StringBuffer buf = new StringBuffer(); - buf.append(""); - for (int i = 0; i < getRootCount(); i++) { - ThreadNode root = getRoot(i); - buf.append(root.toString()); - } - buf.append("\n"); - return buf.toString(); - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/data/ArchiveIndex.java b/apps/syndie/java/src/net/i2p/syndie/data/ArchiveIndex.java deleted file mode 100644 index 4e92ce2c8..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/data/ArchiveIndex.java +++ /dev/null @@ -1,510 +0,0 @@ -package net.i2p.syndie.data; - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import java.util.StringTokenizer; -import java.util.TreeMap; - -import net.i2p.I2PAppContext; -import net.i2p.data.Base64; -import net.i2p.data.Hash; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.util.Log; - -/** - * Simple read-only summary of an archive - */ -public class ArchiveIndex { - private I2PAppContext _context; - private Log _log; - protected String _version; - protected long _generatedOn; - protected int _allBlogs; - protected int _newBlogs; - protected int _allEntries; - protected int _newEntries; - 
protected long _totalSize; - protected long _newSize; - /** list of BlogSummary objects */ - protected List _blogs; - /** list of Hash objects */ - protected List _newestBlogs; - /** list of BlogURI objects */ - protected List _newestEntries; - /** parent message to a set of replies, ordered with the oldest first */ - protected Map _replies; - protected Properties _headers; - private ThreadIndex _threadedIndex; - - public ArchiveIndex() { - this(I2PAppContext.getGlobalContext(), false); - } - public ArchiveIndex(I2PAppContext ctx) { - this(ctx, false); //true); - } - public ArchiveIndex(I2PAppContext ctx, boolean shouldLoad) { - _context = ctx; - _log = ctx.logManager().getLog(ArchiveIndex.class); - _blogs = new ArrayList(); - _newestBlogs = new ArrayList(); - _newestEntries = new ArrayList(); - _headers = new Properties(); - _replies = Collections.synchronizedMap(new HashMap()); - _generatedOn = -1; - _threadedIndex = null; - if (shouldLoad) - setIsLocal("true"); - } - - public String getVersion() { return _version; } - public Properties getHeaders() { return _headers; } - public int getAllBlogs() { return _allBlogs; } - public int getNewBlogs() { return _newBlogs; } - public int getAllEntries() { return _allEntries; } - public int getNewEntries() { return _newEntries; } - public long getTotalSize() { return _totalSize; } - public long getNewSize() { return _newSize; } - public long getGeneratedOn() { return _generatedOn; } - public ThreadIndex getThreadedIndex() { return _threadedIndex; } - public void setThreadedIndex(ThreadIndex index) { _threadedIndex = index; } - - public String getNewSizeStr() { - if (_newSize < 1024) return _newSize + ""; - if (_newSize < 1024*1024) return _newSize/1024 + "KB"; - else return _newSize/(1024*1024) + "MB"; - } - public String getTotalSizeStr() { - if (_totalSize < 1024) return _totalSize + ""; - if (_totalSize < 1024*1024) return _totalSize/1024 + "KB"; - else return _totalSize/(1024*1024) + "MB"; - } - - /** how many 
blogs/tags are indexed */ - public int getIndexBlogs() { return _blogs.size(); } - /** get the blog used for the given blog/tag pair */ - public Hash getBlog(int index) { return ((BlogSummary)_blogs.get(index)).blog; } - /** get the tag used for the given blog/tag pair */ - public String getBlogTag(int index) { return ((BlogSummary)_blogs.get(index)).tag; } - /** get the highest entry ID for the given blog/tag pair */ - public long getBlogLastUpdated(int index) { return ((BlogSummary)_blogs.get(index)).lastUpdated; } - /** get the entry count for the given blog/tag pair */ - public int getBlogEntryCount(int index) { return ((BlogSummary)_blogs.get(index)).entries.size(); } - /** get the entry from the given blog/tag pair */ - public BlogURI getBlogEntry(int index, int entryIndex) { return ((EntrySummary)((BlogSummary)_blogs.get(index)).entries.get(entryIndex)).entry; } - /** get the raw entry size (including attachments) from the given blog/tag pair */ - public long getBlogEntrySizeKB(int index, int entryIndex) { return ((EntrySummary)((BlogSummary)_blogs.get(index)).entries.get(entryIndex)).size; } - - public boolean getEntryIsKnown(BlogURI uri) { return getEntry(uri) != null; } - public long getBlogEntrySizeKB(BlogURI uri) { - EntrySummary entry = getEntry(uri); - if (entry == null) return -1; - return entry.size; - } - private EntrySummary getEntry(BlogURI uri) { - if ( (uri == null) || (uri.getKeyHash() == null) || (uri.getEntryId() < 0) ) return null; - for (int i = 0; i < _blogs.size(); i++) { - BlogSummary summary = (BlogSummary)_blogs.get(i); - if (summary.blog.equals(uri.getKeyHash())) { - for (int j = 0; j < summary.entries.size(); j++) { - EntrySummary entry = (EntrySummary)summary.entries.get(j); - if (entry.entry.equals(uri)) - return entry; - } - } - } - return null; - } - public Set getBlogEntryTags(BlogURI uri) { - Set tags = new HashSet(); - if ( (uri == null) || (uri.getKeyHash() == null) || (uri.getEntryId() < 0) ) return tags; - for (int i = 0; 
i < _blogs.size(); i++) { - BlogSummary summary = (BlogSummary)_blogs.get(i); - if (summary.blog.equals(uri.getKeyHash())) { - for (int j = 0; j < summary.entries.size(); j++) { - EntrySummary entry = (EntrySummary)summary.entries.get(j); - if (entry.entry.equals(uri)) { - tags.add(summary.tag); - break; - } - } - } - } - return tags; - } - public int getBlogEntryCount(Hash blog) { - Set uris = new HashSet(64); - for (int i = 0; i < _blogs.size(); i++) { - BlogSummary summary = (BlogSummary)_blogs.get(i); - if (summary.blog.equals(blog)) { - uris.addAll(summary.entries); - //for (int j = 0; j < summary.entries.size(); j++) { - // EntrySummary entry = (EntrySummary)summary.entries.get(j); - // uris.add(entry.entry); - //} - } - } - return uris.size(); - } - - /** how many 'new' blogs are listed */ - public int getNewestBlogCount() { return _newestBlogs.size(); } - public Hash getNewestBlog(int index) { return (Hash)_newestBlogs.get(index); } - /** how many 'new' entries are listed */ - public int getNewestBlogEntryCount() { return _newestEntries.size(); } - public BlogURI getNewestBlogEntry(int index) { return (BlogURI)_newestEntries.get(index); } - - /** list of locally known tags (String) under the given blog */ - public List getBlogTags(Hash blog) { - List rv = new ArrayList(); - for (int i = 0; i < _blogs.size(); i++) { - if (getBlog(i).equals(blog)) - rv.add(getBlogTag(i)); - } - return rv; - } - /** list of unique blogs locally known (set of Hash) */ - public Set getUniqueBlogs() { - Set rv = new HashSet(); - for (int i = 0; i < _blogs.size(); i++) { - rv.add(getBlog(i)); - } - return rv; - } - public List getReplies(BlogURI uri) { - Set replies = (Set)_replies.get(uri); - if (replies == null) return Collections.EMPTY_LIST; - synchronized (replies) { - return new ArrayList(replies); - } - } - public void setLocation(String location) { - try { - File l = new File(location); - if (l.exists()) - load(l); - } catch (IOException ioe) { - ioe.printStackTrace(); - } 
- } - public void setIsLocal(String val) { - if ("true".equals(val)) { - try { - File dir = BlogManager.instance().getArchive().getArchiveDir(); - load(new File(dir, Archive.INDEX_FILE)); - } catch (IOException ioe) {} - } - } - - public void load(File location) throws IOException { - FileInputStream in = null; - try { - in = new FileInputStream(location); - load(in); - } finally { - if (in != null) - try { in.close(); } catch (IOException ioe) {} - } - } - - /** load up the index from an archive.txt */ - public void load(InputStream index) throws IOException { - _allBlogs = 0; - _allEntries = 0; - _newBlogs = 0; - _newEntries = 0; - _newSize = 0; - _totalSize = 0; - _version = null; - _blogs = new ArrayList(); - _newestBlogs = new ArrayList(); - _newestEntries = new ArrayList(); - _headers = new Properties(); - BufferedReader in = new BufferedReader(new InputStreamReader(index, "UTF-8")); - String line = null; - line = in.readLine(); - if (line == null) - return; - if (!line.startsWith("SyndieVersion:")) - throw new IOException("Index is invalid - it starts with " + line); - _version = line.substring("SyndieVersion:".length()).trim(); - if (!_version.startsWith("1.")) - throw new IOException("Index is not supported, we only handle versions 1.*, but it is " + _version); - while ( (line = in.readLine()) != null) { - if (line.length() <= 0) - break; - if (line.startsWith("Blog:")) break; - int split = line.indexOf(':'); - if (split <= 0) continue; - if (split >= line.length()-1) continue; - _headers.setProperty(line.substring(0, split), line.substring(split+1)); - } - if (line != null) { - do { - if (!line.startsWith("Blog:")) - break; - loadBlog(line); - } while ( (line = in.readLine()) != null); - } - - // ignore the first line that doesnt start with blog - its blank - while ( (line = in.readLine()) != null) { - int split = line.indexOf(':'); - if (split <= 0) continue; - if (split >= line.length()-1) continue; - String key = line.substring(0, split); - String val 
= line.substring(split+1); - if (key.equals("AllBlogs")) - _allBlogs = getInt(val); - else if (key.equals("NewBlogs")) - _newBlogs = getInt(val); - else if (key.equals("AllEntries")) - _allEntries = getInt(val); - else if (key.equals("NewEntries")) - _newEntries = getInt(val); - else if (key.equals("TotalSize")) - _totalSize = getInt(val); - else if (key.equals("NewSize")) - _newSize = getInt(val); - else if (key.equals("NewestBlogs")) - _newestBlogs = parseNewestBlogs(val); - else if (key.equals("NewestEntries")) - _newestEntries = parseNewestEntries(val); - //else - // System.err.println("Key: " + key + " val: " + val); - } - } - - /** - * Dig through the index for BlogURIs matching the given criteria, ordering the results by - * their own entryIds. - * - * @param out where to store the matches - * @param blog if set, what blog key must the entries be under - * @param tag if set, what tag must the entry be in - * - */ - public void selectMatchesOrderByEntryId(List out, Hash blog, String tag) { - selectMatchesOrderByEntryId(out, blog, tag, 0); - } - public void selectMatchesOrderByEntryId(List out, Hash blog, String tag, long lowestEntryId) { - TreeMap ordered = new TreeMap(); - for (int i = 0; i < _blogs.size(); i++) { - BlogSummary summary = (BlogSummary)_blogs.get(i); - if (blog != null) { - if (!blog.equals(summary.blog)) - continue; - } - - if ( (tag != null) && (tag.trim().length() > 0) ) { - if (!tag.equals(summary.tag)) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Tag [" + summary.tag + "] does not match the requested [" + tag + "] in " + summary.blog.toBase64()); - if (false) { - StringBuffer b = new StringBuffer(tag.length()*2); - for (int j = 0; j < tag.length(); j++) { - b.append((int)tag.charAt(j)); - b.append('.'); - if (summary.tag.length() > j+1) - b.append((int)summary.tag.charAt(j)); - else - b.append('_'); - b.append(' '); - } - if (_log.shouldLog(Log.DEBUG)) - _log.debug("tag.summary: " + b.toString()); - } - continue; - } - } - - for (int 
j = 0; j < summary.entries.size(); j++) { - EntrySummary entry = (EntrySummary)summary.entries.get(j); - if (entry.entry.getEntryId() < lowestEntryId) { - long daysAgo1 = entry.entry.getEntryId() / (24*60*60*1000l); - long daysAgo2 = lowestEntryId / (24*60*60*1000l); - continue; - } else { - String k = (Long.MAX_VALUE-entry.entry.getEntryId()) + "-" + entry.entry.getKeyHash().toBase64(); - ordered.put(k, entry.entry); - } - } - } - for (Iterator iter = ordered.values().iterator(); iter.hasNext(); ) { - BlogURI entry = (BlogURI)iter.next(); - if (entry.getEntryId() < lowestEntryId) { - continue; - } - if (!out.contains(entry)) - out.add(entry); - } - } - - private static final int getInt(String val) { - try { - return Integer.parseInt(val.trim()); - } catch (NumberFormatException nfe) { - nfe.printStackTrace(); - return 0; - } - } - - private List parseNewestBlogs(String vals) { - List rv = new ArrayList(); - StringTokenizer tok = new StringTokenizer(vals, " \t\n"); - while (tok.hasMoreTokens()) - rv.add(new Hash(Base64.decode(tok.nextToken()))); - return rv; - } - private List parseNewestEntries(String vals) { - List rv = new ArrayList(); - StringTokenizer tok = new StringTokenizer(vals, " \t\n"); - while (tok.hasMoreTokens()) - rv.add(new BlogURI(tok.nextToken())); - return rv; - } - - private void loadBlog(String line) throws IOException { - // Blog: hash YYYYMMDD tag\t[ yyyymmdd_n_sizeKB]* - StringTokenizer tok = new StringTokenizer(line.trim(), " \n\t"); - if (tok.countTokens() < 4) - return; - tok.nextToken(); - String keyStr = tok.nextToken(); - byte k[] = Base64.decode(keyStr); - if ( (k == null) || (k.length != Hash.HASH_LENGTH) ) - return; // ignore bad hashes - Hash keyHash = new Hash(k); - String whenStr = tok.nextToken(); - long when = getIndexDate(whenStr); - String tag = tok.nextToken(); - BlogSummary summary = new BlogSummary(); - summary.blog = keyHash; - summary.tag = tag.trim(); - summary.lastUpdated = when; - summary.entries = new ArrayList(); - 
while (tok.hasMoreTokens()) { - String entry = tok.nextToken(); - long id = Archive.getEntryIdFromIndexName(entry); - int kb = Archive.getSizeFromIndexName(entry); - summary.entries.add(new EntrySummary(new BlogURI(keyHash, id), kb)); - } - _blogs.add(summary); - } - - private SimpleDateFormat _dateFmt = new SimpleDateFormat("yyyyMMdd", Locale.UK); - private long getIndexDate(String yyyymmdd) { - synchronized (_dateFmt) { - try { - return _dateFmt.parse(yyyymmdd).getTime(); - } catch (ParseException pe) { - return -1; - } - } - } - private String getIndexDate(long when) { - synchronized (_dateFmt) { - return _dateFmt.format(new Date(when)); - } - } - - protected class BlogSummary { - Hash blog; - String tag; - long lastUpdated; - /** list of EntrySummary objects */ - List entries; - - public BlogSummary() { - entries = new ArrayList(); - } - } - protected class EntrySummary { - BlogURI entry; - long size; - public EntrySummary(BlogURI uri, long kb) { - size = kb; - entry = uri; - } - public int hashCode() { - return entry.hashCode(); - } - public boolean equals(Object obj) { - if ( (obj instanceof EntrySummary) && (((EntrySummary)obj).entry.equals(entry)) ) - return true; - return false; - } - } - - /** export the index into an archive.txt */ - public String toString() { - StringBuffer rv = new StringBuffer(1024); - rv.append("SyndieVersion: ").append(_version).append('\n'); - for (Iterator iter = _headers.keySet().iterator(); iter.hasNext(); ) { - String key = (String)iter.next(); - String val = _headers.getProperty(key); - rv.append(key).append(": ").append(val).append('\n'); - } - for (int i = 0; i < _blogs.size(); i++) { - rv.append("Blog: "); - Hash blog = getBlog(i); - String tag = getBlogTag(i); - rv.append(Base64.encode(blog.getData())).append(' '); - rv.append(getIndexDate(getBlogLastUpdated(i))).append(' '); - rv.append(tag).append('\t'); - int entries = getBlogEntryCount(i); - for (int j = 0; j < entries; j++) { - BlogURI entry = getBlogEntry(i, j); - 
long kb = getBlogEntrySizeKB(i, j); - rv.append(Archive.getIndexName(entry.getEntryId(), (int)kb*1024)).append(' '); - } - rv.append('\n'); - } - - rv.append('\n'); - rv.append("AllBlogs: ").append(_allBlogs).append('\n'); - rv.append("NewBlogs: ").append(_newBlogs).append('\n'); - rv.append("AllEntries: ").append(_allEntries).append('\n'); - rv.append("NewEntries: ").append(_newEntries).append('\n'); - rv.append("TotalSize: ").append(_totalSize).append('\n'); - rv.append("NewSize: ").append(_newSize).append('\n'); - - rv.append("NewestBlogs: "); - for (int i = 0; i < _newestBlogs.size(); i++) - rv.append(((Hash)(_newestBlogs.get(i))).toBase64()).append(' '); - rv.append('\n'); - - rv.append("NewestEntries: "); - for (int i = 0; i < _newestEntries.size(); i++) - rv.append(((BlogURI)_newestEntries.get(i)).toString()).append(' '); - rv.append('\n'); - return rv.toString(); - } - - - /** Usage: ArchiveIndex archive.txt */ - public static void main(String args[]) { - try { - ArchiveIndex i = new ArchiveIndex(); - i.load(new File(args[0])); - System.out.println(i.toString()); - } catch (IOException ioe) { ioe.printStackTrace(); } - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/data/Attachment.java b/apps/syndie/java/src/net/i2p/syndie/data/Attachment.java deleted file mode 100644 index f3efa2396..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/data/Attachment.java +++ /dev/null @@ -1,128 +0,0 @@ -package net.i2p.syndie.data; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import net.i2p.data.DataHelper; - -/** - * - */ -public class Attachment { - private byte _data[]; - private byte _rawMetadata[]; - private List _keys; - private List _values; - - public Attachment(byte data[], byte metadata[]) { - _data = data; - _rawMetadata = metadata; - _keys = new ArrayList(); - _values = new ArrayList(); - 
parseMeta(); - } - - public static final String NAME = "Name"; - public static final String DESCRIPTION = "Description"; - public static final String MIMETYPE = "MimeType"; - - public Attachment(byte data[], String name, String description, String mimeType) { - _data = data; - _keys = new ArrayList(); - _values = new ArrayList(); - _keys.add(NAME); - _values.add(name); - if ( (description != null) && (description.trim().length() > 0) ) { - _keys.add(DESCRIPTION); - _values.add(description); - } - if ( (mimeType != null) && (mimeType.trim().length() > 0) ) { - _keys.add(MIMETYPE); - _values.add(mimeType); - } - createMeta(); - } - - public byte[] getData() { return _data; } - public int getDataLength() { return _data.length; } - public byte[] getRawMetadata() { return _rawMetadata; } - - public InputStream getDataStream() throws IOException { return new ByteArrayInputStream(_data); } - - public String getMeta(String key) { - for (int i = 0; i < _keys.size(); i++) { - if (key.equals(_keys.get(i))) - return (String)_values.get(i); - } - return null; - } - - public String getName() { return getMeta(NAME); } - public String getDescription() { return getMeta(DESCRIPTION); } - public String getMimeType() { return getMeta(MIMETYPE); } - - public void setMeta(String key, String val) { - for (int i = 0; i < _keys.size(); i++) { - if (key.equals(_keys.get(i))) { - _values.set(i, val); - return; - } - } - _keys.add(key); - _values.add(val); - } - - public Map getMeta() { - Map rv = new HashMap(_keys.size()); - for (int i = 0; i < _keys.size(); i++) { - String k = (String)_keys.get(i); - String v = (String)_values.get(i); - rv.put(k,v); - } - return rv; - } - - private void createMeta() { - StringBuffer meta = new StringBuffer(64); - for (int i = 0; i < _keys.size(); i++) { - meta.append(_keys.get(i)).append(':').append(_values.get(i)).append('\n'); - } - _rawMetadata = DataHelper.getUTF8(meta); - } - - private void parseMeta() { - if (_rawMetadata == null) return; - String key 
= null; - String val = null; - int keyBegin = 0; - int valBegin = -1; - for (int i = 0; i < _rawMetadata.length; i++) { - if (_rawMetadata[i] == ':') { - key = DataHelper.getUTF8(_rawMetadata, keyBegin, i - keyBegin); - valBegin = i + 1; - } else if (_rawMetadata[i] == '\n') { - val = DataHelper.getUTF8(_rawMetadata, valBegin, i - valBegin); - _keys.add(key); - _values.add(val); - keyBegin = i + 1; - key = null; - val = null; - } - } - } - - public String toString() { - int len = 0; - if (_data != null) - len = _data.length; - return getName() - + (getDescription() != null ? ": " + getDescription() : "") - + (getMimeType() != null ? ", type: " + getMimeType() : "") - + ", size: " + len; - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/data/BlogInfo.java b/apps/syndie/java/src/net/i2p/syndie/data/BlogInfo.java deleted file mode 100644 index 82613096c..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/data/BlogInfo.java +++ /dev/null @@ -1,299 +0,0 @@ -package net.i2p.syndie.data; - -import java.io.BufferedReader; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Properties; -import java.util.StringTokenizer; - -import net.i2p.I2PAppContext; -import net.i2p.data.Base64; -import net.i2p.data.DataHelper; -import net.i2p.data.Signature; -import net.i2p.data.SigningPrivateKey; -import net.i2p.data.SigningPublicKey; -import net.i2p.util.Log; - -/** - * Blog metadata. Formatted as:
      - * [key:val\n]*
      - * 
      - * - * Required keys: - * Owner: base64 of their signing public key - * Signature: base64 of the DSA signature of the rest of the ordered metadata - * Edition: base10 unique identifier for this metadata (higher clobbers lower) - * - * Optional keys: - * Posters: comma delimited list of base64 signing public keys that - * can post to the blog - * Name: name of the blog - * Description: brief description of the blog - * - */ -public class BlogInfo { - private SigningPublicKey _key; - private SigningPublicKey _posters[]; - private String _optionNames[]; - private String _optionValues[]; - private Signature _signature; - - public BlogInfo() {} - - public BlogInfo(SigningPublicKey key, SigningPublicKey posters[], Properties opts) { - _optionNames = new String[0]; - _optionValues = new String[0]; - setKey(key); - setPosters(posters); - for (Iterator iter = opts.keySet().iterator(); iter.hasNext(); ) { - String k = (String)iter.next(); - String v = opts.getProperty(k); - setProperty(k.trim(), v.trim()); - } - } - - public SigningPublicKey getKey() { return _key; } - public void setKey(SigningPublicKey key) { - _key = key; - setProperty(OWNER_KEY, Base64.encode(key.getData())); - } - - public static final String OWNER_KEY = "Owner"; - public static final String POSTERS = "Posters"; - public static final String SIGNATURE = "Signature"; - public static final String NAME = "Name"; - public static final String DESCRIPTION = "Description"; - public static final String CONTACT_URL = "ContactURL"; - public static final String EDITION = "Edition"; - public static final String SUMMARY_ENTRY_ID = "SummaryEntryId"; - - public void load(InputStream in) throws IOException { - Log log = I2PAppContext.getGlobalContext().logManager().getLog(getClass()); - BufferedReader reader = new BufferedReader(new InputStreamReader(in, "UTF-8")); - List names = new ArrayList(); - List vals = new ArrayList(); - String line = null; - while ( (line = reader.readLine()) != null) { - if 
(log.shouldLog(Log.DEBUG)) - log.debug("Read info line [" + line + "]"); - line = line.trim(); - int len = line.length(); - int split = line.indexOf(':'); - if ( (len <= 0) || (split <= 0) ) { - continue; - } else if (split >= len - 1) { - names.add(line.substring(0, split).trim()); - vals.add(""); - continue; - } - - String key = line.substring(0, split).trim(); - String val = line.substring(split+1).trim(); - names.add(key); - vals.add(val); - } - _optionNames = new String[names.size()]; - _optionValues = new String[names.size()]; - for (int i = 0; i < _optionNames.length; i++) { - _optionNames[i] = (String)names.get(i); - _optionValues[i] = (String)vals.get(i); - //System.out.println("Loaded info: [" + _optionNames[i] + "] = [" + _optionValues[i] + "]"); - } - - String keyStr = getProperty(OWNER_KEY); - if (keyStr == null) throw new IOException("Owner not found"); - _key = new SigningPublicKey(Base64.decode(keyStr)); - - String postersStr = getProperty(POSTERS); - if (postersStr != null) { - StringTokenizer tok = new StringTokenizer(postersStr, ", \t"); - _posters = new SigningPublicKey[tok.countTokens()]; - for (int i = 0; tok.hasMoreTokens(); i++) - _posters[i] = new SigningPublicKey(Base64.decode(tok.nextToken())); - } - - String sigStr = getProperty(SIGNATURE); - if (sigStr == null) throw new IOException("Signature not found"); - _signature = new Signature(Base64.decode(sigStr)); - } - - public void write(OutputStream out) throws IOException { write(out, true); } - public void write(OutputStream out, boolean includeRealSignature) throws IOException { - StringBuffer buf = new StringBuffer(512); - for (int i = 0; i < _optionNames.length; i++) { - if ( (includeRealSignature) || (!SIGNATURE.equals(_optionNames[i])) ) - buf.append(_optionNames[i]).append(':').append(_optionValues[i]).append('\n'); - } - String s = buf.toString(); - out.write(s.getBytes("UTF-8")); - } - - public String getProperty(String name) { - for (int i = 0; i < _optionNames.length; i++) { - 
if (_optionNames[i].equals(name)) { - String val = _optionValues[i]; - //System.out.println("getProperty[" + name + "] = [" + val + "] [sz=" + val.length() +"]"); - //for (int j = 0; j < val.length(); j++) { - // char c = (char)val.charAt(j); - // if (c != (c & 0x7F)) - // System.out.println("char " + j + ": " + (int)c); - //} - return val; - } - } - return null; - } - - private void setProperty(String name, String val) { - for (int i = 0; i < _optionNames.length; i++) { - if (_optionNames[i].equals(name)) { - _optionValues[i] = val; - return; - } - } - - String names[] = new String[_optionNames.length + 1]; - String values[] = new String[_optionValues.length + 1]; - for (int i = 0; i < _optionNames.length; i++) { - names[i] = _optionNames[i]; - values[i] = _optionValues[i]; - } - names[names.length-1] = name; - values[values.length-1] = val; - _optionNames = names; - _optionValues = values; - } - - public int getEdition() { - String e = getProperty(EDITION); - if (e != null) { - try { - return Integer.parseInt(e); - } catch (NumberFormatException nfe) { - return 0; - } - } - return 0; - } - - public String[] getProperties() { return _optionNames; } - - public SigningPublicKey[] getPosters() { return _posters; } - public void setPosters(SigningPublicKey posters[]) { - _posters = posters; - StringBuffer buf = new StringBuffer(); - for (int i = 0; posters != null && i < posters.length; i++) { - buf.append(Base64.encode(posters[i].getData())); - if (i + 1 < posters.length) - buf.append(','); - } - setProperty(POSTERS, buf.toString()); - } - - public boolean verify(I2PAppContext ctx) { - try { - ByteArrayOutputStream out = new ByteArrayOutputStream(512); - write(out, false); - out.close(); - byte data[] = out.toByteArray(); - return ctx.dsa().verifySignature(_signature, data, _key); - } catch (IOException ioe) { - return false; - } - } - - public void sign(I2PAppContext ctx, SigningPrivateKey priv) { - try { - ByteArrayOutputStream out = new ByteArrayOutputStream(512); 
- write(out, false); - byte data[] = out.toByteArray(); - Signature sig = ctx.dsa().sign(data, priv); - if (sig == null) - throw new IOException("wtf, why is the signature null? data.len = " + data.length + " priv: " + priv); - setProperty(SIGNATURE, Base64.encode(sig.getData())); - _signature = sig; - } catch (IOException ioe) { - ioe.printStackTrace(); - } - } - - public String toString() { - StringBuffer buf = new StringBuffer(); - buf.append("Blog ").append(getKey().calculateHash().toBase64()); - for (int i = 0; i < _optionNames.length; i++) { - if ( (!SIGNATURE.equals(_optionNames[i])) && - (!OWNER_KEY.equals(_optionNames[i])) && - (!SIGNATURE.equals(_optionNames[i])) ) - buf.append(' ').append(_optionNames[i]).append(": ").append(_optionValues[i]); - } - - if ( (_posters != null) && (_posters.length > 0) ) { - buf.append(" additional posts by"); - for (int i = 0; i < _posters.length; i++) { - buf.append(' ').append(_posters[i].calculateHash().toBase64()); - if (i + 1 < _posters.length) - buf.append(','); - } - } - return buf.toString(); - } - - private static final String TEST_STRING = "\u20AC\u00DF\u6771\u10400\u00F6"; - - public static void main(String args[]) { - I2PAppContext ctx = I2PAppContext.getGlobalContext(); - if (true) { - try { - Object keys[] = ctx.keyGenerator().generateSigningKeypair(); - SigningPublicKey pub = (SigningPublicKey)keys[0]; - SigningPrivateKey priv = (SigningPrivateKey)keys[1]; - - Properties opts = new Properties(); - opts.setProperty("Name", TEST_STRING); - opts.setProperty("Description", TEST_STRING); - opts.setProperty("Edition", "0"); - opts.setProperty("ContactURL", TEST_STRING); - - String nameOrig = opts.getProperty("Name"); - BlogInfo info = new BlogInfo(pub, null, opts); - info.sign(ctx, priv); - boolean ok = info.verify(ctx); - System.err.println("sign&verify: " + ok); - - FileOutputStream o = new FileOutputStream("bloginfo-test.dat"); - info.write(o, true); - o.close(); - FileInputStream i = new 
FileInputStream("bloginfo-test.dat"); - byte buf[] = new byte[4096]; - int sz = DataHelper.read(i, buf); - BlogInfo read = new BlogInfo(); - read.load(new ByteArrayInputStream(buf, 0, sz)); - ok = read.verify(ctx); - System.err.println("write to disk, verify read: " + ok); - System.err.println("Data: " + Base64.encode(buf, 0, sz)); - System.err.println("Str : " + new String(buf, 0, sz)); - - System.err.println("Name ok? " + read.getProperty("Name").equals(TEST_STRING)); - System.err.println("Desc ok? " + read.getProperty("Description").equals(TEST_STRING)); - System.err.println("Name ok? " + read.getProperty("ContactURL").equals(TEST_STRING)); - } catch (Exception e) { e.printStackTrace(); } - } else { - try { - FileInputStream in = new FileInputStream(args[0]); - BlogInfo info = new BlogInfo(); - info.load(in); - boolean ok = info.verify(I2PAppContext.getGlobalContext()); - System.out.println("OK? " + ok + " :" + info); - } catch (Exception e) { e.printStackTrace(); } - } - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/data/BlogInfoData.java b/apps/syndie/java/src/net/i2p/syndie/data/BlogInfoData.java deleted file mode 100644 index 8cda08be3..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/data/BlogInfoData.java +++ /dev/null @@ -1,135 +0,0 @@ -package net.i2p.syndie.data; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.List; -import java.util.Properties; - -import net.i2p.client.naming.PetName; -import net.i2p.data.DataHelper; - -/** - * Contain the current supplementary data for rendering a blog, as opposed to - * just verifying and rendering a post. 
- */ -public class BlogInfoData { - private BlogURI _dataEntryId; - /** list of List of PetName instances that the blog refers to */ - private List _referenceGroups; - /** customized style config */ - private Properties _styleOverrides; - /** the blog's logo */ - private Attachment _logo; - private List _otherAttachments; - - public static final String ATTACHMENT_LOGO = "logo.png"; - public static final String ATTACHMENT_REFERENCE_GROUPS = "groups.txt"; - public static final String ATTACHMENT_STYLE_OVERRIDE = "style.cfg"; - /** identifies a post as being a blog info data, not a content bearing post */ - public static final String TAG = "BlogInfoData"; - - public static final int MAX_LOGO_SIZE = 128*1024; - - public BlogInfoData() {} - - public BlogURI getEntryId() { return _dataEntryId; } - public boolean isLogoSpecified() { return _logo != null; } - public Attachment getLogo() { return _logo; } - public boolean isStyleSpecified() { return _styleOverrides != null; } - public Properties getStyleOverrides() { return _styleOverrides; } - public int getReferenceGroupCount() { return _referenceGroups != null ? _referenceGroups.size() : 0; } - /** list of PetName elements to be included in the list */ - public List getReferenceGroup(int groupNum) { return (List)_referenceGroups.get(groupNum); } - public int getOtherAttachmentCount() { return _otherAttachments != null ? 
_otherAttachments.size() : 0; } - public Attachment getOtherAttachment(int num) { return (Attachment)_otherAttachments.get(num); } - public Attachment getOtherAttachment(String name) { - for (int i = 0; i < _otherAttachments.size(); i++) { - Attachment a = (Attachment)_otherAttachments.get(i); - if (a.getName().equals(name)) - return a; - } - return null; - } - - public void writeLogo(OutputStream out) throws IOException { - InputStream in = null; - try { - in = _logo.getDataStream(); - byte buf[] = new byte[4096]; - int read = 0; - while ( (read = in.read(buf)) != -1) - out.write(buf, 0, read); - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - } - } - - - public void load(EntryContainer entry) throws IOException { - _dataEntryId = entry.getURI(); - Attachment attachments[] = entry.getAttachments(); - for (int i = 0; i < attachments.length; i++) { - if (ATTACHMENT_LOGO.equals(attachments[i].getName())) { - _logo = attachments[i]; - } else if (ATTACHMENT_REFERENCE_GROUPS.equals(attachments[i].getName())) { - readReferenceGroups(attachments[i]); - } else if (ATTACHMENT_STYLE_OVERRIDE.equals(attachments[i].getName())) { - readStyleOverride(attachments[i]); - } else { - if (_otherAttachments == null) - _otherAttachments = new ArrayList(); - _otherAttachments.add(attachments[i]); - } - } - } - - private void readReferenceGroups(Attachment att) throws IOException { - InputStream in = null; - try { - in = att.getDataStream(); - StringBuffer line = new StringBuffer(128); - List groups = new ArrayList(); - String prevGroup = null; - List defaultGroup = new ArrayList(); - while (true) { - boolean ok = DataHelper.readLine(in, line); - if (line.length() > 0) { - PetName pn = new PetName(line.toString().trim()); - if (pn.getGroupCount() <= 0) { - defaultGroup.add(pn); - } else if (pn.getGroup(0).equals(prevGroup)) { - List curGroup = (List)groups.get(groups.size()-1); - curGroup.add(pn); - } else { - List curGroup = new ArrayList(); - 
curGroup.add(pn); - groups.add(curGroup); - prevGroup = pn.getGroup(0); - } - } - line.setLength(0); - if (!ok) - break; - } - if (defaultGroup.size() > 0) - groups.add(defaultGroup); - _referenceGroups = groups; - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - } - } - - private void readStyleOverride(Attachment att) throws IOException { - InputStream in = null; - try { - in = att.getDataStream(); - Properties props = new Properties(); - DataHelper.loadProps(props, in); - _styleOverrides = props; - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - } - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/data/BlogURI.java b/apps/syndie/java/src/net/i2p/syndie/data/BlogURI.java deleted file mode 100644 index a164cb32c..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/data/BlogURI.java +++ /dev/null @@ -1,119 +0,0 @@ -package net.i2p.syndie.data; - -import java.util.Comparator; - -import net.i2p.data.Base64; -import net.i2p.data.DataHelper; -import net.i2p.data.Hash; - -/** - * - */ -public class BlogURI { - private Hash _blogHash; - private long _entryId; - - public static final Comparator COMPARATOR = new NewestFirstComparator(); - - public BlogURI() { - this(null, -1); - } - public BlogURI(Hash blogHash, long entryId) { - _blogHash = blogHash; - _entryId = entryId; - } - public BlogURI(String uri) { - if (uri.startsWith("blog://")) { - int off = "blog://".length(); - _blogHash = new Hash(Base64.decode(uri.substring(off, off+44))); // 44 chars == base64(32 bytes) - int entryStart = uri.indexOf('/', off+1); - if (entryStart < 0) { - _entryId = -1; - } else { - try { - _entryId = Long.parseLong(uri.substring(entryStart+1).trim()); - } catch (NumberFormatException nfe) { - _entryId = -1; - } - } - } else if (uri.startsWith("entry://")) { - int off = "entry://".length(); - _blogHash = new Hash(Base64.decode(uri.substring(off, off+44))); // 44 chars == base64(32 bytes) - int entryStart = uri.indexOf('/', 
off+1); - if (entryStart < 0) { - _entryId = -1; - } else { - try { - _entryId = Long.parseLong(uri.substring(entryStart+1).trim()); - } catch (NumberFormatException nfe) { - _entryId = -1; - } - } - } else { - _blogHash = null; - _entryId = -1; - } - } - - public Hash getKeyHash() { return _blogHash; } - public long getEntryId() { return _entryId; } - - public void setKeyHash(Hash hash) { _blogHash = hash; } - public void setEntryId(long id) { _entryId = id; } - - public String toString() { - if ( (_blogHash == null) || (_blogHash.getData() == null) ) - return ""; - StringBuffer rv = new StringBuffer(64); - rv.append("blog://").append(Base64.encode(_blogHash.getData())); - rv.append('/'); - if (_entryId >= 0) - rv.append(_entryId); - return rv.toString(); - } - - public boolean equals(Object obj) { - if (obj == null) return false; - if (obj.getClass() != getClass()) return false; - return DataHelper.eq(_entryId, ((BlogURI)obj)._entryId) && - DataHelper.eq(_blogHash, ((BlogURI)obj)._blogHash); - } - public int hashCode() { - int rv = (int)((_entryId >>> 32) & 0x7FFFFFFF); - rv += (_entryId & 0x7FFFFFFF); - - if (_blogHash != null) - rv += _blogHash.hashCode(); - return rv; - } - - public static void main(String args[]) { - test("http://asdf/"); - test("blog://Vq~AlW-r7OM763okVUFIDvVFzxOjpNNsAx0rFb2yaE8="); - test("blog://Vq~AlW-r7OM763okVUFIDvVFzxOjpNNsAx0rFb2yaE8=/"); - test("blog://Vq~AlW-r7OM763okVUFIDvVFzxOjpNNsAx0rFb2yaE8=/123456789"); - test("entry://Vq~AlW-r7OM763okVUFIDvVFzxOjpNNsAx0rFb2yaE8=/"); - test("entry://Vq~AlW-r7OM763okVUFIDvVFzxOjpNNsAx0rFb2yaE8=/123456789"); - } - private static void test(String uri) { - BlogURI u = new BlogURI(uri); - if (!u.toString().equals(uri)) - System.err.println("Not a match: [" + uri + "] != [" + u.toString() + "]"); - } - - /** - * Order the BlogURIs by entryId, with the highest entryId first - */ - private static class NewestFirstComparator implements Comparator { - public int compare(Object lhs, Object rhs) { - 
BlogURI l = (BlogURI)lhs; - BlogURI r = (BlogURI)rhs; - if (l.getEntryId() > r.getEntryId()) - return -1; - else if (l.getEntryId() < r.getEntryId()) - return 1; - else // same date, compare by blog hash (aka randomly) - return DataHelper.compareTo(l.getKeyHash().getData(), r.getKeyHash().getData()); - } - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/data/EncodingTestGenerator.java b/apps/syndie/java/src/net/i2p/syndie/data/EncodingTestGenerator.java deleted file mode 100644 index 7bc0cae36..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/data/EncodingTestGenerator.java +++ /dev/null @@ -1,92 +0,0 @@ -package net.i2p.syndie.data; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.util.Properties; - -import net.i2p.I2PAppContext; -import net.i2p.data.DataHelper; -import net.i2p.data.SigningPrivateKey; -import net.i2p.data.SigningPublicKey; - -/** - * Create a new blog metadata & set of entries using some crazy UTF8 encoded chars, - * then make sure they're always valid. These blogs & entries can then be fed into - * jetty/syndie/etc to see how and where they are getting b0rked. 
- */ -public class EncodingTestGenerator { - public EncodingTestGenerator() {} - public static final String TEST_STRING = "\u20AC\u00DF\u6771\u10400\u00F6"; - - public static void main(String args[]) { - I2PAppContext ctx = I2PAppContext.getGlobalContext(); - try { - Object keys[] = ctx.keyGenerator().generateSigningKeypair(); - SigningPublicKey pub = (SigningPublicKey)keys[0]; - SigningPrivateKey priv = (SigningPrivateKey)keys[1]; - - Properties opts = new Properties(); - opts.setProperty("Name", TEST_STRING); - opts.setProperty("Description", TEST_STRING); - opts.setProperty("Edition", "0"); - opts.setProperty("ContactURL", TEST_STRING); - - String nameOrig = opts.getProperty("Name"); - BlogInfo info = new BlogInfo(pub, null, opts); - info.sign(ctx, priv); - boolean ok = info.verify(ctx); - System.err.println("sign&verify: " + ok); - - FileOutputStream o = new FileOutputStream("encodedMeta.dat"); - info.write(o, true); - o.close(); - FileInputStream i = new FileInputStream("encodedMeta.dat"); - byte buf[] = new byte[4096]; - int sz = DataHelper.read(i, buf); - BlogInfo read = new BlogInfo(); - read.load(new ByteArrayInputStream(buf, 0, sz)); - ok = read.verify(ctx); - System.err.println("write to disk, verify read: " + ok); - System.err.println("Name ok? " + read.getProperty("Name").equals(TEST_STRING)); - System.err.println("Desc ok? " + read.getProperty("Description").equals(TEST_STRING)); - System.err.println("Name ok? 
" + read.getProperty("ContactURL").equals(TEST_STRING)); - - // ok now lets create some entries - BlogURI uri = new BlogURI(read.getKey().calculateHash(), 0); - String tags[] = new String[4]; - for (int j = 0; j < tags.length; j++) - tags[j] = TEST_STRING + "_" + j; - StringBuffer smlOrig = new StringBuffer(512); - smlOrig.append("Subject: ").append(TEST_STRING).append("\n\n"); - smlOrig.append("Hi with ").append(TEST_STRING); - EntryContainer container = new EntryContainer(uri, tags, DataHelper.getUTF8(smlOrig)); - container.seal(ctx, priv, null); - ok = container.verifySignature(ctx, read); - System.err.println("Sealed and verified entry: " + ok); - FileOutputStream fos = new FileOutputStream("encodedEntry.dat"); - container.write(fos, true); - fos.close(); - System.out.println("Written to " + new File("encodedEntry.dat").getAbsolutePath()); - - FileInputStream fis = new FileInputStream("encodedEntry.dat"); - EntryContainer read2 = new EntryContainer(); - read2.load(fis); - ok = read2.verifySignature(ctx, read); - System.out.println("Read ok? " + ok); - - read2.parseRawData(ctx); - String tagsRead[] = read2.getTags(); - for (int j = 0; j < tagsRead.length; j++) { - if (!tags[j].equals(tagsRead[j])) - System.err.println("Tag error [" + j + "]: read = [" + tagsRead[j] + "] want [" + tags[j] + "]"); - else - System.err.println("Tag ok [" + j + "]"); - } - String readText = read2.getEntry().getText(); - ok = readText.equals(smlOrig.toString()); - System.err.println("SML text ok? 
" + ok); - } catch (Exception e) { e.printStackTrace(); } - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/data/Entry.java b/apps/syndie/java/src/net/i2p/syndie/data/Entry.java deleted file mode 100644 index 55e23f133..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/data/Entry.java +++ /dev/null @@ -1,14 +0,0 @@ -package net.i2p.syndie.data; - -/** - * - */ -public class Entry { - private String _text; - - public Entry(String raw) { - _text = raw; - } - - public String getText() { return _text; } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/data/EntryContainer.java b/apps/syndie/java/src/net/i2p/syndie/data/EntryContainer.java deleted file mode 100644 index 892d58ade..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/data/EntryContainer.java +++ /dev/null @@ -1,460 +0,0 @@ -package net.i2p.syndie.data; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.StringTokenizer; -import java.util.zip.ZipEntry; -import java.util.zip.ZipInputStream; -import java.util.zip.ZipOutputStream; - -import net.i2p.I2PAppContext; -import net.i2p.data.Base64; -import net.i2p.data.DataHelper; -import net.i2p.data.Hash; -import net.i2p.data.SessionKey; -import net.i2p.data.Signature; -import net.i2p.data.SigningPrivateKey; -import net.i2p.util.Log; - -/** - * Securely wrap up an entry and any attachments. Container format:
      - * $format\n
      - * [$key: $val\n]*
      - * \n
      - * Signature: $base64(DSA signature)\n
      - * Size: sizeof(data)\n
      - * [data bytes]
      - * 
      - * - * Required keys: - * BlogKey: base64 of the SHA256 of the blog's public key - * BlogTags: tab delimited list of tags under which this entry should be organized - * BlogEntryId: base10 unique identifier of this entry within the key/path. Typically starts - * as the current day (in unix time, milliseconds) plus further milliseconds for - * each entry within the day. - * - * The data bytes contains zip file, either in the clear or encrypted. If the format - * is encrypted, the BlogPath key will (likely) be encrypted as well. - * - */ -public class EntryContainer { - private List _rawKeys; - private List _rawValues; - private Signature _signature; - private byte _rawData[]; - - private BlogURI _entryURI; - private int _format; - private Entry _entryData; - private Attachment _attachments[]; - private int _completeSize; - - public static final int FORMAT_ZIP_UNENCRYPTED = 0; - public static final int FORMAT_ZIP_ENCRYPTED = 1; - public static final String FORMAT_ZIP_UNENCRYPTED_STR = "syndie.entry.zip-unencrypted"; - public static final String FORMAT_ZIP_ENCRYPTED_STR = "syndie.entry.zip-encrypted"; - - public static final String HEADER_BLOGKEY = "BlogKey"; - public static final String HEADER_BLOGTAGS = "BlogTags"; - public static final String HEADER_ENTRYID = "BlogEntryId"; - - public EntryContainer() { - _rawKeys = new ArrayList(); - _rawValues = new ArrayList(); - _completeSize = -1; - } - - public EntryContainer(BlogURI uri, String tags[], byte smlData[]) { - this(); - _entryURI = uri; - if ( (smlData == null) || (smlData.length <= 0) ) - _entryData = new Entry(""); //null); - else - _entryData = new Entry(DataHelper.getUTF8(smlData)); - setHeader(HEADER_BLOGKEY, Base64.encode(uri.getKeyHash().getData())); - StringBuffer buf = new StringBuffer(); - for (int i = 0; tags != null && i < tags.length; i++) - buf.append(tags[i]).append('\t'); - setHeader(HEADER_BLOGTAGS, buf.toString()); - if (uri.getEntryId() < 0) - uri.setEntryId(System.currentTimeMillis()); 
- setHeader(HEADER_ENTRYID, Long.toString(uri.getEntryId())); - } - - public int getFormat() { return _format; } - - private String readLine(InputStream in) throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream(512); - int i = 0; - while (true) { - int c = in.read(); - if ( (c == (int)'\n') || (c == (int)'\r') ) { - break; - } else if (c == -1) { - if (i == 0) - return null; - else - break; - } else { - baos.write(c); - } - i++; - } - - return DataHelper.getUTF8(baos.toByteArray()); - //BufferedReader r = new BufferedReader(new InputStreamReader(in, "UTF-8"), 1); - //String line = r.readLine(); - //return line; - } - - public void load(InputStream source) throws IOException { - String line = readLine(source); - if (line == null) throw new IOException("No format line in the entry"); - //System.err.println("read container format line [" + line + "]"); - String fmt = line.trim(); - if (FORMAT_ZIP_UNENCRYPTED_STR.equals(fmt)) { - _format = FORMAT_ZIP_UNENCRYPTED; - } else if (FORMAT_ZIP_ENCRYPTED_STR.equals(fmt)) { - _format = FORMAT_ZIP_ENCRYPTED; - } else { - throw new IOException("Unsupported entry format: " + fmt); - } - - while ( (line = readLine(source)) != null) { - //System.err.println("read container header line [" + line + "]"); - line = line.trim(); - int len = line.length(); - if (len <= 0) - break; - int split = line.indexOf(':'); - if (split <= 0) { - throw new IOException("Invalid format of the syndie entry: line=" + line); - } else if (split >= len - 2) { - // foo:\n - String key = line.substring(0, split); - _rawKeys.add(key); - _rawValues.add(""); - } else { - String key = line.substring(0, split); - String val = line.substring(split+1); - _rawKeys.add(key); - _rawValues.add(val); - } - } - - parseHeaders(); - - String sigStr = readLine(source); - //System.err.println("read container signature line [" + line + "]"); - if ( (sigStr == null) || (sigStr.indexOf("Signature:") == -1) ) - throw new IOException("No signature line"); - 
sigStr = sigStr.substring("Signature:".length()+1).trim(); - - _signature = new Signature(Base64.decode(sigStr)); - //System.out.println("Sig: " + _signature.toBase64()); - - line = readLine(source); - //System.err.println("read container size line [" + line + "]"); - if (line == null) - throw new IOException("No size line"); - line = line.trim(); - int dataSize = -1; - try { - int index = line.indexOf("Size:"); - if (index == 0) - dataSize = Integer.parseInt(line.substring("Size:".length()+1).trim()); - else - throw new IOException("Invalid size line"); - } catch (NumberFormatException nfe) { - throw new IOException("Invalid entry size: " + line); - } - - byte data[] = new byte[dataSize]; - int read = DataHelper.read(source, data); - if (read != dataSize) - throw new IOException("Incomplete entry: read " + read + " expected " + dataSize); - - _rawData = data; - } - - public void seal(I2PAppContext ctx, SigningPrivateKey signingKey, SessionKey entryKey) throws IOException { - Log l = ctx.logManager().getLog(getClass()); - if (l.shouldLog(Log.DEBUG)) - l.debug("Sealing " + _entryURI); - if (entryKey == null) - _format = FORMAT_ZIP_UNENCRYPTED; - else - _format = FORMAT_ZIP_ENCRYPTED; - setHeader(HEADER_BLOGKEY, Base64.encode(_entryURI.getKeyHash().getData())); - if (_entryURI.getEntryId() < 0) - _entryURI.setEntryId(ctx.clock().now()); - setHeader(HEADER_ENTRYID, Long.toString(_entryURI.getEntryId())); - _rawData = createRawData(ctx, entryKey); - ByteArrayOutputStream baos = new ByteArrayOutputStream(1024); - write(baos, false); - byte data[] = baos.toByteArray(); - _signature = ctx.dsa().sign(data, signingKey); - } - - private byte[] createRawData(I2PAppContext ctx, SessionKey entryKey) throws IOException { - byte raw[] = createRawData(); - if (entryKey != null) { - byte iv[] = new byte[16]; - ctx.random().nextBytes(iv); - byte rv[] = new byte[raw.length + iv.length]; - ctx.aes().encrypt(raw, 0, rv, iv.length, entryKey, iv, raw.length); - System.arraycopy(iv, 0, 
rv, 0, iv.length); - return rv; - } else { - return raw; - } - } - - private byte[] createRawData() throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - ZipOutputStream out = new ZipOutputStream(baos); - ZipEntry ze = new ZipEntry(ZIP_ENTRY); - byte data[] = null; - if (_entryData.getText() != null) - data = DataHelper.getUTF8(_entryData.getText()); - ze.setTime(0); - out.putNextEntry(ze); - if (data != null) - out.write(data); - out.closeEntry(); - for (int i = 0; (_attachments != null) && (i < _attachments.length); i++) { - ze = new ZipEntry(ZIP_ATTACHMENT_PREFIX + i + ZIP_ATTACHMENT_SUFFIX); - data = _attachments[i].getData(); - out.putNextEntry(ze); - out.write(data); - out.closeEntry(); - ze = new ZipEntry(ZIP_ATTACHMENT_META_PREFIX + i + ZIP_ATTACHMENT_META_SUFFIX); - data = _attachments[i].getRawMetadata(); - out.putNextEntry(ze); - out.write(data); - out.closeEntry(); - } - out.finish(); - out.close(); - return baos.toByteArray(); - } - - public static final String ZIP_ENTRY = "entry.sml"; - public static final String ZIP_ATTACHMENT_PREFIX = "attachmentdata"; - public static final String ZIP_ATTACHMENT_SUFFIX = ".szd"; - public static final String ZIP_ATTACHMENT_META_PREFIX = "attachmentmeta"; - public static final String ZIP_ATTACHMENT_META_SUFFIX = ".szm"; - - public void parseRawData(I2PAppContext ctx) throws IOException { parseRawData(ctx, null); } - public void parseRawData(I2PAppContext ctx, SessionKey zipKey) throws IOException { - int dataOffset = 0; - if (zipKey != null) { - byte iv[] = new byte[16]; - System.arraycopy(_rawData, 0, iv, 0, iv.length); - ctx.aes().decrypt(_rawData, iv.length, _rawData, iv.length, zipKey, iv, _rawData.length - iv.length); - dataOffset = iv.length; - } - - ByteArrayInputStream in = new ByteArrayInputStream(_rawData, dataOffset, _rawData.length - dataOffset); - ZipInputStream zi = new ZipInputStream(in); - Map attachments = new HashMap(); - Map attachmentMeta = new HashMap(); - while (true) 
{ - ZipEntry entry = zi.getNextEntry(); - if (entry == null) - break; - - ByteArrayOutputStream out = new ByteArrayOutputStream(1024); - byte buf[] = new byte[1024]; - int read = -1; - while ( (read = zi.read(buf)) != -1) - out.write(buf, 0, read); - - byte entryData[] = out.toByteArray(); - - String name = entry.getName(); - if (ZIP_ENTRY.equals(name)) { - _entryData = new Entry(DataHelper.getUTF8(entryData)); - } else if (name.startsWith(ZIP_ATTACHMENT_PREFIX)) { - attachments.put(name, (Object)entryData); - } else if (name.startsWith(ZIP_ATTACHMENT_META_PREFIX)) { - attachmentMeta.put(name, (Object)entryData); - } - - //System.out.println("Read entry [" + name + "] with size=" + entryData.length); - } - - if (_entryData == null) - _entryData = new Entry(""); //null); - - _attachments = new Attachment[attachments.size()]; - - for (int i = 0; i < attachments.size(); i++) { - byte data[] = (byte[])attachments.get(ZIP_ATTACHMENT_PREFIX + i + ZIP_ATTACHMENT_SUFFIX); - byte metadata[] = (byte[])attachmentMeta.get(ZIP_ATTACHMENT_META_PREFIX + i + ZIP_ATTACHMENT_META_SUFFIX); - if ( (data != null) && (metadata != null) ) { - _attachments[i] = new Attachment(data, metadata); - } else { - Log l = ctx.logManager().getLog(getClass()); - if (l.shouldLog(Log.WARN)) - l.warn("Unable to get " + i + ": " + data + "/" + metadata); - } - } - - //System.out.println("Attachments: " + _attachments.length + "/" + attachments.size() + ": " + attachments); - } - - public BlogURI getURI() { return _entryURI; } - public static final String NO_TAGS_TAG = "[none]"; - private static final String NO_TAGS[] = new String[] { NO_TAGS_TAG }; - public String[] getTags() { - String tags = getHeader(HEADER_BLOGTAGS); - if ( (tags == null) || (tags.trim().length() <= 0) ) { - return NO_TAGS; - } else { - StringTokenizer tok = new StringTokenizer(tags, "\t"); - String rv[] = new String[tok.countTokens()]; - for (int i = 0; i < rv.length; i++) - rv[i] = tok.nextToken().trim(); - return rv; - } - } - 
public Signature getSignature() { return _signature; } - public Entry getEntry() { return _entryData; } - public Attachment[] getAttachments() { return _attachments; } - - public void setCompleteSize(int bytes) { _completeSize = bytes; } - public int getCompleteSize() { return _completeSize; } - - public String getHeader(String key) { - for (int i = 0; i < _rawKeys.size(); i++) { - String k = (String)_rawKeys.get(i); - if (k.equals(key)) - return (String)_rawValues.get(i); - } - return null; - } - - public Map getHeaders() { - Map rv = new HashMap(_rawKeys.size()); - for (int i = 0; i < _rawKeys.size(); i++) { - String k = (String)_rawKeys.get(i); - String v = (String)_rawValues.get(i); - rv.put(k,v); - } - return rv; - } - - public void setHeader(String name, String val) { - int index = _rawKeys.indexOf(name); - if (index < 0) { - _rawKeys.add(name); - _rawValues.add(val); - } else { - _rawValues.set(index, val); - } - } - - public void addAttachment(byte data[], String name, String description, String mimeType) { - Attachment a = new Attachment(data, name, description, mimeType); - int old = (_attachments == null ? 
0 : _attachments.length); - Attachment nv[] = new Attachment[old+1]; - if (old > 0) - for (int i = 0; i < old; i++) - nv[i] = _attachments[i]; - nv[old] = a; - _attachments = nv; - } - - private void parseHeaders() throws IOException { - String keyHash = getHeader(HEADER_BLOGKEY); - String idVal = getHeader(HEADER_ENTRYID); - - if (keyHash == null) { - throw new IOException("Missing " + HEADER_BLOGKEY + " header"); - } - - long entryId = -1; - if ( (idVal != null) && (idVal.length() > 0) ) { - try { - entryId = Long.parseLong(idVal.trim()); - } catch (NumberFormatException nfe) { - throw new IOException("Invalid format of entryId (" + idVal + ")"); - } - } - - _entryURI = new BlogURI(new Hash(Base64.decode(keyHash)), entryId); - } - - public boolean verifySignature(I2PAppContext ctx, BlogInfo info) { - if (_signature == null) throw new NullPointerException("sig is null"); - if (info == null) throw new NullPointerException("info is null"); - if (info.getKey() == null) throw new NullPointerException("info key is null"); - if (info.getKey().getData() == null) throw new NullPointerException("info key data is null"); - //System.out.println("Verifying " + _entryURI + " for " + info); - - ByteArrayOutputStream out = new ByteArrayOutputStream(_rawData.length + 512); - try { - write(out, false); - byte dat[] = out.toByteArray(); - //System.out.println("Raw data to verify: " + ctx.sha().calculateHash(dat).toBase64() + " sig: " + _signature.toBase64()); - ByteArrayInputStream in = new ByteArrayInputStream(dat); - boolean ok = ctx.dsa().verifySignature(_signature, in, info.getKey()); - if (!ok && info.getPosters() != null) { - for (int i = 0; !ok && i < info.getPosters().length; i++) { - in.reset(); - ok = ctx.dsa().verifySignature(_signature, in, info.getPosters()[i]); - } - } - //System.out.println("Verified ok? 
" + ok + " key: " + info.getKey().calculateHash().toBase64()); - //new Exception("verifying").printStackTrace(); - return ok; - } catch (IOException ioe) { - //System.out.println("Verification failed! " + ioe.getMessage()); - return false; - } - } - - public void write(OutputStream out, boolean includeRealSignature) throws IOException { - StringBuffer buf = new StringBuffer(512); - switch (_format) { - case FORMAT_ZIP_ENCRYPTED: - buf.append(FORMAT_ZIP_ENCRYPTED_STR).append('\n'); - break; - case FORMAT_ZIP_UNENCRYPTED: - buf.append(FORMAT_ZIP_UNENCRYPTED_STR).append('\n'); - break; - default: - throw new IOException("Invalid format " + _format); - } - - for (int i = 0; i < _rawKeys.size(); i++) { - String k = (String)_rawKeys.get(i); - buf.append(k.trim()); - buf.append(": "); - buf.append(((String)_rawValues.get(i)).trim()); - buf.append('\n'); - } - - buf.append('\n'); - buf.append("Signature: "); - if (includeRealSignature) - buf.append(Base64.encode(_signature.getData())); - buf.append("\n"); - buf.append("Size: ").append(_rawData.length).append('\n'); - String str = buf.toString(); - - //System.out.println("Writing raw: \n[" + str + "] / " + I2PAppContext.getGlobalContext().sha().calculateHash(str.getBytes()) + ", raw data: " + I2PAppContext.getGlobalContext().sha().calculateHash(_rawData).toBase64() + "\n"); - out.write(DataHelper.getUTF8(str)); - out.write(_rawData); - } - - public String toString() { return _entryURI.toString(); } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/data/FilteredThreadIndex.java b/apps/syndie/java/src/net/i2p/syndie/data/FilteredThreadIndex.java deleted file mode 100644 index 8cf4a66e9..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/data/FilteredThreadIndex.java +++ /dev/null @@ -1,129 +0,0 @@ -package net.i2p.syndie.data; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; - -import net.i2p.client.naming.PetName; -import 
net.i2p.data.DataFormatException; -import net.i2p.data.Hash; -import net.i2p.syndie.Archive; -import net.i2p.syndie.User; - -/** - * - */ -public class FilteredThreadIndex extends ThreadIndex { - private User _user; - private Archive _archive; - private ThreadIndex _baseIndex; - private Collection _filteredTags; - private List _roots; - private List _ignoredAuthors; - private Collection _filteredAuthors; - private boolean _filterAuthorsByRoot; - - public static final String GROUP_FAVORITE = "Favorite"; - public static final String GROUP_IGNORE = "Ignore"; - - public FilteredThreadIndex(User user, Archive archive, Collection tags, Collection authors, boolean filterAuthorsByRoot) { - super(); - _user = user; - _archive = archive; - _baseIndex = _archive.getIndex().getThreadedIndex(); - _filteredTags = tags; - if (_filteredTags == null) - _filteredTags = Collections.EMPTY_SET; - _filteredAuthors = authors; - if (_filteredAuthors == null) - _filteredAuthors = Collections.EMPTY_SET; - _filterAuthorsByRoot = filterAuthorsByRoot; - - _ignoredAuthors = new ArrayList(); - for (Iterator iter = user.getPetNameDB().iterator(); iter.hasNext(); ) { - PetName pn = (PetName)iter.next(); - if (pn.isMember(GROUP_IGNORE)) { - try { - Hash h = new Hash(); - h.fromBase64(pn.getLocation()); - _ignoredAuthors.add(h); - } catch (DataFormatException dfe) { - // ignore - } - } - } - - filter(); - } - - private void filter() { - _roots = new ArrayList(_baseIndex.getRootCount()); - for (int i = 0; i < _baseIndex.getRootCount(); i++) { - ThreadNode node = _baseIndex.getRoot(i); - if (!isIgnored(node, _ignoredAuthors, _filteredTags, _filteredAuthors, _filterAuthorsByRoot)) - _roots.add(node); - } - } - - private boolean isIgnored(ThreadNode node, List ignoredAuthors, Collection requestedTags, Collection filteredAuthors, boolean filterAuthorsByRoot) { - if (node.getTags().contains(BlogInfoData.TAG)) - return true; // its a fake post, containing some updated metadata for the blog - if 
(filteredAuthors.size() <= 0) { - boolean allAuthorsIgnored = true; - for (Iterator iter = node.getRecursiveAuthorIterator(); iter.hasNext(); ) { - Hash author = (Hash)iter.next(); - if (!ignoredAuthors.contains(author)) { - allAuthorsIgnored = false; - break; - } - } - - if ( (allAuthorsIgnored) && (ignoredAuthors.size() > 0) ) - return true; - } else { - boolean filteredAuthorMatches = false; - for (Iterator iter = filteredAuthors.iterator(); iter.hasNext(); ) { - Hash author = (Hash)iter.next(); - if (filterAuthorsByRoot) { - if (node.getEntry().getKeyHash().equals(author)) { - filteredAuthorMatches = true; - break; - } - } else { - if (node.containsAuthor(author)) { - filteredAuthorMatches = true; - break; - } - } - } - if (!filteredAuthorMatches) - return true; - } - - // ok, author checking passed, so only ignore the thread if tags were specified and the - // thread doesn't contain that tag - - if (requestedTags.size() > 0) { - Collection nodeTags = node.getRecursiveTags(); - for (Iterator iter = requestedTags.iterator(); iter.hasNext(); ) - if (nodeTags.contains(iter.next())) - return false; - // authors we aren't ignoring have posted in the thread, but the user is filtering - // posts by tags, and this thread doesn't include any of those tags - return true; - } else { - // we aren't filtering by tags, and we haven't been refused by the author - // filtering - return false; - } - } - - public int getRootCount() { return _roots.size(); } - public ThreadNode getRoot(int index) { return (ThreadNode)_roots.get(index); } - public ThreadNode getNode(BlogURI uri) { return _baseIndex.getNode(uri); } - public Collection getFilteredTags() { return _filteredTags; } - public Collection getFilteredAuthors() { return _filteredAuthors; } - public boolean getFilterAuthorsByRoot() { return _filterAuthorsByRoot; } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/data/LocalArchiveIndex.java b/apps/syndie/java/src/net/i2p/syndie/data/LocalArchiveIndex.java deleted file mode 
100644 index b14f6156a..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/data/LocalArchiveIndex.java +++ /dev/null @@ -1,112 +0,0 @@ -package net.i2p.syndie.data; - -import java.util.Collections; -import java.util.Comparator; -import java.util.Properties; -import java.util.Set; -import java.util.TreeSet; - -import net.i2p.I2PAppContext; -import net.i2p.data.DataHelper; -import net.i2p.data.Hash; -import net.i2p.syndie.Archive; -import net.i2p.util.Log; - -/** - * writable archive index (most are readonly) - */ -public class LocalArchiveIndex extends ArchiveIndex { - private Log _log; - public LocalArchiveIndex(I2PAppContext ctx) { - super(ctx, false); - _log = ctx.logManager().getLog(getClass()); - } - - public void setGeneratedOn(long when) { _generatedOn = when; } - - public void setVersion(String v) { _version = v; } - public void setHeaders(Properties headers) { _headers = headers; } - public void setHeader(String key, String val) { _headers.setProperty(key, val); } - public void setAllBlogs(int count) { _allBlogs = count; } - public void setNewBlogs(int count) { _newBlogs = count; } - public void setAllEntries(int count) { _allEntries = count; } - public void setNewEntries(int count) { _newEntries = count; } - public void setTotalSize(long bytes) { _totalSize = bytes; } - public void setNewSize(long bytes) { _newSize = bytes; } - - public void addBlog(Hash key, String tag, long lastUpdated) { - for (int i = 0; i < _blogs.size(); i++) { - BlogSummary s = (BlogSummary)_blogs.get(i); - if ( (s.blog.equals(key)) && (s.tag.equals(tag)) ) { - s.lastUpdated = Math.max(s.lastUpdated, lastUpdated); - return; - } - } - BlogSummary summary = new ArchiveIndex.BlogSummary(); - summary.blog = key; - summary.tag = tag; - summary.lastUpdated = lastUpdated; - _blogs.add(summary); - } - - public void addBlogEntry(Hash key, String tag, String entry) { - for (int i = 0; i < _blogs.size(); i++) { - BlogSummary summary = (BlogSummary)_blogs.get(i); - if (summary.blog.equals(key) 
&& (summary.tag.equals(tag)) ) { - long entryId = Archive.getEntryIdFromIndexName(entry); - int kb = Archive.getSizeFromIndexName(entry); - if (_log.shouldLog(Log.INFO)) - _log.info("Adding entry " + entryId + ", size=" + kb + "KB [" + entry + "]"); - EntrySummary entrySummary = new EntrySummary(new BlogURI(key, entryId), kb); - for (int j = 0; j < summary.entries.size(); j++) { - EntrySummary cur = (EntrySummary)summary.entries.get(j); - if (cur.entry.equals(entrySummary.entry)) - return; - } - summary.entries.add(entrySummary); - return; - } - } - } - - public void addNewestBlog(Hash key) { - if (!_newestBlogs.contains(key)) - _newestBlogs.add(key); - } - public void addNewestEntry(BlogURI entry) { - if (!_newestEntries.contains(entry)) - _newestEntries.add(entry); - } - - public void addReply(BlogURI parent, BlogURI reply) { - Set replies = (Set)_replies.get(parent); - if (replies == null) { - replies = Collections.synchronizedSet(new TreeSet(BlogURIComparator.HIGHEST_ID_FIRST)); - _replies.put(parent, replies); - } - replies.add(reply); - //System.err.println("Adding reply to " + parent + " from child " + reply + " (# replies: " + replies.size() + ")"); - } - - private static class BlogURIComparator implements Comparator { - public static final BlogURIComparator HIGHEST_ID_FIRST = new BlogURIComparator(true); - public static final BlogURIComparator HIGHEST_ID_LAST = new BlogURIComparator(false); - private boolean _highestFirst; - public BlogURIComparator(boolean highestFirst) { - _highestFirst = highestFirst; - } - - public int compare(Object lhs, Object rhs) { - if ( (lhs == null) || !(lhs instanceof BlogURI) ) return 1; - if ( (rhs == null) || !(rhs instanceof BlogURI) ) return -1; - BlogURI l = (BlogURI)lhs; - BlogURI r = (BlogURI)rhs; - if (l.getEntryId() > r.getEntryId()) - return (_highestFirst ? 1 : -1); - else if (l.getEntryId() < r.getEntryId()) - return (_highestFirst ? 
-1 : 1); - else - return DataHelper.compareTo(l.getKeyHash().getData(), r.getKeyHash().getData()); - } - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/data/SafeURL.java b/apps/syndie/java/src/net/i2p/syndie/data/SafeURL.java deleted file mode 100644 index 31fec23f2..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/data/SafeURL.java +++ /dev/null @@ -1,32 +0,0 @@ -package net.i2p.syndie.data; - -/** - * - */ -public class SafeURL { - private String _schema; - private String _location; - private String _name; - private String _description; - - public SafeURL(String raw) { - parse(raw); - } - - private void parse(String raw) { - if (raw != null) { - int index = raw.indexOf("://"); - if ( (index <= 0) || (index + 1 >= raw.length()) ) - return; - _schema = raw.substring(0, index); - _location = raw.substring(index+3); - _location.replace('>', '_'); - _location.replace('<', '^'); - } - } - - public String getSchema() { return _schema; } - public String getLocation() { return _location; } - - public String toString() { return _schema + "://" + _location; } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/data/ThreadIndex.java b/apps/syndie/java/src/net/i2p/syndie/data/ThreadIndex.java deleted file mode 100644 index 810dceb1e..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/data/ThreadIndex.java +++ /dev/null @@ -1,52 +0,0 @@ -package net.i2p.syndie.data; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * List of threads, ordered with the most recently updated thread first. - * Each node in the tree summarizes everything underneath it as well. 
- * - */ -public class ThreadIndex { - /** ordered list of threads, with most recent first */ - private List _roots; - /** map of BlogURI to ThreadNode */ - private Map _nodes; - - protected ThreadIndex() { - // no need to synchronize, since the thread index doesn't change after - // its first built - _roots = new ArrayList(); - _nodes = new HashMap(64); - } - - public int getRootCount() { return _roots.size(); } - public ThreadNode getRoot(int index) { return (ThreadNode)_roots.get(index); } - public ThreadNode getNode(BlogURI uri) { return (ThreadNode)_nodes.get(uri); } - /** - * get the root of the thread that the given uri is located in, or -1. - * The implementation depends only on getRoot/getNode/getRootCount and not on the - * data structures, so should be safe for subclasses who adjust those methods - * - */ - public int getRoot(BlogURI uri) { - ThreadNode node = getNode(uri); - if (node == null) return -1; - while (node.getParent() != null) - node = node.getParent(); - for (int i = 0; i < getRootCount(); i++) { - ThreadNode cur = getRoot(i); - if (cur.equals(node)) - return i; - } - return -1; - } - - /** call this in the right order - most recently updated thread first */ - protected void addRoot(ThreadNode node) { _roots.add(node); } - /** invocation order here doesn't matter */ - protected void addEntry(BlogURI uri, ThreadNode node) { _nodes.put(uri, node); } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/data/ThreadNode.java b/apps/syndie/java/src/net/i2p/syndie/data/ThreadNode.java deleted file mode 100644 index 437772d71..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/data/ThreadNode.java +++ /dev/null @@ -1,36 +0,0 @@ -package net.i2p.syndie.data; - -import java.util.Collection; -import java.util.Iterator; - -import net.i2p.data.Hash; - -/** - * - */ -public interface ThreadNode { - /** current post */ - public BlogURI getEntry(); - /** how many direct replies there are to the current entry */ - public int getChildCount(); - /** the given 
direct reply */ - public ThreadNode getChild(int index); - /** parent this is actually a reply to */ - public BlogURI getParentEntry(); - /** parent in the tree, maybe not a direct parent, but the closest one */ - public ThreadNode getParent(); - /** true if this entry, or any child, is written by the given author */ - public boolean containsAuthor(Hash author); - /** true if this node, or any child, includes the given URI */ - public boolean containsEntry(BlogURI uri); - /** list of tags (String) of this node only */ - public Collection getTags(); - /** list of tags (String) of this node or any children in the tree */ - public Collection getRecursiveTags(); - /** date of the most recent post, recursive */ - public long getMostRecentPostDate(); - /** author of the most recent post, recurisve */ - public Hash getMostRecentPostAuthor(); - /** walk across the authors of the entire thread */ - public Iterator getRecursiveAuthorIterator(); -} diff --git a/apps/syndie/java/src/net/i2p/syndie/data/TransparentArchiveIndex.java b/apps/syndie/java/src/net/i2p/syndie/data/TransparentArchiveIndex.java deleted file mode 100644 index 85d15996d..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/data/TransparentArchiveIndex.java +++ /dev/null @@ -1,85 +0,0 @@ -package net.i2p.syndie.data; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.util.List; -import java.util.Properties; -import java.util.Set; - -import net.i2p.I2PAppContext; -import net.i2p.data.Hash; -import net.i2p.syndie.BlogManager; - -/** - * Simple read-only summary of an archive, proxied to the BlogManager's instance - */ -public class TransparentArchiveIndex extends ArchiveIndex { - public TransparentArchiveIndex() { super(I2PAppContext.getGlobalContext(), false); } - - private static ArchiveIndex index() { return BlogManager.instance().getArchive().getIndex(); } - - public String getVersion() { return index().getVersion(); } - public Properties getHeaders() { return 
index().getHeaders(); } - public int getAllBlogs() { return index().getAllBlogs(); } - public int getNewBlogs() { return index().getNewBlogs(); } - public int getAllEntries() { return index().getAllEntries(); } - public int getNewEntries() { return index().getNewEntries(); } - public long getTotalSize() { return index().getTotalSize(); } - public long getNewSize() { return index().getNewSize(); } - public long getGeneratedOn() { return index().getGeneratedOn(); } - public ThreadIndex getThreadedIndex() { return index().getThreadedIndex(); } - - public String getNewSizeStr() { return index().getNewSizeStr(); } - public String getTotalSizeStr() { return index().getTotalSizeStr(); } - - /** how many blogs/tags are indexed */ - public int getIndexBlogs() { return index().getIndexBlogs(); } - /** get the blog used for the given blog/tag pair */ - public Hash getBlog(int index) { return index().getBlog(index); } - /** get the tag used for the given blog/tag pair */ - public String getBlogTag(int index) { return index().getBlogTag(index); } - /** get the highest entry ID for the given blog/tag pair */ - public long getBlogLastUpdated(int index) { return index().getBlogLastUpdated(index); } - /** get the entry count for the given blog/tag pair */ - public int getBlogEntryCount(int index) { return index().getBlogEntryCount(index); } - /** get the entry from the given blog/tag pair */ - public BlogURI getBlogEntry(int index, int entryIndex) { return index().getBlogEntry(index, entryIndex); } - /** get the raw entry size (including attachments) from the given blog/tag pair */ - public long getBlogEntrySizeKB(int index, int entryIndex) { return index().getBlogEntrySizeKB(index, entryIndex); } - public boolean getEntryIsKnown(BlogURI uri) { return index().getEntryIsKnown(uri); } - public long getBlogEntrySizeKB(BlogURI uri) { return index().getBlogEntrySizeKB(uri); } - public Set getBlogEntryTags(BlogURI uri) { return index().getBlogEntryTags(uri); } - /** how many 'new' blogs 
are listed */ - public int getNewestBlogCount() { return index().getNewestBlogCount(); } - public Hash getNewestBlog(int index) { return index().getNewestBlog(index); } - /** how many 'new' entries are listed */ - public int getNewestBlogEntryCount() { return index().getNewestBlogEntryCount(); } - public BlogURI getNewestBlogEntry(int index) { return index().getNewestBlogEntry(index); } - - /** list of locally known tags (String) under the given blog */ - public List getBlogTags(Hash blog) { return index().getBlogTags(blog); } - /** list of unique blogs locally known (set of Hash) */ - public Set getUniqueBlogs() { return index().getUniqueBlogs(); } - public void setLocation(String location) { return; } - public void setIsLocal(String val) { return; } - public void load(File location) throws IOException { return; } - /** load up the index from an archive.txt */ - public void load(InputStream index) throws IOException { return; } - - /** - * Dig through the index for BlogURIs matching the given criteria, ordering the results by - * their own entryIds. 
- * - * @param out where to store the matches - * @param blog if set, what blog key must the entries be under - * @param tag if set, what tag must the entry be in - * - */ - public void selectMatchesOrderByEntryId(List out, Hash blog, String tag) { - index().selectMatchesOrderByEntryId(out, blog, tag); - } - - /** export the index into an archive.txt */ - public String toString() { return index().toString(); } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/sml/Address.java b/apps/syndie/java/src/net/i2p/syndie/sml/Address.java deleted file mode 100644 index c8caac56d..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/sml/Address.java +++ /dev/null @@ -1,16 +0,0 @@ -package net.i2p.syndie.sml; - -import net.i2p.data.DataHelper; - -/** contains intermediary rendering state */ -class Address { - public String name; - public String schema; - public String location; - public String protocol; - public int hashCode() { return -1; } - public boolean equals(Object o) { - Address a = (Address)o; - return DataHelper.eq(schema, a.schema) && DataHelper.eq(location, a.location) && DataHelper.eq(protocol, a.protocol) && DataHelper.eq(name, a.name); - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/sml/ArchiveRef.java b/apps/syndie/java/src/net/i2p/syndie/sml/ArchiveRef.java deleted file mode 100644 index 1b3df20e4..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/sml/ArchiveRef.java +++ /dev/null @@ -1,18 +0,0 @@ -package net.i2p.syndie.sml; - -import net.i2p.data.DataHelper; - -/** contains intermediary rendering state */ -class ArchiveRef { - public String name; - public String description; - public String locationSchema; - public String location; - public int hashCode() { return -1; } - public boolean equals(Object o) { - ArchiveRef a = (ArchiveRef)o; - return DataHelper.eq(name, a.name) && DataHelper.eq(description, a.description) - && DataHelper.eq(locationSchema, a.locationSchema) - && DataHelper.eq(location, a.location); - } -} diff --git 
a/apps/syndie/java/src/net/i2p/syndie/sml/Blog.java b/apps/syndie/java/src/net/i2p/syndie/sml/Blog.java deleted file mode 100644 index 0eb4c14b0..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/sml/Blog.java +++ /dev/null @@ -1,20 +0,0 @@ -package net.i2p.syndie.sml; - -import java.util.List; - -import net.i2p.data.DataHelper; - -/** contains intermediary rendering state */ -class Blog { - public String name; - public String hash; - public String tag; - public long entryId; - public List locations; - public int hashCode() { return -1; } - public boolean equals(Object o) { - Blog b = (Blog)o; - return DataHelper.eq(hash, b.hash) && DataHelper.eq(tag, b.tag) && DataHelper.eq(name, b.name) - && DataHelper.eq(entryId, b.entryId) && DataHelper.eq(locations, b.locations); - } -} \ No newline at end of file diff --git a/apps/syndie/java/src/net/i2p/syndie/sml/BlogPostInfoRenderer.java b/apps/syndie/java/src/net/i2p/syndie/sml/BlogPostInfoRenderer.java deleted file mode 100644 index 98a1a3ea5..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/sml/BlogPostInfoRenderer.java +++ /dev/null @@ -1,466 +0,0 @@ -package net.i2p.syndie.sml; - -import java.io.IOException; -import java.io.Writer; -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.StringTokenizer; - -import net.i2p.I2PAppContext; -import net.i2p.client.naming.PetName; -import net.i2p.client.naming.PetNameDB; -import net.i2p.data.Base64; -import net.i2p.data.Hash; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.User; -import net.i2p.syndie.data.Attachment; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.EntryContainer; -import net.i2p.syndie.data.SafeURL; -import net.i2p.syndie.web.AddressesServlet; -import net.i2p.syndie.web.ViewBlogServlet; -import net.i2p.util.Log; 
- -/** - * render the metadata of a post for display in the left nav of the blog view - * (showing the attachments, etc). Most of this is just duplicated from the HTMLRenderer - * - */ -public class BlogPostInfoRenderer extends EventReceiverImpl { - private Log _log; - protected SMLParser _parser; - protected Writer _out; - protected User _user; - protected Archive _archive; - protected EntryContainer _entry; - protected int _lastNewlineAt; - protected Map _headers; - protected List _addresses; - protected List _links; - protected List _blogs; - protected List _archives; - protected StringBuffer _bodyBuffer; - - public BlogPostInfoRenderer(I2PAppContext ctx) { - super(ctx); - _log = ctx.logManager().getLog(getClass()); - _parser = new SMLParser(ctx); - } - - public void render(User user, Archive archive, EntryContainer entry, Writer out) throws IOException { - if (entry == null) - return; - render(user, archive, entry, entry.getEntry().getText(), out); - } - public void render(User user, Archive archive, EntryContainer entry, String rawSML, Writer out) throws IOException { - prepare(user, archive, entry, rawSML, out); - _out.write(_bodyBuffer.toString()); - } - protected void prepare(User user, Archive archive, EntryContainer entry, String rawSML, Writer out) throws IOException { - _user = user; - _archive = archive; - _entry = entry; - _out = out; - _headers = new HashMap(); - _bodyBuffer = new StringBuffer(1024); - _addresses = new ArrayList(); - _links = new ArrayList(); - _blogs = new ArrayList(); - _archives = new ArrayList(); - _parser.parse(rawSML, this); - } - - public void receiveEnd() { - BlogURI postURI = null; - Attachment attachments[] = null; - if (_entry != null) { - attachments = _entry.getAttachments(); - postURI = _entry.getURI(); - } - renderAttachments(postURI, attachments); - renderBlogs(postURI); - renderLinks(postURI); - renderAddresses(postURI); - renderArchives(postURI); - } - - private void renderAttachments(BlogURI postURI, Attachment 
attachments[]) { - renderAttachments(postURI, "syndieBlogPostInfo", attachments, _bodyBuffer); - } - public static void renderAttachments(BlogURI postURI, String baseStyleName, Attachment attachments[], StringBuffer out) { - if ( (attachments != null) && (attachments.length > 0) ) { - out.append("
      \n"); - out.append("Attachments\n
        "); - - // for each attachment: - //
      1. $name\n($size of type $type)
      2. - for (int i = 0; i < attachments.length; i++) { - out.append("
      3. "); - String name = attachments[i].getName(); - if ( (name == null) && (name.trim().length() <= 0) ) - name = "Attachment " + i; - - if (postURI != null) { - out.append(""); - } - out.append(HTMLRenderer.sanitizeString(name, 40)); - if (postURI != null) - out.append(""); - - out.append("\n("); - int bytes = attachments[i].getDataLength(); - if (bytes > 10*1024*1024) - out.append(bytes/(1024*1024)).append("MBytes"); - else if (bytes > 10*1024) - out.append(bytes/(10*1024)).append("KBytes"); - else - out.append(bytes).append("Bytes"); - - String type = attachments[i].getMimeType(); - if (type != null) { - if ("application/octet-stream".equals(type)) { - out.append(", binary"); - } else { - int split = type.lastIndexOf('/'); - if (split > 0) - out.append(", ").append(HTMLRenderer.sanitizeString(type.substring(split+1), 30)); - else - out.append(", ").append(HTMLRenderer.sanitizeString(type, 30)); - } - } - - out.append(")"); - - String desc = attachments[i].getDescription(); - if ( (desc != null) && (desc.trim().length() > 0) ) - out.append("
        \n").append(HTMLRenderer.sanitizeString(desc, 120)); - - out.append("
      4. \n"); - } - out.append("
      \n"); - out.append("
      \n"); - } - } - - private void renderBlogs(BlogURI postURI) { - renderBlogs(postURI, _user, "syndieBlogPostInfo", _blogs, _bodyBuffer); - } - public static void renderBlogs(BlogURI postURI, User user, String baseStyleName, List blogs, StringBuffer out) { - if ( (blogs != null) && (blogs.size() > 0) ) { - out.append("
      \n"); - out.append("Blogs\n
        "); - - // for each blog ref: - //
      1. $name\n ? :) :(
      2. - for (int i = 0; i < blogs.size(); i++) { - out.append("
      3. "); - Blog blog = (Blog)blogs.get(i); - PetNameDB db = user.getPetNameDB(); - PetName pn = db.getByLocation(blog.hash); - - if ( (blog.entryId > 0) && (blog.hash != null) ) { - // view a specific post in their blog (jumping to their blog, rather than keeping the - // current blog's formatting... is that the right thing to do?) - out.append(""); - if (pn != null) - out.append(HTMLRenderer.sanitizeString(pn.getName())); - else - out.append(HTMLRenderer.sanitizeTagParam(blog.name)); - out.append(" on ").append(getEntryDate(blog.entryId)); - out.append(""); - } else if (blog.hash != null) { - // view their full blog - out.append(""); - - if (pn != null) { - // we already have a petname for this user - out.append(pn.getName()).append(""); - /* "); - _bodyBuffer.append("?"); - */ - } else { - // this name is already in the addressbook with another location, - // generate a new nym - while ( (pn = db.getByName(blog.name)) != null) - blog.name = blog.name + "."; - out.append(HTMLRenderer.sanitizeTagParam(blog.name)).append(""); - /* "); - _bodyBuffer.append("?"); - */ - // should probably add on some inline-bookmarking support, but we'd need requestURL for that - } - } - out.append("
      4. \n"); - } - out.append("
      \n"); - } - } - - private static final SimpleDateFormat _dateFormat = new SimpleDateFormat("yyyy/MM/dd", Locale.UK); - private static final String getEntryDate(long when) { - synchronized (_dateFormat) { - try { - String str = _dateFormat.format(new Date(when)); - long dayBegin = _dateFormat.parse(str).getTime(); - return str + " [" + (when - dayBegin) + "]"; - } catch (ParseException pe) { - // wtf - return "unknown"; - } - } - } - - private void renderLinks(BlogURI postURI) { - renderLinks(postURI, _user, "syndieBlogPostInfo", _links, _bodyBuffer); - } - public static void renderLinks(BlogURI postURI, User user, String baseStyleName, List links, StringBuffer out) { - if ( (links != null) && (links.size() > 0) ) { - out.append("
      \n"); - out.append("Links\n
        "); - - // for each link: - //
      1. $location
      2. - for (int i = 0; i < links.size(); i++) { - out.append("
      3. "); - - Link l = (Link)links.get(i); - String schema = l.schema; - out.append(""); - out.append(HTMLRenderer.sanitizeString(l.location, 30)).append(" ("); - out.append(HTMLRenderer.sanitizeString(l.schema, 6)).append(")"); - - out.append("
      4. \n"); - } - - out.append("
      \n"); - } - } - - private void renderAddresses(BlogURI postURI) { - renderAddresses(postURI, _user, "syndieBlogPostInfo", _addresses, _bodyBuffer); - } - public static void renderAddresses(BlogURI postURI, User user, String baseStyleName, List addresses, StringBuffer out) { - if ( (addresses != null) && (addresses.size() > 0) ) { - out.append("
      \n"); - out.append("Addresses\n
        "); - - // for each address: - //
      1. $name
      2. - for (int i = 0; i < addresses.size(); i++) { - out.append("
      3. "); - Address a = (Address)addresses.get(i); - importAddress(a, user); - PetName pn = null; - if (user != null) - pn = user.getPetNameDB().getByLocation(a.location); - if (pn != null) { - out.append(HTMLRenderer.sanitizeString(pn.getName())); - } else { - out.append("").append(HTMLRenderer.sanitizeString(a.name)).append(""); - } - out.append("
      4. \n"); - } - - out.append("
      \n"); - } - } - - public static void importAddress(Address a, User user) { - if (user != null && user.getImportAddresses() && !user.getPetNameDB().containsName(a.name)) { - PetName pn = new PetName(a.name, a.schema, a.protocol, a.location); - user.getPetNameDB().add(pn); - try { - user.getPetNameDB().store(user.getAddressbookLocation()); - } catch (IOException ioe) { - //ignore - } - } - if (BlogManager.instance().getImportAddresses() - && I2PAppContext.getGlobalContext().namingService().lookup(a.name) == null - && a.schema.equalsIgnoreCase("i2p")) { - PetName pn = new PetName(a.name, a.schema, a.protocol, a.location); - I2PAppContext.getGlobalContext().petnameDb().add(pn); - try { - I2PAppContext.getGlobalContext().petnameDb().store(); - } catch (IOException ioe) { - //ignore - } - } - } - - private void renderArchives(BlogURI postURI) { - renderArchives(postURI, _user, "syndieBlogPostInfo", _archives, _bodyBuffer); - } - public static void renderArchives(BlogURI postURI, User user, String baseStyleName, List archives, StringBuffer out) { - if ( (archives != null) && (archives.size() > 0) ) { - out.append("
      \n"); - out.append("Archives\n
        "); - - // for each archive: - //
      1. $name :)
        $description
      2. - for (int i = 0; i < archives.size(); i++) { - out.append("
      3. "); - ArchiveRef a = (ArchiveRef)archives.get(i); - boolean authRemote = BlogManager.instance().authorizeRemote(user); - if (authRemote) { - out.append(""); - } - - out.append(HTMLRenderer.sanitizeString(a.name)); - - if (authRemote) { - out.append(""); - } - - if ( (a.description != null) && (a.description.trim().length() > 0) ) - out.append(" ").append(HTMLRenderer.sanitizeString(a.description, 64)); - /* - _bodyBuffer.append(" bookmark it"); - */ - out.append("
      4. \n"); - } - - out.append("
      \n"); - } - } - - public void receiveHeader(String header, String value) { - //System.err.println("Receive header [" + header + "] = [" + value + "]"); - if (HTMLRenderer.HEADER_PETNAME.equals(header)) { - StringTokenizer tok = new StringTokenizer(value, "\t\n"); - if (tok.countTokens() != 4) - return; - String name = tok.nextToken(); - String net = tok.nextToken(); - String proto = tok.nextToken(); - String loc = tok.nextToken(); - Address a = new Address(); - a.name = HTMLRenderer.sanitizeString(name, false); - a.schema = HTMLRenderer.sanitizeString(net, false); - a.protocol = HTMLRenderer.sanitizeString(proto, false); - a.location = HTMLRenderer.sanitizeString(loc, false); - _addresses.add(a); - } - } - - public void receiveHeaderEnd() { } - - public void receivePlain(String text) { } - - public void receiveBold(String text) { } - public void receiveItalic(String text) { } - public void receiveUnderline(String text) { } - public void receiveHR() { } - public void receiveH1(String body) { } - public void receiveH2(String body) { } - public void receiveH3(String body) { } - public void receiveH4(String body) { } - public void receiveH5(String body) { } - public void receivePre(String body) { } - public void receiveQuote(String text, String whoQuoted, String quoteLocationSchema, String quoteLocation) { } - public void receiveCode(String text, String codeLocationSchema, String codeLocation) { } - public void receiveImage(String alternateText, int attachmentId) { } - public void receiveCut(String summaryText) { } - public void receiveNewline() { } - public void receiveLT() { } - public void receiveGT() { } - public void receiveBegin() {} - public void receiveLeftBracket() { } - public void receiveRightBracket() { } - - /** - * when we see a link to a blog, we may want to: - * = view the blog entry - * = view all entries in that blog - * = view all entries in that blog with the given tag - * = view the blog's metadata - * = [fetch the blog from other locations] 
- * = [add the blog's locations to our list of known locations] - * = [shitlist the blog] - * = [add the blog to one of our groups] - * - * [blah] implies *later*. - */ - public void receiveBlog(String name, String hash, String tag, long entryId, List locations, String description) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receiving the blog: " + name + "/" + hash + "/" + tag + "/" + entryId +"/" + locations + ": "+ description); - byte blogData[] = Base64.decode(hash); - if ( (blogData == null) || (blogData.length != Hash.HASH_LENGTH) ) - return; - - Blog b = new Blog(); - b.name = name; - b.hash = hash; - b.tag = tag; - b.entryId = entryId; - b.locations = locations; - if (!_blogs.contains(b)) - _blogs.add(b); - } - - public void receiveArchive(String name, String description, String locationSchema, String location, - String postingKey, String anchorText) { - ArchiveRef a = new ArchiveRef(); - a.name = name; - a.description = description; - a.locationSchema = locationSchema; - a.location = location; - if (!_archives.contains(a)) - _archives.add(a); - } - - public void receiveLink(String schema, String location, String text) { - Link l = new Link(); - l.schema = schema; - l.location = location; - if (!_links.contains(l)) - _links.add(l); - } - - public void receiveAddress(String name, String schema, String protocol, String location, String anchorText) { - Address a = new Address(); - a.name = name; - a.schema = schema; - a.location = location; - a.protocol = protocol; - if (!_addresses.contains(a)) - _addresses.add(a); - } - - public void receiveAttachment(int id, int thumb, String anchorText) { } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/sml/BlogRenderer.java b/apps/syndie/java/src/net/i2p/syndie/sml/BlogRenderer.java deleted file mode 100644 index 55b19f5f4..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/sml/BlogRenderer.java +++ /dev/null @@ -1,244 +0,0 @@ -package net.i2p.syndie.sml; - -import java.io.IOException; -import java.io.Writer; 
- -import net.i2p.I2PAppContext; -import net.i2p.client.naming.PetName; -import net.i2p.data.Base64; -import net.i2p.data.Hash; -import net.i2p.syndie.Archive; -import net.i2p.syndie.User; -import net.i2p.syndie.data.Attachment; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.BlogInfoData; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.EntryContainer; -import net.i2p.syndie.data.ThreadNode; -import net.i2p.syndie.web.PostServlet; -import net.i2p.syndie.web.ViewBlogServlet; - -/** - * Renders posts for display within the blog view - * - */ -public class BlogRenderer extends HTMLRenderer { - private BlogInfo _blog; - private BlogInfoData _data; - private boolean _isComment; - public BlogRenderer(I2PAppContext ctx, BlogInfo info, BlogInfoData data) { - super(ctx); - _blog = info; - _data = data; - _isComment = false; - } - - public void renderPost(User user, Archive archive, EntryContainer entry, Writer out, boolean cutBody, boolean showImages) throws IOException { - _isComment = false; - render(user, archive, entry, out, cutBody, showImages); - } - public void renderComment(User user, Archive archive, EntryContainer entry, Writer out) throws IOException { - _isComment = true; - render(user, archive, entry, out, false, true); - } - - public void receiveHeaderEnd() { - _preBodyBuffer.append("

      \n"); - _preBodyBuffer.append("
      \n"); - _preBodyBuffer.append("
      "); - String subject = (String)_headers.get(HEADER_SUBJECT); - if (subject == null) - subject = "[no subject]"; - String tags[] = _entry.getTags(); - for (int i = 0; (tags != null) && (i < tags.length); i++) - displayTag(_preBodyBuffer, _data, tags[i]); - _preBodyBuffer.append(getSpan("subjectText")).append(sanitizeString(subject)).append("
      \n"); - - String name = getAuthor(); - String when = getEntryDate(_entry.getURI().getEntryId()); - _preBodyBuffer.append("
      Posted by: "); - _preBodyBuffer.append(sanitizeString(name)); - _preBodyBuffer.append(" on "); - _preBodyBuffer.append(when); - _preBodyBuffer.append("
      \n"); - _preBodyBuffer.append("
      \n"); - - _preBodyBuffer.append("
      \n"); - } - - public void receiveEnd() { - _postBodyBuffer.append("
      \n"); - _postBodyBuffer.append("
      \n"); - int childCount = getChildCount(_archive.getIndex().getThreadedIndex().getNode(_entry.getURI())); - if ( (_cutReached || childCount > 0) && (_cutBody) ) { - _postBodyBuffer.append("Read more "); - } - if (childCount > 0) { - _postBodyBuffer.append(childCount).append(" "); - if (childCount > 1) - _postBodyBuffer.append(" comments already, "); - else - _postBodyBuffer.append(" comment already, "); - } - _postBodyBuffer.append("Leave a comment\n"); - if (_isComment) - renderCommentMeta(); - _postBodyBuffer.append("
      \n"); - _postBodyBuffer.append("
      \n\n"); - } - - private void renderCommentMeta() { - BlogURI postURI = null; - Attachment attachments[] = null; - if (_entry != null) { - postURI = _entry.getURI(); - attachments = _entry.getAttachments(); - } - BlogPostInfoRenderer.renderAttachments(postURI, "syndieBlogCommentInfo", attachments, _postBodyBuffer); - BlogPostInfoRenderer.renderBlogs(postURI, _user, "syndieBlogCommentInfo", _blogs, _postBodyBuffer); - BlogPostInfoRenderer.renderLinks(postURI, _user, "syndieBlogCommentInfo", _links, _postBodyBuffer); - BlogPostInfoRenderer.renderAddresses(postURI, _user, "syndieBlogCommentInfo", _addresses, _postBodyBuffer); - BlogPostInfoRenderer.renderArchives(postURI, _user, "syndieBlogCommentInfo", _archives, _postBodyBuffer); - } - - private int getChildCount(ThreadNode node) { - int nodes = 0; - for (int i = 0; i < node.getChildCount(); i++) { - nodes++; - nodes += getChildCount(node.getChild(i)); - } - return nodes; - } - - private String getAuthor() { - PetName pn = null; - if ( (_entry != null) && (_user != null) ) - pn = _user.getPetNameDB().getByLocation(_entry.getURI().getKeyHash().toBase64()); - if (pn != null) - return pn.getName(); - BlogInfo info = null; - if (_entry != null) { - info = _archive.getBlogInfo(_entry.getURI()); - if (info != null) { - String str = info.getProperty(BlogInfo.NAME); - if (str != null) - return str; - } - return _entry.getURI().getKeyHash().toBase64().substring(0,6); - } else { - return "No name?"; - } - } - - private void displayTag(StringBuffer buf, BlogInfoData data, String tag) { - //buf.append(""); - if ( (tag == null) || ("[none]".equals(tag) ) ) - return; - buf.append("\"");"); - //buf.append(""); - buf.append(" "); - } - - public String getMetadataURL(Hash blog) { return ThreadedHTMLRenderer.buildProfileURL(blog); } - private String getTagIconURL(String tag) { - return "viewicon.jsp?tag=" + Base64.encode(tag) + "&" + - ViewBlogServlet.PARAM_BLOG + "=" + _blog.getKey().calculateHash().toBase64(); - } - - private 
String getReplyURL() { - String subject = (String)_headers.get(HEADER_SUBJECT); - if (subject != null) { - if (!subject.startsWith("re:")) - subject = "re: " + subject; - } else { - subject = "re: "; - } - return "post.jsp?" + PostServlet.PARAM_PARENT + "=" - + Base64.encode(_entry.getURI().getKeyHash().toBase64() + "/" + _entry.getURI().getEntryId()) + "&" - + PostServlet.PARAM_SUBJECT + "=" + sanitizeTagParam(subject) + "&"; - } - - protected String getEntryURL() { return getEntryURL(_user != null ? _user.getShowImages() : true); } - protected String getEntryURL(boolean showImages) { - return getEntryURL(_entry, _blog, showImages); - } - static String getEntryURL(EntryContainer entry, BlogInfo blog, boolean showImages) { - if (entry == null) return "unknown"; - return getEntryURL(entry.getURI(), blog, null, showImages); - } - static String getEntryURL(BlogURI entry, BlogInfo blog, BlogURI comment, boolean showImages) { - if (entry == null) return "unknown"; - if (comment == null) { - return "blog.jsp?" - + ViewBlogServlet.PARAM_BLOG + "=" + (blog != null ? blog.getKey().calculateHash().toBase64() : "") + "&" - + ViewBlogServlet.PARAM_ENTRY + "=" - + Base64.encode(entry.getKeyHash().getData()) + '/' + entry.getEntryId(); - } else { - return "blog.jsp?" - + ViewBlogServlet.PARAM_BLOG + "=" + (blog != null ? blog.getKey().calculateHash().toBase64() : "") + "&" - + ViewBlogServlet.PARAM_ENTRY + "=" - + Base64.encode(entry.getKeyHash().getData()) + '/' + entry.getEntryId() - + '#' + Base64.encode(comment.getKeyHash().getData()) + '/' + comment.getEntryId(); - } - } - - protected String getAttachmentURLBase() { - return "invalid"; - } - - protected String getAttachmentURL(int id) { - if (_entry == null) return "unknown"; - return "blog.jsp?" 
- + ViewBlogServlet.PARAM_BLOG + "=" + _blog.getKey().calculateHash().toBase64() + "&" - + ViewBlogServlet.PARAM_ATTACHMENT + "=" - + Base64.encode(_entry.getURI().getKeyHash().getData()) + "/" - + _entry.getURI().getEntryId() + "/" - + id; - } - - public String getPageURL(String entry) { - StringBuffer buf = new StringBuffer(128); - buf.append("blog.jsp?"); - buf.append(ViewBlogServlet.PARAM_BLOG).append(_blog.getKey().calculateHash().toBase64()).append("&"); - - if (entry != null) { - if (entry.startsWith("entry://")) - entry = entry.substring("entry://".length()); - else if (entry.startsWith("blog://")) - entry = entry.substring("blog://".length()); - int split = entry.indexOf('/'); - if (split > 0) { - buf.append(ViewBlogServlet.PARAM_ENTRY).append("="); - buf.append(sanitizeTagParam(entry.substring(0, split))).append('/'); - buf.append(sanitizeTagParam(entry.substring(split+1))).append("&"); - } - } - return buf.toString(); - } - public String getPageURL(Hash blog, String tag, long entryId, String group, int numPerPage, int pageNum, boolean expandEntries, boolean showImages) { - StringBuffer buf = new StringBuffer(128); - buf.append("blog.jsp?"); - buf.append(ViewBlogServlet.PARAM_BLOG).append("="); - buf.append(_blog.getKey().calculateHash().toBase64()).append("&"); - - if ( (blog != null) && (entryId > 0) ) { - buf.append(ViewBlogServlet.PARAM_ENTRY).append("="); - buf.append(blog.toBase64()).append('/'); - buf.append(entryId).append("&"); - } - if (tag != null) - buf.append(ViewBlogServlet.PARAM_TAG).append('=').append(sanitizeTagParam(tag)).append("&"); - if ( (pageNum >= 0) && (numPerPage > 0) ) - buf.append(ViewBlogServlet.PARAM_OFFSET).append('=').append(pageNum*numPerPage).append("&"); - return buf.toString(); - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/sml/EventReceiverImpl.java b/apps/syndie/java/src/net/i2p/syndie/sml/EventReceiverImpl.java deleted file mode 100644 index 515f80b86..000000000 --- 
a/apps/syndie/java/src/net/i2p/syndie/sml/EventReceiverImpl.java +++ /dev/null @@ -1,116 +0,0 @@ -package net.i2p.syndie.sml; - -import java.util.List; - -import net.i2p.I2PAppContext; -import net.i2p.util.Log; - -/** - * - */ -public class EventReceiverImpl implements SMLParser.EventReceiver { - protected I2PAppContext _context; - private Log _log; - - public EventReceiverImpl(I2PAppContext ctx) { - _context = ctx; - _log = ctx.logManager().getLog(EventReceiverImpl.class); - } - public void receiveHeader(String header, String value) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive header [" + header + "] = [" + value + "]"); - } - public void receiveLink(String schema, String location, String text) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive link [" + schema + "]/[" + location+ "]/[" + text + "]"); - } - public void receiveBlog(String name, String blogKeyHash, String blogPath, long blogEntryId, - List blogArchiveLocations, String anchorText) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive blog [" + name + "]/[" + blogKeyHash + "]/[" + blogPath - + "]/[" + blogEntryId + "]/[" + blogArchiveLocations + "]/[" + anchorText + "]"); - } - public void receiveArchive(String name, String description, String locationSchema, String location, - String postingKey, String anchorText) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive archive [" + name + "]/[" + description + "]/[" + locationSchema - + "]/[" + location + "]/[" + postingKey + "]/[" + anchorText + "]"); - } - public void receiveImage(String alternateText, int attachmentId) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive image [" + alternateText + "]/[" + attachmentId + "]"); - } - public void receiveAddress(String name, String schema, String protocol, String location, String anchorText) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive address [" + name + "]/[" + schema + "]/[" + location + "]/[" + anchorText+ "]"); - } - public void receiveBold(String text) { - 
if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive bold [" + text+ "]"); - } - public void receiveItalic(String text) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive italic [" + text+ "]"); - } - public void receiveUnderline(String text) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive underline [" + text+ "]"); - } - public void receiveQuote(String text, String whoQuoted, String quoteLocationSchema, String quoteLocation) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive quote [" + text + "]/[" + whoQuoted + "]/[" + quoteLocationSchema + "]/[" + quoteLocation + "]"); - } - public void receiveCode(String text, String codeLocationSchema, String codeLocation) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive code [" + text+ "]/[" + codeLocationSchema + "]/[" + codeLocation + "]"); - } - public void receiveCut(String summaryText) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive cut [" + summaryText + "]"); - } - public void receivePlain(String text) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive plain [" + text + "]"); - } - public void receiveNewline() { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive NL"); - } - public void receiveLT() { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive LT"); - } - public void receiveGT() { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive GT"); - } - public void receiveBegin() { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive begin"); - } - public void receiveEnd() { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive end"); - } - public void receiveHeaderEnd() { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive header end"); - } - public void receiveLeftBracket() { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive ["); - } - public void receiveRightBracket() { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receive ]"); - } - - public void receiveH1(String text) {} - public void receiveH2(String text) {} - public void receiveH3(String text) {} - 
public void receiveH4(String text) {} - public void receiveH5(String text) {} - public void receivePre(String text) {} - public void receiveHR() {} - public void receiveAttachment(int id, int thumbnail, String anchorText) {} -} diff --git a/apps/syndie/java/src/net/i2p/syndie/sml/HTMLPreviewRenderer.java b/apps/syndie/java/src/net/i2p/syndie/sml/HTMLPreviewRenderer.java deleted file mode 100644 index 603842304..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/sml/HTMLPreviewRenderer.java +++ /dev/null @@ -1,164 +0,0 @@ -package net.i2p.syndie.sml; - -import java.io.File; -import java.util.List; - -import net.i2p.I2PAppContext; -import net.i2p.client.naming.PetName; -import net.i2p.data.Base64; -import net.i2p.data.Hash; -import net.i2p.syndie.data.SafeURL; -import net.i2p.syndie.web.AddressesServlet; -import net.i2p.syndie.web.ArchiveViewerBean; - -/** - * - */ -public class HTMLPreviewRenderer extends HTMLRenderer { - private List _filenames; - private List _fileTypes; - private List _files; - - public HTMLPreviewRenderer(I2PAppContext ctx, List filenames, List fileTypes, List files) { - super(ctx); - _filenames = filenames; - _fileTypes = fileTypes; - _files = files; - } - - protected String getAttachmentURLBase() { return "viewtempattachment.jsp"; } - protected String getAttachmentURL(int id) { - return getAttachmentURLBase() + "?" 
+ - ArchiveViewerBean.PARAM_ATTACHMENT + "=" + id; - } - - public void receiveAttachment(int id, int thumb, String anchorText) { - anchorText = sanitizeString(anchorText); - if (!continueBody()) { return; } - if ( (id < 0) || (_files == null) || (id >= _files.size()) ) { - _bodyBuffer.append(anchorText); - } else { - File f = (File)_files.get(id); - String name = (String)_filenames.get(id); - String type = (String)_fileTypes.get(id); - _bodyBuffer.append(""); - if(thumb >= 0) { - _bodyBuffer.append("\"").append(anchorText)."); - } else { - _bodyBuffer.append(anchorText); - } - - _bodyBuffer.append(""); - _bodyBuffer.append(getSpan("attachmentSummary")).append(" ("); - _bodyBuffer.append(getSpan("attachmentSummarySize")).append(f.length()/1024).append("KB, "); - _bodyBuffer.append(getSpan("attachmentSummaryName")).append(" \"").append(sanitizeString(name)).append("\", "); - _bodyBuffer.append(getSpan("attachmentSummaryType")).append(sanitizeString(type)).append(")"); - } - } - - public void receiveEnd() { - _postBodyBuffer.append("\n"); - _postBodyBuffer.append("\n"); - _postBodyBuffer.append("
      \n"); - _postBodyBuffer.append(" \n"); - - if (_files.size() > 0) { - _postBodyBuffer.append(getSpan("summDetailAttachment")).append("Attachments: "); - _postBodyBuffer.append("\n"); - _postBodyBuffer.append("
      \n"); - } - - if (_blogs.size() > 0) { - _postBodyBuffer.append(getSpan("summDetailBlog")).append("Blog references: "); - for (int i = 0; i < _blogs.size(); i++) { - Blog b = (Blog)_blogs.get(i); - boolean expanded = (_user != null ? _user.getShowExpanded() : false); - boolean images = (_user != null ? _user.getShowImages() : false); - _postBodyBuffer.append("").append(sanitizeString(b.name)).append(" "); - } - _postBodyBuffer.append("
      \n"); - } - - if (_links.size() > 0) { - _postBodyBuffer.append(getSpan("summDetailExternal")).append("External links: "); - for (int i = 0; i < _links.size(); i++) { - Link l = (Link)_links.get(i); - String schema = l.schema; - _postBodyBuffer.append("").append(sanitizeString(l.location)); - _postBodyBuffer.append(getSpan("summDetailExternalNet")).append(" (").append(sanitizeString(l.schema)).append(") "); - } - _postBodyBuffer.append("
      \n"); - } - - if (_addresses.size() > 0) { - _postBodyBuffer.append(getSpan("summDetailAddr")).append("Addresses:"); - for (int i = 0; i < _addresses.size(); i++) { - Address a = (Address)_addresses.get(i); - importAddress(a); - PetName pn = null; - if (_user != null) - pn = _user.getPetNameDB().getByLocation(a.location); - if (pn != null) { - _postBodyBuffer.append(' ').append(getSpan("summDetailAddrKnown")); - _postBodyBuffer.append(sanitizeString(pn.getName())).append(""); - } else { - _postBodyBuffer.append(" ").append(sanitizeString(a.name)).append(""); - } - } - _postBodyBuffer.append("
      \n"); - } - - if (_archives.size() > 0) { - _postBodyBuffer.append(getSpan("summDetailArchive")).append("Archives:"); - for (int i = 0; i < _archives.size(); i++) { - ArchiveRef a = (ArchiveRef)_archives.get(i); - _postBodyBuffer.append(" ").append(sanitizeString(a.name)).append(""); - if (a.description != null) - _postBodyBuffer.append(": ").append(getSpan("summDetailArchiveDesc")).append(sanitizeString(a.description)).append(""); - if (null == _user.getPetNameDB().getByLocation(a.location)) { - _postBodyBuffer.append(" bookmark"); - } - } - _postBodyBuffer.append("
      \n"); - } - - _postBodyBuffer.append("\n
      \n\n"); - _postBodyBuffer.append("\n"); - } - - protected void renderMetaCell() { _preBodyBuffer.append(""); } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/sml/HTMLRenderer.java b/apps/syndie/java/src/net/i2p/syndie/sml/HTMLRenderer.java deleted file mode 100644 index 3c0346ab9..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/sml/HTMLRenderer.java +++ /dev/null @@ -1,1076 +0,0 @@ -package net.i2p.syndie.sml; - -import java.io.ByteArrayOutputStream; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStreamWriter; -import java.io.Writer; -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.StringTokenizer; - -import net.i2p.I2PAppContext; -import net.i2p.client.naming.PetName; -import net.i2p.data.Base64; -import net.i2p.data.DataHelper; -import net.i2p.data.Hash; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.User; -import net.i2p.syndie.data.Attachment; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.EntryContainer; -import net.i2p.syndie.data.SafeURL; -import net.i2p.syndie.web.AddressesServlet; -import net.i2p.syndie.web.ArchiveViewerBean; -import net.i2p.syndie.web.PostServlet; -import net.i2p.syndie.web.SyndicateServlet; -import net.i2p.util.Log; - -/** - * - */ -public class HTMLRenderer extends EventReceiverImpl { - private Log _log; - protected SMLParser _parser; - protected Writer _out; - protected User _user; - protected Archive _archive; - protected EntryContainer _entry; - protected boolean _showImages; - protected boolean _cutBody; - protected boolean _cutReached; - protected int _cutSize; - protected int _lastNewlineAt; - protected Map _headers; - protected List _addresses; - protected 
List _links; - protected List _blogs; - protected List _archives; - protected StringBuffer _preBodyBuffer; - protected StringBuffer _bodyBuffer; - protected StringBuffer _postBodyBuffer; - - public HTMLRenderer(I2PAppContext ctx) { - super(ctx); - _log = ctx.logManager().getLog(HTMLRenderer.class); - _parser = new SMLParser(ctx); - } - - /** - * Usage: HTMLRenderer smlFile outputFile - */ - public static void main(String args[]) { - if (args.length != 2) { - System.err.println("Usage: HTMLRenderer smlFile outputFile"); - return; - } - HTMLRenderer renderer = new HTMLRenderer(I2PAppContext.getGlobalContext()); - Writer out = null; - try { - ByteArrayOutputStream baos = new ByteArrayOutputStream(1024*512); - FileInputStream in = new FileInputStream(args[0]); - byte buf[] = new byte[1024]; - int read = 0; - while ( (read = in.read(buf)) != -1) - baos.write(buf, 0, read); - out = new OutputStreamWriter(new FileOutputStream(args[1]), "UTF-8"); - renderer.render(new User(), BlogManager.instance().getArchive(), null, DataHelper.getUTF8(baos.toByteArray()), out, false, true); - } catch (IOException ioe) { - ioe.printStackTrace(); - } finally { - if (out != null) try { out.close(); } catch (IOException ioe) {} - } - } - - /** - * Retrieve: class="s_summary_$element" or class="s_detail_$element ss_$style_detail_$element" - */ - protected String getClass(String element) { - StringBuffer rv = new StringBuffer(64); - rv.append(" class=\"s_"); - if (_cutBody) - rv.append("summary_"); - else - rv.append("detail_"); - rv.append(element); - if (_entry != null) { - String style = sanitizeStyle(_entry.getHeader(HEADER_STYLE)); - if (style != null) { - rv.append(" ss_").append(style); - if (_cutBody) - rv.append("summary_"); - else - rv.append("detail_"); - rv.append(element); - } - } - rv.append("\" "); - return rv.toString(); - } - protected String getSpan(String element) { - return ""; - } - - public void renderUnknownEntry(User user, Archive archive, BlogURI uri, Writer out) 
throws IOException { - BlogInfo info = archive.getBlogInfo(uri); - if (info == null) - out.write("
      The blog " + uri.getKeyHash().toBase64() + " is not known locally. " - + "Please get it from an archive and try again"); - else - out.write("
      The blog " + info.getProperty(BlogInfo.NAME) + " is known, but the entry " + uri.getEntryId() + " is not. " - + "Please get it from an archive and try again"); - } - - public void render(User user, Archive archive, EntryContainer entry, Writer out, boolean cutBody, boolean showImages) throws IOException { - if (entry == null) - return; - render(user, archive, entry, entry.getEntry().getText(), out, cutBody, showImages); - } - public void render(User user, Archive archive, EntryContainer entry, String rawSML, Writer out, boolean cutBody, boolean showImages) throws IOException { - prepare(user, archive, entry, rawSML, out, cutBody, showImages); - - _out.write(_preBodyBuffer.toString()); - _out.write(_bodyBuffer.toString()); - _out.write(_postBodyBuffer.toString()); - //int len = _preBodyBuffer.length() + _bodyBuffer.length() + _postBodyBuffer.length(); - //System.out.println("Wrote " + len); - } - protected void prepare(User user, Archive archive, EntryContainer entry, String rawSML, Writer out, boolean cutBody, boolean showImages) throws IOException { - _user = user; - _archive = archive; - _entry = entry; - _out = out; - _headers = new HashMap(); - _preBodyBuffer = new StringBuffer(1024); - _bodyBuffer = new StringBuffer(1024); - _postBodyBuffer = new StringBuffer(1024); - _addresses = new ArrayList(); - _links = new ArrayList(); - _blogs = new ArrayList(); - _archives = new ArrayList(); - _cutBody = cutBody; - _showImages = showImages; - _cutReached = false; - _cutSize = 1024; - _parser.parse(rawSML, this); - } - - public void receivePlain(String text) { - if (!continueBody()) { return; } - if (_log.shouldLog(Log.DEBUG)) _log.debug("receive plain [" + text + "]"); - _bodyBuffer.append(sanitizeString(text)); - } - - public void receiveBold(String text) { - if (!continueBody()) { return; } - _bodyBuffer.append("").append(sanitizeString(text)).append(""); - } - public void receiveItalic(String text) { - if (!continueBody()) { return; } - 
_bodyBuffer.append("").append(sanitizeString(text)).append(""); - } - public void receiveUnderline(String text) { - if (!continueBody()) { return; } - _bodyBuffer.append("").append(sanitizeString(text)).append(""); - } - public void receiveHR() { - if (!continueBody()) { return; } - if (_log.shouldLog(Log.DEBUG)) _log.debug("receive HR"); - _bodyBuffer.append(getSpan("hr")).append("
      "); - } - public void receiveH1(String body) { - if (!continueBody()) { return; } - _bodyBuffer.append("

      ").append(sanitizeString(body)).append("

      "); - } - public void receiveH2(String body) { - if (!continueBody()) { return; } - _bodyBuffer.append("

      ").append(sanitizeString(body)).append("

      "); - } - public void receiveH3(String body) { - if (!continueBody()) { return; } - _bodyBuffer.append("

      ").append(sanitizeString(body)).append("

      "); - } - public void receiveH4(String body) { - if (!continueBody()) { return; } - _bodyBuffer.append("

      ").append(sanitizeString(body)).append("

      "); - } - public void receiveH5(String body) { - if (!continueBody()) { return; } - _bodyBuffer.append("
      ").append(sanitizeString(body)).append("
      "); - } - public void receivePre(String body) { - if (!continueBody()) { return; } - if (_log.shouldLog(Log.DEBUG)) _log.debug("receive pre: [" + sanitizeString(body) + "]"); - _bodyBuffer.append("
      ").append(sanitizeString(body)).append("
      "); - } - - public void receiveQuote(String text, String whoQuoted, String quoteLocationSchema, String quoteLocation) { - if (!continueBody()) { return; } - _bodyBuffer.append("").append(sanitizeString(text)).append(""); - } - public void receiveCode(String text, String codeLocationSchema, String codeLocation) { - if (!continueBody()) { return; } - _bodyBuffer.append("").append(sanitizeString(text)).append(""); - } - public void receiveImage(String alternateText, int attachmentId) { - if (!continueBody()) { return; } - if (_showImages) { - _bodyBuffer.append("\"").append(sanitizeTagParam(alternateText)).append("\"");"); - } else { - _bodyBuffer.append(getSpan("imgSummary")).append("[image: ").append(getSpan("imgSummaryAttachment")).append(" attachment ").append(attachmentId); - _bodyBuffer.append(": ").append(getSpan("imgSummaryAlt")).append(sanitizeString(alternateText)); - _bodyBuffer.append(" view images]"); - } - } - - public void receiveCut(String summaryText) { - if (!continueBody()) { return; } - _cutReached = true; - if (_cutBody) { - _bodyBuffer.append(""); - if ( (summaryText != null) && (summaryText.length() > 0) ) - _bodyBuffer.append(sanitizeString(summaryText)); - else - _bodyBuffer.append("more inside..."); - _bodyBuffer.append("\n"); - } else { - if (summaryText != null) - _bodyBuffer.append(getSpan("cutIgnore")).append(sanitizeString(summaryText)).append("\n"); - } - } - - /** are we either before the cut or rendering without cutting? */ - protected boolean continueBody() { - boolean rv = ( (!_cutReached) && (_bodyBuffer.length() <= _cutSize) ) || (!_cutBody); - //if (!rv) - // System.out.println("rv: " + rv + " Cut reached: " + _cutReached + " bodyBufferSize: " + _bodyBuffer.length() + " cutBody? 
" + _cutBody); - if (!rv && !_cutReached) { - // exceeded the allowed size - _bodyBuffer.append("more inside...\n"); - _cutReached = true; - } - return rv; - } - - public void receiveNewline() { - if (!continueBody()) { return; } - if (_log.shouldLog(Log.DEBUG)) _log.debug("receive NL"); - if (true || (_lastNewlineAt >= _bodyBuffer.length())) - _bodyBuffer.append(getSpan("nl")).append("
      \n"); - else - _lastNewlineAt = _bodyBuffer.length(); - } - public void receiveLT() { - if (!continueBody()) { return; } - _bodyBuffer.append(getSpan("lt")).append("<"); - } - public void receiveGT() { - if (!continueBody()) { return; } - _bodyBuffer.append(getSpan("gt")).append(">"); - } - public void receiveBegin() {} - public void receiveLeftBracket() { - if (!continueBody()) { return; } - if (_log.shouldLog(Log.DEBUG)) _log.debug("receive ["); - _bodyBuffer.append(getSpan("lb")).append("["); - } - public void receiveRightBracket() { - if (!continueBody()) { return; } - if (_log.shouldLog(Log.DEBUG)) _log.debug("receive ]"); - _bodyBuffer.append(getSpan("rb")).append("]"); - } - - /** - * when we see a link to a blog, we may want to: - * = view the blog entry - * = view all entries in that blog - * = view all entries in that blog with the given tag - * = view the blog's metadata - * = [fetch the blog from other locations] - * = [add the blog's locations to our list of known locations] - * = [shitlist the blog] - * = [add the blog to one of our groups] - * - * [blah] implies *later*. - * - * Currently renders to: - * $description - * [blog: $name (meta) - * [tag: $tag] - * archived at $location*] - * - */ - public void receiveBlog(String name, String hash, String tag, long entryId, List locations, String description) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receiving the blog: " + name + "/" + hash + "/" + tag + "/" + entryId +"/" + locations + ": "+ description); - byte blogData[] = Base64.decode(hash); - if ( (blogData == null) || (blogData.length != Hash.HASH_LENGTH) ) - return; - - Blog b = new Blog(); - b.name = name; - b.hash = hash; - b.tag = tag; - b.entryId = entryId; - b.locations = locations; - if (!_blogs.contains(b)) - _blogs.add(b); - - if (!continueBody()) { return; } - if (hash == null) return; - - Hash blog = new Hash(blogData); - if (entryId > 0) { - String pageURL = getPageURL(blog, tag, entryId, -1, -1, true, (_user != null ? 
_user.getShowImages() : false)); - _bodyBuffer.append(""); - if ( (description != null) && (description.trim().length() > 0) ) { - _bodyBuffer.append(sanitizeString(description)); - } else if ( (name != null) && (name.trim().length() > 0) ) { - _bodyBuffer.append(sanitizeTagParam(name)); - } else { - _bodyBuffer.append("[view entry]"); - } - _bodyBuffer.append(""); - } else if ( (description != null) && (description.trim().length() > 0) ) { - _bodyBuffer.append(sanitizeString(description)); - } - - //String url = getPageURL(blog, null, -1, -1, -1, (_user != null ? _user.getShowExpanded() : false), (_user != null ? _user.getShowImages() : false)); - String url = getMetadataURL(blog); - _bodyBuffer.append(getSpan("blogEntrySummary")); - _bodyBuffer.append(" ["); - if ( (name != null) && (name.trim().length() > 0) ) - _bodyBuffer.append(sanitizeTagParam(name)); - else - _bodyBuffer.append("view"); - _bodyBuffer.append(""); - //_bodyBuffer.append(" (meta)"); - if ( (tag != null) && (tag.trim().length() > 0) ) { - url = getPageURL(blog, tag, -1, -1, -1, false, false); - _bodyBuffer.append(" Tag: ").append(sanitizeString(tag)).append(""); - } - if ( (locations != null) && (locations.size() > 0) ) { - _bodyBuffer.append(getSpan("blogArchive")).append(" Archives: "); - for (int i = 0; i < locations.size(); i++) { - SafeURL surl = (SafeURL)locations.get(i); - if (_user.getAuthenticated() && BlogManager.instance().authorizeRemote(_user) ) - _bodyBuffer.append(" ").append(sanitizeString(surl.toString())).append(" "); - else - _bodyBuffer.append(getSpan("blogArchiveURL")).append(sanitizeString(surl.toString())).append(" "); - } - _bodyBuffer.append(""); - } - _bodyBuffer.append("] "); - } - - public void receiveArchive(String name, String description, String locationSchema, String location, - String postingKey, String anchorText) { - ArchiveRef a = new ArchiveRef(); - a.name = name; - a.description = description; - a.locationSchema = locationSchema; - a.location = location; - 
if (!_archives.contains(a)) - _archives.add(a); - - if (!continueBody()) { return; } - - _bodyBuffer.append(getSpan("archive")).append(sanitizeString(anchorText)).append(""); - _bodyBuffer.append(getSpan("archiveSummary")).append(" [Archive "); - if (name != null) - _bodyBuffer.append(getSpan("archiveSummaryName")).append(sanitizeString(name)).append(""); - if (location != null) { - _bodyBuffer.append(" at "); - SafeURL surl = new SafeURL(locationSchema + "://" + location); - if (BlogManager.instance().authorizeRemote(_user)) { - _bodyBuffer.append("").append(sanitizeString(surl.toString())).append(""); - } else { - _bodyBuffer.append(sanitizeString(surl.getLocation())); - } - if (_user.getAuthenticated()) { - _bodyBuffer.append(" bookmark it"); - } - } - if (description != null) - _bodyBuffer.append(": ").append(getSpan("archiveSummaryDesc")).append(sanitizeString(description)).append(""); - _bodyBuffer.append("]"); - } - - public void receiveLink(String schema, String location, String text) { - Link l = new Link(); - l.schema = schema; - l.location = location; - if (!_links.contains(l)) - _links.add(l); - if (!continueBody()) { return; } - if ( (schema == null) || (location == null) ) return; - _bodyBuffer.append(""). - append(sanitizeString(text)). 
- append(""); - } - - public void importAddress(Address a) { - BlogPostInfoRenderer.importAddress(a, _user); - } - - public void receiveAddress(String name, String schema, String protocol, String location, String anchorText) { - Address a = new Address(); - a.name = name; - a.schema = schema; - a.location = location; - a.protocol = protocol; - if (!_addresses.contains(a)) - _addresses.add(a); - if (!continueBody()) { return; } - if ( (schema == null) || (location == null) ) return; - PetName pn = null; - if (_user != null) - pn = _user.getPetNameDB().getByLocation(location); - if (pn != null) { - _bodyBuffer.append(getSpan("addr")).append(sanitizeString(anchorText)).append(""); - _bodyBuffer.append(getSpan("addrKnownName")).append("(").append(sanitizeString(pn.getName())).append(")"); - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Receiving address [" + location + "]"); - _bodyBuffer.append("(view parent)\n"); - - _postBodyBuffer.append("\n"); - _postBodyBuffer.append("\n"); - } else { - _postBodyBuffer.append("\n"); - _postBodyBuffer.append("\n"); - _postBodyBuffer.append("
      \n"); - _postBodyBuffer.append("\n"); - _postBodyBuffer.append("\n"); - _postBodyBuffer.append("\n"); - - if ( (_entry != null) && (_entry.getAttachments() != null) && (_entry.getAttachments().length > 0) ) { - _postBodyBuffer.append(getSpan("summDetailAttachment")).append("Attachments: "); - _postBodyBuffer.append("\n"); - _postBodyBuffer.append("
      \n"); - } - - if (_blogs.size() > 0) { - _postBodyBuffer.append(getSpan("summDetailBlog")).append("Blog references:"); - for (int i = 0; i < _blogs.size(); i++) { - Blog b = (Blog)_blogs.get(i); - _postBodyBuffer.append("").append(sanitizeString(b.name)).append(" "); - } - _postBodyBuffer.append("
      \n"); - } - - if (_links.size() > 0) { - _postBodyBuffer.append(getSpan("summDetailExternal")).append("External links: "); - for (int i = 0; i < _links.size(); i++) { - Link l = (Link)_links.get(i); - String schema = l.schema; - _postBodyBuffer.append("").append(sanitizeString(l.location, 30)); - _postBodyBuffer.append(getSpan("summDetailExternalNet")).append(" (").append(sanitizeString(l.schema)).append(") "); - } - _postBodyBuffer.append("
      \n"); - } - - if (_addresses.size() > 0) { - _postBodyBuffer.append(getSpan("summDetailAddr")).append("Addresses:"); - for (int i = 0; i < _addresses.size(); i++) { - Address a = (Address)_addresses.get(i); - importAddress(a); - PetName pn = null; - if (_user != null) - pn = _user.getPetNameDB().getByLocation(a.location); - if (pn != null) { - _postBodyBuffer.append(' ').append(getSpan("summDetailAddrKnown")); - _postBodyBuffer.append(sanitizeString(pn.getName())).append(""); - } else { - _postBodyBuffer.append(" ").append(sanitizeString(a.name)).append(""); - } - } - _postBodyBuffer.append("
      \n"); - } - - if (_archives.size() > 0) { - _postBodyBuffer.append(getSpan("summDetailArchive")).append("Archives:"); - for (int i = 0; i < _archives.size(); i++) { - ArchiveRef a = (ArchiveRef)_archives.get(i); - _postBodyBuffer.append(" ").append(sanitizeString(a.name)).append(""); - if (a.description != null) - _postBodyBuffer.append(": ").append(getSpan("summDetailArchiveDesc")).append(sanitizeString(a.description)).append(""); - if (null == _user.getPetNameDB().getByLocation(a.location)) { - _postBodyBuffer.append(" bookmark it"); - } - } - _postBodyBuffer.append("
      \n"); - } - - if (_entry != null) { - List replies = _archive.getIndex().getReplies(_entry.getURI()); - if ( (replies != null) && (replies.size() > 0) ) { - _postBodyBuffer.append(getSpan("summDetailReplies")).append("Replies: "); - for (int i = 0; i < replies.size(); i++) { - BlogURI reply = (BlogURI)replies.get(i); - _postBodyBuffer.append(""); - _postBodyBuffer.append(getSpan("summDetailReplyAuthor")); - BlogInfo replyAuthor = _archive.getBlogInfo(reply); - if (replyAuthor != null) { - _postBodyBuffer.append(sanitizeString(replyAuthor.getProperty(BlogInfo.NAME))); - } else { - _postBodyBuffer.append(reply.getKeyHash().toBase64().substring(0,16)); - } - _postBodyBuffer.append(" on "); - _postBodyBuffer.append(getSpan("summDetailReplyDate")); - _postBodyBuffer.append(getEntryDate(reply.getEntryId())); - _postBodyBuffer.append(" "); - } - _postBodyBuffer.append("
      "); - } - } - - String inReplyTo = (String)_headers.get(HEADER_IN_REPLY_TO); - if ( (inReplyTo != null) && (inReplyTo.trim().length() > 0) ) { - _postBodyBuffer.append(" (view parent)\n"); - } - - _postBodyBuffer.append("\n
      \n\n"); - _postBodyBuffer.append("\n"); - } - _postBodyBuffer.append("\n"); - } - - public void receiveHeader(String header, String value) { - //System.err.println("Receive header [" + header + "] = [" + value + "]"); - if (HEADER_PETNAME.equals(header)) { - StringTokenizer tok = new StringTokenizer(value, "\t\n"); - if (tok.countTokens() != 4) - return; - String name = tok.nextToken(); - String net = tok.nextToken(); - String proto = tok.nextToken(); - String loc = tok.nextToken(); - Address a = new Address(); - a.name = sanitizeString(name, false); - a.schema = sanitizeString(net, false); - a.protocol = sanitizeString(proto, false); - a.location = sanitizeString(loc, false); - _addresses.add(a); - } else { - _headers.put(header, value); - } - } - - public void receiveHeaderEnd() { - _preBodyBuffer.append("\n"); - renderSubjectCell(); - renderMetaCell(); - renderPreBodyCell(); - } - - public static final String HEADER_SUBJECT = "Subject"; - public static final String HEADER_BGCOLOR = "bgcolor"; - public static final String HEADER_IN_REPLY_TO = "InReplyTo"; - public static final String HEADER_STYLE = "Style"; - public static final String HEADER_PETNAME = "PetName"; - public static final String HEADER_TAGS = "Tags"; - /** if set to true, don't display the message in the same thread, though keep a parent reference */ - public static final String HEADER_FORCE_NEW_THREAD = "ForceNewThread"; - /** if set to true, don't let anyone else reply in the same thread (but let the original author reply) */ - public static final String HEADER_REFUSE_REPLIES = "RefuseReplies"; - - private void renderSubjectCell() { - _preBodyBuffer.append(""); - _preBodyBuffer.append(""); - _preBodyBuffer.append("\n"); - } - - private void renderPreBodyCell() { - _preBodyBuffer.append(""); - String bgcolor = (String)_headers.get(HEADER_BGCOLOR); - _preBodyBuffer.append(""); - _preBodyBuffer.append(""); - _preBodyBuffer.append("\n"); - } - - private final SimpleDateFormat _dateFormat = new 
SimpleDateFormat("yyyy/MM/dd", Locale.UK); - public final String getEntryDate(long when) { - synchronized (_dateFormat) { - try { - String str = _dateFormat.format(new Date(when)); - long dayBegin = _dateFormat.parse(str).getTime(); - return str + " [" + (when - dayBegin) + "]"; - } catch (ParseException pe) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Error formatting", pe); - // wtf - return "unknown"; - } - } - } - - public static final String sanitizeString(String str) { return sanitizeString(str, true); } - public static final String sanitizeString(String str, int maxLen) { return sanitizeString(str, true, maxLen); } - public static final String sanitizeString(String str, boolean allowNL) { return sanitizeString(str, allowNL, -1); } - public static final String sanitizeString(String str, boolean allowNL, int maxLen) { - if (str == null) return null; - boolean unsafe = false; - unsafe = unsafe || str.indexOf('<') >= 0; - unsafe = unsafe || str.indexOf('>') >= 0; - if (!allowNL) { - unsafe = unsafe || str.indexOf('\n') >= 0; - unsafe = unsafe || str.indexOf('\r') >= 0; - unsafe = unsafe || str.indexOf('\f') >= 0; - } - if (unsafe) { - //str = str.replace('<', '_'); // this should be < - //str = str.replace('>', '-'); // this should be > - str = str.replaceAll("<", "<"); - str = str.replaceAll(">", ">"); - if (!allowNL) { - //str = str.replace('\n', ' '); - //str = str.replace('\r', ' '); - //str = str.replace('\f', ' '); - str = str.replaceAll("\n", "
      "); // no class - str = str.replaceAll("\r", "
      "); // no class - str = str.replaceAll("\f", "
      "); // no class - } - } - if ( (maxLen > 0) && (str.length() > maxLen) ) - return str.substring(0, maxLen) + "..."; - else - return str; - } - - public static final String sanitizeURL(String str) { - if (str == null) return ""; - return Base64.encode(DataHelper.getUTF8(str)); - } - public static final String sanitizeTagParam(String str) { - if (str == null) return ""; - //str = str.replace('&', '_'); // this should be & - str = str.replaceAll("&", "&"); - - if (str.indexOf("\"") < 0 && str.indexOf("'") < 0) - return sanitizeString(str); - - str = str.replaceAll("\"", """); - str = str.replaceAll("'", "'"); // as ', but supported by IE - - return sanitizeString(str); - } - - public static final String sanitizeXML(String orig) { - if (orig == null) return ""; - if (orig.indexOf('&') < 0) return orig; - if (true) return orig.replaceAll("&", "&"); - StringBuffer rv = new StringBuffer(orig.length()+32); - for (int i = 0; i < orig.length(); i++) { - if (orig.charAt(i) == '&') - rv.append("&"); - else - rv.append(orig.charAt(i)); - } - return rv.toString(); - } - public static final String sanitizeXML(StringBuffer orig) { - if (orig == null) return ""; - if (orig.indexOf("&") >= 0) - return orig.toString().replaceAll("&", "&"); - else - return orig.toString(); - } - public static final String sanitizeStrippedXML(String orig) { - if (orig == null) return ""; - orig = orig.replaceAll("&", "&"); - orig = orig.replaceAll("<", "<"); - orig = orig.replaceAll(">", ">"); - return orig; - } - - private static final String STYLE_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"; - public static String sanitizeStyle(String style) { - if ( (style == null) || (style.trim().length() <= 0) ) return null; - char c[] = style.toCharArray(); - for (int i = 0; i < c.length; i++) - if (STYLE_CHARS.indexOf(c[i]) < 0) - c[i] = '_'; - return new String(c); - } - - protected String getEntryURL() { return getEntryURL(_user != null ? 
_user.getShowImages() : false); } - protected String getEntryURL(boolean showImages) { - if (_entry == null) return "unknown"; - return "threads.jsp?" + ThreadedHTMLRenderer.PARAM_AUTHOR + '=' + - Base64.encode(_entry.getURI().getKeyHash().getData()) + '&' + - ThreadedHTMLRenderer.PARAM_VIEW_POST + '=' + _entry.getURI().getKeyHash().toBase64() + '/' + - _entry.getURI().getEntryId(); - } - - protected String getAttachmentURLBase() { return "viewattachment.jsp?"; } - protected String getAttachmentURL(int id) { - if (_entry == null) return "unknown"; - return getAttachmentURLBase() + - ArchiveViewerBean.PARAM_BLOG + "=" + - Base64.encode(_entry.getURI().getKeyHash().getData()) + - "&" + ArchiveViewerBean.PARAM_ENTRY + "=" + _entry.getURI().getEntryId() + - "&" + ArchiveViewerBean.PARAM_ATTACHMENT + "=" + id; - } - - public String getMetadataURL() { - if (_entry == null) return "unknown"; - return getMetadataURL(_entry.getURI().getKeyHash()); - } - public String getMetadataURL(Hash blog) { - return "viewmetadata.jsp?" + ArchiveViewerBean.PARAM_BLOG + "=" + - Base64.encode(blog.getData()); - } - - public String getPostURL(Hash blog) { - return "post.jsp?" 
+ ArchiveViewerBean.PARAM_BLOG + "=" + Base64.encode(blog.getData()); - } - public String getPostURL(Hash blog, boolean asReply, String subject, String tags) { - if (asReply && _entry != null) { - StringBuffer rv = new StringBuffer(128); - rv.append("post.jsp?").append(ArchiveViewerBean.PARAM_BLOG).append("=").append(Base64.encode(blog.getData())); - rv.append('&').append(PostServlet.PARAM_PARENT).append('='); - rv.append(Base64.encode("entry://" + _entry.getURI().getKeyHash().toBase64() + "/" + _entry.getURI().getEntryId())); - if (subject != null) - rv.append('&').append(ArchiveViewerBean.PARAM_SUBJECT).append('=').append(Base64.encode(subject)); - if (tags != null) - rv.append('&').append(ArchiveViewerBean.PARAM_TAGS).append('=').append(Base64.encode(tags)); - rv.append('&').append(ArchiveViewerBean.PARAM_PARENT).append('=').append(Base64.encode(_entry.getURI().toString())); - return rv.toString(); - } else { - return getPostURL(blog); - } - } - - /** - * entry may take the form of "base64/messageId", "entry://base64/messageId", or "blog://base64/messageId" - * - */ - public String getPageURL(String entry) { - StringBuffer buf = new StringBuffer(128); - buf.append("threads.jsp?"); - if (entry != null) { - if (entry.startsWith("entry://")) - entry = entry.substring("entry://".length()); - else if (entry.startsWith("blog://")) - entry = entry.substring("blog://".length()); - if (entry.length() > 0) { - buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(entry).append('&'); - buf.append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('=').append(entry).append('&'); - } - } - return buf.toString(); - } - - public String getPageURL(Hash blog, String tag, long entryId, int numPerPage, int pageNum, boolean expandEntries, boolean showImages) { - return getPageURL(blog, tag, entryId, null, numPerPage, pageNum, expandEntries, showImages); - } - public String getPageURL(Hash blog, String tag, long entryId, String group, int numPerPage, int pageNum, boolean 
expandEntries, boolean showImages) { - StringBuffer buf = new StringBuffer(128); - buf.append("threads.jsp?"); - if (blog != null) - buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(blog.toBase64()).append('&'); - if (tag != null) - buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(sanitizeTagParam(tag)).append('&'); - String entry = null; - if (entryId >= 0) { - entry = blog.toBase64() + '/' + entryId; - buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(entry).append('&'); - buf.append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('=').append(entry).append('&'); - } - if ( (pageNum >= 0) && (numPerPage > 0) ) - buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(pageNum*numPerPage).append('&'); - return buf.toString(); - } - public static String getArchiveURL(Hash blog, SafeURL archiveLocation) { - return "syndicate.jsp?" - //+ "action=Continue..." // should this be the case? - + "&" + SyndicateServlet.PARAM_SCHEMA + "=" + sanitizeTagParam(archiveLocation.getSchema()) - + "&" + SyndicateServlet.PARAM_LOCATION + "=" + sanitizeTagParam(archiveLocation.getLocation()); - } - public static String getBookmarkURL(String name, String location, String schema, String protocol) { - return "addresses.jsp?" 
+ AddressesServlet.PARAM_NAME + '=' + sanitizeTagParam(name) - + "&" + AddressesServlet.PARAM_NET + '=' + sanitizeTagParam(schema) - + "&" + AddressesServlet.PARAM_PROTO + '=' + sanitizeTagParam(protocol) - + "&" + AddressesServlet.PARAM_LOC + '=' + sanitizeTagParam(location); - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/sml/Link.java b/apps/syndie/java/src/net/i2p/syndie/sml/Link.java deleted file mode 100644 index c343216f7..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/sml/Link.java +++ /dev/null @@ -1,14 +0,0 @@ -package net.i2p.syndie.sml; - -import net.i2p.data.DataHelper; - -/** contains intermediary rendering state */ -class Link { - public String schema; - public String location; - public int hashCode() { return -1; } - public boolean equals(Object o) { - Link l = (Link)o; - return DataHelper.eq(schema, l.schema) && DataHelper.eq(location, l.location); - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/sml/RSSRenderer.java b/apps/syndie/java/src/net/i2p/syndie/sml/RSSRenderer.java deleted file mode 100644 index 8e99a50bf..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/sml/RSSRenderer.java +++ /dev/null @@ -1,341 +0,0 @@ -package net.i2p.syndie.sml; - -import java.io.IOException; -import java.io.Writer; -import java.text.SimpleDateFormat; -import java.util.Date; -import java.util.List; - -import net.i2p.I2PAppContext; -import net.i2p.client.naming.PetName; -import net.i2p.data.Base64; -import net.i2p.data.Hash; -import net.i2p.syndie.Archive; -import net.i2p.syndie.User; -import net.i2p.syndie.data.Attachment; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.EntryContainer; - -/** - * - */ -public class RSSRenderer extends HTMLRenderer { - - public RSSRenderer(I2PAppContext ctx) { - super(ctx); - } - - private static final boolean RSS_EXCERPT_ONLY = false; - - public void render(User user, Archive archive, EntryContainer entry, String urlPrefix, Writer out) throws IOException { - if (entry == null) return; - 
prepare(user, archive, entry, entry.getEntry().getText(), out, RSS_EXCERPT_ONLY, false); - BlogInfo info = archive.getBlogInfo(entry.getURI()); - - out.write(" \n"); - String subject = sanitizeXML(sanitizeString((String)_headers.get(HEADER_SUBJECT))); - if ( (subject == null) || (subject.length() <= 0) ) - subject = "not specified"; - out.write(" " + subject + "\n"); - out.write(" " + urlPrefix + BlogRenderer.getEntryURL(entry, info, true) + "\n"); - out.write(" syndie://" + entry.getURI().toString() + "\n"); - out.write(" " + getRFC822Date(entry.getURI().getEntryId()) + "\n"); - PetName pn = user.getPetNameDB().getByLocation(entry.getURI().getKeyHash().toBase64()); - String author = null; - if (pn != null) - author = pn.getName(); - if (author == null) { - if (info != null) - author = info.getProperty(BlogInfo.NAME); - } - if (author == null) - author = entry.getURI().getKeyHash().toBase64(); - out.write(" " + sanitizeXML(sanitizeString(author)) + "@syndie.invalid\n"); - String tags[] = entry.getTags(); - if (tags != null) - for (int i = 0; i < tags.length; i++) - out.write(" " + sanitizeXML(sanitizeString(tags[i])) + "\n"); - - out.write(" " + sanitizeXML(_bodyBuffer.toString()) + "\n"); - - renderEnclosures(user, entry, urlPrefix, out); - - out.write(" \n"); - } - - - public void receiveBold(String text) { - if (!continueBody()) { return; } - _bodyBuffer.append(sanitizeString(text)); - } - public void receiveItalic(String text) { - if (!continueBody()) { return; } - _bodyBuffer.append(sanitizeString(text)); - } - public void receiveUnderline(String text) { - if (!continueBody()) { return; } - _bodyBuffer.append(sanitizeString(text)); - } - public void receiveHR() { - if (!continueBody()) { return; } - } - public void receiveH1(String body) { - if (!continueBody()) { return; } - _bodyBuffer.append(sanitizeString(body)); - } - public void receiveH2(String body) { - if (!continueBody()) { return; } - _bodyBuffer.append(sanitizeString(body)); - } - public void 
receiveH3(String body) { - if (!continueBody()) { return; } - _bodyBuffer.append(sanitizeString(body)); - } - public void receiveH4(String body) { - if (!continueBody()) { return; } - _bodyBuffer.append(sanitizeString(body)); - } - public void receiveH5(String body) { - if (!continueBody()) { return; } - _bodyBuffer.append(sanitizeString(body)); - } - public void receivePre(String body) { - if (!continueBody()) { return; } - _bodyBuffer.append(sanitizeString(body)); - } - - public void receiveQuote(String text, String whoQuoted, String quoteLocationSchema, String quoteLocation) { - if (!continueBody()) { return; } - _bodyBuffer.append(sanitizeString(text)); - } - public void receiveCode(String text, String codeLocationSchema, String codeLocation) { - if (!continueBody()) { return; } - _bodyBuffer.append(sanitizeString(text)); - } - public void receiveImage(String alternateText, int attachmentId) { - if (!continueBody()) { return; } - _bodyBuffer.append(sanitizeString(alternateText)); - } - public void receiveCut(String summaryText) { - if (!continueBody()) { return; } - _cutReached = true; - if (_cutBody) { - if ( (summaryText != null) && (summaryText.length() > 0) ) - _bodyBuffer.append(sanitizeString(summaryText)); - else - _bodyBuffer.append("more inside..."); - } else { - if (summaryText != null) - _bodyBuffer.append(sanitizeString(summaryText)); - } - } - /** are we either before the cut or rendering without cutting? */ - protected boolean continueBody() { - boolean rv = ( (!_cutReached) && (_bodyBuffer.length() <= _cutSize) ) || (!_cutBody); - //if (!rv) - // System.out.println("rv: " + rv + " Cut reached: " + _cutReached + " bodyBufferSize: " + _bodyBuffer.length() + " cutBody? 
" + _cutBody); - if (!rv && !_cutReached) { - // exceeded the allowed size - _bodyBuffer.append("more inside..."); - _cutReached = true; - } - return rv; - } - public void receiveNewline() { - if (!continueBody()) { return; } - if (true || (_lastNewlineAt >= _bodyBuffer.length())) - _bodyBuffer.append("\n"); - else - _lastNewlineAt = _bodyBuffer.length(); - } - public void receiveBlog(String name, String hash, String tag, long entryId, List locations, String description) { - byte blogData[] = Base64.decode(hash); - if ( (blogData == null) || (blogData.length != Hash.HASH_LENGTH) ) - return; - - Blog b = new Blog(); - b.name = name; - b.hash = hash; - b.tag = tag; - b.entryId = entryId; - b.locations = locations; - if (!_blogs.contains(b)) - _blogs.add(b); - - if (!continueBody()) { return; } - if (hash == null) return; - - Hash blog = new Hash(blogData); - if ( (description != null) && (description.trim().length() > 0) ) { - _bodyBuffer.append(sanitizeString(description)); - } else if ( (name != null) && (name.trim().length() > 0) ) { - _bodyBuffer.append(sanitizeTagParam(name)); - } else { - _bodyBuffer.append("[view entry]"); - } - } - public void receiveArchive(String name, String description, String locationSchema, String location, - String postingKey, String anchorText) { - ArchiveRef a = new ArchiveRef(); - a.name = name; - a.description = description; - a.locationSchema = locationSchema; - a.location = location; - if (!_archives.contains(a)) - _archives.add(a); - - if (!continueBody()) { return; } - - _bodyBuffer.append(sanitizeString(anchorText)); - } - public void receiveLink(String schema, String location, String text) { - Link l = new Link(); - l.schema = schema; - l.location = location; - if (!_links.contains(l)) - _links.add(l); - if (!continueBody()) { return; } - if ( (schema == null) || (location == null) ) return; - _bodyBuffer.append(sanitizeString(text)); - } - public void receiveAddress(String name, String schema, String protocol, String 
location, String anchorText) { - Address a = new Address(); - a.name = name; - a.schema = schema; - a.location = location; - a.protocol = protocol; - if (!_addresses.contains(a)) - _addresses.add(a); - if (!continueBody()) { return; } - if ( (schema == null) || (location == null) ) return; - PetName pn = null; - if (_user != null) - pn = _user.getPetNameDB().getByLocation(location); - if (pn != null) { - _bodyBuffer.append(sanitizeString(anchorText)); - } else { - _bodyBuffer.append(sanitizeString(anchorText)); - } - } - public void receiveAttachment(int id, int thumb, String anchorText) { - if (!continueBody()) { return; } - _bodyBuffer.append(sanitizeString(anchorText)); - } - - // Mon, 03 Jun 2005 13:04:11 +0000 - private static final SimpleDateFormat _rfc822Date = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss Z"); - private static final String getRFC822Date(long when) { - synchronized (_rfc822Date) { - return _rfc822Date.format(new Date(when)); - } - } - - private void renderEnclosures(User user, EntryContainer entry, String urlPrefix, Writer out) throws IOException { - int included = 0; - if (entry.getAttachments() != null) { - for (int i = 0; i < _entry.getAttachments().length; i++) { - Attachment a = _entry.getAttachments()[i]; - String url = urlPrefix + sanitizeXML(getAttachmentURL(i)) - + "#" + sanitizeTagParam(a.getName()); // tacked on for readability - out.write(" "); - // we can do neat stuff with Media RSS (http://search.yahoo.com/mrss) here, such as - // include descriptions, titles, keywords, thumbnails, etc - out.write(" \n"); - - if (included == 0) // plain RSS enclosures can only have one enclosure per entry, unlike Media RSS - out.write(" \n"); - included++; - } - } - - /* - if (_blogs.size() > 0) { - for (int i = 0; i < _blogs.size(); i++) { - Blog b = (Blog)_blogs.get(i); - out.write(" \n"); - } - } - - if (_links.size() > 0) { - for (int i = 0; i < _links.size(); i++) { - Link l = (Link)_links.get(i); - StringBuffer url = new 
StringBuffer(128); - url.append("externallink.jsp?schema="); - url.append(sanitizeURL(l.schema)).append("&location="); - url.append(sanitizeURL(l.location)); - out.write(" \n"); - } - } - - if (_addresses.size() > 0) { - for (int i = 0; i < _addresses.size(); i++) { - Address a = (Address)_addresses.get(i); - - PetName pn = null; - if (_user != null) - pn = _user.getPetNameDB().getByLocation(a.location); - if (pn == null) { - StringBuffer url = new StringBuffer(128); - url.append("addresses.jsp?").append(AddressesServlet.PARAM_NAME).append('='); - url.append(sanitizeTagParam(a.schema)).append("&").append(AddressesServlet.PARAM_LOC).append("="); - url.append(sanitizeTagParam(a.location)).append("&").append(AddressesServlet.PARAM_NAME).append("="); - url.append(sanitizeTagParam(a.name)).append("&").append(AddressesServlet.PARAM_PROTO).append("="); - url.append(sanitizeTagParam(a.protocol)); - out.write(" \n"); - } - } - } - - if (_archives.size() > 0) { - for (int i = 0; i < _archives.size(); i++) { - ArchiveRef a = (ArchiveRef)_archives.get(i); - String url = getArchiveURL(null, new SafeURL(a.locationSchema + "://" + a.location)); - out.write(" \n"); - } - } - - if (_entry != null) { - List replies = _archive.getIndex().getReplies(_entry.getURI()); - if ( (replies != null) && (replies.size() > 0) ) { - for (int i = 0; i < replies.size(); i++) { - BlogURI reply = (BlogURI)replies.get(i); - String url = getPageURL(reply.getKeyHash(), null, reply.getEntryId(), -1, -1, true, _user.getShowImages()); - out.write(" \n"); - } - } - } - - String inReplyTo = (String)_headers.get(HEADER_IN_REPLY_TO); - if ( (inReplyTo != null) && (inReplyTo.trim().length() > 0) ) { - String url = getPageURL(sanitizeTagParam(inReplyTo)); - out.write(" \n"); - } - */ - } - - public void receiveHeaderEnd() {} - public void receiveEnd() {} - - public static void main(String args[]) { - test(""); - test("&"); - test("a&"); - test("&a"); - test("a&a"); - test("aa&aa"); - } - private static final 
void test(String str) { - StringBuffer t = new StringBuffer(str); - String sanitized = sanitizeXML(t); - System.out.println("[" + str + "] --> [" + sanitized + "]"); - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/sml/SMLParser.java b/apps/syndie/java/src/net/i2p/syndie/sml/SMLParser.java deleted file mode 100644 index fd9d64387..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/sml/SMLParser.java +++ /dev/null @@ -1,472 +0,0 @@ -package net.i2p.syndie.sml; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import net.i2p.I2PAppContext; -import net.i2p.syndie.data.SafeURL; -import net.i2p.util.Log; - -/** - * Parse out the SML from the text, firing off info to the receiver whenever certain - * elements are available. This is a very simple parser, with no support for nested - * tags. A simple stack would be good to add, but DTSTTCPW. - * - * - */ -public class SMLParser { - private Log _log; - private static final char TAG_BEGIN = '['; - private static final char TAG_END = ']'; - private static final char LT = '<'; - private static final char GT = '>'; - private static final char EQ = '='; - private static final char DQUOTE = '"'; - private static final char QUOTE = '\''; - private static final String WHITESPACE = " \t\n\r"; - private static final char NL = '\n'; - private static final char CR = '\n'; - private static final char LF = '\f'; - - public SMLParser(I2PAppContext ctx) { - _log = ctx.logManager().getLog(SMLParser.class); - } - - public void parse(String rawSML, EventReceiver receiver) { - receiver.receiveBegin(); - int off = 0; - off = parseHeaders(rawSML, off, receiver); - receiver.receiveHeaderEnd(); - parseBody(rawSML, off, receiver); - receiver.receiveEnd(); - } - - private int parseHeaders(String rawSML, int off, EventReceiver receiver) { - if (rawSML == null) return off; - int len = rawSML.length(); - if (len == off) return off; - int keyBegin = off; - int valBegin = -1; - while (off 
< len) { - char c = rawSML.charAt(off); - if ( (c == ':') && (valBegin < 0) ) { - // moving on to the value - valBegin = off + 1; - } else if (c == '\n') { - if (valBegin < 0) { - // end of the headers - off++; - break; - } else { - String key = rawSML.substring(keyBegin, valBegin-1); - String val = rawSML.substring(valBegin, off); - receiver.receiveHeader(key.trim(), val.trim()); - valBegin = -1; - keyBegin = off + 1; - } - } - off++; - } - if ( (off >= len) && (valBegin > 0) ) { - String key = rawSML.substring(keyBegin, valBegin-1); - String val = rawSML.substring(valBegin, len); - receiver.receiveHeader(key.trim(), val.trim()); - } - return off; - } - - private void parseBody(String rawSMLBody, int off, EventReceiver receiver) { - if (rawSMLBody == null) return; - int begin = off; - int len = rawSMLBody.length(); - if (len <= off) return; - int openTagBegin = -1; - int openTagEnd = -1; - int closeTagBegin = -1; - int closeTagEnd = -1; - while (off < len) { - char c = rawSMLBody.charAt(off); - if ( (c == NL) || (c == CR) || (c == LF) ) { - // we only deal with newlines outside of a tag, since this is a ghetto parser - // without a stack, and the tag event is fired only when the tag is completed. 
- if (openTagBegin < 0) { - if (begin < off) - receiver.receivePlain(rawSMLBody.substring(begin, off)); - receiver.receiveNewline(); - off++; - begin = off; - continue; - } - } else if (c == TAG_BEGIN) { - if ( (off + 1 < len) && (TAG_BEGIN == rawSMLBody.charAt(off+1))) { - if (begin < off) - receiver.receivePlain(rawSMLBody.substring(begin, off)); - receiver.receiveLeftBracket(); - off += 2; - begin = off; - continue; - } else if (openTagBegin < 0) { - // push everything seen and not accounted for into a plain area - if (closeTagEnd < 0) { - if (begin < off) - receiver.receivePlain(rawSMLBody.substring(begin, off)); - } else { - if (closeTagEnd + 1 < off) - receiver.receivePlain(rawSMLBody.substring(closeTagEnd+1, off)); - } - openTagBegin = off; - closeTagBegin = -1; - begin = off + 1; - } else { - // ok, we are at the end of the tag, process it - closeTagBegin = off; - while ( (c != TAG_END) && (off < len) ) { - off++; - c = rawSMLBody.charAt(off); - } - parseTag(rawSMLBody, openTagBegin, openTagEnd, closeTagBegin, off, receiver); - begin = off + 1; - openTagBegin = -1; - openTagEnd = -1; - closeTagBegin = -1; - closeTagEnd = -1; - } - } else if (c == TAG_END) { - if ( (openTagBegin > 0) && (closeTagBegin < 0) ) { - openTagEnd = off; - } else if ( (off + 1 < len) && (TAG_END == rawSMLBody.charAt(off+1))) { - if (begin < off) - receiver.receivePlain(rawSMLBody.substring(begin, off)); - receiver.receiveRightBracket(); - off += 2; - begin = off; - continue; - } - } else if (c == LT) { - // see above re: newlines inside tags for why we check openTagBegin<0 - if (openTagBegin < 0) { - if (begin < off) - receiver.receivePlain(rawSMLBody.substring(begin, off)); - receiver.receiveLT(); - off++; - begin = off; - continue; - } - } else if (c == GT) { - // see above re: newlines inside tags for why we check openTagBegin<0 - if (openTagBegin < 0) { - if (begin < off) - receiver.receivePlain(rawSMLBody.substring(begin, off)); - receiver.receiveGT(); - off++; - begin = off; - 
continue; - } - } - - off++; - } - if ( (off >= len) && (openTagBegin < 0) ) { - if (closeTagEnd < 0) { - if (begin < off) - receiver.receivePlain(rawSMLBody.substring(begin, off)); - } else { - if (closeTagEnd + 1 < off) - receiver.receivePlain(rawSMLBody.substring(closeTagEnd+1, off)); - } - } - } - - private void parseTag(String source, int openTagBegin, int openTagEnd, int closeTagBegin, int closeTagEnd, EventReceiver receiver) { - String tagName = getTagName(source, openTagBegin+1); - Map attributes = getAttributes(source, openTagBegin+1+tagName.length(), openTagEnd); - String body = null; - if (openTagEnd + 1 >= closeTagBegin) - body = ""; - else - body = source.substring(openTagEnd+1, closeTagBegin); - - //System.out.println("Receiving tag [" + tagName + "] w/ open [" + source.substring(openTagBegin+1, openTagEnd) - // + "], close [" + source.substring(closeTagBegin+1, closeTagEnd) + "] body [" - // + body + "] attributes: " + attributes); - parseTag(tagName, attributes, body, receiver); - } - - private static final String T_BOLD = "b"; - private static final String T_ITALIC = "i"; - private static final String T_UNDERLINE = "u"; - private static final String T_CUT = "cut"; - private static final String T_IMAGE = "img"; - private static final String T_QUOTE = "quote"; - private static final String T_CODE = "code"; - private static final String T_BLOG = "blog"; - private static final String T_LINK = "link"; - private static final String T_ADDRESS = "address"; - private static final String T_H1 = "h1"; - private static final String T_H2 = "h2"; - private static final String T_H3 = "h3"; - private static final String T_H4 = "h4"; - private static final String T_H5 = "h5"; - private static final String T_HR = "hr"; - private static final String T_PRE = "pre"; - private static final String T_ATTACHMENT = "attachment"; - private static final String T_ARCHIVE = "archive"; - - private static final String P_THUMBNAIL = "thumbnail"; - private static final String 
P_ATTACHMENT = "attachment"; - private static final String P_WHO_QUOTED = "author"; - private static final String P_QUOTE_LOCATION = "location"; - private static final String P_CODE_LOCATION = "location"; - private static final String P_BLOG_NAME = "name"; - private static final String P_BLOG_HASH = "bloghash"; - private static final String P_BLOG_TAG = "blogtag"; - private static final String P_BLOG_ENTRY = "blogentry"; - private static final String P_LINK_LOCATION = "location"; - private static final String P_LINK_SCHEMA = "schema"; - private static final String P_ADDRESS_NAME = "name"; - private static final String P_ADDRESS_LOCATION = "location"; - private static final String P_ADDRESS_SCHEMA = "schema"; - private static final String P_ADDRESS_PROTOCOL = "proto"; - private static final String P_ATTACHMENT_ID = "id"; - private static final String P_ARCHIVE_NAME = "name"; - private static final String P_ARCHIVE_DESCRIPTION = "description"; - private static final String P_ARCHIVE_LOCATION_SCHEMA = "schema"; - private static final String P_ARCHIVE_LOCATION = "location"; - private static final String P_ARCHIVE_POSTING_KEY = "postingkey"; - - private void parseTag(String tagName, Map attr, String body, EventReceiver receiver) { - tagName = tagName.toLowerCase(); - if (T_BOLD.equals(tagName)) { - receiver.receiveBold(body); - } else if (T_ITALIC.equals(tagName)) { - receiver.receiveItalic(body); - } else if (T_UNDERLINE.equals(tagName)) { - receiver.receiveUnderline(body); - } else if (T_CUT.equals(tagName)) { - receiver.receiveCut(body); - } else if (T_IMAGE.equals(tagName)) { - receiver.receiveImage(body, getInt(P_ATTACHMENT, attr)); - } else if (T_QUOTE.equals(tagName)) { - receiver.receiveQuote(body, getString(P_WHO_QUOTED, attr), getSchema(P_QUOTE_LOCATION, attr), getLocation(P_QUOTE_LOCATION, attr)); - } else if (T_CODE.equals(tagName)) { - receiver.receiveCode(body, getSchema(P_CODE_LOCATION, attr), getLocation(P_CODE_LOCATION, attr)); - } else if 
(T_BLOG.equals(tagName)) { - List locations = new ArrayList(); - int i = 0; - while (true) { - String s = getString("archive" + i, attr); - if (s != null) - locations.add(new SafeURL(s)); - else - break; - i++; - } - receiver.receiveBlog(getString(P_BLOG_NAME, attr), getString(P_BLOG_HASH, attr), getString(P_BLOG_TAG, attr), - getLong(P_BLOG_ENTRY, attr), locations, body); - } else if (T_ARCHIVE.equals(tagName)) { - receiver.receiveArchive(getString(P_ARCHIVE_NAME, attr), getString(P_ARCHIVE_DESCRIPTION, attr), - getString(P_ARCHIVE_LOCATION_SCHEMA, attr), getString(P_ARCHIVE_LOCATION, attr), - getString(P_ARCHIVE_POSTING_KEY, attr), body); - } else if (T_LINK.equals(tagName)) { - receiver.receiveLink(getString(P_LINK_SCHEMA, attr), getString(P_LINK_LOCATION, attr), body); - } else if (T_ADDRESS.equals(tagName)) { - receiver.receiveAddress(getString(P_ADDRESS_NAME, attr), getString(P_ADDRESS_SCHEMA, attr), getString(P_ADDRESS_PROTOCOL, attr), getString(P_ADDRESS_LOCATION, attr), body); - } else if (T_H1.equals(tagName)) { - receiver.receiveH1(body); - } else if (T_H2.equals(tagName)) { - receiver.receiveH2(body); - } else if (T_H3.equals(tagName)) { - receiver.receiveH3(body); - } else if (T_H4.equals(tagName)) { - receiver.receiveH4(body); - } else if (T_H5.equals(tagName)) { - receiver.receiveH5(body); - } else if (T_HR.equals(tagName)) { - receiver.receiveHR(); - } else if (T_PRE.equals(tagName)) { - receiver.receivePre(body); - } else if (T_ATTACHMENT.equals(tagName)) { - receiver.receiveAttachment( - (int)getLong(P_ATTACHMENT_ID, attr), - (int)getLong(P_THUMBNAIL, attr), - body); - } else { - if (_log.shouldLog(Log.WARN)) - _log.warn("need to learn how to parse the tag [" + tagName + "]"); - } - } - - private String getString(String param, Map attributes) { return (String)attributes.get(param); } - private String getSchema(String param, Map attributes) { - String url = getString(param, attributes); - if (url != null) { - SafeURL u = new SafeURL(url); - return 
u.getSchema(); - } else { - return null; - } - } - - private String getLocation(String param, Map attributes) { - String url = getString(param, attributes); - if (url != null) { - SafeURL u = new SafeURL(url); - return u.getLocation(); - } else { - return null; - } - } - - private int getInt(String attributeName, Map attributes) { - String val = (String)attributes.get(attributeName.toLowerCase()); - if (val != null) { - try { - return Integer.parseInt(val.trim()); - } catch (NumberFormatException nfe) { - //nfe.printStackTrace(); - return -1; - } - } else { - return -1; - } - } - - private long getLong(String attributeName, Map attributes) { - String val = (String)attributes.get(attributeName.toLowerCase()); - if (val != null) { - try { - return Long.parseLong(val.trim()); - } catch (NumberFormatException nfe) { - //nfe.printStackTrace(); - return -1; - } - } else { - return -1; - } - } - - private String getTagName(String source, int nameStart) { - int off = nameStart; - while (true) { - char c = source.charAt(off); - if ( (c == TAG_END) || (WHITESPACE.indexOf(c) >= 0) ) - return source.substring(nameStart, off); - off++; - } - } - private Map getAttributes(String source, int attributesStart, int openTagEnd) { - Map rv = new HashMap(); - int off = attributesStart; - int nameStart = -1; - int nameEnd = -1; - int valStart = -1; - int valEnd = -1; - while (true) { - char c = source.charAt(off); - if ( (c == TAG_END) || (off >= openTagEnd) ) - break; - if (WHITESPACE.indexOf(c) < 0) { - if (nameStart < 0) { - nameStart = off; - } else if (c == EQ) { - if (nameEnd < 0) - nameEnd = off; - } else if ( c == DQUOTE ) { - if (valStart < 0) { - valStart = off; - } else { - valEnd = off; - - if ( ( nameStart >= 0 ) && - ( nameEnd >= 0 ) && - ( valStart >= 0 ) && - ( valEnd >= 0 )) { - String name = source.substring(nameStart, nameEnd); - String val = source.substring(valStart+1, valEnd); - rv.put(name.trim(), val.trim()); - } - nameStart = -1; - nameEnd = -1; - valStart = -1; 
- valEnd = -1; - } - } - } - off++; - } - return rv; - } - - public interface EventReceiver { - public void receiveHeader(String header, String value); - public void receiveLink(String schema, String location, String text); - /** @param blogArchiveLocations list of SafeURL */ - public void receiveBlog(String name, String blogKeyHash, String blogPath, long blogEntryId, - List blogArchiveLocations, String anchorText); - public void receiveArchive(String name, String description, String locationSchema, String location, - String postingKey, String anchorText); - public void receiveImage(String alternateText, int attachmentId); - public void receiveAddress(String name, String schema, String protocol, String location, String anchorText); - public void receiveAttachment(int id, int thumb, String anchorText); - public void receiveBold(String text); - public void receiveItalic(String text); - public void receiveUnderline(String text); - public void receiveH1(String text); - public void receiveH2(String text); - public void receiveH3(String text); - public void receiveH4(String text); - public void receiveH5(String text); - public void receivePre(String text); - public void receiveHR(); - public void receiveQuote(String text, String whoQuoted, String quoteLocationSchema, String quoteLocation); - public void receiveCode(String text, String codeLocationSchema, String codeLocation); - public void receiveCut(String summaryText); - public void receivePlain(String text); - public void receiveNewline(); - public void receiveLT(); - public void receiveGT(); - public void receiveLeftBracket(); - public void receiveRightBracket(); - public void receiveBegin(); - public void receiveEnd(); - public void receiveHeaderEnd(); - } - - public static void main(String args[]) { - test(null); - test(""); - test("A: B"); - test("A: B\n"); - test("A: B\nC: D"); - test("A: B\nC: D\n"); - test("A: B\nC: D\n\n"); - - test("A: B\nC: D\n\nblah"); - test("A: B\nC: D\n\nblah[["); - test("A: B\nC: 
D\n\nblah]]"); - test("A: B\nC: D\n\nblah]]blah"); - test("A: B\nC: D\n\nfoo[a]b[/a]bar"); - test("A: B\nC: D\n\nfoo[a]b[/a]bar[b][/b]"); - test("A: B\nC: D\n\nfoo[a]b[/a]bar[b][/b]baz"); - - test("A: B\nC: D\n\nhi"); - - test("A: B\n\n[a b=\"c\"]d[/a]"); - test("A: B\n\n[a b=\"c\" d=\"e\" f=\"g\"]h[/a]"); - test("A: B\n\n[a b=\"c\" d=\"e\" f=\"g\"]h[/a][a b=\"c\" d=\"e\" f=\"g\"]h[/a][a b=\"c\" d=\"e\" f=\"g\"]h[/a]"); - - test("A: B\n\n[a b=\"plural c's\" ]d[/a]"); - test("A: B\n\n[a b=\"c\" ]d[/a]"); - - test("A: B\n\n[b]This[/b] is [i]special[/i][cut]why?[/cut][u]because I say so[/u].\neven if you dont care"); - test("A: B\n\nHi\n[pre]>foo&bar<>blah!blah\nblah\nblah[/pre]foo![pre]bar[/pre]"); - //(openTagEnd seems wrong) test("A: B\n\n[link schema=\"web\" location=\"http://w.i2p?i2paddr...\"] Try it [[i2p]] [/link]"); - } - private static void test(String rawSML) { - I2PAppContext ctx = I2PAppContext.getGlobalContext(); - SMLParser parser = new SMLParser(ctx); - parser.parse(rawSML, new EventReceiverImpl(ctx)); - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/sml/ThreadedHTMLRenderer.java b/apps/syndie/java/src/net/i2p/syndie/sml/ThreadedHTMLRenderer.java deleted file mode 100644 index 70137b6f6..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/sml/ThreadedHTMLRenderer.java +++ /dev/null @@ -1,601 +0,0 @@ -package net.i2p.syndie.sml; - -import java.io.IOException; -import java.io.Writer; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; - -import net.i2p.I2PAppContext; -import net.i2p.client.naming.PetName; -import net.i2p.data.Base64; -import net.i2p.data.Hash; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.User; -import net.i2p.syndie.data.Attachment; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.EntryContainer; -import net.i2p.syndie.data.SafeURL; -import 
net.i2p.syndie.data.ThreadIndex; -import net.i2p.syndie.data.ThreadNode; -import net.i2p.syndie.web.AddressesServlet; -import net.i2p.syndie.web.ArchiveViewerBean; -import net.i2p.syndie.web.BaseServlet; -import net.i2p.syndie.web.PostServlet; -import net.i2p.util.Log; - -/** - * - */ -public class ThreadedHTMLRenderer extends HTMLRenderer { - private Log _log; - private String _baseURI; - private boolean _inlineReply; - - public ThreadedHTMLRenderer(I2PAppContext ctx) { - super(ctx); - _log = ctx.logManager().getLog(ThreadedHTMLRenderer.class); - } - - /** what, if any, post should be rendered */ - public static final String PARAM_VIEW_POST = "post"; - /** what, if any, thread should be rendered in its entirety */ - public static final String PARAM_VIEW_THREAD = "thread"; - /** what post should be visible in the nav tree */ - public static final String PARAM_VISIBLE = "visible"; - public static final String PARAM_ADD_TO_GROUP_LOCATION = "addLocation"; - public static final String PARAM_ADD_TO_GROUP_NAME = "addGroup"; - /** name of the bookmarked entry to remove */ - public static final String PARAM_REMOVE_FROM_GROUP_NAME = "removeName"; - /** group to remove from the bookmarked entry, or if blank, remove the entry itself */ - public static final String PARAM_REMOVE_FROM_GROUP = "removeGroup"; - /** add the specified tag to the favorites list */ - public static final String PARAM_ADD_TAG = "addTag"; - /** index into the nav tree to start displaying */ - public static final String PARAM_OFFSET = "offset"; - public static final String PARAM_TAGS = "tags"; - /** only show threads that the given author participates in */ - public static final String PARAM_AUTHOR = "author"; - /** only show threads started by the given author */ - public static final String PARAM_THREAD_AUTHOR = "threadAuthorOnly"; - /** search back through the blog for entries this many days */ - public static final String PARAM_DAYS_BACK = "daysBack"; - // parameters for editing one's profile - public 
static final String PARAM_PROFILE_NAME = "profileName"; - public static final String PARAM_PROFILE_DESC = "profileDesc"; - public static final String PARAM_PROFILE_URL = "profileURL"; - public static final String PARAM_PROFILE_OTHER = "profileOther"; - - public static String getFilterByTagLink(String uri, ThreadNode node, User user, String tag, String author) { - StringBuffer buf = new StringBuffer(64); - buf.append(uri).append('?'); - if (node != null) { - buf.append(PARAM_VIEW_POST).append('='); - buf.append(node.getEntry().getKeyHash().toBase64()).append('/'); - buf.append(node.getEntry().getEntryId()).append('&'); - } - - if (!empty(tag)) - buf.append(PARAM_TAGS).append('=').append(tag).append('&'); - - if (!empty(author)) - buf.append(PARAM_AUTHOR).append('=').append(author).append('&'); - - return buf.toString(); - } - - public static String getAddTagToFavoritesLink(String uri, String tag, String author, String visible, String viewPost, - String viewThread, String offset) { - //protected String getAddToGroupLink(User user, Hash author, String group, String uri, String visible, - // String viewPost, String viewThread, String offset, String tags, String filteredAuthor) { - StringBuffer buf = new StringBuffer(64); - buf.append(uri); - buf.append('?'); - if (!empty(visible)) - buf.append(PARAM_VISIBLE).append('=').append(visible).append('&'); - buf.append(PARAM_ADD_TAG).append('=').append(sanitizeTagParam(tag)).append('&'); - - if (!empty(viewPost)) - buf.append(PARAM_VIEW_POST).append('=').append(viewPost).append('&'); - else if (!empty(viewThread)) - buf.append(PARAM_VIEW_THREAD).append('=').append(viewThread).append('&'); - - if (!empty(offset)) - buf.append(PARAM_OFFSET).append('=').append(offset).append('&'); - - if (!empty(author)) - buf.append(PARAM_AUTHOR).append('=').append(author).append('&'); - - BaseServlet.addAuthActionParams(buf); - return buf.toString(); - } - - public static String getNavLink(String uri, String viewPost, String viewThread, String 
tags, String author, boolean authorOnly, int offset) { - StringBuffer buf = new StringBuffer(64); - buf.append(uri); - buf.append('?'); - if (!empty(viewPost)) - buf.append(PARAM_VIEW_POST).append('=').append(viewPost).append('&'); - else if (!empty(viewThread)) - buf.append(PARAM_VIEW_THREAD).append('=').append(viewThread).append('&'); - - if (!empty(tags)) - buf.append(PARAM_TAGS).append('=').append(tags).append('&'); - - if (!empty(author)) { - buf.append(PARAM_AUTHOR).append('=').append(author).append('&'); - if (authorOnly) - buf.append(PARAM_THREAD_AUTHOR).append("=true&"); - } - - buf.append(PARAM_OFFSET).append('=').append(offset).append('&'); - - return buf.toString(); - } - - public static String getViewPostLink(String uri, ThreadNode node, User user, boolean isPermalink, - String offset, String tags, String author, boolean authorOnly) { - if (isPermalink) { - // link to the blog view of the original poster - BlogURI rootBlog = null; - ThreadNode parent = node; - while (parent != null) { - if (parent.getParent() != null) { - parent = parent.getParent(); - } else { - rootBlog = parent.getEntry(); - break; - } - } - BlogInfo root = BlogManager.instance().getArchive().getBlogInfo(rootBlog.getKeyHash()); - return BlogRenderer.getEntryURL(parent.getEntry(), root, node.getEntry(), true); - } else { - StringBuffer buf = new StringBuffer(64); - buf.append(uri); - if (node.getChildCount() > 0) { - buf.append('?').append(PARAM_VISIBLE).append('='); - ThreadNode child = node.getChild(0); - buf.append(child.getEntry().getKeyHash().toBase64()).append('/'); - buf.append(child.getEntry().getEntryId()).append('&'); - } else { - buf.append('?').append(PARAM_VISIBLE).append('='); - buf.append(node.getEntry().getKeyHash().toBase64()).append('/'); - buf.append(node.getEntry().getEntryId()).append('&'); - } - buf.append(PARAM_VIEW_POST).append('='); - buf.append(node.getEntry().getKeyHash().toBase64()).append('/'); - buf.append(node.getEntry().getEntryId()).append('&'); - - 
if (!isPermalink) { - if (!empty(offset)) - buf.append(PARAM_OFFSET).append('=').append(offset).append('&'); - if (!empty(tags)) - buf.append(PARAM_TAGS).append('=').append(tags).append('&'); - } - - if (authorOnly && !empty(author)) { - buf.append(PARAM_AUTHOR).append('=').append(author).append('&'); - buf.append(PARAM_THREAD_AUTHOR).append("=true&"); - } else if (!isPermalink && !empty(author)) - buf.append(PARAM_AUTHOR).append('=').append(author).append('&'); - - return buf.toString(); - } - } - - public static String getViewPostLink(String uri, BlogURI post, User user, boolean isPermalink, - String offset, String tags, String author, boolean authorOnly) { - StringBuffer buf = new StringBuffer(64); - buf.append(uri); - buf.append('?').append(PARAM_VISIBLE).append('='); - buf.append(post.getKeyHash().toBase64()).append('/'); - buf.append(post.getEntryId()).append('&'); - buf.append(PARAM_VIEW_POST).append('='); - buf.append(post.getKeyHash().toBase64()).append('/'); - buf.append(post.getEntryId()).append('&'); - - if (!isPermalink) { - if (!empty(offset)) - buf.append(PARAM_OFFSET).append('=').append(offset).append('&'); - if (!empty(tags)) - buf.append(PARAM_TAGS).append('=').append(tags).append('&'); - if (!empty(author)) { - buf.append(PARAM_AUTHOR).append('=').append(author).append('&'); - if (authorOnly) - buf.append(PARAM_THREAD_AUTHOR).append("=true&"); - } - } - - return buf.toString(); - } - - private static final boolean empty(String val) { return (val == null) || (val.trim().length() <= 0); } - - /** - * @param replyHiddenFields HTML of hidden input fields necessary for the reply form to be honored - */ - public void render(User user, Writer out, Archive archive, BlogURI post, - boolean inlineReply, ThreadIndex index, String baseURI, String replyHiddenFields, - String offset, String requestTags, String filteredAuthor, boolean authorOnly) throws IOException { - EntryContainer entry = archive.getEntry(post); - if (entry == null) return; - ThreadNode node 
= index.getNode(post); - if (node == null) { - _log.error("Post is not in the index: " + post.toString()); - return; - } - _entry = entry; - - _baseURI = baseURI; - _user = user; - _out = out; - _archive = archive; - _cutBody = false; - _showImages = true; - _inlineReply = inlineReply; - _headers = new HashMap(); - _bodyBuffer = new StringBuffer(1024); - _postBodyBuffer = new StringBuffer(1024); - _addresses = new ArrayList(); - _links = new ArrayList(); - _blogs = new ArrayList(); - _archives = new ArrayList(); - - _parser.parse(entry.getEntry().getText(), this); - - out.write("\n"); - out.write("\n"); - out.write("\n"); - - String subject = (String)_headers.get(HTMLRenderer.HEADER_SUBJECT); - if (subject == null) - subject = ""; - out.write(" \n"); - out.write("\n\n"); - out.write("\n"); - out.write("\n"); - out.write("\n"); - out.write("\n\n"); - out.write("\n"); - out.write("\n"); - out.write(_postBodyBuffer.toString()); -/* -"\n" + -" \n" + -" \n" + -" \n" + -"\n" + - */ - out.write("\n"); - if (inlineReply && user.getAuthenticated() ) { - String refuseReplies = (String)_headers.get(HTMLRenderer.HEADER_REFUSE_REPLIES); - // show the reply form if we are the author or replies have not been explicitly rejected - if ( (user.getBlog().equals(post.getKeyHash())) || - (refuseReplies == null) || (!Boolean.valueOf(refuseReplies).booleanValue()) ) { - out.write("\n"); - out.write("\n"); - out.write(replyHiddenFields); - out.write(""); - out.write(""); - out.write("\n"); - out.write("\n\n"); - out.write("\n"); - out.write("\n"); - out.write("\n"); - out.write("\n"); - out.write(" \n\n\n"); - out.write("\n"); - } - } - out.write("\n"); - } - - public void receiveEnd() { - _postBodyBuffer.append("\n"); - _postBodyBuffer.append(" \n"); - _postBodyBuffer.append(" \n"); - _postBodyBuffer.append(" \n"); - _postBodyBuffer.append("\n"); - } - - public void receiveHeaderEnd() { - //_preBodyBuffer.append("
      "); - String subject = (String)_headers.get(HEADER_SUBJECT); - if (subject == null) - subject = "[no subject]"; - _preBodyBuffer.append(getSpan("subjectText")).append(sanitizeString(subject)); - _preBodyBuffer.append("
      "); - } - - private void renderMetaPetname(PetName pn, BlogInfo info) { - if (info != null) { - _preBodyBuffer.append(""); - if (pn != null) { - _preBodyBuffer.append(getSpan("metaKnown")).append(sanitizeString(pn.getName())).append(""); - } else { - String nameStr = info.getProperty("Name"); - if (nameStr == null) - _preBodyBuffer.append(getSpan("metaUnknown")).append("[no name]"); - else - _preBodyBuffer.append(getSpan("metaUnknown")).append(sanitizeString(nameStr)).append(""); - } - _preBodyBuffer.append(""); - } else { - _preBodyBuffer.append(getSpan("metaUnknown")).append("[unknown blog]"); - } - } - - protected void renderMetaCell() { - String tags[] = (_entry != null ? _entry.getTags() : null); - _preBodyBuffer.append("\n"); - - PetName pn = null; - if ( (_entry != null) && (_user != null) ) - pn = _user.getPetNameDB().getByLocation(_entry.getURI().getKeyHash().toBase64()); - //if (knownName != null) - // _preBodyBuffer.append("Pet name: ").append(sanitizeString(knownName)).append(" "); - - BlogInfo info = null; - if (_entry != null) - info = _archive.getBlogInfo(_entry.getURI()); - renderMetaPetname(pn, info); - - if ( (_user != null) && (_user.getAuthenticated()) && (_entry != null) ) { - if ( (pn == null) || (!pn.isMember("Favorites")) ) - _preBodyBuffer.append(" "); - if ( (pn == null) || (!pn.isMember("Ignore")) ) - _preBodyBuffer.append(" "); - else - _preBodyBuffer.append(" "); - _preBodyBuffer.append(" "); - if (info != null) - _preBodyBuffer.append(" "); - } - - - if ( (tags != null) && (tags.length > 0) ) { - _preBodyBuffer.append(getSpan("metaTags")).append(" Tags: "); - _preBodyBuffer.append(""); - _preBodyBuffer.append("\n"); - //_preBodyBuffer.append(""); - } - _preBodyBuffer.append(" "); - /* - String inReplyTo = (String)_headers.get(HEADER_IN_REPLY_TO); - if ( (inReplyTo != null) && (inReplyTo.trim().length() > 0) ) - _preBodyBuffer.append(" In reply to\n"); - */ - - _preBodyBuffer.append(getSpan("metaDate")); - if (_entry != null) - 
_preBodyBuffer.append(getEntryDate(_entry.getURI().getEntryId())); - else - _preBodyBuffer.append(getEntryDate(new Date().getTime())); - _preBodyBuffer.append(""); - - if ( (_user != null) && (_user.getAuthenticated()) ) { - _preBodyBuffer.append(" 0) ) - for (int i = 0; i < tags.length; i++) - tagStr.append(tags[i]).append('\t'); - String replyURL = getPostURL(_user.getBlog(), true, subject, tagStr.toString()); - _preBodyBuffer.append(" href=\"").append(replyURL).append("\">Reply\n"); - } - _preBodyBuffer.append("\n
      "); - out.write(subject); - out.write("
      \n"); - out.write(""); - - String author = null; - PetName pn = user.getPetNameDB().getByLocation(post.getKeyHash().toBase64()); - if (pn == null) { - BlogInfo info = archive.getBlogInfo(post.getKeyHash()); - if (info != null) - author = info.getProperty(BlogInfo.NAME); - } else { - author = pn.getName(); - } - if ( (author == null) || (author.trim().length() <= 0) ) - author = post.getKeyHash().toBase64().substring(0,6); - - out.write(author); - out.write(" @ "); - out.write(getEntryDate(post.getEntryId())); - - Collection tags = node.getTags(); - if ( (tags != null) && (tags.size() > 0) ) { - out.write("\nTags: \n"); - for (Iterator tagIter = tags.iterator(); tagIter.hasNext(); ) { - String tag = (String)tagIter.next(); - out.write(""); - out.write(" " + tag); - out.write("\n"); - if (user.getAuthenticated() && (!user.getFavoriteTags().contains(tag)) && (!"[none]".equals(tag)) ) { - out.write(""); - out.write("\":)\""); - out.write("\n"); - } - } - } - - out.write("\npermalink\n"); - - if (true || (!inlineReply) ) { - String refuseReply = (String)_headers.get(HEADER_REFUSE_REPLIES); - boolean allowReply = false; - if ( (refuseReply != null) && (Boolean.valueOf(refuseReply).booleanValue()) ) { - if (_entry == null ) - allowReply = false; - else if ( (_user == null) || (_user.getBlog() == null) ) - allowReply = false; - else if (_entry.getURI().getKeyHash().equals(_user.getBlog())) - allowReply = true; - else - allowReply = false; - } else { - allowReply = true; - } - if (allowReply && (_entry != null) ) { - out.write("Reply
      \n"); - } - } - - out.write("
      \n"); - out.write(_bodyBuffer.toString()); - out.write("
      \n" + -" External links:\n" + -" http://foo.i2p/\n" + -" http://bar.i2p/\n" + -"
      \n" + -" Attachments: \n" + -"
      Full thread\n" + -" Prev in thread \n" + -" Next in thread \n" + -"
      Reply: (SML reference)
      \n"); - out.write(" \n"); - out.write(" Tags: "); - BaseServlet.writeTagField(_user, "", out, "Optional tags to categorize your response", "No tags", false); - // \n"); - out.write(" in a new thread? \n"); - out.write(" refuse replies? \n"); - out.write(" attachment: \n"); - out.write("
      \n"); - - _postBodyBuffer.append("\n"); - _postBodyBuffer.append("\n"); - - //_postBodyBuffer.append("\n"); - - if ( (_entry != null) && (_entry.getAttachments() != null) && (_entry.getAttachments().length > 0) ) { - _postBodyBuffer.append(getSpan("summDetailAttachment")).append("Attachments: "); - _postBodyBuffer.append("\n"); - _postBodyBuffer.append("
      \n"); - } - - if (_blogs.size() > 0) { - _postBodyBuffer.append(getSpan("summDetailBlog")).append("Blog references:"); - for (int i = 0; i < _blogs.size(); i++) { - Blog b = (Blog)_blogs.get(i); - _postBodyBuffer.append("").append(sanitizeString(b.name, 30)).append(" "); - } - _postBodyBuffer.append("
      \n"); - } - - if (_links.size() > 0) { - _postBodyBuffer.append(getSpan("summDetailExternal")).append("External links: "); - for (int i = 0; i < _links.size(); i++) { - Link l = (Link)_links.get(i); - String schema = l.schema; - _postBodyBuffer.append("").append(sanitizeString(l.location, 30)); - _postBodyBuffer.append(getSpan("summDetailExternalNet")).append(" (").append(sanitizeString(l.schema)).append(") "); - } - _postBodyBuffer.append("
      \n"); - } - - if (_addresses.size() > 0) { - _postBodyBuffer.append(getSpan("summDetailAddr")).append("Addresses:"); - for (int i = 0; i < _addresses.size(); i++) { - Address a = (Address)_addresses.get(i); - importAddress(a); - PetName pn = null; - if (_user != null) - pn = _user.getPetNameDB().getByLocation(a.location); - if (pn != null) { - _postBodyBuffer.append(' ').append(getSpan("summDetailAddrKnown")); - _postBodyBuffer.append(sanitizeString(pn.getName())).append(""); - } else { - _postBodyBuffer.append(" ").append(sanitizeString(a.name, 30)).append(""); - } - } - _postBodyBuffer.append("
      \n"); - } - - if (_archives.size() > 0) { - _postBodyBuffer.append(getSpan("summDetailArchive")).append("Archives:"); - for (int i = 0; i < _archives.size(); i++) { - ArchiveRef a = (ArchiveRef)_archives.get(i); - _postBodyBuffer.append(" ").append(sanitizeString(a.name)).append(""); - if (a.description != null) - _postBodyBuffer.append(": ").append(getSpan("summDetailArchiveDesc")).append(sanitizeString(a.description)).append(""); - if (null == _user.getPetNameDB().getByLocation(a.location)) { - _postBodyBuffer.append(" bookmark it"); - } - } - _postBodyBuffer.append("
      \n"); - } - - if (_entry != null) { - List replies = _archive.getIndex().getReplies(_entry.getURI()); - if ( (replies != null) && (replies.size() > 0) ) { - _postBodyBuffer.append(getSpan("summDetailReplies")).append("Replies: "); - for (int i = 0; i < replies.size(); i++) { - BlogURI reply = (BlogURI)replies.get(i); - _postBodyBuffer.append(""); - _postBodyBuffer.append(getSpan("summDetailReplyAuthor")); - BlogInfo replyAuthor = _archive.getBlogInfo(reply); - if (replyAuthor != null) { - _postBodyBuffer.append(sanitizeString(replyAuthor.getProperty(BlogInfo.NAME))); - } else { - _postBodyBuffer.append(reply.getKeyHash().toBase64().substring(0,16)); - } - _postBodyBuffer.append(" on "); - _postBodyBuffer.append(getSpan("summDetailReplyDate")); - _postBodyBuffer.append(getEntryDate(reply.getEntryId())); - _postBodyBuffer.append(" "); - } - _postBodyBuffer.append("
      "); - } - } - - String inReplyTo = (String)_headers.get(HEADER_IN_REPLY_TO); - if ( (inReplyTo != null) && (inReplyTo.trim().length() > 0) ) { - BlogURI replyURI = new BlogURI(inReplyTo); - if (replyURI.getEntryId() > 0) { - _postBodyBuffer.append(" (view parent)
      \n"); - } - } - - _postBodyBuffer.append("
      \n"); - //renderSubjectCell(); - //renderMetaCell(); - //renderPreBodyCell(); - } - - public String getMetadataURL(Hash blog) { - return buildProfileURL(blog); - } - public static String buildProfileURL(Hash blog) { - if ( (blog != null) && (blog.getData() != null) ) - return "profile.jsp?" + ThreadedHTMLRenderer.PARAM_AUTHOR + "=" + - Base64.encode(blog.getData()); - else - return "profile.jsp"; - } - protected String getEntryURL() { return getEntryURL(_user != null ? _user.getShowImages() : false); } - protected String getEntryURL(boolean showImages) { - if (_entry == null) - return _baseURI; - else - return _baseURI + '?' + PARAM_VIEW_POST + '=' + - Base64.encode(_entry.getURI().getKeyHash().getData()) + '/' - + _entry.getURI().getEntryId() + '&'; - } - - public String getPageURL(User user, String selector, int numPerPage, int pageNum) { return _baseURI; } - - public String getPageURL(Hash blog, String tag, long entryId, String group, int numPerPage, int pageNum, boolean expandEntries, boolean showImages) { - StringBuffer buf = new StringBuffer(128); - buf.append(_baseURI).append('?'); - String entry = null; - if ( (blog != null) && (entryId > 0) ) { - entry = blog.toBase64() + '/' + entryId; - buf.append(PARAM_VIEW_THREAD).append('=').append(entry).append('&'); - buf.append(PARAM_VISIBLE).append('=').append(Base64.encode(blog.getData())).append('/').append(entryId).append('&'); - } else if (blog != null) { - buf.append(PARAM_AUTHOR).append('=').append(blog.toBase64()).append('&'); - } - if (tag != null) - buf.append(PARAM_TAGS).append('=').append(sanitizeTagParam(tag)).append('&'); - if ( (blog != null) && (entryId > 0) ) - buf.append("#blog://").append(entry); - return buf.toString(); - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/AddressesServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/AddressesServlet.java deleted file mode 100644 index ff1a49ccc..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/AddressesServlet.java +++ 
/dev/null @@ -1,504 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.IOException; -import java.io.PrintWriter; -import java.util.Iterator; -import java.util.TreeSet; - -import javax.servlet.http.HttpServletRequest; - -import net.i2p.client.naming.PetName; -import net.i2p.client.naming.PetNameDB; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.User; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.FilteredThreadIndex; -import net.i2p.syndie.data.ThreadIndex; -import net.i2p.syndie.sml.ThreadedHTMLRenderer; - -/** - * Show the user's addressbook - * - */ -public class AddressesServlet extends BaseServlet { - public static final String PARAM_IS_PUBLIC = "addrPublic"; - public static final String PARAM_NAME = "addrName"; - public static final String PARAM_LOC = "addrLoc"; - public static final String PARAM_FAVORITE = "addrFavorite"; - public static final String PARAM_IGNORE = "addrIgnore"; - public static final String PARAM_NET = "addrNet"; - public static final String PARAM_PROTO = "addrProto"; - public static final String PARAM_SYNDICATE = "addrSyndicate"; - public static final String PARAM_TAG = "addrTag"; - public static final String PARAM_ACTION = "action"; - - public static final String PROTO_BLOG = "syndieblog"; - public static final String PROTO_ARCHIVE = "syndiearchive"; - public static final String PROTO_I2PHEX = "i2phex"; - public static final String PROTO_EEPSITE = "eep"; - public static final String PROTO_TAG = "syndietag"; - - public static final String NET_SYNDIE = "syndie"; - public static final String NET_I2P = "i2p"; - public static final String NET_IP = "ip"; - public static final String NET_FREENET = "freenet"; - public static final String NET_TOR = "tor"; - - public static final String ACTION_DELETE_BLOG = "Delete author"; - public static final String ACTION_UPDATE_BLOG = "Update author"; - public static final String ACTION_ADD_BLOG = "Add author"; - public static final String 
ACTION_PURGE_AND_BAN_BLOG = "Purge and ban author"; - - public static final String ACTION_DELETE_ARCHIVE = "Delete archive"; - public static final String ACTION_UPDATE_ARCHIVE = "Update archive"; - public static final String ACTION_ADD_ARCHIVE = "Add archive"; - - public static final String ACTION_DELETE_PEER = "Delete peer"; - public static final String ACTION_UPDATE_PEER = "Update peer"; - public static final String ACTION_ADD_PEER = "Add peer"; - - public static final String ACTION_DELETE_EEPSITE = "Delete eepsite"; - public static final String ACTION_UPDATE_EEPSITE = "Update eepsite"; - public static final String ACTION_ADD_EEPSITE = "Add eepsite"; - - public static final String ACTION_DELETE_TAG = "Delete tag"; - public static final String ACTION_UPDATE_TAG = "Update tag"; - public static final String ACTION_ADD_TAG = "Add tag"; - - public static final String ACTION_DELETE_OTHER = "Delete address"; - public static final String ACTION_UPDATE_OTHER = "Update address"; - public static final String ACTION_ADD_OTHER = "Add other address"; - - protected void renderServletDetails(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index, - int threadOffset, BlogURI visibleEntry, Archive archive) throws IOException { - if (!user.getAuthenticated()) { - out.write("\n"); - } else { - PetNameDB db = user.getPetNameDB(); - String uri = req.getRequestURI(); - - PetName pn = buildNewName(req, PROTO_BLOG); - _log.debug("pn for protoBlog [" + req.getParameter(PARAM_PROTO) + "]: " + pn); - renderBlogs(user, db, uri, pn, out); - pn = buildNewName(req, PROTO_ARCHIVE); - _log.debug("pn for protoArchive [" + req.getParameter(PARAM_PROTO) + "]: " + pn); - renderArchives(user, db, uri, pn, out); - pn = buildNewName(req, PROTO_TAG); - _log.debug("pn for protoTag [" + req.getParameter(PARAM_TAG) + "]: " + pn); - renderTags(user, db, uri, pn, out); - pn = buildNewName(req, PROTO_I2PHEX); - _log.debug("pn for protoPhex [" + req.getParameter(PARAM_PROTO) + "]: " + pn); - 
renderI2Phex(user, db, uri, pn, out); - pn = buildNewName(req, PROTO_EEPSITE); - _log.debug("pn for protoEep [" + req.getParameter(PARAM_PROTO) + "]: " + pn); - renderEepsites(user, db, uri, pn, out); - pn = buildNewName(req); - _log.debug("pn for proto other [" + req.getParameter(PARAM_PROTO) + "]: " + pn); - renderOther(user, db, uri, pn, out); - } - } - - private void renderBlogs(User user, PetNameDB db, String baseURI, PetName newName, PrintWriter out) throws IOException { - TreeSet names = new TreeSet(); - for (Iterator iter = db.getNames().iterator(); iter.hasNext(); ) { - String name = (String)iter.next(); - PetName pn = db.getByName(name); - if (PROTO_BLOG.equals(pn.getProtocol())) - names.add(name); - } - out.write("\n"); - for (Iterator iter = names.iterator(); iter.hasNext(); ) { - PetName pn = db.getByName((String)iter.next()); - out.write(""); - out.write(""); - out.write(""); - writeAuthActionFields(out); - out.write("\n"); - out.write("\n"); - - out.write("\n"); - } - - private void renderArchives(User user, PetNameDB db, String baseURI, PetName newName, PrintWriter out) throws IOException { - TreeSet names = new TreeSet(); - for (Iterator iter = db.getNames().iterator(); iter.hasNext(); ) { - String name = (String)iter.next(); - PetName pn = db.getByName(name); - if (PROTO_ARCHIVE.equals(pn.getProtocol())) - names.add(name); - } - out.write("\n"); - for (Iterator iter = names.iterator(); iter.hasNext(); ) { - PetName pn = db.getByName((String)iter.next()); - out.write(""); - writeAuthActionFields(out); - out.write(""); - out.write(""); - out.write("\n"); - out.write("\n"); - } - - out.write(""); - writeAuthActionFields(out); - out.write(""); - out.write(""); - out.write("\n"); - out.write("\n"); - - out.write("\n"); - } - - private void renderI2Phex(User user, PetNameDB db, String baseURI, PetName newName, PrintWriter out) throws IOException { - TreeSet names = new TreeSet(); - for (Iterator iter = db.getNames().iterator(); iter.hasNext(); ) { - 
String name = (String)iter.next(); - PetName pn = db.getByName(name); - if (PROTO_I2PHEX.equals(pn.getProtocol())) - names.add(name); - } - out.write("\n"); - - for (Iterator iter = names.iterator(); iter.hasNext(); ) { - PetName pn = db.getByName((String)iter.next()); - out.write(""); - writeAuthActionFields(out); - out.write(""); - out.write(""); - out.write("\n"); - out.write("\n"); - } - - out.write(""); - writeAuthActionFields(out); - out.write(""); - out.write(""); - out.write("\n"); - out.write("\n"); - - out.write("\n"); - } - private void renderEepsites(User user, PetNameDB db, String baseURI, PetName newName, PrintWriter out) throws IOException { - TreeSet names = new TreeSet(); - for (Iterator iter = db.getNames().iterator(); iter.hasNext(); ) { - String name = (String)iter.next(); - PetName pn = db.getByName(name); - if (PROTO_EEPSITE.equals(pn.getProtocol())) - names.add(name); - } - out.write("\n"); - - for (Iterator iter = names.iterator(); iter.hasNext(); ) { - PetName pn = db.getByName((String)iter.next()); - out.write(""); - writeAuthActionFields(out); - out.write(""); - out.write(""); - out.write("\n"); - out.write("\n"); - } - - out.write(""); - writeAuthActionFields(out); - out.write(""); - out.write(""); - out.write("\n"); - out.write("\n"); - - out.write("\n"); - } - - private void renderTags(User user, PetNameDB db, String baseURI, PetName newName, PrintWriter out) throws IOException { - TreeSet names = new TreeSet(); - for (Iterator iter = db.getNames().iterator(); iter.hasNext(); ) { - String name = (String)iter.next(); - PetName pn = db.getByName(name); - if (PROTO_TAG.equals(pn.getProtocol())) - names.add(name); - } - out.write("\n"); - - for (Iterator iter = names.iterator(); iter.hasNext(); ) { - PetName pn = db.getByName((String)iter.next()); - out.write(""); - writeAuthActionFields(out); - out.write(""); - out.write(""); - out.write("\n"); - out.write("\n"); - } - - out.write(""); - writeAuthActionFields(out); - out.write(""); - 
out.write(""); - out.write("\n"); - out.write("\n"); - - out.write("\n"); - } - - private void renderOther(User user, PetNameDB db, String baseURI, PetName newName, PrintWriter out) throws IOException { - TreeSet names = new TreeSet(); - for (Iterator iter = db.getNames().iterator(); iter.hasNext(); ) { - String name = (String)iter.next(); - PetName pn = db.getByName(name); - if (isRightProtocol(pn.getProtocol())) - names.add(name); - } - out.write("\n"); - - for (Iterator iter = names.iterator(); iter.hasNext(); ) { - PetName pn = db.getByName((String)iter.next()); - out.write(""); - writeAuthActionFields(out); - out.write("\n"); - out.write("\n"); - } - - out.write(""); - writeAuthActionFields(out); - - out.write("\n"); - out.write("\n"); - - out.write("\n"); - } - - /** build the 'other' name passed in */ - private PetName buildNewName(HttpServletRequest req) { return buildNewName(req, null); } - /** build a petname based by the request passed in, if the new entry is of the given protocol */ - private PetName buildNewName(HttpServletRequest req, String protocol) { - PetName pn = new PetName(); - if (!isRightProtocol(req, protocol)) { - pn.setIsPublic(true); - pn.setName(""); - pn.setLocation(""); - if (protocol == null) - pn.setProtocol(""); - else - pn.setProtocol(protocol); - pn.setNetwork(""); - return pn; - } else { - pn = buildNewAddress(req); - } - return pn; - } - - private String getParam(HttpServletRequest req, String param) { - if (empty(req, param)) { - return ""; - } else { - String val = req.getParameter(param); - return val; - } - } - - - private boolean isRightProtocol(HttpServletRequest req, String protocol) { - // if they hit submit, they are actually updating stuff, so don't include a 'new' one - if (!empty(req, PARAM_ACTION)) - return false; - - return isRightProtocol(protocol, req.getParameter(PARAM_PROTO)); - } - private boolean isRightProtocol(String proto) { return isRightProtocol((String)null, proto); } - private boolean 
isRightProtocol(String proto, String reqProto) { - if (empty(reqProto)) - return false; - if (proto == null) { - if (PROTO_ARCHIVE.equals(reqProto) || - PROTO_BLOG.equals(reqProto) || - PROTO_EEPSITE.equals(reqProto) || - PROTO_TAG.equals(reqProto) || - PROTO_I2PHEX.equals(reqProto)) - return false; - else // its something other than the four default types - return true; - } else { - return proto.equals(reqProto); - } - } - - protected String getTitle() { return "Syndie :: Addressbook"; } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/AdminServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/AdminServlet.java deleted file mode 100644 index fa9b852ba..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/AdminServlet.java +++ /dev/null @@ -1,79 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.IOException; -import java.io.PrintWriter; - -import javax.servlet.http.HttpServletRequest; - -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.User; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.ThreadIndex; - -/** - * Admin form - * - */ -public class AdminServlet extends BaseServlet { - protected void renderServletDetails(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index, - int threadOffset, BlogURI visibleEntry, Archive archive) throws IOException { - if (BlogManager.instance().authorizeRemote(user)) { - displayForm(user, req, out); - } else { - out.write("\n"); - } - } - - private void displayForm(User user, HttpServletRequest req, PrintWriter out) throws IOException { - out.write("\n"); - writeAuthActionFields(out); - out.write("\n"); - out.write("\n"); - } - - protected String getTitle() { return "Syndie :: Configuration"; } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/ArchiveServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/ArchiveServlet.java deleted file mode 100644 index afd701b9c..000000000 --- 
a/apps/syndie/java/src/net/i2p/syndie/web/ArchiveServlet.java +++ /dev/null @@ -1,226 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Set; - -import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; - -import net.i2p.I2PAppContext; -import net.i2p.data.Base64; -import net.i2p.data.DataHelper; -import net.i2p.data.Hash; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.data.ArchiveIndex; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.BlogURI; - -/** - * - */ -public class ArchiveServlet extends HttpServlet { - - public void service(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { - handle(req, resp); - } - - public void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { - handle(req, resp); - } - public void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { - handle(req, resp); - } - public void doPut(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { - handle(req, resp); - } - - public void handle(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { - String path = req.getPathInfo(); - if ( (path == null) || (path.trim().length() <= 1) ) { - renderRootIndex(resp); - return; - } else if (path.endsWith(Archive.INDEX_FILE)) { - renderSummary(req.getHeader("If-None-Match"), resp); - } else if (path.indexOf("export.zip") != -1) { - ExportServlet.export(req, resp); - } else { - String blog = getBlog(path); - if (path.endsWith(Archive.METADATA_FILE)) { - renderMetadata(blog, resp); - } else if 
(path.endsWith(".snd")) { - renderEntry(blog, getEntry(path), resp); - } else { - renderBlogIndex(blog, resp); - } - } - } - - private String getBlog(String path) { - //System.err.println("Blog: [" + path + "]"); - int start = 0; - int end = -1; - int len = path.length(); - for (int i = 0; i < len; i++) { - if (path.charAt(i) != '/') { - start = i; - break; - } - } - for (int j = start + 1; j < len; j++) { - if (path.charAt(j) == '/') { - end = j; - break; - } - } - if (end < 0) end = len; - String rv = path.substring(start, end); - //System.err.println("Blog: [" + path + "] rv: [" + rv + "]"); - return rv; - } - - private long getEntry(String path) { - int start = path.lastIndexOf('/'); - if (start < 0) return -1; - if (!(path.endsWith(".snd"))) return -1; - String rv = path.substring(start+1, path.length()-".snd".length()); - //System.err.println("Entry: [" + path + "] rv: [" + rv + "]"); - try { - return Long.parseLong(rv); - } catch (NumberFormatException nfe) { - return -1; - } - } - - private void renderRootIndex(HttpServletResponse resp) throws ServletException, IOException { - resp.setContentType("text/html;charset=utf-8"); - //resp.setCharacterEncoding("UTF-8"); - OutputStream out = resp.getOutputStream(); - out.write(DataHelper.getUTF8("archive.txt
      \n")); - ArchiveIndex index = BlogManager.instance().getArchive().getIndex(); - Set blogs = index.getUniqueBlogs(); - for (Iterator iter = blogs.iterator(); iter.hasNext(); ) { - Hash blog = (Hash)iter.next(); - String s = blog.toBase64(); - out.write(DataHelper.getUTF8("" + s + "
      \n")); - } - out.close(); - } - - public static final String HEADER_EXPORT_CAPABLE = "X-Syndie-Export-Capable"; - - private void renderSummary(String etag, HttpServletResponse resp) throws ServletException, IOException { - resp.setContentType("text/plain;charset=utf-8"); - //resp.setCharacterEncoding("UTF-8"); - ArchiveIndex index = BlogManager.instance().getArchive().getIndex(); - byte[] indexUTF8 = DataHelper.getUTF8(index.toString()); - String newEtag = "\"" + I2PAppContext.getGlobalContext().sha().calculateHash(indexUTF8).toBase64() + "\""; - if (etag != null && etag.equals(newEtag)) { - resp.sendError(304, "Archive not modified"); - return; - } - resp.setHeader(HEADER_EXPORT_CAPABLE, "true"); - resp.setHeader("ETag", newEtag); - OutputStream out = resp.getOutputStream(); - out.write(indexUTF8); - out.close(); - } - - private void renderMetadata(String blog, HttpServletResponse resp) throws ServletException, IOException { - byte b[] = Base64.decode(blog); - if ( (b == null) || (b.length != Hash.HASH_LENGTH) ) { - resp.sendError(404, "Invalid blog requested"); - return; - } - Hash h = new Hash(b); - BlogInfo info = BlogManager.instance().getArchive().getBlogInfo(h); - if (info == null) { - resp.sendError(404, "Blog does not exist"); - return; - } - resp.setContentType("application/x-syndie-meta"); - OutputStream out = resp.getOutputStream(); - info.write(out); - out.close(); - } - - private void renderBlogIndex(String blog, HttpServletResponse resp) throws ServletException, IOException { - byte b[] = Base64.decode(blog); - if ( (b == null) || (b.length != Hash.HASH_LENGTH) ) { - resp.sendError(404, "Invalid blog requested"); - return; - } - Hash h = new Hash(b); - - BlogInfo info = BlogManager.instance().getArchive().getBlogInfo(h); - if (info == null) { - resp.sendError(404, "Blog does not exist"); - return; - } - resp.setContentType("text/html;charset=utf-8"); - //resp.setCharacterEncoding("UTF-8"); - OutputStream out = resp.getOutputStream(); - 
out.write(DataHelper.getUTF8("..
      \n")); - out.write(DataHelper.getUTF8("" + Archive.METADATA_FILE + "
      \n")); - List entries = new ArrayList(64); - BlogManager.instance().getArchive().getIndex().selectMatchesOrderByEntryId(entries, h, null); - for (int i = 0; i < entries.size(); i++) { - BlogURI entry = (BlogURI)entries.get(i); - out.write(DataHelper.getUTF8("" + entry.getEntryId() + ".snd
      \n")); - } - out.close(); - } - - private void renderEntry(String blog, long entryId, HttpServletResponse resp) throws ServletException, IOException { - byte b[] = Base64.decode(blog); - if ( (b == null) || (b.length != Hash.HASH_LENGTH) ) { - resp.sendError(404, "Invalid blog requested"); - return; - } - Hash h = new Hash(b); - BlogInfo info = BlogManager.instance().getArchive().getBlogInfo(h); - if (info == null) { - resp.sendError(404, "Blog does not exist"); - return; - } - File root = BlogManager.instance().getArchive().getArchiveDir(); - File blogDir = new File(root, blog); - if (!blogDir.exists()) { - resp.sendError(404, "Blog does not exist"); - return; - } - File entry = new File(blogDir, entryId + ".snd"); - if (!entry.exists()) { - resp.sendError(404, "Entry does not exist"); - return; - } - resp.setContentType("application/x-syndie-post"); - dump(entry, resp); - } - - private void dump(File source, HttpServletResponse resp) throws ServletException, IOException { - FileInputStream in = null; - OutputStream out = null; - try { - in = new FileInputStream(source); - out = resp.getOutputStream(); - byte buf[] = new byte[1024]; - int read = 0; - while ( (read = in.read(buf)) != -1) - out.write(buf, 0, read); - out.close(); - in.close(); - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - if (out != null) try { out.close(); } catch (IOException ioe) {} - } - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/ArchiveViewerBean.java b/apps/syndie/java/src/net/i2p/syndie/web/ArchiveViewerBean.java deleted file mode 100644 index 56ed1b60b..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/ArchiveViewerBean.java +++ /dev/null @@ -1,822 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.io.Writer; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Map; 
-import java.util.Properties; -import java.util.Set; -import java.util.TreeMap; - -import net.i2p.I2PAppContext; -import net.i2p.client.naming.PetName; -import net.i2p.client.naming.PetNameDB; -import net.i2p.data.Base64; -import net.i2p.data.DataHelper; -import net.i2p.data.Hash; -import net.i2p.data.SigningPublicKey; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.User; -import net.i2p.syndie.data.ArchiveIndex; -import net.i2p.syndie.data.Attachment; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.EntryContainer; -import net.i2p.syndie.sml.HTMLRenderer; -import net.i2p.syndie.sml.ThreadedHTMLRenderer; - -/** - * - */ -public class ArchiveViewerBean { - public static String getBlogName(String keyHash) { - BlogInfo info = BlogManager.instance().getArchive().getBlogInfo(new Hash(Base64.decode(keyHash))); - if (info == null) - return HTMLRenderer.sanitizeString(keyHash); - else - return HTMLRenderer.sanitizeString(info.getProperty("Name")); - } - - /** base64 encoded hash of the blog's public key, or null for no filtering by blog */ - public static final String PARAM_BLOG = "blog"; - /** base64 encoded tag to filter by, or blank for no filtering by tags */ - public static final String PARAM_TAG = "tag"; - /** entry id within the blog if we only want to see that one */ - public static final String PARAM_ENTRY = "entry"; - /** base64 encoded group within the user's filters */ - public static final String PARAM_GROUP = "group"; - /** how many entries per page to show at once */ - public static final String PARAM_NUM_PER_PAGE = "pageSize"; - /** which page of entries to render */ - public static final String PARAM_PAGE_NUMBER = "pageNum"; - /** should we expand each entry to show the full contents */ - public static final String PARAM_EXPAND_ENTRIES = "expand"; - /** should entries be rendered with the images shown inline */ - public static final String PARAM_SHOW_IMAGES = 
"images"; - /** should we regenerate an index to the archive before rendering */ - public static final String PARAM_REGENERATE_INDEX = "regenerateIndex"; - /** which attachment should we serve up raw */ - public static final String PARAM_ATTACHMENT = "attachment"; - /** we are replying to a particular blog/tag/entry/whatever (value == base64 encoded selector) */ - public static final String PARAM_IN_REPLY_TO = "inReplyTo"; - - /** prepopulate the subject field with the given value */ - public static final String PARAM_SUBJECT = "replySubject"; - /** prepopulate the tags with the given value */ - public static final String PARAM_TAGS = "replyTags"; - /** prepopulate the body with the given value */ - public static final String PARAM_PARENT = "parentURI"; - - /** - * Drop down multichooser: - * blog://base64(key) - * tag://base64(tag) - * blogtag://base64(key)/base64(tag) - * entry://base64(key)/entryId - * group://base64(groupName) - * ALL - */ - public static final String PARAM_SELECTOR = "selector"; - public static final String SEL_ALL = "ALL"; - public static final String SEL_BLOG = "blog://"; - public static final String SEL_TAG = "tag://"; - public static final String SEL_BLOGTAG = "blogtag://"; - public static final String SEL_ENTRY = "entry://"; - public static final String SEL_GROUP = "group://"; - /** submit field for the selector form */ - public static final String PARAM_SELECTOR_ACTION = "action"; - public static final String SEL_ACTION_SET_AS_DEFAULT = "Set as default"; - - public static void renderBlogSelector(User user, Map parameters, Writer out) throws IOException { - String sel = getString(parameters, PARAM_SELECTOR); - String action = getString(parameters, PARAM_SELECTOR_ACTION); - if ( (sel != null) && (action != null) && (SEL_ACTION_SET_AS_DEFAULT.equals(action)) ) { - user.setDefaultSelector(HTMLRenderer.sanitizeString(sel, false)); - BlogManager.instance().saveUser(user); - } - - out.write(""); - - int numPerPage = getInt(parameters, 
PARAM_NUM_PER_PAGE, 5); - int pageNum = getInt(parameters, PARAM_PAGE_NUMBER, 0); - boolean expandEntries = getBool(parameters, PARAM_EXPAND_ENTRIES, (user != null ? user.getShowExpanded() : false)); - boolean showImages = getBool(parameters, PARAM_SHOW_IMAGES, (user != null ? user.getShowImages() : false)); - - out.write(""); - out.write(""); - out.write(""); - out.write(""); - - } - - private static String getDefaultSelector(User user, Map parameters) { - if ( (user == null) || (user.getDefaultSelector() == null) ) - return BlogManager.instance().getArchive().getDefaultSelector(); - else - return user.getDefaultSelector(); - } - - public static void renderBlogs(User user, Map parameters, Writer out, String afterPagination) throws IOException { - String blogStr = getString(parameters, PARAM_BLOG); - Hash blog = null; - if (blogStr != null) blog = new Hash(Base64.decode(blogStr)); - if ( (blog != null) && (blog.getData() == null) ) blog = null; - String tag = getString(parameters, PARAM_TAG); - if (tag != null) tag = DataHelper.getUTF8(Base64.decode(tag)); - - long entryId = -1; - if (blog != null) { - String entryIdStr = getString(parameters, PARAM_ENTRY); - try { - entryId = Long.parseLong(entryIdStr); - } catch (NumberFormatException nfe) {} - } - String group = getString(parameters, PARAM_GROUP); - if (group != null) group = DataHelper.getUTF8(Base64.decode(group)); - - String sel = getString(parameters, PARAM_SELECTOR); - - if (getString(parameters, "action") != null) { - tag = null; - blog = null; - sel = null; - group = null; - } - - if ( (sel == null) && (blog == null) && (group == null) && (tag == null) ) - sel = getDefaultSelector(user, parameters); - if (sel != null) { - Selector s = new Selector(sel); - blog = s.blog; - tag = s.tag; - entryId = s.entry; - group = s.group; - } - - int numPerPage = getInt(parameters, PARAM_NUM_PER_PAGE, 5); - int pageNum = getInt(parameters, PARAM_PAGE_NUMBER, 0); - boolean expandEntries = getBool(parameters, 
PARAM_EXPAND_ENTRIES, (user != null ? user.getShowExpanded() : false)); - boolean showImages = getBool(parameters, PARAM_SHOW_IMAGES, (user != null ? user.getShowImages() : false)); - boolean regenerateIndex = getBool(parameters, PARAM_REGENERATE_INDEX, false); - try { - renderBlogs(user, blog, tag, entryId, group, numPerPage, pageNum, expandEntries, showImages, regenerateIndex, sel, out, afterPagination); - } catch (IOException ioe) { - ioe.printStackTrace(); - throw ioe; - } catch (RuntimeException re) { - re.printStackTrace(); - throw re; - } - } - - public static class Selector { - public Hash blog; - public String tag; - public long entry; - public String group; - public Selector(String selector) { - entry = -1; - blog = null; - tag = null; - if (selector != null) { - if (selector.startsWith(SEL_BLOG)) { - String blogStr = selector.substring(SEL_BLOG.length()); - //System.out.println("Selector [" + selector + "] blogString: [" + blogStr + "]"); - byte h[] = Base64.decode(blogStr); - if (h != null) - blog = new Hash(h); - //else - // System.out.println("blog string does not decode properly: [" + blogStr + "]"); - } else if (selector.startsWith(SEL_BLOGTAG)) { - int tagStart = selector.lastIndexOf('/'); - String blogStr = selector.substring(SEL_BLOGTAG.length(), tagStart); - blog = new Hash(Base64.decode(blogStr)); - if (blog.getData() == null) { - System.out.println("Blog string [" + blogStr + "] does not decode"); - blog = null; - return; - } - tag = selector.substring(tagStart+1); - String origTag = tag; - byte rawDecode[] = null; - if (tag != null) { - rawDecode = Base64.decode(tag); - tag = DataHelper.getUTF8(rawDecode); - } - //System.out.println("Selector [" + selector + "] blogString: [" + blogStr + "] tag: [" + tag + "]"); - if (false && tag != null) { - StringBuffer b = new StringBuffer(tag.length()*2); - for (int j = 0; j < tag.length(); j++) { - b.append((int)tag.charAt(j)); - if (rawDecode.length > j) - b.append('.').append((int)rawDecode[j]); - 
b.append(' '); - } - b.append("encoded as "); - for (int j = 0; j < origTag.length(); j++) { - b.append((int)origTag.charAt(j)).append(' '); - } - //System.out.println("selected tag: " + b.toString()); - } - } else if (selector.startsWith(SEL_TAG)) { - tag = selector.substring(SEL_TAG.length()); - byte rawDecode[] = null; - if (tag != null) { - rawDecode = Base64.decode(tag); - tag = DataHelper.getUTF8(rawDecode); - } - //System.out.println("Selector [" + selector + "] tag: [" + tag + "]"); - if (false && tag != null) { - StringBuffer b = new StringBuffer(tag.length()*2); - for (int j = 0; j < tag.length(); j++) { - b.append((int)tag.charAt(j)); - if (rawDecode.length > j) - b.append('.').append((int)rawDecode[j]); - b.append(' '); - } - //System.out.println("selected tag: " + b.toString()); - } - } else if (selector.startsWith(SEL_ENTRY)) { - int entryStart = selector.lastIndexOf('/'); - String blogStr = blogStr = selector.substring(SEL_ENTRY.length(), entryStart); - String entryStr = selector.substring(entryStart+1); - try { - entry = Long.parseLong(entryStr); - Hash h = new Hash(Base64.decode(blogStr)); - if (h.getData() != null) - blog = h; - //else - // System.out.println("Blog does not decode [" + blogStr + "]"); - //System.out.println("Selector [" + selector + "] blogString: [" + blogStr + "] entry: [" + entry + "]"); - } catch (NumberFormatException nfe) {} - } else if (selector.startsWith(SEL_GROUP)) { - group = DataHelper.getUTF8(Base64.decode(selector.substring(SEL_GROUP.length()))); - //System.out.println("Selector [" + selector + "] group: [" + group + "]"); - } - } - } - } - - private static void renderBlogs(User user, Hash blog, String tag, long entryId, String group, int numPerPage, int pageNum, - boolean expandEntries, boolean showImages, boolean regenerateIndex, String selector, Writer out, String afterPagination) throws IOException { - Archive archive = BlogManager.instance().getArchive(); - if (regenerateIndex) - archive.regenerateIndex(); - 
ArchiveIndex index = archive.getIndex(); - List entries = pickEntryURIs(user, index, blog, tag, entryId, group); - //System.out.println("Searching for " + blog + "/" + tag + "/" + entryId + "/" + pageNum + "/" + numPerPage + "/" + group); - //System.out.println("Entry URIs: " + entries); - - HTMLRenderer renderer = new HTMLRenderer(I2PAppContext.getGlobalContext()); - int start = pageNum * numPerPage; - int end = start + numPerPage; - int pages = 1; - if (entries.size() <= 1) { - // just one, so no pagination, etc - start = 0; - end = 1; - } else { - if (end >= entries.size()) - end = entries.size(); - if ( (pageNum < 0) || (numPerPage <= 0) ) { - start = 0; - end = entries.size() - 1; - } else { - HTMLRenderer rend = new ThreadedHTMLRenderer(I2PAppContext.getGlobalContext()); - pages = entries.size() / numPerPage; - if (numPerPage * pages < entries.size()) - pages++; - if (pageNum > 0) { - String prevURL = null; - prevURL = rend.getPageURL(blog, tag, entryId, group, numPerPage, pageNum-1, expandEntries, showImages); - //System.out.println("prevURL: " + prevURL); - out.write(" <<"); - } else { - out.write(" << "); - } - out.write("Page " + (pageNum+1) + " of " + pages + ""); - if (pageNum + 1 < pages) { - String nextURL = null; - nextURL = rend.getPageURL(blog, tag, entryId, group, numPerPage, pageNum+1, expandEntries, showImages); - //System.out.println("nextURL: " + nextURL); - out.write(" >>"); - } else { - out.write(" >>"); - } - } - } - - /* - out.write(" "); - - if (showImages) - out.write("Hide images"); - else - out.write("Show images"); - - if (expandEntries) - out.write(" Hide details"); - else - out.write(" Expand details"); - - out.write(""); - */ - - if (afterPagination != null) - out.write(afterPagination); - - if (entries.size() <= 0) end = -1; - //System.out.println("Entries.size: " + entries.size() + " start=" + start + " end=" + end); - for (int i = start; i < end; i++) { - BlogURI uri = (BlogURI)entries.get(i); - EntryContainer c = 
archive.getEntry(uri); - try { - if (c == null) - renderer.renderUnknownEntry(user, archive, uri, out); - else - renderer.render(user, archive, c, out, !expandEntries, showImages); - } catch (RuntimeException e) { - e.printStackTrace(); - throw e; - } - } - } - - public static List pickEntryURIs(User user, ArchiveIndex index, Hash blog, String tag, long entryId, String group) { - if ( (blog != null) && ( (blog.getData() == null) || (blog.getData().length != Hash.HASH_LENGTH) ) ) - blog = null; - List rv = new ArrayList(16); - if ( (blog != null) && (entryId >= 0) ) { - rv.add(new BlogURI(blog, entryId)); - return rv; - } - - if ( (group != null) && (user != null) ) { - List selectors = (List)user.getBlogGroups().get(group); - if (selectors != null) { - //System.out.println("Selectors for group " + group + ": " + selectors); - for (int i = 0; i < selectors.size(); i++) { - String sel = (String)selectors.get(i); - Selector s = new Selector(sel); - if ( (s.entry >= 0) && (s.blog != null) && (s.group == null) && (s.tag == null) ) - rv.add(new BlogURI(s.blog, s.entry)); - else - index.selectMatchesOrderByEntryId(rv, s.blog, s.tag); - } - } - PetNameDB db = user.getPetNameDB(); - for (Iterator iter = db.getNames().iterator(); iter.hasNext(); ) { - String name = (String)iter.next(); - PetName pn = db.getByName(name); - if ("syndie".equals(pn.getNetwork()) && "syndieblog".equals(pn.getProtocol()) && pn.isMember(group)) { - byte pnLoc[] = Base64.decode(pn.getLocation()); - if (pnLoc != null) { - Hash pnHash = new Hash(pnLoc); - index.selectMatchesOrderByEntryId(rv, pnHash, null); - } - } - } - sort(rv); - if (rv.size() > 0) - return rv; - } - index.selectMatchesOrderByEntryId(rv, blog, tag); - filterIgnored(user, rv); - return rv; - } - - private static void filterIgnored(User user, List uris) { - for (int i = 0; i < uris.size(); i++) { - BlogURI uri = (BlogURI)uris.get(i); - Hash k = uri.getKeyHash(); - if (k == null) continue; - PetName pn = 
user.getPetNameDB().getByLocation(k.toBase64()); - if ( (pn != null) && (pn.isMember("Ignore")) ) { - uris.remove(i); - i--; - } - } - } - - private static void sort(List uris) { - TreeMap ordered = new TreeMap(); - while (uris.size() > 0) { - BlogURI uri = (BlogURI)uris.remove(0); - int off = 0; - while (ordered.containsKey(new Long(0 - off - uri.getEntryId()))) - off++; - ordered.put(new Long(0-off-uri.getEntryId()), uri); - } - for (Iterator iter = ordered.values().iterator(); iter.hasNext(); ) - uris.add(iter.next()); - } - - public static final String getString(Map parameters, String param) { - if ( (parameters == null) || (parameters.get(param) == null) ) - return null; - Object vals = parameters.get(param); - if (vals.getClass().isArray()) { - String v[] = (String[])vals; - if (v.length > 0) - return ((String[])vals)[0]; - else - return null; - } else if (vals instanceof Collection) { - Collection c = (Collection)vals; - if (c.size() > 0) - return (String)c.iterator().next(); - else - return null; - } else if (vals instanceof String) { - return (String)vals; - } else { - return null; - } - } - public static final String[] getStrings(Map parameters, String param) { - if ( (parameters == null) || (parameters.get(param) == null) ) - return null; - Object vals = parameters.get(param); - if (vals.getClass().isArray()) { - return (String[])vals; - } else if (vals instanceof Collection) { - Collection c = (Collection)vals; - if (c.size() <= 0) return null; - String rv[] = new String[c.size()]; - int i = 0; - for (Iterator iter = c.iterator(); iter.hasNext(); i++) - rv[i] = (String)iter.next(); - return rv; - } else { - return null; - } - } - - private static final int getInt(Map param, String key, int defaultVal) { - String val = getString(param, key); - if (val != null) { - try { return Integer.parseInt(val); } catch (NumberFormatException nfe) {} - } - return defaultVal; - } - - private static final boolean getBool(Map param, String key, boolean defaultVal) { - 
String val = getString(param, key); - if (val != null) { - return ("true".equals(val) || "yes".equals(val)); - } - return defaultVal; - } - - public static void renderAttachment(Map parameters, OutputStream out) throws IOException { - renderAttachment(getAttachment(parameters), out); - } - public static void renderAttachment(Attachment a, OutputStream out) throws IOException { - if (a == null) { - renderInvalidAttachment(out); - } else { - InputStream data = a.getDataStream(); - byte buf[] = new byte[1024]; - int read = 0; - while ( (read = data.read(buf)) != -1) - out.write(buf, 0, read); - data.close(); - } - } - - public static final String getAttachmentContentType(Map parameters) { - return getAttachmentContentType(getAttachment(parameters)); - } - public static final String getAttachmentContentType(Attachment attachment) { - if (attachment == null) - return "text/html"; - String mime = attachment.getMimeType(); - if ( (mime != null) && ((mime.startsWith("image/") || mime.startsWith("text/plain"))) ) - return mime; - return "application/octet-stream"; - } - - public static final boolean getAttachmentShouldShowInline(Map parameters) { - return getAttachmentShouldShowInline(getAttachment(parameters)); - } - public static final boolean getAttachmentShouldShowInline(Attachment a) { - if (a == null) - return true; - String mime = a.getMimeType(); - if ( (mime != null) && ((mime.startsWith("image/") || mime.startsWith("text/plain"))) ) - return true; - else - return false; - } - - public static final int getAttachmentContentLength(Map parameters) { - return getAttachmentContentLength(getAttachment(parameters)); - } - public static final int getAttachmentContentLength(Attachment a) { - if (a != null) - return a.getDataLength(); - else - return -1; - } - - public static final String getAttachmentFilename(Map parameters) { - return getAttachmentFilename(getAttachment(parameters)); - } - public static final String getAttachmentFilename(Attachment a) { - if (a != null) - 
return a.getName(); - else - return "attachment.dat"; - } - - private static final Attachment getAttachment(Map parameters) { - String blogStr = getString(parameters, PARAM_BLOG); - Hash blog = null; - if (blogStr != null) blog = new Hash(Base64.decode(blogStr)); - long entryId = -1; - if (blogStr != null) { - String entryIdStr = getString(parameters, PARAM_ENTRY); - try { - entryId = Long.parseLong(entryIdStr); - } catch (NumberFormatException nfe) {} - } - int attachment = getInt(parameters, PARAM_ATTACHMENT, -1); - - Archive archive = BlogManager.instance().getArchive(); - EntryContainer entry = archive.getEntry(new BlogURI(blog, entryId)); - if ( (entry != null) && (attachment >= 0) && (attachment < entry.getAttachments().length) ) { - return entry.getAttachments()[attachment]; - } - return null; - } - - private static void renderInvalidAttachment(OutputStream out) throws IOException { - out.write(DataHelper.getUTF8("No such entry, or no such attachment")); - } - - private static String getURL(String uri, Map parameters) { - StringBuffer rv = new StringBuffer(128); - rv.append(uri); - rv.append('?'); - if (parameters != null) { - for (Iterator iter = parameters.keySet().iterator(); iter.hasNext(); ) { - String key = (String)iter.next(); - String vals[] = getStrings(parameters, key); - // we are already looking at the page with the given parameters, no need to further sanitize - if ( (key != null) && (vals != null) ) - for (int i = 0; i < vals.length; i++) - rv.append(key).append('=').append(vals[i]).append('&'); - } - } - return rv.toString(); - } - - private static void updateMetadata(User viewer, Map parameters, Writer out) throws IOException { - if ( (viewer == null) || (!viewer.getAuthenticated()) ) - return; - String blogStr = getString(parameters, PARAM_BLOG); - if (blogStr != null) { - Hash blog = new Hash(Base64.decode(blogStr)); - Archive archive = BlogManager.instance().getArchive(); - BlogInfo info = archive.getBlogInfo(blog); - if (info != null) { - 
boolean isUser = viewer.getBlog().equals(info.getKey().calculateHash()); - if (!isUser) - return; - Properties toSave = new Properties(); - String existing[] = info.getProperties(); - for (int i = 0; i < existing.length; i++) { - String newVal = getString(parameters, existing[i]); - if ( (newVal != null) && (newVal.length() > 0) ) - toSave.setProperty(existing[i], newVal.trim()); - else - toSave.setProperty(existing[i], info.getProperty(existing[i])); - } - boolean saved = BlogManager.instance().updateMetadata(viewer, blog, toSave); - if (saved) - out.write("

      Blog metadata saved

      \n"); - else - out.write("

      Blog metadata could not be saved

      \n"); - } - } - } - - /** - * @param currentURI URI of the with current page without any parameters tacked on - */ - public static void renderMetadata(User viewer, String currentURI, Map parameters, Writer out) throws IOException { - if (parameters.get("action") != null) { - updateMetadata(viewer, parameters, out); - } - String blogStr = getString(parameters, PARAM_BLOG); - if (blogStr != null) { - Hash blog = new Hash(Base64.decode(blogStr)); - Archive archive = BlogManager.instance().getArchive(); - BlogInfo info = archive.getBlogInfo(blog); - if (info == null) { - out.write("Blog " + blog.toBase64() + " does not exist"); - return; - } - boolean isUser = ( (viewer != null) && (viewer.getAuthenticated()) && (viewer.getBlog().equals(info.getKey().calculateHash())) ); - String props[] = info.getProperties(); - if (isUser) { - out.write("\n"); - out.write("\n"); - } - out.write("
      You must log in to view your addressbook
      Syndie authors
      "); - out.write("\n"); - out.write("Name: " + pn.getName() + " "); - out.write("Location: "); - if (pn.isMember(FilteredThreadIndex.GROUP_FAVORITE)) - out.write("Favorite? "); - else - out.write("Favorite? "); - - if (pn.isMember(FilteredThreadIndex.GROUP_IGNORE)) { - out.write("Ignored? "); - if (BlogManager.instance().authorizeRemote(user)) - out.write(" "); - } else { - out.write("Ignored? "); - out.write(" "); - out.write("Location: "); - if (newName.isMember(FilteredThreadIndex.GROUP_FAVORITE)) - out.write("Favorite? "); - else - out.write("Favorite? "); - - if (newName.isMember(FilteredThreadIndex.GROUP_IGNORE)) { - out.write("Ignored? "); - } else { - out.write("Ignored? "); - } - - out.write(" "); - out.write("

      Syndie archives
      "); - out.write("\n"); - out.write("Name: " + pn.getName() + " "); - out.write("Location: "); - if (BlogManager.instance().authorizeRemote(user)) { - if (BlogManager.instance().syndicationScheduled(pn.getLocation())) - out.write("Syndicate? "); - else - out.write("Syndicate? "); - - out.write("Sync manually "); - } else { - out.write("You are not authorized to syndicate with the archive "); - } - out.write(" "); - out.write(" "); - out.write("
      "); - out.write("\n"); - out.write("Name: "); - out.write("Location: "); - if (BlogManager.instance().authorizeRemote(user)) { - if (BlogManager.instance().syndicationScheduled(newName.getLocation())) - out.write("Syndicate? "); - else - out.write("Syndicate? "); - } - - out.write(" "); - out.write("

      I2Phex peers
      "); - out.write("\n"); - out.write("Name: " + pn.getName() + " "); - out.write("Location: "); - - out.write(" "); - out.write(" "); - out.write("
      "); - out.write("\n"); - out.write("Name: "); - out.write("Location: "); - - out.write(" "); - out.write("

      Eepsites
      "); - out.write("\n"); - out.write("Name: " + pn.getName() + " "); - out.write("Location: "); - - out.write(" "); - out.write(" "); - out.write("
      "); - out.write("\n"); - out.write("Name: "); - out.write("Location: "); - - out.write(" "); - out.write("

      Favorite tags
      "); - out.write("\n"); - out.write("Name: " + pn.getName() + " "); - out.write(" "); - - out.write(" "); - out.write("
      "); - out.write("\n"); - out.write("Name: "); - - out.write(" "); - out.write("

      Other addresses
      "); - out.write("\n"); - out.write("Network: "); - out.write("Protocol: "); - out.write("Name: " + pn.getName() +" "); - out.write("Location: "); - - out.write(" "); - out.write(" "); - out.write("
      "); - out.write("\n"); - out.write("Network: "); - out.write("Protocol: "); - out.write("Name: "); - out.write("Location: "); - - out.write(" "); - out.write("

      You are not authorized to configure this Syndie instance
      "); - - // stop people from shooting themselves in the foot - only geeks can enable multiuser mode - // (by adding the single user flag to their syndie.config) - if (BlogManager.instance().isSingleUser()) - out.write("\n"); - /* - out.write("Single user?
      \n"); - - out.write("If this is checked, the registration, admin, and remote passwords are unnecessary - anyone"); - out.write("can register and administer Syndie, as well as use any remote functionality. This should not be checked if untrusted"); - out.write("parties can access this web interface.
      \n"); - */ - out.write("Default user: \n"); - out.write("pass:
      \n"); - out.write("If Syndie is in single user mode, it will create a new 'default' user automatically and use that "); - out.write("whenever you access Syndie unless you explicitly log in to another account. If you want Syndie to use an existing account as "); - out.write("your default account, you can specify them here, in which case it will automatically log you in under that account.
      \n"); - out.write("Registration password:
      \n"); - out.write("Users must specify this password on the registration form to proceed. If this is "); - out.write("blank, anyone can register.
      \n"); - out.write("Remote password:
      \n"); - out.write("To access remote archives, users must first provide this password on their "); - out.write("metadata page. Remote access is 'dangerous', as it allows the user to instruct "); - out.write("this Syndie instance to establish HTTP connections with arbitrary locations. If "); - out.write("this field is not specified, no one can use remote archives.
      \n"); - out.write("Default remote proxy host:
      \n"); - out.write("Default remote proxy port:
      \n"); - out.write("This is the default HTTP proxy shown on the remote archive page.
      \n"); - out.write("
      \n"); - out.write("\n"); - - out.write("
      "); - for (int i = 0; i < props.length; i++) { - if (props[i].equals(BlogInfo.OWNER_KEY)) { - out.write(""); - out.write("\n"); - } else if (props[i].equals(BlogInfo.SIGNATURE)) { - continue; - } else if (props[i].equals(BlogInfo.POSTERS)) { - SigningPublicKey keys[] = info.getPosters(); - if ( (keys != null) && (keys.length > 0) ) { - out.write(""); - out.write("\n"); - } - } else { - String field = HTMLRenderer.sanitizeString(props[i]); - String val = HTMLRenderer.sanitizeString(info.getProperty(props[i])); - out.write("\n"); - - if (isUser && (!field.equals("Edition"))) - out.write(""); - } - } - List tags = BlogManager.instance().getArchive().getIndex().getBlogTags(blog); - if ( (tags != null) && (tags.size() > 0) ) { - out.write(""); - } - if (isUser) - out.write("\n"); - out.write("
      Blog:" + Base64.encode(blog.getData()) + "
      Allowed authors:"); - for (int j = 0; j < keys.length; j++) { - out.write("" + keys[j].calculateHash().toBase64() + ""); - if (j + 1 < keys.length) - out.write("
      \n"); - } - out.write("
      " + field - + ":" + val + "
       
      Known tags:"); - for (int i = 0; i < tags.size(); i++) { - String tag = (String)tags.get(i); - HTMLRenderer rend = new HTMLRenderer(I2PAppContext.getGlobalContext()); - out.write("" + - HTMLRenderer.sanitizeString(tag) + " "); - } - out.write("
      "); - } else { - out.write("Blog not specified"); - } - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/BaseServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/BaseServlet.java deleted file mode 100644 index aba868a15..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/BaseServlet.java +++ /dev/null @@ -1,1302 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.PrintWriter; -import java.io.Reader; -import java.io.Writer; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.Enumeration; -import java.util.Iterator; -import java.util.List; -import java.util.Properties; -import java.util.Set; -import java.util.StringTokenizer; -import java.util.TreeSet; - -import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; - -import net.i2p.I2PAppContext; -import net.i2p.client.naming.PetName; -import net.i2p.client.naming.PetNameDB; -import net.i2p.data.Base64; -import net.i2p.data.DataFormatException; -import net.i2p.data.Hash; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.User; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.FilteredThreadIndex; -import net.i2p.syndie.data.ThreadIndex; -import net.i2p.syndie.data.ThreadNode; -import net.i2p.syndie.sml.HTMLRenderer; -import net.i2p.syndie.sml.ThreadedHTMLRenderer; -import net.i2p.util.FileUtil; -import net.i2p.util.Log; - -/** - * Base servlet for handling request and rendering the templates - * - */ -public abstract class BaseServlet extends HttpServlet { - protected static final String PARAM_AUTH_ACTION = "syndie.auth"; - protected static long _authNonce; - protected I2PAppContext _context; - protected Log _log; - - public void init() throws 
ServletException { - super.init(); - _context = I2PAppContext.getGlobalContext(); - _log = _context.logManager().getLog(getClass()); - _authNonce = _context.random().nextLong(); - } - - protected boolean authAction(HttpServletRequest req) { - return authAction(req.getParameter(PARAM_AUTH_ACTION)); - } - protected boolean authAction(String auth) { - if (auth == null) { - return false; - } else { - try { - boolean rv = (Long.valueOf(auth).longValue() == _authNonce); - return rv; - } catch (NumberFormatException nfe) { - return false; - } - } - } - - /** - * write out hidden fields for params that need to be tacked onto an http request that updates - * data, to prevent spoofing - */ - protected void writeAuthActionFields(Writer out) throws IOException { - out.write(""); - } - protected String getAuthActionFields() throws IOException { - return ""; - } - /** - * key=value& of params that need to be tacked onto an http request that updates data, to - * prevent spoofing - */ - protected static String getAuthActionParams() { return PARAM_AUTH_ACTION + '=' + _authNonce + "&"; } - /** - * key=value& of params that need to be tacked onto an http request that updates data, to - * prevent spoofing - */ - public static void addAuthActionParams(StringBuffer buf) { - buf.append(PARAM_AUTH_ACTION).append('=').append(_authNonce).append("&"); - } - - public void service(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { - req.setCharacterEncoding("UTF-8"); - resp.setCharacterEncoding("UTF-8"); - resp.setContentType("text/html;charset=UTF-8"); - resp.setHeader("cache-control", "no-cache"); - resp.setHeader("pragma", "no-cache"); - - User user = (User)req.getSession().getAttribute("user"); - String login = req.getParameter("login"); - String pass = req.getParameter("password"); - String action = req.getParameter("action"); - boolean forceNewIndex = false; - - boolean authAction = authAction(req); - - if (req.getParameter("regenerateIndex") != 
null) - forceNewIndex = true; - - User oldUser = user; - if (authAction) - user = handleRegister(user, req); - if (oldUser != user) - forceNewIndex = true; - - if (user == null) { - if ("Login".equals(action)) { - user = BlogManager.instance().login(login, pass); // ignore failures - user will just be unauthorized - if (!user.getAuthenticated()) { - user = BlogManager.instance().getDefaultUser(); - if (_log.shouldLog(Log.INFO)) - _log.info("Explicit login failed for [" + login + "], using default login"); - } else { - if (_log.shouldLog(Log.INFO)) - _log.info("Explicit login successful for [" + login + "]"); - } - } else { - user = BlogManager.instance().getDefaultUser(); - if (_log.shouldLog(Log.INFO)) - _log.info("Implicit login for the default user"); - } - forceNewIndex = true; - } else if (authAction && "Login".equals(action)) { - user = BlogManager.instance().login(login, pass); // ignore failures - user will just be unauthorized - if (!user.getAuthenticated()) { - if (_log.shouldLog(Log.INFO)) - _log.info("Explicit relogin failed for [" + login + "] from [" + user.getUsername() + "], using default user"); - user = BlogManager.instance().getDefaultUser(); - } else { - if (_log.shouldLog(Log.INFO)) - _log.info("Explicit relogin successful for [" + login + "] from [" + user.getUsername() + "]"); - } - forceNewIndex = true; - } else if (authAction && "Logout".equals(action)) { - if (_log.shouldLog(Log.INFO)) - _log.info("Explicit logout successful for [" + user.getUsername() + "], using default login"); - user = BlogManager.instance().getDefaultUser(); - forceNewIndex = true; - } - - req.getSession().setAttribute("user", user); - - if (authAction) { - handleAdmin(user, req); - - forceNewIndex = handleAddressbook(user, req) || forceNewIndex; - forceNewIndex = handleBookmarking(user, req) || forceNewIndex; - forceNewIndex = handleManageTags(user, req) || forceNewIndex; - handleUpdateProfile(user, req); - req.setAttribute(BaseServlet.class.getName() + ".auth", 
"true"); - } - - // the 'dataImported' flag is set by successful fetches in the SyndicateServlet/RemoteArchiveBean - if (user.resetDataImported()) { - forceNewIndex = true; - if (_log.shouldLog(Log.INFO)) - _log.info("Data imported, force regenerate"); - } - - FilteredThreadIndex index = (FilteredThreadIndex)req.getSession().getAttribute("threadIndex"); - - boolean authorOnly = Boolean.valueOf(req.getParameter(ThreadedHTMLRenderer.PARAM_THREAD_AUTHOR)).booleanValue(); - if ( (index != null) && (authorOnly != index.getFilterAuthorsByRoot()) ) - forceNewIndex = true; - - Collection tags = getFilteredTags(req); - Collection filteredAuthors = getFilteredAuthors(req); - boolean tagsChanged = ( (index != null) && (!index.getFilteredTags().equals(tags)) ); - boolean authorsChanged = ( (index != null) && (!index.getFilteredAuthors().equals(filteredAuthors)) ); - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("authorOnly=" + authorOnly + " forceNewIndex? " + forceNewIndex + " authors=" + filteredAuthors); - - if (forceNewIndex || (index == null) || (tagsChanged) || (authorsChanged) ) { - index = new FilteredThreadIndex(user, BlogManager.instance().getArchive(), getFilteredTags(req), filteredAuthors, authorOnly); - req.getSession().setAttribute("threadIndex", index); - if (_log.shouldLog(Log.INFO)) - _log.info("New filtered index created (forced? " + forceNewIndex + ", tagsChanged? " + tagsChanged + ", authorsChanged? 
" + authorsChanged + ")"); - } - - render(user, req, resp, index); - } - protected void render(User user, HttpServletRequest req, HttpServletResponse resp, ThreadIndex index) throws IOException, ServletException { - render(user, req, resp.getWriter(), index); - } - protected boolean isAuthed(HttpServletRequest req) { - String auth = (String)req.getAttribute(BaseServlet.class.getName() + ".auth"); - return (auth != null) && (Boolean.valueOf(auth).booleanValue()); - } - - private boolean handleBookmarking(User user, HttpServletRequest req) { - if (!user.getAuthenticated()) - return false; - - boolean rv = false; - - String loc = req.getParameter(ThreadedHTMLRenderer.PARAM_ADD_TO_GROUP_LOCATION); - String group = req.getParameter(ThreadedHTMLRenderer.PARAM_ADD_TO_GROUP_NAME); - if ( (loc != null) && (group != null) && (group.trim().length() > 0) ) { - try { - Hash key = new Hash(); - key.fromBase64(loc); - PetNameDB db = user.getPetNameDB(); - PetName pn = db.getByLocation(loc); - boolean isNew = false; - if (pn == null) { - isNew = true; - BlogInfo info = BlogManager.instance().getArchive().getBlogInfo(key); - String name = null; - if (info != null) - name = info.getProperty(BlogInfo.NAME); - else - name = loc.substring(0,6); - - if (db.containsName(name)) { - int i = 0; - while (db.containsName(name + i)) - i++; - name = name + i; - } - - pn = new PetName(name, AddressesServlet.NET_SYNDIE, AddressesServlet.PROTO_BLOG, loc); - } - pn.addGroup(group); - if (isNew) - db.add(pn); - BlogManager.instance().saveUser(user); - // if we are ignoring someone, we need to recalculate the filters - if (FilteredThreadIndex.GROUP_IGNORE.equals(group)) - rv = true; - } catch (DataFormatException dfe) { - // bad loc, ignore - } - } - - String name = req.getParameter(ThreadedHTMLRenderer.PARAM_REMOVE_FROM_GROUP_NAME); - group = req.getParameter(ThreadedHTMLRenderer.PARAM_REMOVE_FROM_GROUP); - if ( (name != null) && (name.trim().length() > 0) ) { - PetNameDB db = user.getPetNameDB(); - 
PetName pn = db.getByName(name); - boolean changed = false; - if (pn != null) { - if ( (group != null) && (group.trim().length() > 0) ) { - // just remove them from the group - changed = pn.isMember(group); - pn.removeGroup(group); - if ( (changed) && (FilteredThreadIndex.GROUP_IGNORE.equals(group)) ) - rv = true; - } else { - // remove it completely - if (pn.isMember(FilteredThreadIndex.GROUP_IGNORE)) - rv = true; - db.remove(pn); - changed = true; - } - } - if (changed) - BlogManager.instance().saveUser(user); - } - - if (rv) - _log.debug("Bookmarking required rebuild"); - return rv; - } - - private boolean handleManageTags(User user, HttpServletRequest req) { - if (!user.getAuthenticated()) - return false; - - boolean rv = false; - - String tag = req.getParameter(ThreadedHTMLRenderer.PARAM_ADD_TAG); - if ( (tag != null) && (tag.trim().length() > 0) ) { - tag = HTMLRenderer.sanitizeString(tag, false); - String name = tag; - PetNameDB db = user.getPetNameDB(); - PetName pn = db.getByLocation(tag); - if (pn == null) { - if (db.containsName(name)) { - int i = 0; - while (db.containsName(name + i)) - i++; - name = tag + i; - } - - pn = new PetName(name, AddressesServlet.NET_SYNDIE, AddressesServlet.PROTO_TAG, tag); - db.add(pn); - BlogManager.instance().saveUser(user); - } - } - - return false; - } - - private boolean handleAddressbook(User user, HttpServletRequest req) { - if ( (!user.getAuthenticated()) || (empty(AddressesServlet.PARAM_ACTION)) ) { - return false; - } - - String action = req.getParameter(AddressesServlet.PARAM_ACTION); - - if (AddressesServlet.ACTION_ADD_TAG.equals(action)) { - String name = req.getParameter(AddressesServlet.PARAM_NAME); - if ((name != null) && (name.trim().length() > 0) && (!user.getPetNameDB().containsName(name)) ) { - PetName pn = new PetName(name, AddressesServlet.NET_SYNDIE, AddressesServlet.PROTO_TAG, name); - user.getPetNameDB().add(pn); - BlogManager.instance().saveUser(user); - } - return false; - } else if ( 
(AddressesServlet.ACTION_ADD_ARCHIVE.equals(action)) || - (AddressesServlet.ACTION_ADD_BLOG.equals(action)) || - (AddressesServlet.ACTION_ADD_EEPSITE.equals(action)) || - (AddressesServlet.ACTION_ADD_OTHER.equals(action)) || - (AddressesServlet.ACTION_ADD_PEER.equals(action)) ) { - PetName pn = buildNewAddress(req); - if ( (pn != null) && (pn.getName() != null) && (pn.getName().trim().length() > 0) && (pn.getLocation() != null) && - (!user.getPetNameDB().containsName(pn.getName())) ) { - user.getPetNameDB().add(pn); - BlogManager.instance().saveUser(user); - - updateSyndication(user, pn.getLocation(), !empty(req, AddressesServlet.PARAM_SYNDICATE)); - - if (pn.isMember(FilteredThreadIndex.GROUP_FAVORITE) || - pn.isMember(FilteredThreadIndex.GROUP_IGNORE)) - return true; - else - return false; - } else { - // not valid, ignore - return false; - } - } else if ( (AddressesServlet.ACTION_UPDATE_ARCHIVE.equals(action)) || - (AddressesServlet.ACTION_UPDATE_BLOG.equals(action)) || - (AddressesServlet.ACTION_UPDATE_EEPSITE.equals(action)) || - (AddressesServlet.ACTION_UPDATE_OTHER.equals(action)) || - (AddressesServlet.ACTION_UPDATE_PEER.equals(action)) ) { - return updateAddress(user, req); - } else if (AddressesServlet.ACTION_PURGE_AND_BAN_BLOG.equals(action)) { - String name = req.getParameter(AddressesServlet.PARAM_NAME); - PetName pn = user.getPetNameDB().getByName(name); - if (pn != null) { - boolean purged = false; - if (BlogManager.instance().authorizeRemote(user)) { - Hash h = null; - BlogURI uri = new BlogURI(pn.getLocation()); - if (uri.getKeyHash() != null) { - h = uri.getKeyHash(); - } - if (h == null) { - byte b[] = Base64.decode(pn.getLocation()); - if ( (b != null) && (b.length == Hash.HASH_LENGTH) ) - h = new Hash(b); - } - if (h != null) { - BlogManager.instance().purgeAndBan(h); - purged = true; - } - } - if (purged) // force a new thread index - return true; - else - return false; - } else { - return false; - } - } else if ( 
(AddressesServlet.ACTION_DELETE_ARCHIVE.equals(action)) || - (AddressesServlet.ACTION_DELETE_BLOG.equals(action)) || - (AddressesServlet.ACTION_DELETE_EEPSITE.equals(action)) || - (AddressesServlet.ACTION_DELETE_OTHER.equals(action)) || - (AddressesServlet.ACTION_DELETE_TAG.equals(action)) || - (AddressesServlet.ACTION_DELETE_PEER.equals(action)) ) { - String name = req.getParameter(AddressesServlet.PARAM_NAME); - PetName pn = user.getPetNameDB().getByName(name); - if (pn != null) { - user.getPetNameDB().remove(pn); - BlogManager.instance().saveUser(user); - updateSyndication(user, pn.getLocation(), false); - if (pn.isMember(FilteredThreadIndex.GROUP_FAVORITE) || - pn.isMember(FilteredThreadIndex.GROUP_IGNORE)) - return true; - else - return false; - } else { - return false; - } - } else { - // not an addressbook op - return false; - } - } - - private boolean updateAddress(User user, HttpServletRequest req) { - PetName pn = user.getPetNameDB().getByName(req.getParameter(AddressesServlet.PARAM_NAME)); - if (pn != null) { - boolean wasIgnored = pn.isMember(FilteredThreadIndex.GROUP_IGNORE); - boolean wasFavorite = pn.isMember(FilteredThreadIndex.GROUP_FAVORITE); - - pn.setIsPublic(!empty(req, AddressesServlet.PARAM_IS_PUBLIC)); - pn.setLocation(req.getParameter(AddressesServlet.PARAM_LOC)); - pn.setNetwork(req.getParameter(AddressesServlet.PARAM_NET)); - pn.setProtocol(req.getParameter(AddressesServlet.PARAM_PROTO)); - if (empty(req, AddressesServlet.PARAM_FAVORITE)) - pn.removeGroup(FilteredThreadIndex.GROUP_FAVORITE); - else - pn.addGroup(FilteredThreadIndex.GROUP_FAVORITE); - if (empty(req, AddressesServlet.PARAM_IGNORE)) - pn.removeGroup(FilteredThreadIndex.GROUP_IGNORE); - else - pn.addGroup(FilteredThreadIndex.GROUP_IGNORE); - - BlogManager.instance().saveUser(user); - - if (AddressesServlet.PROTO_ARCHIVE.equals(pn.getProtocol())) - updateSyndication(user, pn.getLocation(), !empty(req, AddressesServlet.PARAM_SYNDICATE)); - - return (wasIgnored != 
pn.isMember(FilteredThreadIndex.GROUP_IGNORE)) || - (wasFavorite != pn.isMember(FilteredThreadIndex.GROUP_IGNORE)); - } else { - return false; - } - } - - protected void updateSyndication(User user, String loc, boolean shouldAutomate) { - if (BlogManager.instance().authorizeRemote(user)) { - if (shouldAutomate) - BlogManager.instance().scheduleSyndication(loc); - else - BlogManager.instance().unscheduleSyndication(loc); - } - } - - protected PetName buildNewAddress(HttpServletRequest req) { - PetName pn = new PetName(); - pn.setName(req.getParameter(AddressesServlet.PARAM_NAME)); - pn.setIsPublic(!empty(req, AddressesServlet.PARAM_IS_PUBLIC)); - pn.setLocation(req.getParameter(AddressesServlet.PARAM_LOC)); - pn.setNetwork(req.getParameter(AddressesServlet.PARAM_NET)); - pn.setProtocol(req.getParameter(AddressesServlet.PARAM_PROTO)); - if (empty(req, AddressesServlet.PARAM_FAVORITE)) - pn.removeGroup(FilteredThreadIndex.GROUP_FAVORITE); - else - pn.addGroup(FilteredThreadIndex.GROUP_FAVORITE); - if (empty(req, AddressesServlet.PARAM_IGNORE)) - pn.removeGroup(FilteredThreadIndex.GROUP_IGNORE); - else - pn.addGroup(FilteredThreadIndex.GROUP_IGNORE); - return pn; - } - - protected void handleUpdateProfile(User user, HttpServletRequest req) { - if ( (user == null) || (!user.getAuthenticated()) || (user.getBlog() == null) ) - return; - - String action = req.getParameter("action"); - if ( (action == null) || !("Update profile".equals(action)) ) - return; - - String name = req.getParameter(ThreadedHTMLRenderer.PARAM_PROFILE_NAME); - String desc = req.getParameter(ThreadedHTMLRenderer.PARAM_PROFILE_DESC); - String url = req.getParameter(ThreadedHTMLRenderer.PARAM_PROFILE_URL); - String other = req.getParameter(ThreadedHTMLRenderer.PARAM_PROFILE_OTHER); - - Properties opts = new Properties(); - if (!empty(name)) - opts.setProperty(BlogInfo.NAME, name.trim()); - if (!empty(desc)) - opts.setProperty(BlogInfo.DESCRIPTION, desc.trim()); - if (!empty(url)) - 
opts.setProperty(BlogInfo.CONTACT_URL, url.trim()); - if (!empty(other)) { - StringBuffer key = new StringBuffer(); - StringBuffer val = null; - for (int i = 0; i < other.length(); i++) { - char c = other.charAt(i); - if ( (c == ':') || (c == '=') ) { - if (val != null) { - val.append(c); - } else { - val = new StringBuffer(); - } - } else if ( (c == '\n') || (c == '\r') ) { - String k = key.toString().trim(); - String v = (val != null ? val.toString().trim() : ""); - if ( (k.length() > 0) && (v.length() > 0) ) { - opts.setProperty(k, v); - } - key.setLength(0); - val = null; - } else if (val != null) { - val.append(c); - } else { - key.append(c); - } - } - // now finish the last of it - String k = key.toString().trim(); - String v = (val != null ? val.toString().trim() : ""); - if ( (k.length() > 0) && (v.length() > 0) ) { - opts.setProperty(k, v); - } - } - - String pass0 = req.getParameter("password"); - String pass1 = req.getParameter("passwordConfirm"); - String oldPass = req.getParameter("oldPassword"); - - if ( (pass0 != null) && (pass1 != null) && (pass0.equals(pass1)) ) { - BlogManager.instance().changePasswrd(user, oldPass, pass0, pass1); - } - - if (user.getAuthenticated() && !BlogManager.instance().authorizeRemote(user)) { - String adminPass = req.getParameter("adminPass"); - if (adminPass != null) { - boolean authorized = BlogManager.instance().authorizeRemote(adminPass); - if (authorized) { - user.setAllowAccessRemote(authorized); - BlogManager.instance().saveUser(user); - } - } - } - - boolean updated = BlogManager.instance().updateMetadata(user, user.getBlog(), opts); - } - - private User handleRegister(User user, HttpServletRequest req) { - String l = req.getParameter("login"); - String p = req.getParameter("password"); - String name = req.getParameter("accountName"); - String desc = req.getParameter("description"); - String contactURL = req.getParameter("url"); - String regPass = req.getParameter("registrationPass"); - String action = 
req.getParameter("action"); - - if ( (action != null) && ("Register".equals(action)) && !empty(l) ) { - return BlogManager.instance().register(l, p, regPass, name, desc, contactURL); - } else { - return user; - } - } - - private void handleAdmin(User user, HttpServletRequest req) throws IOException { - if (BlogManager.instance().authorizeRemote(user)) { - String action = req.getParameter("action"); - if ( (action != null) && ("Save config".equals(action)) ) { - boolean wantSingle = !empty(req, "singleuser"); - String defaultUser = req.getParameter("defaultUser"); - String defaultPass = req.getParameter("defaultPass"); - String regPass = req.getParameter("regpass"); - String remotePass = req.getParameter("remotepass"); - String proxyHost = req.getParameter("proxyhost"); - String proxyPort = req.getParameter("proxyport"); - - // default user cannot be empty, but the rest can be blank - if ( (!empty(defaultUser)) && (defaultPass != null) && (regPass != null) && (remotePass != null) && - (proxyHost != null) && (proxyPort != null) ) { - int port = 4444; - try { port = Integer.parseInt(proxyPort); } catch (NumberFormatException nfe) {} - BlogManager.instance().configure(regPass, remotePass, null, null, proxyHost, port, wantSingle, - null, defaultUser, defaultPass); - } - } - } - } - - - protected void render(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index) throws ServletException, IOException { - Archive archive = BlogManager.instance().getArchive(); - int numThreads = 10; - int threadOffset = getOffset(req); - if (threadOffset == -1) { - threadOffset = index.getRootCount() - numThreads; - } - if (threadOffset < 0) { - threadOffset = 0; - } - - BlogURI visibleEntry = getVisible(req); - - int offset = 0; - if ( empty(req, ThreadedHTMLRenderer.PARAM_OFFSET) && (visibleEntry != null) ) { - // we're on a permalink, so jump the tree to the given thread - threadOffset = index.getRoot(visibleEntry); - if (threadOffset < 0) - threadOffset = 0; - } - - 
renderBegin(user, req, out, index); - renderNavBar(user, req, out); - renderControlBar(user, req, out, index); - renderServletDetails(user, req, out, index, threadOffset, visibleEntry, archive); - renderEnd(user, req, out, index); - } - - protected void renderBegin(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index) throws IOException { - out.write("\n"); - out.write("\n"); - out.write("\n\n" + getTitle() + "\n"); - out.write("\n"); - out.write("\n"); - out.write("\n"); - out.write(BEGIN_HTML); - } - - protected void renderNavBar(User user, HttpServletRequest req, PrintWriter out) throws IOException { - out.write("
      \n"); - out.write("\n"); - out.write("\n"); - out.write("Threads Blogs "); - if (user.getAuthenticated() && (user.getBlog() != null) ) { - out.write("Logged in as "); - out.write(user.getUsername()); - out.write("\n"); - out.write("(switch)\n"); - out.write("Post\n"); - out.write("Addressbook\n"); - } else { - out.write("Log in\n"); - } - out.write("\n"); - out.write("About "); - if (BlogManager.instance().authorizeRemote(user)) { - out.write("Syndicate\n"); - out.write("Import RSS/Atom\n"); - out.write("Admin\n"); - } - out.write("\n
      \n"); - } - - /* - - protected void renderNavBar(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index) throws IOException { - //out.write("\n"); - out.write("\n"); - out.write("\n"); - out.write("Threads Blogs "); - if (user.getAuthenticated() && (user.getBlog() != null) ) { - out.write("Logged in as "); - out.write(user.getUsername()); - out.write("\n"); - out.write("(switch)\n"); - out.write("Post\n"); - out.write("Addressbook\n"); - } else { - out.write("\n"); - writeAuthActionFields(out); - out.write("Login: \n"); - out.write("Password: \n"); - out.write("\n"); - } - //out.write("\n"); - out.write("\n"); - out.write("About "); - if (BlogManager.instance().authorizeRemote(user)) { - out.write("Syndicate\n"); - out.write("Import RSS/Atom\n"); - out.write("Admin\n"); - } - out.write("\n\n"); - } - */ - - protected String getSyndicateLink(User user, String location) { - if (location != null) - return "syndicate.jsp?" + SyndicateServlet.PARAM_LOCATION + "=" + location; - return "syndicate.jsp"; - } - - protected static final ArrayList SKIP_TAGS = new ArrayList(); - static { - SKIP_TAGS.add("action"); - SKIP_TAGS.add("filter"); - // post and visible are skipped since we aren't good at filtering by tag when the offset will - // skip around randomly. at least, not yet. 
- SKIP_TAGS.add(ThreadedHTMLRenderer.PARAM_VISIBLE); - //SKIP_TAGS.add("post"); - //SKIP_TAGS.add("thread"); - SKIP_TAGS.add(ThreadedHTMLRenderer.PARAM_OFFSET); // if we are adjusting the filter, ignore the previous offset - SKIP_TAGS.add(ThreadedHTMLRenderer.PARAM_DAYS_BACK); - SKIP_TAGS.add("addLocation"); - SKIP_TAGS.add("addGroup"); - SKIP_TAGS.add("login"); - SKIP_TAGS.add("password"); - } - - private static final String CONTROL_TARGET = "threads.jsp"; - protected String getControlTarget() { return CONTROL_TARGET; } - protected String getPostURI() { return "post.jsp"; } - - protected void renderControlBar(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index) throws IOException { - out.write("
      \n"); - String tags = ""; - String author = ""; - Enumeration params = req.getParameterNames(); - while (params.hasMoreElements()) { - String param = (String)params.nextElement(); - String val = req.getParameter(param); - if (ThreadedHTMLRenderer.PARAM_TAGS.equals(param)) { - tags = val; - } else if (ThreadedHTMLRenderer.PARAM_AUTHOR.equals(param)) { - author = val; - } else if (SKIP_TAGS.contains(param)) { - // skip - } else if (param.length() <= 0) { - // skip - } else { - out.write("\n"); - } - } - out.write("\n"); - out.write("\n"); - out.write("Filter: \n"); - - out.write("Tags: "); - writeTagField(user, tags, out); - - String days = req.getParameter(ThreadedHTMLRenderer.PARAM_DAYS_BACK); - if (days == null) - days = ""; - out.write("Age: days\n"); - - out.write("\n"); - out.write(""); - - if ( (req.getParameter(ThreadedHTMLRenderer.PARAM_VIEW_POST) != null) || - (req.getParameter(ThreadedHTMLRenderer.PARAM_VIEW_THREAD) != null) ) - out.write("Threads"); - out.write("\n"); - out.write("\n"); - out.write("\n"); - out.write("
      \n"); - } - - protected void writeTagField(User user, String selectedTags, PrintWriter out) throws IOException { - writeTagField(user, selectedTags, out, "Threads are filtered to include only ones with posts containing these tags", "Any tags - no filtering", true); - } - public static void writeTagField(User user, String selectedTags, Writer out, String title, String blankTitle, boolean includeFavoritesTag) throws IOException { - Set favoriteTags = new TreeSet(user.getFavoriteTags()); - if (favoriteTags.size() <= 0) { - out.write("\n"); - } else { - out.write("\n"); - } - } - - protected abstract void renderServletDetails(User user, HttpServletRequest req, PrintWriter out, - ThreadIndex index, int threadOffset, BlogURI visibleEntry, - Archive archive) throws IOException; - - protected static final int getOffset(HttpServletRequest req) { - String off = req.getParameter(ThreadedHTMLRenderer.PARAM_OFFSET); - try { - return Integer.parseInt(off); - } catch (NumberFormatException nfe) { - return 0; - } - } - protected static final BlogURI getVisible(HttpServletRequest req) { - return getAsBlogURI(req.getParameter(ThreadedHTMLRenderer.PARAM_VISIBLE)); - } - protected static final BlogURI getAsBlogURI(String uri) { - if (uri != null) { - int split = uri.indexOf('/'); - if ( (split <= 0) || (split + 1 >= uri.length()) ) - return null; - String blog = uri.substring(0, split); - String id = uri.substring(split+1); - try { - Hash hash = new Hash(); - hash.fromBase64(blog); - long msgId = Long.parseLong(id); - if (msgId > 0) - return new BlogURI(hash, msgId); - } catch (DataFormatException dfe) { - return null; - } catch (NumberFormatException nfe) { - return null; - } - } - return null; - } - - - protected String trim(String orig, int maxLen) { - if ( (orig == null) || (orig.length() <= maxLen) ) - return orig; - return orig.substring(0, maxLen) + "..."; - } - - protected static final boolean empty(HttpServletRequest req, String param) { - String val = 
req.getParameter(param); - return (val == null) || (val.trim().length() <= 0); - } - - protected static final boolean empty(String val) { - return (val == null) || (val.trim().length() <= 0); - } - - protected String getExpandLink(HttpServletRequest req, ThreadNode node) { - return getExpandLink(node, req.getRequestURI(), req.getParameter(ThreadedHTMLRenderer.PARAM_VIEW_POST), - req.getParameter(ThreadedHTMLRenderer.PARAM_VIEW_THREAD), - req.getParameter(ThreadedHTMLRenderer.PARAM_OFFSET), - req.getParameter(ThreadedHTMLRenderer.PARAM_TAGS), - req.getParameter(ThreadedHTMLRenderer.PARAM_AUTHOR)); - } - protected static String getExpandLink(ThreadNode node, String uri, String viewPost, String viewThread, - String offset, String tags, String author) { - StringBuffer buf = new StringBuffer(64); - buf.append(uri); - buf.append('?'); - // expand node == let one of node's children be visible - if (node.getChildCount() > 0) { - ThreadNode child = node.getChild(0); - buf.append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('='); - buf.append(child.getEntry().getKeyHash().toBase64()).append('/'); - buf.append(child.getEntry().getEntryId()).append("&"); - } - - if (!empty(viewPost)) - buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append("&"); - else if (!empty(viewThread)) - buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append("&"); - - if (!empty(offset)) - buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append("&"); - - if (!empty(tags)) - buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append("&"); - - if (!empty(author)) - buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(author).append("&"); - - return buf.toString(); - } - protected String getCollapseLink(HttpServletRequest req, ThreadNode node) { - return getCollapseLink(node, req.getRequestURI(), - req.getParameter(ThreadedHTMLRenderer.PARAM_VIEW_POST), - 
req.getParameter(ThreadedHTMLRenderer.PARAM_VIEW_THREAD), - req.getParameter(ThreadedHTMLRenderer.PARAM_OFFSET), - req.getParameter(ThreadedHTMLRenderer.PARAM_TAGS), - req.getParameter(ThreadedHTMLRenderer.PARAM_AUTHOR)); - } - - protected String getCollapseLink(ThreadNode node, String uri, String viewPost, String viewThread, - String offset, String tags, String author) { - StringBuffer buf = new StringBuffer(64); - buf.append(uri); - // collapse node == let the node be visible - buf.append('?').append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('='); - buf.append(node.getEntry().getKeyHash().toBase64()).append('/'); - buf.append(node.getEntry().getEntryId()).append("&"); - - if (!empty(viewPost)) - buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append("&"); - else if (!empty(viewThread)) - buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append("&"); - - if (!empty(offset)) - buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append("&"); - - if (!empty(tags)) - buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append("&"); - - if (!empty(author)) - buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(author).append("&"); - - return buf.toString(); - } - protected String getProfileLink(HttpServletRequest req, Hash author) { - return getProfileLink(author); - } - protected String getProfileLink(Hash author) { return ThreadedHTMLRenderer.buildProfileURL(author); } - - protected String getAddToGroupLink(HttpServletRequest req, Hash author, User user, String group) { - return getAddToGroupLink(user, author, group, req.getRequestURI(), - req.getParameter(ThreadedHTMLRenderer.PARAM_VISIBLE), - req.getParameter(ThreadedHTMLRenderer.PARAM_VIEW_POST), - req.getParameter(ThreadedHTMLRenderer.PARAM_VIEW_THREAD), - req.getParameter(ThreadedHTMLRenderer.PARAM_OFFSET), - req.getParameter(ThreadedHTMLRenderer.PARAM_TAGS), - 
req.getParameter(ThreadedHTMLRenderer.PARAM_AUTHOR)); - } - protected String getAddToGroupLink(User user, Hash author, String group, String uri, String visible, - String viewPost, String viewThread, String offset, String tags, String filteredAuthor) { - StringBuffer buf = new StringBuffer(64); - buf.append(uri); - buf.append('?'); - if (!empty(visible)) - buf.append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('=').append(visible).append("&"); - buf.append(ThreadedHTMLRenderer.PARAM_ADD_TO_GROUP_LOCATION).append('=').append(author.toBase64()).append("&"); - buf.append(ThreadedHTMLRenderer.PARAM_ADD_TO_GROUP_NAME).append('=').append(group).append("&"); - - if (!empty(viewPost)) - buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append("&"); - else if (!empty(viewThread)) - buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append("&"); - - if (!empty(offset)) - buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append("&"); - - if (!empty(tags)) - buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append("&"); - - if (!empty(filteredAuthor)) - buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(filteredAuthor).append("&"); - - addAuthActionParams(buf); - return buf.toString(); - } - protected String getRemoveFromGroupLink(User user, String name, String group, String uri, String visible, - String viewPost, String viewThread, String offset, String tags, String filteredAuthor) { - StringBuffer buf = new StringBuffer(64); - buf.append(uri); - buf.append('?'); - if (!empty(visible)) - buf.append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('=').append(visible).append("&"); - buf.append(ThreadedHTMLRenderer.PARAM_REMOVE_FROM_GROUP_NAME).append('=').append(name).append("&"); - buf.append(ThreadedHTMLRenderer.PARAM_REMOVE_FROM_GROUP).append('=').append(group).append("&"); - - if (!empty(viewPost)) - 
buf.append(ThreadedHTMLRenderer.PARAM_VIEW_POST).append('=').append(viewPost).append("&"); - else if (!empty(viewThread)) - buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('=').append(viewThread).append("&"); - - if (!empty(offset)) - buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append("&"); - - if (!empty(tags)) - buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append("&"); - - if (!empty(filteredAuthor)) - buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(filteredAuthor).append("&"); - - addAuthActionParams(buf); - return buf.toString(); - } - protected String getViewPostLink(HttpServletRequest req, ThreadNode node, User user, boolean isPermalink) { - return ThreadedHTMLRenderer.getViewPostLink(req.getRequestURI(), node, user, isPermalink, - req.getParameter(ThreadedHTMLRenderer.PARAM_OFFSET), - req.getParameter(ThreadedHTMLRenderer.PARAM_TAGS), - req.getParameter(ThreadedHTMLRenderer.PARAM_AUTHOR), - Boolean.valueOf(req.getParameter(ThreadedHTMLRenderer.PARAM_THREAD_AUTHOR)).booleanValue()); - } - protected String getViewPostLink(HttpServletRequest req, BlogURI post, User user) { - return ThreadedHTMLRenderer.getViewPostLink(req.getRequestURI(), post, user, false, - req.getParameter(ThreadedHTMLRenderer.PARAM_OFFSET), - req.getParameter(ThreadedHTMLRenderer.PARAM_TAGS), - req.getParameter(ThreadedHTMLRenderer.PARAM_AUTHOR), - Boolean.valueOf(req.getParameter(ThreadedHTMLRenderer.PARAM_THREAD_AUTHOR)).booleanValue()); - } - protected String getViewThreadLink(HttpServletRequest req, ThreadNode node, User user) { - return getViewThreadLink(req.getRequestURI(), node, user, - req.getParameter(ThreadedHTMLRenderer.PARAM_OFFSET), - req.getParameter(ThreadedHTMLRenderer.PARAM_TAGS), - req.getParameter(ThreadedHTMLRenderer.PARAM_AUTHOR), - Boolean.valueOf(req.getParameter(ThreadedHTMLRenderer.PARAM_THREAD_AUTHOR)).booleanValue()); - } - protected static String getViewThreadLink(String uri, 
ThreadNode node, User user, String offset, - String tags, String author, boolean authorOnly) { - StringBuffer buf = new StringBuffer(64); - buf.append(uri); - BlogURI expandTo = node.getEntry(); - if (node.getChildCount() > 0) { - if (true) { - // lets expand to the leaf - expandTo = new BlogURI(node.getMostRecentPostAuthor(), node.getMostRecentPostDate()); - } else { - // only expand one level - expandTo = node.getChild(0).getEntry(); - } - } - buf.append('?').append(ThreadedHTMLRenderer.PARAM_VISIBLE).append('='); - buf.append(expandTo.getKeyHash().toBase64()).append('/'); - buf.append(expandTo.getEntryId()).append("&"); - - buf.append(ThreadedHTMLRenderer.PARAM_VIEW_THREAD).append('='); - buf.append(node.getEntry().getKeyHash().toBase64()).append('/'); - buf.append(node.getEntry().getEntryId()).append("&"); - - if (!empty(offset)) - buf.append(ThreadedHTMLRenderer.PARAM_OFFSET).append('=').append(offset).append("&"); - - if (!empty(tags)) - buf.append(ThreadedHTMLRenderer.PARAM_TAGS).append('=').append(tags).append("&"); - - if (!empty(author)) { - buf.append(ThreadedHTMLRenderer.PARAM_AUTHOR).append('=').append(author).append("&"); - if (authorOnly) - buf.append(ThreadedHTMLRenderer.PARAM_THREAD_AUTHOR).append("=true&"); - } - buf.append("#").append(node.getEntry().toString()); - return buf.toString(); - } - protected String getFilterByTagLink(HttpServletRequest req, ThreadNode node, User user, String tag, String author) { - return ThreadedHTMLRenderer.getFilterByTagLink(req.getRequestURI(), node, user, tag, author); - } - protected String getNavLink(HttpServletRequest req, int offset) { - return ThreadedHTMLRenderer.getNavLink(req.getRequestURI(), - req.getParameter(ThreadedHTMLRenderer.PARAM_VIEW_POST), - req.getParameter(ThreadedHTMLRenderer.PARAM_VIEW_THREAD), - req.getParameter(ThreadedHTMLRenderer.PARAM_TAGS), - req.getParameter(ThreadedHTMLRenderer.PARAM_AUTHOR), - 
Boolean.valueOf(req.getParameter(ThreadedHTMLRenderer.PARAM_THREAD_AUTHOR)).booleanValue(), - offset); - } - - protected void renderEnd(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index) throws IOException { - out.write(END_HTML); - } - - protected Collection getFilteredTags(HttpServletRequest req) { - String tags = req.getParameter(ThreadedHTMLRenderer.PARAM_TAGS); - if (tags != null) { - StringTokenizer tok = new StringTokenizer(tags, "\n\t "); - ArrayList rv = new ArrayList(); - while (tok.hasMoreTokens()) { - String tag = tok.nextToken().trim(); - if (tag.length() > 0) - rv.add(tag); - } - return rv; - } else { - return Collections.EMPTY_LIST; - } - } - - protected Collection getFilteredAuthors(HttpServletRequest req) { - List rv = new ArrayList(); - rv.addAll(getAuthors(req.getParameter(ThreadedHTMLRenderer.PARAM_AUTHOR))); - //rv.addAll(getAuthors(req.getParameter(ThreadedHTMLRenderer.PARAM_THREAD_AUTHOR))); - return rv; - } - - private Collection getAuthors(String authors) { - if (authors != null) { - StringTokenizer tok = new StringTokenizer(authors, "\n\t "); - ArrayList rv = new ArrayList(); - while (tok.hasMoreTokens()) { - try { - Hash h = new Hash(); - h.fromBase64(tok.nextToken().trim()); - rv.add(h); - } catch (DataFormatException dfe) {} - } - return rv; - } else { - return Collections.EMPTY_LIST; - } - } - - private static final String BEGIN_HTML = "\n" + -"\n" + -"\n" + -"Jump to the beginning of the first post rendered, if any\n" + -"Jump to the thread navigation\n\n" + -"\n"; - private static final String STYLE_HTML = "* {\n" + -" margin: 0;\n" + -" padding: 0;\n" + -"}\n" + -"body {\n" + -" font-family: Arial, Helvetica, sans-serif;\n" + -" font-size: 100%;\n" + -" background-color : #EEEEEE;\n" + -" color: #000000;\n" + -"}\n" + -"select {\n" + -" min-width: 1.5em;\n" + -"}\n" + -".overallTable {\n" + -" border-spacing: 0px;\n" + -" border-collapse:collapse;\n" + -" float:left;\n" + -"}\n" + -".topNav {\n" + -" 
background-color: #BBBBBB;\n" + -"}\n" + -".topNav_user {\n" + -" text-align: left;\n" + -" float: left;\n" + -" display: inline;\n" + -"}\n" + -".topNav_admin {\n" + -" text-align: right;\n" + -" float: right;\n" + -" margin: 0 5px 0 0;\n" + -" display: inline;\n" + -"}\n" + -".controlBar {\n" + -" background-color: #BBBBBB;\n" + -"}\n" + -".controlBarRight {\n" + -" text-align: right;\n" + -"}\n" + -".threadEven {\n" + -" background-color: #FFFFFF;\n" + -" white-space: nowrap;\n" + -"}\n" + -".threadOdd {\n" + -" background-color: #EEEEEE;\n" + -" white-space: nowrap;\n" + -"}\n" + -".threadLeft {\n" + -" text-align: left;\n" + -" align: left;\n" + -"}\n" + -".threadNav {\n" + -" background-color: #BBBBBB;\n" + -"}\n" + -".threadNavRight {\n" + -" text-align: right;\n" + -" float: right;\n" + -" background-color: #BBBBBB;\n" + -"}\n" + -".rightOffset {\n" + -" float: right;\n" + -" margin: 0 5px 0 0;\n" + -" display: inline;\n" + -"}\n" + -".threadInfoLeft {\n" + -" float: left;\n" + -" margin: 5px 0px 0 0;\n" + -" display: inline;\n" + -"}\n" + -".threadInfoRight {\n" + -" float: right;\n" + -" margin: 0 5px 0 0;\n" + -" display: inline;\n" + -"}\n" + -".postMeta {\n" + -" background-color: #BBBBFF;\n" + -"}\n" + -".postMetaSubject {\n" + -" text-align: left;\n" + -"}\n" + -".postMetaLink {\n" + -" text-align: right;\n" + -"}\n" + -".postDetails {\n" + -" background-color: #DDDDFF;\n" + -"}\n" + -".postReply {\n" + -" background-color: #BBBBFF;\n" + -"}\n" + -".postReplyText {\n" + -" background-color: #BBBBFF;\n" + -"}\n" + -".postReplyOptions {\n" + -" background-color: #BBBBFF;\n" + -"}\n" + -".syndieBlogTopNav {\n" + -" float:left;\n" + -" width: 100%;\n" + -" background-color: #BBBBBB;\n" + -"}\n" + -".syndieBlogTopNavUser {\n" + -" text-align: left;\n" + -" float: left;\n" + -"}\n" + -".syndieBlogTopNavAdmin {\n" + -" text-align: left;\n" + -" float: right;\n" + -"}\n" + -".syndieBlogFavorites {\n" + -" float: left;\n" + -" margin: 5px 0px 0 0;\n" + -" 
display: inline;\n" + -"}\n" + -".syndieBlogList {\n" + -" float: right;\n" + -" margin: 5px 0px 0 0;\n" + -" display: inline;\n" + -"}\n"; - - private static final String END_HTML = "
      \n" + -"\n"; - - protected String getTitle() { return "Syndie"; } - - protected static class TreeRenderState { - private int _rowsWritten; - private int _rowsSkipped; - private List _ignored; - public TreeRenderState(List ignored) { - _rowsWritten = 0; - _rowsSkipped = 0; - _ignored = ignored; - } - public int getRowsWritten() { return _rowsWritten; } - public void incrementRowsWritten() { _rowsWritten++; } - public int getRowsSkipped() { return _rowsSkipped; } - public void incrementRowsSkipped() { _rowsSkipped++; } - public List getIgnoredAuthors() { return _ignored; } - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/BlogConfigBean.java b/apps/syndie/java/src/net/i2p/syndie/web/BlogConfigBean.java deleted file mode 100644 index 1652a525a..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/BlogConfigBean.java +++ /dev/null @@ -1,308 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Properties; - -import net.i2p.I2PAppContext; -import net.i2p.client.naming.PetName; -import net.i2p.data.DataHelper; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.User; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.BlogInfoData; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.EntryContainer; -import net.i2p.util.Log; - -/** - * - */ -public class BlogConfigBean { - private I2PAppContext _context; - private Log _log; - private User _user; - private String _title; - private String _description; - private String _contactInfo; - /** list of list of PetNames */ - private List _groups; - private Properties _styleOverrides; - private File _logo; - private boolean _loaded; - private boolean _updated; - - public 
BlogConfigBean() { - _context = I2PAppContext.getGlobalContext(); - _log = _context.logManager().getLog(BlogConfigBean.class); - _groups = new ArrayList(); - _styleOverrides = new Properties(); - } - - public boolean isUpdated() { return _updated; } - - public User getUser() { return _user; } - public void setUser(User user) { - _user = user; - _title = null; - _description = null; - _contactInfo = null; - _groups.clear(); - _styleOverrides.clear(); - if (_logo != null) - _logo.delete(); - _logo = null; - _loaded = false; - _updated = false; - load(); - } - public String getTitle() { return _title; } - public void setTitle(String title) { - _title = title; - _updated = true; - } - public String getDescription() { return _description; } - public void setDescription(String desc) { - _description = desc; - _updated = true; - } - public String getContactInfo() { return _contactInfo; } - public void setContactInfo(String info) { - _contactInfo = info; - _updated = true; - } - public int getGroupCount() { return _groups.size(); } - /** gets the actual modifiable list of PetName instances */ - public List getGroup(int i) { return (List)_groups.get(i); } - /** gets the actual modifiable list of PetName instances */ - public List getGroup(String name) { - for (int i = 0; i < _groups.size(); i++) { - List grp = (List)_groups.get(i); - if (grp.size() > 0) { - PetName pn = (PetName)grp.get(0); - if ( (pn.getGroupCount() == 0) && ( (name == null) || (name.length() <= 0) ) ) - return grp; - if (pn.getGroupCount() == 0) - continue; - String curGroup = pn.getGroup(0); - if (curGroup.equals(name)) - return grp; - } - } - return null; - } - /** adds the given element to the appropriate group (creating a new one if necessary) */ - public void add(PetName pn) { - String groupName = null; - if (pn.getGroupCount() > 0) - groupName = pn.getGroup(0); - List group = getGroup(groupName); - if (group == null) { - group = new ArrayList(4); - group.add(pn); - _groups.add(group); - } else { - 
group.add(pn); - } - _updated = true; - } - public void remove(PetName pn) { - String groupName = null; - if (pn.getGroupCount() > 0) - groupName = pn.getGroup(0); - List group = getGroup(groupName); - if (group != null) { - group.remove(pn); - if (group.size() <= 0) - _groups.remove(group); - } - _updated = true; - } - public void remove(String name) { - for (int i = 0; i < getGroupCount(); i++) { - List group = getGroup(i); - for (int j = 0; j < group.size(); j++) { - PetName pn = (PetName)group.get(j); - if (pn.getName().equals(name)) { - group.remove(j); - if (group.size() <= 0) - _groups.remove(group); - _updated = true; - return; - } - } - } - } - /** take note that the groups have been updated in some way (reordered, etc) */ - public void groupsUpdated() { _updated = true; } - public String getStyleOverride(String prop) { return _styleOverrides.getProperty(prop); } - public void setStyleOverride(String prop, String val) { - _styleOverrides.setProperty(prop, val); - _updated = true; - } - public void unsetStyleOverride(String prop) { - _styleOverrides.remove(prop); - _updated = true; - } - public File getLogo() { return _logo; } - public void setLogo(File logo) { - if ( (logo != null) && (logo.length() > BlogInfoData.MAX_LOGO_SIZE) ) { - _log.error("Refusing a logo of size " + logo.length()); - logo.delete(); - return; - } - if (_logo != null) - _logo.delete(); - _logo = logo; - _updated = true; - } - public boolean hasPendingChanges() { return _loaded && _updated; } - - private void load() { - Archive archive = BlogManager.instance().getArchive(); - BlogInfo info = archive.getBlogInfo(_user.getBlog()); - if (info != null) { - _title = info.getProperty(BlogInfo.NAME); - _description = info.getProperty(BlogInfo.DESCRIPTION); - _contactInfo = info.getProperty(BlogInfo.CONTACT_URL); - String id = info.getProperty(BlogInfo.SUMMARY_ENTRY_ID); - if (id != null) { - BlogURI uri = new BlogURI(id); - EntryContainer entry = archive.getEntry(uri); - if (entry != null) { 
- BlogInfoData data = new BlogInfoData(); - try { - data.load(entry); - if (data.isLogoSpecified()) { - File logo = File.createTempFile("logo", ".png", BlogManager.instance().getTempDir()); - FileOutputStream os = null; - try { - os = new FileOutputStream(logo); - data.writeLogo(os); - _logo = logo; - } finally { - if (os != null) try { os.close(); } catch (IOException ioe) {} - } - } - for (int i = 0; i < data.getReferenceGroupCount(); i++) { - List group = (List)data.getReferenceGroup(i); - for (int j = 0; j < group.size(); j++) { - PetName pn = (PetName)group.get(j); - add(pn); - } - } - Properties overrides = data.getStyleOverrides(); - if (overrides != null) - _styleOverrides.putAll(overrides); - } catch (IOException ioe) { - _log.warn("Unable to load the blog info data from " + uri, ioe); - } - } - } - } - _loaded = true; - _updated = false; - } - - public boolean publishChanges() { - FileInputStream logo = null; - try { - if (_logo != null) { - logo = new FileInputStream(_logo); - _log.debug("Logo file is: " + _logo.length() + "bytes @ " + _logo.getAbsolutePath()); - } - InputStream styleStream = createStyleStream(); - InputStream groupStream = createGroupStream(); - - String tags = BlogInfoData.TAG; - String subject = "n/a"; - String headers = ""; - String sml = ""; - List filenames = new ArrayList(); - List filestreams = new ArrayList(); - List filetypes = new ArrayList(); - if (logo != null) { - filenames.add(BlogInfoData.ATTACHMENT_LOGO); - filestreams.add(logo); - filetypes.add("image/png"); - } - filenames.add(BlogInfoData.ATTACHMENT_STYLE_OVERRIDE); - filestreams.add(styleStream); - filetypes.add("text/plain"); - filenames.add(BlogInfoData.ATTACHMENT_REFERENCE_GROUPS); - filestreams.add(groupStream); - filetypes.add("text/plain"); - - BlogURI uri = BlogManager.instance().createBlogEntry(_user, subject, tags, headers, sml, - filenames, filestreams, filetypes); - if (uri != null) { - Archive archive = BlogManager.instance().getArchive(); - BlogInfo info 
= archive.getBlogInfo(_user.getBlog()); - if (info != null) { - String props[] = info.getProperties(); - Properties opts = new Properties(); - for (int i = 0; i < props.length; i++) { - if (!props[i].equals(BlogInfo.SUMMARY_ENTRY_ID)) - opts.setProperty(props[i], info.getProperty(props[i])); - } - opts.setProperty(BlogInfo.SUMMARY_ENTRY_ID, uri.toString()); - boolean updated = BlogManager.instance().updateMetadata(_user, _user.getBlog(), opts); - if (updated) { - // ok great, published locally, though should we push it to others? - _log.info("Blog summary updated for " + _user + " in " + uri.toString()); - setUser(_user); - _log.debug("Updated? " + _updated); - return true; - } - } else { - _log.error("Info is not known for " + _user.getBlog().toBase64()); - return false; - } - } else { - _log.error("Error creating the summary entry"); - return false; - } - } catch (IOException ioe) { - _log.error("Error publishing", ioe); - } finally { - if (logo != null) try { logo.close(); } catch (IOException ioe) {} - // the other streams are in-memory, drop with the scope - if (_logo != null) _logo.delete(); - } - return false; - } - private InputStream createStyleStream() throws IOException { - StringBuffer buf = new StringBuffer(1024); - if (_styleOverrides != null) { - for (Iterator iter = _styleOverrides.keySet().iterator(); iter.hasNext(); ) { - String key = (String)iter.next(); - String val = _styleOverrides.getProperty(key); - buf.append(key).append('=').append(val).append('\n'); - } - } - return new ByteArrayInputStream(DataHelper.getUTF8(buf)); - } - private InputStream createGroupStream() throws IOException { - StringBuffer buf = new StringBuffer(1024); - for (int i = 0; i < _groups.size(); i++) { - List group = (List)_groups.get(i); - for (int j = 0; j < group.size(); j++) { - PetName pn = (PetName)group.get(j); - buf.append(pn.toString()).append('\n'); - } - } - return new ByteArrayInputStream(DataHelper.getUTF8(buf)); - } - - protected void finalize() { - if 
(_logo != null) _logo.delete(); - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/BlogConfigServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/BlogConfigServlet.java deleted file mode 100644 index c2cfc5d14..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/BlogConfigServlet.java +++ /dev/null @@ -1,451 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.PrintWriter; -import java.util.ArrayList; -import java.util.Hashtable; -import java.util.Iterator; -import java.util.List; -import java.util.Set; -import java.util.TreeSet; - -import javax.servlet.http.HttpServletRequest; - -import net.i2p.client.naming.PetName; -import net.i2p.client.naming.PetNameDB; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.User; -import net.i2p.syndie.data.BlogInfoData; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.ThreadIndex; -import net.i2p.syndie.sml.HTMLRenderer; - -/** - * Display our blog config, and let us edit it through several screens - * - */ -public class BlogConfigServlet extends BaseServlet { - private static final String ATTR_CONFIG_BEAN = "__blogConfigBean"; - public static final String PARAM_CONFIG_SCREEN = "screen"; - public static final String SCREEN_REFERENCES = "references"; - public static final String SCREEN_IMAGES = "images"; - - public static BlogConfigBean getConfigBean(HttpServletRequest req, User user) { - BlogConfigBean bean = (BlogConfigBean)req.getSession().getAttribute(ATTR_CONFIG_BEAN); - if (bean == null) { - bean = new BlogConfigBean(); - bean.setUser(user); - req.getSession().setAttribute(ATTR_CONFIG_BEAN, bean); - } - return bean; - } - public static BlogConfigBean getConfigBean(HttpServletRequest req) { - return (BlogConfigBean)req.getSession().getAttribute(ATTR_CONFIG_BEAN); - } - - protected void renderServletDetails(User user, HttpServletRequest req, 
PrintWriter out, ThreadIndex index, - int threadOffset, BlogURI visibleEntry, Archive archive) throws IOException { - if ( (user == null) || (!user.getAuthenticated() && !BlogManager.instance().isSingleUser())) { - out.write("You must be logged in to edit your profile"); - return; - } - - BlogConfigBean bean = getConfigBean(req, user); - - String screen = req.getParameter(PARAM_CONFIG_SCREEN); - if (screen == null) - screen = SCREEN_REFERENCES; - out.write("\n"); - showConfigNav(req, out); - - if (isAuthed(req)) { - StringBuffer buf = handleOtherAuthedActions(user, req, bean); - if (buf != null) out.write(buf.toString()); - } else { - String contentType = req.getContentType(); - if (!empty(contentType) && (contentType.indexOf("boundary=") != -1)) { - StringBuffer buf = handlePost(user, req, bean); - if (buf != null) out.write(buf.toString()); - } - } - if (bean.isUpdated()) - showCommitForm(req, out); - - if (SCREEN_REFERENCES.equals(screen)) { - displayReferencesScreen(req, out, user, bean); - } else if (SCREEN_IMAGES.equals(screen)) { - displayImagesScreen(req, out, user, bean); - } else { - displayUnknownScreen(out, screen); - } - out.write("\n"); - } - private StringBuffer handlePost(User user, HttpServletRequest rawRequest, BlogConfigBean bean) throws IOException { - StringBuffer rv = new StringBuffer(64); - MultiPartRequest req = new MultiPartRequest(rawRequest); - if (authAction(req.getString(PARAM_AUTH_ACTION))) { - // read in the logo if specified - String filename = req.getFilename("newLogo"); - if ( (filename != null) && (filename.trim().length() > 0) ) { - Hashtable params = req.getParams("newLogo"); - String type = "image/png"; - for (Iterator iter = params.keySet().iterator(); iter.hasNext(); ) { - String cur = (String)iter.next(); - if ("content-type".equalsIgnoreCase(cur)) { - type = (String)params.get(cur); - break; - } - } - InputStream logoSrc = req.getInputStream("newLogo"); - - File tmpLogo = File.createTempFile("blogLogo", ".png", 
BlogManager.instance().getTempDir()); - FileOutputStream out = null; - try { - out = new FileOutputStream(tmpLogo); - byte buf[] = new byte[4096]; - int read = 0; - while ( (read = logoSrc.read(buf)) != -1) - out.write(buf, 0, read); - } finally { - if (out != null) try { out.close(); } catch (IOException ioe) {} - } - - long len = tmpLogo.length(); - if (len > BlogInfoData.MAX_LOGO_SIZE) { - tmpLogo.delete(); - rv.append("Proposed logo is too large (" + len + ", max of " + BlogInfoData.MAX_LOGO_SIZE + ")
      \n"); - } else { - bean.setLogo(tmpLogo); - rv.append("Logo updated
      "); - } - } else { - // logo not specified - } - } else { - // noop - } - return rv; - } - - private void showCommitForm(HttpServletRequest req, PrintWriter out) throws IOException { - out.write("
      \n"); - writeAuthActionFields(out); - out.write("Note: Uncommitted changes outstanding \n\n"); - } - - private void showConfigNav(HttpServletRequest req, PrintWriter out) throws IOException { - out.write("References " - + "Images
      \n"); - } - - private String getScreenURL(HttpServletRequest req, String screen, boolean wantAuth) { - StringBuffer buf = new StringBuffer(128); - buf.append(req.getRequestURI()).append("?").append(PARAM_CONFIG_SCREEN).append("="); - buf.append(screen).append("&"); - if (wantAuth) - buf.append(PARAM_AUTH_ACTION).append('=').append(_authNonce).append("&"); - return buf.toString(); - } - - private void displayUnknownScreen(PrintWriter out, String screen) throws IOException { - out.write("

      The screen " + HTMLRenderer.sanitizeString(screen) + " has not yet been implemented"); - } - private void displayReferencesScreen(HttpServletRequest req, PrintWriter out, User user, BlogConfigBean bean) throws IOException { - out.write("
      \n"); - writeAuthActionFields(out); - out.write("
        \n"); - boolean defaultFound = false; - for (int i = 0; i < bean.getGroupCount(); i++) { - List group = bean.getGroup(i); - String groupName = null; - PetName pn = (PetName)group.get(0); - if (pn.getGroupCount() <= 0) { - groupName = ViewBlogServlet.DEFAULT_GROUP_NAME; - defaultFound = true; - } else { - groupName = pn.getGroup(0); - } - out.write("
      1. Group: " + HTMLRenderer.sanitizeString(groupName) + "\n"); - if (i > 0) - out.write(" ^"); - if (i + 1 < bean.getGroupCount()) - out.write(" v"); - out.write(" X"); - - out.write("
          \n"); - for (int j = 0; j < group.size(); j++) { - out.write("
        1. " + ViewBlogServlet.renderLink(user.getBlog(), (PetName)group.get(j))); - if (j > 0) - out.write(" ^"); - if (j + 1 < group.size()) - out.write(" v"); - out.write(" X"); - out.write("
        2. \n"); - } - out.write("
        \n"); - out.write("
      2. \n"); - } - out.write("
      \n"); - - - out.write("Add a new element:
      Group: or
      " + - "Type:
      \n" + - "Name:
      " + - "Location: \n" + - "
      • Blogs should be specified as $base64Key
      • \n" + - "
      • Blog posts should be specified as $base64Key/$postEntryId
      • \n" + - "
      • Blog post attachments should be specified as $base64Key/$postEntryId/$attachmentNum
      • \n" + - "

      \n"); - - out.write("\n"); - out.write("
      \n"); - } - - private void writePetnameDropdown(PrintWriter out, PetNameDB db) throws IOException { - Set names = db.getNames(); - TreeSet ordered = new TreeSet(names); - for (Iterator iter = ordered.iterator(); iter.hasNext(); ) { - String name = (String)iter.next(); - PetName pn = db.getByName(name); - String proto = pn.getProtocol(); - if ("syndietag".equals(proto)) - continue; - out.write("\n"); - } - } - - private void displayImagesScreen(HttpServletRequest req, PrintWriter out, User user, BlogConfigBean bean) throws IOException { - out.write("
      \n"); - writeAuthActionFields(out); - - File logo = bean.getLogo(); - if (logo != null) - out.write("Blog logo: \"Your
      \n"); - out.write("New logo:
      \n"); - out.write("\n"); - out.write("
      \n"); - } - - protected StringBuffer handleOtherAuthedActions(User user, HttpServletRequest req, BlogConfigBean bean) { - StringBuffer buf = new StringBuffer(); - req.setAttribute(getClass().getName() + ".output", buf); - String action = req.getParameter("action"); - if ("Publish blog configuration".equals(action)) { - if (bean.publishChanges()) { - buf.append("Changes published
      \n"); - } else { - buf.append("Changes could not be published (please check the log)
      \n"); - } - } else { - if ("Save changes".equals(action)) { - String newGroup = req.getParameter("new.group"); - if ( (newGroup == null) || (newGroup.trim().length() <= 0) ) - newGroup = req.getParameter("new.groupOther"); - if ( (newGroup != null) && (newGroup.trim().length() > 0) ) { - addElement(req, user, newGroup, buf, bean); - } else { - } - } else { - } - - handleDelete(req, user, bean, buf); - handleReorderGroup(req, user, bean, buf); - handleReorderRef(req, user, bean, buf); - } - return buf; - } - - private void addElement(HttpServletRequest req, User user, String newGroup, StringBuffer actionOutputHTML, BlogConfigBean bean) { - String type = req.getParameter("new.type"); - String loc = req.getParameter("new.location"); - String name = req.getParameter("new.name"); - - if (empty(type) || empty(loc) || empty(name)) return; - - PetName pn = null; - if ("blog".equals(type)) - pn = new PetName(name, "syndie", "syndieblog", loc); - else if ("blogpost".equals(type)) - pn = new PetName(name, "syndie", "syndieblogpost", loc); - else if ("blogpostattachment".equals(type)) - pn = new PetName(name, "syndie", "syndieblogattachment", loc); - else if ("eepsite".equals(type)) - pn = new PetName(name, "i2p", "http", loc); - else if ("website".equals(type)) - pn = new PetName(name, "web", "http", loc); - else { - // unknown type - } - - if (pn != null) { - if (!ViewBlogServlet.DEFAULT_GROUP_NAME.equals(newGroup)) - pn.addGroup(newGroup); - bean.add(pn); - actionOutputHTML.append("Reference '").append(HTMLRenderer.sanitizeString(name)); - actionOutputHTML.append("' for ").append(HTMLRenderer.sanitizeString(loc)).append(" added to "); - actionOutputHTML.append(HTMLRenderer.sanitizeString(newGroup)).append("
      \n"); - } - } - - private void handleDelete(HttpServletRequest req, User user, BlogConfigBean bean, StringBuffer actionOutputHTML) { - // control parameters: - // delete=$i removes group # $i - // delete=$i.$j removes element $j in group $i - String del = req.getParameter("delete"); - if (empty(del)) return; - int split = del.indexOf('.'); - int group = -1; - int elem = -1; - if (split <= 0) { - try { group = Integer.parseInt(del); } catch (NumberFormatException nfe) {} - } else { - try { - group = Integer.parseInt(del.substring(0, split)); - elem = Integer.parseInt(del.substring(split+1)); - } catch (NumberFormatException nfe) { - group = -1; - elem = -1; - } - } - if ( (elem >= 0) && (group >= 0) ) { - List l = bean.getGroup(group); - if (elem < l.size()) { - PetName pn = (PetName)l.get(elem); - bean.remove(pn); - actionOutputHTML.append("Reference '").append(HTMLRenderer.sanitizeString(pn.getName())); - actionOutputHTML.append("' for ").append(HTMLRenderer.sanitizeString(pn.getLocation())); - actionOutputHTML.append(" removed
      \n"); - } - } else if ( (elem == -1) && (group >= 0) ) { - List l = bean.getGroup(group); - for (int i = 0; i < l.size(); i++) { - PetName pn = (PetName)l.get(i); - bean.remove(pn); - } - actionOutputHTML.append("All references in the selected group were removed
      \n"); - } else { - // noop - } - } - - private void handleReorderGroup(HttpServletRequest req, User user, BlogConfigBean bean, StringBuffer actionOutputHTML) { - // control parameters: - // moveFrom=$i & moveTo=$j moves group $i to position $j - int from = -1; - int to = -1; - try { - String str = req.getParameter("moveFrom"); - if (str != null) - from = Integer.parseInt(str); - str = req.getParameter("moveTo"); - if (str != null) - to = Integer.parseInt(str); - - if ( (from >= 0) && (to >= 0) ) { - List src = bean.getGroup(from); - List dest = bean.getGroup(to); - List orig = new ArrayList(dest); - dest.clear(); - dest.addAll(src); - src.clear(); - src.addAll(orig); - bean.groupsUpdated(); - actionOutputHTML.append("Reference group moved
      \n"); - } - } catch (NumberFormatException nfe) { - // ignore - } - } - - private void handleReorderRef(HttpServletRequest req, User user, BlogConfigBean bean, StringBuffer actionOutputHTML) { - // control parameters: - // moveRefFrom=$i.$j & moveRefTo=$k.$l moves element $j in group $i to position $l in group l - // (i == k) - int from = -1; - int fromElem = -1; - int to = -1; // ignored - int toElem = -1; - try { - String str = req.getParameter("moveRefFrom"); - if (str != null) { - int split = str.indexOf('.'); - if (split > 0) { - try { - from = Integer.parseInt(str.substring(0, split)); - fromElem = Integer.parseInt(str.substring(split+1)); - } catch (NumberFormatException nfe) { - from = -1; - fromElem = -1; - } - } - } - str = req.getParameter("moveRefTo"); - if (str != null) { - int split = str.indexOf('.'); - if (split > 0) { - try { - to = Integer.parseInt(str.substring(0, split)); - toElem = Integer.parseInt(str.substring(split+1)); - } catch (NumberFormatException nfe) { - to = -1; - toElem = -1; - } - } - } - - if ( (from >= 0) && (fromElem >= 0) && (toElem >= 0) ) { - List src = bean.getGroup(from); - PetName pn = (PetName)src.remove(fromElem); - src.add(toElem, pn); - bean.groupsUpdated(); - actionOutputHTML.append("Reference element moved
      \n"); - } - } catch (NumberFormatException nfe) { - // ignore - } - } - - - protected String getTitle() { return "Syndie :: Configure blog"; } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/ExportServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/ExportServlet.java deleted file mode 100644 index 5bcd2f2c6..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/ExportServlet.java +++ /dev/null @@ -1,210 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.List; -import java.util.zip.ZipEntry; -import java.util.zip.ZipOutputStream; - -import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; - -import net.i2p.data.Base64; -import net.i2p.data.Hash; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.data.BlogURI; - -/** - * Dump out a whole series of blog metadata and entries as a zip stream. All metadata - * is written before any entries, so it can be processed in order safely. 
- * - * HTTP parameters: - * = meta (multiple values): base64 hash of the blog for which metadata is requested - * = entry (multiple values): blog URI of an entry being requested - */ -public class ExportServlet extends HttpServlet { - - public void service(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { - export(req, resp); - } - - public void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { - export(req, resp); - } - public void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { - export(req, resp); - } - public void doPut(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { - export(req, resp); - } - - public static void export(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { - try { - doExport(req, resp); - } catch (ServletException se) { - se.printStackTrace(); - throw se; - } catch (IOException ioe) { - ioe.printStackTrace(); - throw ioe; - } - } - private static void doExport(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { - String meta[] = null; - String entries[] = null; - String type = req.getHeader("Content-Type"); - if ( (type == null) || (type.indexOf("boundary") == -1) ) { - // it has to be POSTed with the request, name=value pairs. 
the export servlet doesn't allow any - // free form fields, so no worry about newlines, so lets parse 'er up - List metaList = new ArrayList(); - List entryList = new ArrayList(); - StringBuffer key = new StringBuffer(); - StringBuffer val = null; - String lenStr = req.getHeader("Content-length"); - int len = -1; - if (lenStr != null) - try { len = Integer.valueOf(lenStr).intValue(); } catch (NumberFormatException nfe) {} - - int read = 0; - int c = 0; - InputStream in = req.getInputStream(); - while ( (len == -1) || (read < len) ){ - c = in.read(); - if ( (c == '=') && (val == null) ) { - val = new StringBuffer(128); - } else if ( (c == -1) || (c == '&') ) { - String k = (key == null ? "" : key.toString()); - String v = (val == null ? "" : val.toString()); - if ("meta".equals(k)) - metaList.add(v.trim()); - else if ("entry".equals(k)) - entryList.add(v.trim()); - key.setLength(0); - val = null; - // no newlines in the export servlet - if (c == -1) - break; - } else { - if (val == null) - key.append((char)c); - else - val.append((char)c); - } - read++; - } - if (metaList != null) { - meta = new String[metaList.size()]; - for (int i = 0; i < metaList.size(); i++) - meta[i] = (String)metaList.get(i); - } - if (entryList != null) { - entries = new String[entryList.size()]; - for (int i = 0; i < entryList.size(); i++) - entries[i] = (String)entryList.get(i); - } - } else { - meta = req.getParameterValues("meta"); - entries = req.getParameterValues("entry"); - } - resp.setContentType("application/x-syndie-zip"); - resp.setStatus(200); - OutputStream out = resp.getOutputStream(); - - if (false) { - StringBuffer bbuf = new StringBuffer(1024); - bbuf.append("meta: "); - if (meta != null) - for (int i = 0; i < meta.length; i++) - bbuf.append(meta[i]).append(", "); - bbuf.append("entries: "); - if (entries != null) - for (int i = 0; i < entries.length; i++) - bbuf.append(entries[i]).append(", "); - System.out.println(bbuf.toString()); - } - - ZipOutputStream zo = null; - if 
( (meta != null) && (entries != null) && (meta.length + entries.length > 0) ) - zo = new ZipOutputStream(out); - - List metaFiles = getMetaFiles(meta); - - ZipEntry ze = null; - byte buf[] = new byte[1024]; - int read = -1; - for (int i = 0; metaFiles != null && i < metaFiles.size(); i++) { - ze = new ZipEntry("meta" + i); - ze.setTime(0); - zo.putNextEntry(ze); - FileInputStream in = null; - try { - in = new FileInputStream((File)metaFiles.get(i)); - while ( (read = in.read(buf)) != -1) - zo.write(buf, 0, read); - zo.closeEntry(); - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - } - } - - List entryFiles = getEntryFiles(entries); - for (int i = 0; entryFiles != null && i < entryFiles.size(); i++) { - ze = new ZipEntry("entry" + i); - ze.setTime(0); - zo.putNextEntry(ze); - FileInputStream in = null; - try { - in = new FileInputStream((File)entryFiles.get(i)); - while ( (read = in.read(buf)) != -1) - zo.write(buf, 0, read); - zo.closeEntry(); - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - } - } - - if (zo != null) { - zo.finish(); - zo.close(); - } - } - - private static List getMetaFiles(String blogHashes[]) { - if ( (blogHashes == null) || (blogHashes.length <= 0) ) return null; - File dir = BlogManager.instance().getArchive().getArchiveDir(); - List rv = new ArrayList(blogHashes.length); - for (int i = 0; i < blogHashes.length; i++) { - byte hv[] = Base64.decode(blogHashes[i]); - if ( (hv == null) || (hv.length != Hash.HASH_LENGTH) ) - continue; - File blogDir = new File(dir, blogHashes[i]); - File metaFile = new File(blogDir, Archive.METADATA_FILE); - if (metaFile.exists()) - rv.add(metaFile); - } - return rv; - } - - private static List getEntryFiles(String blogURIs[]) { - if ( (blogURIs == null) || (blogURIs.length <= 0) ) return null; - File dir = BlogManager.instance().getArchive().getArchiveDir(); - List rv = new ArrayList(blogURIs.length); - for (int i = 0; i < blogURIs.length; i++) { - 
BlogURI uri = new BlogURI(blogURIs[i]); - if (uri.getEntryId() < 0) - continue; - File blogDir = new File(dir, uri.getKeyHash().toBase64()); - File entryFile = new File(blogDir, uri.getEntryId() + ".snd"); - if (entryFile.exists()) - rv.add(entryFile); - } - return rv; - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/ExternalLinkServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/ExternalLinkServlet.java deleted file mode 100644 index f89e58079..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/ExternalLinkServlet.java +++ /dev/null @@ -1,44 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.IOException; -import java.io.PrintWriter; - -import javax.servlet.http.HttpServletRequest; - -import net.i2p.data.Base64; -import net.i2p.data.DataHelper; -import net.i2p.syndie.Archive; -import net.i2p.syndie.User; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.ThreadIndex; -import net.i2p.syndie.sml.HTMLRenderer; - -/** - * Confirm page before hitting a remote site - * - */ -public class ExternalLinkServlet extends BaseServlet { - protected String getTitle() { return "Syndie :: External link"; } - - protected void renderServletDetails(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index, - int threadOffset, BlogURI visibleEntry, Archive archive) throws IOException { - String b64Schema = req.getParameter("schema"); - String b64Location = req.getParameter("location"); - if ( (b64Schema == null) || (b64Schema.trim().length() <= 0) || - (b64Location == null) || (b64Location.trim().length() <= 0) ) { - out.write("No location specified\n"); - } else { - byte loc[] = Base64.decode(b64Location); - if ( (loc == null) || (loc.length <= 0) ) { - out.write("Invalid location specified\n"); - } else { - String location = DataHelper.getUTF8(loc); - out.write("Are you sure you want to go to "); - out.write(HTMLRenderer.sanitizeString(location)); - out.write("\n"); - } - } - } -} diff --git 
a/apps/syndie/java/src/net/i2p/syndie/web/ImportFeedServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/ImportFeedServlet.java deleted file mode 100644 index 9b603bbbf..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/ImportFeedServlet.java +++ /dev/null @@ -1,149 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.IOException; -import java.io.PrintWriter; -import java.util.Iterator; -import java.util.List; - -import javax.servlet.http.HttpServletRequest; - -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.User; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.ThreadIndex; - -/** - * Schedule the import of atom/rss feeds - */ -public class ImportFeedServlet extends BaseServlet { - protected String getTitle() { return "Syndie :: Import feed"; } - - protected void renderServletDetails(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index, - int threadOffset, BlogURI visibleEntry, Archive archive) throws IOException { - - if (!BlogManager.instance().authorizeRemote(user)) { - out.write("You are not authorized for remote access.\n"); - return; - } else { - out.write(""); - - String url=req.getParameter("url"); - if (url != null) - url = url.trim(); - String blog=req.getParameter("blog"); - if (blog != null) - blog=blog.trim(); - String tagPrefix = req.getParameter("tagprefix"); - if (tagPrefix != null) - tagPrefix=tagPrefix.trim(); - String action = req.getParameter("action"); - if ( (action != null) && ("Add".equals(action)) ) { - if(url==null || blog==null || tagPrefix==null) { - out.write("Please fill in all fields
      \n"); - } else { - boolean ret = BlogManager.instance().addRssFeed(url, blog, tagPrefix); - if (!ret) { - out.write("addRssFeed failure."); - } else { - out.write("RSS feed added."); - } - } - } else if ( (action != null) && ("Change".equals(action)) ) { - String lastUrl=req.getParameter("lasturl"); - String lastBlog=req.getParameter("lastblog"); - String lastTagPrefix=req.getParameter("lasttagprefix"); - - if (url == null || blog == null || tagPrefix == null || - lastUrl == null || lastBlog == null || lastTagPrefix == null) { - out.write("error, some fields were empty.
      "); - } else { - boolean ret = BlogManager.instance().deleteRssFeed(lastUrl,lastBlog,lastTagPrefix); - if (!ret) { - out.write("Could not delete while attempting to change."); - } else { - ret = BlogManager.instance().addRssFeed(url,blog,tagPrefix); - if (!ret) { - out.write("Could not add while attempting to change."); - } else { - out.write("Ok, changed successfully."); - } - } - } - } else if ( (action != null) && ("Delete".equals(action)) ) { - if (url == null || blog == null || tagPrefix == null) { - out.write("error, some fields were empty.
      "); - } else { - boolean ret = BlogManager.instance().deleteRssFeed(url,blog,tagPrefix); - if (!ret) { - out.write("error, could not delete."); - } else { - out.write("ok, deleted successfully."); - } - } - } - - String blogStr = user.getBlogStr(); - if (blogStr == null) - blogStr=""; - - out.write("

      Here you can add RSS feeds that will be periodically polled and added to your syndie.

      "); - out.write("
      "); - writeAuthActionFields(out); - out.write("RSS URL. (e.g. http://tracker.postman.i2p/rss.php)
      \n"); - out.write("url:
      \n"); - out.write("Blog hash to which the RSS entries will get posted, defaults to the one you're logged in to.
      \n"); - out.write("blog:
      \n"); - out.write("This will be prepended to any tags that the RSS feed contains. (e.g. feed.tracker)
      \n"); - out.write("tagprefix:\n"); - out.write("
      \n"); - out.write("\n"); - out.write("\n"); - out.write("
      \n"); - - List feedList = BlogManager.instance().getRssFeeds(); - if (feedList.size()>0) { - out.write("

      Subscriptions:


      \n"); - out.write("\n"); - out.write("\n"); - out.write("\n"); - out.write("\n"); - out.write("\n"); - out.write("\n"); - - Iterator iter = feedList.iterator(); - while (iter.hasNext()) { - String fields[] = (String[])iter.next(); - url = fields[0]; - blog = fields[1]; - tagPrefix = fields[2]; - StringBuffer buf = new StringBuffer(128); - - buf.append(""); - writeAuthActionFields(out); - buf.append(""); - buf.append(""); - buf.append(""); - - buf.append(""); - buf.append(""); - buf.append(""); - buf.append(""); - out.write(buf.toString()); - buf.setLength(0); - } - - out.write("
      UrlBlogTagPrefix 
      "); - - buf.append(""); - buf.append(""); - - buf.append("
      \n"); - } // end iterating over feeds - - out.write("\n"); - } - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/MultiPartRequest.java b/apps/syndie/java/src/net/i2p/syndie/web/MultiPartRequest.java deleted file mode 100644 index 22f73b40a..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/MultiPartRequest.java +++ /dev/null @@ -1,422 +0,0 @@ -// see below for license info -package net.i2p.syndie.web; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.UnsupportedEncodingException; -import java.util.Hashtable; -import java.util.List; -import java.util.Set; -import java.util.StringTokenizer; - -import javax.servlet.http.HttpServletRequest; - -import org.mortbay.http.HttpFields; -import org.mortbay.util.LineInput; -import org.mortbay.util.MultiMap; -import org.mortbay.util.StringUtil; - -/* ------------------------------------------------------------ */ -/** - * Hacked version of Jetty's MultiPartRequest handler, applying a tiny patch for - * charset handling [1]. These changes are public domain, and will hopefully - * be integrated into Jetty so we can drop this file altogether. Of course, - * until then, this file is APL2 licensed. - * - * Original code is up at [2] - * - * [1] http://article.gmane.org/gmane.comp.java.jetty.general/6031 - * [2] http://cvs.sourceforge.net/viewcvs.py/jetty/Jetty/src/org/mortbay/servlet/ - * (rev 1.15) - * - */ -public class MultiPartRequest -{ - /* ------------------------------------------------------------ */ - HttpServletRequest _request; - LineInput _in; - String _boundary; - String _encoding; - byte[] _byteBoundary; - MultiMap _partMap = new MultiMap(10); - int _char=-2; - boolean _lastPart=false; - - /* ------------------------------------------------------------ */ - /** Constructor. 
- * @param request The request containing a multipart/form-data - * request - * @exception IOException IOException - */ - public MultiPartRequest(HttpServletRequest request) - throws IOException - { - _request=request; - String content_type = request.getHeader(HttpFields.__ContentType); - if (!content_type.startsWith("multipart/form-data")) - throw new IOException("Not multipart/form-data request"); - - //if(log.isDebugEnabled())log.debug("Multipart content type = "+content_type); - - _encoding = request.getCharacterEncoding(); - if (_encoding != null) - _in = new LineInput(request.getInputStream(), 2048, _encoding); - else - _in = new LineInput(request.getInputStream()); - - // Extract boundary string - _boundary="--"+ - value(content_type.substring(content_type.indexOf("boundary="))); - - //if(log.isDebugEnabled())log.debug("Boundary="+_boundary); - _byteBoundary= (_boundary+"--").getBytes(StringUtil.__ISO_8859_1); - - loadAllParts(); - } - - /* ------------------------------------------------------------ */ - /** Get the part names. - * @return an array of part names - */ - public String[] getPartNames() - { - Set s = _partMap.keySet(); - return (String[]) s.toArray(new String[s.size()]); - } - - /* ------------------------------------------------------------ */ - /** Check if a named part is present - * @param name The part - * @return true if it was included - */ - public boolean contains(String name) - { - Part part = (Part)_partMap.get(name); - return (part!=null); - } - - /* ------------------------------------------------------------ */ - /** Get the data of a part as a string. 
- * @param name The part name - * @return The part data - */ - public String getString(String name) - { - List part = (List)_partMap.getValues(name); - if (part==null) - return null; - if (_encoding != null) - { - try - { - return new String(((Part)part.get(0))._data, _encoding); - } - catch (UnsupportedEncodingException uee) - { - //if (log.isDebugEnabled()) log.debug("Invalid character set: " + uee); - return null; - } - } - else - { - return new String(((Part)part.get(0))._data); - } - } - - /* ------------------------------------------------------------ */ - /** - * @param name The part name - * @return The parts data - */ - public String[] getStrings(String name) - { - List parts = (List)_partMap.getValues(name); - if (parts==null) - return null; - String[] strings = new String[parts.size()]; - if (_encoding == null) { - for (int i=0; i0) - value=value.substring(0,i); - } - return value; - } - - /* ------------------------------------------------------------ */ - private class Part - { - String _name=null; - String _filename=null; - Hashtable _headers= new Hashtable(10); - byte[] _data=null; - } -}; diff --git a/apps/syndie/java/src/net/i2p/syndie/web/PostBean.java b/apps/syndie/java/src/net/i2p/syndie/web/PostBean.java deleted file mode 100644 index 7e653587d..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/PostBean.java +++ /dev/null @@ -1,226 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.io.Writer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.StringTokenizer; - -import net.i2p.I2PAppContext; -import net.i2p.client.naming.PetName; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.User; -import net.i2p.syndie.data.BlogInfo; -import 
net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.EntryContainer; -import net.i2p.syndie.sml.HTMLPreviewRenderer; -import net.i2p.syndie.sml.HTMLRenderer; -import net.i2p.util.Log; - -/** - * - */ -public class PostBean { - private I2PAppContext _context; - private Log _log; - private User _user; - private String _subject; - private String _tags; - private String _headers; - private String _text; - private String _archive; - private List _filenames; - private List _fileStreams; - private List _localFiles; - private List _fileTypes; - private boolean _previewed; - - public PostBean() { - _context = I2PAppContext.getGlobalContext(); - _log = _context.logManager().getLog(PostBean.class); - reinitialize(); - } - - public void reinitialize() { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Reinitializing " + (_text != null ? "(with " + _text.length() + " bytes of sml!)" : "")); - _user = null; - _subject = null; - _tags = null; - _text = null; - _headers = null; - _archive = null; - _filenames = new ArrayList(); - _fileStreams = new ArrayList(); - _fileTypes = new ArrayList(); - if (_localFiles != null) - for (int i = 0; i < _localFiles.size(); i++) - ((File)_localFiles.get(i)).delete(); - - _localFiles = new ArrayList(); - _previewed = false; - } - - public User getUser() { return _user; } - public String getSubject() { return (_subject != null ? _subject : ""); } - public String getTags() { return (_tags != null ? _tags : ""); } - public String getText() { return (_text != null ? _text : ""); } - public String getHeaders() { return (_headers != null ? 
_headers : ""); } - public void setUser(User user) { _user = user; } - public void setSubject(String subject) { _subject = subject; } - public void setTags(String tags) { _tags = tags; } - public void setText(String text) { _text = text; } - public void setHeaders(String headers) { _headers = headers; } - public void setArchive(String archive) { _archive = archive; } - - public String getContentType(int id) { - if ( (id >= 0) && (id < _fileTypes.size()) ) - return (String)_fileTypes.get(id); - return "application/octet-stream"; - } - - public void writeAttachmentData(int id, OutputStream out) throws IOException { - FileInputStream in = null; - try { - in = new FileInputStream((File)_localFiles.get(id)); - byte buf[] = new byte[1024]; - int read = 0; - while ( (read = in.read(buf)) != -1) - out.write(buf, 0, read); - out.close(); - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - } - } - - public void addAttachment(String filename, InputStream fileStream, String mimeType) { - _filenames.add(filename); - _fileStreams.add(fileStream); - _fileTypes.add(mimeType); - } - public int getAttachmentCount() { return (_filenames != null ? _filenames.size() : 0); } - - public BlogURI postEntry() throws IOException { - if (!_previewed) return null; - List localStreams = new ArrayList(_localFiles.size()); - for (int i = 0; i < _localFiles.size(); i++) { - File f = (File)_localFiles.get(i); - localStreams.add(new FileInputStream(f)); - } - BlogURI uri = BlogManager.instance().createBlogEntry(_user, _subject, _tags, _headers, _text, - _filenames, localStreams, _fileTypes); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Posted the entry " + uri.toString() + " (archive = " + _archive + ")"); - if ( (uri != null) && BlogManager.instance().authorizeRemote(_user) ) { - PetName pn = _user.getPetNameDB().getByName(_archive); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Archive to petname? " + pn + " (protocol: " + (pn != null ? 
pn.getProtocol() : "") + ")"); - if ( (pn != null) && ("syndiearchive".equals(pn.getProtocol())) ) { - RemoteArchiveBean r = new RemoteArchiveBean(); - Map params = new HashMap(); - - String entries[] = null; - BlogInfo info = BlogManager.instance().getArchive().getBlogInfo(uri); - if (info != null) { - String str = info.getProperty(BlogInfo.SUMMARY_ENTRY_ID); - if (str != null) { - entries = new String[] { uri.toString(), str }; - } - } - if (entries == null) - entries = new String[] { uri.toString() }; - - params.put("localentry", entries); - String proxyHost = BlogManager.instance().getDefaultProxyHost(); - String port = BlogManager.instance().getDefaultProxyPort(); - int proxyPort = 4444; - try { proxyPort = Integer.parseInt(port); } catch (NumberFormatException nfe) {} - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Posting the entry " + uri.toString() + " to " + pn.getLocation()); - r.postSelectedEntries(_user, params, proxyHost, proxyPort, pn.getLocation()); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Post status: " + r.getStatus()); - } - } - return uri; - } - - public void renderPreview(Writer out) throws IOException { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Subject: " + _subject); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Text: " + _text); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Headers: " + _headers); - // cache all the _fileStreams into temporary files, storing those files in _localFiles - // then render the page accordingly with an HTMLRenderer, altered to use a different - // 'view attachment' - cacheAttachments(); - String smlContent = renderSMLContent(); - HTMLPreviewRenderer r = new HTMLPreviewRenderer(_context, _filenames, _fileTypes, _localFiles); - r.render(_user, BlogManager.instance().getArchive(), null, smlContent, out, false, true); - _previewed = true; - } - - public void renderReplyPreview(Writer out, String parentURI) throws IOException { - HTMLRenderer r = new HTMLRenderer(_context); - Archive a = 
BlogManager.instance().getArchive(); - BlogURI uri = new BlogURI(parentURI); - if (uri.getEntryId() > 0) { - EntryContainer entry = a.getEntry(uri); - r.render(_user, a, entry, out, false, true); - } - } - - private String renderSMLContent() { - StringBuffer raw = new StringBuffer(); - raw.append("Subject: ").append(_subject).append('\n'); - raw.append("Tags: "); - StringTokenizer tok = new StringTokenizer(_tags, " \t\n"); - while (tok.hasMoreTokens()) - raw.append(tok.nextToken()).append('\t'); - raw.append('\n'); - raw.append(_headers.trim()); - raw.append("\n\n"); - raw.append(_text.trim()); - return raw.toString(); - } - - /** until we have a good filtering/preferences system, lets try to keep the content small */ - private static final int MAX_SIZE = 256*1024; - - private void cacheAttachments() throws IOException { - if (_user == null) throw new IOException("User not specified"); - File postCacheDir = new File(BlogManager.instance().getTempDir(), _user.getBlog().toBase64()); - if (!postCacheDir.exists()) - postCacheDir.mkdirs(); - for (int i = 0; i < _fileStreams.size(); i++) { - InputStream in = (InputStream)_fileStreams.get(i); - File f = File.createTempFile("attachment", ".dat", postCacheDir); - FileOutputStream o = new FileOutputStream(f); - byte buf[] = new byte[1024]; - int read = 0; - while ( (read = in.read(buf)) != -1) - o.write(buf, 0, read); - o.close(); - in.close(); - if (f.length() > MAX_SIZE) { - _log.error("Refusing to post the attachment, because it is too big: " + f.length()); - } else { - _localFiles.add(f); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Caching attachment " + i + " temporarily in " - + f.getAbsolutePath() + " w/ " + f.length() + "bytes"); - } - } - _fileStreams.clear(); - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/PostServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/PostServlet.java deleted file mode 100644 index 8d7ce3814..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/PostServlet.java +++ 
/dev/null @@ -1,381 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.IOException; -import java.io.PrintWriter; -import java.util.Hashtable; -import java.util.Iterator; -import java.util.TreeSet; - -import javax.servlet.http.HttpServletRequest; - -import net.i2p.client.naming.PetName; -import net.i2p.client.naming.PetNameDB; -import net.i2p.data.Base64; -import net.i2p.data.DataHelper; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.User; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.ThreadIndex; -import net.i2p.syndie.sml.HTMLRenderer; -import net.i2p.syndie.sml.ThreadedHTMLRenderer; - -/** - * Post and preview form - * - */ -public class PostServlet extends BaseServlet { - public static final String PARAM_ACTION = "action"; - public static final String ACTION_CONFIRM = "confirm"; - - public static final String PARAM_SUBJECT = "entrysubject"; - public static final String PARAM_TAGS = ThreadedHTMLRenderer.PARAM_TAGS; - public static final String PARAM_INCLUDENAMES = "includenames"; - public static final String PARAM_TEXT = "entrytext"; - public static final String PARAM_HEADERS = "entryheaders"; - - public static final String PARAM_PARENT = "parentURI"; - public static final String PARAM_IN_NEW_THREAD = "replyInNewThread"; - public static final String PARAM_REFUSE_REPLIES = "refuseReplies"; - - public static final String PARAM_REMOTE_ARCHIVE = "archive"; - - private static final String ATTR_POST_BEAN = "post"; - - protected void renderServletDetails(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index, - int threadOffset, BlogURI visibleEntry, Archive archive) throws IOException { - if (!user.getAuthenticated()) { - out.write("You must be logged in to post\n"); - } else { - PostBean post = getPostBean(user, req); - String action = req.getParameter(PARAM_ACTION); - if (!empty(action) && ACTION_CONFIRM.equals(action)) { - postEntry(user, req, archive, post, out); - 
post.reinitialize(); - post.setUser(user); - } else { - String contentType = req.getContentType(); - if (!empty(contentType) && (contentType.indexOf("boundary=") != -1)) { - previewPostedData(user, req, archive, contentType, post, out); - } else { - displayNewForm(user, req, post, out); - } - } - } - } - - private void previewPostedData(User user, HttpServletRequest rawRequest, Archive archive, String contentType, PostBean post, PrintWriter out) throws IOException { - MultiPartRequest req = new MultiPartRequest(rawRequest); - - if (!authAction(req.getString(PARAM_AUTH_ACTION))) { - out.write("Invalid form submission... stale data?"); - return; - } - - // not confirmed but they posted stuff... gobble up what they give - // and display it as a prview (then we show the confirm form - - out.write(""); - - //post.reinitialize(); - //post.setUser(user); - - boolean inNewThread = getInNewThread(req.getString(PARAM_IN_NEW_THREAD)); - boolean refuseReplies = getRefuseReplies(req.getString(PARAM_REFUSE_REPLIES)); - - String entrySubject = req.getString(PARAM_SUBJECT); - String entryTags = req.getString(PARAM_TAGS); - String entryText = req.getString(PARAM_TEXT); - String entryHeaders = req.getString(PARAM_HEADERS); - String style = ""; //req.getString("style"); - if ( (style != null) && (style.trim().length() > 0) ) { - if (entryHeaders == null) entryHeaders = HTMLRenderer.HEADER_STYLE + ": " + style; - else entryHeaders = entryHeaders + '\n' + HTMLRenderer.HEADER_STYLE + ": " + style; - } - String replyTo = req.getString(PARAM_PARENT); - if ( (replyTo != null) && (replyTo.trim().length() > 0) ) { - byte r[] = Base64.decode(replyTo); - if (r != null) { - replyTo = new String(r, "UTF-8"); - if (!replyTo.startsWith("entry://") && !replyTo.startsWith("blog://")) - replyTo = "entry://" + replyTo; - if (entryHeaders == null) entryHeaders = HTMLRenderer.HEADER_IN_REPLY_TO + ": " + replyTo; - else entryHeaders = entryHeaders + '\n' + HTMLRenderer.HEADER_IN_REPLY_TO + ": " + 
replyTo; - } else { - replyTo = null; - } - } - - if (entryTags == null) entryTags = ""; - - if ( (entryHeaders == null) || (entryHeaders.trim().length() <= 0) ) - entryHeaders = ThreadedHTMLRenderer.HEADER_FORCE_NEW_THREAD + ": " + inNewThread + '\n' + - ThreadedHTMLRenderer.HEADER_REFUSE_REPLIES + ": " + refuseReplies; - else - entryHeaders = entryHeaders.trim() + '\n' + - ThreadedHTMLRenderer.HEADER_FORCE_NEW_THREAD + ": " + inNewThread + '\n' + - ThreadedHTMLRenderer.HEADER_REFUSE_REPLIES + ": " + refuseReplies; - - String includeNames = req.getString(PARAM_INCLUDENAMES); - if ( (includeNames != null) && (includeNames.trim().length() > 0) ) { - PetNameDB db = user.getPetNameDB(); - if (entryHeaders == null) entryHeaders = ""; - for (Iterator iter = db.getNames().iterator(); iter.hasNext(); ) { - PetName pn = db.getByName((String)iter.next()); - if ( (pn != null) && (pn.getIsPublic()) ) { - entryHeaders = entryHeaders.trim() + '\n' + HTMLRenderer.HEADER_PETNAME + ": " + - pn.getName() + "\t" + pn.getNetwork() + "\t" + pn.getProtocol() + "\t" + pn.getLocation(); - } - } - } - - post.setSubject(entrySubject); - post.setTags(entryTags); - post.setText(entryText); - post.setHeaders(entryHeaders); - - for (int i = 0; i < 32; i++) { - String filename = req.getFilename("entryfile" + i); - if ( (filename != null) && (filename.trim().length() > 0) ) { - Hashtable params = req.getParams("entryfile" + i); - String type = "application/octet-stream"; - for (Iterator iter = params.keySet().iterator(); iter.hasNext(); ) { - String cur = (String)iter.next(); - if ("content-type".equalsIgnoreCase(cur)) { - type = (String)params.get(cur); - break; - } - } - post.addAttachment(filename.trim(), req.getInputStream("entryfile" + i), type); - } - } - - post.renderPreview(out); - out.write("
      \n"); - writeAuthActionFields(out); - out.write("Please confirm that the above is ok"); - if (BlogManager.instance().authorizeRemote(user)) { - out.write(", and select what additional archive you want the post transmitted to: "); - out.write("
      \n"); - out.write("If you don't push this post remotely now, you can do so later on the syndicate screen "); - out.write("by choosing an archive, verifying that they don't already have the post, and selecting which posts to push.\n"); - } - out.write("\n"); - - out.write("
      \n"); - - displayEditForm(user, req, post, out); - - out.write("\n"); - } - - private void postEntry(User user, HttpServletRequest req, Archive archive, PostBean post, PrintWriter out) throws IOException { - if (!authAction(req)) { - out.write("Invalid form submission... stale data?"); - return; - } - String remArchive = req.getParameter(PARAM_REMOTE_ARCHIVE); - post.setArchive(remArchive); - BlogURI uri = post.postEntry(); - if (uri != null) { - out.write("Entry posted!"); - } else { - out.write("There was an unknown error posting the entry..."); - } - } - - private void displayNewForm(User user, HttpServletRequest req, PostBean post, PrintWriter out) throws IOException { - // logged in and not confirmed because they didn't send us anything! - // give 'em a new form - - post.reinitialize(); - post.setUser(user); - - String parentURI = req.getParameter(PARAM_PARENT); - - String subject = getParam(req, PARAM_SUBJECT); - - out.write("
      \n"); - writeAuthActionFields(out); - out.write("\n"); - out.write("Post subject: "); - out.write("
      \n"); - out.write("Post content (in raw SML, no headers):
      \n"); - out.write("
      \n"); - out.write("SML post headers:
      \n"); - out.write("
      \n"); - - if ( (parentURI != null) && (parentURI.trim().length() > 0) ) - out.write("\n"); - - out.write(" Tags: "); - BaseServlet.writeTagField(user, getParam(req, PARAM_TAGS), out, "Optional tags to categorize your post", "No tags", false); - //
      \n"); - out.write("
      \n"); - - boolean inNewThread = getInNewThread(req); - boolean refuseReplies = getRefuseReplies(req); - - out.write("In a new thread?
      \n"); - out.write("Refuse replies?
      \n"); - - out.write("Include public names? "); - out.write("
      \n"); - - out.write(ATTACHMENT_FIELDS); - - out.write("
      \n"); - out.write(" "); - out.write("\n"); - - if (parentURI != null) { - out.write("
      "); - String decoded = DataHelper.getUTF8(Base64.decode(parentURI)); - post.renderReplyPreview(out, "entry://" + decoded); - out.write("
      \n"); - } - - out.write("\n"); - out.write("
      \n"); - } - - private void displayEditForm(User user, MultiPartRequest req, PostBean post, PrintWriter out) throws IOException { - String parentURI = req.getString(PARAM_PARENT); - - String subject = getParam(req, PARAM_SUBJECT); - - out.write("
      \n"); - out.write("
      \n"); - writeAuthActionFields(out); - out.write("\n"); - out.write("Post subject: "); - out.write("
      \n"); - out.write("Post content (in raw SML, no headers):
      \n"); - out.write("
      \n"); - out.write("SML post headers:
      \n"); - out.write("
      \n"); - - if ( (parentURI != null) && (parentURI.trim().length() > 0) ) - out.write("\n"); - - out.write(" Tags: "); - //
      \n"); - out.write(" Tags: "); - BaseServlet.writeTagField(user, getParam(req, PARAM_TAGS), out, "Optional tags to categorize your post", "No tags", false); - out.write("
      \n"); - - boolean inNewThread = getInNewThread(req); - boolean refuseReplies = getRefuseReplies(req); - - out.write("In a new thread?
      \n"); - out.write("Refuse replies?
      \n"); - - out.write("Include public names? "); - out.write("
      \n"); - - int newCount = 0; - for (int i = 0; i < 32 && newCount < 3; i++) { - String filename = req.getFilename("entryfile" + i); - if ( (filename != null) && (filename.trim().length() > 0) ) { - out.write("Attachment " + i + ": "); - out.write(HTMLRenderer.sanitizeString(filename)); - out.write("
      "); - } else { - out.write("Attachment " + i + ": "); - out.write("
      "); - newCount++; - } - } - - out.write("
      \n"); - out.write(" "); - out.write("\n"); - - out.write("
      \n"); - } - - private boolean getInNewThread(HttpServletRequest req) { - return getInNewThread(req.getParameter(PARAM_IN_NEW_THREAD)); - } - private boolean getInNewThread(MultiPartRequest req) { - return getInNewThread(getParam(req, PARAM_IN_NEW_THREAD)); - } - private boolean getInNewThread(String val) { - boolean rv = false; - String inNewThread = val; - if ( (inNewThread != null) && (Boolean.valueOf(inNewThread).booleanValue()) ) - rv = true; - return rv; - } - private boolean getRefuseReplies(HttpServletRequest req) { - return getRefuseReplies(req.getParameter(PARAM_REFUSE_REPLIES)); - } - private boolean getRefuseReplies(MultiPartRequest req) { - return getRefuseReplies(getParam(req, PARAM_REFUSE_REPLIES)); - } - private boolean getRefuseReplies(String val) { - boolean rv = false; - String refuseReplies = val; - if ( (refuseReplies != null) && (Boolean.valueOf(refuseReplies).booleanValue()) ) - rv = true; - return rv; - } - - private PostBean getPostBean(User user, HttpServletRequest req) { - PostBean bean = (PostBean)req.getSession().getAttribute(ATTR_POST_BEAN); - if (bean == null) { - bean = new PostBean(); - req.getSession().setAttribute(ATTR_POST_BEAN, bean); - } - bean.setUser(user); - return bean; - } - - private String getParam(HttpServletRequest req, String param) { - String val = req.getParameter(param); - if (val == null) val = ""; - return val; - } - private String getParam(MultiPartRequest req, String param) { - String val = req.getString(param); - if (val == null) return ""; - return val; - } - - private static final String ATTACHMENT_FIELDS = "" - + "Attachment 0:
      " - + "Attachment 1:
      " - + "Attachment 2:
      " - + "Attachment 3:
      \n"; - - protected String getTitle() { return "Syndie :: Post new content"; } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/ProfileServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/ProfileServlet.java deleted file mode 100644 index 1c961a768..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/ProfileServlet.java +++ /dev/null @@ -1,232 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.IOException; -import java.io.PrintWriter; - -import javax.servlet.http.HttpServletRequest; - -import net.i2p.client.naming.PetName; -import net.i2p.data.DataFormatException; -import net.i2p.data.Hash; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.User; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.FilteredThreadIndex; -import net.i2p.syndie.data.ThreadIndex; -import net.i2p.syndie.sml.HTMLRenderer; -import net.i2p.syndie.sml.ThreadedHTMLRenderer; - -/** - * Render the requested profile - * - */ -public class ProfileServlet extends BaseServlet { - protected void renderServletDetails(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index, - int threadOffset, BlogURI visibleEntry, Archive archive) throws IOException { - Hash author = null; - String str = req.getParameter(ThreadedHTMLRenderer.PARAM_AUTHOR); - if (str != null) { - try { - author = new Hash(); - author.fromBase64(str); - } catch (DataFormatException dfe) { - author = null; - } - } else { - author = user.getBlog(); - } - - String uri = req.getRequestURI(); - - if (author == null) { - renderInvalidProfile(out); - } else if ( (user.getBlog() != null) && (user.getBlog().equals(author)) ) { - renderMyProfile(user, uri, out, archive); - } else { - renderProfile(user, uri, out, author, archive); - } - } - - private void renderInvalidProfile(PrintWriter out) throws IOException { - out.write(INVALID_PROFILE); - } - - private void renderMyProfile(User user, String baseURI, PrintWriter 
out, Archive archive) throws IOException { - BlogInfo info = archive.getBlogInfo(user.getBlog()); - if (info == null) - return; - - out.write("\n"); - out.write("
      \n"); - writeAuthActionFields(out); - // now add the form to update - out.write("Your profile (configure your blog)\n"); - out.write("Name: \n"); - out.write("Account description: \n"); - out.write("Contact information: \n"); - out.write("Other attributes:
      \n"); - - if (user.getAuthenticated()) { - if ( (user.getUsername() == null) || (user.getUsername().equals(BlogManager.instance().getDefaultLogin())) ) { - // this is the default user, don't let them change the password - } else { - out.write("Old Password: \n"); - out.write("Password: \n"); - out.write("Password again: \n"); - } - if (!BlogManager.instance().authorizeRemote(user)) { - out.write("To access the remote functionality, please specify the administrative password:
      \n" + - "\n"); - } - } - - out.write("\n"); - out.write("
      \n"); - } - - private void renderProfile(User user, String baseURI, PrintWriter out, Hash author, Archive archive) throws IOException { - out.write("Profile for "); - PetName pn = user.getPetNameDB().getByLocation(author.toBase64()); - String name = null; - BlogInfo info = archive.getBlogInfo(author); - if (pn != null) { - out.write(pn.getName()); - name = null; - if (info != null) - name = info.getProperty(BlogInfo.NAME); - - if ( (name == null) || (name.trim().length() <= 0) ) - name = author.toBase64().substring(0, 6); - - out.write(" (" + name + ")"); - } else { - if (info != null) - name = info.getProperty(BlogInfo.NAME); - - if ( (name == null) || (name.trim().length() <= 0) ) - name = author.toBase64().substring(0, 6); - out.write(name); - } - out.write(""); - if (info != null) - out.write(" [edition " + info.getEdition() + "]"); - out.write("
      \n"); - out.write("View their blog or "); - out.write("\n"); - out.write("\n"); - out.write(" \n"); - out.write(" Syndie feed\n"); - String page = urlPrefix; - if (tags != null) - page = page + "threads.jsp?" + ThreadedHTMLRenderer.PARAM_TAGS + '=' + HTMLRenderer.sanitizeXML(tags); - out.write(" " + page +"\n"); - out.write(" Summary of the latest Syndie posts\n"); - out.write(" Syndie\n"); - - RSSRenderer r = new RSSRenderer(I2PAppContext.getGlobalContext()); - for (int i = 0; i < count && i < entries.size(); i++) { - BlogURI uri = (BlogURI)entries.get(i); - EntryContainer entry = archive.getEntry(uri); - r.render(user, archive, entry, urlPrefix, out); - } - - out.write(" \n"); - out.write("\n"); - out.close(); - } - - private void walkTree(List uris, ThreadNode node) { - if (node == null) - return; - if (uris.contains(node)) - return; - uris.add(node.getEntry()); - for (int i = 0; i < node.getChildCount(); i++) - walkTree(uris, node.getChild(i)); - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/RemoteArchiveBean.java b/apps/syndie/java/src/net/i2p/syndie/web/RemoteArchiveBean.java deleted file mode 100644 index 3601d4b24..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/RemoteArchiveBean.java +++ /dev/null @@ -1,852 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.Writer; -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import java.util.TreeSet; -import java.util.zip.ZipEntry; -import java.util.zip.ZipInputStream; - -import net.i2p.I2PAppContext; -import net.i2p.client.naming.PetName; -import 
net.i2p.client.naming.PetNameDB; -import net.i2p.data.Base64; -import net.i2p.data.DataHelper; -import net.i2p.data.Hash; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.User; -import net.i2p.syndie.data.ArchiveIndex; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.EntryContainer; -import net.i2p.syndie.sml.HTMLRenderer; -import net.i2p.syndie.sml.ThreadedHTMLRenderer; -import net.i2p.util.EepGet; -import net.i2p.util.EepGetScheduler; -import net.i2p.util.EepPost; -import net.i2p.util.I2PThread; -import net.i2p.util.Log; - -/** - * - */ -public class RemoteArchiveBean { - private I2PAppContext _context; - private Log _log; - private String _remoteSchema; - private String _remoteLocation; - private String _proxyHost; - private int _proxyPort; - private ArchiveIndex _remoteIndex; - private List _statusMessages; - private boolean _fetchIndexInProgress; - private boolean _exportCapable; - - public RemoteArchiveBean() { - _context = I2PAppContext.getGlobalContext(); - _log = _context.logManager().getLog(RemoteArchiveBean.class); - reinitialize(); - } - public void reinitialize() { - _remoteSchema = null; - _remoteLocation = null; - _remoteIndex = null; - _fetchIndexInProgress = false; - _proxyHost = null; - _proxyPort = -1; - _exportCapable = false; - _statusMessages = new ArrayList(); - } - - public String getRemoteSchema() { return _remoteSchema; } - public String getRemoteLocation() { return _remoteLocation; } - public ArchiveIndex getRemoteIndex() { return _remoteIndex; } - public String getProxyHost() { return _proxyHost; } - public int getProxyPort() { return _proxyPort; } - public boolean getFetchIndexInProgress() { return _fetchIndexInProgress; } - public String getStatus() { - StringBuffer buf = new StringBuffer(); - while (_statusMessages.size() > 0) - buf.append(_statusMessages.remove(0)).append("\n"); - return buf.toString(); - } - - private boolean 
ignoreBlog(User user, Hash blog) { - if (BlogManager.instance().isBanned(blog)) - return true; - PetNameDB db = user.getPetNameDB(); - PetName pn = db.getByLocation(blog.toBase64()); - return ( (pn!= null) && (pn.isMember("Ignore")) ); - } - - public void fetchMetadata(User user, Map parameters) { - String meta = ArchiveViewerBean.getString(parameters, "blog"); - if (meta == null) return; - Set blogs = new HashSet(); - if ("ALL".equals(meta)) { - Set localBlogs = BlogManager.instance().getArchive().getIndex().getUniqueBlogs(); - Set remoteBlogs = _remoteIndex.getUniqueBlogs(); - for (Iterator iter = remoteBlogs.iterator(); iter.hasNext(); ) { - Hash blog = (Hash)iter.next(); - if (!localBlogs.contains(blog)) { - if (!ignoreBlog(user, blog)) - blogs.add(blog); - } - } - } else { - byte h[] = Base64.decode(meta.trim()); - if (h != null) { - Hash blog = new Hash(h); - if (!ignoreBlog(user, blog)) - blogs.add(blog); - } - } - List urls = new ArrayList(blogs.size()); - List tmpFiles = new ArrayList(blogs.size()); - for (Iterator iter = blogs.iterator(); iter.hasNext(); ) { - Hash blog = (Hash)iter.next(); - urls.add(buildMetaURL(blog)); - try { - tmpFiles.add(File.createTempFile("fetchMeta", ".txt", BlogManager.instance().getTempDir())); - } catch (IOException ioe) { - _statusMessages.add("Internal error creating temporary file to fetch " + blog.toBase64() + ": " + ioe.getMessage()); - } - } - - for (int i = 0; i < urls.size(); i++) - _statusMessages.add("Scheduling up metadata fetches for " + HTMLRenderer.sanitizeString((String)urls.get(i))); - fetch(urls, tmpFiles, user, new MetadataStatusListener()); - } - - private String buildMetaURL(Hash blog) { - String loc = _remoteLocation.trim(); - int root = loc.lastIndexOf('/'); - return loc.substring(0, root + 1) + blog.toBase64() + "/" + Archive.METADATA_FILE; - } - - public void fetchSelectedEntries(User user, Map parameters) { - String entries[] = ArchiveViewerBean.getStrings(parameters, "entry"); - if ( (entries == 
null) || (entries.length <= 0) ) return; - List urls = new ArrayList(entries.length); - List tmpFiles = new ArrayList(entries.length); - for (int i = 0; i < entries.length; i++) { - BlogURI uri = new BlogURI(entries[i]); - if (ignoreBlog(user, uri.getKeyHash())) - continue; - urls.add(buildEntryURL(uri)); - try { - tmpFiles.add(File.createTempFile("fetchBlog", ".txt", BlogManager.instance().getTempDir())); - } catch (IOException ioe) { - _statusMessages.add("Internal error creating temporary file to fetch " + HTMLRenderer.sanitizeString(entries[i]) + ": " + ioe.getMessage()); - } - } - - for (int i = 0; i < urls.size(); i++) - _statusMessages.add("Scheduling blog post fetching for " + HTMLRenderer.sanitizeString(entries[i])); - fetch(urls, tmpFiles, user, new BlogStatusListener(user)); - } - - public void fetchSelectedBulk(User user, Map parameters) { - fetchSelectedBulk(user, parameters, false); - } - - public void fetchSelectedBulk(User user, Map parameters, boolean shouldBlock) { - String entries[] = ArchiveViewerBean.getStrings(parameters, "entry"); - String action = ArchiveViewerBean.getString(parameters, "action"); - if ("Fetch all new entries".equals(action)) { - ArchiveIndex localIndex = BlogManager.instance().getArchive().getIndex(); - List uris = new ArrayList(); - List matches = new ArrayList(); - for (Iterator iter = _remoteIndex.getUniqueBlogs().iterator(); iter.hasNext(); ) { - Hash blog = (Hash)iter.next(); - if (ignoreBlog(user, blog)) - continue; - - _remoteIndex.selectMatchesOrderByEntryId(matches, blog, null); - for (int i = 0; i < matches.size(); i++) { - BlogURI uri = (BlogURI)matches.get(i); - if (!localIndex.getEntryIsKnown(uri)) - uris.add(uri); - } - matches.clear(); - } - entries = new String[uris.size()]; - for (int i = 0; i < uris.size(); i++) - entries[i] = ((BlogURI)uris.get(i)).toString(); - } - if ( (entries == null) || (entries.length <= 0) ) return; - if (_exportCapable) { - StringBuffer url = new StringBuffer(512); - 
url.append(buildExportURL()); - StringBuffer postData = new StringBuffer(512); - Set meta = new HashSet(); - for (int i = 0; i < entries.length; i++) { - BlogURI uri = new BlogURI(entries[i]); - if (uri.getEntryId() >= 0) { - postData.append("entry=").append(uri.toString()).append('&'); - meta.add(uri.getKeyHash()); - _statusMessages.add("Scheduling bulk blog post fetch of " + HTMLRenderer.sanitizeString(entries[i])); - } - } - for (Iterator iter = meta.iterator(); iter.hasNext(); ) { - Hash blog = (Hash)iter.next(); - postData.append("meta=").append(blog.toBase64()).append('&'); - _statusMessages.add("Scheduling bulk blog metadata fetch of " + blog.toBase64()); - } - try { - File tmp = File.createTempFile("fetchBulk", ".zip", BlogManager.instance().getTempDir()); - - boolean shouldProxy = (_proxyHost != null) && (_proxyPort > 0); - final EepGet get = new EepGet(_context, shouldProxy, _proxyHost, _proxyPort, 0, tmp.getAbsolutePath(), url.toString(), postData.toString()); - get.addStatusListener(new BulkFetchListener(user, tmp)); - - if (shouldBlock) { - get.fetch(); - } else { - I2PThread t = new I2PThread(new Runnable() { public void run() { get.fetch(); } }, "Syndie fetcher"); - t.setDaemon(true); - t.start(); - } - } catch (IOException ioe) { - _statusMessages.add("Internal error creating temporary file to fetch " + HTMLRenderer.sanitizeString(url.toString()) + ": " + ioe.getMessage()); - } - } else { - List urls = new ArrayList(entries.length+8); - for (int i = 0; i < entries.length; i++) { - BlogURI uri = new BlogURI(entries[i]); - if (uri.getEntryId() >= 0) { - String metaURL = buildMetaURL(uri.getKeyHash()); - if (!urls.contains(metaURL)) { - urls.add(metaURL); - _statusMessages.add("Scheduling blog metadata fetch of " + HTMLRenderer.sanitizeString(entries[i])); - } - urls.add(buildEntryURL(uri)); - _statusMessages.add("Scheduling blog post fetch of " + HTMLRenderer.sanitizeString(entries[i])); - } - } - List tmpFiles = new ArrayList(1); - try { - for (int i 
= 0; i < urls.size(); i++) { - File t = File.createTempFile("fetchBulk", ".dat", BlogManager.instance().getTempDir()); - tmpFiles.add(t); - } - fetch(urls, tmpFiles, user, new BlogStatusListener(user), shouldBlock); - } catch (IOException ioe) { - _statusMessages.add("Internal error creating temporary file to fetch posts: " + HTMLRenderer.sanitizeString(urls.toString())); - } - } - } - - private String buildExportURL() { - String loc = _remoteLocation.trim(); - int root = loc.lastIndexOf('/'); - return loc.substring(0, root + 1) + "export.zip?"; - } - - private String buildEntryURL(BlogURI uri) { - String loc = _remoteLocation.trim(); - int root = loc.lastIndexOf('/'); - return loc.substring(0, root + 1) + uri.getKeyHash().toBase64() + "/" + uri.getEntryId() + ".snd"; - } - - public void fetchAllEntries(User user, Map parameters) { - ArchiveIndex localIndex = BlogManager.instance().getArchive().getIndex(); - List uris = new ArrayList(); - List entries = new ArrayList(); - for (Iterator iter = _remoteIndex.getUniqueBlogs().iterator(); iter.hasNext(); ) { - Hash blog = (Hash)iter.next(); - if (ignoreBlog(user, blog)) - continue; - _remoteIndex.selectMatchesOrderByEntryId(entries, blog, null); - for (int i = 0; i < entries.size(); i++) { - BlogURI uri = (BlogURI)entries.get(i); - if (!localIndex.getEntryIsKnown(uri)) - uris.add(uri); - } - entries.clear(); - } - List urls = new ArrayList(uris.size()); - List tmpFiles = new ArrayList(uris.size()); - for (int i = 0; i < uris.size(); i++) { - urls.add(buildEntryURL((BlogURI)uris.get(i))); - try { - tmpFiles.add(File.createTempFile("fetchBlog", ".txt", BlogManager.instance().getTempDir())); - } catch (IOException ioe) { - _statusMessages.add("Internal error creating temporary file to fetch " + HTMLRenderer.sanitizeString(uris.get(i).toString()) + ": " + ioe.getMessage()); - } - } - - for (int i = 0; i < urls.size(); i++) - _statusMessages.add("Fetch all entries: " + HTMLRenderer.sanitizeString((String)urls.get(i))); - 
fetch(urls, tmpFiles, user, new BlogStatusListener(user)); - } - - private void fetch(List urls, List tmpFiles, User user, EepGet.StatusListener lsnr) { - fetch(urls, tmpFiles, user, lsnr, false); - } - - private void fetch(List urls, List tmpFiles, User user, EepGet.StatusListener lsnr, boolean shouldBlock) { - EepGetScheduler scheduler = new EepGetScheduler(I2PAppContext.getGlobalContext(), urls, tmpFiles, _proxyHost, _proxyPort, lsnr); - scheduler.fetch(shouldBlock); - } - - public void fetchIndex(User user, String schema, String location, String proxyHost, String proxyPort, boolean allowCaching) { - _fetchIndexInProgress = true; - _remoteIndex = null; - _remoteLocation = location; - _remoteSchema = schema; - _proxyHost = null; - _proxyPort = -1; - _exportCapable = false; - if (user == null) user = BlogManager.instance().getDefaultUser(); - - if ( (schema == null) || (schema.trim().length() <= 0) || - (location == null) || (location.trim().length() <= 0) ) { - _statusMessages.add("Location must be specified [" + location + "] [" + schema + "]"); - _fetchIndexInProgress = false; - return; - } - - if ("web".equals(schema)) { - if ( (proxyHost != null) && (proxyHost.trim().length() > 0) && - (proxyPort != null) && (proxyPort.trim().length() > 0) ) { - _proxyHost = proxyHost; - try { - _proxyPort = Integer.parseInt(proxyPort); - } catch (NumberFormatException nfe) { - _statusMessages.add("Proxy port " + HTMLRenderer.sanitizeString(proxyPort) + " is invalid"); - _fetchIndexInProgress = false; - return; - } - } - } else { - _statusMessages.add(new String("Remote schema " + HTMLRenderer.sanitizeString(schema) + " currently not supported")); - _fetchIndexInProgress = false; - return; - } - - _statusMessages.add("Fetching index from " + HTMLRenderer.sanitizeString(_remoteLocation) + - (_proxyHost != null ? 
" via " + HTMLRenderer.sanitizeString(_proxyHost) + ":" + _proxyPort : "")); - - File archiveFile; - if (user.getBlog() != null) { - archiveFile = new File(BlogManager.instance().getTempDir(), user.getBlog().toBase64() + "_remoteArchive.txt"); - } else { - archiveFile = new File(BlogManager.instance().getTempDir(), "remoteArchive.txt"); - } - archiveFile.delete(); - - Properties etags = new Properties(); - try { - DataHelper.loadProps(etags, new File(BlogManager.instance().getRootDir(), "etags")); - } catch (IOException ioe) { - //ignore - } - - String tag = null; - if (allowCaching) - tag = etags.getProperty(location); - EepGet eep = new EepGet(I2PAppContext.getGlobalContext(), ((_proxyHost != null) && (_proxyPort > 0)), - _proxyHost, _proxyPort, 0, archiveFile.getAbsolutePath(), location, allowCaching, tag); - eep.addStatusListener(new IndexFetcherStatusListener(archiveFile)); - eep.fetch(); - - if (eep.getETag() != null) { - etags.setProperty(location, eep.getETag()); - } - try { - DataHelper.storeProps(etags, new File(BlogManager.instance().getRootDir(), "etags")); - } catch (IOException ioe) { - //ignore - } - } - - private class IndexFetcherStatusListener implements EepGet.StatusListener { - private File _archiveFile; - public IndexFetcherStatusListener(File file) { - _archiveFile = file; - } - public void attemptFailed(String url, long bytesTransferred, long bytesRemaining, int currentAttempt, int numRetries, Exception cause) { - _statusMessages.add("Attempt " + currentAttempt + " failed after " + bytesTransferred + (cause != null ? 
" " + cause.getMessage() : "")); - } - - public void bytesTransferred(long alreadyTransferred, int currentWrite, long bytesTransferred, long bytesRemaining, String url) {} - public void transferComplete(long alreadyTransferred, long bytesTransferred, long bytesRemaining, String url, String outputFile, boolean notModified) { - _statusMessages.add("Fetch of " + HTMLRenderer.sanitizeString(url) + " successful"); - _fetchIndexInProgress = false; - ArchiveIndex i = new ArchiveIndex(I2PAppContext.getGlobalContext(), false); - if (notModified) { - _statusMessages.add("Archive unchanged since last fetch."); - _statusMessages.add("If you want to force a refetch, make a trivial modification to the URL, such as adding a \"?\""); - } else { - try { - i.load(_archiveFile); - _statusMessages.add("Archive fetched and loaded"); - _remoteIndex = i; - } catch (IOException ioe) { - _statusMessages.add("Archive is corrupt: " + ioe.getMessage()); - } - } - } - public void transferFailed(String url, long bytesTransferred, long bytesRemaining, int currentAttempt) { - _statusMessages.add("Fetch of " + HTMLRenderer.sanitizeString(url) + " failed after " + bytesTransferred); - _fetchIndexInProgress = false; - } - public void headerReceived(String url, int currentAttempt, String key, String val) { - if (ArchiveServlet.HEADER_EXPORT_CAPABLE.equals(key) && ("true".equals(val))) { - _statusMessages.add("Remote archive is bulk export capable"); - _exportCapable = true; - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Header received: [" + key + "] = [" + val + "]"); - } - } - } - - private class MetadataStatusListener implements EepGet.StatusListener { - public MetadataStatusListener() {} - public void attemptFailed(String url, long bytesTransferred, long bytesRemaining, int currentAttempt, int numRetries, Exception cause) { - _statusMessages.add("Attempt " + currentAttempt + " failed after " + bytesTransferred + (cause != null ? 
" " + cause.getMessage() : "")); - } - - public void bytesTransferred(long alreadyTransferred, int currentWrite, long bytesTransferred, long bytesRemaining, String url) {} - public void transferComplete(long alreadyTransferred, long bytesTransferred, long bytesRemaining, String url, String outputFile, boolean notModified) { - handleMetadata(url, outputFile); - } - public void transferFailed(String url, long bytesTransferred, long bytesRemaining, int currentAttempt) { - _statusMessages.add("Fetch of " + HTMLRenderer.sanitizeString(url) + " failed after " + bytesTransferred);; - } - public void headerReceived(String url, int currentAttempt, String key, String val) {} - } - - private void handleMetadata(String url, String outputFile) { - _statusMessages.add("Fetch of " + HTMLRenderer.sanitizeString(url) + " successful"); - File info = new File(outputFile); - FileInputStream in = null; - try { - BlogInfo i = new BlogInfo(); - in = new FileInputStream(info); - i.load(in); - boolean ok = BlogManager.instance().getArchive().storeBlogInfo(i); - if (ok) { - _statusMessages.add("Blog info for " + HTMLRenderer.sanitizeString(i.getProperty(BlogInfo.NAME)) + " imported"); - BlogManager.instance().getArchive().reloadInfo(); - } else { - _statusMessages.add("Blog info at " + HTMLRenderer.sanitizeString(url) + " was corrupt / invalid / forged"); - } - } catch (IOException ioe) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Error handling metadata", ioe); - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - info.delete(); - } - } - - private class BlogStatusListener implements EepGet.StatusListener { - private User _user; - public BlogStatusListener(User user) { - _user = user; - } - public void attemptFailed(String url, long bytesTransferred, long bytesRemaining, int currentAttempt, int numRetries, Exception cause) { - _statusMessages.add("Attempt " + currentAttempt + " failed after " + bytesTransferred + (cause != null ? 
" " + cause.getMessage() : "")); - } - - public void bytesTransferred(long alreadyTransferred, int currentWrite, long bytesTransferred, long bytesRemaining, String url) {} - public void transferComplete(long alreadyTransferred, long bytesTransferred, long bytesRemaining, String url, String outputFile, boolean notModified) { - if (url.endsWith(".snm")) { - handleMetadata(url, outputFile); - return; - } - _statusMessages.add("Fetch of " + HTMLRenderer.sanitizeString(url) + " successful"); - File file = new File(outputFile); - FileInputStream in = null; - try { - EntryContainer c = new EntryContainer(); - in = new FileInputStream(file); - c.load(in); - BlogURI uri = c.getURI(); - if ( (uri == null) || (uri.getKeyHash() == null) ) { - _statusMessages.add("Blog post at " + HTMLRenderer.sanitizeString(url) + " was corrupt - no URI"); - return; - } - Archive a = BlogManager.instance().getArchive(); - BlogInfo info = a.getBlogInfo(uri); - if (info == null) { - _statusMessages.add("Blog post " + uri.toString() + " cannot be imported, as we don't have their blog metadata"); - return; - } - boolean ok = a.storeEntry(c); - if (!ok) { - _statusMessages.add("Blog post at " + url + ": " + uri.toString() + " has an invalid signature"); - return; - } else { - _statusMessages.add("Blog post " + uri.toString() + " imported"); - BlogManager.instance().getArchive().regenerateIndex(); - _user.dataImported(); - } - } catch (IOException ioe) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Error importing", ioe); - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - file.delete(); - } - } - public void transferFailed(String url, long bytesTransferred, long bytesRemaining, int currentAttempt) { - _statusMessages.add("Fetch of " + HTMLRenderer.sanitizeString(url) + " failed after " + bytesTransferred); - } - public void headerReceived(String url, int currentAttempt, String key, String val) {} - } - - /** - * Receive the status of a fetch for the zip containing blogs 
and metadata (as generated by - * the ExportServlet) - */ - private class BulkFetchListener implements EepGet.StatusListener { - private File _tmp; - private User _user; - public BulkFetchListener(User user, File tmp) { - _user = user; - _tmp = tmp; - } - public void attemptFailed(String url, long bytesTransferred, long bytesRemaining, int currentAttempt, int numRetries, Exception cause) { - _statusMessages.add("Attempt " + currentAttempt + " failed after " + bytesTransferred + (cause != null ? " " + cause.getMessage() : "")); - } - - public void bytesTransferred(long alreadyTransferred, int currentWrite, long bytesTransferred, long bytesRemaining, String url) {} - public void transferComplete(long alreadyTransferred, long bytesTransferred, long bytesRemaining, String url, String outputFile, boolean notModified) { - _statusMessages.add("Fetch of " + HTMLRenderer.sanitizeString(url.substring(0, url.indexOf('?'))) + " successful, importing the data"); - File file = new File(outputFile); - ZipInputStream zi = null; - try { - zi = new ZipInputStream(new FileInputStream(file)); - boolean postImported = false; - while (true) { - ZipEntry entry = zi.getNextEntry(); - if (entry == null) - break; - - ByteArrayOutputStream out = new ByteArrayOutputStream(1024); - byte buf[] = new byte[1024]; - int read = -1; - while ( (read = zi.read(buf)) != -1) - out.write(buf, 0, read); - - if (entry.getName().startsWith("meta")) { - BlogInfo i = new BlogInfo(); - i.load(new ByteArrayInputStream(out.toByteArray())); - boolean ok = BlogManager.instance().getArchive().storeBlogInfo(i); - if (ok) { - _statusMessages.add("Blog info for " + HTMLRenderer.sanitizeString(i.getProperty(BlogInfo.NAME)) + " imported"); - } else { - _statusMessages.add("Blog info at " + HTMLRenderer.sanitizeString(url) + " was corrupt / invalid / forged"); - } - } else if (entry.getName().startsWith("entry")) { - EntryContainer c = new EntryContainer(); - c.load(new ByteArrayInputStream(out.toByteArray())); - BlogURI 
uri = c.getURI(); - if ( (uri == null) || (uri.getKeyHash() == null) ) { - _statusMessages.add("Blog post " + HTMLRenderer.sanitizeString(entry.getName()) + " was corrupt - no URI"); - continue; - } - Archive a = BlogManager.instance().getArchive(); - BlogInfo info = a.getBlogInfo(uri); - if (info == null) { - _statusMessages.add("Blog post " + HTMLRenderer.sanitizeString(entry.getName()) + " cannot be imported, as we don't have their blog metadata"); - continue; - } - boolean ok = a.storeEntry(c); - if (!ok) { - _statusMessages.add("Blog post " + uri.toString() + " has an invalid signature"); - continue; - } else { - _statusMessages.add("Blog post " + uri.toString() + " imported"); - postImported = true; - } - } - } - - BlogManager.instance().getArchive().regenerateIndex(); - if (postImported) - _user.dataImported(); - } catch (IOException ioe) { - if (_log.shouldLog(Log.WARN)) - _log.debug("Error importing", ioe); - _statusMessages.add("Error importing from " + HTMLRenderer.sanitizeString(url) + ": " + ioe.getMessage()); - } finally { - if (zi != null) try { zi.close(); } catch (IOException ioe) {} - file.delete(); - } - } - public void transferFailed(String url, long bytesTransferred, long bytesRemaining, int currentAttempt) { - _statusMessages.add("Fetch of " + HTMLRenderer.sanitizeString(url) + " failed after " + bytesTransferred); - _tmp.delete(); - } - public void headerReceived(String url, int currentAttempt, String key, String val) {} - } - - public void postSelectedEntries(User user, Map parameters) { - postSelectedEntries(user, parameters, _proxyHost, _proxyPort, _remoteLocation); - } - public void postSelectedEntries(User user, Map parameters, String proxyHost, int proxyPort, String location) { - String entries[] = ArchiveViewerBean.getStrings(parameters, "localentry"); - if ( (entries == null) || (entries.length <= 0) ) return; - List uris = new ArrayList(entries.length); - for (int i = 0; i < entries.length; i++) - uris.add(new BlogURI(entries[i])); - 
postSelectedEntries(user, uris, proxyHost, proxyPort, location); - } - public void postSelectedEntries(User user, List uris, String location) { - postSelectedEntries(user, uris, _proxyHost, _proxyPort, location); - } - public void postSelectedEntries(User user, List uris, String proxyHost, int proxyPort, String location) { - if ( (proxyPort > 0) && (proxyHost != null) && (proxyHost.trim().length() > 0) ) { - _proxyPort = proxyPort; - _proxyHost = proxyHost; - } else { - _proxyPort = -1; - _proxyHost = null; - } - _remoteLocation = location; - post(uris, user); - } - - private void post(List blogURIs, User user) { - List files = new ArrayList(blogURIs.size()+1); - Set meta = new HashSet(4); - Map uploads = new HashMap(files.size()); - String importURL = getImportURL(); - _statusMessages.add("Uploading through " + HTMLRenderer.sanitizeString(importURL)); - for (int i = 0; i < blogURIs.size(); i++) { - BlogURI uri = (BlogURI)blogURIs.get(i); - File blogDir = new File(BlogManager.instance().getArchive().getArchiveDir(), uri.getKeyHash().toBase64()); - BlogInfo info = BlogManager.instance().getArchive().getBlogInfo(uri); - if (!meta.contains(uri.getKeyHash())) { - uploads.put("blogmeta" + meta.size(), new File(blogDir, Archive.METADATA_FILE)); - meta.add(uri.getKeyHash()); - _statusMessages.add("Scheduling upload of the blog metadata for " + HTMLRenderer.sanitizeString(info.getProperty(BlogInfo.NAME))); - } - uploads.put("blogpost" + i, new File(blogDir, uri.getEntryId() + ".snd")); - _statusMessages.add("Scheduling upload of " + HTMLRenderer.sanitizeString(info.getProperty(BlogInfo.NAME)) - + ": " + getEntryDate(uri.getEntryId())); - } - EepPost post = new EepPost(); - post.postFiles(importURL, _proxyHost, _proxyPort, uploads, new Runnable() { public void run() { _statusMessages.add("Upload complete"); } }); - } - - private String getImportURL() { - String loc = _remoteLocation.trim(); - int archiveRoot = loc.lastIndexOf('/'); - int syndieRoot = loc.lastIndexOf('/', 
archiveRoot-1); - return loc.substring(0, syndieRoot + 1) + "import.jsp"; - } - - public void renderDeltaForm(User user, ArchiveIndex localIndex, Writer out) throws IOException { - Archive archive = BlogManager.instance().getArchive(); - StringBuffer buf = new StringBuffer(512); - buf.append("New blogs:
      \n"); - } - - int newEntries = 0; - int localNew = 0; - out.write("\n"); - List entries = new ArrayList(); - for (Iterator iter = remoteBlogs.iterator(); iter.hasNext(); ) { - Hash blog = (Hash)iter.next(); - if (ignoreBlog(user, blog)) - continue; - buf.setLength(0); - int shownEntries = 0; - buf.append("\n"); - buf.append(""); - buf.append(""); - buf.append(""); - buf.append("\n"); - entries.clear(); - _remoteIndex.selectMatchesOrderByEntryId(entries, blog, null); - for (int i = 0; i < entries.size(); i++) { - BlogURI uri = (BlogURI)entries.get(i); - buf.append("\n"); - if (!archive.getIndex().getEntryIsKnown(uri)) { - buf.append("\n"); - newEntries++; - shownEntries++; - } else { - String page = "threads.jsp?" + ThreadedHTMLRenderer.PARAM_VIEW_POST + '=' + blog.toBase64() + '/' + uri.getEntryId(); - buf.append("\n"); - } - buf.append("\n"); - buf.append("\n"); - buf.append("\n"); - buf.append("\n"); - buf.append("\n"); - } - if (shownEntries > 0) { - out.write(buf.toString()); - buf.setLength(0); - } - int remote = shownEntries; - - // now for posts in known blogs that we have and they don't - entries.clear(); - localIndex.selectMatchesOrderByEntryId(entries, blog, null); - buf.append("\n"); - for (int i = 0; i < entries.size(); i++) { - BlogURI uri = (BlogURI)entries.get(i); - if (!_remoteIndex.getEntryIsKnown(uri)) { - buf.append("\n"); - buf.append("\n"); - shownEntries++; - newEntries++; - localNew++; - buf.append("\n"); - buf.append("\n"); - buf.append("\n"); - buf.append("\n"); - buf.append("\n"); - } - } - - if (shownEntries > remote) // skip blogs we have already syndicated - out.write(buf.toString()); - } - - // now for posts in blogs we have and they don't - int newBefore = localNew; - buf.setLength(0); - buf.append("\n"); - for (Iterator iter = localBlogs.iterator(); iter.hasNext(); ) { - Hash blog = (Hash)iter.next(); - if (remoteBlogs.contains(blog)) { - //System.err.println("Remote index has " + blog.toBase64()); - continue; - } else if 
(ignoreBlog(user, blog)) { - continue; - } - - entries.clear(); - localIndex.selectMatchesOrderByEntryId(entries, blog, null); - - for (int i = 0; i < entries.size(); i++) { - BlogURI uri = (BlogURI)entries.get(i); - buf.append("\n"); - buf.append("\n"); - buf.append("\n"); - buf.append("\n"); - buf.append("\n"); - buf.append("\n"); - buf.append("\n"); - localNew++; - } - } - if (localNew > newBefore) - out.write(buf.toString()); - - out.write("
      \n"); - BlogInfo info = archive.getBlogInfo(blog); - if (info != null) { - buf.append(HTMLRenderer.sanitizeString(info.getProperty(BlogInfo.NAME))).append(": "); - buf.append("").append(HTMLRenderer.sanitizeString(info.getProperty(BlogInfo.DESCRIPTION))); - buf.append("\n"); - } else { - buf.append("" + blog.toBase64() + "\n"); - } - buf.append("
       "); - buf.append("Posted on#SizeTags
      (local)" + getDate(uri.getEntryId()) + "" + getId(uri.getEntryId()) + "" + _remoteIndex.getBlogEntrySizeKB(uri) + "KB"); - for (Iterator titer = new TreeSet(_remoteIndex.getBlogEntryTags(uri)).iterator(); titer.hasNext(); ) { - String tag = (String)titer.next(); - String page = "threads.jsp?" + ThreadedHTMLRenderer.PARAM_TAGS + '=' + HTMLRenderer.sanitizeTagParam(tag); - buf.append("" + HTMLRenderer.sanitizeString(tag) + " \n"); - } - buf.append("
      Entries we have, but the remote Syndie doesn't:
      " + getDate(uri.getEntryId()) + "" + getId(uri.getEntryId()) + "" + localIndex.getBlogEntrySizeKB(uri) + "KB"); - for (Iterator titer = new TreeSet(localIndex.getBlogEntryTags(uri)).iterator(); titer.hasNext(); ) { - String tag = (String)titer.next(); - String page = "threads.jsp?" + ThreadedHTMLRenderer.PARAM_TAGS + '=' + HTMLRenderer.sanitizeTagParam(tag); - buf.append("" + HTMLRenderer.sanitizeString(tag) + " \n"); - } - buf.append("
      Blogs the remote Syndie doesn't have
      " + getDate(uri.getEntryId()) + "" + getId(uri.getEntryId()) + "" + localIndex.getBlogEntrySizeKB(uri) + "KB"); - for (Iterator titer = new TreeSet(localIndex.getBlogEntryTags(uri)).iterator(); titer.hasNext(); ) { - String tag = (String)titer.next(); - String page = "threads.jsp?" + ThreadedHTMLRenderer.PARAM_TAGS + '=' + HTMLRenderer.sanitizeTagParam(tag); - buf.append("" + HTMLRenderer.sanitizeString(tag) + " \n"); - } - buf.append("
      \n"); - if (newEntries > 0) { - out.write(" \n"); - out.write(" \n"); - } else { - out.write("" + HTMLRenderer.sanitizeString(_remoteLocation) + " has no new posts to offer us\n"); - } - if (localNew > 0) { - out.write(" \n"); - } - out.write("
      \n"); - } - private final SimpleDateFormat _dateFormat = new SimpleDateFormat("yyyy/MM/dd", Locale.UK); - private String getDate(long when) { - synchronized (_dateFormat) { - return _dateFormat.format(new Date(when)); - } - } - private final String getEntryDate(long when) { - synchronized (_dateFormat) { - try { - String str = _dateFormat.format(new Date(when)); - long dayBegin = _dateFormat.parse(str).getTime(); - return str + "." + (when - dayBegin); - } catch (ParseException pe) { - pe.printStackTrace(); - // wtf - return "unknown"; - } - } - } - - private long getId(long id) { - synchronized (_dateFormat) { - try { - String str = _dateFormat.format(new Date(id)); - long dayBegin = _dateFormat.parse(str).getTime(); - return (id - dayBegin); - } catch (ParseException pe) { - pe.printStackTrace(); - // wtf - return id; - } - } - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/RunStandalone.java b/apps/syndie/java/src/net/i2p/syndie/web/RunStandalone.java deleted file mode 100644 index 778d3e41a..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/RunStandalone.java +++ /dev/null @@ -1,52 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.File; - -import net.i2p.util.FileUtil; - -import org.mortbay.jetty.Server; - -public class RunStandalone { - private Server _server; - - static { - System.setProperty("org.mortbay.http.Version.paranoid", "true"); - System.setProperty("org.mortbay.xml.XmlParser.NotValidating", "true"); - System.setProperty("syndie.rootDir", "."); - System.setProperty("syndie.defaultSingleUserArchives", "http://syndiemedia.i2p.net:8000/archive/archive.txt"); - System.setProperty("syndie.defaultProxyHost", ""); - System.setProperty("syndie.defaultProxyPort", ""); - } - - private RunStandalone(String args[]) {} - - public static void main(String args[]) { - RunStandalone runner = new RunStandalone(args); - runner.start(); - } - - public void start() { - File workDir = new File("work"); - boolean workDirRemoved = 
FileUtil.rmdir(workDir, false); - if (!workDirRemoved) - System.err.println("ERROR: Unable to remove Jetty temporary work directory"); - boolean workDirCreated = workDir.mkdirs(); - if (!workDirCreated) - System.err.println("ERROR: Unable to create Jetty temporary work directory"); - - try { - _server = new Server("jetty-syndie.xml"); - _server.start(); - } catch (Exception e) { - e.printStackTrace(); - } - } - - public void stop() { - try { - _server.stop(); - } catch (InterruptedException ie) { - ie.printStackTrace(); - } - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/SwitchServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/SwitchServlet.java deleted file mode 100644 index 44fd4e970..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/SwitchServlet.java +++ /dev/null @@ -1,46 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.IOException; -import java.io.PrintWriter; - -import javax.servlet.http.HttpServletRequest; - -import net.i2p.syndie.Archive; -import net.i2p.syndie.User; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.ThreadIndex; -import net.i2p.syndie.sml.ThreadedHTMLRenderer; - -/** - * Login/register form - * - */ -public class SwitchServlet extends BaseServlet { - protected String getTitle() { return "Syndie :: Login/Register"; } - - protected void renderServletDetails(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index, - int threadOffset, BlogURI visibleEntry, Archive archive) throws IOException { - out.write("
      \n"); - writeAuthActionFields(out); - out.write("Log in to an existing account\n" + - "Login: \n" + - "Password: \n" + - "\n" + - "\n" + - "\n" + - "
      \n" + - "
      \n" + - "
      \n"); - writeAuthActionFields(out); - out.write("Register a new account\n" + - "Login: (only known locally)\n" + - "Password: \n" + - "Public name: \n" + - "Description: \n" + - "Contact URL: \n" + - "Registration password: " + - " (only necessary if the Syndie administrator requires it)\n" + - "\n" + - "
      \n"); - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/SyndicateServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/SyndicateServlet.java deleted file mode 100644 index 337ac5aa1..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/SyndicateServlet.java +++ /dev/null @@ -1,154 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.IOException; -import java.io.PrintWriter; -import java.util.Iterator; - -import javax.servlet.http.HttpServletRequest; - -import net.i2p.client.naming.PetName; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.User; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.ThreadIndex; -import net.i2p.syndie.sml.HTMLRenderer; - -/** - * Syndicate with another remote Syndie node - * - */ -public class SyndicateServlet extends BaseServlet { - protected String getTitle() { return "Syndie :: Syndicate"; } - - public static final String PARAM_SCHEMA = "schema"; - public static final String PARAM_LOCATION = "location"; - public static final String PARAM_PETNAME = "petname"; - - protected void renderServletDetails(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index, - int threadOffset, BlogURI visibleEntry, Archive archive) throws IOException { - if (!BlogManager.instance().authorizeRemote(user)) { - out.write("Sorry, you are not authorized to access remote archives\n"); - } else { - out.write("
      "); - displayForm(user, req, out); - handleRequest(user, req, index, out); - out.write("
      \n"); - } - } - - private void handleRequest(User user, HttpServletRequest req, ThreadIndex index, PrintWriter out) throws IOException { - RemoteArchiveBean remote = getRemote(req); - String action = req.getParameter("action"); - if ("Continue...".equals(action)) { - String location = req.getParameter(PARAM_LOCATION); - String pn = req.getParameter(PARAM_PETNAME); - if ( (pn != null) && (pn.trim().length() > 0) ) { - PetName pnval = user.getPetNameDB().getByName(pn); - if (pnval != null) location = pnval.getLocation(); - } - - // dont allow caching if they explicit ask for a fetch - boolean allowCaching = false; - remote.fetchIndex(user, req.getParameter(PARAM_SCHEMA), location, - req.getParameter("proxyhost"), - req.getParameter("proxyport"), allowCaching); - } else if ("Fetch metadata".equals(action)) { - remote.fetchMetadata(user, req.getParameterMap()); - } else if ("Fetch selected entries".equals(action)) { - //remote.fetchSelectedEntries(user, request.getParameterMap()); - remote.fetchSelectedBulk(user, req.getParameterMap()); - } else if ("Fetch all new entries".equals(action)) { - //remote.fetchAllEntries(user, request.getParameterMap()); - remote.fetchSelectedBulk(user, req.getParameterMap()); - } else if ("Post selected entries".equals(action)) { - remote.postSelectedEntries(user, req.getParameterMap()); - } - String msgs = remote.getStatus(); - if ( (msgs != null) && (msgs.length() > 0) ) { - out.write("
      ");
      -            out.write(msgs);
      -            out.write("Refresh

      \n"); - } - - if (remote.getFetchIndexInProgress()) { - out.write("Please wait while the index is being fetched "); - out.write("from "); - out.write(remote.getRemoteLocation()); - out.write("."); - } else if (remote.getRemoteIndex() != null) { - // remote index is NOT null! - out.write(""); - out.write(remote.getRemoteLocation()); - out.write(""); - out.write("(refetch):
      \n"); - - remote.renderDeltaForm(user, BlogManager.instance().getArchive().getIndex(), out); - out.write(""); - } - - out.write("\n"); - } - - private void displayForm(User user, HttpServletRequest req, PrintWriter out) throws IOException { - writeAuthActionFields(out); - out.write(""); - out.write("Import from:\n"); - out.write("\n"); - out.write("Proxy\n"); - out.write("\n"); - out.write("
      \n"); - out.write("Bookmarked archives:\n"); - out.write(" or "); - out.write("\n"); - out.write("
      \n"); - out.write("
      \n"); - } - - private static final String ATTR_REMOTE = "remote"; - protected RemoteArchiveBean getRemote(HttpServletRequest req) { - RemoteArchiveBean remote = (RemoteArchiveBean)req.getSession().getAttribute(ATTR_REMOTE); - if (remote == null) { - remote = new RemoteArchiveBean(); - req.getSession().setAttribute(ATTR_REMOTE, remote); - } - return remote; - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/ThreadNavServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/ThreadNavServlet.java deleted file mode 100644 index abb6f852e..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/ThreadNavServlet.java +++ /dev/null @@ -1,157 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.IOException; -import java.io.PrintWriter; - -import javax.servlet.ServletException; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; - -import net.i2p.I2PAppContext; -import net.i2p.client.naming.PetName; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.HeaderReceiver; -import net.i2p.syndie.User; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.EntryContainer; -import net.i2p.syndie.data.FilteredThreadIndex; -import net.i2p.syndie.data.ThreadIndex; -import net.i2p.syndie.data.ThreadNode; -import net.i2p.syndie.sml.HTMLRenderer; -import net.i2p.syndie.sml.SMLParser; - -/** - * Export the thread nav as either RDF or XML - * - */ -public class ThreadNavServlet extends BaseServlet { - public static final String PARAM_COUNT = "count"; - public static final String PARAM_OFFSET = "offset"; - public static final String PARAM_FORMAT = "format"; - - public static final String FORMAT_RDF = "rdf"; - public static final String FORMAT_XML = "xml"; - - protected void render(User user, HttpServletRequest req, HttpServletResponse resp, ThreadIndex index) throws ServletException, IOException { - int threadCount = empty(req, PARAM_COUNT) ? 
index.getRootCount() : getInt(req, PARAM_COUNT); - int offset = getInt(req, PARAM_OFFSET); - String uri = req.getRequestURI(); - if (uri.endsWith(FORMAT_XML)) { - resp.setContentType("text/xml; charset=UTF-8"); - render(user, index, resp.getWriter(), threadCount, offset, FORMAT_XML); - } else { - resp.setContentType("application/rdf+xml; charset=UTF-8"); - render(user, index, resp.getWriter(), threadCount, offset, FORMAT_RDF); - } - } - - private int getInt(HttpServletRequest req, String param) { - String val = req.getParameter(param); - if (val != null) { - try { - return Integer.parseInt(val); - } catch (NumberFormatException nfe) { - // ignore - } - } - return -1; - } - - private static final int DEFAULT_THREADCOUNT = 10; - private static final int DEFAULT_THREADOFFSET = 0; - - private void render(User user, ThreadIndex index, PrintWriter out, int threadCount, int offset, String format) throws IOException { - int startRoot = DEFAULT_THREADOFFSET; - if (offset >= 0) - startRoot = offset; - renderStart(out, format); - - int endRoot = startRoot + (threadCount > 0 ? 
threadCount : DEFAULT_THREADCOUNT); - if (endRoot >= index.getRootCount()) - endRoot = index.getRootCount() - 1; - for (int i = startRoot; i <= endRoot; i++) { - ThreadNode node = index.getRoot(i); - if (FORMAT_XML.equals(format)) - out.write(node.toString()); - else - render(user, node, out); - } - renderEnd(out, format); - } - private void renderStart(PrintWriter out, String format) throws IOException { - out.write("\n"); - if (FORMAT_XML.equals(format)) { - out.write(""); - } else { - out.write("\n"); - out.write("\n"); - } - } - private void renderEnd(PrintWriter out, String format) throws IOException { - if (FORMAT_XML.equals(format)) { - out.write(""); - } else { - out.write("\n"); - out.write("\n"); - } - } - private void render(User user, ThreadNode node, PrintWriter out) throws IOException { - Archive archive = BlogManager.instance().getArchive(); - String blog = node.getEntry().getKeyHash().toBase64(); - out.write("\n"); - out.write(""); - PetName pn = user.getPetNameDB().getByLocation(blog); - String name = null; - if (pn != null) { - if (pn.isMember(FilteredThreadIndex.GROUP_FAVORITE)) - out.write("\n"); - if (pn.isMember(FilteredThreadIndex.GROUP_IGNORE)) - out.write("\n"); - name = pn.getName(); - } else { - BlogInfo info = archive.getBlogInfo(node.getEntry().getKeyHash()); - if (info != null) - name = info.getProperty(BlogInfo.NAME); - if ( (name == null) || (name.trim().length() <= 0) ) - name = node.getEntry().getKeyHash().toBase64().substring(0,6); - } - out.write("" + HTMLRenderer.sanitizeStrippedXML(name) + "\n"); - if ( (user.getBlog() != null) && (node.containsAuthor(user.getBlog())) ) - out.write("\n"); - - EntryContainer entry = archive.getEntry(node.getEntry()); - if (entry == null) throw new RuntimeException("Unable to fetch the entry " + node.getEntry()); - - SMLParser parser = new SMLParser(I2PAppContext.getGlobalContext()); - HeaderReceiver rec = new HeaderReceiver(); - parser.parse(entry.getEntry().getText(), rec); - String subject = 
rec.getHeader(HTMLRenderer.HEADER_SUBJECT); - if ( (subject == null) || (subject.trim().length() <= 0) ) - subject = "(no subject)"; - - out.write("" + HTMLRenderer.sanitizeStrippedXML(subject) + "\n"); - - long dayBegin = BlogManager.instance().getDayBegin(); - long postId = node.getEntry().getEntryId(); - int daysAgo = (int)((dayBegin - postId + 24*60*60*1000l-1l)/(24*60*60*1000l)); - out.write("" + daysAgo + "\n"); - - out.write(""); - out.write(""); - for (int i = 0; i < node.getChildCount(); i++) - render(user, node.getChild(i), out); - out.write("\n"); - out.write("\n"); - - out.write("\n"); - out.write("\n"); - } - - protected void renderServletDetails(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index, - int threadOffset, BlogURI visibleEntry, Archive archive) throws IOException { - throw new UnsupportedOperationException("Not relevant..."); - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/ViewBlogServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/ViewBlogServlet.java deleted file mode 100644 index 6a254f521..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/ViewBlogServlet.java +++ /dev/null @@ -1,819 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.io.PrintWriter; -import java.io.Reader; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - -import javax.servlet.ServletException; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; - -import net.i2p.client.naming.PetName; -import net.i2p.data.Base64; -import net.i2p.data.Hash; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.User; -import net.i2p.syndie.data.ArchiveIndex; -import net.i2p.syndie.data.Attachment; -import net.i2p.syndie.data.BlogInfo; -import 
net.i2p.syndie.data.BlogInfoData; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.EntryContainer; -import net.i2p.syndie.data.FilteredThreadIndex; -import net.i2p.syndie.data.ThreadIndex; -import net.i2p.syndie.data.ThreadNode; -import net.i2p.syndie.sml.BlogPostInfoRenderer; -import net.i2p.syndie.sml.BlogRenderer; -import net.i2p.syndie.sml.HTMLRenderer; -import net.i2p.syndie.sml.ThreadedHTMLRenderer; -import net.i2p.util.FileUtil; -import net.i2p.util.Log; - -/** - * Render the appropriate posts for the current blog, using any blog info data available - * - */ -public class ViewBlogServlet extends BaseServlet { - public static final String PARAM_OFFSET = "offset"; - /** $blogHash */ - public static final String PARAM_BLOG = "blog"; - /** $blogHash/$entryId */ - public static final String PARAM_ENTRY = "entry"; - /** tag,tag,tag */ - public static final String PARAM_TAG = "tag"; - /** $blogHash/$entryId/$attachmentId */ - public static final String PARAM_ATTACHMENT = "attachment"; - /** image within the BlogInfoData to load (e.g. 
logo.png, icon_$tagHash.png, etc) */ - public static final String PARAM_IMAGE = "image"; - - public void service(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { - req.setCharacterEncoding("UTF-8"); - String attachment = req.getParameter(PARAM_ATTACHMENT); - if (attachment != null) { - // if they requested an attachment, serve it up to 'em - if (renderAttachment(req, resp, attachment)) - return; - } - String img = req.getParameter(PARAM_IMAGE); - if (img != null) { - boolean rendered = renderUpdatedImage(img, req, resp); - if (!rendered) - rendered = renderPublishedImage(img, req, resp); - if (!rendered) - rendered = renderDefaultImage(img, req, resp); - if (rendered) return; - } - super.service(req, resp); - } - - protected void render(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index) throws ServletException, IOException { - Archive archive = BlogManager.instance().getArchive(); - - Hash blog = null; - String name = req.getParameter(PARAM_BLOG); - if ( (name == null) || (name.trim().length() <= 0) ) { - blog = user.getBlog(); - } else { - byte val[] = Base64.decode(name); - if ( (val != null) && (val.length == Hash.HASH_LENGTH) ) - blog = new Hash(val); - } - - BlogInfo info = null; - if (blog != null) - info = archive.getBlogInfo(blog); - - int offset = 0; - String off = req.getParameter(PARAM_OFFSET); - if (off != null) try { offset = Integer.parseInt(off); } catch (NumberFormatException nfe) {} - - List posts = getPosts(user, archive, info, req, index); - render(user, req, out, archive, info, posts, offset); - } - - private BlogURI getEntry(HttpServletRequest req) { - String param = req.getParameter(PARAM_ENTRY); - if (param != null) - return new BlogURI("blog://" + param); - return null; - } - - private List getPosts(User user, Archive archive, BlogInfo info, HttpServletRequest req, ThreadIndex index) { - List rv = new ArrayList(1); - if (info == null) return rv; - - String entrySelected = 
req.getParameter(PARAM_ENTRY); - if (entrySelected != null) { - // $blogKey/$entryId - BlogURI uri = null; - if (entrySelected.startsWith("blog://")) - uri = new BlogURI(entrySelected); - else - uri = new BlogURI("blog://" + entrySelected.trim()); - if (uri.getEntryId() >= 0) { - rv.add(uri); - return rv; - } - } - - ArchiveIndex aindex = archive.getIndex(); - - BlogURI uri = getEntry(req); - if (uri != null) { - rv.add(uri); - return rv; - } - - aindex.selectMatchesOrderByEntryId(rv, info.getKey().calculateHash(), null); - - // lets filter out any posts that are not roots - for (int i = 0; i < rv.size(); i++) { - BlogURI curURI = (BlogURI)rv.get(i); - ThreadNode node = index.getNode(curURI); - if ( (node != null) && (node.getParent() == null) ) { - // ok, its a root - Collection tags = node.getTags(); - if ( (tags != null) && (tags.contains(BlogInfoData.TAG)) ) { - // skip this, as its an info post - rv.remove(i); - i--; - } - } else { - rv.remove(i); - i--; - } - } - return rv; - } - - private void render(User user, HttpServletRequest req, PrintWriter out, Archive archive, BlogInfo info, List posts, int offset) throws IOException { - String title = null; - String desc = null; - BlogInfoData data = null; - if (info != null) { - title = info.getProperty(BlogInfo.NAME); - desc = info.getProperty(BlogInfo.DESCRIPTION); - String dataURI = info.getProperty(BlogInfo.SUMMARY_ENTRY_ID); - if (dataURI != null) { - EntryContainer entry = archive.getEntry(new BlogURI(dataURI)); - if (entry != null) { - data = new BlogInfoData(); - try { - data.load(entry); - } catch (IOException ioe) { - data = null; - if (_log.shouldLog(Log.WARN)) - _log.warn("Error loading the blog info data from " + dataURI, ioe); - } - } - } - } - String pageTitle = "Syndie :: Blogs" + (desc != null ? 
" :: " + desc : ""); - if (title != null) pageTitle = pageTitle + " (" + title + ")"; - pageTitle = HTMLRenderer.sanitizeString(pageTitle); - out.write("\n"); - out.write("\n"); - out.write("\n\n" + pageTitle + "\n"); - if (info != null) - out.write("\n"); - out.write(""); - renderHeader(user, req, out, info, data, title, desc); - renderReferences(user, out, info, data, req, posts); - renderBody(user, out, info, data, posts, offset, archive, req); - out.write("\n"); - } - private void renderStyle(PrintWriter out, BlogInfo info, BlogInfoData data, HttpServletRequest req) throws IOException { - // modify it based on data.getStyleOverrides()... - out.write(CSS); - Reader css = null; - try { - InputStream in = req.getSession().getServletContext().getResourceAsStream("/syndie.css"); - if (in != null) { - css = new InputStreamReader(in, "UTF-8"); - char buf[] = new char[1024]; - int read = 0; - while ( (read = css.read(buf)) != -1) - out.write(buf, 0, read); - } - } finally { - if (css != null) - css.close(); - } - String content = FileUtil.readTextFile("./docs/syndie_standard.css", -1, true); - if (content != null) out.write(content); - } - - public static String getLogoURL(Hash blog) { - if (blog == null) return ""; - return "blog.jsp?" + PARAM_BLOG + "=" + blog.toBase64() + "&" - + PARAM_IMAGE + "=" + BlogInfoData.ATTACHMENT_LOGO; - } - - private void renderHeader(User user, HttpServletRequest req, PrintWriter out, BlogInfo info, BlogInfoData data, String title, String desc) throws IOException { - out.write("\n" + - "Content\n"); - renderNavBar(user, req, out); - out.write("
      \n"); - Hash kh = null; - if ( (info != null) && (info.getKey() != null) ) - kh = info.getKey().calculateHash(); - out.write("\"\"\n"); - String name = desc; - if ( (name == null) || (name.trim().length() <= 0) ) - name = title; - if ( ( (name == null) || (name.trim().length() <= 0) ) && (info != null) && (kh != null) ) - name = kh.toBase64(); - if (name != null) { - String url = "blog.jsp?" + (info != null ? PARAM_BLOG + "=" + info.getKey().calculateHash().toBase64() : ""); - out.write("" - + HTMLRenderer.sanitizeString(name) + ""); - out.write("
      profile threads"); - } - out.write("
      \n"); - } - - public static final String DEFAULT_GROUP_NAME = "References"; - private void renderReferences(User user, PrintWriter out, BlogInfo info, BlogInfoData data, HttpServletRequest req, List posts) throws IOException { - out.write("
      \n"); - if (data != null) { - for (int i = 0; i < data.getReferenceGroupCount(); i++) { - List group = data.getReferenceGroup(i); - if (group.size() <= 0) continue; - PetName pn = (PetName)group.get(0); - String name = null; - if (pn.getGroupCount() <= 0) - name = DEFAULT_GROUP_NAME; - else - name = HTMLRenderer.sanitizeString(pn.getGroup(0)); - out.write("\n"); - out.write("
      \n"); - out.write("" + name + "\n"); - out.write("
        \n"); - for (int j = 0; j < group.size(); j++) { - pn = (PetName)group.get(j); - out.write("
      • " + renderLink(info.getKey().calculateHash(), pn) + "
      • \n"); - } - out.write("
      \n
      \n\n"); - } - } - //out.write("
      \n"); - //out.write("Custom links\n"); - //out.write("\n"); - //out.write("
      "); - - renderPostReferences(user, req, out, posts); - - out.write("
      "); - out.write("Secured by Syndie"); - out.write("
      \n"); - out.write("
      \n\n"); - } - - private void renderPostReferences(User user, HttpServletRequest req, PrintWriter out, List posts) throws IOException { - if (!empty(req, PARAM_ENTRY) && (posts.size() == 1)) { - BlogURI uri = (BlogURI)posts.get(0); - Archive archive = BlogManager.instance().getArchive(); - EntryContainer entry = archive.getEntry(uri); - if (entry != null) { - out.write("
      \n"); - - BlogPostInfoRenderer renderer = new BlogPostInfoRenderer(_context); - renderer.render(user, archive, entry, out); - - out.write("
      \n"); - } - } - } - - /** generate a link for the given petname within the scope of the given blog */ - public static String renderLink(Hash blogFrom, PetName pn) { - StringBuffer buf = new StringBuffer(64); - String type = pn.getProtocol(); - if ("syndieblog".equals(type)) { - String loc = pn.getLocation(); - if (loc != null) { - buf.append(""); - } - buf.append(HTMLRenderer.sanitizeString(pn.getName())); - if (loc != null) { - buf.append(""); - //buf.append(" \"\"\n"); - } - } else if ("syndieblogpost".equals(type)) { - String loc = pn.getLocation(); - if (loc != null) { - buf.append(""); - } - buf.append(HTMLRenderer.sanitizeString(pn.getName())); - if (loc != null) { - buf.append(""); - } - } else if ("syndieblogattachment".equals(type)) { - String loc = pn.getLocation(); - if (loc != null) { - int split = loc.lastIndexOf('/'); - try { - int attachmentId = -1; - if (split > 0) - attachmentId = Integer.parseInt(loc.substring(split+1)); - - if (attachmentId < 0) { - loc = null; - } else { - BlogURI post = null; - if (loc.startsWith("blog://")) - post = new BlogURI(loc.substring(0, split)); - else - post = new BlogURI("blog://" + loc.substring(0, split)); - - EntryContainer entry = BlogManager.instance().getArchive().getEntry(post); - if (entry != null) { - Attachment attachments[] = entry.getAttachments(); - if (attachmentId < attachments.length) { - buf.append(""); - buf.append(HTMLRenderer.sanitizeString(pn.getName())); - buf.append(""); - } else { - loc = null; - } - } else { - loc = null; - } - } - } catch (Exception e) { - e.printStackTrace(); - loc = null; - } - } - if (loc == null) - buf.append(HTMLRenderer.sanitizeString(pn.getName())); - } else if ( ("eepsite".equals(type)) || ("i2p".equals(type)) || - ("website".equals(type)) || ("http".equals(type)) || ("web".equals(type)) ) { - String loc = pn.getLocation(); - if (loc != null) { - buf.append(""); - } - buf.append(HTMLRenderer.sanitizeString(pn.getName())); - if (loc != null) { - buf.append(""); 
- } - } else { - buf.append(""); - buf.append(HTMLRenderer.sanitizeString(pn.getName())).append(""); - } - return buf.toString(); - } - - private static final int POSTS_PER_PAGE = 5; - private void renderBody(User user, PrintWriter out, BlogInfo info, BlogInfoData data, List posts, int offset, Archive archive, HttpServletRequest req) throws IOException { - out.write("
      \n\n\n"); - if (info == null) { - out.write("No blog specified\n"); - return; - } - - BlogRenderer renderer = new BlogRenderer(_context, info, data); - - if ( (posts.size() == 1) && (req.getParameter(PARAM_ENTRY) != null) ) { - BlogURI uri = (BlogURI)posts.get(0); - EntryContainer entry = archive.getEntry(uri); - renderer.renderPost(user, archive, entry, out, false, true); - renderComments(user, out, info, data, entry, archive, renderer); - } else { - for (int i = offset; i < posts.size() && i < offset + POSTS_PER_PAGE; i++) { - BlogURI uri = (BlogURI)posts.get(i); - EntryContainer entry = archive.getEntry(uri); - renderer.renderPost(user, archive, entry, out, true, true); - } - - renderNav(out, info, data, posts, offset, archive, req); - } - - out.write("
      \n"); - } - - private void renderComments(User user, PrintWriter out, BlogInfo info, BlogInfoData data, EntryContainer entry, - Archive archive, BlogRenderer renderer) throws IOException { - ArchiveIndex index = archive.getIndex(); - out.write("
      \n"); - renderComments(user, out, entry.getURI(), archive, index, renderer); - out.write("
      \n"); - } - private void renderComments(User user, PrintWriter out, BlogURI parentURI, Archive archive, ArchiveIndex index, BlogRenderer renderer) throws IOException { - List replies = index.getReplies(parentURI); - if (replies.size() > 0) { - out.write("
        \n"); - for (int i = 0; i < replies.size(); i++) { - BlogURI uri = (BlogURI)replies.get(i); - out.write("
      • "); - if (!shouldIgnore(user, uri)) { - EntryContainer cur = archive.getEntry(uri); - renderer.renderComment(user, archive, cur, out); - // recurse - renderComments(user, out, uri, archive, index, renderer); - } - out.write("
      • \n"); - } - out.write("
      \n"); - } - } - - private boolean shouldIgnore(User user, BlogURI uri) { - PetName pn = user.getPetNameDB().getByLocation(uri.getKeyHash().toBase64()); - return ( (pn != null) && pn.isMember(FilteredThreadIndex.GROUP_IGNORE)); - } - - private void renderNav(PrintWriter out, BlogInfo info, BlogInfoData data, List posts, int offset, Archive archive, HttpServletRequest req) throws IOException { - out.write("

      \n"); - String uri = req.getRequestURI() + "?"; - if (info != null) - uri = uri + PARAM_BLOG + "=" + info.getKey().calculateHash().toBase64() + "&"; - if (offset + POSTS_PER_PAGE >= posts.size()) - out.write(POSTS_PER_PAGE + " more older entries"); - else - out.write("" - + POSTS_PER_PAGE + " older entries"); - out.write(" | "); - if (offset <= 0) - out.write(POSTS_PER_PAGE + " more recent entries"); - else - out.write("" + POSTS_PER_PAGE + " more recent entries"); - - out.write("
      \n"); - } - - /** - * render the attachment to the browser, using the appropriate mime types, etc - * @param attachment formatted as $blogHash/$entryId/$attachmentId - * @return true if rendered - */ - private boolean renderAttachment(HttpServletRequest req, HttpServletResponse resp, String attachment) throws ServletException, IOException { - int split = attachment.lastIndexOf('/'); - if (split <= 0) - return false; - BlogURI uri = new BlogURI("blog://" + attachment.substring(0, split)); - try { - int attachmentId = Integer.parseInt(attachment.substring(split+1)); - if (attachmentId < 0) return false; - EntryContainer entry = BlogManager.instance().getArchive().getEntry(uri); - if (entry == null) { - System.out.println("Could not render the attachment [" + uri + "] / " + attachmentId); - return false; - } - Attachment attachments[] = entry.getAttachments(); - if (attachmentId >= attachments.length) { - System.out.println("Out of range attachment on " + uri + ": " + attachmentId); - return false; - } - - resp.setContentType(ArchiveViewerBean.getAttachmentContentType(attachments[attachmentId])); - boolean inline = ArchiveViewerBean.getAttachmentShouldShowInline(attachments[attachmentId]); - String filename = ArchiveViewerBean.getAttachmentFilename(attachments[attachmentId]); - if (inline) - resp.setHeader("Content-Disposition", "inline; filename=\"" + filename + "\""); - else - resp.setHeader("Content-Disposition", "attachment; filename=\"" + filename + "\""); - int len = ArchiveViewerBean.getAttachmentContentLength(attachments[attachmentId]); - if (len >= 0) - resp.setContentLength(len); - ArchiveViewerBean.renderAttachment(attachments[attachmentId], resp.getOutputStream()); - return true; - } catch (NumberFormatException nfe) {} - return false; - } - - - private boolean renderUpdatedImage(String requestedImage, HttpServletRequest req, HttpServletResponse resp) throws IOException { - BlogConfigBean bean = BlogConfigServlet.getConfigBean(req); - if ( (bean != 
null) && (bean.isUpdated()) && (bean.getLogo() != null) ) { - // the updated image only affects *our* blog... - User u = bean.getUser(); - if (u != null) { - String reqBlog = req.getParameter(PARAM_BLOG); - if ( (reqBlog == null) || (u.getBlog().toBase64().equals(reqBlog)) ) { - if (BlogInfoData.ATTACHMENT_LOGO.equals(requestedImage)) { - File logo = bean.getLogo(); - if (logo != null) { - byte buf[] = new byte[4096]; - resp.setContentType("image/png"); - resp.setContentLength((int)logo.length()); - OutputStream out = resp.getOutputStream(); - FileInputStream in = null; - try { - in = new FileInputStream(logo); - int read = 0; - while ( (read = in.read(buf)) != -1) - out.write(buf, 0, read); - _log.debug("Done writing the updated full length logo"); - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - if (out != null) try { out.close(); } catch (IOException ioe) {} - } - _log.debug("Returning from writing the updated full length logo"); - return true; - } - } else { - // ok, the blogConfigBean doesn't let people configure other things yet... 
fall through - } - } - } - } - return false; - } - - private boolean renderPublishedImage(String requestedImage, HttpServletRequest req, HttpServletResponse resp) throws IOException { - // nothing matched in the updated config, lets look at the current published info - String blog = req.getParameter(PARAM_BLOG); - if (blog != null) { - Archive archive = BlogManager.instance().getArchive(); - byte h[] = Base64.decode(blog); - if ( (h != null) && (h.length == Hash.HASH_LENGTH) ) { - Hash blogHash = new Hash(h); - BlogInfo info = archive.getBlogInfo(blogHash); - String entryId = info.getProperty(BlogInfo.SUMMARY_ENTRY_ID); - _log.debug("Author's entryId: " + entryId); - if (entryId != null) { - BlogURI dataURI = new BlogURI(entryId); - EntryContainer entry = archive.getEntry(dataURI); - if (entry != null) { - BlogInfoData data = new BlogInfoData(); - try { - data.load(entry); - - _log.debug("Blog info data loaded from: " + entryId); - Attachment toWrite = null; - if (BlogInfoData.ATTACHMENT_LOGO.equals(requestedImage)) { - toWrite = data.getLogo(); - } else { - toWrite = data.getOtherAttachment(requestedImage); - } - if (toWrite != null) { - resp.setContentType("image/png"); - resp.setContentLength(toWrite.getDataLength()); - InputStream in = null; - OutputStream out = null; - try { - in = toWrite.getDataStream(); - out = resp.getOutputStream(); - byte buf[] = new byte[4096]; - int read = -1; - while ( (read = in.read(buf)) != -1) - out.write(buf, 0, read); - - _log.debug("Write image from: " + entryId); - return true; - } finally { - if (in != null) try { in.close(); } catch (IOException ioe) {} - if (out != null) try { out.close(); } catch (IOException ioe) {} - } - } - } catch (IOException ioe) { - _log.debug("Error reading/writing: " + entryId, ioe); - data = null; - } - } - } - } - } - return false; - } - - /** 1px png, base64 encoded, used if they asked for an image that we dont know of */ - private static final byte BLANK_IMAGE[] = 
Base64.decode("iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQI12NgYAAAAAMAASDVlMcAAAAASUVORK5CYII="); - private boolean renderDefaultImage(String requestedImage, HttpServletRequest req, HttpServletResponse resp) throws IOException { - if (requestedImage.equals("logo.png")) { - InputStream in = req.getSession().getServletContext().getResourceAsStream("/images/default_blog_logo.png"); - if (in != null) { - resp.setContentType("image/png"); - OutputStream out = resp.getOutputStream(); - try { - byte buf[] = new byte[4096]; - int read = -1; - while ( (read = in.read(buf)) != -1) - out.write(buf, 0, read); - _log.debug("Done writing default logo"); - } finally { - try { in.close(); } catch (IOException ioe) {} - try { out.close(); } catch (IOException ioe) {} - } - return true; - } - } - resp.setContentType("img.png"); - resp.setContentLength(BLANK_IMAGE.length); - OutputStream out = resp.getOutputStream(); - try { - out.write(BLANK_IMAGE); - } finally { - try { out.close(); } catch (IOException ioe) {} - } - _log.debug("Done writing default image"); - return true; - } - - private static final String CSS = "\n" + -"* {\n" + -" margin: 0px;\n" + -" padding: 0px;\n" + -"}\n" + -"body {\n" + -" font-family: Arial, Helvetica, sans-serif;\n" + -" font-size: 100%;\n" + -" background-color : #EEEEEE;\n" + -"}\n" + -"a {\n" + -" text-decoration: none;\n" + -"}\n" + -"a:hover {\n" + -" color: red;\n" + -"}\n" + -"select {\n" + -" min-width: 1.5em;\n" + -"}\n" + -".syndieBlog {\n" + -"}\n" + -".syndieBlogTopNav {\n" + -" float:left;\n" + -" width: 100%;\n" + -" background-color: #BBBBBB;\n" + -"}\n" + -".syndieBlogTopNavUser {\n" + -" text-align: left;\n" + -" float: left;\n" + -" margin: 2px;\n" + -"}\n" + -".syndieBlogTopNavAdmin {\n" + -" text-align: left;\n" + -" float: right;\n" + -" margin: 2px;\n" + -"}\n" + -".syndieBlogHeader {\n" + -" width: 100%;\n" + -" background-color: black;\n" + -" float:left;\n" + -"}\n" + -".syndieBlogHeader a {\n" + -" color: white;\n" 
+ -" padding: 4px;\n" + -"}\n" + -".syndieBlogHeader b {\n" + -" font-size: 1.2em;\n" + -"}\n" + -".syndieBlogLogo {\n" + -" float: left;\n" + -"}\n" + -".syndieBlogLinks {\n" + -" width: 20%;\n" + -" float: left;\n" + -"}\n" + -".syndieBlogLinkGroup {\n" + -" font-size: 0.8em;\n" + -" background-color: #DDD;\n" + -" border: 1px solid black;\n" + -" margin: 5px;\n" + -" padding: 2px;\n" + -"}\n" + -".syndieBlogLinkGroup ul {\n" + -" list-style: none;\n" + -"}\n" + -".syndieBlogLinkGroup li {\n" + -" width: 100%;\n" + -" overflow: hidden;\n" + -" white-space: nowrap;\n" + -"}\n" + -".syndieBlogLinkGroupName {\n" + -" font-weight: bold;\n" + -" width: 100%;\n" + -" border-bottom: 1px dashed black;\n" + -" display: block;\n" + -" overflow: hidden;\n" + -" white-space: nowrap;\n" + -"}\n" + -".syndieBlogPostInfoGroup {\n" + -" font-size: 0.8em;\n" + -" background-color: #FFEA9F;\n" + -" border: 1px solid black;\n" + -" margin: 5px;\n" + -" padding: 2px;\n" + -"}\n" + -".syndieBlogPostInfoGroup ol {\n" + -" list-style: none;\n" + -"}\n" + -".syndieBlogPostInfoGroup li {\n" + -" white-space: nowrap;\n" + -" width: 100%;\n" + -" overflow: hidden;\n" + -"}\n" + -".syndieBlogPostInfoGroupName {\n" + -" font-weight: bold;\n" + -" width: 100%;\n" + -" border-bottom: 1px dashed black;\n" + -" display: block;\n" + -" overflow: hidden;\n" + -" white-space: nowrap;\n" + -"}\n" + -".syndieBlogMeta {\n" + -" text-align: left;\n" + -" font-size: 0.8em;\n" + -" background-color: #DDD;\n" + -" border: 1px solid black;\n" + -" margin: 5px;\n" + -" padding: 2px;\n" + -"}\n" + -".syndieBlogBody {\n" + -" width: 80%;\n" + -" float: left;\n" + -"}\n" + -".syndieBlogPost {\n" + -" border: 1px solid black;\n" + -" margin-top: 5px;\n" + -" margin-right: 5px;\n" + -" word-wrap: break-word;\n" + -"}\n" + -".syndieBlogPostHeader {\n" + -" background-color: #BBB;\n" + -" padding: 2px;\n" + -"}\n" + -".syndieBlogPostSubject {\n" + -" font-weight: bold;\n" + -"}\n" + -".syndieBlogPostFrom {\n" + -" 
text-align: right;\n" + -"}\n" + -".syndieBlogPostSummary {\n" + -" background-color: #FFFFFF;\n" + -" padding: 2px;\n" + -"}\n" + -".syndieBlogPostDetails {\n" + -" background-color: #DDD;\n" + -" padding: 2px;\n" + -"}\n" + -".syndieBlogNav {\n" + -" text-align: center;\n" + -"}\n" + -".syndieBlogComments {\n" + -" border: none;\n" + -" margin-top: 5px;\n" + -" margin-left: 0px;\n" + -" float: left;\n" + -"}\n" + -".syndieBlogComments ul {\n" + -" list-style: none;\n" + -" margin-left: 10px;\n" + -"}\n" + -".syndieBlogCommentInfoGroup {\n" + -" font-size: 0.8em;\n" + -" margin-right: 5px;\n" + -"}\n" + -".syndieBlogCommentInfoGroup ol {\n" + -" list-style: none;\n" + -"}\n" + -".syndieBlogCommentInfoGroup li {\n" + -"}\n" + -".syndieBlogCommentInfoGroupName {\n" + -" font-size: 0.8em;\n" + -" font-weight: bold;\n" + -"}\n"; - protected String getTitle() { return "unused"; } - protected void renderServletDetails(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index, - int threadOffset, BlogURI visibleEntry, Archive archive) throws IOException { - throw new RuntimeException("unused"); - } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/ViewBlogsServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/ViewBlogsServlet.java deleted file mode 100644 index 4a1d4d258..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/ViewBlogsServlet.java +++ /dev/null @@ -1,168 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.IOException; -import java.io.PrintWriter; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.TreeSet; - -import javax.servlet.http.HttpServletRequest; - -import net.i2p.client.naming.PetName; -import net.i2p.client.naming.PetNameDB; -import net.i2p.data.Hash; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.NewestEntryFirstComparator; -import net.i2p.syndie.User; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.BlogURI; 
-import net.i2p.syndie.data.FilteredThreadIndex; -import net.i2p.syndie.data.ThreadIndex; -import net.i2p.syndie.data.ThreadNode; -import net.i2p.syndie.sml.HTMLRenderer; - -/** - * List the blogs known in the archive - * - */ -public class ViewBlogsServlet extends BaseServlet { - private static final int MAX_AUTHORS_AT_ONCE = 20; - private static final int MAX_TAGS = 50; - - /** renders the posts from the last 3 days */ - private String getViewBlogLink(Hash blog, long lastPost) { - long dayBegin = BlogManager.instance().getDayBegin(); - int daysAgo = 2; - if ( (lastPost > 0) && (dayBegin - 3*24*60*60*1000l >= lastPost) ) // last post was old 3 days ago - daysAgo = (int)((dayBegin - lastPost + 24*60*60*1000l-1)/(24*60*60*1000l)); - daysAgo++; - return "blog.jsp?" + ViewBlogServlet.PARAM_BLOG + "=" + blog.toBase64(); - //return getControlTarget() + "?" + ThreadedHTMLRenderer.PARAM_AUTHOR + '=' + blog.toBase64() - // + '&' + ThreadedHTMLRenderer.PARAM_THREAD_AUTHOR + "=true&daysBack=" + daysAgo; - } - - private String getPostDate(long when) { - String age = null; - long dayBegin = BlogManager.instance().getDayBegin(); - long postId = when; - if (postId >= dayBegin) { - age = "today"; - } else if (postId >= dayBegin - 24*60*60*1000) { - age = "yesterday"; - } else { - int daysAgo = (int)((dayBegin - postId + 24*60*60*1000-1)/(24*60*60*1000)); - age = daysAgo + " days ago"; - } - return age; - } - - protected void renderServletDetails(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index, - int threadOffset, BlogURI visibleEntry, Archive archive) throws IOException { - TreeSet orderedRoots = new TreeSet(new NewestEntryFirstComparator()); - // The thread index is ordered by last updated date, as opposed to root posting date, - // so lets reorder things - int count = index.getRootCount(); - for (int i = 0; i < count; i++) { - ThreadNode node = index.getRoot(i); - orderedRoots.add(node.getEntry()); - } - - TreeSet tags = new TreeSet(); - List 
writtenAuthors = new ArrayList(); - - - out.write(""); - if ( (user != null) && (user.getAuthenticated()) ) { - out.write("Favorite blogs: view all
      \n"); - out.write("Your blog
      \n"); - - PetNameDB db = user.getPetNameDB(); - for (Iterator iter = orderedRoots.iterator(); iter.hasNext() && writtenAuthors.size() < MAX_AUTHORS_AT_ONCE; ) { - BlogURI uri= (BlogURI)iter.next(); - if (writtenAuthors.contains(uri.getKeyHash())) { - // skip - } else { - PetName pn = db.getByLocation(uri.getKeyHash().toBase64()); - if (pn != null) { - if (pn.isMember(FilteredThreadIndex.GROUP_FAVORITE)) { - out.write(""); - out.write(HTMLRenderer.sanitizeString(pn.getName(), 32)); - out.write(" (" + getPostDate(uri.getEntryId()) + ")
      \n"); - writtenAuthors.add(uri.getKeyHash()); - } else if (pn.isMember(FilteredThreadIndex.GROUP_IGNORE)) { - // ignore 'em - writtenAuthors.add(uri.getKeyHash()); - } else { - // bookmarked, but not a favorite... leave them for later - } - } else { - // not bookmarked, leave them for later - } - } - } - } - out.write("
      \n"); - - // now for the non-bookmarked people - out.write(""); - out.write("Most recently updated blogs:
      \n"); - for (Iterator iter = orderedRoots.iterator(); iter.hasNext() && writtenAuthors.size() < MAX_AUTHORS_AT_ONCE; ) { - BlogURI uri= (BlogURI)iter.next(); - String curTags[] = archive.getEntry(uri).getTags(); - if (curTags != null) - for (int i = 0; i < curTags.length && tags.size() < MAX_TAGS; i++) - tags.add(curTags[i]); - if (writtenAuthors.contains(uri.getKeyHash())) { - // skip - } else { - BlogInfo info = archive.getBlogInfo(uri); - if (info == null) - continue; - String name = info.getProperty(BlogInfo.NAME); - if ( (name == null) || (name.trim().length() <= 0) ) - name = uri.getKeyHash().toBase64().substring(0,8); - String desc = info.getProperty(BlogInfo.DESCRIPTION); - if ( (desc == null) || (desc.trim().length() <= 0) ) - desc = name + "'s blog"; - String age = null; - long dayBegin = BlogManager.instance().getDayBegin(); - long postId = uri.getEntryId(); - if (postId >= dayBegin) { - age = "today"; - } else if (postId >= dayBegin - 24*60*60*1000) { - age = "yesterday"; - } else { - int daysAgo = (int)((dayBegin - postId + 24*60*60*1000-1)/(24*60*60*1000)); - age = daysAgo + " days ago"; - } - - out.write(""); - out.write(HTMLRenderer.sanitizeString(desc, 32)); - out.write(" (" + getPostDate(uri.getEntryId()) + ")
      \n"); - writtenAuthors.add(uri.getKeyHash()); - } - } - - out.write("
      \n"); - /* - out.write("Topics:\n"); - out.write(""); - for (Iterator iter = tags.iterator(); iter.hasNext(); ) { - String tag = (String)iter.next(); - out.write(""); - out.write(HTMLRenderer.sanitizeString(tag, 32)); - out.write(" "); - } - */ - out.write("\n"); - } - - protected String getTitle() { return "Syndie :: View blogs"; } -} diff --git a/apps/syndie/java/src/net/i2p/syndie/web/ViewThreadedServlet.java b/apps/syndie/java/src/net/i2p/syndie/web/ViewThreadedServlet.java deleted file mode 100644 index 065bed377..000000000 --- a/apps/syndie/java/src/net/i2p/syndie/web/ViewThreadedServlet.java +++ /dev/null @@ -1,478 +0,0 @@ -package net.i2p.syndie.web; - -import java.io.IOException; -import java.io.PrintWriter; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; - -import javax.servlet.http.HttpServletRequest; - -import net.i2p.I2PAppContext; -import net.i2p.client.naming.PetName; -import net.i2p.client.naming.PetNameDB; -import net.i2p.data.Base64; -import net.i2p.data.Hash; -import net.i2p.syndie.Archive; -import net.i2p.syndie.BlogManager; -import net.i2p.syndie.HeaderReceiver; -import net.i2p.syndie.User; -import net.i2p.syndie.data.ArchiveIndex; -import net.i2p.syndie.data.BlogInfo; -import net.i2p.syndie.data.BlogURI; -import net.i2p.syndie.data.EntryContainer; -import net.i2p.syndie.data.FilteredThreadIndex; -import net.i2p.syndie.data.ThreadIndex; -import net.i2p.syndie.data.ThreadNode; -import net.i2p.syndie.sml.HTMLRenderer; -import net.i2p.syndie.sml.SMLParser; -import net.i2p.syndie.sml.ThreadedHTMLRenderer; - -/** - * Render the appropriate posts and the thread tree - * - */ -public class ViewThreadedServlet extends BaseServlet { - protected void renderServletDetails(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index, - int threadOffset, BlogURI visibleEntry, Archive archive) throws IOException { - List posts = getPosts(user, archive, req, index); - 
renderBody(user, req, out, index, archive, posts); - - renderThreadNav(user, req, out, threadOffset, index); - renderThreadTree(user, req, out, threadOffset, visibleEntry, archive, index, posts); - renderThreadNav(user, req, out, threadOffset, index); - } - - private void renderBody(User user, HttpServletRequest req, PrintWriter out, ThreadIndex index, Archive archive, List posts) throws IOException { - ThreadedHTMLRenderer renderer = new ThreadedHTMLRenderer(I2PAppContext.getGlobalContext()); - - String uri = req.getRequestURI(); - String off = req.getParameter(ThreadedHTMLRenderer.PARAM_OFFSET); - String tags = req.getParameter(ThreadedHTMLRenderer.PARAM_TAGS); - String author = req.getParameter(ThreadedHTMLRenderer.PARAM_AUTHOR); - - boolean authorOnly = Boolean.valueOf(req.getParameter(ThreadedHTMLRenderer.PARAM_THREAD_AUTHOR)).booleanValue(); - - for (int i = 0; i < posts.size(); i++) { - BlogURI post = (BlogURI)posts.get(i); - boolean inlineReply = (posts.size() == 1); - //if (true) - // inlineReply = true; - renderer.render(user, out, archive, post, inlineReply, index, uri, getAuthActionFields(), off, tags, author, authorOnly); - } - } - - private List getPosts(User user, Archive archive, HttpServletRequest req, ThreadIndex index) { - List rv = new ArrayList(1); - String author = req.getParameter(ThreadedHTMLRenderer.PARAM_AUTHOR); - String tags = req.getParameter(ThreadedHTMLRenderer.PARAM_TAGS); - String post = req.getParameter(ThreadedHTMLRenderer.PARAM_VIEW_POST); - String thread = req.getParameter(ThreadedHTMLRenderer.PARAM_VIEW_THREAD); - boolean threadAuthorOnly = Boolean.valueOf(req.getParameter(ThreadedHTMLRenderer.PARAM_THREAD_AUTHOR) + "").booleanValue(); - - long dayBegin = BlogManager.instance().getDayBegin(); - String daysStr = req.getParameter(ThreadedHTMLRenderer.PARAM_DAYS_BACK); - int days = 1; - try { - if (daysStr != null) - days = Integer.parseInt(daysStr); - } catch (NumberFormatException nfe) { - days = 1; - } - dayBegin -= (days-1) * 
24*60*60*1000l; - - if ( (author != null) && empty(post) && empty(thread) ) { - ArchiveIndex aindex = archive.getIndex(); - PetNameDB db = user.getPetNameDB(); - if ("favorites".equals(author)) { - for (Iterator nameIter = db.getNames().iterator(); nameIter.hasNext(); ) { - PetName pn = db.getByName((String)nameIter.next()); - if (pn.isMember(FilteredThreadIndex.GROUP_FAVORITE) && AddressesServlet.PROTO_BLOG.equals(pn.getProtocol()) ) { - Hash loc = new Hash(); - byte key[] = Base64.decode(pn.getLocation()); - if ( (key != null) && (key.length == Hash.HASH_LENGTH) ) { - loc.setData(key); - aindex.selectMatchesOrderByEntryId(rv, loc, tags, dayBegin); - } - } - } - // always include ourselves... - aindex.selectMatchesOrderByEntryId(rv, user.getBlog(), tags, dayBegin); - - Collections.sort(rv, BlogURI.COMPARATOR); - } else { - Hash loc = new Hash(); - byte key[] = Base64.decode(author); - if ( (key != null) && (key.length == Hash.HASH_LENGTH) ) { - loc.setData(key); - aindex.selectMatchesOrderByEntryId(rv, loc, tags, dayBegin); - } else { - } - } - - // how inefficient can we get? - if (threadAuthorOnly && (rv.size() > 0)) { - // lets filter out any posts that are not roots - for (int i = 0; i < rv.size(); i++) { - BlogURI curURI = (BlogURI)rv.get(i); - ThreadNode node = index.getNode(curURI); - if ( (node != null) && (node.getParent() == null) ) { - // ok, its a root - } else { - rv.remove(i); - i--; - } - } - } - } - - BlogURI uri = getAsBlogURI(post); - if ( (uri != null) && (uri.getEntryId() > 0) ) { - rv.add(uri); - } else { - uri = getAsBlogURI(thread); - if ( (uri != null) && (uri.getEntryId() > 0) ) { - ThreadNode node = index.getNode(uri); - if (node != null) { - if (false) { - // entire thread, as a depth first search - while (node.getParent() != null) - node = node.getParent(); // hope the structure is loopless... - // depth first traversal - walkTree(rv, node); - } else { - // only the "current" unforked thread, as suggested by cervantes. - // e.g. 
- // a--b--c--d - // \-e--f--g - // \-h - // would show "a--e--f--g" if node == {e, f, or g}, - // or "a--b--c--d" if node == {a, b, c, or d}, - // or "a--e--f--h" if node == h - rv.add(node.getEntry()); - ThreadNode cur = node; - while (cur.getParent() != null) { - cur = cur.getParent(); - rv.add(0, cur.getEntry()); // parents go before children... - } - cur = node; - while ( (cur != null) && (cur.getChildCount() > 0) ) { - cur = cur.getChild(0); - rv.add(cur.getEntry()); // and children after parents - } - } - } else { - rv.add(uri); - } - } - } - - return rv; - } - - private void walkTree(List uris, ThreadNode node) { - if (node == null) - return; - if (uris.contains(node)) - return; - uris.add(node.getEntry()); - for (int i = 0; i < node.getChildCount(); i++) - walkTree(uris, node.getChild(i)); - } - private void renderThreadNav(User user, HttpServletRequest req, PrintWriter out, int threadOffset, ThreadIndex index) throws IOException { - out.write("\n"); - out.write("\n"); - if (threadOffset == 0) { - out.write("<< First Page "); - } else { - out.write("<< First Page "); - } - if (threadOffset > 0) { - out.write("< Prev Page\n"); - } else { - out.write("< Prev Page\n"); - } - out.write("\n"); - - out.write(""); - int max = index.getRootCount(); - if (threadOffset + 10 > max) { - out.write("Next Page> Last Page>>\n"); - } else { - out.write("Next Page> Last Page>>\n"); - } - out.write(""); - //out.write("\n"); - out.write("\n"); - } - - private void renderThreadTree(User user, HttpServletRequest req, PrintWriter out, int threadOffset, BlogURI visibleEntry, Archive archive, ThreadIndex index, List visibleURIs) throws IOException { - int numThreads = 10; - renderThreadTree(user, out, index, archive, req, threadOffset, numThreads, visibleEntry, visibleURIs); - } - - private void renderThreadTree(User user, PrintWriter out, ThreadIndex index, Archive archive, HttpServletRequest req, - int threadOffset, int numThreads, BlogURI visibleEntry, List visibleURIs) { - - 
if ( (visibleEntry != null) && (empty(req, ThreadedHTMLRenderer.PARAM_OFFSET)) ) { - // we want to jump to a specific thread in the nav - threadOffset = index.getRoot(visibleEntry); - } - - if (threadOffset < 0) - threadOffset = 0; - out.write("\n"); - if (threadOffset + numThreads > index.getRootCount()) - numThreads = index.getRootCount() - threadOffset; - TreeRenderState state = new TreeRenderState(new ArrayList()); - - int written = 0; - for (int curRoot = threadOffset; curRoot < numThreads + threadOffset; curRoot++) { - ThreadNode node = index.getRoot(curRoot); - out.write("\n"); - renderThread(user, out, index, archive, req, node, 0, visibleEntry, state, visibleURIs); - out.write("\n"); - written++; - } - - if (written <= 0) - out.write("No matching threads\n"); - - out.write("\n"); - } - - private boolean renderThread(User user, PrintWriter out, ThreadIndex index, Archive archive, HttpServletRequest req, - ThreadNode node, int depth, BlogURI visibleEntry, TreeRenderState state, List visibleURIs) { - boolean isFavorite = false; - boolean ignored = false; - boolean displayed = false; - - if ( (visibleURIs != null) && (visibleURIs.contains(node.getEntry())) ) - displayed = true; - - HTMLRenderer rend = new HTMLRenderer(I2PAppContext.getGlobalContext()); - SMLParser parser = new SMLParser(I2PAppContext.getGlobalContext()); - - PetName pn = user.getPetNameDB().getByLocation(node.getEntry().getKeyHash().toBase64()); - if (pn != null) { - if (pn.isMember(FilteredThreadIndex.GROUP_FAVORITE)) { - isFavorite = true; - } - if (pn.isMember(FilteredThreadIndex.GROUP_IGNORE)) - ignored = true; - } - - state.incrementRowsWritten(); - if (state.getRowsWritten() % 2 == 0) - out.write("\n"); - else - out.write("\n"); - - out.write(""); - out.write(""); - //out.write(""); - out.write(getFlagHTML(user, node)); - //out.write("\n\n"); - for (int i = 0; i < depth; i++) - out.write("\"\""); - - boolean showChildren = false; - - int childCount = node.getChildCount(); - - if 
(childCount > 0) { - boolean allowCollapse = false; - - if (visibleEntry != null) { - if (node.getEntry().equals(visibleEntry)) { - // noop - } else if (node.containsEntry(visibleEntry)) { - showChildren = true; - allowCollapse = true; - } - } else { - // noop - } - - if (allowCollapse) { - out.write("\"collapse\"\n"); - } else { - out.write("\"expand\"\n"); - } - } else { - out.write("\"\"\n"); - } - - out.write(""); - - if (displayed) out.write(""); - - if (pn == null) { - BlogInfo info = archive.getBlogInfo(node.getEntry().getKeyHash()); - String name = null; - if (info != null) - name = info.getProperty(BlogInfo.NAME); - if ( (name == null) || (name.trim().length() <= 0) ) - name = node.getEntry().getKeyHash().toBase64().substring(0,6); - out.write(trim(name, 30)); - } else { - out.write(trim(pn.getName(), 30)); - } - - if (displayed) out.write(""); - - out.write("\n"); - - if ( (user.getBlog() != null) && (node.getEntry().getKeyHash().equals(user.getBlog())) ) { - out.write("\"You\n"); - } else if (isFavorite) { - out.write("\"favorites\"\n"); - } else if (ignored) { - out.write("\"ignored\"\n"); - } else { - if (user.getAuthenticated()) { - // give them a link to bookmark or ignore the peer - out.write("(\"friend\"\n"); - out.write("/\"ignore\")\n"); - } - } - - out.write(": "); - out.write(""); - EntryContainer entry = archive.getEntry(node.getEntry()); - if (entry == null) throw new RuntimeException("Unable to fetch the entry " + node.getEntry()); - - HeaderReceiver rec = new HeaderReceiver(); - parser.parse(entry.getEntry().getText(), rec); - String subject = rec.getHeader(HTMLRenderer.HEADER_SUBJECT); - if ( (subject == null) || (subject.trim().length() <= 0) ) - subject = "(no subject)"; - if (displayed) { - // currently being rendered - out.write(""); - out.write(trim(subject, 40)); - out.write(""); - } else { - out.write(trim(subject, 40)); - } - //out.write("\n\n"); - out.write(""); - if (false) { - out.write(" (full thread)\n"); - } - - 
out.write(""); - - out.write(" 0) { - cur = (ThreadNode)paths.remove(0); - if (cur.getEntry().equals(newestURI)) - break; - for (int i = cur.getChildCount() - 1; i >= 0; i--) - paths.add(cur.getChild(i)); - if (paths.size() <= 0) - cur = null; - } - if (cur != null) - out.write(getViewThreadLink(req, cur, user)); - } - out.write("\" title=\"View the most recent post\">latest - "); - - long dayBegin = BlogManager.instance().getDayBegin(); - long postId = node.getMostRecentPostDate(); - if (postId >= dayBegin) { - out.write("today"); - } else if (postId >= dayBegin - 24*60*60*1000) { - out.write("yesterday"); - } else { - int daysAgo = (int)((dayBegin - postId + 24*60*60*1000-1)/(24*60*60*1000)); - out.write(daysAgo + " days ago"); - } - - out.write("\n"); - /* - out.write(" full thread\n"); - */ - out.write(""); - out.write("\n"); - - boolean rendered = true; - - if (showChildren) { - for (int i = 0; i < node.getChildCount(); i++) { - ThreadNode child = node.getChild(i); - boolean childRendered = renderThread(user, out, index, archive, req, child, depth+1, visibleEntry, state, visibleURIs); - rendered = rendered || childRendered; - } - } - - return rendered; - } - - private String getFlagHTML(User user, ThreadNode node) { - if ( (user.getBlog() != null) && (node.containsAuthor(user.getBlog())) ) - return "\"You"; - - // grab all of the peers in the user's favorites group and check to see if - // they posted something in the given thread, flagging it if they have - boolean favoriteFound = false; - for (Iterator iter = user.getPetNameDB().getNames().iterator(); iter.hasNext(); ) { - PetName pn = user.getPetNameDB().getByName((String)iter.next()); - if (pn.isMember(FilteredThreadIndex.GROUP_FAVORITE)) { - Hash cur = new Hash(); - try { - cur.fromBase64(pn.getLocation()); - if (node.containsAuthor(cur)) { - favoriteFound = true; - break; - } - } catch (Exception e) {} - } - } - if (favoriteFound) - return "\"flagged"; - else - return " "; - } - - protected String 
getTitle() { return "Syndie :: View threads"; } -} diff --git a/apps/syndie/jetty-syndie.xml b/apps/syndie/jetty-syndie.xml deleted file mode 100644 index 4cf61e494..000000000 --- a/apps/syndie/jetty-syndie.xml +++ /dev/null @@ -1,114 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - 0.0.0.0 - 8001 - - - 3 - 10 - 30000 - 1000 - 8443 - 8443 - main - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - syndie - - / - syndie.war - - - - - - - - ./logs/yyyy_mm_dd.syndie-request.log - 90 - true - false - false - GMT - - - - - - - 2000 - false - - diff --git a/apps/syndie/jsp/_bodyindex.jsp b/apps/syndie/jsp/_bodyindex.jsp deleted file mode 100644 index d997bbd2c..000000000 --- a/apps/syndie/jsp/_bodyindex.jsp +++ /dev/null @@ -1,40 +0,0 @@ -<%@page contentType="text/html; charset=UTF-8" import="net.i2p.syndie.web.ArchiveViewerBean, net.i2p.syndie.*, net.i2p.client.naming.PetName" %> -<% request.setCharacterEncoding("UTF-8"); %> -<% -if (user.getAuthenticated() && (null != request.getParameter("action")) ) { - %><% - String blog = request.getParameter("blog"); - String group = null; - if (request.getParameter("action").equals("Bookmark blog")) - group = "Favorites"; - else if (request.getParameter("action").equals("Ignore blog")) - group = "Ignore"; - boolean unignore = ("Unignore blog".equals(request.getParameter("action"))); - - PetName pn = user.getPetNameDB().getByLocation(blog); - String name = null; - if (pn != null) name = pn.getName(); - if (name == null) - name = request.getParameter("name"); - if (name == null) - name = blog; - if ( (name != null) && (blog != null) && ( (group != null) || (unignore) ) ) { - if (pn != null) { - if (unignore) - pn.removeGroup("Ignore"); - else - pn.addGroup(group); - } else { - pn = new PetName(name, "syndie", "syndieblog", blog); - pn.addGroup(group); - user.getPetNameDB().add(pn); - } - BlogManager.instance().saveUser(user); - } -} -%> -
      -Blogs: <%ArchiveViewerBean.renderBlogSelector(user, request.getParameterMap(), out);%> - - -<%ArchiveViewerBean.renderBlogs(user, request.getParameterMap(), out, "
      ");%>
      \ No newline at end of file diff --git a/apps/syndie/jsp/_leftnav.jsp b/apps/syndie/jsp/_leftnav.jsp deleted file mode 100644 index 2b342a5f5..000000000 --- a/apps/syndie/jsp/_leftnav.jsp +++ /dev/null @@ -1,3 +0,0 @@ -<%@page import="net.i2p.syndie.web.ArchiveViewerBean, net.i2p.syndie.*, net.i2p.data.Base64" %> - - \ No newline at end of file diff --git a/apps/syndie/jsp/_rightnav.jsp b/apps/syndie/jsp/_rightnav.jsp deleted file mode 100644 index 70f597e4f..000000000 --- a/apps/syndie/jsp/_rightnav.jsp +++ /dev/null @@ -1 +0,0 @@ - diff --git a/apps/syndie/jsp/_toplogo.jsp b/apps/syndie/jsp/_toplogo.jsp deleted file mode 100644 index 07d6f60ab..000000000 --- a/apps/syndie/jsp/_toplogo.jsp +++ /dev/null @@ -1,5 +0,0 @@ -<%@page import="net.i2p.syndie.BlogManager" %> - - \ No newline at end of file diff --git a/apps/syndie/jsp/_topnav.jsp b/apps/syndie/jsp/_topnav.jsp deleted file mode 100644 index 2f3536513..000000000 --- a/apps/syndie/jsp/_topnav.jsp +++ /dev/null @@ -1,47 +0,0 @@ -<%@page import="net.i2p.syndie.*, net.i2p.syndie.sml.*, net.i2p.syndie.web.*" %> - -
      "> - -Home -Syndie admin -Remote archives -RSS imports -Import -<% -if ("true".equals(request.getParameter("logout"))) { - user.invalidate(); - RemoteArchiveBean rem = (RemoteArchiveBean)session.getAttribute("remote"); - if (rem != null) rem.reinitialize(); - PostBean post = (PostBean)session.getAttribute("post"); - if (post != null) post.reinitialize(); -} -String login = request.getParameter("login"); -String pass = request.getParameter("password"); -String loginSubmit = request.getParameter("Login"); -if ( (login != null) && (pass != null) && (loginSubmit != null) && (loginSubmit.equals("Login")) ) { - String loginResult = BlogManager.instance().login(user, login, pass); - if (!user.getAuthenticated()) - out.write("" + loginResult + ""); -} -%> -<% if (user.getAuthenticated()) { %> -Logged in as: : -"><%=HTMLRenderer.sanitizeString(ArchiveViewerBean.getBlogName(user.getBlogStr()))%> -">Post -">Metadata -Addressbook -Logout -<%} else {%> -Login: -Pass: <% -java.util.Enumeration params = request.getParameterNames(); -while (params.hasMoreElements()) { - String p = (String)params.nextElement(); - String val = request.getParameter(p); - %><% -}%> - -Register -<% } %> - -
      \ No newline at end of file diff --git a/apps/syndie/jsp/about.html b/apps/syndie/jsp/about.html deleted file mode 100644 index 329b5a66f..000000000 --- a/apps/syndie/jsp/about.html +++ /dev/null @@ -1,31 +0,0 @@ -What is Syndie? - -

      Perhaps the best introduction to Syndie can be found in Syndie itself.

      - -

      Updates can be found by filtering for the syndie.intro tag (if you only want to -receive posts that jrandom - made with that tag, that can be achieved -as well).

      - -

      If you have any questions or problems with Syndie, just post them and -syndicate it up to syndiemedia.i2p (which should show up as the default archive -on new installs). You can also use the I2P -forums if you're having trouble getting Syndie to work, and people are -almost always around on the #i2p irc -channel.

      - -

      One FAQ which might keep people from getting more posts into their Syndie -node regards cookies. If you get "internal errors" when using the syndicate form, you probably have cookies disabled. -Syndie needs cookies to help maintain state, and while its good practice to -disable cookies in general, you should be able to tell your web browser to make -an exception and allow cookies to "localhost" (or wherever your Syndie instance -is). Further FAQs should be found -within syndie

      - diff --git a/apps/syndie/jsp/images/addToFavorites.png b/apps/syndie/jsp/images/addToFavorites.png deleted file mode 100644 index 95ded8d97..000000000 Binary files a/apps/syndie/jsp/images/addToFavorites.png and /dev/null differ diff --git a/apps/syndie/jsp/images/addToIgnored.png b/apps/syndie/jsp/images/addToIgnored.png deleted file mode 100644 index 5b87bf45b..000000000 Binary files a/apps/syndie/jsp/images/addToIgnored.png and /dev/null differ diff --git a/apps/syndie/jsp/images/collapse.png b/apps/syndie/jsp/images/collapse.png deleted file mode 100644 index 2ce31b36d..000000000 Binary files a/apps/syndie/jsp/images/collapse.png and /dev/null differ diff --git a/apps/syndie/jsp/images/default_blog_logo.png b/apps/syndie/jsp/images/default_blog_logo.png deleted file mode 100644 index 54ea2a917..000000000 Binary files a/apps/syndie/jsp/images/default_blog_logo.png and /dev/null differ diff --git a/apps/syndie/jsp/images/expand.png b/apps/syndie/jsp/images/expand.png deleted file mode 100644 index 95c0c4c81..000000000 Binary files a/apps/syndie/jsp/images/expand.png and /dev/null differ diff --git a/apps/syndie/jsp/images/favorites.png b/apps/syndie/jsp/images/favorites.png deleted file mode 100644 index 5fa5a83bb..000000000 Binary files a/apps/syndie/jsp/images/favorites.png and /dev/null differ diff --git a/apps/syndie/jsp/images/noSubthread.png b/apps/syndie/jsp/images/noSubthread.png deleted file mode 100644 index 3ea13d3a1..000000000 Binary files a/apps/syndie/jsp/images/noSubthread.png and /dev/null differ diff --git a/apps/syndie/jsp/images/self.png b/apps/syndie/jsp/images/self.png deleted file mode 100644 index f56c8a0df..000000000 Binary files a/apps/syndie/jsp/images/self.png and /dev/null differ diff --git a/apps/syndie/jsp/images/syndielogo.png b/apps/syndie/jsp/images/syndielogo.png deleted file mode 100644 index 9ed9b852a..000000000 Binary files a/apps/syndie/jsp/images/syndielogo.png and /dev/null differ diff --git 
a/apps/syndie/jsp/images/threadIndent.png b/apps/syndie/jsp/images/threadIndent.png deleted file mode 100644 index 3ea13d3a1..000000000 Binary files a/apps/syndie/jsp/images/threadIndent.png and /dev/null differ diff --git a/apps/syndie/jsp/import.jsp b/apps/syndie/jsp/import.jsp deleted file mode 100644 index ba482bd80..000000000 --- a/apps/syndie/jsp/import.jsp +++ /dev/null @@ -1,68 +0,0 @@ -<%@page contentType="text/html; charset=UTF-8" pageEncoding="UTF-8" import="net.i2p.data.Base64, net.i2p.syndie.web.*, net.i2p.syndie.sml.*, net.i2p.syndie.data.*, net.i2p.syndie.*, org.mortbay.servlet.MultiPartRequest, java.util.*, java.io.*" %><% -request.setCharacterEncoding("UTF-8"); -%> - - -SyndieMedia import - - - - - - - - - -
      <% - -String contentType = request.getContentType(); -if ((contentType != null) && (contentType.indexOf("boundary=") != -1) ) { - MultiPartRequest req = new MultiPartRequest(request); - int metaId = 0; - while (true) { - InputStream meta = req.getInputStream("blogmeta" + metaId); - if ( (meta == null) || (meta.available() <= 0) ) - break; - if (!BlogManager.instance().importBlogMetadata(meta)) { - %>Metadata <%=metaId%> failed to be imported
      <% - break; - } - metaId++; - } - int entryId = 0; - while (true) { - InputStream entry = req.getInputStream("blogpost" + entryId); - if ( (entry == null) || (entry.available() <= 0) ) - break; - if (!BlogManager.instance().importBlogEntry(entry)) { - %>Entry <%=entryId%> failed to be imported
      <% - break; - } - entryId++; - } - - if ( (entryId > 0) || (metaId > 0) ) { - BlogManager.instance().getArchive().regenerateIndex(); - session.setAttribute("index", BlogManager.instance().getArchive().getIndex()); - } -%>Imported <%=entryId%> posts and <%=metaId%> blog metadata files. -<% -} else { %>
      -Blog metadata 0:
      -Blog metadata 1:
      -Post 0:
      -Post 1:
      -Post 2:
      -Post 3:
      -Post 4:
      -Post 5:
      -Post 6:
      -Post 7:
      -Post 8:
      -Post 9:
      -
      - -<% } %> -
      - diff --git a/apps/syndie/jsp/index.html b/apps/syndie/jsp/index.html deleted file mode 100644 index 7244e85bf..000000000 --- a/apps/syndie/jsp/index.html +++ /dev/null @@ -1,19 +0,0 @@ -Welcome to Syndie
      - -
      -Welcome to Syndie!
      - -

      Jump right in and read discussion threads or -blogs

      -

      Create a new post of your own

      -

      Learn more about Syndie

      -

      NOTE: This version of Syndie is being replaced by -the new Syndie! -The new Syndie is a standalone application under active development. -Please give the new Syndie a try, as it has lots more traffic -than this version. Don't expect anybody to see your posts here.

      -
      -
      - diff --git a/apps/syndie/jsp/register.jsp b/apps/syndie/jsp/register.jsp deleted file mode 100644 index 660735ed4..000000000 --- a/apps/syndie/jsp/register.jsp +++ /dev/null @@ -1,50 +0,0 @@ -<%@page contentType="text/html; charset=UTF-8" pageEncoding="UTF-8" import="net.i2p.data.Base64, net.i2p.syndie.web.*, net.i2p.syndie.sml.*, net.i2p.syndie.*" %><% -request.setCharacterEncoding("UTF-8"); -%> - - -SyndieMedia - - - - - - - - - -
      <% -String regLogin = request.getParameter("login"); -boolean showForm = true; -if ( (regLogin != null) && ("Register".equals(request.getParameter("Register"))) ) { - String regUserPass = request.getParameter("password"); - String regPass = request.getParameter("registrationpassword"); - String blogName = request.getParameter("blogname"); - String desc = request.getParameter("description"); - String url = request.getParameter("contacturl"); - String regResult = BlogManager.instance().register(user, regLogin, regUserPass, regPass, blogName, desc, url); - if (User.LOGIN_OK.equals(regResult)) { - %>Registration successful. Continue... -<% showForm = false; - } else { - %><%=regResult%><% - } -} -if (showForm) {%> -

      To create a new blog (and Syndie user account), please fill out the following form. -You may need to enter a registration password given to you by this Syndie instance's -operator, or there may be no registration password in place (in which case you can -leave that field blank).

      -

      -Syndie login:
      -New password:
      -Registration password:
      -Blog name:
      -Brief description:
      -Contact URL: (e.g. mailto://user@mail.i2p, http://foo.i2p/, etc)
      - -

      -<% } %> -
      - \ No newline at end of file diff --git a/apps/syndie/jsp/smlref.jsp b/apps/syndie/jsp/smlref.jsp deleted file mode 100644 index ed2bd52ee..000000000 --- a/apps/syndie/jsp/smlref.jsp +++ /dev/null @@ -1,38 +0,0 @@ -<%@page contentType="text/html; charset=UTF-8" pageEncoding="UTF-8" import="java.util.*" %><% -request.setCharacterEncoding("UTF-8"); -%> - - -SML Quick Reference - - - -

      SML Quick Reference:

      -
        -
      • newlines are newlines are newlines.
      • -
      • all < and > are replaced with their &symbol;
      • -
      • the [ and ] characters delimit tags, or must be quoted by doubling them up ([[ displays as [, ]] displays as ])
      • -
      • [b][/b] = <b>bold</b>
      • -
      • [i][/i] = <i>italics</i>
      • -
      • [u][/u] = <i>underline</i>
      • -
      • [pre]foo[/pre] = <pre>preformatted section</pre>
      • -
      • [cut]more inside[/cut] = <a href="#">more inside...</a>
      • -
      • [quote][/quote] = Quoted text
      • -
      • [img attachment="1"]alt[/img] = use attachment 1 as an image with 'alt' as the alt text.
      • -
      • [attachment id="0"]text[/attachment] = offer attachment 0 as a link in your post
      • -
      • [attachment thumbnail="0" id="1"]text[/attachment] = offer attachment 1 as a link around a thumbnail image using attachment 0
      • -
      • [link schema="eep" location="http://forum.i2p"]text[/link] = offer a link to an external resource (accessible with the given schema)
      • -
      • [blog name="name" bloghash="base64hash"]description[/blog] = link to all posts in the blog
      • -
      • [blog name="name" bloghash="base64hash" blogentry="1234"]description[/blog] = link to the specified post in the blog
      • -
      • [blog name="name" bloghash="base64hash" blogtag="tag"]description[/blog] = link to all posts in the blog with the specified tag
      • -
      • [blog name="name" blogtag="tag"]description[/blog] = link to all posts in all blogs with the specified tag
      • -
      • [archive name="name" description="they have good stuff" schema="eep" location="http://syndiemedia.i2p/archive/archive.txt"]foo![/archive] = offer an easy way to sync up with a new Syndie archive
      • -
      • [address name="www.i2p" location="Nf3ab-ZFkmI-LyMt7Gjg...vobM57UpqSAAAA" schema="i2p" proto="eep"]official website[/address] = share a pet name reference to the given eepsite (using fields from the addresses page)
      • -
      -SML headers are newline delimited key:value pairs. Example keys are: -
        -
      • bgcolor = background color of the post (e.g. bgcolor:#ffccaa or bgcolor=red)
      • -
      • bgimage = attachment number to place as the background image for the post (only shown if images are enabled) (e.g. bgimage=1)
      • -
      • textfont = font to put most text into
      • -
      - diff --git a/apps/syndie/jsp/style.jsp b/apps/syndie/jsp/style.jsp deleted file mode 100644 index c1b37f3ee..000000000 --- a/apps/syndie/jsp/style.jsp +++ /dev/null @@ -1,7 +0,0 @@ -<%@page contentType="text/css; charset=UTF-8" pageEncoding="UTF-8" import="net.i2p.util.FileUtil" %> -<% request.setCharacterEncoding("UTF-8"); %> -<%@include file="syndie.css" %> -<% -String content = FileUtil.readTextFile("./docs/syndie_standard.css", -1, true); -if (content != null) out.write(content); -%> \ No newline at end of file diff --git a/apps/syndie/jsp/syndie.css b/apps/syndie/jsp/syndie.css deleted file mode 100644 index 3d7ccef24..000000000 --- a/apps/syndie/jsp/syndie.css +++ /dev/null @@ -1,444 +0,0 @@ -.b_topnavUser { - text-align: right; - background-color: #CCCCDD; -} -.b_topnavHome { - background-color: #CCCCDD; - color: #000000; - width: 50px; - text-align: left; -} -.b_topnav { - background-color: #CCCCDD; -} -.b_content { -} -.s_summary_overall { -} -.s_detail_overall { -} -.s_detail_subject { - font-size: 0.8em; - text-align: left; - background-color: #BBBBFF; -} -.s_detail_quote { - margin-left: 1em; - border: 1px solid #DBDBDB; - background-color: #E0E0E0; -} -.s_detail_italic { - font-style: italic; -} -.s_detail_bold { - font-style: normal; - font-weight: bold; -} -.s_detail_underline { - font-style: normal; - text-decoration: underline; -} -.s_detail_meta { - font-size: 0.8em; - text-align: right; - background-color: #BBBBFF; -} - -.s_summary_subject { - font-size: 0.8em; - text-align: left; - background-color: #BBBBFF; -} -.s_summary_meta { - font-size: 0.8em; - text-align: right; - background-color: #BBBBFF; -} -.s_summary_quote { - margin-left: 1em; - border-width: 1px solid #DBDBDB; - background-color: #E0E0E0; -} -.s_summary_italic { - font-style: italic; -} -.s_summary_bold { - font-style: normal; - font-weight: bold; -} -.s_summary_underline { - font-style: normal; - text-decoration: underline; -} -.s_summary_summDetail { - font-size: 0.8em; 
-} -.s_detail_summDetail { -} -.s_detail_summDetailBlog { -} -.s_detail_summDetailBlogLink { -} -td.s_detail_summDetail { - background-color: #DDDDFF; -} -td.s_summary_summ { - font-size: 0.8em; - background-color: #DDDDFF; -} - - -body { - margin : 0px; - padding : 0px; - width: 99%; - font-family : Arial, sans-serif, Helvetica; - background-color : #FFF; - color : black; - font-size : 100%; - - /* we've avoided Tantek Hacks so far, - ** but we can't avoid using the non-w3c method of - ** box rendering. (and therefore one of mozilla's - ** proprietry -moz properties (which hopefully they'll - ** drop soon). - */ - -moz-box-sizing : border-box; - box-sizing : border-box; -} -a:link{color:#007} -a:visited{color:#606} -a:hover{color:#720} -a:active{color:#900} - -select { - min-width: 1.5em; -} -.overallTable { - border-spacing: 0px; - border-collapse: collapse; - float: left; -} -.topNav { - background-color: #BBB; -} -.topNav_user { - text-align: left; - float: left; - display: inline; -} -.topNav_admin { - text-align: right; - float: right; - margin: 0 5px 0 0; - display: inline; -} -.controlBar { - border-bottom: thick double #CCF; - border-left: medium solid #CCF; - border-right: medium solid #CCF; - background-color: #EEF; - color: inherit; - font-size: small; - clear: left; /* fixes a bug in Opera */ -} -.controlBarRight { - text-align: right; -} -.threadEven { - background-color: #FFF; - white-space: nowrap; -} -.threadOdd { - background-color: #FFC; - white-space: nowrap; -} -.threadLeft { - text-align: left; - align: left; -} -.threadNav { - background-color: #EEF; - border: medium solid #CCF; -} -.threadNavRight { - text-align: right; - float: right; - background-color: #EEF; -} -.rightOffset { - float: right; - margin: 0 5px 0 0; - display: inline; -} -.threadInfoLeft { - float: left; - margin: 5px 0px 0 0; - display: inline; -} -.threadInfoRight { - float: right; - margin: 0 5px 0 0; - display: inline; -} -.postMeta { - border-top: 1px solid black; - 
background-color: #FFB; -} -.postMetaSubject { - text-align: left; - font-size: large; -} -.postMetaLink { - text-align: right; -} -.postDetails { - background-color: #FFC; -} -.postReply { - background-color: #CCF; -} -.postReplyText { - background-color: #CCF; -} -.postReplyOptions { - background-color: #CCF; -} -.syndieBlogTopNav { - padding: 0.5em; - width: 98%; - border: medium solid #CCF; - background-color: #EEF; - font-size: small; -} -.syndieBlogTopNavUser { - text-align: left; -} -.syndieBlogTopNavAdmin { - text-align: right; -} -.syndieBlogHeader { - width: 100%; - font-size: 1.4em; - background-color: #000; - text-align: Left; - float: Left; -} -.syndieBlogHeader a { - color: #FFF; - padding: 4px; -} -.syndieBlogHeader a:hover { - color:#88F; - padding: 4px; -} -.syndieBlogLogo { - float: left; - display: inline; -} -.syndieBlogLinks { - width: 20%; - float: left; -} -.syndieBlogLinkGroup { - font-size: 0.8em; - background-color: #DDD; - border: 1px solid black; - margin: 5px; - padding: 2px; -} -.syndieBlogLinkGroup ul { - list-style: none; -} -.syndieBlogLinkGroup li { -} -.syndieBlogLinkGroupName { - font-weight: bold; - width: 100%; - border-bottom: 1px dashed black; - display: block; -} -.syndieBlogPostInfoGroup { - font-size: 0.8em; - background-color: #FFEA9F; - border: 1px solid black; - margin: 5px; - padding: 2px; -} -.syndieBlogPostInfoGroup ol { - list-style: none; -} -.syndieBlogPostInfoGroup li { -} -.syndieBlogPostInfoGroup li a { - display: block; -} -.syndieBlogPostInfoGroupName { - font-weight: bold; - width: 100%; - border-bottom: 1px dashed black; - display: block; -} -.syndieBlogMeta { - text-align: left; - font-size: 0.8em; - background-color: #DDD; - border: 1px solid black; - margin: 5px; - padding: 2px; -} -.syndieBlogBody { - width: 80%; - float: left; -} -.syndieBlogPost { - border: 1px solid black; - margin-top: 5px; - margin-right: 5px; -} -.syndieBlogPostHeader { - background-color: #FFB; - padding: 2px; -} 
-.syndieBlogPostSubject { - font-weight: bold; -} -.syndieBlogPostFrom { - text-align: right; -} -.syndieBlogPostSummary { - background-color: #FFF; - padding: 2px; -} -.syndieBlogPostDetails { - background-color: #FFC; - padding: 2px; -} -.syndieBlogNav { - text-align: center; -} -.syndieBlogComments { - border: none; - margin-top: 5px; - margin-left: 0px; - float: left; -} -.syndieBlogComments ul { - list-style: none; - margin-left: 10px; -} -.syndieBlogCommentInfoGroup { - font-size: 0.8em; - margin-right: 5px; -} -.syndieBlogCommentInfoGroup ol { - list-style: none; -} -.syndieBlogCommentInfoGroup li { -} -.syndieBlogCommentInfoGroup li a { - display: block; -} -.syndieBlogCommentInfoGroupName { - font-size: 0.8em; - font-weight: bold; -} - -.syndieBlogFavorites { - float: left; - margin: 5px 0px 0 0; - display: inline; -} -.syndieBlogList { - float: right; - margin: 5px 0px 0 0; - display: inline; -} -.b_topnavUser { - text-align: right; - background-color: #CCD; -} -.b_topnavHome { - background-color: #CCD; - color: #000; - width: 50px; - text-align: left; -} -.b_topnav { - background-color: #CCD; -} -.b_content { -} -.s_summary_overall { -} -.s_detail_overall { -} -.s_detail_subject { - font-size: 0.8em; - text-align: left; - background-color: #CCF; -} -.s_detail_quote { - margin-left: 1em; - border: 1px solid #DBDBDB; - background-color: #E0E0E0; -} -.s_detail_italic { - font-style: italic; -} -.s_detail_bold { - font-style: normal; - font-weight: bold; -} -.s_detail_underline { - font-style: normal; - text-decoration: underline; -} -.s_detail_meta { - font-size: 0.8em; - text-align: right; - background-color: #CCF; -} - -.s_summary_subject { - font-size: 0.8em; - text-align: left; - background-color: #CCF; -} -.s_summary_meta { - font-size: 0.8em; - text-align: right; - background-color: #CCF; -} -.s_summary_quote { - margin-left: 1em; - border-width: 1px solid #DBDBDB; - background-color: #E0E0E0; -} -.s_summary_italic { - font-style: italic; -} 
-.s_summary_bold { - font-style: normal; - font-weight: bold; -} -.s_summary_underline { - font-style: normal; - text-decoration: underline; -} -.s_summary_summDetail { - font-size: 0.8em; -} -.s_detail_summDetail { -} -.s_detail_summDetailBlog { -} -.s_detail_summDetailBlogLink { -} -td.s_detail_summDetail { - background-color: #CCF; -} -td.s_summary_summ { width: 80%; - font-size: 0.8em; - background-color: #CCF; -} \ No newline at end of file diff --git a/apps/syndie/jsp/syndie/index.jsp b/apps/syndie/jsp/syndie/index.jsp deleted file mode 100644 index 5517346b6..000000000 --- a/apps/syndie/jsp/syndie/index.jsp +++ /dev/null @@ -1 +0,0 @@ -<%response.sendRedirect("../index.jsp");%> \ No newline at end of file diff --git a/apps/syndie/jsp/viewattachment.jsp b/apps/syndie/jsp/viewattachment.jsp deleted file mode 100644 index a8e76171b..000000000 --- a/apps/syndie/jsp/viewattachment.jsp +++ /dev/null @@ -1,16 +0,0 @@ -<%@page autoFlush="false" import="net.i2p.syndie.web.*" %><% - -request.setCharacterEncoding("UTF-8"); -java.util.Map params = request.getParameterMap(); -response.setContentType(ArchiveViewerBean.getAttachmentContentType(params)); -boolean inline = ArchiveViewerBean.getAttachmentShouldShowInline(params); -String filename = ArchiveViewerBean.getAttachmentFilename(params); -if (inline) - response.setHeader("Content-Disposition", "inline; filename=\"" + filename + "\""); -else - response.setHeader("Content-Disposition", "attachment; filename=\"" + filename + "\""); -int len = ArchiveViewerBean.getAttachmentContentLength(params); -if (len >= 0) - response.setContentLength(len); -ArchiveViewerBean.renderAttachment(params, response.getOutputStream()); -%> \ No newline at end of file diff --git a/apps/syndie/jsp/viewmetadata.jsp b/apps/syndie/jsp/viewmetadata.jsp deleted file mode 100644 index 9547f40cc..000000000 --- a/apps/syndie/jsp/viewmetadata.jsp +++ /dev/null @@ -1,35 +0,0 @@ -<%@page contentType="text/html; charset=UTF-8" pageEncoding="UTF-8" 
import="net.i2p.syndie.web.*, net.i2p.syndie.*" %><% -request.setCharacterEncoding("UTF-8"); -%> - - -SyndieMedia metadata - - - - - - - - - -
      <% -ArchiveViewerBean.renderMetadata(user, request.getRequestURI(), request.getParameterMap(), out); -if (user.getAuthenticated()) { - if ("Authorize".equals(request.getParameter("action"))) { - %><%=BlogManager.instance().authorizeRemoteAccess(user, request.getParameter("password"))%><% - } - if (!user.getAllowAccessRemote()) { - if (user.getBlog().toBase64().equals(request.getParameter("blog"))) { - %>
      -" /> -To access remote instances from this instance, please supply the Syndie administration password: - - -
      <% - } - } -} -%>
      - \ No newline at end of file diff --git a/apps/syndie/jsp/viewtempattachment.jsp b/apps/syndie/jsp/viewtempattachment.jsp deleted file mode 100644 index 0eae918f9..000000000 --- a/apps/syndie/jsp/viewtempattachment.jsp +++ /dev/null @@ -1,23 +0,0 @@ -<%@page import="net.i2p.syndie.web.ArchiveViewerBean" %><% -request.setCharacterEncoding("UTF-8"); -java.util.Map params = request.getParameterMap(); -String id = request.getParameter(ArchiveViewerBean.PARAM_ATTACHMENT); -if (id != null) { - try { - int attachmentId = Integer.parseInt(id); - if ( (attachmentId < 0) || (attachmentId >= post.getAttachmentCount()) ) { - %>Attachment <%=attachmentId%> does not exist<% - } else { - response.setContentType(post.getContentType(attachmentId)); - boolean inline = ArchiveViewerBean.getAttachmentShouldShowInline(params); - String filename = ArchiveViewerBean.getAttachmentFilename(params); - if (inline) - response.setHeader("Content-Disposition", "inline; filename=" + filename); - else - response.setHeader("Content-Disposition", "attachment; filename=" + filename); - post.writeAttachmentData(attachmentId, response.getOutputStream()); - } - } catch (NumberFormatException nfe) {} -} -%> \ No newline at end of file diff --git a/apps/syndie/jsp/web.xml b/apps/syndie/jsp/web.xml deleted file mode 100644 index 3da3b8bd3..000000000 --- a/apps/syndie/jsp/web.xml +++ /dev/null @@ -1,166 +0,0 @@ - - - - - - net.i2p.syndie.web.ArchiveServlet - net.i2p.syndie.web.ArchiveServlet - - - - net.i2p.syndie.web.RSSServlet - net.i2p.syndie.web.RSSServlet - - - - net.i2p.syndie.web.ViewThreadedServlet - net.i2p.syndie.web.ViewThreadedServlet - - - - net.i2p.syndie.web.ProfileServlet - net.i2p.syndie.web.ProfileServlet - - - - net.i2p.syndie.web.SwitchServlet - net.i2p.syndie.web.SwitchServlet - - - - net.i2p.syndie.web.AddressesServlet - net.i2p.syndie.web.AddressesServlet - - - - net.i2p.syndie.web.PostServlet - net.i2p.syndie.web.PostServlet - - - - net.i2p.syndie.web.AdminServlet - 
net.i2p.syndie.web.AdminServlet - - - - net.i2p.syndie.web.SyndicateServlet - net.i2p.syndie.web.SyndicateServlet - - - - net.i2p.syndie.web.ImportFeedServlet - net.i2p.syndie.web.ImportFeedServlet - - - - net.i2p.syndie.web.ExternalLinkServlet - net.i2p.syndie.web.ExternalLinkServlet - - - - net.i2p.syndie.web.ThreadNavServlet - net.i2p.syndie.web.ThreadNavServlet - - - - net.i2p.syndie.web.ViewBlogsServlet - net.i2p.syndie.web.ViewBlogsServlet - - - - net.i2p.syndie.web.BlogConfigServlet - net.i2p.syndie.web.BlogConfigServlet - - - - net.i2p.syndie.web.ViewBlogServlet - net.i2p.syndie.web.ViewBlogServlet - - - - net.i2p.syndie.UpdaterServlet - net.i2p.syndie.UpdaterServlet - 1 - - - - - - - net.i2p.syndie.web.ArchiveServlet - /archive/* - - - net.i2p.syndie.web.RSSServlet - /rss.jsp - - - net.i2p.syndie.web.ViewThreadedServlet - /threads.jsp - - - net.i2p.syndie.web.ProfileServlet - /profile.jsp - - - net.i2p.syndie.web.SwitchServlet - /switchuser.jsp - - - net.i2p.syndie.web.AddressesServlet - /addresses.jsp - - - net.i2p.syndie.web.PostServlet - /post.jsp - - - net.i2p.syndie.web.AdminServlet - /admin.jsp - - - net.i2p.syndie.web.SyndicateServlet - /syndicate.jsp - - - net.i2p.syndie.web.ImportFeedServlet - /importfeed.jsp - - - net.i2p.syndie.web.ExternalLinkServlet - /externallink.jsp - - - net.i2p.syndie.web.ThreadNavServlet - /threadnav/* - - - net.i2p.syndie.web.ViewBlogsServlet - /blogs.jsp - - - net.i2p.syndie.web.BlogConfigServlet - /configblog.jsp - - - net.i2p.syndie.web.ViewBlogServlet - /blog.jsp - - - - - 30 - - - - index.html - index.jsp - - diff --git a/apps/systray/java/src/net/i2p/apps/systray/SysTray.java b/apps/systray/java/src/net/i2p/apps/systray/SysTray.java index 380c5b172..4a635fd08 100644 --- a/apps/systray/java/src/net/i2p/apps/systray/SysTray.java +++ b/apps/systray/java/src/net/i2p/apps/systray/SysTray.java @@ -11,6 +11,7 @@ package net.i2p.apps.systray; import java.awt.Frame; +import net.i2p.util.SimpleScheduler; import 
net.i2p.util.SimpleTimer; import snoozesoft.systray4j.SysTrayMenu; import snoozesoft.systray4j.SysTrayMenuEvent; @@ -60,14 +61,13 @@ public class SysTray implements SysTrayMenuListener { private SysTray() { _sysTrayMenuIcon.addSysTrayMenuListener(this); createSysTrayMenu(); - SimpleTimer.getInstance().addEvent(new RefreshDisplayEvent(), REFRESH_DISPLAY_FREQUENCY); + SimpleScheduler.getInstance().addPeriodicEvent(new RefreshDisplayEvent(), REFRESH_DISPLAY_FREQUENCY); } private static final long REFRESH_DISPLAY_FREQUENCY = 30*1000; private class RefreshDisplayEvent implements SimpleTimer.TimedEvent { public void timeReached() { refreshDisplay(); - SimpleTimer.getInstance().addEvent(RefreshDisplayEvent.this, REFRESH_DISPLAY_FREQUENCY); } } diff --git a/build.xml b/build.xml index cc19c5671..ef84b2aab 100644 --- a/build.xml +++ b/build.xml @@ -15,6 +15,11 @@ + + +
      + + @@ -60,7 +65,6 @@ -
      @@ -87,7 +91,7 @@ - + @@ -219,7 +223,6 @@ - @@ -273,6 +276,7 @@ + + + + + + @@ -342,6 +344,11 @@ + + + + + @@ -354,11 +361,9 @@ - + - - - + @@ -373,17 +378,16 @@ - - + - + - + @@ -442,6 +446,11 @@ + + + + + @@ -454,7 +463,7 @@ - + diff --git a/checklist.txt b/checklist.txt index b649bef59..fcbe65028 100644 --- a/checklist.txt +++ b/checklist.txt @@ -15,45 +15,45 @@ Change revision in: core/java/src/net/i2p/CoreVersion.java Review the complete diff from the last release: - mtn diff -r t:i2p-0.6.(xx-1) > out.diff + mtn diff -r t:i2p-0.7.(xx-1) > out.diff vi out.diff Build and tag: ant pkg mtn ci - mtn tag h: i2p-0.6.xx + mtn tag h: i2p-0.7.xx Sync with mtn.i2p2.i2p Create a signed update file with: export I2P=~/i2p - java -cp $I2P/lib/i2p.jar net.i2p.crypto.TrustedUpdate sign i2pupdate.zip i2pupdate.sud /path/to/private.key 0.6.xx + java -cp $I2P/lib/i2p.jar net.i2p.crypto.TrustedUpdate sign i2pupdate.zip i2pupdate.sud /path/to/private.key 0.7.xx Verify signed update file with: java -cp $I2P/lib/i2p.jar net.i2p.crypto.TrustedUpdate showversion i2pupdate.sud java -cp $I2P/lib/i2p.jar net.i2p.crypto.TrustedUpdate verifysig i2pupdate.sud Make the source tarball: - Start with a clean checkout mtn -d i2p.mtn co --branch=i2p.i2p i2p-0.6.xx + Start with a clean checkout mtn -d i2p.mtn co --branch=i2p.i2p i2p-0.7.xx Double-check trust list - tar cjf i2psource-0.6.xx.tar.bz2 --exclude i2p-0.6.xx/_MTN i2p-0.6.xx - mv i2p-0.6.xx.tar.bz2 i2p.i2p + tar cjf i2psource-0.7.xx.tar.bz2 --exclude i2p-0.7.xx/_MTN i2p-0.7.xx + mv i2p-0.7.xx.tar.bz2 i2p.i2p Until the build script gets this ability, you need to rename some files: - mv i2pinstall.exe i2pinstall-0.6.xx.exe - mv i2p.tar.bz2 i2pheadless-0.6.xx.tar.bz2 - mv i2pupdate.zip i2pupdate-0.6.xx.zip + mv i2pinstall.exe i2pinstall-0.7.xx.exe + mv i2p.tar.bz2 i2pheadless-0.7.xx.tar.bz2 + mv i2pupdate.zip i2pupdate-0.7.xx.zip you probably don't need to rename i2pupdate.sud Generate hashes: - sha1sum i2p*0.6.xx.* + sha1sum 
i2p*0.7.xx.* sha1sum i2pupdate.sud now GPG-sign an announcement with the hashes Generate PGP signatures: - gpg -b i2pinstall-0.6.xx.exe - gpg -b i2pheadless-0.6.xx.tar.bz2 - gpg -b i2psource-0.6.xx.tar.bz2 - gpg -b i2pupdate-0.6.xx.zip + gpg -b i2pinstall-0.7.xx.exe + gpg -b i2pheadless-0.7.xx.tar.bz2 + gpg -b i2psource-0.7.xx.tar.bz2 + gpg -b i2pupdate-0.7.xx.zip gpg -b i2pupdate.sud Distribute files to download locations and to www.i2p2.i2p diff --git a/core/java/src/net/i2p/CoreVersion.java b/core/java/src/net/i2p/CoreVersion.java index d46c8a7fb..6c924fe10 100644 --- a/core/java/src/net/i2p/CoreVersion.java +++ b/core/java/src/net/i2p/CoreVersion.java @@ -15,7 +15,7 @@ package net.i2p; */ public class CoreVersion { public final static String ID = "$Revision: 1.72 $ $Date: 2008-08-24 12:00:00 $"; - public final static String VERSION = "0.6.5"; + public final static String VERSION = "0.7.1"; public static void main(String args[]) { System.out.println("I2P Core version: " + VERSION); diff --git a/core/java/src/net/i2p/I2PAppContext.java b/core/java/src/net/i2p/I2PAppContext.java index cac8cdb29..f26f74ab7 100644 --- a/core/java/src/net/i2p/I2PAppContext.java +++ b/core/java/src/net/i2p/I2PAppContext.java @@ -23,7 +23,9 @@ import net.i2p.crypto.SessionKeyManager; import net.i2p.data.RoutingKeyGenerator; import net.i2p.stat.StatManager; import net.i2p.util.Clock; +import net.i2p.util.ConcurrentHashSet; import net.i2p.util.FortunaRandomSource; +import net.i2p.util.KeyRing; import net.i2p.util.LogManager; import net.i2p.util.PooledRandomSource; import net.i2p.util.RandomSource; @@ -75,6 +77,7 @@ public class I2PAppContext { private RoutingKeyGenerator _routingKeyGenerator; private RandomSource _random; private KeyGenerator _keyGenerator; + protected KeyRing _keyRing; // overridden in RouterContext private volatile boolean _statManagerInitialized; private volatile boolean _sessionKeyManagerInitialized; private volatile boolean _namingServiceInitialized; @@ -91,6 +94,8 
@@ public class I2PAppContext { private volatile boolean _routingKeyGeneratorInitialized; private volatile boolean _randomInitialized; private volatile boolean _keyGeneratorInitialized; + protected volatile boolean _keyRingInitialized; // used in RouterContext + private Set _shutdownTasks; /** @@ -141,12 +146,15 @@ public class I2PAppContext { _elGamalEngine = null; _elGamalAESEngine = null; _logManager = null; + _keyRing = null; _statManagerInitialized = false; _sessionKeyManagerInitialized = false; _namingServiceInitialized = false; _elGamalEngineInitialized = false; _elGamalAESEngineInitialized = false; _logManagerInitialized = false; + _keyRingInitialized = false; + _shutdownTasks = new ConcurrentHashSet(0); } /** @@ -179,6 +187,25 @@ public class I2PAppContext { return System.getProperty(propName, defaultValue); } + /** + * Return an int with an int default + */ + public int getProperty(String propName, int defaultVal) { + String val = null; + if (_overrideProps != null) { + val = _overrideProps.getProperty(propName); + if (val == null) + val = System.getProperty(propName); + } + int ival = defaultVal; + if (val != null) { + try { + ival = Integer.parseInt(val); + } catch (NumberFormatException nfe) {} + } + return ival; + } + /** * Access the configuration attributes of this context, listing the properties * provided during the context construction, as well as the ones included in @@ -493,6 +520,23 @@ public class I2PAppContext { } } + /** + * Basic hash map + */ + public KeyRing keyRing() { + if (!_keyRingInitialized) + initializeKeyRing(); + return _keyRing; + } + + protected void initializeKeyRing() { + synchronized (this) { + if (_keyRing == null) + _keyRing = new KeyRing(); + _keyRingInitialized = true; + } + } + /** * [insert snarky comment here] * @@ -516,4 +560,13 @@ public class I2PAppContext { _randomInitialized = true; } } + + public void addShutdownTask(Runnable task) { + _shutdownTasks.add(task); + } + + public Set getShutdownTasks() { + return 
new HashSet(_shutdownTasks); + } + } diff --git a/core/java/src/net/i2p/client/DestReplyMessageHandler.java b/core/java/src/net/i2p/client/DestReplyMessageHandler.java new file mode 100644 index 000000000..25699ad02 --- /dev/null +++ b/core/java/src/net/i2p/client/DestReplyMessageHandler.java @@ -0,0 +1,25 @@ +package net.i2p.client; + +/* + * Released into the public domain + * with no warranty of any kind, either expressed or implied. + */ + +import net.i2p.I2PAppContext; +import net.i2p.data.i2cp.I2CPMessage; +import net.i2p.data.i2cp.DestReplyMessage; + +/** + * Handle I2CP dest replies from the router + */ +class DestReplyMessageHandler extends HandlerImpl { + public DestReplyMessageHandler(I2PAppContext ctx) { + super(ctx, DestReplyMessage.MESSAGE_TYPE); + } + + public void handleMessage(I2CPMessage message, I2PSessionImpl session) { + _log.debug("Handle message " + message); + DestReplyMessage msg = (DestReplyMessage) message; + ((I2PSimpleSession)session).destReceived(msg.getDestination()); + } +} diff --git a/core/java/src/net/i2p/client/I2CPMessageProducer.java b/core/java/src/net/i2p/client/I2CPMessageProducer.java index 9af1fbd19..b897d22d0 100644 --- a/core/java/src/net/i2p/client/I2CPMessageProducer.java +++ b/core/java/src/net/i2p/client/I2CPMessageProducer.java @@ -9,6 +9,8 @@ package net.i2p.client; * */ +import java.util.Date; +import java.util.Properties; import java.util.Set; import net.i2p.I2PAppContext; @@ -26,8 +28,10 @@ import net.i2p.data.i2cp.CreateLeaseSetMessage; import net.i2p.data.i2cp.CreateSessionMessage; import net.i2p.data.i2cp.DestroySessionMessage; import net.i2p.data.i2cp.MessageId; +import net.i2p.data.i2cp.ReconfigureSessionMessage; import net.i2p.data.i2cp.ReportAbuseMessage; import net.i2p.data.i2cp.SendMessageMessage; +import net.i2p.data.i2cp.SendMessageExpiresMessage; import net.i2p.data.i2cp.SessionConfig; import net.i2p.util.Log; @@ -91,8 +95,13 @@ class I2CPMessageProducer { * */ public void sendMessage(I2PSessionImpl 
session, Destination dest, long nonce, byte[] payload, SessionTag tag, - SessionKey key, Set tags, SessionKey newKey) throws I2PSessionException { - SendMessageMessage msg = new SendMessageMessage(); + SessionKey key, Set tags, SessionKey newKey, long expires) throws I2PSessionException { + SendMessageMessage msg; + if (expires > 0) { + msg = new SendMessageExpiresMessage(); + ((SendMessageExpiresMessage)msg).setExpiration(new Date(expires)); + } else + msg = new SendMessageMessage(); msg.setDestination(dest); msg.setSessionId(session.getSessionId()); msg.setNonce(nonce); @@ -181,4 +190,33 @@ class I2CPMessageProducer { msg.setSessionId(session.getSessionId()); session.sendMessage(msg); } + + /** + * Update number of tunnels + * + * @param tunnels 0 for original configured number + */ + public void updateTunnels(I2PSessionImpl session, int tunnels) throws I2PSessionException { + ReconfigureSessionMessage msg = new ReconfigureSessionMessage(); + SessionConfig cfg = new SessionConfig(session.getMyDestination()); + Properties props = session.getOptions(); + if (tunnels > 0) { + Properties newprops = new Properties(); + newprops.putAll(props); + props = newprops; + props.setProperty("inbound.quantity", "" + tunnels); + props.setProperty("outbound.quantity", "" + tunnels); + props.setProperty("inbound.backupQuantity", "0"); + props.setProperty("outbound.backupQuantity", "0"); + } + cfg.setOptions(props); + try { + cfg.signSessionConfig(session.getPrivateKey()); + } catch (DataFormatException dfe) { + throw new I2PSessionException("Unable to sign the session config", dfe); + } + msg.setSessionConfig(cfg); + msg.setSessionId(session.getSessionId()); + session.sendMessage(msg); + } } diff --git a/core/java/src/net/i2p/client/I2PClientImpl.java b/core/java/src/net/i2p/client/I2PClientImpl.java index 4783458a3..5b1b44867 100644 --- a/core/java/src/net/i2p/client/I2PClientImpl.java +++ b/core/java/src/net/i2p/client/I2PClientImpl.java @@ -77,6 +77,6 @@ class I2PClientImpl 
implements I2PClient { * */ public I2PSession createSession(I2PAppContext context, InputStream destKeyStream, Properties options) throws I2PSessionException { - return new I2PSessionImpl2(context, destKeyStream, options); // thread safe + return new I2PSessionMuxedImpl(context, destKeyStream, options); // thread safe and muxed } } diff --git a/core/java/src/net/i2p/client/I2PClientMessageHandlerMap.java b/core/java/src/net/i2p/client/I2PClientMessageHandlerMap.java index 50b795571..6f0d95051 100644 --- a/core/java/src/net/i2p/client/I2PClientMessageHandlerMap.java +++ b/core/java/src/net/i2p/client/I2PClientMessageHandlerMap.java @@ -16,7 +16,6 @@ import net.i2p.data.i2cp.MessageStatusMessage; import net.i2p.data.i2cp.RequestLeaseSetMessage; import net.i2p.data.i2cp.SessionStatusMessage; import net.i2p.data.i2cp.SetDateMessage; -import net.i2p.util.Log; /** * Contains a map of message handlers that a session will want to use @@ -24,9 +23,11 @@ import net.i2p.util.Log; * @author jrandom */ class I2PClientMessageHandlerMap { - private final static Log _log = new Log(I2PClientMessageHandlerMap.class); /** map of message type id --> I2CPMessageHandler */ - private I2CPMessageHandler _handlers[]; + protected I2CPMessageHandler _handlers[]; + + /** for extension */ + public I2PClientMessageHandlerMap() {} public I2PClientMessageHandlerMap(I2PAppContext context) { int highest = DisconnectMessage.MESSAGE_TYPE; @@ -49,4 +50,4 @@ class I2PClientMessageHandlerMap { if ( (messageTypeId < 0) || (messageTypeId >= _handlers.length) ) return null; return _handlers[messageTypeId]; } -} \ No newline at end of file +} diff --git a/core/java/src/net/i2p/client/I2PSession.java b/core/java/src/net/i2p/client/I2PSession.java index 9d053ef5d..1776af5c0 100644 --- a/core/java/src/net/i2p/client/I2PSession.java +++ b/core/java/src/net/i2p/client/I2PSession.java @@ -12,6 +12,7 @@ package net.i2p.client; import java.util.Set; import net.i2p.data.Destination; +import net.i2p.data.Hash; import 
net.i2p.data.PrivateKey; import net.i2p.data.SessionKey; import net.i2p.data.SigningPrivateKey; @@ -39,6 +40,8 @@ public interface I2PSession { */ public boolean sendMessage(Destination dest, byte[] payload) throws I2PSessionException; public boolean sendMessage(Destination dest, byte[] payload, int offset, int size) throws I2PSessionException; + /** See I2PSessionMuxedImpl for details */ + public boolean sendMessage(Destination dest, byte[] payload, int proto, int fromport, int toport) throws I2PSessionException; /** * Like sendMessage above, except the key used and the tags sent are exposed to the @@ -69,6 +72,13 @@ public interface I2PSession { */ public boolean sendMessage(Destination dest, byte[] payload, SessionKey keyUsed, Set tagsSent) throws I2PSessionException; public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent) throws I2PSessionException; + public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent, long expire) throws I2PSessionException; + /** See I2PSessionMuxedImpl for details */ + public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent, + int proto, int fromport, int toport) throws I2PSessionException; + /** See I2PSessionMuxedImpl for details */ + public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent, long expire, + int proto, int fromport, int toport) throws I2PSessionException; /** Receive a message that the router has notified the client about, returning * the payload. 
@@ -126,4 +136,24 @@ public interface I2PSession { * Retrieve the signing SigningPrivateKey associated with the Destination */ public SigningPrivateKey getPrivateKey(); + + /** + * Look up a Hash + * + */ + public Destination lookupDest(Hash h) throws I2PSessionException; + + /** See I2PSessionMuxedImpl for details */ + public void addSessionListener(I2PSessionListener lsnr, int proto, int port); + /** See I2PSessionMuxedImpl for details */ + public void addMuxedSessionListener(I2PSessionMuxedListener l, int proto, int port); + /** See I2PSessionMuxedImpl for details */ + public void removeListener(int proto, int port); + + public static final int PORT_ANY = 0; + public static final int PORT_UNSPECIFIED = 0; + public static final int PROTO_ANY = 0; + public static final int PROTO_UNSPECIFIED = 0; + public static final int PROTO_STREAMING = 6; + public static final int PROTO_DATAGRAM = 17; } diff --git a/core/java/src/net/i2p/client/I2PSessionDemultiplexer.java b/core/java/src/net/i2p/client/I2PSessionDemultiplexer.java new file mode 100644 index 000000000..9a1ff42e3 --- /dev/null +++ b/core/java/src/net/i2p/client/I2PSessionDemultiplexer.java @@ -0,0 +1,135 @@ +package net.i2p.client; + +import java.util.concurrent.ConcurrentHashMap; +import java.util.Map; + +import net.i2p.I2PAppContext; +import net.i2p.util.Log; + +/* + * public domain + */ + +/** + * Implement multiplexing with a 1-byte 'protocol' and a two-byte 'port'. + * Listeners register with either addListener() or addMuxedListener(), + * depending on whether they want to hear about the + * protocol, from port, and to port for every received message. + * + * This only calls one listener, not all that apply. 
+ * + * @author zzz + */ +public class I2PSessionDemultiplexer implements I2PSessionMuxedListener { + private Log _log; + private Map _listeners; + + public I2PSessionDemultiplexer(I2PAppContext ctx) { + _log = ctx.logManager().getLog(I2PSessionDemultiplexer.class); + _listeners = new ConcurrentHashMap(); + } + + /** unused */ + public void messageAvailable(I2PSession session, int msgId, long size) {} + + public void messageAvailable(I2PSession session, int msgId, long size, int proto, int fromport, int toport ) { + I2PSessionMuxedListener l = findListener(proto, toport); + if (l != null) + l.messageAvailable(session, msgId, size, proto, fromport, toport); + else { + // no listener, throw it out + _log.error("No listener found for proto: " + proto + " port: " + toport + " msg id: " + msgId + + " from pool of " + _listeners.size() + " listeners"); + try { + session.receiveMessage(msgId); + } catch (I2PSessionException ise) {} + } + } + + public void reportAbuse(I2PSession session, int severity) { + for (I2PSessionMuxedListener l : _listeners.values()) + l.reportAbuse(session, severity); + } + + public void disconnected(I2PSession session) { + for (I2PSessionMuxedListener l : _listeners.values()) + l.disconnected(session); + } + + public void errorOccurred(I2PSession session, String message, Throwable error) { + for (I2PSessionMuxedListener l : _listeners.values()) + l.errorOccurred(session, message, error); + } + + /** + * For those that don't need to hear about the protocol and ports + * in messageAvailable() + * (Streaming lib) + */ + public void addListener(I2PSessionListener l, int proto, int port) { + _listeners.put(key(proto, port), new NoPortsListener(l)); + } + + /** + * For those that do care + * UDP perhaps + */ + public void addMuxedListener(I2PSessionMuxedListener l, int proto, int port) { + _listeners.put(key(proto, port), l); + } + + public void removeListener(int proto, int port) { + _listeners.remove(key(proto, port)); + } + + /** find the one 
listener that most specifically matches the request */ + private I2PSessionMuxedListener findListener(int proto, int port) { + I2PSessionMuxedListener rv = getListener(proto, port); + if (rv != null) return rv; + if (port != I2PSession.PORT_ANY) { // try any port + rv = getListener(proto, I2PSession.PORT_ANY); + if (rv != null) return rv; + } + if (proto != I2PSession.PROTO_ANY) { // try any protocol + rv = getListener(I2PSession.PROTO_ANY, port); + if (rv != null) return rv; + } + if (proto != I2PSession.PROTO_ANY && port != I2PSession.PORT_ANY) { // try default + rv = getListener(I2PSession.PROTO_ANY, I2PSession.PORT_ANY); + } + return rv; + } + + private I2PSessionMuxedListener getListener(int proto, int port) { + return _listeners.get(key(proto, port)); + } + + private Integer key(int proto, int port) { + return Integer.valueOf(((port << 8) & 0xffff00) | proto); + } + + /** for those that don't care about proto and ports */ + private static class NoPortsListener implements I2PSessionMuxedListener { + private I2PSessionListener _l; + + public NoPortsListener(I2PSessionListener l) { + _l = l; + } + + public void messageAvailable(I2PSession session, int msgId, long size) { + throw new IllegalArgumentException("no"); + } + public void messageAvailable(I2PSession session, int msgId, long size, int proto, int fromport, int toport) { + _l.messageAvailable(session, msgId, size); + } + public void reportAbuse(I2PSession session, int severity) { + _l.reportAbuse(session, severity); + } + public void disconnected(I2PSession session) { + _l.disconnected(session); + } + public void errorOccurred(I2PSession session, String message, Throwable error) { + _l.errorOccurred(session, message, error); + } + } +} diff --git a/core/java/src/net/i2p/client/I2PSessionImpl.java b/core/java/src/net/i2p/client/I2PSessionImpl.java index 6b62513c5..0e13f2c56 100644 --- a/core/java/src/net/i2p/client/I2PSessionImpl.java +++ b/core/java/src/net/i2p/client/I2PSessionImpl.java @@ -14,6 +14,7 @@ 
import java.io.InputStream; import java.io.OutputStream; import java.net.Socket; import java.net.UnknownHostException; +import java.util.concurrent.ConcurrentHashMap; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -26,6 +27,7 @@ import java.util.Set; import net.i2p.I2PAppContext; import net.i2p.data.DataFormatException; import net.i2p.data.Destination; +import net.i2p.data.Hash; import net.i2p.data.LeaseSet; import net.i2p.data.PrivateKey; import net.i2p.data.SessionKey; @@ -39,6 +41,7 @@ import net.i2p.data.i2cp.MessagePayloadMessage; import net.i2p.data.i2cp.SessionId; import net.i2p.util.I2PThread; import net.i2p.util.Log; +import net.i2p.util.SimpleScheduler; import net.i2p.util.SimpleTimer; /** @@ -48,7 +51,7 @@ import net.i2p.util.SimpleTimer; * @author jrandom */ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessageEventListener { - private Log _log; + protected Log _log; /** who we are */ private Destination _myDestination; /** private key for decryption */ @@ -63,23 +66,23 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa private LeaseSet _leaseSet; /** hostname of router */ - private String _hostname; + protected String _hostname; /** port num to router */ - private int _portNum; + protected int _portNum; /** socket for comm */ - private Socket _socket; + protected Socket _socket; /** reader that always searches for messages */ - private I2CPMessageReader _reader; + protected I2CPMessageReader _reader; /** where we pipe our messages */ - private OutputStream _out; + protected OutputStream _out; /** who we send events to */ - private I2PSessionListener _sessionListener; + protected I2PSessionListener _sessionListener; /** class that generates new messages */ protected I2CPMessageProducer _producer; /** map of Long --> MessagePayloadMessage */ - private Map _availableMessages; + protected Map _availableMessages; protected I2PClientMessageHandlerMap 
_handlerMap; @@ -90,10 +93,10 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa private Object _leaseSetWait = new Object(); /** whether the session connection has already been closed (or not yet opened) */ - private boolean _closed; + protected boolean _closed; /** whether the session connection is in the process of being closed */ - private boolean _closing; + protected boolean _closing; /** have we received the current date from the router yet? */ private boolean _dateReceived; @@ -106,7 +109,10 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa * reading of other messages (in turn, potentially leading to deadlock) * */ - private AvailabilityNotifier _availabilityNotifier; + protected AvailabilityNotifier _availabilityNotifier; + + private long _lastActivity; + private boolean _isReduced; void dateUpdated() { _dateReceived = true; @@ -117,6 +123,9 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa public static final int LISTEN_PORT = 7654; + /** for extension */ + public I2PSessionImpl() {} + /** * Create a new session, reading the Destination, PrivateKey, and SigningPrivateKey * from the destKeyStream, and using the specified options to connect to the router @@ -131,7 +140,7 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa _closing = false; _producer = new I2CPMessageProducer(context); _availabilityNotifier = new AvailabilityNotifier(); - _availableMessages = new HashMap(); + _availableMessages = new ConcurrentHashMap(); try { readDestination(destKeyStream); } catch (DataFormatException dfe) { @@ -144,14 +153,13 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa loadConfig(options); _sessionId = null; _leaseSet = null; - _context.statManager().createRateStat("client.availableMessages", "How many messages are available for the current client", "ClientMessages", new long[] { 60*1000, 10*60*1000 
}); } /** * Parse the config for anything we know about * */ - private void loadConfig(Properties options) { + protected void loadConfig(Properties options) { _options = new Properties(); _options.putAll(filter(options)); _hostname = _options.getProperty(I2PClient.PROP_TCP_HOST, "localhost"); @@ -285,6 +293,7 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa _log.info(getPrefix() + "Lease set created with inbound tunnels after " + (connected - startConnect) + "ms - ready to participate in the network!"); + startIdleMonitor(); } catch (UnknownHostException uhe) { _closed = true; throw new I2PSessionException(getPrefix() + "Invalid session configuration", uhe); @@ -300,17 +309,12 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa * */ public byte[] receiveMessage(int msgId) throws I2PSessionException { - int remaining = 0; - MessagePayloadMessage msg = null; - synchronized (_availableMessages) { - msg = (MessagePayloadMessage) _availableMessages.remove(new Long(msgId)); - remaining = _availableMessages.size(); - } - _context.statManager().addRateData("client.availableMessages", remaining, 0); + MessagePayloadMessage msg = _availableMessages.remove(new Long(msgId)); if (msg == null) { - _log.error("Receive message " + msgId + " had no matches, remaining=" + remaining); + _log.error("Receive message " + msgId + " had no matches"); return null; } + updateActivity(); return msg.getPayload().getUnencryptedData(); } @@ -347,12 +351,7 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa */ public void addNewMessage(MessagePayloadMessage msg) { Long mid = new Long(msg.getMessageId()); - int avail = 0; - synchronized (_availableMessages) { - _availableMessages.put(mid, msg); - avail = _availableMessages.size(); - } - _context.statManager().addRateData("client.availableMessages", avail, 0); + _availableMessages.put(mid, msg); long id = msg.getMessageId(); byte data[] = 
msg.getPayload().getUnencryptedData(); if ((data == null) || (data.length <= 0)) { @@ -365,27 +364,20 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa if (_log.shouldLog(Log.INFO)) _log.info(getPrefix() + "Notified availability for session " + _sessionId + ", message " + id); } - SimpleTimer.getInstance().addEvent(new VerifyUsage(mid), 30*1000); + SimpleScheduler.getInstance().addEvent(new VerifyUsage(mid), 30*1000); } - private class VerifyUsage implements SimpleTimer.TimedEvent { + protected class VerifyUsage implements SimpleTimer.TimedEvent { private Long _msgId; public VerifyUsage(Long id) { _msgId = id; } public void timeReached() { - MessagePayloadMessage removed = null; - int remaining = 0; - synchronized (_availableMessages) { - removed = (MessagePayloadMessage)_availableMessages.remove(_msgId); - remaining = _availableMessages.size(); - } - if (removed != null) { - _log.log(Log.CRIT, "Message NOT removed! id=" + _msgId + ": " + removed + ": remaining: " + remaining); - _context.statManager().addRateData("client.availableMessages", remaining, 0); - } + MessagePayloadMessage removed = _availableMessages.remove(_msgId); + if (removed != null && !isClosed()) + _log.error("Message NOT removed! 
id=" + _msgId + ": " + removed); } } - private class AvailabilityNotifier implements Runnable { + protected class AvailabilityNotifier implements Runnable { private List _pendingIds; private List _pendingSizes; private boolean _alive; @@ -432,8 +424,8 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa long before = System.currentTimeMillis(); _sessionListener.messageAvailable(I2PSessionImpl.this, msgId.intValue(), size.intValue()); long duration = System.currentTimeMillis() - before; - if ((duration > 100) && _log.shouldLog(Log.WARN)) - _log.warn("Message availability notification for " + msgId.intValue() + " took " + if ((duration > 100) && _log.shouldLog(Log.INFO)) + _log.info("Message availability notification for " + msgId.intValue() + " took " + duration + " to " + _sessionListener); } catch (Exception e) { _log.log(Log.CRIT, "Error notifying app of message availability", e); @@ -546,10 +538,10 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa * Pass off the error to the listener */ void propogateError(String msg, Throwable error) { - if (_log.shouldLog(Log.WARN)) - _log.warn(getPrefix() + "Error occurred: " + msg + " - " + error.getMessage()); - if (_log.shouldLog(Log.WARN)) - _log.warn(getPrefix() + " cause", error); + if (_log.shouldLog(Log.ERROR)) + _log.error(getPrefix() + "Error occurred: " + msg + " - " + error.getMessage()); + if (_log.shouldLog(Log.ERROR)) + _log.error(getPrefix() + " cause", error); if (_sessionListener != null) _sessionListener.errorOccurred(this, msg, error); } @@ -566,7 +558,7 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa if (_log.shouldLog(Log.INFO)) _log.info(getPrefix() + "Destroy the session", new Exception("DestroySession()")); _closing = true; // we use this to prevent a race - if (sendDisconnect) { + if (sendDisconnect && _producer != null) { // only null if overridden by I2PSimpleSession try { 
_producer.disconnect(this); } catch (I2PSessionException ipe) { @@ -659,4 +651,40 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa } protected String getPrefix() { return "[" + (_sessionId == null ? -1 : _sessionId.getSessionId()) + "]: "; } + + public Destination lookupDest(Hash h) throws I2PSessionException { + return null; + } + + protected void updateActivity() { + _lastActivity = _context.clock().now(); + if (_isReduced) { + _isReduced = false; + if (_log.shouldLog(Log.WARN)) + _log.warn(getPrefix() + "Restoring original tunnel quantity"); + try { + _producer.updateTunnels(this, 0); + } catch (I2PSessionException ise) { + _log.error(getPrefix() + "bork restore from reduced"); + } + } + } + + public long lastActivity() { + return _lastActivity; + } + + public void setReduced() { + _isReduced = true; + } + + private void startIdleMonitor() { + _isReduced = false; + boolean reduce = Boolean.valueOf(_options.getProperty("i2cp.reduceOnIdle")).booleanValue(); + boolean close = Boolean.valueOf(_options.getProperty("i2cp.closeOnIdle")).booleanValue(); + if (reduce || close) { + updateActivity(); + SimpleScheduler.getInstance().addEvent(new SessionIdleTimer(_context, this, reduce, close), SessionIdleTimer.MINIMUM_TIME); + } + } } diff --git a/core/java/src/net/i2p/client/I2PSessionImpl2.java b/core/java/src/net/i2p/client/I2PSessionImpl2.java index bbaf399f4..9abce4b72 100644 --- a/core/java/src/net/i2p/client/I2PSessionImpl2.java +++ b/core/java/src/net/i2p/client/I2PSessionImpl2.java @@ -31,7 +31,6 @@ import net.i2p.util.Log; * @author jrandom */ class I2PSessionImpl2 extends I2PSessionImpl { - private Log _log; /** set of MessageState objects, representing all of the messages in the process of being sent */ private Set _sendingStates; @@ -41,6 +40,9 @@ class I2PSessionImpl2 extends I2PSessionImpl { private final static boolean SHOULD_COMPRESS = true; private final static boolean SHOULD_DECOMPRESS = true; + /** for extension */ + 
public I2PSessionImpl2() {} + /** * Create a new session, reading the Destination, PrivateKey, and SigningPrivateKey * from the destKeyStream, and using the specified options to connect to the router @@ -91,7 +93,7 @@ class I2PSessionImpl2 extends I2PSessionImpl { * set to false. */ private static final int DONT_COMPRESS_SIZE = 66; - private boolean shouldCompress(int size) { + protected boolean shouldCompress(int size) { if (size <= DONT_COMPRESS_SIZE) return false; String p = getOptions().getProperty("i2cp.gzip"); @@ -100,22 +102,50 @@ class I2PSessionImpl2 extends I2PSessionImpl { return SHOULD_COMPRESS; } + public void addSessionListener(I2PSessionListener lsnr, int proto, int port) { + throw new IllegalArgumentException("Use MuxedImpl"); + } + public void addMuxedSessionListener(I2PSessionMuxedListener l, int proto, int port) { + throw new IllegalArgumentException("Use MuxedImpl"); + } + public void removeListener(int proto, int port) { + throw new IllegalArgumentException("Use MuxedImpl"); + } + public boolean sendMessage(Destination dest, byte[] payload, int proto, int fromport, int toport) throws I2PSessionException { + throw new IllegalArgumentException("Use MuxedImpl"); + } + public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent, + int proto, int fromport, int toport) throws I2PSessionException { + throw new IllegalArgumentException("Use MuxedImpl"); + } + public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent, long expire, + int proto, int fromport, int toport) throws I2PSessionException { + throw new IllegalArgumentException("Use MuxedImpl"); + } + @Override public boolean sendMessage(Destination dest, byte[] payload) throws I2PSessionException { return sendMessage(dest, payload, 0, payload.length); } public boolean sendMessage(Destination dest, byte[] payload, int offset, int size) throws I2PSessionException { - return 
sendMessage(dest, payload, offset, size, new SessionKey(), new HashSet(64)); + // we don't do end-to-end crypto any more + //return sendMessage(dest, payload, offset, size, new SessionKey(), new HashSet(64), 0); + return sendMessage(dest, payload, offset, size, null, null, 0); } @Override public boolean sendMessage(Destination dest, byte[] payload, SessionKey keyUsed, Set tagsSent) throws I2PSessionException { - return sendMessage(dest, payload, 0, payload.length, keyUsed, tagsSent); + return sendMessage(dest, payload, 0, payload.length, keyUsed, tagsSent, 0); } public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent) throws I2PSessionException { + return sendMessage(dest, payload, offset, size, keyUsed, tagsSent, 0); + } + public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent, long expires) + throws I2PSessionException { if (_log.shouldLog(Log.DEBUG)) _log.debug("sending message"); if (isClosed()) throw new I2PSessionException("Already closed"); + updateActivity(); // Sadly there is no way to send something completely uncompressed in a backward-compatible way, // so we have to still send it in a gzip format, which adds 23 bytes (2.4% for a 960-byte msg) @@ -140,7 +170,7 @@ class I2PSessionImpl2 extends I2PSessionImpl { } _context.statManager().addRateData("i2cp.tx.msgCompressed", compressed, 0); _context.statManager().addRateData("i2cp.tx.msgExpanded", size, 0); - return sendBestEffort(dest, payload, keyUsed, tagsSent); + return sendBestEffort(dest, payload, keyUsed, tagsSent, expires); } /** @@ -166,7 +196,7 @@ class I2PSessionImpl2 extends I2PSessionImpl { private static final int NUM_TAGS = 50; - private boolean sendBestEffort(Destination dest, byte payload[], SessionKey keyUsed, Set tagsSent) + protected boolean sendBestEffort(Destination dest, byte payload[], SessionKey keyUsed, Set tagsSent, long expires) throws I2PSessionException { 
SessionKey key = null; SessionKey newKey = null; @@ -174,6 +204,7 @@ class I2PSessionImpl2 extends I2PSessionImpl { Set sentTags = null; int oldTags = 0; long begin = _context.clock().now(); + /*********** if (I2CPMessageProducer.END_TO_END_CRYPTO) { if (_log.shouldLog(Log.DEBUG)) _log.debug("begin sendBestEffort"); key = _context.sessionKeyManager().getCurrentKey(dest.getPublicKey()); @@ -218,6 +249,7 @@ class I2PSessionImpl2 extends I2PSessionImpl { } else { // not using end to end crypto, so don't ever bundle any tags } + **********/ if (_log.shouldLog(Log.DEBUG)) _log.debug("before creating nonce"); @@ -231,14 +263,14 @@ class I2PSessionImpl2 extends I2PSessionImpl { if (_log.shouldLog(Log.DEBUG)) _log.debug(getPrefix() + "Setting key = " + key); if (keyUsed != null) { - if (I2CPMessageProducer.END_TO_END_CRYPTO) { - if (newKey != null) - keyUsed.setData(newKey.getData()); - else - keyUsed.setData(key.getData()); - } else { + //if (I2CPMessageProducer.END_TO_END_CRYPTO) { + // if (newKey != null) + // keyUsed.setData(newKey.getData()); + // else + // keyUsed.setData(key.getData()); + //} else { keyUsed.setData(SessionKey.INVALID_KEY.getData()); - } + //} } if (tagsSent != null) { if (sentTags != null) { @@ -259,7 +291,7 @@ class I2PSessionImpl2 extends I2PSessionImpl { + state.getNonce() + " for best effort " + " sync took " + (inSendingSync-beforeSendingSync) + " add took " + (afterSendingSync-inSendingSync)); - _producer.sendMessage(this, dest, nonce, payload, tag, key, sentTags, newKey); + _producer.sendMessage(this, dest, nonce, payload, tag, key, sentTags, newKey, expires); // since this is 'best effort', all we're waiting for is a status update // saying that the router received it - in theory, that should come back @@ -396,6 +428,8 @@ class I2PSessionImpl2 extends I2PSessionImpl { } private void clearStates() { + if (_sendingStates == null) // only null if overridden by I2PSimpleSession + return; synchronized (_sendingStates) { for (Iterator iter = 
_sendingStates.iterator(); iter.hasNext();) { MessageState state = (MessageState) iter.next(); diff --git a/core/java/src/net/i2p/client/I2PSessionListener.java b/core/java/src/net/i2p/client/I2PSessionListener.java index 4c78c6527..740ebeeab 100644 --- a/core/java/src/net/i2p/client/I2PSessionListener.java +++ b/core/java/src/net/i2p/client/I2PSessionListener.java @@ -20,7 +20,7 @@ public interface I2PSessionListener { * size # of bytes. * @param session session to notify * @param msgId message number available - * @param size size of the message + * @param size size of the message - why it's a long and not an int is a mystery */ void messageAvailable(I2PSession session, int msgId, long size); @@ -42,4 +42,4 @@ public interface I2PSessionListener { * */ void errorOccurred(I2PSession session, String message, Throwable error); -} \ No newline at end of file +} diff --git a/core/java/src/net/i2p/client/I2PSessionMuxedImpl.java b/core/java/src/net/i2p/client/I2PSessionMuxedImpl.java new file mode 100644 index 000000000..b08d01c26 --- /dev/null +++ b/core/java/src/net/i2p/client/I2PSessionMuxedImpl.java @@ -0,0 +1,320 @@ +package net.i2p.client; + +/* + * public domain + */ + +import java.io.IOException; +import java.io.InputStream; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.HashSet; +import java.util.Properties; +import java.util.Set; + +import net.i2p.I2PAppContext; +import net.i2p.data.DataHelper; +import net.i2p.data.Destination; +import net.i2p.data.SessionKey; +import net.i2p.data.SessionTag; +import net.i2p.data.i2cp.MessagePayloadMessage; +import net.i2p.util.Log; +import net.i2p.util.SimpleScheduler; + +/** + * I2PSession with protocol and ports + * + * Streaming lib has been modified to send I2PSession.PROTO_STREAMING but + * still receives all. It sends with fromPort and toPort = 0, and receives on all ports. + * + * No datagram apps have been modified yet. 
+ + * Therefore the compatibility situation is as follows: + * + * Compatibility: + * old streaming -> new streaming: sends proto anything, rcvs proto anything + * new streaming -> old streaming: sends PROTO_STREAMING, ignores rcvd proto + * old datagram -> new datagram: sends proto anything, rcvs proto anything + * new datagram -> old datagram: sends PROTO_DATAGRAM, ignores rcvd proto + * In all the above cases, streaming and datagram receive traffic for the other + * protocol, same as before. + * + * old datagram -> new muxed: doesn't work because the old sends proto 0 but the udp side + * of the mux registers with PROTO_DATAGRAM, so the datagrams + * go to the streaming side, same as before. + * old streaming -> new muxed: works + * + * Typical Usage: + * Streaming + datagrams: + * I2PSocketManager sockMgr = getSocketManager(); + * I2PSession session = sockMgr.getSession(); + * session.addMuxedSessionListener(myI2PSessionMuxedListener, I2PSession.PROTO_DATAGRAM, I2PSession.PORT_ANY); + * * or * + * session.addSessionListener(myI2PSessionListener, I2PSession.PROTO_DATAGRAM, I2PSession.PORT_ANY); + * session.sendMessage(dest, payload, I2PSession.PROTO_DATAGRAM, fromPort, toPort); + * + * Datagrams only, with multiple ports: + * I2PClient client = I2PClientFactory.createClient(); + * ... 
+ * I2PSession session = client.createSession(...); + * session.addMuxedSessionListener(myI2PSessionMuxedListener, I2PSession.PROTO_DATAGRAM, I2PSession.PORT_ANY); + * * or * + * session.addSessionListener(myI2PSessionListener, I2PSession.PROTO_DATAGRAM, I2PSession.PORT_ANY); + * session.sendMessage(dest, payload, I2PSession.PROTO_DATAGRAM, fromPort, toPort); + * + * Multiple streaming ports: + * Needs some streaming lib hacking + * + * @author zzz + */ +class I2PSessionMuxedImpl extends I2PSessionImpl2 implements I2PSession { + private I2PSessionDemultiplexer _demultiplexer; + + public I2PSessionMuxedImpl(I2PAppContext ctx, InputStream destKeyStream, Properties options) throws I2PSessionException { + super(ctx, destKeyStream, options); + // also stored in _sessionListener but we keep it in _demultipexer + // as well so we don't have to keep casting + _demultiplexer = new I2PSessionDemultiplexer(ctx); + super.setSessionListener(_demultiplexer); + // discards the one in super(), sorry about that... (no it wasn't started yet) + _availabilityNotifier = new MuxedAvailabilityNotifier(); + } + + /** listen on all protocols and ports */ + @Override + public void setSessionListener(I2PSessionListener lsnr) { + _demultiplexer.addListener(lsnr, PROTO_ANY, PORT_ANY); + } + + /** + * Listen on specified protocol and port. + * + * An existing listener with the same proto and port is replaced. + * Only the listener with the best match is called back for each message. + * + * @param proto 1-254 or PROTO_ANY for all; recommended: + * I2PSession.PROTO_STREAMING + * I2PSession.PROTO_DATAGRAM + * 255 disallowed + * @param port 1-65535 or PORT_ANY for all + */ + public void addSessionListener(I2PSessionListener lsnr, int proto, int port) { + _demultiplexer.addListener(lsnr, proto, port); + } + + /** + * Listen on specified protocol and port, and receive notification + * of proto, fromPort, and toPort for every message. 
+ * @param proto 1-254 or 0 for all; 255 disallowed + * @param port 1-65535 or 0 for all + */ + public void addMuxedSessionListener(I2PSessionMuxedListener l, int proto, int port) { + _demultiplexer.addMuxedListener(l, proto, port); + } + + /** removes the specified listener (only) */ + public void removeListener(int proto, int port) { + _demultiplexer.removeListener(proto, port); + } + + @Override + public boolean sendMessage(Destination dest, byte[] payload) throws I2PSessionException { + return sendMessage(dest, payload, 0, payload.length, null, null, + 0, PROTO_UNSPECIFIED, PORT_UNSPECIFIED, PORT_UNSPECIFIED); + } + + @Override + public boolean sendMessage(Destination dest, byte[] payload, int proto, int fromport, int toport) throws I2PSessionException { + return sendMessage(dest, payload, 0, payload.length, null, null, 0, proto, fromport, toport); + } + + @Override + public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, + SessionKey keyUsed, Set tagsSent, long expires) + throws I2PSessionException { + return sendMessage(dest, payload, offset, size, keyUsed, tagsSent, 0, PROTO_UNSPECIFIED, PORT_UNSPECIFIED, PORT_UNSPECIFIED); + } + + @Override + public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent, + int proto, int fromport, int toport) throws I2PSessionException { + return sendMessage(dest, payload, offset, size, keyUsed, tagsSent, 0, proto, fromport, toport); + } + + /** + * @param proto 1-254 or 0 for unset; recommended: + * I2PSession.PROTO_UNSPECIFIED + * I2PSession.PROTO_STREAMING + * I2PSession.PROTO_DATAGRAM + * 255 disallowed + * @param fromport 1-65535 or 0 for unset + * @param toport 1-65535 or 0 for unset + */ + public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, + SessionKey keyUsed, Set tagsSent, long expires, + int proto, int fromPort, int toPort) + throws I2PSessionException { + if (isClosed()) throw new 
I2PSessionException("Already closed"); + updateActivity(); + + boolean sc = shouldCompress(size); + if (sc) + payload = DataHelper.compress(payload, offset, size); + else + payload = DataHelper.compress(payload, offset, size, DataHelper.NO_COMPRESSION); + + setProto(payload, proto); + setFromPort(payload, fromPort); + setToPort(payload, toPort); + + _context.statManager().addRateData("i2cp.tx.msgCompressed", payload.length, 0); + _context.statManager().addRateData("i2cp.tx.msgExpanded", size, 0); + return sendBestEffort(dest, payload, keyUsed, tagsSent, expires); + } + + /** + * Receive a payload message and let the app know its available + */ + @Override + public void addNewMessage(MessagePayloadMessage msg) { + Long mid = new Long(msg.getMessageId()); + _availableMessages.put(mid, msg); + long id = msg.getMessageId(); + byte data[] = msg.getPayload().getUnencryptedData(); + if ((data == null) || (data.length <= 0)) { + if (_log.shouldLog(Log.CRIT)) + _log.log(Log.CRIT, getPrefix() + "addNewMessage of a message with no unencrypted data", + new Exception("Empty message")); + return; + } + int size = data.length; + if (size < 10) { + _log.error(getPrefix() + "length too short for gzip header: " + size); + return; + } + ((MuxedAvailabilityNotifier)_availabilityNotifier).available(id, size, getProto(msg), + getFromPort(msg), getToPort(msg)); + SimpleScheduler.getInstance().addEvent(new VerifyUsage(mid), 30*1000); + } + + protected class MuxedAvailabilityNotifier extends AvailabilityNotifier { + private LinkedBlockingQueue _msgs; + private boolean _alive; + private static final int POISON_SIZE = -99999; + + public MuxedAvailabilityNotifier() { + _msgs = new LinkedBlockingQueue(); + } + + public void stopNotifying() { + _msgs.clear(); + if (_alive) { + _alive = false; + try { + _msgs.put(new MsgData(0, POISON_SIZE, 0, 0, 0)); + } catch (InterruptedException ie) {} + } + } + + /** unused */ + public void available(long msgId, int size) { throw new 
IllegalArgumentException("no"); } + + public void available(long msgId, int size, int proto, int fromPort, int toPort) { + try { + _msgs.put(new MsgData((int)(msgId & 0xffffffff), size, proto, fromPort, toPort)); + } catch (InterruptedException ie) {} + } + + public void run() { + _alive = true; + while (true) { + MsgData msg; + try { + msg = _msgs.take(); + } catch (InterruptedException ie) { + continue; + } + if (msg.size == POISON_SIZE) + break; + try { + _demultiplexer.messageAvailable(I2PSessionMuxedImpl.this, msg.id, + msg.size, msg.proto, msg.fromPort, msg.toPort); + } catch (Exception e) { + _log.error("Error notifying app of message availability"); + } + } + } + } + + /** let's keep this simple */ + private static class MsgData { + public int id, size, proto, fromPort, toPort; + public MsgData(int i, int s, int p, int f, int t) { + id = i; + size = s; + proto = p; + fromPort = f; + toPort = t; + } + } + + /** + * No, we couldn't put any protocol byte in front of everything and + * keep backward compatibility. But there are several bytes that + * are unused AND unchecked in the gzip header in releases <= 0.7. + * So let's use 5 of them for a protocol and two 2-byte ports. + * + * Following are all the methods to hide the + * protocol, fromPort, and toPort in the gzip header + * + * The fields used are all ignored on receive in ResettableGzipInputStream + * + * See also ResettableGzipOutputStream. + * Ref: RFC 1952 + * + */ + + /** OS byte in gzip header */ + private static final int PROTO_BYTE = 9; + + /** Upper two bytes of MTIME in gzip header */ + private static final int FROMPORT_BYTES = 4; + + /** Lower two bytes of MTIME in gzip header */ + private static final int TOPORT_BYTES = 6; + + /** Non-muxed sets the OS byte to 0xff */ + private static int getProto(MessagePayloadMessage msg) { + int rv = getByte(msg, PROTO_BYTE) & 0xff; + return rv == 0xff ? 
PROTO_UNSPECIFIED : rv; + } + + /** Non-muxed sets the MTIME bytes to 0 */ + private static int getFromPort(MessagePayloadMessage msg) { + return (((getByte(msg, FROMPORT_BYTES) & 0xff) << 8) | + (getByte(msg, FROMPORT_BYTES + 1) & 0xff)); + } + + /** Non-muxed sets the MTIME bytes to 0 */ + private static int getToPort(MessagePayloadMessage msg) { + return (((getByte(msg, TOPORT_BYTES) & 0xff) << 8) | + (getByte(msg, TOPORT_BYTES + 1) & 0xff)); + } + + private static int getByte(MessagePayloadMessage msg, int i) { + return msg.getPayload().getUnencryptedData()[i] & 0xff; + } + + private static void setProto(byte[] payload, int p) { + payload[PROTO_BYTE] = (byte) (p & 0xff); + } + + private static void setFromPort(byte[] payload, int p) { + payload[FROMPORT_BYTES] = (byte) ((p >> 8) & 0xff); + payload[FROMPORT_BYTES + 1] = (byte) (p & 0xff); + } + + private static void setToPort(byte[] payload, int p) { + payload[TOPORT_BYTES] = (byte) ((p >> 8) & 0xff); + payload[TOPORT_BYTES + 1] = (byte) (p & 0xff); + } +} diff --git a/core/java/src/net/i2p/client/I2PSessionMuxedListener.java b/core/java/src/net/i2p/client/I2PSessionMuxedListener.java new file mode 100644 index 000000000..118dc75ca --- /dev/null +++ b/core/java/src/net/i2p/client/I2PSessionMuxedListener.java @@ -0,0 +1,62 @@ +package net.i2p.client; + +/* + * public domain + */ + +/** + * Define a means for the router to asynchronously notify the client that a + * new message is available or the router is under attack. + * + * @author zzz extends I2PSessionListener + */ +public interface I2PSessionMuxedListener extends I2PSessionListener { + + /** + * Will be called only if you register via + * setSessionListener() or addSessionListener(). + * And if you are doing that, just use I2PSessionListener. + * + * If you register via addSessionListener(), + * this will be called only for the proto(s) and toport(s) you register for. 
+ * + * @param session session to notify + * @param msgId message number available + * @param size size of the message - why it's a long and not an int is a mystery + */ + void messageAvailable(I2PSession session, int msgId, long size); + + /** + * Instruct the client that the given session has received a message + * + * Will be called only if you register via addMuxedSessionListener(). + * Will be called only for the proto(s) and toport(s) you register for. + * + * @param session session to notify + * @param msgId message number available + * @param size size of the message - why it's a long and not an int is a mystery + * @param proto 1-254 or 0 for unspecified + * @param fromport 1-65535 or 0 for unspecified + * @param toport 1-65535 or 0 for unspecified + */ + void messageAvailable(I2PSession session, int msgId, long size, int proto, int fromport, int toport); + + /** Instruct the client that the session specified seems to be under attack + * and that the client may wish to move its destination to another router. + * @param session session to report abuse to + * @param severity how bad the abuse is + */ + void reportAbuse(I2PSession session, int severity); + + /** + * Notify the client that the session has been terminated + * + */ + void disconnected(I2PSession session); + + /** + * Notify the client that some error occurred + * + */ + void errorOccurred(I2PSession session, String message, Throwable error); +} diff --git a/core/java/src/net/i2p/client/I2PSimpleClient.java b/core/java/src/net/i2p/client/I2PSimpleClient.java new file mode 100644 index 000000000..9ce4b8d6f --- /dev/null +++ b/core/java/src/net/i2p/client/I2PSimpleClient.java @@ -0,0 +1,47 @@ +package net.i2p.client; + +/* + * Released into the public domain + * with no warranty of any kind, either expressed or implied. 
+ */ + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Properties; + +import net.i2p.I2PAppContext; +import net.i2p.I2PException; +import net.i2p.data.Certificate; +import net.i2p.data.Destination; + +/** + * Simple client implementation with no Destination, + * just used to talk to the router. + */ +public class I2PSimpleClient implements I2PClient { + /** Don't do this */ + public Destination createDestination(OutputStream destKeyStream) throws I2PException, IOException { + return null; + } + + /** or this */ + public Destination createDestination(OutputStream destKeyStream, Certificate cert) throws I2PException, IOException { + return null; + } + + /** + * Create a new session (though do not connect it yet) + * + */ + public I2PSession createSession(InputStream destKeyStream, Properties options) throws I2PSessionException { + return createSession(I2PAppContext.getGlobalContext(), options); + } + /** + * Create a new session (though do not connect it yet) + * + */ + public I2PSession createSession(I2PAppContext context, Properties options) throws I2PSessionException { + return new I2PSimpleSession(context, options); + } +} diff --git a/core/java/src/net/i2p/client/I2PSimpleSession.java b/core/java/src/net/i2p/client/I2PSimpleSession.java new file mode 100644 index 000000000..fcfafe767 --- /dev/null +++ b/core/java/src/net/i2p/client/I2PSimpleSession.java @@ -0,0 +1,123 @@ +package net.i2p.client; + +/* + * Released into the public domain + * with no warranty of any kind, either expressed or implied. 
+ */ + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.Socket; +import java.net.UnknownHostException; +import java.util.Properties; +import java.util.Set; + +import net.i2p.I2PAppContext; +import net.i2p.data.DataHelper; +import net.i2p.data.Destination; +import net.i2p.data.Hash; +import net.i2p.data.i2cp.DestLookupMessage; +import net.i2p.data.i2cp.DestReplyMessage; +import net.i2p.data.i2cp.I2CPMessageReader; +import net.i2p.util.I2PThread; +import net.i2p.util.Log; + +/** + * Create a new session for doing naming queries only. Do not create a Destination. + * Don't create a producer. Do not send/receive messages to other Destinations. + * Cannot handle multiple simultaneous queries atm. + * Could be expanded to ask the router other things. + */ +class I2PSimpleSession extends I2PSessionImpl2 { + private boolean _destReceived; + private Object _destReceivedLock; + private Destination _destination; + + /** + * Create a new session for doing naming queries only. Do not create a destination. + * + * @throws I2PSessionException if there is a problem + */ + public I2PSimpleSession(I2PAppContext context, Properties options) throws I2PSessionException { + _context = context; + _log = context.logManager().getLog(I2PSimpleSession.class); + _handlerMap = new SimpleMessageHandlerMap(context); + _closed = true; + _closing = false; + _availabilityNotifier = new AvailabilityNotifier(); + if (options == null) + options = System.getProperties(); + loadConfig(options); + } + + /** + * Connect to the router and establish a session. This call blocks until + * a session is granted. 
+ * + * @throws I2PSessionException if there is a configuration error or the router is + * not reachable + */ + public void connect() throws I2PSessionException { + _closed = false; + _availabilityNotifier.stopNotifying(); + I2PThread notifier = new I2PThread(_availabilityNotifier); + notifier.setName("Simple Notifier"); + notifier.setDaemon(true); + notifier.start(); + + try { + _socket = new Socket(_hostname, _portNum); + _out = _socket.getOutputStream(); + synchronized (_out) { + _out.write(I2PClient.PROTOCOL_BYTE); + } + InputStream in = _socket.getInputStream(); + _reader = new I2CPMessageReader(in, this); + _reader.startReading(); + + } catch (UnknownHostException uhe) { + _closed = true; + throw new I2PSessionException(getPrefix() + "Bad host ", uhe); + } catch (IOException ioe) { + _closed = true; + throw new I2PSessionException(getPrefix() + "Problem connecting to " + _hostname + " on port " + _portNum, ioe); + } + } + + /** called by the message handler */ + void destReceived(Destination d) { + _destReceived = true; + _destination = d; + synchronized (_destReceivedLock) { + _destReceivedLock.notifyAll(); + } + } + + public Destination lookupDest(Hash h) throws I2PSessionException { + if (_closed) + return null; + _destReceivedLock = new Object(); + sendMessage(new DestLookupMessage(h)); + for (int i = 0; i < 10 && !_destReceived; i++) { + try { + synchronized (_destReceivedLock) { + _destReceivedLock.wait(1000); + } + } catch (InterruptedException ie) {} + } + _destReceived = false; + return _destination; + } + + /** + * Only map message handlers that we will use + */ + class SimpleMessageHandlerMap extends I2PClientMessageHandlerMap { + public SimpleMessageHandlerMap(I2PAppContext context) { + int highest = DestReplyMessage.MESSAGE_TYPE; + _handlers = new I2CPMessageHandler[highest+1]; + _handlers[DestReplyMessage.MESSAGE_TYPE] = new DestReplyMessageHandler(context); + } + } +} diff --git a/core/java/src/net/i2p/client/RequestLeaseSetMessageHandler.java 
b/core/java/src/net/i2p/client/RequestLeaseSetMessageHandler.java index 7d6d816c1..e662f6572 100644 --- a/core/java/src/net/i2p/client/RequestLeaseSetMessageHandler.java +++ b/core/java/src/net/i2p/client/RequestLeaseSetMessageHandler.java @@ -21,6 +21,7 @@ import net.i2p.data.Lease; import net.i2p.data.LeaseSet; import net.i2p.data.PrivateKey; import net.i2p.data.PublicKey; +import net.i2p.data.SessionKey; import net.i2p.data.SigningPrivateKey; import net.i2p.data.SigningPublicKey; import net.i2p.data.i2cp.I2CPMessage; @@ -78,6 +79,18 @@ class RequestLeaseSetMessageHandler extends HandlerImpl { leaseSet.setEncryptionKey(li.getPublicKey()); leaseSet.setSigningKey(li.getSigningPublicKey()); + boolean encrypt = Boolean.valueOf(session.getOptions().getProperty("i2cp.encryptLeaseSet")).booleanValue(); + String sk = session.getOptions().getProperty("i2cp.leaseSetKey"); + if (encrypt && sk != null) { + SessionKey key = new SessionKey(); + try { + key.fromBase64(sk); + leaseSet.encrypt(key); + _context.keyRing().put(session.getMyDestination().calculateHash(), key); + } catch (DataFormatException dfe) { + _log.error("Bad leaseset key: " + sk); + } + } try { leaseSet.sign(session.getPrivateKey()); session.getProducer().createLeaseSet(session, leaseSet, li.getSigningPrivateKey(), li.getPrivateKey()); @@ -137,4 +150,4 @@ class RequestLeaseSetMessageHandler extends HandlerImpl { && DataHelper.eq(_signingPrivKey, li.getSigningPrivateKey()); } } -} \ No newline at end of file +} diff --git a/core/java/src/net/i2p/client/SessionIdleTimer.java b/core/java/src/net/i2p/client/SessionIdleTimer.java new file mode 100644 index 000000000..f4661b73b --- /dev/null +++ b/core/java/src/net/i2p/client/SessionIdleTimer.java @@ -0,0 +1,117 @@ +package net.i2p.client; + +/* + * free (adj.): unencumbered; not under the control of others + * + */ + +import java.util.Properties; + +import net.i2p.I2PAppContext; +import net.i2p.data.DataHelper; +import net.i2p.util.Log; +import 
net.i2p.util.SimpleScheduler; +import net.i2p.util.SimpleTimer; + +/** + * Reduce tunnels or shutdown the session on idle if so configured + * + * @author zzz + */ +public class SessionIdleTimer implements SimpleTimer.TimedEvent { + public static final long MINIMUM_TIME = 5*60*1000; + private static final long DEFAULT_REDUCE_TIME = 20*60*1000; + private static final long DEFAULT_CLOSE_TIME = 30*60*1000; + private final static Log _log = new Log(SessionIdleTimer.class); + private I2PAppContext _context; + private I2PSessionImpl _session; + private boolean _reduceEnabled; + private int _reduceQuantity; + private long _reduceTime; + private boolean _shutdownEnabled; + private long _shutdownTime; + private long _minimumTime; + private long _lastActive; + + /** + * reduce, shutdown, or both must be true + */ + public SessionIdleTimer(I2PAppContext context, I2PSessionImpl session, boolean reduce, boolean shutdown) { + _context = context; + _session = session; + _reduceEnabled = reduce; + _shutdownEnabled = shutdown; + if (! (reduce || shutdown)) + throw new IllegalArgumentException("At least one must be enabled"); + Properties props = session.getOptions(); + _minimumTime = Long.MAX_VALUE; + _lastActive = 0; + if (reduce) { + _reduceQuantity = 1; + String p = props.getProperty("i2cp.reduceQuantity"); + if (p != null) { + try { + _reduceQuantity = Math.max(Integer.parseInt(p), 1); + // also check vs. configured quantities? 
+ } catch (NumberFormatException nfe) {} + } + _reduceTime = DEFAULT_REDUCE_TIME; + p = props.getProperty("i2cp.reduceIdleTime"); + if (p != null) { + try { + _reduceTime = Math.max(Long.parseLong(p), MINIMUM_TIME); + } catch (NumberFormatException nfe) {} + } + _minimumTime = _reduceTime; + } + if (shutdown) { + _shutdownTime = DEFAULT_CLOSE_TIME; + String p = props.getProperty("i2cp.closeIdleTime"); + if (p != null) { + try { + _shutdownTime = Math.max(Long.parseLong(p), MINIMUM_TIME); + } catch (NumberFormatException nfe) {} + } + _minimumTime = Math.min(_minimumTime, _shutdownTime); + if (reduce && _shutdownTime <= _reduceTime) + reduce = false; + } + } + + public void timeReached() { + if (_session.isClosed()) + return; + long now = _context.clock().now(); + long lastActivity = _session.lastActivity(); + if (_log.shouldLog(Log.INFO)) + _log.info("Fire idle timer, last activity: " + DataHelper.formatDuration(now - lastActivity) + " ago "); + long nextDelay = 0; + if (_shutdownEnabled && now - lastActivity >= _shutdownTime) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Closing on idle " + _session); + _session.destroySession(); + return; + } else if (lastActivity <= _lastActive && !_shutdownEnabled) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Still idle, sleeping again " + _session); + nextDelay = _reduceTime; + } else if (_reduceEnabled && now - lastActivity >= _reduceTime) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Reducing quantity on idle " + _session); + try { + _session.getProducer().updateTunnels(_session, _reduceQuantity); + } catch (I2PSessionException ise) { + _log.error("bork idle reduction " + ise); + } + _session.setReduced(); + _lastActive = lastActivity; + if (_shutdownEnabled) + nextDelay = _shutdownTime - (now - lastActivity); + else + nextDelay = _reduceTime; + } else { + nextDelay = _minimumTime - (now - lastActivity); + } + SimpleScheduler.getInstance().addEvent(this, nextDelay); + } +} diff --git 
a/core/java/src/net/i2p/client/naming/HostsTxtNamingService.java b/core/java/src/net/i2p/client/naming/HostsTxtNamingService.java index d4ee7e345..054bd9d8f 100644 --- a/core/java/src/net/i2p/client/naming/HostsTxtNamingService.java +++ b/core/java/src/net/i2p/client/naming/HostsTxtNamingService.java @@ -16,8 +16,10 @@ import java.util.Set; import java.util.StringTokenizer; import net.i2p.I2PAppContext; +import net.i2p.data.DataFormatException; import net.i2p.data.DataHelper; import net.i2p.data.Destination; +import net.i2p.data.Hash; import net.i2p.util.Log; /** @@ -39,6 +41,7 @@ public class HostsTxtNamingService extends NamingService { * given file for hostname=destKey values when resolving names */ public final static String PROP_HOSTS_FILE = "i2p.hostsfilelist"; + public final static String PROP_B32 = "i2p.naming.hostsTxt.useB32"; /** default hosts.txt filename */ public final static String DEFAULT_HOSTS_FILE = @@ -55,6 +58,8 @@ public class HostsTxtNamingService extends NamingService { return rv; } + private static final int BASE32_HASH_LENGTH = 52; // 1 + Hash.HASH_LENGTH * 8 / 5 + @Override public Destination lookup(String hostname) { Destination d = getCache(hostname); @@ -69,6 +74,16 @@ public class HostsTxtNamingService extends NamingService { return d; } + // Try Base32 decoding + if (hostname.length() == BASE32_HASH_LENGTH + 8 && hostname.endsWith(".b32.i2p") && + Boolean.valueOf(_context.getProperty(PROP_B32, "true")).booleanValue()) { + d = LookupDest.lookupBase32Hash(_context, hostname.substring(0, BASE32_HASH_LENGTH)); + if (d != null) { + putCache(hostname, d); + return d; + } + } + List filenames = getFilenames(); for (int i = 0; i < filenames.size(); i++) { String hostsfile = (String)filenames.get(i); @@ -122,4 +137,34 @@ public class HostsTxtNamingService extends NamingService { } return null; } + + @Override + public String reverseLookup(Hash h) { + List filenames = getFilenames(); + for (int i = 0; i < filenames.size(); i++) { + String 
hostsfile = (String)filenames.get(i); + Properties hosts = new Properties(); + try { + File f = new File(hostsfile); + if ( (f.exists()) && (f.canRead()) ) { + DataHelper.loadProps(hosts, f, true); + Set keyset = hosts.keySet(); + Iterator iter = keyset.iterator(); + while (iter.hasNext()) { + String host = (String)iter.next(); + String key = hosts.getProperty(host); + try { + Destination destkey = new Destination(); + destkey.fromBase64(key); + if (h.equals(destkey.calculateHash())) + return host; + } catch (DataFormatException dfe) {} + } + } + } catch (Exception ioe) { + _log.error("Error loading hosts file " + hostsfile, ioe); + } + } + return null; + } } diff --git a/core/java/src/net/i2p/client/naming/LookupDest.java b/core/java/src/net/i2p/client/naming/LookupDest.java new file mode 100644 index 000000000..775ae6bcc --- /dev/null +++ b/core/java/src/net/i2p/client/naming/LookupDest.java @@ -0,0 +1,72 @@ +/* + * Released into the public domain + * with no warranty of any kind, either expressed or implied. + */ +package net.i2p.client.naming; + +import java.util.Properties; + +import net.i2p.I2PAppContext; +import net.i2p.client.I2PSessionException; +import net.i2p.client.I2PClient; +import net.i2p.client.I2PSession; +import net.i2p.client.I2PSimpleClient; +import net.i2p.data.Base32; +import net.i2p.data.Base64; +import net.i2p.data.Destination; +import net.i2p.data.Hash; +import net.i2p.data.LeaseSet; + +/** + * Connect via I2CP and ask the router to look up + * the lease of a hash, convert it to a Destination and return it. + * Obviously this can take a while. + * + * All calls are blocking and return null on failure. + * Timeout is set to 10 seconds in I2PSimpleSession. 
+ */ +class LookupDest { + + protected LookupDest(I2PAppContext context) {} + + static Destination lookupBase32Hash(I2PAppContext ctx, String key) { + byte[] h = Base32.decode(key); + if (h == null) + return null; + return lookupHash(ctx, h); + } + + /* Might be useful but not in the context of urls due to upper/lower case */ + /**** + static Destination lookupBase64Hash(I2PAppContext ctx, String key) { + byte[] h = Base64.decode(key); + if (h == null) + return null; + return lookupHash(ctx, h); + } + ****/ + + static Destination lookupHash(I2PAppContext ctx, byte[] h) { + Hash key = new Hash(h); + Destination rv = null; + try { + I2PClient client = new I2PSimpleClient(); + Properties opts = new Properties(); + String s = ctx.getProperty(I2PClient.PROP_TCP_HOST); + if (s != null) + opts.put(I2PClient.PROP_TCP_HOST, s); + s = ctx.getProperty(I2PClient.PROP_TCP_PORT); + if (s != null) + opts.put(I2PClient.PROP_TCP_PORT, s); + I2PSession session = client.createSession(null, opts); + session.connect(); + rv = session.lookupDest(key); + session.destroySession(); + } catch (I2PSessionException ise) {} + return rv; + } + + public static void main(String args[]) { + System.out.println(lookupBase32Hash(I2PAppContext.getGlobalContext(), args[0])); + } +} diff --git a/core/java/src/net/i2p/client/naming/NamingService.java b/core/java/src/net/i2p/client/naming/NamingService.java index 5b61b1bcf..ee02ec911 100644 --- a/core/java/src/net/i2p/client/naming/NamingService.java +++ b/core/java/src/net/i2p/client/naming/NamingService.java @@ -16,6 +16,7 @@ import java.util.Map; import net.i2p.I2PAppContext; import net.i2p.data.DataFormatException; import net.i2p.data.Destination; +import net.i2p.data.Hash; import net.i2p.util.Log; /** @@ -61,6 +62,7 @@ public abstract class NamingService { * null if no reverse lookup is possible. 
*/ public abstract String reverseLookup(Destination dest); + public String reverseLookup(Hash h) { return null; }; /** * Check if host name is valid Base64 encoded dest and return this diff --git a/core/java/src/net/i2p/client/package.html b/core/java/src/net/i2p/client/package.html index 2f67c3688..000a5584d 100644 --- a/core/java/src/net/i2p/client/package.html +++ b/core/java/src/net/i2p/client/package.html @@ -15,7 +15,7 @@ receives asynchronous notification of network activity by providing an implement of {@link net.i2p.client.I2PSessionListener}.

      A simple example of how these base client classes can be used is the -{@link net.i2p.client.ATalk} application. It isn't really useful, but it is +ATalk application. It isn't really useful, but it is heavily documented code.

      This client package provides the basic necessity for communicating over I2P, diff --git a/core/java/src/net/i2p/crypto/TransientSessionKeyManager.java b/core/java/src/net/i2p/crypto/TransientSessionKeyManager.java index 1b160f8dd..0d71677a9 100644 --- a/core/java/src/net/i2p/crypto/TransientSessionKeyManager.java +++ b/core/java/src/net/i2p/crypto/TransientSessionKeyManager.java @@ -24,6 +24,7 @@ import net.i2p.data.PublicKey; import net.i2p.data.SessionKey; import net.i2p.data.SessionTag; import net.i2p.util.Log; +import net.i2p.util.SimpleScheduler; import net.i2p.util.SimpleTimer; /** @@ -70,7 +71,7 @@ class TransientSessionKeyManager extends SessionKeyManager { _inboundTagSets = new HashMap(1024); context.statManager().createRateStat("crypto.sessionTagsExpired", "How many tags/sessions are expired?", "Encryption", new long[] { 10*60*1000, 60*60*1000, 3*60*60*1000 }); context.statManager().createRateStat("crypto.sessionTagsRemaining", "How many tags/sessions are remaining after a cleanup?", "Encryption", new long[] { 10*60*1000, 60*60*1000, 3*60*60*1000 }); - SimpleTimer.getInstance().addEvent(new CleanupEvent(), 60*1000); + SimpleScheduler.getInstance().addPeriodicEvent(new CleanupEvent(), 60*1000); } private TransientSessionKeyManager() { this(null); } @@ -80,7 +81,6 @@ class TransientSessionKeyManager extends SessionKeyManager { int expired = aggressiveExpire(); long expireTime = _context.clock().now() - beforeExpire; _context.statManager().addRateData("crypto.sessionTagsExpired", expired, expireTime); - SimpleTimer.getInstance().addEvent(CleanupEvent.this, 60*1000); } } diff --git a/core/java/src/net/i2p/data/Base32.java b/core/java/src/net/i2p/data/Base32.java new file mode 100644 index 000000000..b2cc2d548 --- /dev/null +++ b/core/java/src/net/i2p/data/Base32.java @@ -0,0 +1,245 @@ +package net.i2p.data; + +/* + * Released into the public domain + * with no warranty of any kind, either expressed or implied. 
+ */ + +import java.io.ByteArrayOutputStream; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +import net.i2p.util.Log; + +/** + * Encodes and decodes to and from Base32 notation. + * Ref: RFC 3548 + * + * Don't bother with '=' padding characters on encode or + * accept them on decode (i.e. don't require 5-character groups). + * No whitespace allowed. + * + * Decode accepts upper or lower case. + */ +public class Base32 { + + private final static Log _log = new Log(Base32.class); + + /** The 64 valid Base32 values. */ + private final static char[] ALPHABET = {'a', 'b', 'c', 'd', + 'e', 'f', 'g', 'h', 'i', 'j', + 'k', 'l', 'm', 'n', 'o', 'p', + 'q', 'r', 's', 't', 'u', 'v', + 'w', 'x', 'y', 'z', + '2', '3', '4', '5', '6', '7'}; + + /** + * Translates a Base32 value to either its 5-bit reconstruction value + * or a negative number indicating some other meaning. + * Allow upper or lower case. + **/ + private final static byte[] DECODABET = { + 26, 27, 28, 29, 30, 31, -9, -9, // Numbers two through nine + -9, -9, -9, // Decimal 58 - 60 + -1, // Equals sign at decimal 61 + -9, -9, -9, // Decimal 62 - 64 + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, // Letters 'A' through 'M' + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, // Letters 'N' through 'Z' + -9, -9, -9, -9, -9, -9, // Decimal 91 - 96 + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, // Letters 'a' through 'm' + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, // Letters 'n' through 'z' + -9, -9, -9, -9, -9 // Decimal 123 - 127 + }; + + private final static byte BAD_ENCODING = -9; // Indicates error in encoding + private final static byte EQUALS_SIGN_ENC = -1; // Indicates equals sign in encoding + + /** Defeats instantiation. 
*/ + private Base32() { // nop + } + + public static void main(String[] args) { + if (args.length == 0) { + help(); + return; + } + runApp(args); + } + + private static void runApp(String args[]) { + try { + if ("encodestring".equalsIgnoreCase(args[0])) { + System.out.println(encode(args[1].getBytes())); + return; + } + InputStream in = System.in; + OutputStream out = System.out; + if (args.length >= 3) { + out = new FileOutputStream(args[2]); + } + if (args.length >= 2) { + in = new FileInputStream(args[1]); + } + if ("encode".equalsIgnoreCase(args[0])) { + encode(in, out); + return; + } + if ("decode".equalsIgnoreCase(args[0])) { + decode(in, out); + return; + } + } catch (IOException ioe) { + ioe.printStackTrace(System.err); + } + } + + private static byte[] read(InputStream in) throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); + byte buf[] = new byte[4096]; + while (true) { + int read = in.read(buf); + if (read < 0) break; + baos.write(buf, 0, read); + } + return baos.toByteArray(); + } + + private static void encode(InputStream in, OutputStream out) throws IOException { + String encoded = encode(read(in)); + for (int i = 0; i < encoded.length(); i++) + out.write((byte)(encoded.charAt(i) & 0xFF)); + } + + private static void decode(InputStream in, OutputStream out) throws IOException { + byte decoded[] = decode(new String(read(in))); + if (decoded == null) { + System.out.println("FAIL"); + return; + } + out.write(decoded); + } + + private static void help() { + System.out.println("Syntax: Base32 encode "); + System.out.println("or : Base32 encode "); + System.out.println("or : Base32 encodestring "); + System.out.println("or : Base32 encode"); + System.out.println("or : Base32 decode "); + System.out.println("or : Base32 decode "); + System.out.println("or : Base32 decode"); + } + + public static String encode(String source) { + return (source != null ? 
encode(source.getBytes()) : ""); + } + + public static String encode(byte[] source) { + StringBuffer buf = new StringBuffer((source.length + 7) * 8 / 5); + encodeBytes(source, buf); + return buf.toString(); + } + + private final static byte[] emask = { (byte) 0x1f, + (byte) 0x01, (byte) 0x03, (byte) 0x07, (byte) 0x0f }; + /** + * Encodes a byte array into Base32 notation. + * + * @param source The data to convert + */ + private static void encodeBytes(byte[] source, StringBuffer out) { + int usedbits = 0; + for (int i = 0; i < source.length; ) { + int fivebits; + if (usedbits < 3) { + fivebits = (source[i] >> (3 - usedbits)) & 0x1f; + usedbits += 5; + } else if (usedbits == 3) { + fivebits = source[i++] & 0x1f; + usedbits = 0; + } else { + fivebits = (source[i++] << (usedbits - 3)) & 0x1f; + if (i < source.length) { + usedbits -= 3; + fivebits |= (source[i] >> (8 - usedbits)) & emask[usedbits]; + } + } + out.append(ALPHABET[fivebits]); + } + } + + /** + * Decodes data from Base32 notation and + * returns it as a string. + * + * @param s the string to decode + * @return The data as a string or null on failure + */ + public static String decodeToString(String s) { + byte[] b = decode(s); + if (b == null) + return null; + return new String(b); + } + + public static byte[] decode(String s) { + return decode(s.getBytes()); + } + + private final static byte[] dmask = { (byte) 0xf8, (byte) 0x7c, (byte) 0x3e, (byte) 0x1f, + (byte) 0x0f, (byte) 0x07, (byte) 0x03, (byte) 0x01 }; + /** + * Decodes Base32 content in byte array format and returns + * the decoded byte array. 
+ * + * @param source The Base32 encoded data + * @return decoded data + */ + private static byte[] decode(byte[] source) { + int len58; + if (source.length <= 1) + len58 = source.length; + else + len58 = source.length * 5 / 8; + byte[] outBuff = new byte[len58]; + int outBuffPosn = 0; + + int usedbits = 0; + for (int i = 0; i < source.length; i++) { + int fivebits; + if ((source[i] & 0x80) != 0 || source[i] < '2' || source[i] > 'z') + fivebits = BAD_ENCODING; + else + fivebits = DECODABET[source[i] - '2']; + + if (fivebits >= 0) { + if (usedbits == 0) { + outBuff[outBuffPosn] = (byte) ((fivebits << 3) & 0xf8); + usedbits = 5; + } else if (usedbits < 3) { + outBuff[outBuffPosn] |= (fivebits << (3 - usedbits)) & dmask[usedbits]; + usedbits += 5; + } else if (usedbits == 3) { + outBuff[outBuffPosn++] |= fivebits; + usedbits = 0; + } else { + outBuff[outBuffPosn++] |= (fivebits >> (usedbits - 3)) & dmask[usedbits]; + byte next = (byte) (fivebits << (11 - usedbits)); + if (outBuffPosn < len58) { + outBuff[outBuffPosn] = next; + usedbits -= 3; + } else if (next != 0) { + _log.warn("Extra data at the end: " + next + "(decimal)"); + return null; + } + } + } else { + _log.warn("Bad Base32 input character at " + i + ": " + source[i] + "(decimal)"); + return null; + } + } + return outBuff; + } +} diff --git a/core/java/src/net/i2p/data/DataHelper.java b/core/java/src/net/i2p/data/DataHelper.java index 835e6a0dd..53e32a347 100644 --- a/core/java/src/net/i2p/data/DataHelper.java +++ b/core/java/src/net/i2p/data/DataHelper.java @@ -25,6 +25,7 @@ import java.io.OutputStream; import java.io.PrintWriter; import java.io.UnsupportedEncodingException; import java.math.BigInteger; +import java.text.DecimalFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -235,7 +236,7 @@ public class DataHelper { int split = line.indexOf('='); if (split <= 0) continue; String key = line.substring(0, split); - String val = line.substring(split+1); + String val 
= line.substring(split+1); //.trim() ?????????????? // Unescape line breaks after loading. // Remember: "\" needs escaping both for regex and string. val = val.replaceAll("\\\\r","\r"); @@ -343,8 +344,9 @@ public class DataHelper { long rv = 0; for (int i = 0; i < numBytes; i++) { - long cur = rawStream.read() & 0xFF; + long cur = rawStream.read(); if (cur == -1) throw new DataFormatException("Not enough bytes for the field"); + cur &= 0xFF; // we loop until we find a nonzero byte (or we reach the end) if (cur != 0) { // ok, data found, now iterate through it to fill the rv @@ -354,9 +356,10 @@ public class DataHelper { cur = cur << shiftAmount; rv += cur; if (j + 1 < remaining) { - cur = rawStream.read() & 0xFF; + cur = rawStream.read(); if (cur == -1) throw new DataFormatException("Not enough bytes for the field"); + cur &= 0xFF; } } break; @@ -842,6 +845,29 @@ public class DataHelper { } } + /** + * Caller should append 'B' or 'b' as appropriate + */ + public static String formatSize(long bytes) { + double val = bytes; + int scale = 0; + while (val >= 1024) { + scale++; + val /= 1024; + } + + DecimalFormat fmt = new DecimalFormat("##0.00"); + + String str = fmt.format(val); + switch (scale) { + case 1: return str + "K"; + case 2: return str + "M"; + case 3: return str + "G"; + case 4: return str + "T"; + default: return bytes + ""; + } + } + /** * Strip out any HTML (simply removing any less than / greater than symbols) */ diff --git a/core/java/src/net/i2p/data/LeaseSet.java b/core/java/src/net/i2p/data/LeaseSet.java index 7dd74a9d7..8a05dd956 100644 --- a/core/java/src/net/i2p/data/LeaseSet.java +++ b/core/java/src/net/i2p/data/LeaseSet.java @@ -9,6 +9,7 @@ package net.i2p.data; * */ +import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; @@ -17,13 +18,34 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import net.i2p.I2PAppContext; import 
net.i2p.crypto.DSAEngine; import net.i2p.util.Clock; import net.i2p.util.Log; +import net.i2p.util.RandomSource; /** * Defines the set of leases a destination currently has. * + * Support encryption and decryption with a supplied key. + * Only the gateways and tunnel IDs in the individual + * leases are encrypted. + * + * Encrypted leases are not indicated as such. + * The only way to tell a lease is encrypted is to + * determine that the listed gateways do not exist. + * Routers wishing to decrypt a leaseset must have the + * desthash and key in their keyring. + * This is required for the local router as well, since + * the encryption is done on the client side of I2CP, the + * router must decrypt it back again for local usage + * (but not for transmission to the floodfills) + * + * Decrypted leases are only available through the getLease() + * method, so that storage and network transmission via + * writeBytes() will output the original encrypted + * leases and the original leaseset signature. 
+ * * @author jrandom */ public class LeaseSet extends DataStructureImpl { @@ -40,6 +62,9 @@ public class LeaseSet extends DataStructureImpl { // Store these since isCurrent() and getEarliestLeaseDate() are called frequently private long _firstExpiration; private long _lastExpiration; + private List _decryptedLeases; + private boolean _decrypted; + private boolean _checked; /** This seems like plenty */ private final static int MAX_LEASES = 6; @@ -55,6 +80,8 @@ public class LeaseSet extends DataStructureImpl { _receivedAsPublished = false; _firstExpiration = Long.MAX_VALUE; _lastExpiration = 0; + _decrypted = false; + _checked = false; } public Destination getDestination() { @@ -104,11 +131,17 @@ public class LeaseSet extends DataStructureImpl { } public int getLeaseCount() { - return _leases.size(); + if (isEncrypted()) + return _leases.size() - 1; + else + return _leases.size(); } public Lease getLease(int index) { - return (Lease) _leases.get(index); + if (isEncrypted()) + return (Lease) _decryptedLeases.get(index); + else + return (Lease) _leases.get(index); } public Signature getSignature() { @@ -335,4 +368,139 @@ public class LeaseSet extends DataStructureImpl { buf.append("]"); return buf.toString(); } + + private static final int DATA_LEN = Hash.HASH_LENGTH + 4; + private static final int IV_LEN = 16; + + /** + * Encrypt the gateway and tunnel ID of each lease, leaving the expire dates unchanged. + * This adds an extra dummy lease, because AES data must be padded to 16 bytes. + * The fact that it is encrypted is not stored anywhere. + * Must be called after all the leases are in place, but before sign(). 
+ */ + public void encrypt(SessionKey key) { + if (_log.shouldLog(Log.WARN)) + _log.warn("encrypting lease: " + _destination.calculateHash()); + try { + encryp(key); + } catch (DataFormatException dfe) { + _log.error("Error encrypting lease: " + _destination.calculateHash()); + } catch (IOException ioe) { + _log.error("Error encrypting lease: " + _destination.calculateHash()); + } + } + + /** + * - Put the {Gateway Hash, TunnelID} pairs for all the leases in a buffer + * - Pad with random data to a multiple of 16 bytes + * - Use the first part of the dest's public key as an IV + * - Encrypt + * - Pad with random data to a multiple of 36 bytes + * - Add an extra lease + * - Replace the Hash and TunnelID in each Lease + */ + private void encryp(SessionKey key) throws DataFormatException, IOException { + int size = _leases.size(); + if (size < 1 || size > MAX_LEASES-1) + throw new IllegalArgumentException("Bad number of leases for encryption"); + int datalen = ((DATA_LEN * size / 16) + 1) * 16; + ByteArrayOutputStream baos = new ByteArrayOutputStream(datalen); + for (int i = 0; i < size; i++) { + ((Lease)_leases.get(i)).getGateway().writeBytes(baos); + ((Lease)_leases.get(i)).getTunnelId().writeBytes(baos); + } + // pad out to multiple of 16 with random data before encryption + int padlen = datalen - (DATA_LEN * size); + byte[] pad = new byte[padlen]; + RandomSource.getInstance().nextBytes(pad); + baos.write(pad); + byte[] iv = new byte[IV_LEN]; + System.arraycopy(_destination.getPublicKey().getData(), 0, iv, 0, IV_LEN); + byte[] enc = new byte[DATA_LEN * (size + 1)]; + I2PAppContext.getGlobalContext().aes().encrypt(baos.toByteArray(), 0, enc, 0, key, iv, datalen); + // pad out to multiple of 36 with random data after encryption + // (even for 4 leases, where 36*4 is a multiple of 16, we add another, just to be consistent) + padlen = enc.length - datalen; + pad = new byte[padlen]; + RandomSource.getInstance().nextBytes(pad); + System.arraycopy(pad, 0, enc, datalen, 
padlen); + // add the padded lease... + Lease padLease = new Lease(); + padLease.setEndDate(((Lease)_leases.get(0)).getEndDate()); + _leases.add(padLease); + // ...and replace all the gateways and tunnel ids + ByteArrayInputStream bais = new ByteArrayInputStream(enc); + for (int i = 0; i < size+1; i++) { + Hash h = new Hash(); + h.readBytes(bais); + ((Lease)_leases.get(i)).setGateway(h); + TunnelId t = new TunnelId(); + t.readBytes(bais); + ((Lease)_leases.get(i)).setTunnelId(t); + } + } + + /** + * Decrypt the leases, except for the last one which is partially padding. + * Store the new decrypted leases in a backing store, + * and keep the original leases so that verify() still works and the + * encrypted leaseset can be sent on to others (via writeBytes()) + */ + private void decrypt(SessionKey key) throws DataFormatException, IOException { + if (_log.shouldLog(Log.WARN)) + _log.warn("decrypting lease: " + _destination.calculateHash()); + int size = _leases.size(); + if (size < 2) + throw new DataFormatException("Bad number of leases for decryption"); + int datalen = DATA_LEN * size; + ByteArrayOutputStream baos = new ByteArrayOutputStream(datalen); + for (int i = 0; i < size; i++) { + ((Lease)_leases.get(i)).getGateway().writeBytes(baos); + ((Lease)_leases.get(i)).getTunnelId().writeBytes(baos); + } + byte[] iv = new byte[IV_LEN]; + System.arraycopy(_destination.getPublicKey().getData(), 0, iv, 0, IV_LEN); + int enclen = ((DATA_LEN * (size - 1) / 16) + 1) * 16; + byte[] enc = new byte[enclen]; + System.arraycopy(baos.toByteArray(), 0, enc, 0, enclen); + byte[] dec = new byte[enclen]; + I2PAppContext.getGlobalContext().aes().decrypt(enc, 0, dec, 0, key, iv, enclen); + ByteArrayInputStream bais = new ByteArrayInputStream(dec); + _decryptedLeases = new ArrayList(size - 1); + for (int i = 0; i < size-1; i++) { + Lease l = new Lease(); + Hash h = new Hash(); + h.readBytes(bais); + l.setGateway(h); + TunnelId t = new TunnelId(); + t.readBytes(bais); + 
l.setTunnelId(t); + l.setEndDate(((Lease)_leases.get(i)).getEndDate()); + _decryptedLeases.add(l); + } + } + + /** + * @return true if it was encrypted, and we decrypted it successfully. + * Decrypts on first call. + */ + private synchronized boolean isEncrypted() { + if (_decrypted) + return true; + if (_checked || _destination == null) + return false; + SessionKey key = I2PAppContext.getGlobalContext().keyRing().get(_destination.calculateHash()); + if (key != null) { + try { + decrypt(key); + _decrypted = true; + } catch (DataFormatException dfe) { + _log.error("Error decrypting lease: " + _destination.calculateHash() + dfe); + } catch (IOException ioe) { + _log.error("Error decrypting lease: " + _destination.calculateHash() + ioe); + } + } + _checked = true; + return _decrypted; + } } diff --git a/core/java/src/net/i2p/data/PrivateKeyFile.java b/core/java/src/net/i2p/data/PrivateKeyFile.java index 7680204d1..b5d68ee41 100644 --- a/core/java/src/net/i2p/data/PrivateKeyFile.java +++ b/core/java/src/net/i2p/data/PrivateKeyFile.java @@ -77,74 +77,25 @@ public class PrivateKeyFile { verifySignature(d); if (args.length == 1) return; - Certificate c = new Certificate(); if (args[0].equals("-n")) { // Cert constructor generates a null cert + pkf.setCertType(Certificate.CERTIFICATE_TYPE_NULL); } else if (args[0].equals("-u")) { - c.setCertificateType(99); + pkf.setCertType(99); } else if (args[0].equals("-x")) { - c.setCertificateType(Certificate.CERTIFICATE_TYPE_HIDDEN); + pkf.setCertType(Certificate.CERTIFICATE_TYPE_HIDDEN); } else if (args[0].equals("-h")) { int hashEffort = HASH_EFFORT; if (args.length == 3) hashEffort = Integer.parseInt(args[1]); System.out.println("Estimating hashcash generation time, stand by..."); - // takes a lot longer than the estimate usually... - // maybe because the resource string is much longer than used in the estimate? 
- long low = HashCash.estimateTime(hashEffort); - System.out.println("It is estimated this will take " + DataHelper.formatDuration(low) + - " to " + DataHelper.formatDuration(4*low)); - - long begin = System.currentTimeMillis(); - System.out.println("Starting hashcash generation now..."); - String resource = d.getPublicKey().toBase64() + d.getSigningPublicKey().toBase64(); - HashCash hc = HashCash.mintCash(resource, hashEffort); - System.out.println("Generation took: " + DataHelper.formatDuration(System.currentTimeMillis() - begin)); - System.out.println("Full Hashcash is: " + hc); - // Take the resource out of the stamp - String hcs = hc.toString(); - int end1 = 0; - for (int i = 0; i < 3; i++) { - end1 = 1 + hcs.indexOf(':', end1); - if (end1 < 0) { - System.out.println("Bad hashcash"); - return; - } - } - int start2 = hcs.indexOf(':', end1); - if (start2 < 0) { - System.out.println("Bad hashcash"); - return; - } - hcs = hcs.substring(0, end1) + hcs.substring(start2); - System.out.println("Short Hashcash is: " + hcs); - - c.setCertificateType(Certificate.CERTIFICATE_TYPE_HASHCASH); - c.setPayload(hcs.getBytes()); + System.out.println(estimateHashCashTime(hashEffort)); + pkf.setHashCashCert(hashEffort); } else if (args.length == 3 && args[0].equals("-s")) { // Sign dest1 with dest2's Signing Private Key - File f2 = new File(args[2]); - I2PClient client2 = I2PClientFactory.createClient(); - PrivateKeyFile pkf2 = new PrivateKeyFile(f2, client2); - Destination d2 = pkf2.getDestination(); - SigningPrivateKey spk2 = pkf2.getSigningPrivKey(); - System.out.println("Signing With Dest:"); - System.out.println(pkf2.toString()); - - int len = PublicKey.KEYSIZE_BYTES + SigningPublicKey.KEYSIZE_BYTES; // no cert - byte[] data = new byte[len]; - System.arraycopy(d.getPublicKey().getData(), 0, data, 0, PublicKey.KEYSIZE_BYTES); - System.arraycopy(d.getSigningPublicKey().getData(), 0, data, PublicKey.KEYSIZE_BYTES, SigningPublicKey.KEYSIZE_BYTES); - byte[] payload = new 
byte[Hash.HASH_LENGTH + Signature.SIGNATURE_BYTES]; - byte[] sig = DSAEngine.getInstance().sign(new ByteArrayInputStream(data), spk2).getData(); - System.arraycopy(sig, 0, payload, 0, Signature.SIGNATURE_BYTES); - // Add dest2's Hash for reference - byte[] h2 = d2.calculateHash().getData(); - System.arraycopy(h2, 0, payload, Signature.SIGNATURE_BYTES, Hash.HASH_LENGTH); - c.setCertificateType(Certificate.CERTIFICATE_TYPE_SIGNED); - c.setPayload(payload); + PrivateKeyFile pkf2 = new PrivateKeyFile(args[2]); + pkf.setSignedCert(pkf2); } - d.setCertificate(c); // do this rather than just change the existing cert so the hash is recalculated System.out.println("New signed destination is:"); System.out.println(pkf); pkf.write(); @@ -154,7 +105,10 @@ public class PrivateKeyFile { } } - + public PrivateKeyFile(String file) { + this(new File(file), I2PClientFactory.createClient()); + } + public PrivateKeyFile(File file, I2PClient client) { this.file = file; this.client = client; @@ -176,7 +130,7 @@ public class PrivateKeyFile { return getDestination(); } - /** Also sets the local privKay and signingPrivKey */ + /** Also sets the local privKey and signingPrivKey */ public Destination getDestination() throws I2PSessionException, IOException, DataFormatException { if (dest == null) { I2PSession s = open(); @@ -188,6 +142,86 @@ public class PrivateKeyFile { } return this.dest; } + + public void setDestination(Destination d) { + this.dest = d; + } + + /** change cert type - caller must also call write() */ + public Certificate setCertType(int t) { + if (this.dest == null) + throw new IllegalArgumentException("Dest is null"); + Certificate c = new Certificate(); + c.setCertificateType(t); + this.dest.setCertificate(c); + return c; + } + + /** change to hashcash cert - caller must also call write() */ + public Certificate setHashCashCert(int effort) { + Certificate c = setCertType(Certificate.CERTIFICATE_TYPE_HASHCASH); + long begin = System.currentTimeMillis(); + 
System.out.println("Starting hashcash generation now..."); + String resource = this.dest.getPublicKey().toBase64() + this.dest.getSigningPublicKey().toBase64(); + HashCash hc; + try { + hc = HashCash.mintCash(resource, effort); + } catch (Exception e) { + return null; + } + System.out.println("Generation took: " + DataHelper.formatDuration(System.currentTimeMillis() - begin)); + System.out.println("Full Hashcash is: " + hc); + // Take the resource out of the stamp + String hcs = hc.toString(); + int end1 = 0; + for (int i = 0; i < 3; i++) { + end1 = 1 + hcs.indexOf(':', end1); + if (end1 < 0) { + System.out.println("Bad hashcash"); + return null; + } + } + int start2 = hcs.indexOf(':', end1); + if (start2 < 0) { + System.out.println("Bad hashcash"); + return null; + } + hcs = hcs.substring(0, end1) + hcs.substring(start2); + System.out.println("Short Hashcash is: " + hcs); + + c.setPayload(hcs.getBytes()); + return c; + } + + /** sign this dest by dest found in pkf2 - caller must also call write() */ + public Certificate setSignedCert(PrivateKeyFile pkf2) { + Certificate c = setCertType(Certificate.CERTIFICATE_TYPE_SIGNED); + Destination d2; + try { + d2 = pkf2.getDestination(); + } catch (Exception e) { + return null; + } + if (d2 == null) + return null; + SigningPrivateKey spk2 = pkf2.getSigningPrivKey(); + System.out.println("Signing With Dest:"); + System.out.println(pkf2.toString()); + + int len = PublicKey.KEYSIZE_BYTES + SigningPublicKey.KEYSIZE_BYTES; // no cert + byte[] data = new byte[len]; + System.arraycopy(this.dest.getPublicKey().getData(), 0, data, 0, PublicKey.KEYSIZE_BYTES); + System.arraycopy(this.dest.getSigningPublicKey().getData(), 0, data, PublicKey.KEYSIZE_BYTES, SigningPublicKey.KEYSIZE_BYTES); + byte[] payload = new byte[Hash.HASH_LENGTH + Signature.SIGNATURE_BYTES]; + byte[] sig = DSAEngine.getInstance().sign(new ByteArrayInputStream(data), spk2).getData(); + System.arraycopy(sig, 0, payload, 0, Signature.SIGNATURE_BYTES); + // Add dest2's 
Hash for reference + byte[] h2 = d2.calculateHash().getData(); + System.arraycopy(h2, 0, payload, Signature.SIGNATURE_BYTES, Hash.HASH_LENGTH); + c.setCertificateType(Certificate.CERTIFICATE_TYPE_SIGNED); + c.setPayload(payload); + return c; + } public PrivateKey getPrivKey() { return this.privKey; @@ -227,7 +261,7 @@ public class PrivateKeyFile { public String toString() { StringBuffer s = new StringBuffer(128); s.append("Dest: "); - s.append(this.dest.toBase64()); + s.append(this.dest != null ? this.dest.toBase64() : "null"); s.append("\nContains: "); s.append(this.dest); s.append("\nPrivate Key: "); @@ -238,7 +272,25 @@ public class PrivateKeyFile { return s.toString(); } - + public static String estimateHashCashTime(int hashEffort) { + if (hashEffort <= 0 || hashEffort > 160) + return "Bad HashCash value: " + hashEffort; + long low = Long.MAX_VALUE; + try { + low = HashCash.estimateTime(hashEffort); + } catch (Exception e) {} + // takes a lot longer than the estimate usually... + // maybe because the resource string is much longer than used in the estimate? + return "It is estimated that generating a HashCash Certificate with value " + hashEffort + + " for the Destination will take " + + ((low < 1000l * 24l * 60l * 60l * 1000l) + ? + "approximately " + DataHelper.formatDuration(low) + + " to " + DataHelper.formatDuration(4*low) + : + "longer than three years!" + ); + } /** * Sample code to verify a 3rd party signature. diff --git a/core/java/src/net/i2p/data/i2cp/DestLookupMessage.java b/core/java/src/net/i2p/data/i2cp/DestLookupMessage.java new file mode 100644 index 000000000..13135a2a4 --- /dev/null +++ b/core/java/src/net/i2p/data/i2cp/DestLookupMessage.java @@ -0,0 +1,76 @@ +package net.i2p.data.i2cp; + +/* + * Released into the public domain + * with no warranty of any kind, either expressed or implied. 
+ */ + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; + +import net.i2p.data.DataFormatException; +import net.i2p.data.DataHelper; +import net.i2p.data.Hash; + +/** + * Request the router look up the dest for a hash + */ +public class DestLookupMessage extends I2CPMessageImpl { + public final static int MESSAGE_TYPE = 34; + private Hash _hash; + + public DestLookupMessage() { + super(); + } + + public DestLookupMessage(Hash h) { + _hash = h; + } + + public Hash getHash() { + return _hash; + } + + protected void doReadMessage(InputStream in, int size) throws I2CPMessageException, IOException { + Hash h = new Hash(); + try { + h.readBytes(in); + } catch (DataFormatException dfe) { + throw new I2CPMessageException("Unable to load the hash", dfe); + } + _hash = h; + } + + protected byte[] doWriteMessage() throws I2CPMessageException, IOException { + if (_hash == null) + throw new I2CPMessageException("Unable to write out the message as there is not enough data"); + ByteArrayOutputStream os = new ByteArrayOutputStream(Hash.HASH_LENGTH); + try { + _hash.writeBytes(os); + } catch (DataFormatException dfe) { + throw new I2CPMessageException("Error writing out the hash", dfe); + } + return os.toByteArray(); + } + + public int getType() { + return MESSAGE_TYPE; + } + + public boolean equals(Object object) { + if ((object != null) && (object instanceof DestLookupMessage)) { + DestLookupMessage msg = (DestLookupMessage) object; + return DataHelper.eq(getHash(), msg.getHash()); + } + return false; + } + + public String toString() { + StringBuffer buf = new StringBuffer(); + buf.append("[DestLookupMessage: "); + buf.append("\n\tHash: ").append(_hash); + buf.append("]"); + return buf.toString(); + } +} diff --git a/core/java/src/net/i2p/data/i2cp/DestReplyMessage.java b/core/java/src/net/i2p/data/i2cp/DestReplyMessage.java new file mode 100644 index 000000000..1ed601dc2 --- /dev/null +++ 
b/core/java/src/net/i2p/data/i2cp/DestReplyMessage.java @@ -0,0 +1,78 @@ +package net.i2p.data.i2cp; + +/* + * Released into the public domain + * with no warranty of any kind, either expressed or implied. + * + */ + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; + +import net.i2p.data.DataFormatException; +import net.i2p.data.DataHelper; +import net.i2p.data.Destination; + +/** + * Response to DestLookupMessage + * + */ +public class DestReplyMessage extends I2CPMessageImpl { + public final static int MESSAGE_TYPE = 35; + private Destination _dest; + + public DestReplyMessage() { + super(); + } + + public DestReplyMessage(Destination d) { + _dest = d; + } + + public Destination getDestination() { + return _dest; + } + + protected void doReadMessage(InputStream in, int size) throws I2CPMessageException, IOException { + try { + Destination d = new Destination(); + d.readBytes(in); + _dest = d; + } catch (DataFormatException dfe) { + _dest = null; // null dest allowed + } + } + + protected byte[] doWriteMessage() throws I2CPMessageException, IOException { + if (_dest == null) + return new byte[0]; // null response allowed + ByteArrayOutputStream os = new ByteArrayOutputStream(_dest.size()); + try { + _dest.writeBytes(os); + } catch (DataFormatException dfe) { + throw new I2CPMessageException("Error writing out the dest", dfe); + } + return os.toByteArray(); + } + + public int getType() { + return MESSAGE_TYPE; + } + + public boolean equals(Object object) { + if ((object != null) && (object instanceof DestReplyMessage)) { + DestReplyMessage msg = (DestReplyMessage) object; + return DataHelper.eq(getDestination(), msg.getDestination()); + } + return false; + } + + public String toString() { + StringBuffer buf = new StringBuffer(); + buf.append("[DestReplyMessage: "); + buf.append("\n\tDestination: ").append(_dest); + buf.append("]"); + return buf.toString(); + } +} diff --git 
a/core/java/src/net/i2p/data/i2cp/I2CPMessageHandler.java b/core/java/src/net/i2p/data/i2cp/I2CPMessageHandler.java index 481d26f0e..61d865053 100644 --- a/core/java/src/net/i2p/data/i2cp/I2CPMessageHandler.java +++ b/core/java/src/net/i2p/data/i2cp/I2CPMessageHandler.java @@ -18,7 +18,7 @@ import net.i2p.data.DataHelper; import net.i2p.util.Log; /** - * Handle messages from the server for the client + * Handle messages from the server for the client or vice versa * */ public class I2CPMessageHandler { @@ -34,8 +34,13 @@ public class I2CPMessageHandler { * message - if it is an unknown type or has improper formatting, etc. */ public static I2CPMessage readMessage(InputStream in) throws IOException, I2CPMessageException { + int length = -1; + try { + length = (int) DataHelper.readLong(in, 4); + } catch (DataFormatException dfe) { + throw new IOException("Connection closed"); + } try { - int length = (int) DataHelper.readLong(in, 4); if (length < 0) throw new I2CPMessageException("Invalid message length specified"); int type = (int) DataHelper.readLong(in, 1); I2CPMessage msg = createMessage(in, length, type); @@ -69,18 +74,26 @@ public class I2CPMessageHandler { return new ReceiveMessageBeginMessage(); case ReceiveMessageEndMessage.MESSAGE_TYPE: return new ReceiveMessageEndMessage(); + case ReconfigureSessionMessage.MESSAGE_TYPE: + return new ReconfigureSessionMessage(); case ReportAbuseMessage.MESSAGE_TYPE: return new ReportAbuseMessage(); case RequestLeaseSetMessage.MESSAGE_TYPE: return new RequestLeaseSetMessage(); case SendMessageMessage.MESSAGE_TYPE: return new SendMessageMessage(); + case SendMessageExpiresMessage.MESSAGE_TYPE: + return new SendMessageExpiresMessage(); case SessionStatusMessage.MESSAGE_TYPE: return new SessionStatusMessage(); case GetDateMessage.MESSAGE_TYPE: return new GetDateMessage(); case SetDateMessage.MESSAGE_TYPE: return new SetDateMessage(); + case DestLookupMessage.MESSAGE_TYPE: + return new DestLookupMessage(); + case 
DestReplyMessage.MESSAGE_TYPE: + return new DestReplyMessage(); default: throw new I2CPMessageException("The type " + type + " is an unknown I2CP message"); } @@ -94,4 +107,4 @@ public class I2CPMessageHandler { e.printStackTrace(); } } -} \ No newline at end of file +} diff --git a/core/java/src/net/i2p/data/i2cp/I2CPMessageReader.java b/core/java/src/net/i2p/data/i2cp/I2CPMessageReader.java index 53650ec19..13b01a67a 100644 --- a/core/java/src/net/i2p/data/i2cp/I2CPMessageReader.java +++ b/core/java/src/net/i2p/data/i2cp/I2CPMessageReader.java @@ -134,9 +134,11 @@ public class I2CPMessageReader { public void cancelRunner() { _doRun = false; _stayAlive = false; - if (_stream != null) { + // prevent race NPE + InputStream in = _stream; + if (in != null) { try { - _stream.close(); + in.close(); } catch (IOException ioe) { _log.error("Error closing the stream", ioe); } @@ -164,6 +166,7 @@ public class I2CPMessageReader { _listener.disconnected(I2CPMessageReader.this); cancelRunner(); } catch (OutOfMemoryError oom) { + // ooms seen here... maybe log and keep going? 
throw oom; } catch (Exception e) { _log.log(Log.CRIT, "Unhandled error reading I2CP stream", e); @@ -182,4 +185,4 @@ public class I2CPMessageReader { // boom bye bye bad bwoy } } -} \ No newline at end of file +} diff --git a/core/java/src/net/i2p/data/i2cp/MessagePayloadMessage.java b/core/java/src/net/i2p/data/i2cp/MessagePayloadMessage.java index 1e77f3636..5dc4a5d71 100644 --- a/core/java/src/net/i2p/data/i2cp/MessagePayloadMessage.java +++ b/core/java/src/net/i2p/data/i2cp/MessagePayloadMessage.java @@ -19,8 +19,7 @@ import net.i2p.data.Payload; import net.i2p.util.Log; /** - * Defines the message a client sends to a router to ask it to deliver - * a new message + * Defines the payload message a router sends to the client * * @author jrandom */ diff --git a/core/java/src/net/i2p/data/i2cp/ReconfigureSessionMessage.java b/core/java/src/net/i2p/data/i2cp/ReconfigureSessionMessage.java new file mode 100644 index 000000000..7165f6d32 --- /dev/null +++ b/core/java/src/net/i2p/data/i2cp/ReconfigureSessionMessage.java @@ -0,0 +1,103 @@ +package net.i2p.data.i2cp; + +/* + * free (adj.): unencumbered; not under the control of others + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat + * your children, but it might. Use at your own risk. + * + */ + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; + +import net.i2p.data.DataFormatException; +import net.i2p.data.DataHelper; +import net.i2p.util.Log; + +/** + * Defines the message a client sends to a router when + * updating the config on an existing session. 
+ * + * @author zzz + */ +public class ReconfigureSessionMessage extends I2CPMessageImpl { + private final static Log _log = new Log(ReconfigureSessionMessage.class); + public final static int MESSAGE_TYPE = 2; + private SessionId _sessionId; + private SessionConfig _sessionConfig; + + public ReconfigureSessionMessage() { + _sessionId = null; + _sessionConfig = null; + } + + public SessionId getSessionId() { + return _sessionId; + } + + public void setSessionId(SessionId id) { + _sessionId = id; + } + + public SessionConfig getSessionConfig() { + return _sessionConfig; + } + + public void setSessionConfig(SessionConfig config) { + _sessionConfig = config; + } + + @Override + protected void doReadMessage(InputStream in, int size) throws I2CPMessageException, IOException { + try { + _sessionId = new SessionId(); + _sessionId.readBytes(in); + _sessionConfig = new SessionConfig(); + _sessionConfig.readBytes(in); + } catch (DataFormatException dfe) { + throw new I2CPMessageException("Unable to load the message data", dfe); + } + } + + @Override + protected byte[] doWriteMessage() throws I2CPMessageException, IOException { + if (_sessionId == null || _sessionConfig == null) + throw new I2CPMessageException("Unable to write out the message as there is not enough data"); + ByteArrayOutputStream os = new ByteArrayOutputStream(64); + try { + _sessionId.writeBytes(os); + _sessionConfig.writeBytes(os); + } catch (DataFormatException dfe) { + throw new I2CPMessageException("Error writing out the message data", dfe); + } + return os.toByteArray(); + } + + public int getType() { + return MESSAGE_TYPE; + } + + @Override + public boolean equals(Object object) { + if ((object != null) && (object instanceof ReconfigureSessionMessage)) { + ReconfigureSessionMessage msg = (ReconfigureSessionMessage) object; + return DataHelper.eq(getSessionId(), msg.getSessionId()) + && DataHelper.eq(getSessionConfig(), msg.getSessionConfig()); + } + + return false; + } + + @Override + public String 
toString() { + StringBuffer buf = new StringBuffer(); + buf.append("[ReconfigureSessionMessage: "); + buf.append("\n\tSessionId: ").append(getSessionId()); + buf.append("\n\tSessionConfig: ").append(getSessionConfig()); + buf.append("]"); + return buf.toString(); + } +} diff --git a/core/java/src/net/i2p/data/i2cp/RequestLeaseSetMessage.java b/core/java/src/net/i2p/data/i2cp/RequestLeaseSetMessage.java index 2cd630db6..b5fca013d 100644 --- a/core/java/src/net/i2p/data/i2cp/RequestLeaseSetMessage.java +++ b/core/java/src/net/i2p/data/i2cp/RequestLeaseSetMessage.java @@ -156,7 +156,7 @@ public class RequestLeaseSetMessage extends I2CPMessageImpl { return buf.toString(); } - private class TunnelEndpoint { + private static class TunnelEndpoint { private Hash _router; private TunnelId _tunnelId; @@ -186,4 +186,4 @@ public class RequestLeaseSetMessage extends I2CPMessageImpl { _tunnelId = tunnelId; } } -} \ No newline at end of file +} diff --git a/core/java/src/net/i2p/data/i2cp/SendMessageExpiresMessage.java b/core/java/src/net/i2p/data/i2cp/SendMessageExpiresMessage.java new file mode 100644 index 000000000..d15c1979c --- /dev/null +++ b/core/java/src/net/i2p/data/i2cp/SendMessageExpiresMessage.java @@ -0,0 +1,117 @@ +package net.i2p.data.i2cp; + +/* + * free (adj.): unencumbered; not under the control of others + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat + * your children, but it might. Use at your own risk. 
+ * + */ + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Date; + +import net.i2p.data.DataFormatException; +import net.i2p.data.DataHelper; +import net.i2p.data.Destination; +import net.i2p.data.Payload; +import net.i2p.util.Log; + +/** + * Same as SendMessageMessage, but with an expiration to be passed to the router + * + * @author zzz + */ +public class SendMessageExpiresMessage extends SendMessageMessage { + private final static Log _log = new Log(SendMessageExpiresMessage.class); + public final static int MESSAGE_TYPE = 36; + private SessionId _sessionId; + private Destination _destination; + private Payload _payload; + private Date _expiration; + + public SendMessageExpiresMessage() { + super(); + setExpiration(null); + } + + public Date getExpiration() { + return _expiration; + } + + public void setExpiration(Date d) { + _expiration = d; + } + + /** + * Read the body into the data structures + * + * @throws IOException + */ + @Override + public void readMessage(InputStream in, int length, int type) throws I2CPMessageException, IOException { + super.readMessage(in, length, type); + + try { + _expiration = DataHelper.readDate(in); + } catch (DataFormatException dfe) { + throw new I2CPMessageException("Unable to load the message data", dfe); + } + } + + /** + * Write out the full message to the stream, including the 4 byte size and 1 + * byte type header. 
Override the parent so we can be more mem efficient + * + * @throws IOException + */ + @Override + public void writeMessage(OutputStream out) throws I2CPMessageException, IOException { + if ((getSessionId() == null) || (getDestination() == null) || (getPayload() == null) || (getNonce() <= 0) || (_expiration == null)) + throw new I2CPMessageException("Unable to write out the message as there is not enough data"); + int len = 2 + getDestination().size() + getPayload().getSize() + 4 + 4 + DataHelper.DATE_LENGTH; + + try { + DataHelper.writeLong(out, 4, len); + DataHelper.writeLong(out, 1, getType()); + getSessionId().writeBytes(out); + getDestination().writeBytes(out); + getPayload().writeBytes(out); + DataHelper.writeLong(out, 4, getNonce()); + DataHelper.writeDate(out, _expiration); + } catch (DataFormatException dfe) { + throw new I2CPMessageException("Error writing the msg", dfe); + } + } + + public int getType() { + return MESSAGE_TYPE; + } + + @Override + public boolean equals(Object object) { + if ((object != null) && (object instanceof SendMessageExpiresMessage)) { + SendMessageExpiresMessage msg = (SendMessageExpiresMessage) object; + return super.equals(object) + && DataHelper.eq(getExpiration(), msg.getExpiration()); + } + + return false; + } + + @Override + public String toString() { + StringBuffer buf = new StringBuffer(); + buf.append("[SendMessageMessage: "); + buf.append("\n\tSessionId: ").append(getSessionId()); + buf.append("\n\tNonce: ").append(getNonce()); + buf.append("\n\tDestination: ").append(getDestination()); + buf.append("\n\tExpiration: ").append(getExpiration()); + buf.append("\n\tPayload: ").append(getPayload()); + buf.append("]"); + return buf.toString(); + } +} diff --git a/core/java/src/net/i2p/stat/StatManager.java b/core/java/src/net/i2p/stat/StatManager.java index ffb7d6c52..56af55f71 100644 --- a/core/java/src/net/i2p/stat/StatManager.java +++ b/core/java/src/net/i2p/stat/StatManager.java @@ -1,5 +1,6 @@ package net.i2p.stat; 
+import java.text.Collator; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -45,12 +46,12 @@ public class StatManager { public static final String DEFAULT_STAT_REQUIRED = "bw.recvRate,bw.sendBps,bw.sendRate,client.sendAckTime,clock.skew,crypto.elGamal.encrypt," + "jobQueue.jobLag,netDb.successTime,router.fastPeers," + - "prng.bufferFillTime,prng.bufferWaitTime," + + "prng.bufferFillTime,prng.bufferWaitTime,router.memoryUsed," + "transport.receiveMessageSize,transport.sendMessageSize,transport.sendProcessingTime," + "tunnel.acceptLoad,tunnel.buildRequestTime,tunnel.rejectOverloaded,tunnel.rejectTimeout" + "tunnel.buildClientExpire,tunnel.buildClientReject,tunnel.buildClientSuccess," + "tunnel.buildExploratoryExpire,tunnel.buildExploratoryReject,tunnel.buildExploratorySuccess," + - "tunnel.buildRatio.*,tunnel.corruptMessage," + + "tunnel.buildRatio.*,tunnel.corruptMessage,tunnel.dropLoad*," + "tunnel.decryptRequestTime,tunnel.fragmentedDropped,tunnel.participatingMessageCount,"+ "tunnel.participatingTunnels,tunnel.testFailedTime,tunnel.testSuccessTime," + "tunnel.participatingBandwidth,udp.sendPacketSize,udp.packetsRetransmitted" ; @@ -178,7 +179,7 @@ public class StatManager { /** Group name (String) to a Set of stat names, ordered alphabetically */ public Map getStatsByGroup() { - Map groups = new TreeMap(); + Map groups = new TreeMap(Collator.getInstance()); for (Iterator iter = _frequencyStats.values().iterator(); iter.hasNext();) { FrequencyStat stat = (FrequencyStat) iter.next(); if (!groups.containsKey(stat.getGroupName())) groups.put(stat.getGroupName(), new TreeSet()); diff --git a/core/java/src/net/i2p/util/ByteCache.java b/core/java/src/net/i2p/util/ByteCache.java index aadc721aa..4bd3da6ef 100644 --- a/core/java/src/net/i2p/util/ByteCache.java +++ b/core/java/src/net/i2p/util/ByteCache.java @@ -55,7 +55,7 @@ public final class ByteCache { _maxCached = maxCachedEntries; _entrySize = entrySize; _lastOverflow = -1; - 
SimpleTimer.getInstance().addEvent(new Cleanup(), CLEANUP_FREQUENCY); + SimpleScheduler.getInstance().addPeriodicEvent(new Cleanup(), CLEANUP_FREQUENCY); _log = I2PAppContext.getGlobalContext().logManager().getLog(ByteCache.class); } @@ -120,7 +120,6 @@ public final class ByteCache { _log.debug("Removing " + toRemove + " cached entries of size " + _entrySize); } } - SimpleTimer.getInstance().addEvent(Cleanup.this, CLEANUP_FREQUENCY); } } } diff --git a/core/java/src/net/i2p/util/Clock.java b/core/java/src/net/i2p/util/Clock.java index 66d20721c..87cf9d639 100644 --- a/core/java/src/net/i2p/util/Clock.java +++ b/core/java/src/net/i2p/util/Clock.java @@ -28,7 +28,7 @@ public class Clock implements Timestamper.UpdateListener { _context = context; _offset = 0; _alreadyChanged = false; - _listeners = new HashSet(64); + _listeners = new HashSet(1); _timestamper = new Timestamper(context, this); _startedOn = System.currentTimeMillis(); _statCreated = false; @@ -149,4 +149,4 @@ public class Clock implements Timestamper.UpdateListener { public static interface ClockUpdateListener { public void offsetChanged(long delta); } -} \ No newline at end of file +} diff --git a/core/java/src/net/i2p/util/ConcurrentHashSet.java b/core/java/src/net/i2p/util/ConcurrentHashSet.java new file mode 100644 index 000000000..2db9e195e --- /dev/null +++ b/core/java/src/net/i2p/util/ConcurrentHashSet.java @@ -0,0 +1,60 @@ +package net.i2p.util; + +import java.util.AbstractSet; +import java.util.Collection; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +/** + * Implement on top of a ConcurrentHashMap with a dummy value. 
+ * + * @author zzz + */ +public class ConcurrentHashSet extends AbstractSet implements Set { + private static final Object DUMMY = new Object(); + private Map _map; + + public ConcurrentHashSet() { + _map = new ConcurrentHashMap(); + } + public ConcurrentHashSet(int capacity) { + _map = new ConcurrentHashMap(capacity); + } + + public boolean add(E o) { + return _map.put(o, DUMMY) == null; + } + + public void clear() { + _map.clear(); + } + + public boolean contains(Object o) { + return _map.containsKey(o); + } + + public boolean isEmpty() { + return _map.isEmpty(); + } + + public boolean remove(Object o) { + return _map.remove(o) != null; + } + + public int size() { + return _map.size(); + } + + public Iterator iterator() { + return _map.keySet().iterator(); + } + + public boolean addAll(Collection c) { + boolean rv = false; + for (E e : c) + rv |= _map.put(e, DUMMY) == null; + return rv; + } +} diff --git a/core/java/src/net/i2p/util/ConvertToHash.java b/core/java/src/net/i2p/util/ConvertToHash.java new file mode 100644 index 000000000..087855640 --- /dev/null +++ b/core/java/src/net/i2p/util/ConvertToHash.java @@ -0,0 +1,76 @@ +package net.i2p.util; + +import net.i2p.I2PAppContext; +import net.i2p.data.Base32; +import net.i2p.data.DataFormatException; +import net.i2p.data.Destination; +import net.i2p.data.Hash; + +/** + * Convert any kind of destination String to a hash + * Supported: + * Base64 dest + * Base64 dest.i2p + * Base64 Hash + * Base32 Hash + * Base32 desthash.b32.i2p + * example.i2p + * + * @return null on failure + * + * @author zzz + */ +public class ConvertToHash { + + public static Hash getHash(String peer) { + if (peer == null) + return null; + Hash h = new Hash(); + String peerLC = peer.toLowerCase(); + // b64 hash + if (peer.length() == 44 && !peerLC.endsWith(".i2p")) { + try { + h.fromBase64(peer); + } catch (DataFormatException dfe) {} + } + // b64 dest.i2p + if (h.getData() == null && peer.length() >= 520 && peerLC.endsWith(".i2p")) { + try 
{ + Destination d = new Destination(); + d.fromBase64(peer.substring(0, peer.length() - 4)); + h = d.calculateHash(); + } catch (DataFormatException dfe) {} + } + // b64 dest + if (h.getData() == null && peer.length() >= 516 && !peerLC.endsWith(".i2p")) { + try { + Destination d = new Destination(); + d.fromBase64(peer); + h = d.calculateHash(); + } catch (DataFormatException dfe) {} + } + // b32 hash.b32.i2p + // do this here rather than in naming service so it will work + // even if the leaseset is not found + if (h.getData() == null && peer.length() == 60 && peerLC.endsWith(".b32.i2p")) { + byte[] b = Base32.decode(peer.substring(0, 52)); + if (b != null && b.length == Hash.HASH_LENGTH) + h.setData(b); + } + // b32 hash + if (h.getData() == null && peer.length() == 52 && !peerLC.endsWith(".i2p")) { + byte[] b = Base32.decode(peer); + if (b != null && b.length == Hash.HASH_LENGTH) + h.setData(b); + } + // example.i2p + if (h.getData() == null) { + Destination d = I2PAppContext.getGlobalContext().namingService().lookup(peer); + if (d != null) + h = d.calculateHash(); + } + if (h.getData() == null) + return null; + return h; + } +} diff --git a/core/java/src/net/i2p/util/I2PAppThread.java b/core/java/src/net/i2p/util/I2PAppThread.java index fd7256578..a4e12c8b1 100644 --- a/core/java/src/net/i2p/util/I2PAppThread.java +++ b/core/java/src/net/i2p/util/I2PAppThread.java @@ -40,7 +40,7 @@ public class I2PAppThread extends I2PThread { super(r, name); } public I2PAppThread(Runnable r, String name, boolean isDaemon) { - super(r, name); + super(r, name, isDaemon); } protected void fireOOM(OutOfMemoryError oom) { diff --git a/core/java/src/net/i2p/util/KeyRing.java b/core/java/src/net/i2p/util/KeyRing.java new file mode 100644 index 000000000..6bbfb38de --- /dev/null +++ b/core/java/src/net/i2p/util/KeyRing.java @@ -0,0 +1,20 @@ +package net.i2p.util; + +import java.io.IOException; +import java.io.Writer; + +import java.util.concurrent.ConcurrentHashMap; + +import 
net.i2p.data.Hash; +import net.i2p.data.SessionKey; + +/** + * simple + */ +public class KeyRing extends ConcurrentHashMap { + public KeyRing() { + super(0); + } + + public void renderStatusHTML(Writer out) throws IOException {} +} diff --git a/core/java/src/net/i2p/util/NativeBigInteger.java b/core/java/src/net/i2p/util/NativeBigInteger.java index 7a64e24e4..970de52c8 100644 --- a/core/java/src/net/i2p/util/NativeBigInteger.java +++ b/core/java/src/net/i2p/util/NativeBigInteger.java @@ -23,6 +23,9 @@ import freenet.support.CPUInformation.CPUInfo; import freenet.support.CPUInformation.IntelCPUInfo; import freenet.support.CPUInformation.UnknownCPUException; +import net.i2p.I2PAppContext; +import net.i2p.util.Log; + /** *

      BigInteger that takes advantage of the jbigi library for the modPow operation, * which accounts for a massive segment of the processing cost of asymmetric @@ -89,6 +92,9 @@ public class NativeBigInteger extends BigInteger { * do we want to dump some basic success/failure info to stderr during * initialization? this would otherwise use the Log component, but this makes * it easier for other systems to reuse this class + * + * Well, we really want to use Log so if you are one of those "other systems" + * then comment out the I2PAppContext usage below. */ private static final boolean _doLog = System.getProperty("jbigi.dontLog") == null; @@ -401,38 +407,32 @@ public class NativeBigInteger extends BigInteger { boolean loaded = loadGeneric("jbigi"); if (loaded) { _nativeOk = true; - if (_doLog) - System.err.println("INFO: Locally optimized native BigInteger loaded from the library path"); + info("Locally optimized native BigInteger library loaded from the library path"); } else { loaded = loadFromResource("jbigi"); if (loaded) { _nativeOk = true; - if (_doLog) - System.err.println("INFO: Locally optimized native BigInteger loaded from resource"); + info("Locally optimized native BigInteger library loaded from resource"); } else { loaded = loadFromResource(true); if (loaded) { _nativeOk = true; - if (_doLog) - System.err.println("INFO: Optimized native BigInteger library '"+getResourceName(true)+"' loaded from resource"); + info("Optimized native BigInteger library '"+getResourceName(true)+"' loaded from resource"); } else { loaded = loadGeneric(true); if (loaded) { _nativeOk = true; - if (_doLog) - System.err.println("INFO: Optimized native BigInteger library '"+getMiddleName(true)+"' loaded from somewhere in the path"); + info("Optimized native BigInteger library '"+getMiddleName(true)+"' loaded from somewhere in the path"); } else { loaded = loadFromResource(false); if (loaded) { _nativeOk = true; - if (_doLog) - System.err.println("INFO: Non-optimized native 
BigInteger library '"+getResourceName(false)+"' loaded from resource"); + info("Non-optimized native BigInteger library '"+getResourceName(false)+"' loaded from resource"); } else { loaded = loadGeneric(false); if (loaded) { _nativeOk = true; - if (_doLog) - System.err.println("INFO: Non-optimized native BigInteger library '"+getMiddleName(false)+"' loaded from somewhere in the path"); + info("Non-optimized native BigInteger library '"+getMiddleName(false)+"' loaded from somewhere in the path"); } else { _nativeOk = false; } @@ -442,16 +442,27 @@ public class NativeBigInteger extends BigInteger { } } } - if (_doLog && !_nativeOk) - System.err.println("INFO: Native BigInteger library jbigi not loaded - using pure java"); + if (!_nativeOk) { + warn("Native BigInteger library jbigi not loaded - using pure Java - " + + "poor performance may result - see http://www.i2p2.i2p/jbigi.html for help"); + } }catch(Exception e){ - if (_doLog) { - System.err.println("INFO: Native BigInteger library jbigi not loaded, reason: '"+e.getMessage()+"' - using pure java"); - e.printStackTrace(); - } + warn("Native BigInteger library jbigi not loaded, reason: '"+e.getMessage()+"' - using pure java"); } } + private static void info(String s) { + if(_doLog) + System.err.println("INFO: " + s); + I2PAppContext.getGlobalContext().logManager().getLog(NativeBigInteger.class).info(s); + } + + private static void warn(String s) { + if(_doLog) + System.err.println("WARNING: " + s); + I2PAppContext.getGlobalContext().logManager().getLog(NativeBigInteger.class).warn(s); + } + /** *

      Try loading it from an explictly build jbigi.dll / libjbigi.so first, before * looking into a jbigi.jar for any other libraries.

      diff --git a/core/java/src/net/i2p/util/SimpleScheduler.java b/core/java/src/net/i2p/util/SimpleScheduler.java new file mode 100644 index 000000000..becf10099 --- /dev/null +++ b/core/java/src/net/i2p/util/SimpleScheduler.java @@ -0,0 +1,164 @@ +package net.i2p.util; + +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.ThreadFactory; + +import net.i2p.I2PAppContext; + +/** + * Simple event scheduler - toss an event on the queue and it gets fired at the + * appropriate time. The method that is fired however should NOT block (otherwise + * they b0rk the timer). + * + * This is like SimpleTimer but addEvent() for an existing event adds a second + * job. Unlike SimpleTimer, events cannot be cancelled or rescheduled. + * + * For events that cannot or will not be cancelled or rescheduled - + * for example, a call such as: + * SimpleTimer.getInstance().addEvent(new FooEvent(bar), timeoutMs); + * use SimpleScheduler instead to reduce lock contention in SimpleTimer... + * + * For periodic events, use addPeriodicEvent(). Unlike SimpleTimer, + * uncaught Exceptions will not prevent subsequent executions. 
+ * + * @author zzz + */ +public class SimpleScheduler { + private static final SimpleScheduler _instance = new SimpleScheduler(); + public static SimpleScheduler getInstance() { return _instance; } + private static final int THREADS = 4; + private I2PAppContext _context; + private Log _log; + private ScheduledThreadPoolExecutor _executor; + private String _name; + private int _count; + + protected SimpleScheduler() { this("SimpleScheduler"); } + protected SimpleScheduler(String name) { + _context = I2PAppContext.getGlobalContext(); + _log = _context.logManager().getLog(SimpleScheduler.class); + _name = name; + _count = 0; + _executor = new ScheduledThreadPoolExecutor(THREADS, new CustomThreadFactory()); + } + + /** + * Removes the SimpleScheduler. + */ + public void stop() { + _executor.shutdownNow(); + } + + /** + * Queue up the given event to be fired no sooner than timeoutMs from now. + * + * @param event + * @param timeoutMs + */ + public void addEvent(SimpleTimer.TimedEvent event, long timeoutMs) { + if (event == null) + throw new IllegalArgumentException("addEvent null"); + RunnableEvent re = new RunnableEvent(event, timeoutMs); + re.schedule(); + } + + public void addPeriodicEvent(SimpleTimer.TimedEvent event, long timeoutMs) { + addPeriodicEvent(event, timeoutMs, timeoutMs); + } + + /** + * Queue up the given event to be fired after initialDelay and every + * timeoutMs thereafter. The TimedEvent must not do its own rescheduling. 
+ * As all Exceptions are caught in run(), these will not prevent + * subsequent executions (unlike SimpleTimer, where the TimedEvent does + * its own rescheduling) + * + * @param event + * @param initialDelay (ms) + * @param timeoutMs + */ + public void addPeriodicEvent(SimpleTimer.TimedEvent event, long initialDelay, long timeoutMs) { + if (event == null) + throw new IllegalArgumentException("addEvent null"); + RunnableEvent re = new PeriodicRunnableEvent(event, initialDelay, timeoutMs); + re.schedule(); + } + + private class CustomThreadFactory implements ThreadFactory { + public Thread newThread(Runnable r) { + Thread rv = Executors.defaultThreadFactory().newThread(r); + rv.setName(_name + ' ' + (++_count) + '/' + THREADS); + rv.setDaemon(true); + return rv; + } + } + + /** + * Same as SimpleTimer.TimedEvent but use run() instead of timeReached(), and remembers the time + */ + private class RunnableEvent implements Runnable { + protected SimpleTimer.TimedEvent _timedEvent; + protected long _scheduled; + + public RunnableEvent(SimpleTimer.TimedEvent t, long timeoutMs) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Creating w/ delay " + timeoutMs + " : " + t); + _timedEvent = t; + _scheduled = timeoutMs + System.currentTimeMillis(); + } + public void schedule() { + _executor.schedule(this, _scheduled - System.currentTimeMillis(), TimeUnit.MILLISECONDS); + } + public void run() { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Running: " + _timedEvent); + long before = System.currentTimeMillis(); + if (_log.shouldLog(Log.WARN) && before < _scheduled - 100) + _log.warn(_name + " wtf, early execution " + (_scheduled - before) + ": " + _timedEvent); + else if (_log.shouldLog(Log.WARN) && before > _scheduled + 1000) + _log.warn(" wtf, late execution " + (before - _scheduled) + ": " + _timedEvent + debug()); + try { + _timedEvent.timeReached(); + } catch (Throwable t) { + _log.log(Log.CRIT, _name + " wtf, event borked: " + _timedEvent, t); + } + long time = 
System.currentTimeMillis() - before; + if (time > 1000 && _log.shouldLog(Log.WARN)) + _log.warn(_name + " wtf, event execution took " + time + ": " + _timedEvent); + long completed = _executor.getCompletedTaskCount(); + if (_log.shouldLog(Log.INFO) && completed % 250 == 0) + _log.info(debug()); + } + } + + /** Run every timeoutMs. TimedEvent must not do its own reschedule via addEvent() */ + private class PeriodicRunnableEvent extends RunnableEvent { + private long _timeoutMs; + private long _initialDelay; + public PeriodicRunnableEvent(SimpleTimer.TimedEvent t, long initialDelay, long timeoutMs) { + super(t, timeoutMs); + _initialDelay = initialDelay; + _timeoutMs = timeoutMs; + _scheduled = initialDelay + System.currentTimeMillis(); + } + public void schedule() { + _executor.scheduleWithFixedDelay(this, _initialDelay, _timeoutMs, TimeUnit.MILLISECONDS); + } + public void run() { + super.run(); + _scheduled = _timeoutMs + System.currentTimeMillis(); + } + } + + private String debug() { + return + " Pool: " + _name + + " Active: " + _executor.getActiveCount() + '/' + _executor.getPoolSize() + + " Completed: " + _executor.getCompletedTaskCount() + + " Queued: " + _executor.getQueue().size(); + } +} + diff --git a/core/java/src/net/i2p/util/SimpleTimer2.java b/core/java/src/net/i2p/util/SimpleTimer2.java new file mode 100644 index 000000000..6239ed42f --- /dev/null +++ b/core/java/src/net/i2p/util/SimpleTimer2.java @@ -0,0 +1,252 @@ +package net.i2p.util; + +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.ThreadFactory; +import java.util.Map; + +import net.i2p.I2PAppContext; + +/** + * Simple event scheduler - toss an event on the queue and it gets fired at the + * appropriate time. The method that is fired however should NOT block (otherwise + * they b0rk the timer). 
+ * + * This rewrites the old SimpleTimer to use the java.util.concurrent.ScheduledThreadPoolExecutor. + * SimpleTimer has problems with lock contention; + * this should work a lot better. + * + * This supports cancelling and arbitrary rescheduling. + * If you don't need that, use SimpleScheduler instead. + * + * SimpleTimer is deprecated, use this or SimpleScheduler. + * + * @author zzz + */ +public class SimpleTimer2 { + private static final SimpleTimer2 _instance = new SimpleTimer2(); + public static SimpleTimer2 getInstance() { return _instance; } + private static final int THREADS = 4; + private I2PAppContext _context; + private static Log _log; // static so TimedEvent can use it + private ScheduledThreadPoolExecutor _executor; + private String _name; + private int _count; + + protected SimpleTimer2() { this("SimpleTimer2"); } + protected SimpleTimer2(String name) { + _context = I2PAppContext.getGlobalContext(); + _log = _context.logManager().getLog(SimpleTimer2.class); + _name = name; + _count = 0; + _executor = new CustomScheduledThreadPoolExecutor(THREADS, new CustomThreadFactory()); + } + + /** + * Removes the SimpleTimer. 
+ */ + public void stop() { + _executor.shutdownNow(); + } + + private class CustomScheduledThreadPoolExecutor extends ScheduledThreadPoolExecutor { + public CustomScheduledThreadPoolExecutor(int threads, ThreadFactory factory) { + super(threads, factory); + } + + protected void afterExecute(Runnable r, Throwable t) { + super.afterExecute(r, t); + if (t != null) // shouldn't happen, caught in RunnableEvent.run() + _log.log(Log.CRIT, "wtf, event borked: " + r, t); + } + } + + private class CustomThreadFactory implements ThreadFactory { + public Thread newThread(Runnable r) { + Thread rv = Executors.defaultThreadFactory().newThread(r); + rv.setName(_name + ' ' + (++_count) + '/' + THREADS); + rv.setDaemon(true); + return rv; + } + } + + private ScheduledFuture schedule(TimedEvent t, long timeoutMs) { + return _executor.schedule(t, timeoutMs, TimeUnit.MILLISECONDS); + } + + /** + * Similar to SimpleTimer.TimedEvent but users must extend instead of implement, + * and all schedule and cancel methods are through this class rather than SimpleTimer2. + * + * To convert over, change implements SimpleTimer.TimedEvent to extends SimpleTimer2.TimedEvent, + * and be sure to call super(SimpleTimer2.getInstance(), timeoutMs) in the constructor + * (or super(SimpleTimer2.getInstance()); .... schedule(timeoutMs); if there is other stuff + * in your constructor) + * + * Other porting: + * SimpleTimer.getInstance().addEvent(new foo(), timeout) => new foo(SimpleTimer2.getInstance(), timeout) + * SimpleTimer.getInstance().addEvent(this, timeout) => schedule(timeout) + * SimpleTimer.getInstance().addEvent(foo, timeout) => foo.reschedule(timeout) + * SimpleTimer.getInstance().removeEvent(foo) => foo.cancel() + * + * There's no global locking, but for scheduling, we synchronize on this + * to reduce the chance of duplicates on the queue. 
+ * + * schedule(ms) can create duplicates + * reschedule(ms) and reschedule(ms, true) can lose the timer + * reschedule(ms, false) and forceReschedule(ms) are relatively safe from either + * + */ + public static abstract class TimedEvent implements Runnable { + private SimpleTimer2 _pool; + private int _fuzz; + protected static final int DEFAULT_FUZZ = 3; + private ScheduledFuture _future; // _executor.remove() doesn't work so we have to use this + // ... and I expect cancelling this way is more efficient + + /** must call schedule() later */ + public TimedEvent(SimpleTimer2 pool) { + _pool = pool; + _fuzz = DEFAULT_FUZZ; + } + /** automatically schedules, don't use this one if you have other things to do first */ + public TimedEvent(SimpleTimer2 pool, long timeoutMs) { + this(pool); + schedule(timeoutMs); + } + + /** + * Don't bother rescheduling if +/- this many ms or less. + * Use this to reduce timer queue and object churn for a sloppy timer like + * an inactivity timer. + * Default 3 ms. + */ + public void setFuzz(int fuzz) { + _fuzz = fuzz; + } + + /** + * More efficient than reschedule(). + * Only call this after calling the non-scheduling constructor, + * or from within timeReached(), or you will get duplicates on the queue. + * Otherwise use reschedule(). 
+ */ + public synchronized void schedule(long timeoutMs) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Scheduling: " + this + " timeout = " + timeoutMs); + if (timeoutMs <= 0 && _log.shouldLog(Log.WARN)) + timeoutMs = 1; // otherwise we may execute before _future is updated, which is fine + // except it triggers 'early execution' warning logging + _future = _pool.schedule(this, timeoutMs); + } + + /** + * Use the earliest of the new time and the old time + * Do not call from within timeReached() + * + * @param timeoutMs + */ + public void reschedule(long timeoutMs) { + reschedule(timeoutMs, true); + } + + /** + * useEarliestTime must be false if called from within timeReached(), as + * it won't be rescheduled, in favor of the currently running task + * + * @param timeoutMs + * @param useEarliestTime if its already scheduled, use the earlier of the + * two timeouts, else use the later + */ + public synchronized void reschedule(long timeoutMs, boolean useEarliestTime) { + long oldTimeout; + boolean scheduled = _future != null && !_future.isDone(); + if (scheduled) + oldTimeout = _future.getDelay(TimeUnit.MILLISECONDS); + else + oldTimeout = timeoutMs; + // don't bother rescheduling if within _fuzz ms + if ((oldTimeout - _fuzz > timeoutMs && useEarliestTime) || + (oldTimeout + _fuzz < timeoutMs && !useEarliestTime)|| + (!scheduled)) { + if (scheduled) { + if (_log.shouldLog(Log.INFO)) + _log.info("Re-scheduling: " + this + " timeout = " + timeoutMs + " old timeout was " + oldTimeout); + cancel(); + } + schedule(timeoutMs); + } + } + + /** + * Always use the new time - ignores fuzz + * @param timeoutMs + */ + public synchronized void forceReschedule(long timeoutMs) { + cancel(); + schedule(timeoutMs); + } + + /** returns true if cancelled */ + public synchronized boolean cancel() { + if (_future == null) + return false; + return _future.cancel(false); + } + + public void run() { + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Running: " + this); + long before = 
System.currentTimeMillis(); + long delay = 0; + if (_future != null) + delay = _future.getDelay(TimeUnit.MILLISECONDS); + else if (_log.shouldLog(Log.WARN)) + _log.warn(_pool + " wtf, no _future " + this); + // This can be an incorrect warning especially after a schedule(0) + if (_log.shouldLog(Log.WARN) && delay > 100) + _log.warn(_pool + " wtf, early execution " + delay + ": " + this); + else if (_log.shouldLog(Log.WARN) && delay < -1000) + _log.warn(" wtf, late execution " + delay + ": " + this + _pool.debug()); + try { + timeReached(); + } catch (Throwable t) { + _log.log(Log.CRIT, _pool + " wtf, event borked: " + this, t); + } + long time = System.currentTimeMillis() - before; + if (time > 500 && _log.shouldLog(Log.WARN)) + _log.warn(_pool + " wtf, event execution took " + time + ": " + this); + long completed = _pool.getCompletedTaskCount(); + if (_log.shouldLog(Log.INFO) && completed % 250 == 0) + _log.info(_pool.debug()); + } + + /** + * Simple interface for events to be queued up and notified on expiration + * the time requested has been reached (this call should NOT block, + * otherwise the whole SimpleTimer gets backed up) + * + */ + public abstract void timeReached(); + } + + public String toString() { + return _name; + } + + private long getCompletedTaskCount() { + return _executor.getCompletedTaskCount(); + } + + private String debug() { + _executor.purge(); // Remove cancelled tasks from the queue so we get a good queue size stat + return + " Pool: " + _name + + " Active: " + _executor.getActiveCount() + '/' + _executor.getPoolSize() + + " Completed: " + _executor.getCompletedTaskCount() + + " Queued: " + _executor.getQueue().size(); + } +} + diff --git a/debian/changelog b/debian/changelog new file mode 100644 index 000000000..bb17d98ae --- /dev/null +++ b/debian/changelog @@ -0,0 +1,4 @@ +i2p (0.7-0) testing; urgency=low + * just setting this debian thing up + um... 
+ -- dream Wed, 01 Jan 2009 17:14:57 +0000 diff --git a/debian/control b/debian/control new file mode 100644 index 000000000..5c9d9f112 --- /dev/null +++ b/debian/control @@ -0,0 +1,24 @@ +Source: i2p +Maintainer: jrandom +Section: net +Priority: optional +Homepage: http://dev.i2p2.de +Build-Depends: java-sdk, ant +Recommends: libgmp3c2 +Version: 0.7-0 +Tags: implemented-in::java, interface::daemon, network::client, network::server, role::program, security::cryptography + +Package: i2p +Architecture: all +Section: net +Priority: optional +Depends: java-runtime +Recommends: libgmp3c2 +Description: load-balanced unspoofable packet switching network + I2P is an anonymizing network, offering a simple layer that identity-sensitive + applications can use to securely communicate. All data is wrapped with several + layers of encryption, and the network is both distributed and dynamic, with no + trusted parties. +Homepage: http://www.i2p2.de +Version: 0.7-0 +Tags: implemented-in::java, interface::daemon, network::client, network::server, role::program, security::cryptography diff --git a/debian/copyright b/debian/copyright new file mode 100644 index 000000000..0b434e175 --- /dev/null +++ b/debian/copyright @@ -0,0 +1,8 @@ +everything is released according to the +terms of the I2P license policy. For the I2P SDK, +that means everything contained within this +module is released into the public domain unless +otherwise marked. Alternate licenses that may be +used include BSD (used by thecrypto's DSA, ElGamal, +and SHA256 implementations), Cryptix (used by cryptix's +AES implementation), and MIT. 
diff --git a/debian/rules b/debian/rules new file mode 100755 index 000000000..6f8aa2c9b --- /dev/null +++ b/debian/rules @@ -0,0 +1,20 @@ +#!/usr/bin/make -f + +build: + ant preppkg && \ + (cd pkg-temp; chmod +x postinstall.sh) && \ + mkdir -p debian/tmp/var/lib && \ + mkdir -p debian/tmp/etc/init.d && \ + cp -a debian/scripts/init debian/tmp/etc/init.d/i2p && \ + cp -a pkg-temp debian/tmp/var/lib/i2p && \ + touch debian/build +binary: build + mkdir -p debian/tmp/DEBIAN && \ + dpkg-gencontrol && \ + cp -a debian/scripts/postinst debian/scripts/postrm debian/scripts/prerm debian/tmp/DEBIAN && \ + dpkg-deb -b debian/tmp .. +clean: + rm -f debian/build + ant clean + rm -Rf pkg-temp + @exit 0 diff --git a/debian/scripts/init b/debian/scripts/init new file mode 100755 index 000000000..5d04ac8fe --- /dev/null +++ b/debian/scripts/init @@ -0,0 +1,54 @@ +#! /bin/sh + +### BEGIN INIT INFO +# Provides: i2p +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: +# Default-Stop: 1 2 3 4 5 +# Short-Description: I2P anonymizing mixnet +### END INIT INFO + +set -e + +. 
/lib/lsb/init-functions + +function I2P { + su i2p -c "/var/lib/i2p/i2prouter $1" +} + +case "$1" in + start) + log_daemon_msg "Starting I2P" "i2p" + if I2P start; then + log_end_msg 0 + else + log_end_msg 1 + fi + ;; + stop) + log_daemon_msg "Stopping I2P" "i2p" + if I2P stop; then + log_end_msg 0 + else + log_end_msg 1 + fi + ;; + restart) + log_daemon_msg "Restarting I2P" "i2p" + if I2P restart; then + log_end_msg 0 + else + log_end_msg 1 + fi + ;; + status) + I2P status + ;; + + *) + log_action_msg "Usage: /etc/init.d/i2p {start|stop|restart|status}" + exit 1 +esac + +exit 0 diff --git a/debian/scripts/postinst b/debian/scripts/postinst new file mode 100755 index 000000000..a1f03b83a --- /dev/null +++ b/debian/scripts/postinst @@ -0,0 +1,9 @@ +#!/bin/sh +TOP=/var/lib/i2p +useradd -b $TOP -r i2p 2>/dev/null +chown i2p $TOP -R + +update-rc.d + +cd $TOP +exec su i2p -c ./postinstall.sh diff --git a/debian/scripts/postrm b/debian/scripts/postrm new file mode 100755 index 000000000..633f3a9f4 --- /dev/null +++ b/debian/scripts/postrm @@ -0,0 +1,2 @@ +#!/bin/sh +exec userdel i2p diff --git a/debian/scripts/prerm b/debian/scripts/prerm new file mode 100755 index 000000000..cd32c3754 --- /dev/null +++ b/debian/scripts/prerm @@ -0,0 +1,2 @@ +#!/bin/sh +exec /etc/init.d/i2p stop diff --git a/history.txt b/history.txt index 8a9ef5791..e76d41055 100644 --- a/history.txt +++ b/history.txt @@ -1,3 +1,309 @@ +* 2009-03-29 0.7.1 released + +2009-03-29 Complication + * Update versions, package release + +2009-03-27 zzz + * Add readme_fr.html + * License splash update + * Catch rare TunnelGatewayMessage AIOOB, root cause unknown + +2009-03-24 zzz + * I2PTunnel: + - Add some warnings about new features + - Fix encrypted leasesets broken in about -4 + - Suppress log error on manual stop + - Fix NPE on close of a tunnel not open yet + * Transport: + - Increase default bw to 64/32, burst 80/40 + * Tunnels: Change some fragmentation errors to warns + +2009-03-16 zzz + * help.jsp: Add 
some + * I2PTunnel: Cleanup + * I2PTunnelHTTPClient: Fix NPE on delayed open + * I2PTunnelHTTPServer: Maybe catch an NPE + * SOCKS: Allow .onion addresses for onioncat testing + * Tunnel: Catch a rare AIOOB + +2009-03-09 zzz + * Client: + - Clean up retry code + - Bring I2CP listen error to the summary bar + http://forum.i2p/viewtopic.php?t=3133 + * I2PSnark: Remove the http from the add torrent box + * I2PTunnel: + - Add persistent key option for standard and IRC clients + - Add delay-open option for clients + - Get regenerate-dest-on-reconnect working + - Add default key file name + - Add link to addressbook + - I2PSink: Send protocol byte + * OCMOSJ: + - Change from 5% reply requests to at least + once per minute, in hopes of reducing IRC drops + - More clean up of the cache cleaning + * Routerconsole: Don't OOM configpeer.jsp on huge blocklists + +2009-02-26 zzz + * I2CP Client: Add support for muxing + * I2PTunnel: + - Add new IRCServer tunnel type + - Add SOCKS 4/4a support + - Catch OOMs in HTTPServer + - Name the IRCClient filter threads + - Port Streamr to I2PTunnel + - The beginnings of SOCKS UDP support + * Naming: Add reverse lookup by hash + * OCMOSJ: Clean up the cache cleaning + * Router: Move addShutdownTask from Router to I2PAppContext + so that apps can register more easily + * Routerconsole: + - Thread hard shutdown and restart requests from the routerconsole, + and add a delay even if no tunnels, to allow time for a UI response + - Sort the summary bar destinations + - Move dest-to-hash converter to new helper class so we can + use it in i2ptunnel + +2009-02-22 sponge + * BOB: Orphan tunnel issue fix, bump BOB version + * bump to Build 6 + +2009-02-16 zzz + * Streaming lib: Plug timer leak, don't send keepalives + after close, don't disconnect hard after close + +2009-02-15 zzz + * Add licenses to all packages + * I2PSession: Concurrent _messagesReceived + * i2psnark: tmp file removal try #3 + * I2PTunnel: + - Don't buffer POST data in 
HTTPClient + - Display destination even when stopped + - Enable key generation, dest modification, and + hashcash estimation in the GUI + - Add new CONNECT client + * NetDb: Enforce 60s minimum leaseset publish interval + * Streaming lib: + - Plug connection leak + - Move ConEvent from SimpleTimer to SimpleScheduler + - Move RetransmissionTimer (ResendPacketEvent) + from SimpleTimer to new SimpleTimer2 + - Move ActivityTimer and Flusher from SimpleTimer to RetransmissionTimer + - SimpleTimer2 allows specifying "fuzz" to reduce + timer queue churn further + * Susidns: Fix save of new dest broken in 0.7 + * TunnelPool: + - Allow leasesets with reduced leases for robustness and startup speed + - Plug in-progress build leak + +2009-02-07 zzz + * ClientConnectionRunner, Shitlist, TunnelDispatcher: + Update using concurrent + * Streaming ConnectionHandler: Bound SYN queue and + use concurrent to prevent blowup + * HTTP Proxy: Fix error msg for b32 addresses + * I2CP: Implement optional reduce tunnels on idle - not hooked + in to i2ptunnel GUI yet - still needs tweaks + * I2CP MessageReader: Prevent rare NPE + * I2CP Writer: Rewrite using concurrent + * i2psnark: Add torrent and connection count + * I2PTunnel & I2CP: + - Fix tunnel reduction/restore, hook in the GUI + - Hook leaseset encryption into the GUI + - Implement saves for all the new stuff + - Add cancel button + - Add b32 display for non-http servers + - Prep for CONNECT + - Fix error msg when connection goes away + * NetDb: Remove all DataPublisher stuff + * Wrapper: Remove dup timeout + +2009-02-02 sponge + * Final? cleanups to Slackbuilds. + * ant target for Slackbuilds. + +2009-02-01 sponge + * Slackbuild files... 
if we can have them for Debian, why not :-) + +2009-02-01 zzz + * Convert some inner classes to static (findbugs) + * DataHelper.readLong(): Was returning -1 on EOF instead + of throwing exception + * i2psnark: Increase tunnels and pipeline to 3 + * NTCP: Use a java.util.concurrent execution queue instead of + SimpleTimer for afterSend() to reduce lock contention + * Remove source from susimail.war, susidns.war, i2ptunnel.war (85KB) + * Routerconsole: + - Move common methods to new HelperBase class + - Make reseed link a button + * SimpleScheduler: New replacement for SimpleTimer when events + will not be rescheduled or cancelled, to reduce SimpleTimer + lock contention + * Tunnel Pool: + - Remove tunnel from participating if can't contact next hop + - Fail outbound build faster if can't contact first hop + * Wrapper: Remove dup timeout + +2009-01-31 dream + * Debian files + +2009-01-31 sponge + * One line BOB discarded interger fix + (not that it mattered at this point) + +2009-01-25 zzz + * Build files: + - Don't bundle unneeded XML parser xercesImpl.jar (1MB) + - Don't include unneeded stuff in Copy, Delete, Exec.jar (300KB) + * I2CP: + Implement new I2CP message ReconfigureSessionMessage. + Will be used for tunnel reduction. 
+ * I2PTunnel Edit Pages: + - Change default length to 2+0 + - Cleanup helper code + - Prevent null spoofhost + - Stub out the following new options (C=client, S=server): + + Access list (S) + + Certificate type (S) + + Encrypted LeaseSet (S) + + New dest on idle restart (C) + + Tunnel closure on idle (C) + + Tunnel reduction on idle (C,S) + * I2PTunnel Socks: + - Add support for SOCKS to GUI + - Don't NPE on SOCKS 4, just close + - Don't have SOCKS build a new dest for every request + - Beginnings of SOCKS configuration by port + - HTML error msg for attempted HTTP access + * LeaseSet: Add encrypt/decrypt methods + * netdb.jsp: Don't show stats by default + * OCMOSJ: Bundle a reply when we switch tunnel or lease, + to detect failure sooner + * PublishLocalRouterInfoJob: + - Delay for 5m at startup + - Run every 20m (was 7.5m) + * RebuildRouterInfoJob: Don't run it + * Router: Add a keyring for decrypting leases + * Routerconsole: Add configkeyring.jsp + * SummaryHelper.getTransferred() move to DataHelper, + rename to formatSize(), use on tunnels.jsp + * Streaming, I2CP, Client Message sending: + Pass message timeout through new I2CP message + SendMessageExpiresMessage, so that the router + uses the same expiration as the streaming lib. + Should help reliability. 
+ * Streaming: TCB control block sharing + +* 2009-01-24 0.7 released + +2009-01-24 Complication + * Update versions, package release + +2009-01-17 zzz + * NTCP: Prevent two NTCP Pumpers + +2009-01-14 zzz + * config.jsp: Fix burst seconds display + * HTTPClient: Fix per-tunnel settings for i2cp.gzip and + i2ptunnel.httpclient.send* (thx tino) + * i2psnark: + - Fix double completion message + - Add crstrack + * initialNews.xml: Add .de (thx echelon) + * Message: Always distribute an inbound msg back out + a tunnel to foil a possible latency-measuring attack + (welterde) + * Naming: + - Change base32 names to *.b32.i2p + - Add i2p.naming.hostsTxt.useB32 config + * profiles.jsp: Remove 1m column + * SAM: Don't build tests by default + * Streaming: + - Prevent a rare NPE + - Reduce initial RTT to 8s (was 10s) + * tunnels.jsp: Add netdb links + +2009-01-08 zzz + * addressbook: Prevent Base32 hostnames + * build.xml: Remove readme_xx.html from updater + * configtunnels.jsp: Fix display of outbound backup count + * configupdate.jsp: Fix corruption of update URLs + * i2psnark: Recognize Robert 0.3 and 4 + * ExploreJob/SearchJob - fix brokenness: + - Give each search a minimum of time even at the end + - Fix ExploreJob exclude peer list + - Always add floodfills to exclude peer list + - Don't queue keys for exploration or run ExploreJob + if floodfill + - Allow floodfills to return non-floodfills in + a DSRM msg so exploration works + * ExploreJob/SearchJob - more fixes: + - Disable ExploreKeySelectorJob completely, just have + StartExplorersJob select a random key if queue is empty + - Add netDb.alwaysQuery=[B64Hash] for debugging + - Queue results of exploration for more exploration + - Floodfills periodically shuffle their KBuckets, and + FloodfillPeerSelector sorts more keys, so that + exploration works well + * Shitlist: Reduce max time to 30m (was 60m) + * Streaming: + - Reduce default initial window size from 12 to 6, + to account for the MTU increase in the last 
release + and try to limit initial packet loss + - Reduce fast retransmit threshold from 3 to 2 + * Transport: Don't shitlist a peer if we are at our + connection limit + +2009-01-03 zzz + * config.jsp: Move the buttons up + * configservice.jsp: Clean up and fix the broken (?) + browser launch configuration + * i2psnark: + - Try again to remove the i2psnarkurl files on shutdown + - Sort torrents with a locale-based sort + * NetDb: + - Expire routers with introducers after 90m. + This should improve reachability to firewalled routers + by keeping introducer info current. + - Expire routers with no addresses after 90m. + - Convert to java concurrent + * Stats: Add router.memoryUsed, graph by default + * Summary bar: Remove spurious UDP warning on startup + * UpdateHandler: Make extensible for upcoming + torrent updater + +2008-12-15 zzz + * Remove apps/ bogobot jdom pants q rome stasher syndie + +2008-12-14 zzz + * Contexts: Add int getProperty(String prop, int default) + * I2PAppThread: Constructor fix + * More split classes into their own files for mkvore + * Streaming: Don't build test cases by default + * Summary bar: Replace links with buttons + * Transport: + - Cleanup max connections code + - Add i2np.udp.maxConnections + - Set max connections based on share bandwidth + - Add haveCapacity() that can be used for connection + throttling in the router + - Reject IBGW/OBEP requests when near connection limit + - Reduce idle timeout when near connection limit + * Tunnel request handler: + - Require tunnel.dropLoad* stats + - Speed up request loop + * I2CP, HostsTxtNamingService, I2PTunnel: + Implement Base32 Hash hostnames, via the naming service. + Names are of the form [52-characters].i2p, where + the 52 characters are the Base32 representation of our + 256-byte hash. The client requests a lookup of the hash + via a brief I2CP session using new I2CP request/reply + messages. The router looks up the leaseset for the hash + to convert the hash to a dest. 
Convert the I2PTunnel + 'preview' links to use Base32 hostnames as a + demonstration. + 2008-12-08 zzz * ATalk: Move from core to apps * Blocklists: enable by default, include blocklist file diff --git a/hosts.txt b/hosts.txt index b32734a71..95ca28af6 100644 --- a/hosts.txt +++ b/hosts.txt @@ -312,3 +312,4 @@ galen.i2p=5jpwQMI5FT303YwKa5Rd38PYSX04pbIKgTaKQsWbqoWjIfoancFdWCShXHLI5G5ofOb0Xu tracker.mastertracker.i2p=VzXD~stRKbL3MOmeTn1iaCQ0CFyTmuFHiKYyo0Rd~dFPZFCYH-22rT8JD7i-C2xzYFa4jT5U2aqHzHI-Jre4HL3Ri5hFtZrLk2ax3ji7Qfb6qPnuYkuiF2E2UDmKUOppI8d9Ye7tjdhQVCy0izn55tBaB-U7UWdcvSK2i85sauyw3G0Gfads1Rvy5-CAe2paqyYATcDmGjpUNLoxbfv9KH1KmwRTNH6k1v4PyWYYnhbT39WfKMbBjSxVQRdi19cyJrULSWhjxaQfJHeWx5Z8Ev4bSPByBeQBFl2~4vqy0S5RypINsRSa3MZdbiAAyn5tr5slWR6QdoqY3qBQgBJFZppy-3iWkFqqKgSxCPundF8gdDLC5ddizl~KYcYKl42y9SGFHIukH-TZs8~em0~iahzsqWVRks3zRG~tlBcX2U3M2~OJs~C33-NKhyfZT7-XFBREvb8Szmd~p66jDxrwOnKaku-G6DyoQipJqIz4VHmY9-y5T8RrUcJcM-5lVoMpAAAA codevoid.i2p=tV-4GJjgYIoCDTTJ91nfDbhSnT8B2o3v-TUfHtiAAjJJdroCAEDbmJWFPUQJEEispvrjNe~fP7VAYkk9fAhSrmdBLtEGB3NUESdiZEPsDtKJBdxijPGb1erZF2Z6eYHoK-t5g7MWWTsgLz~4xn211Jpfa-T4pqL2tcjsa7ixsaMpHF8NXFrITdyxSJRPz8OnHYgDR~ULFyzroi255MpiSUBzGcUZEiQSFLHLhjT5D5tP~gfJirFnfgOHvzWBK9L7y91qY~gYvM2eDcxMxq4Ac1gw0JeahkzAk3j6Spco3LHW3bJvELopf1QmLFu3nfPaegH1Hejt9AhXEH~FV-~M9F1BePipcIYlm7nKyre3aVPLYDZSCvkUx~8nnD3HEpMijD8fdfqSFPU7aZQe19a7rZJUbX~a4M3rBDO-C4uAid6Uznb1tLu2XR1GVVITGHaLwmumImXjlU~1nEnluBQB6iBQPZ9xJccArlYgWSooR9gpyN93PwTPsPe5cPkxCFuxAAAA echelon.i2p=w6zK9m4fqSfvJck9EGIR1wRIbWsEQ2DkjZ-VI57ESFqLqbTIA1cD5nOfSSbpELqPyhjifdrNiBNAsSdyil3C0a2B7CGtwUcTS2dCG0tKf2nAbvpsbcCK17nI4Xbu5KqZU0y3hJ~l7rcJqQBR0nfV5cU30ZDrpQV6VL875cihGlnmwLFq6qSzNcEb88Nw6wFG~FIgB2PJ6A3jJyuTnLrdiMvwqgD6nSyeOylOgBCsNxXh8-drrhASjladfNrwjlGRCZTiQ~H92HIyOwiabDiG3TUugMaFWs87yuXnZ~ni9jgjoAMFo8xV8Od2BiRgCxkZoMU07FhgUjew9qtXNa04wkexf3gx77nVPhqE0GHqCuwHwmBVf92RdYEys76u~akaOMq5UhayDpCBCaHiYLkKDNqmh47tfMCwxf6z8VIcR4zv25QfJDIWPs~RA~9U7m4raytiAs5PvYZBn4B3SqOL8XdkL9sDT54sQXbsYCJr3olu6ieMtNWlmos0uohYXNUyAAAA 
+crstrack.i2p=b4G9sCdtfvccMAXh~SaZrPqVQNyGQbhbYMbw6supq2XGzbjU4NcOmjFI0vxQ8w1L05twmkOvg5QERcX6Mi8NQrWnR0stLExu2LucUXg1aYjnggxIR8TIOGygZVIMV3STKH4UQXD--wz0BUrqaLxPhrm2Eh9Hwc8TdB6Na4ShQUq5Xm8D4elzNUVdpM~RtChEyJWuQvoGAHY3ppX-EJJLkiSr1t77neS4Lc-KofMVmgI9a2tSSpNAagBiNI6Ak9L1T0F9uxeDfEG9bBSQPNMOSUbAoEcNxtt7xOW~cNOAyMyGydwPMnrQ5kIYPY8Pd3XudEko970vE0D6gO19yoBMJpKx6Dh50DGgybLQ9CpRaynh2zPULTHxm8rneOGRcQo8D3mE7FQ92m54~SvfjXjD2TwAVGI~ae~n9HDxt8uxOecAAvjjJ3TD4XM63Q9TmB38RmGNzNLDBQMEmJFpqQU8YeuhnS54IVdUoVQFqui5SfDeLXlSkh4vYoMU66pvBfWbAAAA diff --git a/initialNews.xml b/initialNews.xml index 34da38eb3..0306dcd7b 100644 --- a/initialNews.xml +++ b/initialNews.xml @@ -1,5 +1,5 @@ - - +

      Congratulations on getting I2P installed!

        @@ -18,12 +18,35 @@ If you can, open up port 8887 on your firewall, then enable inbound TC
      • Once you have a "shared clients" destination listed on the left, please check out our -FAQ. +FAQ.
      • Point your IRC client to localhost:6668 and say hi to us on #i2p.
      + + +

      Gratulation zur erfolgreichen Installation von I2P!

      +
        +
      • +Willkommen bei I2P! +Bitte noch etwas Geduld während I2P startet und weitere I2P Router findet. +
      • +
      • +Passe bitte In der Wartezeit deine Einstellungen zur Bandbreite auf der +Einstellungsseite an. +
      • +
      • +Bitte öffne sobald möglich den Port 8887 in deiner Firewall, aktiviere danach den eingehenden TCP Verkehr auf der Einstellungsseite. +
      • +
      • +Sobald auf der linken Seite eine "shared clients" Verbindung aufgelistet ist besuche bitte unsere FAQ. +
      • +
      • +Verbinde deinen IRC Klienten mit dem Server auf localhost:6668 und sage Hallo zu uns im Kanal #i2p. +
      • +
      +
      diff --git a/installer/install.xml b/installer/install.xml index c8a0787a5..125e92231 100644 --- a/installer/install.xml +++ b/installer/install.xml @@ -4,7 +4,7 @@ i2p - 0.6.5 + 0.7.1 diff --git a/installer/resources/blocklist.txt b/installer/resources/blocklist.txt index 8a1614e7f..1eedb0232 100644 --- a/installer/resources/blocklist.txt +++ b/installer/resources/blocklist.txt @@ -10,7 +10,7 @@ # A more reasonable list: http://www.bluetack.co.uk/config/level1.zip # # You may also wish to add the bogons from http://www.cymru.com/Documents/bogon-list.html , -# but you will have top update your blocklist manually as IP ranges are assigned. +# but you will have to update your blocklist manually as IP ranges are assigned. # You must update this list yourself, it is not overwritten by the update process. # # * Acceptable formats (IPV4 only): diff --git a/installer/resources/blogMeta.snm b/installer/resources/blogMeta.snm deleted file mode 100644 index 1345eab10..000000000 --- a/installer/resources/blogMeta.snm +++ /dev/null @@ -1,7 +0,0 @@ -Owner:U1oHd4XghnvqZzFaxx2Z8ogH9bfkJ4MCMUZKIu5lGV~0098TLaqB~pOc~GyAPtP55ckS54KX1uJN3pttaawwt61edntulHpatOCwrw5lpAytJcpZhJaahs64NhdnNeFCindHbXFxYU7BiRt7iyHswMlGjup~03uy7xp-JdWlNjw= -Posters: -Name:jrandom -Edition:0 -ContactURL:jrandom@i2p.net -Description:jrandom's ranting -Signature:PstiGeiWOV8VKARVNvk4NRe-EOAwS10yGHMkXb~FUS7GBMVHxaGeDA== diff --git a/installer/resources/blogPost.snd b/installer/resources/blogPost.snd deleted file mode 100644 index e0baf0626..000000000 Binary files a/installer/resources/blogPost.snd and /dev/null differ diff --git a/installer/resources/dnfh-header.ht b/installer/resources/dnfh-header.ht index 2f8548448..ff6195ca9 100644 --- a/installer/resources/dnfh-header.ht +++ b/installer/resources/dnfh-header.ht @@ -4,41 +4,44 @@ Cache-control: no-cache Connection: close Proxy-Connection: close - -Eepsite unknown - - - - - -
      -The eepsite was not found in your router's addressbook. -Check the link or find a BASE64 address. -If you have the BASE64 address, paste it into your userhosts.txt using -SusiDNS, -use a BASE64 address helper, or use a jump service link below. -

      Could not find the following destination:

      + +Eepsite unknown + + + + + +
      +The eepsite was not found in your router's addressbook. +Check the link or find a BASE64 address. +If you have the BASE64 address, paste it into your userhosts.txt using +SusiDNS, +use a BASE64 address helper, or use a jump service link below. +Seeing this page often? See the FAQ +for help in adding some subscriptions +to your addressbook. +

      Could not find the following destination:

      diff --git a/installer/resources/readme.license.txt b/installer/resources/readme.license.txt index d707e557c..9e40f595c 100644 --- a/installer/resources/readme.license.txt +++ b/installer/resources/readme.license.txt @@ -16,12 +16,13 @@ following non-public domain code: * Bouncycastle's hash routines (MIT license) * Cryptix's AES routines (Cryptix license) * Adam Buckley's SNTP routines (BSD) +* FSF's PRNG and GMP (LGPL) Also included in this distribution are a bunch of third party client applications, all with their own dependencies. Please see our license policy page for details: - http://www.i2p.net/licenses + http://www.i2p2.de/licenses One of the bundled client apps (routerconsole) requires us to say: @@ -29,8 +30,11 @@ requires us to say: the Apache Software Foundation (http://www.apache.org/) -Another (I2PTunnel) is GPL licensed. +I2PTunnel, I2PSnark, SusiDNS, and SusiMail +are GPL licensed. + +For more information see LICENSE.txt +in the install directory. For source, please see: - http://www.i2p.net/download -or http://www.i2p.net/cvs \ No newline at end of file + http://www.i2p2.de/monotone diff --git a/installer/resources/wrapper.config b/installer/resources/wrapper.config index 2550f4e3d..4d09ba12c 100644 --- a/installer/resources/wrapper.config +++ b/installer/resources/wrapper.config @@ -47,14 +47,13 @@ wrapper.java.classpath.12=lib/jasper-runtime.jar wrapper.java.classpath.13=lib/commons-logging.jar wrapper.java.classpath.14=lib/commons-el.jar wrapper.java.classpath.15=lib/ant.jar -wrapper.java.classpath.16=lib/xercesImpl.jar # java service wrapper, BSD -wrapper.java.classpath.17=lib/wrapper.jar +wrapper.java.classpath.16=lib/wrapper.jar # systray, LGPL -wrapper.java.classpath.18=lib/systray.jar -wrapper.java.classpath.19=lib/systray4j.jar +wrapper.java.classpath.17=lib/systray.jar +wrapper.java.classpath.18=lib/systray4j.jar # BOB -wrapper.java.classpath.20=lib/BOB.jar +wrapper.java.classpath.19=lib/BOB.jar # Java Library Path (location of 
Wrapper.DLL or libwrapper.so) wrapper.java.library.path.1=. @@ -137,14 +136,13 @@ wrapper.on_exit.4=RESTART wrapper.on_exit.5=RESTART # the router may take a few seconds to save state, etc -wrapper.jvm_exit.timeout=10 +wrapper.jvm_exit.timeout=30 # give the OS 60s to clear all the old sockets / etc before restarting wrapper.restart.delay=60 wrapper.ping.interval=600 wrapper.ping.timeout=605 -wrapper.jvm_exit.timeout=30 # use the wrapper's internal timer thread. otherwise this would # force a restart of the router during daylight savings time as well diff --git a/licenses/LICENSE-Addressbook.txt b/licenses/LICENSE-Addressbook.txt new file mode 100644 index 000000000..5b44d6a6b --- /dev/null +++ b/licenses/LICENSE-Addressbook.txt @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2004 Ragnarok + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ diff --git a/licenses/LICENSE-Apache1.1.txt b/licenses/LICENSE-Apache1.1.txt new file mode 100644 index 000000000..cea737d38 --- /dev/null +++ b/licenses/LICENSE-Apache1.1.txt @@ -0,0 +1,60 @@ +/* + * $Header: /home/cvs/jakarta-commons/el/LICENSE.txt,v 1.1.1.1 2003/02/04 00:22:24 luehe Exp $ + * $Revision: 1.1.1.1 $ + * $Date: 2003/02/04 00:22:24 $ + * + * ==================================================================== + * + * The Apache Software License, Version 1.1 + * + * Copyright (c) 1999-2002 The Apache Software Foundation. All rights + * reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The end-user documentation included with the redistribution, if + * any, must include the following acknowlegement: + * "This product includes software developed by the + * Apache Software Foundation (http://www.apache.org/)." + * Alternately, this acknowlegement may appear in the software itself, + * if and wherever such third-party acknowlegements normally appear. + * + * 4. The names "The Jakarta Project", "Commons", and "Apache Software + * Foundation" must not be used to endorse or promote products derived + * from this software without prior written permission. For written + * permission, please contact apache@apache.org. + * + * 5. Products derived from this software may not be called "Apache" + * nor may "Apache" appear in their names without prior written + * permission of the Apache Group. 
+ * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * ==================================================================== + * + * This software consists of voluntary contributions made by many + * individuals on behalf of the Apache Software Foundation. For more + * information on the Apache Software Foundation, please see + * <http://www.apache.org/>. + * + */ diff --git a/licenses/LICENSE-Apache2.0.txt b/licenses/LICENSE-Apache2.0.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/licenses/LICENSE-Apache2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/licenses/LICENSE-BSD.txt b/licenses/LICENSE-BSD.txt new file mode 100644 index 000000000..59a9311c9 --- /dev/null +++ b/licenses/LICENSE-BSD.txt @@ -0,0 +1,27 @@ +Copyright (c) 2009, The I2P Project +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of the I2P nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/LICENSE-ElGamalDSA.txt b/licenses/LICENSE-ElGamalDSA.txt new file mode 100644 index 000000000..6bf735772 --- /dev/null +++ b/licenses/LICENSE-ElGamalDSA.txt @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2003, TheCrypto + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * - Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * - Neither the name of the TheCrypto may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/apps/bogobot/LICENSE.pircbot.txt b/licenses/LICENSE-GPLv2.txt similarity index 99% rename from apps/bogobot/LICENSE.pircbot.txt rename to licenses/LICENSE-GPLv2.txt index dcfa4c235..14db8fc79 100644 --- a/apps/bogobot/LICENSE.pircbot.txt +++ b/licenses/LICENSE-GPLv2.txt @@ -2,7 +2,7 @@ Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. - 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -55,7 +55,7 @@ patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. - + GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION @@ -110,7 +110,7 @@ above, provided that you also meet all of these conditions: License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) - + These requirements apply to the modified work as a whole. 
If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in @@ -168,7 +168,7 @@ access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. - + 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is @@ -225,7 +225,7 @@ impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. - + 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License @@ -278,7 +278,7 @@ PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
END OF TERMS AND CONDITIONS - + How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest diff --git a/licenses/LICENSE-HashCash.txt b/licenses/LICENSE-HashCash.txt new file mode 100644 index 000000000..7aa3f3f71 --- /dev/null +++ b/licenses/LICENSE-HashCash.txt @@ -0,0 +1,2 @@ + Copyright 2006 Gregory Rubin grrubin@gmail.com + Permission is given to use, modify, and or distribute this code so long as this message remains attached diff --git a/licenses/LICENSE-I2PTunnel.txt b/licenses/LICENSE-I2PTunnel.txt new file mode 100644 index 000000000..52af2190e --- /dev/null +++ b/licenses/LICENSE-I2PTunnel.txt @@ -0,0 +1,29 @@ +/* + * I2PTunnel + * (c) 2003 - 2004 mihi + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + * + * In addition, as a special exception, mihi gives permission to link + * the code of this program with the proprietary Java implementation + * provided by Sun (or other vendors as well), and distribute linked + * combinations including the two. You must obey the GNU General + * Public License in all respects for all of the code used other than + * the proprietary Java implementation. If you modify this file, you + * may extend this exception to your version of the file, but you are + * not obligated to do so. 
If you do not wish to do so, delete this + * exception statement from your version. + */ diff --git a/licenses/LICENSE-LGPLv2.1.txt b/licenses/LICENSE-LGPLv2.1.txt new file mode 100644 index 000000000..5ab7695ab --- /dev/null +++ b/licenses/LICENSE-LGPLv2.1.txt @@ -0,0 +1,504 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. 
+ + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. 
We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. 
+ + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. 
+ + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. 
+ + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. 
+ + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. 
If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. 
+ + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. 
However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. 
+ +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. 
If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + + diff --git a/licenses/LICENSE-Ministreaming.txt b/licenses/LICENSE-Ministreaming.txt new file mode 100644 index 000000000..50a6a709a --- /dev/null +++ b/licenses/LICENSE-Ministreaming.txt @@ -0,0 +1,10 @@ +$Id$ + +the i2p/apps/ministreaming module is the root of the +ministreaming library, and everything within it +is released according to the terms of the I2P +license policy. That means everything contained +within the i2p/apps/ministreaming module is released +under a BSD license unless otherwise marked. +Alternate licenses that may be used include Cryptix, +MIT, as well as code granted into the public domain. 
diff --git a/licenses/LICENSE-SHA256.txt b/licenses/LICENSE-SHA256.txt new file mode 100644 index 000000000..1eb884a89 --- /dev/null +++ b/licenses/LICENSE-SHA256.txt @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2000 - 2004 The Legion Of The Bouncy Castle + * (http://www.bouncycastle.org) + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated + * documentation files (the "Software"), to deal in the Software + * without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ diff --git a/licenses/LICENSE-SNTP.txt b/licenses/LICENSE-SNTP.txt new file mode 100644 index 000000000..26c104971 --- /dev/null +++ b/licenses/LICENSE-SNTP.txt @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2004, Adam Buckley + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * - Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * - Neither the name of Adam Buckley nor the names of its contributors may be + * used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + */ diff --git a/licenses/LICENSE-Wrapper.txt b/licenses/LICENSE-Wrapper.txt new file mode 100644 index 000000000..039b8927d --- /dev/null +++ b/licenses/LICENSE-Wrapper.txt @@ -0,0 +1,41 @@ +Copyright (c) 1999, 2004 Tanuki Software + +Permission is hereby granted, free of charge, to any person +obtaining a copy of the Java Service Wrapper and associated +documentation files (the "Software"), to deal in the Software +without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sub-license, +and/or sell copies of the Software, and to permit persons to +whom the Software is furnished to do so, subject to the +following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
+ + +Portions of the Software have been derived from source code +developed by Silver Egg Technology under the following license: + +Copyright (c) 2001 Silver Egg Technology + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sub-license, and/or +sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + diff --git a/licenses/NOTICE-Ant.txt b/licenses/NOTICE-Ant.txt new file mode 100644 index 000000000..1fb6dde47 --- /dev/null +++ b/licenses/NOTICE-Ant.txt @@ -0,0 +1,15 @@ + ========================================================================= + == NOTICE file corresponding to the section 4 d of == + == the Apache License, Version 2.0, == + == in this case for the Apache Ant distribution. == + ========================================================================= + + This product includes software developed by + The Apache Software Foundation (http://www.apache.org/). + + This product includes also software developed by : + - the W3C consortium (http://www.w3c.org) , + - the SAX project (http://www.saxproject.org) + + Please read the different LICENSE files present in the root directory of + this distribution. diff --git a/licenses/NOTICE-Commons-Logging.txt b/licenses/NOTICE-Commons-Logging.txt new file mode 100644 index 000000000..439eb83b2 --- /dev/null +++ b/licenses/NOTICE-Commons-Logging.txt @@ -0,0 +1,3 @@ +This product includes software developed by +The Apache Software Foundation (http://www.apache.org/). 
+ diff --git a/news.xml b/news.xml index 465654622..b85e0f130 100644 --- a/news.xml +++ b/news.xml @@ -1,5 +1,5 @@ - - +

      • -2008-12-01: 0.6.5 Released +2009-03-29: 0.7.1 Released

      -The 0.6.5 release introduces new components, -drops some old ones (like the old TCP transport) -and has been optimized to perform better. +The 0.7.1 release optimizes I2P towards better performance +and introduces new features.

      -The BOB (Basic Open Bridge) protocol is introduced, -for use by client applications which cannot import -I2CP libraries directly. This deprecates the old -SAM protocol which was previously used in such cases. -For now however, BOB is not started automatically yet -on new installations, and SAM remains active on old installations. +Multiple bugs are fixed, replacements to the SimpleTimer class +should waste less time on object locking. Some old components +are dropped and several classes refactored to avoid repeating code.

      -Improved code should be better at preventing congestion -by probabalistically dropping participating traffic, -and likewise behave better when congestion occurs. -The floodfill NetDB should operate more reliably, -the streaming library should choose better message sizes, -offer a socket timeout function, and work proceeds -on the "hidden" mode of operation for I2P routers. +Support for encrypted LeaseSets (for creation of links over I2P +which an adversary cannot obstruct by attacking its gateways) +becomes more complete. New tunnel types like IRC server tunnels +and new options like delayed start and idling of tunnels +also gain support, along with improved usability of the I2P +Socks proxy mechanism.

      -From this release onward, I2P requires Java 1.5 or higher. -If you are uncertain about your Java version, you can verify -by opening a terminal window or command prompt, -and entering the command "java -version". -If you have an older Java installed, please update it first!

      - +Work continues on streamlining and expanding the Router Console, +on the BOB protocol, on I2P ports for Debian and Slackware Linux, +on the I2PSnark client, on TCP connection properties +and multiple other fronts. Updating is highly recommended.

      diff --git a/readme.html b/readme.html index 9ba805a72..0e4891b5d 100644 --- a/readme.html +++ b/readme.html @@ -1,4 +1,4 @@ -

      English | Deutsch | Nederlands | Svenska

      +

      English | Deutsch | Français | Nederlands | Svenska

      If you've just started I2P, the Active: numbers on the left should start to grow over the next few minutes and you'll see a "shared clients" local destination listed on the left (if not, see below). Once those show up, diff --git a/readme_de.html b/readme_de.html index a6d233d6a..1534585da 100644 --- a/readme_de.html +++ b/readme_de.html @@ -1,4 +1,4 @@ -

      English | Deutsch | Nederlands | Svenska

      +

      English | Deutsch | Français | Nederlands | Svenska

      Wenn Du gerade I2P gestartet hast, sollten die "Active:" Zahlen links in den nächsten paar Minuten anwachsen und Du siehst dann dort ein "shared clients" lokales Ziel gelistet (falls nicht, siehe Unten). Sobald das erscheint, kannst Du:

      • "Eepsites" besuchen - In I2P sind anonym gehostete Websites - diff --git a/readme_fr.html b/readme_fr.html new file mode 100644 index 000000000..5e619cff2 --- /dev/null +++ b/readme_fr.html @@ -0,0 +1,37 @@ +

        Deutsch | English | Français | Nederlands | Svenska

        +

Si vous venez juste de lancer I2P, les chiffres sur la gauche à côté de Active devraient commencer à augmenter dans les prochaines minutes et vous verrez un "Shared client" en destination locale listé sur la gauche (sinon, voir plus bas). Une fois qu'ils apparaissent, vous pouvez:

        +
          +
• parcourir les "eepsites" - sur I2P il y a des sites web anonymes hébergés - dites à votre navigateur d'utiliser le HTTP proxy à l'adresse localhost port 4444, ensuite vous pouvez naviguer sur les eepsites. + + Il y a bien plus d'eepsites - suivez juste les liens au départ de ceux sur lesquels vous êtes, mettez-les dans vos favoris et visitez-les souvent!
        • +
• Parcourez le web - Il y a pour l'instant un outproxy HTTP sur I2P attaché à votre propre proxy HTTP sur le port 4444 - vous devez simplement configurer le proxy de votre navigateur pour l'utiliser (comme expliqué ci-dessus) et aller sur n'importe quelle URL normale - vos requêtes seront relayées par le réseau i2p.
        • +
• Transfert de fichiers - Il y a un portage intégré de Snark, le client BitTorrent.
        • +
• Utiliser le service de mail anonyme - Postman a créé un système de mails compatible avec un client de messagerie normal (POP3 / SMTP) qui permet d'envoyer des emails autant au sein d'i2p que vers et à partir de l'internet normal! Créez-vous un compte à hq.postman.i2p. + Nous fournissons dans la version de base de i2p susimail, + un client web pop3/smtp orienté sur l'anonymat qui est configuré pour accéder aux services email de postman.
        • +
• Chatter de manière anonyme - Activez votre client IRC et connectez-le sur le serveur localhost port 6668. Ceci pointe vers l'un des deux serveurs IRC anonymes, mais ni vous ni eux ne savent qui est l'autre
        • +
        • Créez-vous un blog anonyme - Renseignez-vous chez Syndie
        • +
        • Et bien d'autres
        • +
        + +

        Vous voulez votre propre eepsite?

        + +

Nous fournissons de base quelques logiciels pour vous permettre de créer votre propre eepsite - une instance +Jetty, qui écoute sur +http://localhost:7658/. Placez simplement vos fichiers dans le répertoire eepsite/docroot/ (ou placez n'importe quel fichier JSP/Servlet standard .war) dans eepsite/webapps, ou script CGI standard dans eepsite/cgi-bin) et ils apparaîtront. Après avoir démarré un tunnel pour votre eepsite (le tunnel doit pointer sur l'adresse locale du eepsite), votre eepsite sera visible pour les autres. Des instructions plus détaillées pour créer un eepsite se trouvent sur Votre eepsite temporaire. +

        + +

        Dépannage

        + +

Soyez patient - i2p peut s'avérer lent à démarrer la première fois car il recherche des pairs. Si, après 30 minutes, votre Actives: connecté/récent compte moins de 10 pairs connectés, vous devez ouvrir le port 8887 sur votre pare-feu pour avoir une meilleure connexion. Si vous ne pouvez accéder à aucun eepsite (même www.i2p2.i2p), soyez sûr que votre navigateur utilise bien le proxy localhost sur le port 4444. Vous pouvez aussi faire part de votre démarche sur le site web I2P, poster des messages sur le forum de discussion, ou passer par #i2p ou #i2p-chat sur IRC sur le serveur irc.freenode.net, irc.postman.i2p ou irc.freshcoffee.i2p (ils sont liés).

        + +

Comme vous pouvez le remarquer, il suffit d'éditer la page "docs/readme.html" pour changer la page d'accueil

        diff --git a/readme_nl.html b/readme_nl.html index 3203c79b5..427f75f91 100644 --- a/readme_nl.html +++ b/readme_nl.html @@ -1,4 +1,4 @@ -

        English | Deutsch | Nederlands | Svenska

        +

        English | Deutsch | Français | Nederlands | Svenska

        Als je net I2P opgestart hebt, zullen de 'Active:' (Actieve) getallen aan de linkerkant in de komende minuten stijgen, en je zal een "Shared clients" (Gedeelde clients) lokale bestemming zien staan aan de linkerkant (indien niet, zie hieronder). Eenmaal je deze bestemming ziet, kan je:

        • surfen naar "eepsites" - op I2P zijn er anonieme websites - stel je browser in om de HTTP proxy op localhost, poort 4444 te gebruiken, en surf vervolgens naar een eepsite - diff --git a/readme_sv.html b/readme_sv.html index a742f1fdf..b1f59fa04 100644 --- a/readme_sv.html +++ b/readme_sv.html @@ -1,61 +1,101 @@ -

          English | Deutsch | Nederlands | Svenska

          -

          Om du just har startat I2P kommer de "Aktiva: #/#" börja öka inom några få minuter och du kommer se en destination kallad "delade klienter" på den vänstra listan (om inte se nedan). När de syns kan du:

          +

          English +| Deutsch | Français | Nederlands | Svenska

          +

          Om du just har startat I2P kommer de "Aktiva: #/#" börja öka inom +några få minuter och du kommer se en destination kallad "delade +klienter" på den vänstra listan (om inte se +nedan). När de syns kan du:

            -
          • surfa pÃ¥ "eepsidor" - inom I2P finns det anonyma sajter - - ställ in din webbläsare till att använda HTTP proxy vid localhost port 4444, surfa sen till en eepsida - +
          • surfa på "eepsidor" - inom I2P finns det anonyma sajter - + ställ in din webbläsare till att använda HTTP proxy vid +localhost port 4444, surfa sen till en eepsida - - Det finns mÃ¥nga fler eepsidor - följ bara länkarna frÃ¥n dom du ser, - spara dina favoriter och besök dom ofta!
          • -
          • surfa pÃ¥ nätet - det finns för närvarande en "utproxy" i I2P som är ansluten - till din egen HTTP proxt pÃ¥ port 4444 - ställ helt enkelt in din webläsares proxy till - att använda den och gÃ¥ till vilken vanlig URL osm helst - dina fröfrÃ¥gningar kommer skickas - genom I2P nätverket.
          • -
          • överföra filer - det finns en integrerad adaption av - Snark BitTorrent + Det finns många fler eepsidor - följ bara länkarna från dom du ser, + spara dina favoriter och besök dom ofta!
          • +
• surfa på nätet - det finns för närvarande en "utproxy" i I2P +som är ansluten + till din egen HTTP proxy på port 4444 - ställ helt enkelt in din +webläsares proxy till + att använda den och gå till vilken vanlig URL som helst - dina +förfrågningar kommer skickas + genom I2P nätverket.
          • +
          • överföra filer - det finns en integrerad översätting av + Snark BitTorrent klienten.
          • -
          • maila anonymt - postman har skapat ett emailsystem som är fungerar med vanliga email-klienter - (POP3 / SMTP),som lÃ¥ter dig skicka email inom I2P sÃ¥ väl som till och frÃ¥n det vanliga Internet! - skaffa dig ett konto hos hq.postman.i2p. +
          • maila anonymt - postman har skapat ett emailsystem som +fungerar med vanliga email-klienter + (POP3 / SMTP),som låter dig skicka email inom I2P så väl som till +och från det vanliga Internet! + skaffa dig ett konto hos hq.postman.i2p. Vi skickar med susimail, - som är en webb-baserad anonymt inriktad pop3/smtp-klient, inställd till att ansluta till postmans email-tjänst.
          • -
          • chatta anonymt - starta din IRC-klient och anslut till servern vid - localhost port 6668. Den pekar mot en av tvÃ¥ anonyma IRC servrar, - men varken du eller dom vet var den andra är.
          • -
          • bloga anonymt - kika pÃ¥ Syndie
          • + som är en webb-baserad anonymt inriktad pop3/smtp-klient, inställd +till att ansluta till postmans email-tjänst. +
          • chatta anonymt - starta din IRC-klient och anslut till +servern vid + localhost port 6668. Den pekar mot en av två anonyma IRC +servrar, + men varken du eller dom vet var den andra är.
          • +
          • blogga anonymt - kika på Syndie
          • och mycket mer

          Vill du ha en egen eepsida?

          -

          Vi har skickat med mjukvara som låter dig driva en egen eepsida - en -Jetty instans lyssnar på -http://localhost:7658/. Lägg helt enkelt dina filer i -eepsite/docroot/ mappen (eller standard JSP/Servlet .war -filer i eepsite/webapps, eller standard CGI-script i eepsite/cgi-bin) -så kommer de synas. När du startat en eepsite tunnel som pekar på Jetty-server, så kommer sajten möjlig att nå för alla andra. -Mer detaljerade instruktionr för att skapa en eepsite finns på -din tillfälliga eepsite. +

          Vi har skickat med mjukvara som låter dig driva en egen eepsida - en +Jetty instans lyssnar på +http://localhost:7658/. Lägg helt +enkelt dina filer i +eepsite/docroot/ mappen (eller standard JSP/Servlet +.war +filer i eepsite/webapps, eller standard CGI-script i +eepsite/cgi-bin) +så kommer de synas. När du startat en eepsite +tunnel som pekar på Jetty-server, så kommer sajten vara möjlig att nå för +alla andra. +Mer detaljerade instruktioner för att skapa en eepsite finns på +din tillfälliga eepsite.

          Problem

          -

          Ha tålamod - I2P kan ta tid att starta första gången, medan den söker efter noder att ansluta till. -Om, efter 30 minuter, "Aktiva: anslutna/anslutna nyligen" statistiken visar mindre än 10 anslutna -noder, bör du öppna port 8887 i din brandvägg. -Om du inte lyckas besöka några eepsidor alls (inte ens www.i2p2.i2p), -försäkra dig om att din webbläsare är inställd till att avända en proxy, localhost på port 4444. -Du kanska också vill kika på information på -I2Ps webbsida, fråga frågor på -I2P diskussions forumet, eller kika förbi #i2p eller -#i2p-chat på IRC vid irc.freenode.net, irc.postman.i2p eller irc.freshcoffee.i2p (de är alla sammankopplade).

          +

          Ha tålamod - I2P kan ta tid att starta första gången, medan den söker +efter noder att ansluta till. +Om, efter 30 minuter, "Aktiva: anslutna/anslutna nyligen" statistiken +visar mindre än 10 anslutna +noder, bör du öppna port 8887 i din brandvägg. +Om du inte lyckas besöka några eepsidor alls (inte ens www.i2p2.i2p), +försäkra dig om att din webbläsare är inställd till att avända en proxy, +localhost på port 4444. +Du kanske också vill kika på information på +I2Ps webbsida, fråga frågor på +I2P diskussions forumet, eller kika +förbi #i2p eller +#i2p-chat på IRC på irc.freenode.net, irc.postman.i2p +eller irc.freshcoffee.i2p (de är alla sammankopplade).

          -

          Du kan förändra denhär sidan genom att ändra i filen "docs/readme_sv.html"

          +

          Du kan förändra denhär sidan genom att ändra i filen +"docs/readme_sv.html"

          diff --git a/router/java/src/net/i2p/data/i2np/TunnelGatewayMessage.java b/router/java/src/net/i2p/data/i2np/TunnelGatewayMessage.java index f611c3213..8fc7c9fd9 100644 --- a/router/java/src/net/i2p/data/i2np/TunnelGatewayMessage.java +++ b/router/java/src/net/i2p/data/i2np/TunnelGatewayMessage.java @@ -75,6 +75,11 @@ public class TunnelGatewayMessage extends I2NPMessageImpl { } DataHelper.toLong(out, curIndex, 2, _msgData.length); curIndex += 2; + // where is this coming from? + if (curIndex + _msgData.length > out.length) { + _log.log(Log.ERROR, "output buffer too small idx: " + curIndex + " len: " + _msgData.length + " outlen: " + out.length); + throw new I2NPMessageException("Too much data to write out (id=" + _tunnelId + " data=" + _msg + ")"); + } System.arraycopy(_msgData, 0, out, curIndex, _msgData.length); curIndex += _msgData.length; return curIndex; diff --git a/router/java/src/net/i2p/router/Blocklist.java b/router/java/src/net/i2p/router/Blocklist.java index 5f686c192..4e94d7709 100644 --- a/router/java/src/net/i2p/router/Blocklist.java +++ b/router/java/src/net/i2p/router/Blocklist.java @@ -256,7 +256,7 @@ public class Blocklist { } } - private class Entry { + private static class Entry { String comment; byte ip1[]; byte ip2[]; @@ -754,37 +754,36 @@ public class Blocklist { // We already shitlisted in shitlist(peer), that's good enough } + /** write directly to the stream so we don't OOM on a huge list */ public void renderStatusHTML(Writer out) throws IOException { - StringBuffer buf = new StringBuffer(1024); - buf.append("

          IP Blocklist

          "); + out.write("

          IP Blocklist

          "); Set singles = new TreeSet(); synchronized(_singleIPBlocklist) { singles.addAll(_singleIPBlocklist); } if (singles.size() > 0) { - buf.append(""); + out.write("
          Transient IPs
          "); for (Iterator iter = singles.iterator(); iter.hasNext(); ) { int ip = ((Integer) iter.next()).intValue(); - buf.append("\n"); + out.write("\n"); } - buf.append("
          Transient IPs
          ").append(toStr(ip)).append("
          "); out.write(toStr(ip)); out.write("
          "); + out.write(""); } if (_blocklistSize > 0) { - buf.append(""); + out.write("
          IPs from Blocklist File
          FromTo
          "); for (int i = 0; i < _blocklistSize; i++) { int from = getFrom(_blocklist[i]); - buf.append("\n"); - else - buf.append(" \n"); + if (to != from) { + out.write(toStr(to)); out.write("\n"); + } else + out.write(" \n"); } - buf.append("
          IPs from Blocklist File
          FromTo
          ").append(toStr(from)).append(""); + out.write("
          "); out.write(toStr(from)); out.write(""); int to = getTo(_blocklist[i]); - if (to != from) - buf.append(toStr(to)).append("
          "); + out.write(""); } else { - buf.append("
          No blocklist file entries"); + out.write("
          No blocklist file entries"); } - out.write(buf.toString()); out.flush(); } diff --git a/router/java/src/net/i2p/router/ClientManagerFacade.java b/router/java/src/net/i2p/router/ClientManagerFacade.java index 6dd1c8e21..2e441fefe 100644 --- a/router/java/src/net/i2p/router/ClientManagerFacade.java +++ b/router/java/src/net/i2p/router/ClientManagerFacade.java @@ -72,6 +72,7 @@ public abstract class ClientManagerFacade implements Service { public abstract void messageReceived(ClientMessage msg); public boolean verifyClientLiveliness() { return true; } + public boolean isAlive() { return true; } /** * Does the client specified want their leaseSet published? */ @@ -92,29 +93,3 @@ public abstract class ClientManagerFacade implements Service { public abstract SessionConfig getClientSessionConfig(Destination dest); public void renderStatusHTML(Writer out) throws IOException { } } - -class DummyClientManagerFacade extends ClientManagerFacade { - private RouterContext _context; - public DummyClientManagerFacade(RouterContext ctx) { - _context = ctx; - } - public boolean isLocal(Hash destHash) { return true; } - public boolean isLocal(Destination dest) { return true; } - public void reportAbuse(Destination dest, String reason, int severity) { } - public void messageReceived(ClientMessage msg) {} - public void requestLeaseSet(Destination dest, LeaseSet set, long timeout, - Job onCreateJob, Job onFailedJob) { - _context.jobQueue().addJob(onFailedJob); - } - public void startup() {} - public void stopAcceptingClients() { } - public void shutdown() {} - public void restart() {} - - public void messageDeliveryStatusUpdate(Destination fromDest, MessageId id, boolean delivered) {} - - public SessionConfig getClientSessionConfig(Destination _dest) { return null; } - - public void requestLeaseSet(Hash dest, LeaseSet set) {} - -} diff --git a/router/java/src/net/i2p/router/ClientMessage.java b/router/java/src/net/i2p/router/ClientMessage.java index 005f69a2d..ec7820d69 100644 
--- a/router/java/src/net/i2p/router/ClientMessage.java +++ b/router/java/src/net/i2p/router/ClientMessage.java @@ -27,6 +27,7 @@ public class ClientMessage { private SessionConfig _senderConfig; private Hash _destinationHash; private MessageId _messageId; + private long _expiration; public ClientMessage() { setPayload(null); @@ -36,6 +37,7 @@ public class ClientMessage { setSenderConfig(null); setDestinationHash(null); setMessageId(null); + setExpiration(0); } /** @@ -91,4 +93,12 @@ public class ClientMessage { */ public SessionConfig getSenderConfig() { return _senderConfig; } public void setSenderConfig(SessionConfig config) { _senderConfig = config; } + + /** + * Expiration requested by the client that sent the message. This will only be available + * for locally originated messages. + * + */ + public long getExpiration() { return _expiration; } + public void setExpiration(long e) { _expiration = e; } } diff --git a/router/java/src/net/i2p/router/CommSystemFacade.java b/router/java/src/net/i2p/router/CommSystemFacade.java index fc354e384..6d0927c63 100644 --- a/router/java/src/net/i2p/router/CommSystemFacade.java +++ b/router/java/src/net/i2p/router/CommSystemFacade.java @@ -34,6 +34,7 @@ public abstract class CommSystemFacade implements Service { public int countActivePeers() { return 0; } public int countActiveSendPeers() { return 0; } + public boolean haveCapacity() { return true; } public List getMostRecentErrorMessages() { return Collections.EMPTY_LIST; } /** @@ -91,9 +92,11 @@ public abstract class CommSystemFacade implements Service { } +/** unused class DummyCommSystemFacade extends CommSystemFacade { public void shutdown() {} public void startup() {} public void restart() {} public void processMessage(OutNetMessage msg) { } } +**/ diff --git a/router/java/src/net/i2p/router/DummyClientManagerFacade.java b/router/java/src/net/i2p/router/DummyClientManagerFacade.java new file mode 100644 index 000000000..61e312875 --- /dev/null +++ 
b/router/java/src/net/i2p/router/DummyClientManagerFacade.java @@ -0,0 +1,47 @@ +package net.i2p.router; +/* + * free (adj.): unencumbered; not under the control of others + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat + * your children, but it might. Use at your own risk. + * + */ + +import net.i2p.data.Destination; +import net.i2p.data.Hash; +import net.i2p.data.LeaseSet; +import net.i2p.data.i2cp.MessageId; +import net.i2p.data.i2cp.SessionConfig; + +/** + * Manage all interactions with clients + * + * @author jrandom + */ +public class DummyClientManagerFacade extends ClientManagerFacade { + private RouterContext _context; + public DummyClientManagerFacade(RouterContext ctx) { + _context = ctx; + } + public boolean isLocal(Hash destHash) { return true; } + public boolean isLocal(Destination dest) { return true; } + public void reportAbuse(Destination dest, String reason, int severity) { } + public void messageReceived(ClientMessage msg) {} + public void requestLeaseSet(Destination dest, LeaseSet set, long timeout, + Job onCreateJob, Job onFailedJob) { + _context.jobQueue().addJob(onFailedJob); + } + public void startup() {} + public void stopAcceptingClients() { } + public void shutdown() {} + public void restart() {} + + public void messageDeliveryStatusUpdate(Destination fromDest, MessageId id, boolean delivered) {} + + public SessionConfig getClientSessionConfig(Destination _dest) { return null; } + + public void requestLeaseSet(Hash dest, LeaseSet set) {} + +} + diff --git a/router/java/src/net/i2p/router/DummyPeerManagerFacade.java b/router/java/src/net/i2p/router/DummyPeerManagerFacade.java new file mode 100644 index 000000000..221d0b2f1 --- /dev/null +++ b/router/java/src/net/i2p/router/DummyPeerManagerFacade.java @@ -0,0 +1,32 @@ +package net.i2p.router; +/* + * free (adj.): unencumbered; not under the control 
of others + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat + * your children, but it might. Use at your own risk. + * + */ + +import java.io.Writer; +import java.util.List; + +import net.i2p.data.Hash; + +/** + * Manage peer references and keep them up to date so that when asked for peers, + * it can provide appropriate peers according to the criteria provided. This + * includes periodically queueing up outbound messages to the peers to test them. + * + */ +class DummyPeerManagerFacade implements PeerManagerFacade { + public void shutdown() {} + public void startup() {} + public void restart() {} + public void renderStatusHTML(Writer out) { } + public List selectPeers(PeerSelectionCriteria criteria) { return null; } + public List getPeersByCapability(char capability) { return null; } + public void setCapabilities(Hash peer, String caps) {} + public void removeCapabilities(Hash peer) {} + public Hash selectRandomByCapability(char capability) { return null; } +} diff --git a/router/java/src/net/i2p/router/DummyTunnelManagerFacade.java b/router/java/src/net/i2p/router/DummyTunnelManagerFacade.java new file mode 100644 index 000000000..f7f204857 --- /dev/null +++ b/router/java/src/net/i2p/router/DummyTunnelManagerFacade.java @@ -0,0 +1,52 @@ +package net.i2p.router; +/* + * free (adj.): unencumbered; not under the control of others + * Written by jrandom in 2003 and released into the public domain + * with no warranty of any kind, either expressed or implied. + * It probably won't make your computer catch on fire, or eat + * your children, but it might. Use at your own risk. + * + */ + +import java.io.IOException; +import java.io.Writer; + +import net.i2p.data.Destination; +import net.i2p.data.Hash; +import net.i2p.data.TunnelId; + +/** + * Build and maintain tunnels throughout the network. 
+ * + */ +class DummyTunnelManagerFacade implements TunnelManagerFacade { + + public TunnelInfo getTunnelInfo(TunnelId id) { return null; } + public TunnelInfo selectInboundTunnel() { return null; } + public TunnelInfo selectInboundTunnel(Hash destination) { return null; } + public TunnelInfo selectOutboundTunnel() { return null; } + public TunnelInfo selectOutboundTunnel(Hash destination) { return null; } + public boolean isInUse(Hash peer) { return false; } + public boolean isValidTunnel(Hash client, TunnelInfo tunnel) { return false; } + public int getParticipatingCount() { return 0; } + public int getFreeTunnelCount() { return 0; } + public int getOutboundTunnelCount() { return 0; } + public int getInboundClientTunnelCount() { return 0; } + public int getOutboundClientTunnelCount() { return 0; } + public long getLastParticipatingExpiration() { return -1; } + public void buildTunnels(Destination client, ClientTunnelSettings settings) {} + public TunnelPoolSettings getInboundSettings() { return null; } + public TunnelPoolSettings getOutboundSettings() { return null; } + public TunnelPoolSettings getInboundSettings(Hash client) { return null; } + public TunnelPoolSettings getOutboundSettings(Hash client) { return null; } + public void setInboundSettings(TunnelPoolSettings settings) {} + public void setOutboundSettings(TunnelPoolSettings settings) {} + public void setInboundSettings(Hash client, TunnelPoolSettings settings) {} + public void setOutboundSettings(Hash client, TunnelPoolSettings settings) {} + public int getInboundBuildQueueSize() { return 0; } + + public void renderStatusHTML(Writer out) throws IOException {} + public void restart() {} + public void shutdown() {} + public void startup() {} +} diff --git a/router/java/src/net/i2p/router/LoadTestManager.java b/router/java/src/net/i2p/router/LoadTestManager.java index 55adb6138..d3e5c46f5 100644 --- a/router/java/src/net/i2p/router/LoadTestManager.java +++ 
b/router/java/src/net/i2p/router/LoadTestManager.java @@ -117,12 +117,7 @@ public class LoadTestManager { private int getConcurrency() { if (!isEnabled(_context)) return 0; - int rv = CONCURRENT_PEERS; - try { - rv = Integer.parseInt(_context.getProperty("router.loadTestConcurrency", CONCURRENT_PEERS+"")); - } catch (NumberFormatException nfe) { - rv = CONCURRENT_PEERS; - } + int rv = _context.getProperty("router.loadTestConcurrency", CONCURRENT_PEERS); if (rv < 0) rv = 0; if (rv > 50) @@ -131,15 +126,7 @@ public class LoadTestManager { } private int getPeerMessages() { - String msgsPerPeer = _context.getProperty("router.loadTestMessagesPerPeer"); - int rv = CONCURRENT_MESSAGES; - if (msgsPerPeer != null) { - try { - rv = Integer.parseInt(msgsPerPeer); - } catch (NumberFormatException nfe) { - rv = CONCURRENT_MESSAGES; - } - } + int rv = _context.getProperty("router.loadTestMessagesPerPeer", CONCURRENT_MESSAGES); if (rv < 1) rv = 1; if (rv > 50) diff --git a/router/java/src/net/i2p/router/NetworkDatabaseFacade.java b/router/java/src/net/i2p/router/NetworkDatabaseFacade.java index ed4fe1555..e4a5ce08b 100644 --- a/router/java/src/net/i2p/router/NetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/NetworkDatabaseFacade.java @@ -62,4 +62,5 @@ public abstract class NetworkDatabaseFacade implements Service { public int getKnownRouters() { return 0; } public int getKnownLeaseSets() { return 0; } public void renderRouterInfoHTML(Writer out, String s) throws IOException {} + public void renderStatusHTML(Writer out, boolean b) throws IOException {} } diff --git a/router/java/src/net/i2p/router/PeerManagerFacade.java b/router/java/src/net/i2p/router/PeerManagerFacade.java index bebe1a927..791b77616 100644 --- a/router/java/src/net/i2p/router/PeerManagerFacade.java +++ b/router/java/src/net/i2p/router/PeerManagerFacade.java @@ -32,15 +32,3 @@ public interface PeerManagerFacade extends Service { public void removeCapabilities(Hash peer); public Hash 
selectRandomByCapability(char capability); } - -class DummyPeerManagerFacade implements PeerManagerFacade { - public void shutdown() {} - public void startup() {} - public void restart() {} - public void renderStatusHTML(Writer out) { } - public List selectPeers(PeerSelectionCriteria criteria) { return null; } - public List getPeersByCapability(char capability) { return null; } - public void setCapabilities(Hash peer, String caps) {} - public void removeCapabilities(Hash peer) {} - public Hash selectRandomByCapability(char capability) { return null; } -} diff --git a/router/java/src/net/i2p/router/PersistentKeyRing.java b/router/java/src/net/i2p/router/PersistentKeyRing.java new file mode 100644 index 000000000..d02275ea2 --- /dev/null +++ b/router/java/src/net/i2p/router/PersistentKeyRing.java @@ -0,0 +1,103 @@ +package net.i2p.router; + +import java.io.IOException; +import java.io.Writer; + +import java.util.Iterator; +import java.util.Map; +import java.util.TreeMap; + +import net.i2p.data.Base64; +import net.i2p.data.DataFormatException; +import net.i2p.data.Destination; +import net.i2p.data.Hash; +import net.i2p.data.LeaseSet; +import net.i2p.data.SessionKey; +import net.i2p.router.TunnelPoolSettings; +import net.i2p.util.KeyRing; + +/** + * ConcurrentHashMap with backing in the router.config file. + * router.keyring.key.{base64 hash, with = replaced with $}={base64 session key} + * Caution - not all HashMap methods are overridden. 
+ */ +public class PersistentKeyRing extends KeyRing { + private RouterContext _ctx; + private static final String PROP_PFX = "router.keyring.key."; + + public PersistentKeyRing(RouterContext ctx) { + super(); + _ctx = ctx; + addFromProperties(); + } + + public SessionKey put(Hash h, SessionKey sk) { + SessionKey old = super.put(h, sk); + if (!sk.equals(old)) { + _ctx.router().setConfigSetting(PROP_PFX + h.toBase64().replace("=", "$"), + sk.toBase64()); + _ctx.router().saveConfig(); + } + return old; + } + + public SessionKey remove(Hash h) { + _ctx.router().removeConfigSetting(PROP_PFX + h.toBase64().replace("=", "$")); + _ctx.router().saveConfig(); + return super.remove(h); + } + + private void addFromProperties() { + for (Iterator iter = _ctx.getPropertyNames().iterator(); iter.hasNext(); ) { + String prop = (String) iter.next(); + if (!prop.startsWith(PROP_PFX)) + continue; + String key = _ctx.getProperty(prop); + if (key == null || key.length() != 44) + continue; + String hb = prop.substring(PROP_PFX.length()); + hb.replace("$", "="); + Hash dest = new Hash(); + SessionKey sk = new SessionKey(); + try { + dest.fromBase64(hb); + sk.fromBase64(key); + super.put(dest, sk); + } catch (DataFormatException dfe) { continue; } + } + } + + public void renderStatusHTML(Writer out) throws IOException { + StringBuffer buf = new StringBuffer(1024); + buf.append("\n"); + for (Entry e : entrySet()) { + buf.append("\n
          Destination HashName or Dest.Session Key
          "); + Hash h = e.getKey(); + buf.append(h.toBase64().substring(0, 6)).append("..."); + buf.append(""); + LeaseSet ls = _ctx.netDb().lookupLeaseSetLocally(h); + if (ls != null) { + Destination dest = ls.getDestination(); + if (_ctx.clientManager().isLocal(dest)) { + TunnelPoolSettings in = _ctx.tunnelManager().getInboundSettings(h); + if (in != null && in.getDestinationNickname() != null) + buf.append(in.getDestinationNickname()); + else + buf.append(dest.toBase64().substring(0, 6)).append("..."); + } else { + String host = _ctx.namingService().reverseLookup(dest); + if (host != null) + buf.append(host); + else + buf.append(dest.toBase64().substring(0, 6)).append("..."); + } + } + buf.append(""); + SessionKey sk = e.getValue(); + buf.append(sk.toBase64()); + } + buf.append("\n
          \n"); + out.write(buf.toString()); + out.flush(); + } +} diff --git a/router/java/src/net/i2p/router/Router.java b/router/java/src/net/i2p/router/Router.java index 40ecc31be..13e801458 100644 --- a/router/java/src/net/i2p/router/Router.java +++ b/router/java/src/net/i2p/router/Router.java @@ -43,6 +43,7 @@ import net.i2p.stat.StatManager; import net.i2p.util.FileUtil; import net.i2p.util.I2PThread; import net.i2p.util.Log; +import net.i2p.util.SimpleScheduler; import net.i2p.util.SimpleTimer; /** @@ -64,7 +65,6 @@ public class Router { private I2PThread.OOMEventListener _oomListener; private ShutdownHook _shutdownHook; private I2PThread _gracefulShutdownDetector; - private Set _shutdownTasks; public final static String PROP_CONFIG_FILE = "router.configLocation"; @@ -170,7 +170,6 @@ public class Router { watchdog.setDaemon(true); watchdog.start(); - _shutdownTasks = new HashSet(0); } /** @@ -257,7 +256,7 @@ public class Router { _context.inNetMessagePool().startup(); startupQueue(); //_context.jobQueue().addJob(new CoalesceStatsJob(_context)); - SimpleTimer.getInstance().addEvent(new CoalesceStatsEvent(_context), 0); + SimpleScheduler.getInstance().addPeriodicEvent(new CoalesceStatsEvent(_context), 20*1000); _context.jobQueue().addJob(new UpdateRoutingKeyModifierJob(_context)); warmupCrypto(); _sessionKeyPersistenceHelper.startup(); @@ -346,7 +345,7 @@ public class Router { if (blockingRebuild) r.timeReached(); else - SimpleTimer.getInstance().addEvent(r, 0); + SimpleScheduler.getInstance().addEvent(r, 0); } catch (DataFormatException dfe) { _log.log(Log.CRIT, "Internal error - unable to sign our own address?!", dfe); } @@ -445,13 +444,14 @@ public class Router { */ private static final String _rebuildFiles[] = new String[] { "router.info", "router.keys", - "netDb/my.info", - "connectionTag.keys", + "netDb/my.info", // no longer used + "connectionTag.keys", // never used? 
"keyBackup/privateEncryption.key", "keyBackup/privateSigning.key", "keyBackup/publicEncryption.key", "keyBackup/publicSigning.key", - "sessionKeys.dat" }; + "sessionKeys.dat" // no longer used + }; static final String IDENTLOG = "identlog.txt"; public static void killKeys() { @@ -489,13 +489,12 @@ public class Router { */ public void rebuildNewIdentity() { killKeys(); - try { - for (Iterator iter = _shutdownTasks.iterator(); iter.hasNext(); ) { - Runnable task = (Runnable)iter.next(); + for (Runnable task : _context.getShutdownTasks()) { + try { task.run(); + } catch (Throwable t) { + _log.log(Log.CRIT, "Error running shutdown task", t); } - } catch (Throwable t) { - _log.log(Log.CRIT, "Error running shutdown task", t); } // hard and ugly finalShutdown(EXIT_HARD_RESTART); @@ -780,12 +779,6 @@ public class Router { buf.setLength(0); } - public void addShutdownTask(Runnable task) { - synchronized (_shutdownTasks) { - _shutdownTasks.add(task); - } - } - public static final int EXIT_GRACEFUL = 2; public static final int EXIT_HARD = 3; public static final int EXIT_OOM = 10; @@ -798,13 +791,12 @@ public class Router { I2PThread.removeOOMEventListener(_oomListener); // Run the shutdown hooks first in case they want to send some goodbye messages // Maybe we need a delay after this too? 
- try { - for (Iterator iter = _shutdownTasks.iterator(); iter.hasNext(); ) { - Runnable task = (Runnable)iter.next(); + for (Runnable task : _context.getShutdownTasks()) { + try { task.run(); + } catch (Throwable t) { + _log.log(Log.CRIT, "Error running shutdown task", t); } - } catch (Throwable t) { - _log.log(Log.CRIT, "Error running shutdown task", t); } try { _context.clientManager().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the client manager", t); } try { _context.jobQueue().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the job queue", t); } @@ -858,6 +850,10 @@ public class Router { public void shutdownGracefully() { shutdownGracefully(EXIT_GRACEFUL); } + /** + * Call this with EXIT_HARD or EXIT_HARD_RESTART for a non-blocking, + * hard, non-graceful shutdown with a brief delay to allow a UI response + */ public void shutdownGracefully(int exitCode) { _gracefulExitCode = exitCode; _config.setProperty(PROP_SHUTDOWN_IN_PROGRESS, "true"); @@ -886,7 +882,9 @@ public class Router { } /** How long until the graceful shutdown will kill us? */ public long getShutdownTimeRemaining() { - if (_gracefulExitCode <= 0) return -1; + if (_gracefulExitCode <= 0) return -1; // maybe Long.MAX_VALUE would be better? 
+ if (_gracefulExitCode == EXIT_HARD || _gracefulExitCode == EXIT_HARD_RESTART) + return 0; long exp = _context.tunnelManager().getLastParticipatingExpiration(); if (exp < 0) return -1; @@ -905,9 +903,20 @@ public class Router { while (true) { boolean shutdown = (null != _config.getProperty(PROP_SHUTDOWN_IN_PROGRESS)); if (shutdown) { - if (_context.tunnelManager().getParticipatingCount() <= 0) { - if (_log.shouldLog(Log.CRIT)) + if (_gracefulExitCode == EXIT_HARD || _gracefulExitCode == EXIT_HARD_RESTART || + _context.tunnelManager().getParticipatingCount() <= 0) { + if (_gracefulExitCode == EXIT_HARD) + _log.log(Log.CRIT, "Shutting down after a brief delay"); + else if (_gracefulExitCode == EXIT_HARD_RESTART) + _log.log(Log.CRIT, "Restarting after a brief delay"); + else _log.log(Log.CRIT, "Graceful shutdown progress - no more tunnels, safe to die"); + // Allow time for a UI reponse + try { + synchronized (Thread.currentThread()) { + Thread.currentThread().wait(2*1000); + } + } catch (InterruptedException ie) {} shutdown(_gracefulExitCode); return; } else { @@ -1078,7 +1087,7 @@ public class Router { /** * What fraction of the bandwidth specified in our bandwidth limits should * we allow to be consumed by participating tunnels? - * @returns a number less than one, not a percentage! + * @return a number less than one, not a percentage! 
* */ public double getSharePercentage() { @@ -1215,6 +1224,8 @@ class CoalesceStatsEvent implements SimpleTimer.TimedEvent { ctx.statManager().createRateStat("router.activeSendPeers", "How many peers we've sent to this minute", "Throttle", new long[] { 60*1000, 5*60*1000, 60*60*1000 }); ctx.statManager().createRateStat("router.highCapacityPeers", "How many high capacity peers we know", "Throttle", new long[] { 5*60*1000, 60*60*1000 }); ctx.statManager().createRateStat("router.fastPeers", "How many fast peers we know", "Throttle", new long[] { 5*60*1000, 60*60*1000 }); + long max = Runtime.getRuntime().maxMemory() / (1024*1024); + ctx.statManager().createRateStat("router.memoryUsed", "(Bytes) Max is " + max + "MB", "Router", new long[] { 60*1000 }); } private RouterContext getContext() { return _ctx; } public void timeReached() { @@ -1233,6 +1244,9 @@ class CoalesceStatsEvent implements SimpleTimer.TimedEvent { getContext().statManager().addRateData("bw.sendRate", (long)getContext().bandwidthLimiter().getSendBps(), 0); getContext().statManager().addRateData("bw.recvRate", (long)getContext().bandwidthLimiter().getReceiveBps(), 0); + long used = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory(); + getContext().statManager().addRateData("router.memoryUsed", used, 0); + getContext().tunnelDispatcher().updateParticipatingStats(); getContext().statManager().coalesceStats(); @@ -1256,8 +1270,6 @@ class CoalesceStatsEvent implements SimpleTimer.TimedEvent { getContext().statManager().addRateData("bw.sendBps", (long)KBps, 60*1000); } } - - SimpleTimer.getInstance().addEvent(this, 20*1000); } } diff --git a/router/java/src/net/i2p/router/RouterContext.java b/router/java/src/net/i2p/router/RouterContext.java index 8b34f6364..517a5ba35 100644 --- a/router/java/src/net/i2p/router/RouterContext.java +++ b/router/java/src/net/i2p/router/RouterContext.java @@ -26,6 +26,7 @@ import net.i2p.router.transport.VMCommSystem; import 
net.i2p.router.tunnel.TunnelDispatcher; import net.i2p.router.tunnel.pool.TunnelPoolManager; import net.i2p.util.Clock; +import net.i2p.util.KeyRing; /** * Build off the core I2P context to provide a root for a router instance to @@ -329,6 +330,23 @@ public class RouterContext extends I2PAppContext { return super.getProperty(propName, defaultVal); } + /** + * Return an int with an int default + */ + public int getProperty(String propName, int defaultVal) { + if (_router != null) { + String val = _router.getConfigSetting(propName); + if (val != null) { + int ival = defaultVal; + try { + ival = Integer.parseInt(val); + } catch (NumberFormatException nfe) {} + return ival; + } + } + return super.getProperty(propName, defaultVal); + } + /** * The context's synchronized clock, which is kept context specific only to * enable simulators to play with clock skew among different instances. @@ -349,4 +367,21 @@ public class RouterContext extends I2PAppContext { } } + /** override to support storage in router.config */ + @Override + public KeyRing keyRing() { + if (!_keyRingInitialized) + initializeKeyRing(); + return _keyRing; + } + + @Override + protected void initializeKeyRing() { + synchronized (this) { + if (_keyRing == null) + _keyRing = new PersistentKeyRing(this); + _keyRingInitialized = true; + } + } + } diff --git a/router/java/src/net/i2p/router/RouterThrottleImpl.java b/router/java/src/net/i2p/router/RouterThrottleImpl.java index 2c3f2114e..fc38b1695 100644 --- a/router/java/src/net/i2p/router/RouterThrottleImpl.java +++ b/router/java/src/net/i2p/router/RouterThrottleImpl.java @@ -190,14 +190,7 @@ class RouterThrottleImpl implements RouterThrottle { } } - int max = DEFAULT_MAX_TUNNELS; - String maxTunnels = _context.getProperty(PROP_MAX_TUNNELS); - if (maxTunnels != null) { - try { - max = Integer.parseInt(maxTunnels); - } catch (NumberFormatException nfe) { - } - } + int max = _context.getProperty(PROP_MAX_TUNNELS, DEFAULT_MAX_TUNNELS); if (numTunnels >= max) { if 
(_log.shouldLog(Log.WARN)) _log.warn("Refusing tunnel request since we are already participating in " @@ -387,11 +380,7 @@ class RouterThrottleImpl implements RouterThrottle { /** dont ever probabalistically throttle tunnels if we have less than this many */ private int getMinThrottleTunnels() { - try { - return Integer.parseInt(_context.getProperty("router.minThrottleTunnels", "1000")); - } catch (NumberFormatException nfe) { - return 1000; - } + return _context.getProperty("router.minThrottleTunnels", 1000); } private double getTunnelGrowthFactor() { diff --git a/router/java/src/net/i2p/router/RouterVersion.java b/router/java/src/net/i2p/router/RouterVersion.java index 57680063e..450adcf28 100644 --- a/router/java/src/net/i2p/router/RouterVersion.java +++ b/router/java/src/net/i2p/router/RouterVersion.java @@ -16,8 +16,8 @@ import net.i2p.CoreVersion; */ public class RouterVersion { public final static String ID = "$Revision: 1.548 $ $Date: 2008-06-07 23:00:00 $"; - public final static String VERSION = "0.6.5"; - public final static long BUILD = 4; + public final static String VERSION = CoreVersion.VERSION; + public final static long BUILD = 0; public static void main(String args[]) { System.out.println("I2P Router version: " + VERSION + "-" + BUILD); System.out.println("Router ID: " + RouterVersion.ID); diff --git a/router/java/src/net/i2p/router/Shitlist.java b/router/java/src/net/i2p/router/Shitlist.java index 5868a15b4..29d384de9 100644 --- a/router/java/src/net/i2p/router/Shitlist.java +++ b/router/java/src/net/i2p/router/Shitlist.java @@ -10,6 +10,7 @@ package net.i2p.router; import java.io.IOException; import java.io.Writer; +import java.util.concurrent.ConcurrentHashMap; import java.util.ArrayList; import java.util.Comparator; import java.util.HashMap; @@ -23,6 +24,7 @@ import java.util.TreeMap; import net.i2p.data.DataHelper; import net.i2p.data.Hash; import net.i2p.router.peermanager.PeerProfile; +import net.i2p.util.ConcurrentHashSet; import 
net.i2p.util.Log; /** @@ -34,53 +36,51 @@ import net.i2p.util.Log; public class Shitlist { private Log _log; private RouterContext _context; - private Map _entries; + private Map _entries; - private class Entry { + private static class Entry { /** when it should expire, per the i2p clock */ long expireOn; /** why they were shitlisted */ String cause; /** what transports they were shitlisted for (String), or null for all transports */ - Set transports; + Set transports; } - public final static long SHITLIST_DURATION_MS = 40*60*1000; // 40 minute shitlist - public final static long SHITLIST_DURATION_MAX = 60*60*1000; - public final static long SHITLIST_DURATION_PARTIAL = 20*60*1000; + public final static long SHITLIST_DURATION_MS = 20*60*1000; + public final static long SHITLIST_DURATION_MAX = 30*60*1000; + public final static long SHITLIST_DURATION_PARTIAL = 10*60*1000; public final static long SHITLIST_DURATION_FOREVER = 181l*24*60*60*1000; // will get rounded down to 180d on console public final static long SHITLIST_CLEANER_START_DELAY = SHITLIST_DURATION_PARTIAL; public Shitlist(RouterContext context) { _context = context; _log = context.logManager().getLog(Shitlist.class); - _entries = new HashMap(32); + _entries = new ConcurrentHashMap(8); _context.jobQueue().addJob(new Cleanup(_context)); } private class Cleanup extends JobImpl { - private List _toUnshitlist; + private List _toUnshitlist; public Cleanup(RouterContext ctx) { super(ctx); _toUnshitlist = new ArrayList(4); - getTiming().setStartAfter(_context.clock().now() + SHITLIST_CLEANER_START_DELAY); + getTiming().setStartAfter(ctx.clock().now() + SHITLIST_CLEANER_START_DELAY); } public String getName() { return "Cleanup shitlist"; } public void runJob() { _toUnshitlist.clear(); long now = getContext().clock().now(); - synchronized (_entries) { - for (Iterator iter = _entries.keySet().iterator(); iter.hasNext(); ) { - Hash peer = (Hash)iter.next(); - Entry entry = (Entry)_entries.get(peer); - if 
(entry.expireOn <= now) { + try { + for (Iterator iter = _entries.entrySet().iterator(); iter.hasNext(); ) { + Map.Entry e = (Map.Entry) iter.next(); + if (e.getValue().expireOn <= now) { iter.remove(); - _toUnshitlist.add(peer); + _toUnshitlist.add(e.getKey()); } } - } - for (int i = 0; i < _toUnshitlist.size(); i++) { - Hash peer = (Hash)_toUnshitlist.get(i); + } catch (IllegalStateException ise) {} // next time... + for (Hash peer : _toUnshitlist) { PeerProfile prof = _context.profileOrganizer().getProfile(peer); if (prof != null) prof.unshitlist(); @@ -94,9 +94,7 @@ public class Shitlist { } public int getRouterCount() { - synchronized (_entries) { - return _entries.size(); - } + return _entries.size(); } public boolean shitlistRouter(Hash peer) { @@ -143,12 +141,11 @@ public class Shitlist { e.cause = reason; e.transports = null; if (transport != null) { - e.transports = new HashSet(1); + e.transports = new ConcurrentHashSet(1); e.transports.add(transport); } - synchronized (_entries) { - Entry old = (Entry)_entries.get(peer); + Entry old = _entries.get(peer); if (old != null) { wasAlready = true; // take the oldest expiration and cause, combine transports @@ -166,7 +163,6 @@ public class Shitlist { } } _entries.put(peer, e); - } if (transport == null) { // we hate the peer on *any* transport @@ -190,20 +186,19 @@ public class Shitlist { _log.debug("Calling unshitlistRouter " + peer.toBase64() + (transport != null ? 
"/" + transport : "")); boolean fully = false; - Entry e; - synchronized (_entries) { - e = (Entry)_entries.remove(peer); - if ( (e == null) || (e.transports == null) || (transport == null) || (e.transports.size() <= 1) ) { - // fully unshitlisted + + Entry e = _entries.remove(peer); + if ( (e == null) || (e.transports == null) || (transport == null) || (e.transports.size() <= 1) ) { + // fully unshitlisted + fully = true; + } else { + e.transports.remove(transport); + if (e.transports.size() <= 0) fully = true; - } else { - e.transports.remove(transport); - if (e.transports.size() <= 0) - fully = true; - else - _entries.put(peer, e); - } + else + _entries.put(peer, e); } + if (fully) { if (realUnshitlist) { PeerProfile prof = _context.profileOrganizer().getProfile(peer); @@ -221,25 +216,18 @@ public class Shitlist { public boolean isShitlisted(Hash peer, String transport) { boolean rv = false; boolean unshitlist = false; - synchronized (_entries) { - Entry entry = (Entry)_entries.get(peer); - if (entry == null) { - rv = false; - } else { - if (entry.expireOn <= _context.clock().now()) { - _entries.remove(peer); - unshitlist = true; - rv = false; - } else { - if (entry.transports == null) { - rv = true; - } else if (entry.transports.contains(transport)) { - rv = true; - } else { - rv = false; - } - } - } + + Entry entry = _entries.get(peer); + if (entry == null) { + rv = false; + } else if (entry.expireOn <= _context.clock().now()) { + _entries.remove(peer); + unshitlist = true; + rv = false; + } else if (entry.transports == null) { + rv = true; + } else { + rv = entry.transports.contains(transport); } if (unshitlist) { @@ -255,10 +243,7 @@ public class Shitlist { } public boolean isShitlistedForever(Hash peer) { - Entry entry; - synchronized (_entries) { - entry = (Entry)_entries.get(peer); - } + Entry entry = _entries.get(peer); return entry != null && entry.expireOn > _context.clock().now() + SHITLIST_DURATION_MAX; } @@ -271,17 +256,15 @@ public class Shitlist { 
public void renderStatusHTML(Writer out) throws IOException { StringBuffer buf = new StringBuffer(1024); buf.append("

          Shitlist

          "); - Map entries = new TreeMap(new HashComparator()); + Map entries = new TreeMap(new HashComparator()); - synchronized (_entries) { - entries.putAll(_entries); - } + entries.putAll(_entries); + buf.append("
            "); - for (Iterator iter = entries.entrySet().iterator(); iter.hasNext(); ) { - Map.Entry mentry = (Map.Entry)iter.next(); - Hash key = (Hash)mentry.getKey(); - Entry entry = (Entry)mentry.getValue(); + for (Map.Entry e : entries.entrySet()) { + Hash key = e.getKey(); + Entry entry = e.getValue(); buf.append("
          • ").append(key.toBase64()).append(""); buf.append(" (netdb)"); buf.append(" expiring in "); diff --git a/router/java/src/net/i2p/router/TunnelManagerFacade.java b/router/java/src/net/i2p/router/TunnelManagerFacade.java index be0b7e441..a6c1c9614 100644 --- a/router/java/src/net/i2p/router/TunnelManagerFacade.java +++ b/router/java/src/net/i2p/router/TunnelManagerFacade.java @@ -79,35 +79,3 @@ public interface TunnelManagerFacade extends Service { public void setInboundSettings(Hash client, TunnelPoolSettings settings); public void setOutboundSettings(Hash client, TunnelPoolSettings settings); } - -class DummyTunnelManagerFacade implements TunnelManagerFacade { - - public TunnelInfo getTunnelInfo(TunnelId id) { return null; } - public TunnelInfo selectInboundTunnel() { return null; } - public TunnelInfo selectInboundTunnel(Hash destination) { return null; } - public TunnelInfo selectOutboundTunnel() { return null; } - public TunnelInfo selectOutboundTunnel(Hash destination) { return null; } - public boolean isInUse(Hash peer) { return false; } - public boolean isValidTunnel(Hash client, TunnelInfo tunnel) { return false; } - public int getParticipatingCount() { return 0; } - public int getFreeTunnelCount() { return 0; } - public int getOutboundTunnelCount() { return 0; } - public int getInboundClientTunnelCount() { return 0; } - public int getOutboundClientTunnelCount() { return 0; } - public long getLastParticipatingExpiration() { return -1; } - public void buildTunnels(Destination client, ClientTunnelSettings settings) {} - public TunnelPoolSettings getInboundSettings() { return null; } - public TunnelPoolSettings getOutboundSettings() { return null; } - public TunnelPoolSettings getInboundSettings(Hash client) { return null; } - public TunnelPoolSettings getOutboundSettings(Hash client) { return null; } - public void setInboundSettings(TunnelPoolSettings settings) {} - public void setOutboundSettings(TunnelPoolSettings settings) {} - public void 
setInboundSettings(Hash client, TunnelPoolSettings settings) {} - public void setOutboundSettings(Hash client, TunnelPoolSettings settings) {} - public int getInboundBuildQueueSize() { return 0; } - - public void renderStatusHTML(Writer out) throws IOException {} - public void restart() {} - public void shutdown() {} - public void startup() {} -} diff --git a/router/java/src/net/i2p/router/client/ClientConnectionRunner.java b/router/java/src/net/i2p/router/client/ClientConnectionRunner.java index 544badcad..4f979c5c9 100644 --- a/router/java/src/net/i2p/router/client/ClientConnectionRunner.java +++ b/router/java/src/net/i2p/router/client/ClientConnectionRunner.java @@ -11,9 +11,9 @@ package net.i2p.router.client; import java.io.IOException; import java.io.OutputStream; import java.net.Socket; +import java.util.concurrent.ConcurrentHashMap; import java.util.ArrayList; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -29,14 +29,17 @@ import net.i2p.data.i2cp.I2CPMessageReader; import net.i2p.data.i2cp.MessageId; import net.i2p.data.i2cp.MessageStatusMessage; import net.i2p.data.i2cp.SendMessageMessage; +import net.i2p.data.i2cp.SendMessageExpiresMessage; import net.i2p.data.i2cp.SessionConfig; import net.i2p.data.i2cp.SessionId; import net.i2p.router.Job; import net.i2p.router.JobImpl; import net.i2p.router.RouterContext; +import net.i2p.util.ConcurrentHashSet; import net.i2p.util.I2PThread; import net.i2p.util.Log; import net.i2p.util.RandomSource; +import net.i2p.util.SimpleScheduler; import net.i2p.util.SimpleTimer; /** @@ -57,13 +60,13 @@ public class ClientConnectionRunner { /** user's config */ private SessionConfig _config; /** static mapping of MessageId to Payload, storing messages for retrieval */ - private Map _messages; + private Map _messages; /** lease set request state, or null if there is no request pending on at the moment */ private LeaseRequestState _leaseRequest; /** 
currently allocated leaseSet, or null if none is allocated */ private LeaseSet _currentLeaseSet; /** set of messageIds created but not yet ACCEPTED */ - private Set _acceptedPending; + private Set _acceptedPending; /** thingy that does stuff */ private I2CPMessageReader _reader; /** @@ -86,9 +89,9 @@ public class ClientConnectionRunner { _manager = manager; _socket = socket; _config = null; - _messages = new HashMap(); + _messages = new ConcurrentHashMap(); _alreadyProcessed = new ArrayList(); - _acceptedPending = new HashSet(); + _acceptedPending = new ConcurrentHashSet(); _dead = false; } @@ -104,7 +107,7 @@ public class ClientConnectionRunner { _reader = new I2CPMessageReader(_socket.getInputStream(), new ClientMessageEventListener(_context, this)); _writer = new ClientWriterRunner(_context, this); I2PThread t = new I2PThread(_writer); - t.setName("Writer " + ++__id); + t.setName("I2CP Writer " + ++__id); t.setDaemon(true); t.setPriority(I2PThread.MAX_PRIORITY); t.start(); @@ -126,9 +129,7 @@ public class ClientConnectionRunner { if (_reader != null) _reader.stopReading(); if (_writer != null) _writer.stopWriting(); if (_socket != null) try { _socket.close(); } catch (IOException ioe) { } - synchronized (_messages) { - _messages.clear(); - } + _messages.clear(); if (_manager != null) _manager.unregisterConnection(this); if (_currentLeaseSet != null) @@ -162,50 +163,18 @@ public class ClientConnectionRunner { } /** already closed? 
*/ boolean isDead() { return _dead; } + /** message body */ Payload getPayload(MessageId id) { - Payload rv = null; - long beforeLock = _context.clock().now(); - long inLock = 0; - synchronized (_messages) { - inLock = _context.clock().now(); - rv = (Payload)_messages.get(id); - } - long afterLock = _context.clock().now(); - - if (afterLock - beforeLock > 50) { - _log.warn("alreadyAccepted.locking took too long: " + (afterLock-beforeLock) - + " overall, synchronized took " + (inLock - beforeLock)); - } - return rv; + return _messages.get(id); } + void setPayload(MessageId id, Payload payload) { - long beforeLock = _context.clock().now(); - long inLock = 0; - synchronized (_messages) { - inLock = _context.clock().now(); - _messages.put(id, payload); - } - long afterLock = _context.clock().now(); - - if (afterLock - beforeLock > 50) { - _log.warn("setPayload.locking took too long: " + (afterLock-beforeLock) - + " overall, synchronized took " + (inLock - beforeLock)); - } + _messages.put(id, payload); } + void removePayload(MessageId id) { - long beforeLock = _context.clock().now(); - long inLock = 0; - synchronized (_messages) { - inLock = _context.clock().now(); - _messages.remove(id); - } - long afterLock = _context.clock().now(); - - if (afterLock - beforeLock > 50) { - _log.warn("removePayload.locking took too long: " + (afterLock-beforeLock) - + " overall, synchronized took " + (inLock - beforeLock)); - } + _messages.remove(id); } void sessionEstablished(SessionConfig config) { @@ -270,19 +239,11 @@ public class ClientConnectionRunner { Destination dest = message.getDestination(); MessageId id = new MessageId(); id.setMessageId(getNextMessageId()); - long beforeLock = _context.clock().now(); - long inLock = 0; - synchronized (_acceptedPending) { - inLock = _context.clock().now(); - _acceptedPending.add(id); - } - long afterLock = _context.clock().now(); - - if (_log.shouldLog(Log.DEBUG)) { - _log.warn("distributeMessage.locking took: " + (afterLock-beforeLock) - 
+ " overall, synchronized took " + (inLock - beforeLock)); - } - + long expiration = 0; + if (message instanceof SendMessageExpiresMessage) + expiration = ((SendMessageExpiresMessage) message).getExpiration().getTime(); + _acceptedPending.add(id); + if (_log.shouldLog(Log.DEBUG)) _log.debug("** Receiving message [" + id.getMessageId() + "] with payload of size [" + payload.getSize() + "]" + " for session [" + _sessionId.getSessionId() @@ -291,7 +252,7 @@ public class ClientConnectionRunner { // the following blocks as described above SessionConfig cfg = _config; if (cfg != null) - _manager.distributeMessage(cfg.getDestination(), dest, payload, id); + _manager.distributeMessage(cfg.getDestination(), dest, payload, id, expiration); long timeToDistribute = _context.clock().now() - beforeDistribute; if (_log.shouldLog(Log.DEBUG)) _log.warn("Time to distribute in the manager to " @@ -319,18 +280,7 @@ public class ClientConnectionRunner { status.setStatus(MessageStatusMessage.STATUS_SEND_ACCEPTED); try { doSend(status); - long beforeLock = _context.clock().now(); - long inLock = 0; - synchronized (_acceptedPending) { - inLock = _context.clock().now(); - _acceptedPending.remove(id); - } - long afterLock = _context.clock().now(); - - if (afterLock - beforeLock > 50) { - _log.warn("ackSendMessage.locking took too long: " + (afterLock-beforeLock) - + " overall, synchronized took " + (inLock - beforeLock)); - } + _acceptedPending.remove(id); } catch (I2CPMessageException ime) { _log.error("Error writing out the message status message: " + ime); } @@ -415,7 +365,7 @@ public class ClientConnectionRunner { // theirs is newer } else { // ours is newer, so wait a few secs and retry - SimpleTimer.getInstance().addEvent(new Rerequest(set, expirationTime, onCreateJob, onFailedJob), 3*1000); + SimpleScheduler.getInstance().addEvent(new Rerequest(set, expirationTime, onCreateJob, onFailedJob), 3*1000); } // fire onCreated? 
return; // already requesting @@ -532,28 +482,7 @@ public class ClientConnectionRunner { */ private boolean alreadyAccepted(MessageId id) { if (_dead) return false; - boolean isPending = false; - int pending = 0; - String buf = null; - long beforeLock = _context.clock().now(); - long inLock = 0; - synchronized (_acceptedPending) { - inLock = _context.clock().now(); - if (_acceptedPending.contains(id)) - isPending = true; - pending = _acceptedPending.size(); - buf = _acceptedPending.toString(); - } - long afterLock = _context.clock().now(); - - if (afterLock - beforeLock > 50) { - _log.warn("alreadyAccepted.locking took too long: " + (afterLock-beforeLock) - + " overall, synchronized took " + (inLock - beforeLock)); - } - if (pending >= 1) { - _log.warn("Pending acks: " + pending + ": " + buf); - } - return !isPending; + return !_acceptedPending.contains(id); } /** diff --git a/router/java/src/net/i2p/router/client/ClientListenerRunner.java b/router/java/src/net/i2p/router/client/ClientListenerRunner.java index 074b161df..38105e9c9 100644 --- a/router/java/src/net/i2p/router/client/ClientListenerRunner.java +++ b/router/java/src/net/i2p/router/client/ClientListenerRunner.java @@ -31,7 +31,7 @@ public class ClientListenerRunner implements Runnable { private int _port; private boolean _bindAllInterfaces; private boolean _running; - private long _nextFailDelay = 1000; + private boolean _listening; public static final String BIND_ALL_INTERFACES = "i2cp.tcp.bindAllInterfaces"; @@ -41,6 +41,7 @@ public class ClientListenerRunner implements Runnable { _manager = manager; _port = port; _running = false; + _listening = false; String val = context.getProperty(BIND_ALL_INTERFACES, "False"); _bindAllInterfaces = Boolean.valueOf(val).booleanValue(); @@ -48,6 +49,7 @@ public class ClientListenerRunner implements Runnable { public void setPort(int port) { _port = port; } public int getPort() { return _port; } + public boolean isListening() { return _running && _listening; } /** * 
Start up the socket listener, listens for connections, and @@ -58,7 +60,7 @@ public class ClientListenerRunner implements Runnable { */ public void runServer() { _running = true; - int curDelay = 0; + int curDelay = 1000; while (_running) { try { if (_bindAllInterfaces) { @@ -77,7 +79,8 @@ public class ClientListenerRunner implements Runnable { if (_log.shouldLog(Log.DEBUG)) _log.debug("ServerSocket created, before accept: " + _socket); - curDelay = 0; + curDelay = 1000; + _listening = true; while (_running) { try { Socket socket = _socket.accept(); @@ -96,6 +99,7 @@ public class ClientListenerRunner implements Runnable { } catch (Throwable t) { if (_context.router().isAlive()) _log.error("Fatal error running client listener - killing the thread!", t); + _listening = false; return; } } @@ -104,6 +108,7 @@ public class ClientListenerRunner implements Runnable { _log.error("Error listening on port " + _port, ioe); } + _listening = false; if (_socket != null) { try { _socket.close(); } catch (IOException ioe) {} _socket = null; @@ -111,14 +116,16 @@ public class ClientListenerRunner implements Runnable { if (!_context.router().isAlive()) break; - _log.error("Error listening, waiting " + _nextFailDelay + "ms before we try again"); - try { Thread.sleep(_nextFailDelay); } catch (InterruptedException ie) {} - curDelay += _nextFailDelay; - _nextFailDelay *= 5; + if (curDelay < 60*1000) + _log.error("Error listening, waiting " + (curDelay/1000) + "s before we try again"); + else + _log.log(Log.CRIT, "I2CP error listening to port " + _port + " - is another I2P instance running? Resolve conflicts and restart"); + try { Thread.sleep(curDelay); } catch (InterruptedException ie) {} + curDelay = Math.min(curDelay*3, 60*1000); } if (_context.router().isAlive()) - _log.error("CANCELING I2CP LISTEN. 
delay = " + curDelay, new Exception("I2CP Listen cancelled!!!")); + _log.error("CANCELING I2CP LISTEN", new Exception("I2CP Listen cancelled!!!")); _running = false; } diff --git a/router/java/src/net/i2p/router/client/ClientManager.java b/router/java/src/net/i2p/router/client/ClientManager.java index d9838ef7b..9b5eb7c4c 100644 --- a/router/java/src/net/i2p/router/client/ClientManager.java +++ b/router/java/src/net/i2p/router/client/ClientManager.java @@ -108,6 +108,8 @@ public class ClientManager { } } + public boolean isAlive() { return _listener.isListening(); } + public void registerConnection(ClientConnectionRunner runner) { synchronized (_pendingRunners) { _pendingRunners.add(runner); @@ -140,7 +142,7 @@ public class ClientManager { } } - void distributeMessage(Destination fromDest, Destination toDest, Payload payload, MessageId msgId) { + void distributeMessage(Destination fromDest, Destination toDest, Payload payload, MessageId msgId, long expiration) { // check if there is a runner for it ClientConnectionRunner runner = getRunner(toDest); if (runner != null) { @@ -168,6 +170,7 @@ public class ClientManager { msg.setSenderConfig(runner.getConfig()); msg.setFromDestination(runner.getConfig().getDestination()); msg.setMessageId(msgId); + msg.setExpiration(expiration); _ctx.clientMessagePool().add(msg, true); } } diff --git a/router/java/src/net/i2p/router/client/ClientManagerFacadeImpl.java b/router/java/src/net/i2p/router/client/ClientManagerFacadeImpl.java index 796a098a9..51b8b4cb2 100644 --- a/router/java/src/net/i2p/router/client/ClientManagerFacadeImpl.java +++ b/router/java/src/net/i2p/router/client/ClientManagerFacadeImpl.java @@ -74,6 +74,8 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade { startup(); } + public boolean isAlive() { return _manager != null && _manager.isAlive(); } + private static final long MAX_TIME_TO_REBUILD = 10*60*1000; public boolean verifyClientLiveliness() { if (_manager == null) return true; diff --git 
a/router/java/src/net/i2p/router/client/ClientMessageEventListener.java b/router/java/src/net/i2p/router/client/ClientMessageEventListener.java index d75e27fb4..0dcc81870 100644 --- a/router/java/src/net/i2p/router/client/ClientMessageEventListener.java +++ b/router/java/src/net/i2p/router/client/ClientMessageEventListener.java @@ -8,9 +8,12 @@ package net.i2p.router.client; * */ +import java.util.Properties; + import net.i2p.data.Payload; import net.i2p.data.i2cp.CreateLeaseSetMessage; import net.i2p.data.i2cp.CreateSessionMessage; +import net.i2p.data.i2cp.DestLookupMessage; import net.i2p.data.i2cp.DestroySessionMessage; import net.i2p.data.i2cp.GetDateMessage; import net.i2p.data.i2cp.I2CPMessage; @@ -20,10 +23,13 @@ import net.i2p.data.i2cp.MessageId; import net.i2p.data.i2cp.MessagePayloadMessage; import net.i2p.data.i2cp.ReceiveMessageBeginMessage; import net.i2p.data.i2cp.ReceiveMessageEndMessage; +import net.i2p.data.i2cp.ReconfigureSessionMessage; import net.i2p.data.i2cp.SendMessageMessage; +import net.i2p.data.i2cp.SendMessageExpiresMessage; import net.i2p.data.i2cp.SessionId; import net.i2p.data.i2cp.SessionStatusMessage; import net.i2p.data.i2cp.SetDateMessage; +import net.i2p.router.ClientTunnelSettings; import net.i2p.router.RouterContext; import net.i2p.util.Log; import net.i2p.util.RandomSource; @@ -66,6 +72,9 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi case SendMessageMessage.MESSAGE_TYPE: handleSendMessage(reader, (SendMessageMessage)message); break; + case SendMessageExpiresMessage.MESSAGE_TYPE: + handleSendMessage(reader, (SendMessageExpiresMessage)message); + break; case ReceiveMessageBeginMessage.MESSAGE_TYPE: handleReceiveBegin(reader, (ReceiveMessageBeginMessage)message); break; @@ -78,6 +87,12 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi case DestroySessionMessage.MESSAGE_TYPE: handleDestroySession(reader, (DestroySessionMessage)message); break; + case 
DestLookupMessage.MESSAGE_TYPE: + handleDestLookup(reader, (DestLookupMessage)message); + break; + case ReconfigureSessionMessage.MESSAGE_TYPE: + handleReconfigureSession(reader, (ReconfigureSessionMessage)message); + break; default: if (_log.shouldLog(Log.ERROR)) _log.error("Unhandled I2CP type received: " + message.getType()); @@ -85,13 +100,14 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi } /** - * Handle notifiation that there was an error + * Handle notification that there was an error * */ public void readError(I2CPMessageReader reader, Exception error) { if (_runner.isDead()) return; if (_log.shouldLog(Log.ERROR)) _log.error("Error occurred", error); + // Is this is a little drastic for an unknown message type? _runner.stopRunning(); } @@ -128,24 +144,13 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi return; } - SessionStatusMessage msg = new SessionStatusMessage(); SessionId sessionId = new SessionId(); sessionId.setSessionId(getNextSessionId()); _runner.setSessionId(sessionId); - msg.setSessionId(sessionId); - msg.setStatus(SessionStatusMessage.STATUS_CREATED); - try { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("before sending sessionStatusMessage for " + message.getSessionConfig().getDestination().calculateHash().toBase64()); - _runner.doSend(msg); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("after sending sessionStatusMessage for " + message.getSessionConfig().getDestination().calculateHash().toBase64()); - _runner.sessionEstablished(message.getSessionConfig()); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("after sessionEstablished for " + message.getSessionConfig().getDestination().calculateHash().toBase64()); - } catch (I2CPMessageException ime) { - _log.error("Error writing out the session status message", ime); - } + sendStatusMessage(SessionStatusMessage.STATUS_CREATED); + _runner.sessionEstablished(message.getSessionConfig()); + if (_log.shouldLog(Log.DEBUG)) + 
_log.debug("after sessionEstablished for " + message.getSessionConfig().getDestination().calculateHash().toBase64()); _context.jobQueue().addJob(new CreateSessionJob(_context, _runner)); } @@ -228,6 +233,47 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi _runner.leaseSetCreated(message.getLeaseSet()); } + private void handleDestLookup(I2CPMessageReader reader, DestLookupMessage message) { + _context.jobQueue().addJob(new LookupDestJob(_context, _runner, message.getHash())); + } + + /** + * Message's Session ID ignored. This doesn't support removing previously set options. + * Nor do we bother with message.getSessionConfig().verifySignature() ... should we? + * + */ + private void handleReconfigureSession(I2CPMessageReader reader, ReconfigureSessionMessage message) { + if (_log.shouldLog(Log.INFO)) + _log.info("Updating options - old: " + _runner.getConfig() + " new: " + message.getSessionConfig()); + if (!message.getSessionConfig().getDestination().equals(_runner.getConfig().getDestination())) { + _log.error("Dest mismatch"); + sendStatusMessage(SessionStatusMessage.STATUS_INVALID); + _runner.stopRunning(); + return; + } + _runner.getConfig().getOptions().putAll(message.getSessionConfig().getOptions()); + ClientTunnelSettings settings = new ClientTunnelSettings(); + Properties props = new Properties(); + props.putAll(_runner.getConfig().getOptions()); + settings.readFromProperties(props); + _context.tunnelManager().setInboundSettings(_runner.getConfig().getDestination().calculateHash(), + settings.getInboundSettings()); + _context.tunnelManager().setOutboundSettings(_runner.getConfig().getDestination().calculateHash(), + settings.getOutboundSettings()); + sendStatusMessage(SessionStatusMessage.STATUS_UPDATED); + } + + private void sendStatusMessage(int status) { + SessionStatusMessage msg = new SessionStatusMessage(); + msg.setSessionId(_runner.getSessionId()); + msg.setStatus(status); + try { + _runner.doSend(msg); + } catch 
(I2CPMessageException ime) { + _log.error("Error writing out the session status message", ime); + } + } + // this *should* be mod 65536, but UnsignedInteger is still b0rked. FIXME private final static int MAX_SESSION_ID = 32767; diff --git a/router/java/src/net/i2p/router/client/ClientWriterRunner.java b/router/java/src/net/i2p/router/client/ClientWriterRunner.java index bf1364877..49fcddcc2 100644 --- a/router/java/src/net/i2p/router/client/ClientWriterRunner.java +++ b/router/java/src/net/i2p/router/client/ClientWriterRunner.java @@ -1,9 +1,13 @@ package net.i2p.router.client; -import java.util.ArrayList; -import java.util.List; +import java.io.IOException; +import java.io.InputStream; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; import net.i2p.data.i2cp.I2CPMessage; +import net.i2p.data.i2cp.I2CPMessageImpl; +import net.i2p.data.i2cp.I2CPMessageException; import net.i2p.router.RouterContext; import net.i2p.util.Log; @@ -13,26 +17,18 @@ import net.i2p.util.Log; * the client reads from their i2cp socket, causing all sorts of bad shit to * happen) * + * @author zzz modded to use concurrent */ class ClientWriterRunner implements Runnable { - private List _messagesToWrite; - private List _messagesToWriteTimes; + private BlockingQueue _messagesToWrite; private ClientConnectionRunner _runner; - private RouterContext _context; private Log _log; private long _id; private static long __id = 0; - private static final long MAX_WAIT = 5*1000; - - /** lock on this when updating the class level data structs */ - private Object _dataLock = new Object(); - public ClientWriterRunner(RouterContext context, ClientConnectionRunner runner) { - _context = context; _log = context.logManager().getLog(ClientWriterRunner.class); - _messagesToWrite = new ArrayList(4); - _messagesToWriteTimes = new ArrayList(4); + _messagesToWrite = new LinkedBlockingQueue(); _runner = runner; _id = ++__id; } @@ -42,11 +38,9 @@ class ClientWriterRunner 
implements Runnable { * */ public void addMessage(I2CPMessage msg) { - synchronized (_dataLock) { - _messagesToWrite.add(msg); - _messagesToWriteTimes.add(new Long(_context.clock().now())); - _dataLock.notifyAll(); - } + try { + _messagesToWrite.put(msg); + } catch (InterruptedException ie) {} if (_log.shouldLog(Log.DEBUG)) _log.debug("["+_id+"] addMessage completed for " + msg.getClass().getName()); } @@ -56,47 +50,37 @@ class ClientWriterRunner implements Runnable { * */ public void stopWriting() { - synchronized (_dataLock) { - _dataLock.notifyAll(); + _messagesToWrite.clear(); + try { + _messagesToWrite.put(new PoisonMessage()); + } catch (InterruptedException ie) {} + } + + public void run() { + I2CPMessage msg; + while (!_runner.getIsDead()) { + try { + msg = _messagesToWrite.take(); + } catch (InterruptedException ie) { + continue; + } + if (msg.getType() == PoisonMessage.MESSAGE_TYPE) + break; + _runner.writeMessage(msg); } } - public void run() { - List messages = new ArrayList(64); - List messageTimes = new ArrayList(64); - List switchList = null; - - while (!_runner.getIsDead()) { - synchronized (_dataLock) { - if (_messagesToWrite.size() <= 0) - try { _dataLock.wait(); } catch (InterruptedException ie) {} - - if (_messagesToWrite.size() > 0) { - switchList = _messagesToWrite; - _messagesToWrite = messages; - messages = switchList; - - switchList = _messagesToWriteTimes; - _messagesToWriteTimes = messageTimes; - messageTimes = switchList; - } - } - - if (messages.size() > 0) { - for (int i = 0; i < messages.size(); i++) { - I2CPMessage msg = (I2CPMessage)messages.get(i); - Long when = (Long)messageTimes.get(i); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("["+_id+"] writeMessage before writing " - + msg.getClass().getName()); - _runner.writeMessage(msg); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("["+_id+"] writeMessage time since addMessage(): " - + (_context.clock().now()-when.longValue()) + " for " - + msg.getClass().getName()); - } - } - 
messages.clear(); - messageTimes.clear(); + + /** + * End-of-stream msg used to stop the concurrent queue + * See http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/BlockingQueue.html + * + */ + private static class PoisonMessage extends I2CPMessageImpl { + public static final int MESSAGE_TYPE = 999999; + public int getType() { + return MESSAGE_TYPE; } + public void doReadMessage(InputStream buf, int size) throws I2CPMessageException, IOException {} + public byte[] doWriteMessage() throws I2CPMessageException, IOException { return null; } } } diff --git a/router/java/src/net/i2p/router/client/LookupDestJob.java b/router/java/src/net/i2p/router/client/LookupDestJob.java new file mode 100644 index 000000000..68edbcaa0 --- /dev/null +++ b/router/java/src/net/i2p/router/client/LookupDestJob.java @@ -0,0 +1,54 @@ +/* + * Released into the public domain + * with no warranty of any kind, either expressed or implied. + */ +package net.i2p.router.client; + +import net.i2p.data.Destination; +import net.i2p.data.Hash; +import net.i2p.data.LeaseSet; +import net.i2p.data.i2cp.DestReplyMessage; +import net.i2p.data.i2cp.I2CPMessageException; +import net.i2p.router.JobImpl; +import net.i2p.router.RouterContext; + +/** + * Look up the lease of a hash, to convert it to a Destination for the client + */ +class LookupDestJob extends JobImpl { + private ClientConnectionRunner _runner; + private Hash _hash; + + public LookupDestJob(RouterContext context, ClientConnectionRunner runner, Hash h) { + super(context); + _runner = runner; + _hash = h; + } + + public String getName() { return "LeaseSet Lookup for Client"; } + public void runJob() { + DoneJob done = new DoneJob(getContext()); + getContext().netDb().lookupLeaseSet(_hash, done, done, 10*1000); + } + + private class DoneJob extends JobImpl { + public DoneJob(RouterContext enclosingContext) { + super(enclosingContext); + } + public String getName() { return "LeaseSet Lookup Reply to Client"; } + public void runJob() { + 
LeaseSet ls = getContext().netDb().lookupLeaseSetLocally(_hash); + if (ls != null) + returnDest(ls.getDestination()); + else + returnDest(null); + } + } + + private void returnDest(Destination d) { + DestReplyMessage msg = new DestReplyMessage(d); + try { + _runner.doSend(msg); + } catch (I2CPMessageException ime) {} + } +} diff --git a/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java b/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java index ccef8192a..20d69ea73 100644 --- a/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java +++ b/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java @@ -34,6 +34,8 @@ import net.i2p.router.Router; import net.i2p.router.RouterContext; import net.i2p.router.TunnelInfo; import net.i2p.util.Log; +import net.i2p.util.SimpleScheduler; +import net.i2p.util.SimpleTimer; /** * Send a client message out a random outbound tunnel and into a random inbound @@ -61,6 +63,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { private long _leaseSetLookupBegin; private TunnelInfo _outTunnel; private TunnelInfo _inTunnel; + private boolean _wantACK; /** * final timeout (in milliseconds) that the outbound message will fail in. @@ -69,6 +72,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { */ public final static String OVERALL_TIMEOUT_MS_PARAM = "clientMessageTimeout"; private final static long OVERALL_TIMEOUT_MS_DEFAULT = 60*1000; + private final static long OVERALL_TIMEOUT_MS_MIN = 5*1000; /** priority of messages, that might get honored some day... 
*/ private final static int SEND_PRIORITY = 500; @@ -96,6 +100,11 @@ public class OutboundClientMessageOneShotJob extends JobImpl { */ private static final int BUNDLE_PROBABILITY_DEFAULT = 100; + private static final Object _initializeLock = new Object(); + private static boolean _initialized = false; + private static final int CLEAN_INTERVAL = 5*60*1000; + private static final int REPLY_REQUEST_INTERVAL = 60*1000; + /** * Send the sucker */ @@ -103,20 +112,26 @@ public class OutboundClientMessageOneShotJob extends JobImpl { super(ctx); _log = ctx.logManager().getLog(OutboundClientMessageOneShotJob.class); - ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("client.sendAckTime", "Message round trip time", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("client.leaseSetFoundLocally", "How often we tried to look for a leaseSet and found it locally?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 
24*60*60*1000l }); - ctx.statManager().createRateStat("client.leaseSetFoundRemoteTime", "How long we tried to look for a remote leaseSet (when we succeeded)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("client.leaseSetFailedRemoteTime", "How long we tried to look for a remote leaseSet (when we failed)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("client.dispatchPrepareTime", "How long until we've queued up the dispatch job (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("client.dispatchTime", "How long until we've dispatched the message (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("client.dispatchSendTime", "How long the actual dispatching takes?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("client.dispatchNoTunnels", "How long after start do we run out of tunnels to send/receive with?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); - ctx.statManager().createRateStat("client.dispatchNoACK", "Repeated message sends to a peer (no ack required)", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l }); + synchronized (_initializeLock) { + if (!_initialized) { + SimpleScheduler.getInstance().addPeriodicEvent(new OCMOSJCacheCleaner(ctx), CLEAN_INTERVAL, CLEAN_INTERVAL); + ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); + 
ctx.statManager().createRateStat("client.sendAckTime", "Message round trip time", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("client.leaseSetFoundLocally", "How often we tried to look for a leaseSet and found it locally?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("client.leaseSetFoundRemoteTime", "How long we tried to look for a remote leaseSet (when we succeeded)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("client.leaseSetFailedRemoteTime", "How long we tried to look for a remote leaseSet (when we failed)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("client.dispatchPrepareTime", "How long until we've queued up the dispatch job (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("client.dispatchTime", "How long until we've dispatched the message (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("client.dispatchSendTime", "How long the actual dispatching takes?", "ClientMessages", new long[] { 
5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("client.dispatchNoTunnels", "How long after start do we run out of tunnels to send/receive with?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); + ctx.statManager().createRateStat("client.dispatchNoACK", "Repeated message sends to a peer (no ack required)", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l }); + _initialized = true; + } + } long timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT; _clientMessage = msg; _clientMessageId = msg.getMessageId(); @@ -125,23 +140,34 @@ public class OutboundClientMessageOneShotJob extends JobImpl { _to = msg.getDestination(); _toString = _to.calculateHash().toBase64().substring(0,4); _leaseSetLookupBegin = -1; - - String param = msg.getSenderConfig().getOptions().getProperty(OVERALL_TIMEOUT_MS_PARAM); - if (param == null) - param = ctx.router().getConfigSetting(OVERALL_TIMEOUT_MS_PARAM); - if (param != null) { - try { - timeoutMs = Long.parseLong(param); - } catch (NumberFormatException nfe) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Invalid client message timeout specified [" + param - + "], defaulting to " + OVERALL_TIMEOUT_MS_DEFAULT, nfe); - timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT; - } - } - _start = getContext().clock().now(); - _overallExpiration = timeoutMs + _start; + + // use expiration requested by client if available, otherwise session config, + // otherwise router config, otherwise default + _overallExpiration = msg.getExpiration(); + if (_overallExpiration > 0) { + _overallExpiration = Math.max(_overallExpiration, _start + OVERALL_TIMEOUT_MS_MIN); + _overallExpiration = Math.min(_overallExpiration, _start + OVERALL_TIMEOUT_MS_DEFAULT); + if (_log.shouldLog(Log.WARN)) + _log.warn("Message Expiration (ms): " + (_overallExpiration - _start)); + } else { + String param = msg.getSenderConfig().getOptions().getProperty(OVERALL_TIMEOUT_MS_PARAM); + if (param == null) + param = 
ctx.router().getConfigSetting(OVERALL_TIMEOUT_MS_PARAM); + if (param != null) { + try { + timeoutMs = Long.parseLong(param); + } catch (NumberFormatException nfe) { + if (_log.shouldLog(Log.WARN)) + _log.warn("Invalid client message timeout specified [" + param + + "], defaulting to " + OVERALL_TIMEOUT_MS_DEFAULT, nfe); + timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT; + } + } + _overallExpiration = timeoutMs + _start; + if (_log.shouldLog(Log.WARN)) + _log.warn("Default Expiration (ms): " + timeoutMs); + } _finished = false; } @@ -187,8 +213,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { * * Key the cache on the source+dest pair. */ - private static HashMap _leaseSetCache = new HashMap(); - private static long _lscleanTime = 0; + private static HashMap _leaseSetCache = new HashMap(); private LeaseSet getReplyLeaseSet(boolean force) { LeaseSet newLS = getContext().netDb().lookupLeaseSetLocally(_from.calculateHash()); if (newLS == null) @@ -222,12 +247,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl { // If the last leaseSet we sent him is still good, don't bother sending again long now = getContext().clock().now(); synchronized (_leaseSetCache) { - if (now - _lscleanTime > 5*60*1000) { // clean out periodically - cleanLeaseSetCache(_leaseSetCache); - _lscleanTime = now; - } if (!force) { - LeaseSet ls = (LeaseSet) _leaseSetCache.get(hashPair()); + LeaseSet ls = _leaseSetCache.get(hashPair()); if (ls != null) { if (ls.equals(newLS)) { // still good, send it 10% of the time @@ -267,6 +288,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { long lookupTime = getContext().clock().now() - _leaseSetLookupBegin; getContext().statManager().addRateData("client.leaseSetFoundRemoteTime", lookupTime, lookupTime); } + _wantACK = false; boolean ok = getNextLease(); if (ok) { send(); @@ -291,8 +313,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { * lease). 
* */ - private static HashMap _leaseCache = new HashMap(); - private static long _lcleanTime = 0; + private static HashMap _leaseCache = new HashMap(); private boolean getNextLease() { _leaseSet = getContext().netDb().lookupLeaseSetLocally(_to.calculateHash()); if (_leaseSet == null) { @@ -305,11 +326,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { // Use the same lease if it's still good // Even if _leaseSet changed, _leaseSet.getEncryptionKey() didn't... synchronized (_leaseCache) { - if (now - _lcleanTime > 5*60*1000) { // clean out periodically - cleanLeaseCache(_leaseCache); - _lcleanTime = now; - } - _lease = (Lease) _leaseCache.get(hashPair()); + _lease = _leaseCache.get(hashPair()); if (_lease != null) { // if outbound tunnel length == 0 && lease.firsthop.isBacklogged() don't use it ?? if (!_lease.isExpired(Router.CLOCK_FUDGE_FACTOR)) { @@ -400,6 +417,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { } if (_log.shouldLog(Log.INFO)) _log.info("Added to cache - lease for " + _toString); + _wantACK = true; return true; } @@ -429,6 +447,14 @@ public class OutboundClientMessageOneShotJob extends JobImpl { } } + /** + * This cache is used to ensure that we request a reply every so often. + * Hopefully this allows the router to recognize a failed tunnel and switch, + * before upper layers like streaming lib fail, even for low-bandwidth + * connections like IRC. + */ + private static HashMap _lastReplyRequestCache = new HashMap(); + /** * Send the message to the specified tunnel by creating a new garlic message containing * the (already created) payload clove as well as a new delivery status message. 
This garlic @@ -439,14 +465,27 @@ public class OutboundClientMessageOneShotJob extends JobImpl { */ private void send() { if (_finished) return; - if (getContext().clock().now() >= _overallExpiration) { + long now = getContext().clock().now(); + if (now >= _overallExpiration) { dieFatal(); return; } - boolean wantACK = true; + int existingTags = GarlicMessageBuilder.estimateAvailableTags(getContext(), _leaseSet.getEncryptionKey()); - if ( (existingTags > 30) && (getContext().random().nextInt(100) >= 5) ) - wantACK = false; + _outTunnel = selectOutboundTunnel(_to); + // boolean wantACK = _wantACK || existingTags <= 30 || getContext().random().nextInt(100) < 5; + // what's the point of 5% random? possible improvements or replacements: + // DONE (getNextLease() is called before this): wantACK if we changed their inbound lease (getNextLease() sets _wantACK) + // DONE (selectOutboundTunnel() moved above here): wantACK if we changed our outbound tunnel (selectOutboundTunnel() sets _wantACK) + // DONE (added new cache): wantACK if we haven't in last 1m (requires a new static cache probably) + boolean wantACK; + synchronized (_lastReplyRequestCache) { + Long lastSent = _lastReplyRequestCache.get(hashPair()); + wantACK = _wantACK || existingTags <= 30 || + lastSent == null || lastSent.longValue() < now - REPLY_REQUEST_INTERVAL; + if (wantACK) + _lastReplyRequestCache.put(hashPair(), Long.valueOf(now)); + } PublicKey key = _leaseSet.getEncryptionKey(); SessionKey sessKey = new SessionKey(); @@ -480,7 +519,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { // we dont receive the reply? hmm...) 
if (_log.shouldLog(Log.WARN)) _log.warn(getJobId() + ": Unable to create the garlic message (no tunnels left or too lagged) to " + _toString); - getContext().statManager().addRateData("client.dispatchNoTunnels", getContext().clock().now() - _start, 0); + getContext().statManager().addRateData("client.dispatchNoTunnels", now - _start, 0); dieFatal(); return; } @@ -503,7 +542,6 @@ public class OutboundClientMessageOneShotJob extends JobImpl { + _lease.getTunnelId() + " on " + _lease.getGateway().toBase64()); - _outTunnel = selectOutboundTunnel(_to); if (_outTunnel != null) { if (_log.shouldLog(Log.DEBUG)) _log.debug(getJobId() + ": Sending tunnel message out " + _outTunnel.getSendTunnelId(0) + " to " @@ -519,12 +557,12 @@ public class OutboundClientMessageOneShotJob extends JobImpl { } else { if (_log.shouldLog(Log.WARN)) _log.warn(getJobId() + ": Could not find any outbound tunnels to send the payload through... this might take a while"); - getContext().statManager().addRateData("client.dispatchNoTunnels", getContext().clock().now() - _start, 0); + getContext().statManager().addRateData("client.dispatchNoTunnels", now - _start, 0); dieFatal(); } _clientMessage = null; _clove = null; - getContext().statManager().addRateData("client.dispatchPrepareTime", getContext().clock().now() - _start, 0); + getContext().statManager().addRateData("client.dispatchPrepareTime", now - _start, 0); if (!wantACK) getContext().statManager().addRateData("client.dispatchNoACK", 1, 0); } @@ -562,7 +600,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { /** * This is the place where we make I2P go fast. * - * We have four static caches. + * We have five static caches. * - The LeaseSet cache is used to decide whether to bundle our own leaseset, * which minimizes overhead. 
* - The Lease cache is used to persistently send to the same lease for the destination, @@ -570,6 +608,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl { * - The Tunnel and BackloggedTunnel caches are used to persistently use the same outbound tunnel * for the same destination, * which keeps the streaming lib happy by minimizing out-of-order delivery. + * - The last reply requested cache ensures that a reply is requested every so often, + * so that failed tunnels are recognized. * */ @@ -589,7 +629,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { * (needed for cleanTunnelCache) * 44 = 32 * 4 / 3 */ - private Hash sourceFromHashPair(String s) { + private static Hash sourceFromHashPair(String s) { return new Hash(Base64.decode(s.substring(44, 88))); } @@ -609,17 +649,17 @@ public class OutboundClientMessageOneShotJob extends JobImpl { } if (_lease != null) { synchronized(_leaseCache) { - Lease l = (Lease) _leaseCache.get(key); + Lease l = _leaseCache.get(key); if (l != null && l.equals(_lease)) _leaseCache.remove(key); } } if (_outTunnel != null) { synchronized(_tunnelCache) { - TunnelInfo t =(TunnelInfo) _backloggedTunnelCache.get(key); + TunnelInfo t = _backloggedTunnelCache.get(key); if (t != null && t.equals(_outTunnel)) _backloggedTunnelCache.remove(key); - t = (TunnelInfo) _tunnelCache.get(key); + t = _tunnelCache.get(key); if (t != null && t.equals(_outTunnel)) _tunnelCache.remove(key); } @@ -630,19 +670,14 @@ public class OutboundClientMessageOneShotJob extends JobImpl { * Clean out old leaseSets from a set. * Caller must synchronize on tc. 
*/ - private void cleanLeaseSetCache(HashMap tc) { - long now = getContext().clock().now(); - List deleteList = new ArrayList(); + private static void cleanLeaseSetCache(RouterContext ctx, HashMap tc) { + long now = ctx.clock().now(); for (Iterator iter = tc.entrySet().iterator(); iter.hasNext(); ) { Map.Entry entry = (Map.Entry)iter.next(); String k = (String) entry.getKey(); LeaseSet l = (LeaseSet) entry.getValue(); if (l.getEarliestLeaseDate() < now) - deleteList.add(k); - } - for (Iterator iter = deleteList.iterator(); iter.hasNext(); ) { - String k = (String) iter.next(); - tc.remove(k); + iter.remove(); } } @@ -650,18 +685,13 @@ public class OutboundClientMessageOneShotJob extends JobImpl { * Clean out old leases from a set. * Caller must synchronize on tc. */ - private void cleanLeaseCache(HashMap tc) { - List deleteList = new ArrayList(); + private static void cleanLeaseCache(HashMap tc) { for (Iterator iter = tc.entrySet().iterator(); iter.hasNext(); ) { Map.Entry entry = (Map.Entry)iter.next(); String k = (String) entry.getKey(); Lease l = (Lease) entry.getValue(); if (l.isExpired(Router.CLOCK_FUDGE_FACTOR)) - deleteList.add(k); - } - for (Iterator iter = deleteList.iterator(); iter.hasNext(); ) { - String k = (String) iter.next(); - tc.remove(k); + iter.remove(); } } @@ -669,18 +699,48 @@ public class OutboundClientMessageOneShotJob extends JobImpl { * Clean out old tunnels from a set. * Caller must synchronize on tc. 
*/ - private void cleanTunnelCache(HashMap tc) { - List deleteList = new ArrayList(); + private static void cleanTunnelCache(RouterContext ctx, HashMap tc) { for (Iterator iter = tc.entrySet().iterator(); iter.hasNext(); ) { Map.Entry entry = (Map.Entry)iter.next(); String k = (String) entry.getKey(); TunnelInfo tunnel = (TunnelInfo) entry.getValue(); - if (!getContext().tunnelManager().isValidTunnel(sourceFromHashPair(k), tunnel)) - deleteList.add(k); + if (!ctx.tunnelManager().isValidTunnel(sourceFromHashPair(k), tunnel)) + iter.remove(); } - for (Iterator iter = deleteList.iterator(); iter.hasNext(); ) { - String k = (String) iter.next(); - tc.remove(k); + } + + /** + * Clean out old reply times + * Caller must synchronize on tc. + */ + private static void cleanReplyCache(RouterContext ctx, HashMap tc) { + long now = ctx.clock().now(); + for (Iterator iter = tc.values().iterator(); iter.hasNext(); ) { + Long l = (Long) iter.next(); + if (l.longValue() < now - CLEAN_INTERVAL) + iter.remove(); + } + } + + private static class OCMOSJCacheCleaner implements SimpleTimer.TimedEvent { + private RouterContext _ctx; + private OCMOSJCacheCleaner(RouterContext ctx) { + _ctx = ctx; + } + public void timeReached() { + synchronized(_leaseSetCache) { + cleanLeaseSetCache(_ctx, _leaseSetCache); + } + synchronized(_leaseCache) { + cleanLeaseCache(_leaseCache); + } + synchronized(_tunnelCache) { + cleanTunnelCache(_ctx, _tunnelCache); + cleanTunnelCache(_ctx, _backloggedTunnelCache); + } + synchronized(_lastReplyRequestCache) { + cleanReplyCache(_ctx, _lastReplyRequestCache); + } } } @@ -692,25 +752,19 @@ public class OutboundClientMessageOneShotJob extends JobImpl { * Key the caches on the source+dest pair. 
* */ - private static HashMap _tunnelCache = new HashMap(); - private static HashMap _backloggedTunnelCache = new HashMap(); - private static long _cleanTime = 0; + private static HashMap _tunnelCache = new HashMap(); + private static HashMap _backloggedTunnelCache = new HashMap(); private TunnelInfo selectOutboundTunnel(Destination to) { TunnelInfo tunnel; long now = getContext().clock().now(); synchronized (_tunnelCache) { - if (now - _cleanTime > 5*60*1000) { // clean out periodically - cleanTunnelCache(_tunnelCache); - cleanTunnelCache(_backloggedTunnelCache); - _cleanTime = now; - } /** * If old tunnel is valid and no longer backlogged, use it. * This prevents an active anonymity attack, where a peer could tell * if you were the originator by backlogging the tunnel, then removing the * backlog and seeing if traffic came back or not. */ - tunnel = (TunnelInfo) _backloggedTunnelCache.get(hashPair()); + tunnel = _backloggedTunnelCache.get(hashPair()); if (tunnel != null) { if (getContext().tunnelManager().isValidTunnel(_from.calculateHash(), tunnel)) { if (!getContext().commSystem().isBacklogged(tunnel.getPeer(1))) { @@ -718,13 +772,14 @@ public class OutboundClientMessageOneShotJob extends JobImpl { _log.warn("Switching back to tunnel " + tunnel + " for " + _toString); _backloggedTunnelCache.remove(hashPair()); _tunnelCache.put(hashPair(), tunnel); + _wantACK = true; return tunnel; } // else still backlogged } else // no longer valid _backloggedTunnelCache.remove(hashPair()); } // Use the same tunnel unless backlogged - tunnel = (TunnelInfo) _tunnelCache.get(hashPair()); + tunnel = _tunnelCache.get(hashPair()); if (tunnel != null) { if (getContext().tunnelManager().isValidTunnel(_from.calculateHash(), tunnel)) { if (tunnel.getLength() <= 1 || !getContext().commSystem().isBacklogged(tunnel.getPeer(1))) @@ -740,6 +795,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl { tunnel = selectOutboundTunnel(); if (tunnel != null) 
_tunnelCache.put(hashPair(), tunnel); + _wantACK = true; } return tunnel; } diff --git a/router/java/src/net/i2p/router/networkdb/PublishLocalRouterInfoJob.java b/router/java/src/net/i2p/router/networkdb/PublishLocalRouterInfoJob.java index 96dbadd6c..8fa729d63 100644 --- a/router/java/src/net/i2p/router/networkdb/PublishLocalRouterInfoJob.java +++ b/router/java/src/net/i2p/router/networkdb/PublishLocalRouterInfoJob.java @@ -20,12 +20,12 @@ import net.i2p.router.RouterContext; import net.i2p.util.Log; /** - * Publish the local router's RouterInfo every 5 to 10 minutes + * Publish the local router's RouterInfo periodically * */ public class PublishLocalRouterInfoJob extends JobImpl { private Log _log; - final static long PUBLISH_DELAY = 5*60*1000; // every 5 to 10 minutes (since we randomize) + final static long PUBLISH_DELAY = 20*60*1000; public PublishLocalRouterInfoJob(RouterContext ctx) { super(ctx); @@ -67,6 +67,6 @@ public class PublishLocalRouterInfoJob extends JobImpl { } catch (DataFormatException dfe) { _log.error("Error signing the updated local router info!", dfe); } - requeue(PUBLISH_DELAY + getContext().random().nextInt((int)PUBLISH_DELAY)); + requeue((PUBLISH_DELAY/2) + getContext().random().nextInt((int)PUBLISH_DELAY)); } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/DataPublisherJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/DataPublisherJob.java deleted file mode 100644 index 307ae5f79..000000000 --- a/router/java/src/net/i2p/router/networkdb/kademlia/DataPublisherJob.java +++ /dev/null @@ -1,101 +0,0 @@ -package net.i2p.router.networkdb.kademlia; -/* - * free (adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat - * your children, but it might. Use at your own risk. 
- * - */ - -import java.util.HashSet; -import java.util.Iterator; -import java.util.Set; - -import net.i2p.data.DataStructure; -import net.i2p.data.Hash; -import net.i2p.data.LeaseSet; -import net.i2p.router.JobImpl; -import net.i2p.router.Router; -import net.i2p.router.RouterContext; -import net.i2p.util.Log; - -class DataPublisherJob extends JobImpl { - private Log _log; - private KademliaNetworkDatabaseFacade _facade; - private final static long RERUN_DELAY_MS = 120*1000; - private final static int MAX_SEND_PER_RUN = 1; // publish no more than 2 at a time - private final static long STORE_TIMEOUT = 60*1000; // give 'er a minute to send the data - - public DataPublisherJob(RouterContext ctx, KademliaNetworkDatabaseFacade facade) { - super(ctx); - _log = ctx.logManager().getLog(DataPublisherJob.class); - _facade = facade; - getTiming().setStartAfter(ctx.clock().now()+RERUN_DELAY_MS); // not immediate... - } - - public String getName() { return "Data Publisher Job"; } - public void runJob() { - Set toSend = selectKeysToSend(); - if (_log.shouldLog(Log.INFO)) - _log.info("Keys being published in this timeslice: " + toSend); - for (Iterator iter = toSend.iterator(); iter.hasNext(); ) { - Hash key = (Hash)iter.next(); - DataStructure data = _facade.getDataStore().get(key); - if (data == null) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Trying to send a key we dont have? 
" + key); - continue; - } - if (data instanceof LeaseSet) { - LeaseSet ls = (LeaseSet)data; - if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Not publishing a lease that isn't current - " + key, - new Exception("Publish expired lease?")); - } - if (!getContext().clientManager().shouldPublishLeaseSet(key)) - continue; - } - _facade.sendStore(key, data, null, null, STORE_TIMEOUT, null); - //StoreJob store = new StoreJob(getContext(), _facade, key, data, null, null, STORE_TIMEOUT); - //getContext().jobQueue().addJob(store); - } - requeue(RERUN_DELAY_MS); - } - - private Set selectKeysToSend() { - Set explicit = _facade.getExplicitSendKeys(); - Set toSend = new HashSet(MAX_SEND_PER_RUN); - - // if there's nothing we *need* to send, only send 10% of the time - if (explicit.size() <= 0) { - if (getContext().random().nextInt(10) > 0) - return toSend; - } - - if (explicit.size() < MAX_SEND_PER_RUN) { - toSend.addAll(explicit); - _facade.removeFromExplicitSend(explicit); - - Set passive = _facade.getPassivelySendKeys(); - Set psend = new HashSet(passive.size()); - for (Iterator iter = passive.iterator(); iter.hasNext(); ) { - if (toSend.size() >= MAX_SEND_PER_RUN) break; - Hash key = (Hash)iter.next(); - toSend.add(key); - psend.add(key); - } - _facade.removeFromPassiveSend(psend); - } else { - for (Iterator iter = explicit.iterator(); iter.hasNext(); ) { - if (toSend.size() >= MAX_SEND_PER_RUN) break; - Hash key = (Hash)iter.next(); - toSend.add(key); - } - _facade.removeFromExplicitSend(toSend); - } - - return toSend; - } -} diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/DataRepublishingSelectorJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/DataRepublishingSelectorJob.java deleted file mode 100644 index 173522ab0..000000000 --- a/router/java/src/net/i2p/router/networkdb/kademlia/DataRepublishingSelectorJob.java +++ /dev/null @@ -1,175 +0,0 @@ -package net.i2p.router.networkdb.kademlia; -/* - * free 
(adj.): unencumbered; not under the control of others - * Written by jrandom in 2003 and released into the public domain - * with no warranty of any kind, either expressed or implied. - * It probably won't make your computer catch on fire, or eat - * your children, but it might. Use at your own risk. - * - */ - -import java.util.HashSet; -import java.util.Iterator; -import java.util.Set; -import java.util.TreeMap; - -import net.i2p.data.Hash; -import net.i2p.data.LeaseSet; -import net.i2p.data.RouterInfo; -import net.i2p.router.JobImpl; -import net.i2p.router.Router; -import net.i2p.router.RouterContext; -import net.i2p.util.Log; - -class DataRepublishingSelectorJob extends JobImpl { - private Log _log; - private KademliaNetworkDatabaseFacade _facade; - - private final static long RERUN_DELAY_MS = 1*60*1000; - public final static int MAX_PASSIVE_POOL_SIZE = 10; // no need to have the pool be too big - - /** - * For every bucket away from us, resend period increases by 5 minutes - so we resend - * our own key every 5 minutes, and keys very far from us every 2.5 hours, increasing - * linearly - */ - public final static long RESEND_BUCKET_FACTOR = 5*60*1000; - - /** - * % chance any peer not specializing in the lease's key will broadcast it on each pass - * of this job /after/ waiting 5 minutes (one RESENT_BUCKET_FACTOR). In other words, - * .5% of routers will broadcast a particular unexpired lease to (say) 5 peers every - * minute. - * - */ - private final static int LEASE_REBROADCAST_PROBABILITY = 5; - /** - * LEASE_REBROADCAST_PROBABILITY out of LEASE_REBROADCAST_PROBABILITY_SCALE chance. - */ - private final static int LEASE_REBROADCAST_PROBABILITY_SCALE = 1000; - - public DataRepublishingSelectorJob(RouterContext ctx, KademliaNetworkDatabaseFacade facade) { - super(ctx); - _log = ctx.logManager().getLog(DataRepublishingSelectorJob.class); - _facade = facade; - getTiming().setStartAfter(ctx.clock().now()+RERUN_DELAY_MS); // not immediate... 
- } - - public String getName() { return "Data Publisher Job"; } - public void runJob() { - Set toSend = selectKeysToSend(); - if (_log.shouldLog(Log.INFO)) - _log.info("Keys being queued up for publishing: " + toSend); - _facade.queueForPublishing(toSend); - requeue(RERUN_DELAY_MS); - } - - /** - * Run through the entire data store, ranking how much we want to send each - * data point, and returning the ones we most want to send so that they can - * be placed in the passive send pool (without making the passive pool greater - * than the limit) - * - */ - private Set selectKeysToSend() { - Set alreadyQueued = new HashSet(128); - alreadyQueued.addAll(_facade.getPassivelySendKeys()); - - int toAdd = MAX_PASSIVE_POOL_SIZE - alreadyQueued.size(); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Keys we need to queue up to fill the passive send pool: " + toAdd); - if (toAdd <= 0) return new HashSet(); - - alreadyQueued.addAll(_facade.getExplicitSendKeys()); - - Set keys = _facade.getDataStore().getKeys(); - keys.removeAll(alreadyQueued); - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Total number of keys in the datastore: " + keys.size()); - - TreeMap toSend = new TreeMap(); - for (Iterator iter = keys.iterator(); iter.hasNext(); ) { - Hash key = (Hash)iter.next(); - Long lastPublished = _facade.getLastSent(key); - long publishRank = rankPublishNeed(key, lastPublished); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Publish rank for " + key + ": " + publishRank); - if (publishRank > 0) { - while (toSend.containsKey(new Long(publishRank))) - publishRank++; - toSend.put(new Long(publishRank), key); - } - } - Set rv = new HashSet(toAdd); - for (Iterator iter = toSend.values().iterator(); iter.hasNext(); ) { - if (rv.size() > toAdd) break; - Hash key = (Hash)iter.next(); - rv.add(key); - } - return rv; - } - - /** - * Higher values mean we want to publish it more, and values less than or equal to zero - * means we don't want to publish it - * - */ - private long 
rankPublishNeed(Hash key, Long lastPublished) { - int bucket = _facade.getKBuckets().pickBucket(key); - long sendPeriod = (bucket+1) * RESEND_BUCKET_FACTOR; - long now = getContext().clock().now(); - if (lastPublished.longValue() < now-sendPeriod) { - RouterInfo ri = _facade.lookupRouterInfoLocally(key); - if (ri != null) { - if (ri.isCurrent(2 * ExpireRoutersJob.EXPIRE_DELAY)) { - // last time it was sent was before the last send period - return KBucketSet.NUM_BUCKETS - bucket; - } else { - if (_log.shouldLog(Log.INFO)) - _log.info("Not republishing router " + key - + " since it is really old [" - + (now-ri.getPublished()) + "ms]"); - return -2; - } - } else { - LeaseSet ls = _facade.lookupLeaseSetLocally(key); - if (ls != null) { - if (!getContext().clientManager().shouldPublishLeaseSet(ls.getDestination().calculateHash())) - return -3; - if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) { - // last time it was sent was before the last send period - return KBucketSet.NUM_BUCKETS - bucket; - } else { - if (_log.shouldLog(Log.INFO)) - _log.info("Not republishing leaseSet " + key - + " since it is really old [" - + (now-ls.getEarliestLeaseDate()) + "ms]"); - return -3; - } - } else { - if (_log.shouldLog(Log.WARN)) - _log.warn("Key " + key + " is not a leaseSet or routerInfo, definitely not publishing it"); - return -5; - } - } - } else { - // its been published since the last period we want to publish it - - if (now - RESEND_BUCKET_FACTOR > lastPublished.longValue()) { - if (_facade.lookupRouterInfoLocally(key) != null) { - // randomize the chance of rebroadcast for leases if we haven't - // sent it within 5 minutes - int val = getContext().random().nextInt(LEASE_REBROADCAST_PROBABILITY_SCALE); - if (val <= LEASE_REBROADCAST_PROBABILITY) { - if (_log.shouldLog(Log.INFO)) - _log.info("Randomized rebroadcast of leases tells us to send " - + key + ": " + val); - return 1; - } - } - } - return -1; - } - } -} diff --git 
a/router/java/src/net/i2p/router/networkdb/kademlia/ExploreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/ExploreJob.java index a64046f6a..388afa886 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/ExploreJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/ExploreJob.java @@ -28,15 +28,25 @@ class ExploreJob extends SearchJob { private Log _log; private PeerSelector _peerSelector; - /** how long each exploration should run for (currently a trivial 10 seconds) */ - private static final long MAX_EXPLORE_TIME = 10*1000; + /** how long each exploration should run for + * The exploration won't "succeed" so we make it long so we query several peers */ + private static final long MAX_EXPLORE_TIME = 15*1000; /** how many of the peers closest to the key being explored do we want to explicitly say "dont send me this"? */ private static final int NUM_CLOSEST_TO_IGNORE = 3; /** how many peers to explore through concurrently */ private static final int EXPLORE_BREDTH = 1; + + /** only send the closest "dont tell me about" refs... + * Override to make this bigger because we want to include both the + * floodfills and the previously-queried peers */ + static final int MAX_CLOSEST = 20; + /** Override to make this shorter, since we don't sort out the + * unresponsive ff peers like we do in FloodOnlySearchJob */ + static final int PER_FLOODFILL_PEER_TIMEOUT = 5*1000; + /** * Create a new search for the routingKey specified * @@ -60,19 +70,31 @@ class ExploreJob extends SearchJob { * massive (aka sending the entire routing table as 'dont tell me about these * guys'). but maybe we do. dunno. lots of implications. * + * FloodfillPeerSelector would add only the floodfill peers, + * and PeerSelector doesn't include the floodfill peers, + * so we add the ff peers ourselves and then use the regular PeerSelector. 
+ * * @param replyTunnelId tunnel to receive replies through * @param replyGateway gateway for the reply tunnel * @param expiration when the search should stop */ - protected DatabaseLookupMessage buildMessage(TunnelId replyTunnelId, RouterInfo replyGateway, long expiration) { + protected DatabaseLookupMessage buildMessage(TunnelId replyTunnelId, Hash replyGateway, long expiration) { DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext(), true); msg.setSearchKey(getState().getTarget()); - msg.setFrom(replyGateway.getIdentity().getHash()); + msg.setFrom(replyGateway); msg.setDontIncludePeers(getState().getClosestAttempted(MAX_CLOSEST)); msg.setMessageExpiration(expiration); msg.setReplyTunnel(replyTunnelId); int available = MAX_CLOSEST - msg.getDontIncludePeers().size(); + if (available > 0) { + List peers = ((FloodfillNetworkDatabaseFacade)_facade).getFloodfillPeers(); + int len = peers.size(); + if (len > 0) + msg.getDontIncludePeers().addAll(peers.subList(0, Math.min(available, len))); + } + + available = MAX_CLOSEST - msg.getDontIncludePeers().size(); if (available > 0) { List peers = _peerSelector.selectNearestExplicit(getState().getTarget(), available, msg.getDontIncludePeers(), getFacade().getKBuckets()); msg.getDontIncludePeers().addAll(peers); @@ -91,7 +113,7 @@ class ExploreJob extends SearchJob { * */ protected DatabaseLookupMessage buildMessage(long expiration) { - return buildMessage(null, getContext().router().getRouterInfo(), expiration); + return buildMessage(null, getContext().router().getRouterInfo().getIdentity().getHash(), expiration); } /** max # of concurrent searches */ diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/ExploreKeySelectorJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/ExploreKeySelectorJob.java index 0ecc123ff..cdec3356d 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/ExploreKeySelectorJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/ExploreKeySelectorJob.java 
@@ -37,6 +37,10 @@ class ExploreKeySelectorJob extends JobImpl { public String getName() { return "Explore Key Selector Job"; } public void runJob() { + if (((FloodfillNetworkDatabaseFacade)_facade).floodfillEnabled()) { + requeue(30*RERUN_DELAY_MS); + return; + } Set toExplore = selectKeysToExplore(); _log.info("Filling the explorer pool with: " + toExplore); if (toExplore != null) diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java index e19a4a2ed..575de5e7b 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java @@ -7,6 +7,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import net.i2p.data.DataFormatException; import net.i2p.data.DataStructure; import net.i2p.data.Hash; import net.i2p.data.LeaseSet; @@ -32,11 +33,14 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad public static final char CAPACITY_FLOODFILL = 'f'; private Map _activeFloodQueries; private boolean _floodfillEnabled; + /** for testing, see isFloodfill() below */ + private static String _alwaysQuery; public FloodfillNetworkDatabaseFacade(RouterContext context) { super(context); _activeFloodQueries = new HashMap(); _floodfillEnabled = false; + _alwaysQuery = _context.getProperty("netDb.alwaysQuery"); _context.statManager().createRateStat("netDb.successTime", "How long a successful search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l }); _context.statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l }); @@ -137,6 +141,19 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad public static boolean isFloodfill(RouterInfo peer) { if (peer == 
null) return false; + // For testing or local networks... we will + // pretend that the specified router is floodfill. + // Must be set at startup since it's static. + // In that router, set netDb.floodfillOnly=false. + // Warning - experts only! + if (_alwaysQuery != null) { + Hash aq = new Hash(); + try { + aq.fromBase64(_alwaysQuery); + if (aq.equals(peer.getIdentity().getHash())) + return true; + } catch (DataFormatException dfe) {} + } String caps = peer.getCapabilities(); if ( (caps != null) && (caps.indexOf(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL) != -1) ) return true; @@ -174,11 +191,11 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad synchronized (_activeFloodQueries) { searchJob = (FloodSearchJob)_activeFloodQueries.get(key); if (searchJob == null) { - if (SearchJob.onlyQueryFloodfillPeers(_context)) { + //if (SearchJob.onlyQueryFloodfillPeers(_context)) { searchJob = new FloodOnlySearchJob(_context, this, key, onFindJob, onFailedLookupJob, (int)timeoutMs, isLease); - } else { - searchJob = new FloodSearchJob(_context, this, key, onFindJob, onFailedLookupJob, (int)timeoutMs, isLease); - } + //} else { + // searchJob = new FloodSearchJob(_context, this, key, onFindJob, onFailedLookupJob, (int)timeoutMs, isLease); + //} _activeFloodQueries.put(key, searchJob); isNew = true; } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java index 1e73e3037..7504e660a 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java @@ -78,6 +78,7 @@ class FloodfillPeerSelector extends PeerSelector { _wanted = wanted; } public List getFloodfillParticipants() { return _floodfillMatches; } + private static final int EXTRA_MATCHES = 100; public void add(Hash entry) { //if 
(_context.profileOrganizer().isFailing(entry)) // return; @@ -98,7 +99,11 @@ class FloodfillPeerSelector extends PeerSelector { if (info != null && FloodfillNetworkDatabaseFacade.isFloodfill(info)) { _floodfillMatches.add(entry); } else { - if ( (!SearchJob.onlyQueryFloodfillPeers(_context)) && (_wanted > _matches) && (_key != null) ) { + // This didn't really work because we stopped filling up when _wanted == _matches, + // thus we don't add and sort the whole db to find the closest. + // So we keep going for a while. This, together with periodically shuffling the + // KBucket (see KBucketImpl.add()) makes exploration work well. + if ( (!SearchJob.onlyQueryFloodfillPeers(_context)) && (_wanted + EXTRA_MATCHES > _matches) && (_key != null) ) { BigInteger diff = getDistance(_key, entry); _sorted.put(diff, entry); } else { diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/KBucketImpl.java b/router/java/src/net/i2p/router/networkdb/kademlia/KBucketImpl.java index 1e795db8e..5d8e186f5 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/KBucketImpl.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/KBucketImpl.java @@ -10,6 +10,7 @@ package net.i2p.router.networkdb.kademlia; import java.math.BigInteger; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -18,6 +19,7 @@ import java.util.Set; import net.i2p.I2PAppContext; import net.i2p.data.DataHelper; import net.i2p.data.Hash; +import net.i2p.router.RouterContext; import net.i2p.util.Log; import net.i2p.util.RandomSource; @@ -31,12 +33,16 @@ class KBucketImpl implements KBucket { private int _begin; /** include if no bits higher than this bit (inclusive) are set */ private int _end; + /** when did we last shake things up */ + private long _lastShuffle; + private static final int SHUFFLE_DELAY = 10*60*1000; private I2PAppContext _context; public KBucketImpl(I2PAppContext context, Hash local) { _context 
= context; _log = context.logManager().getLog(KBucketImpl.class); - _entries = new ArrayList(64); //new HashSet(); + _entries = new ArrayList(0); //all but the last 1 or 2 buckets will be empty + _lastShuffle = context.clock().now(); setLocal(local); } @@ -232,6 +238,13 @@ class KBucketImpl implements KBucket { synchronized (_entries) { if (!_entries.contains(peer)) _entries.add(peer); + // Randomize the bucket every once in a while if we are floodfill, so that + // exploration will return better results. See FloodfillPeerSelector.add(Hash). + if (_lastShuffle + SHUFFLE_DELAY < _context.clock().now() && + !SearchJob.onlyQueryFloodfillPeers((RouterContext)_context)) { + Collections.shuffle(_entries, _context.random()); + _lastShuffle = _context.clock().now(); + } return _entries.size(); } } @@ -245,6 +258,9 @@ class KBucketImpl implements KBucket { /** * Generate a random key to go within this bucket * + * WARNING - Something is seriously broken here. testRand2() fails right away. + * ExploreKeySelectorJob is now disabled, ExploreJob just searches for a random + * key instead. */ public Hash generateRandomKey() { BigInteger variance = new BigInteger((_end-_begin)-1, _context.random()); @@ -336,6 +352,7 @@ class KBucketImpl implements KBucket { /** * Test harness to make sure its assigning keys to the right buckets * + * WARNING - Something is seriously broken here. testRand2() fails right away. 
*/ public static void main(String args[]) { testRand2(); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java index 763005e8c..6ff82c6a4 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java @@ -17,6 +17,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.Map; +import java.util.Properties; import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; @@ -52,10 +53,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { private DataStore _ds; // hash to DataStructure mapping, persisted when necessary /** where the data store is pushing the data */ private String _dbDir; - private Set _explicitSendKeys; // set of Hash objects that should be published ASAP - private Set _passiveSendKeys; // set of Hash objects that should be published when there's time private Set _exploreKeys; // set of Hash objects that we should search on (to fill up a bucket, not to get data) - private Map _lastSent; // Hash to Long (date last sent, or <= 0 for never) private boolean _initialized; /** Clock independent time of when we started up */ private long _started; @@ -121,8 +119,10 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { * know anyone or just started up) */ private final static long ROUTER_INFO_EXPIRATION = 3*24*60*60*1000l; + private final static long ROUTER_INFO_EXPIRATION_SHORT = 90*60*1000l; private final static long EXPLORE_JOB_DELAY = 10*60*1000l; + private final static long PUBLISH_JOB_DELAY = 5*60*1000l; public KademliaNetworkDatabaseFacade(RouterContext context) { _context = context; @@ -150,53 +150,6 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { 
_exploreJob.updateExploreSchedule(); } - public Set getExplicitSendKeys() { - if (!_initialized) return null; - synchronized (_explicitSendKeys) { - return new HashSet(_explicitSendKeys); - } - } - public Set getPassivelySendKeys() { - if (!_initialized) return null; - synchronized (_passiveSendKeys) { - return new HashSet(_passiveSendKeys); - } - } - public void removeFromExplicitSend(Set toRemove) { - if (!_initialized) return; - synchronized (_explicitSendKeys) { - _explicitSendKeys.removeAll(toRemove); - } - } - public void removeFromPassiveSend(Set toRemove) { - if (!_initialized) return; - synchronized (_passiveSendKeys) { - _passiveSendKeys.removeAll(toRemove); - } - } - public void queueForPublishing(Set toSend) { - if (!_initialized) return; - synchronized (_passiveSendKeys) { - _passiveSendKeys.addAll(toSend); - } - } - - public Long getLastSent(Hash key) { - if (!_initialized) return null; - synchronized (_lastSent) { - if (!_lastSent.containsKey(key)) - _lastSent.put(key, new Long(0)); - return (Long)_lastSent.get(key); - } - } - - public void noteKeySent(Hash key) { - if (!_initialized) return; - synchronized (_lastSent) { - _lastSent.put(key, new Long(_context.clock().now())); - } - } - public Set getExploreKeys() { if (!_initialized) return null; synchronized (_exploreKeys) { @@ -223,10 +176,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { _initialized = false; _kb = null; _ds = null; - _explicitSendKeys = null; - _passiveSendKeys = null; _exploreKeys = null; - _lastSent = null; } public void restart() { @@ -241,9 +191,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { else _enforceNetId = DEFAULT_ENFORCE_NETID; _ds.restart(); - synchronized (_explicitSendKeys) { _explicitSendKeys.clear(); } synchronized (_exploreKeys) { _exploreKeys.clear(); } - synchronized (_passiveSendKeys) { _passiveSendKeys.clear(); } _initialized = true; @@ -270,10 +218,7 @@ public class KademliaNetworkDatabaseFacade 
extends NetworkDatabaseFacade { _kb = new KBucketSet(_context, ri.getIdentity().getHash()); _ds = new PersistentDataStore(_context, dbDir, this); //_ds = new TransientDataStore(); - _explicitSendKeys = new HashSet(64); - _passiveSendKeys = new HashSet(64); _exploreKeys = new HashSet(64); - _lastSent = new HashMap(1024); _dbDir = dbDir; createHandlers(); @@ -281,9 +226,6 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { _initialized = true; _started = System.currentTimeMillis(); - // read the queues and publish appropriately - if (false) - _context.jobQueue().addJob(new DataPublisherJob(_context, this)); // expire old leases _context.jobQueue().addJob(new ExpireLeasesJob(_context, this)); @@ -295,10 +237,11 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { ////_context.jobQueue().addJob(new ExpireRoutersJob(_context, this)); if (!_quiet) { - // fill the passive queue periodically - _context.jobQueue().addJob(new DataRepublishingSelectorJob(_context, this)); // fill the search queue with random keys in buckets that are too small - _context.jobQueue().addJob(new ExploreKeySelectorJob(_context, this)); + // Disabled since KBucketImpl.generateRandomKey() is b0rked, + // and anyway, we want to search for a completely random key, + // not a random key for a particular kbucket. 
+ // _context.jobQueue().addJob(new ExploreKeySelectorJob(_context, this)); if (_exploreJob == null) _exploreJob = new StartExplorersJob(_context, this); // fire off a group of searches from the explore pool @@ -320,7 +263,9 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { } // periodically update and resign the router's 'published date', which basically // serves as a version - _context.jobQueue().addJob(new PublishLocalRouterInfoJob(_context)); + Job plrij = new PublishLocalRouterInfoJob(_context); + plrij.getTiming().setStartAfter(_context.clock().now() + PUBLISH_JOB_DELAY); + _context.jobQueue().addJob(plrij); try { publish(ri); } catch (IllegalArgumentException iae) { @@ -511,6 +456,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { } } + private static final long PUBLISH_DELAY = 3*1000; public void publish(LeaseSet localLeaseSet) { if (!_initialized) return; Hash h = localLeaseSet.getDestination().calculateHash(); @@ -523,9 +469,6 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { if (!_context.clientManager().shouldPublishLeaseSet(h)) return; - synchronized (_explicitSendKeys) { - _explicitSendKeys.add(h); - } RepublishLeaseSetJob j = null; synchronized (_publishingLeaseSets) { j = (RepublishLeaseSetJob)_publishingLeaseSets.get(h); @@ -534,7 +477,10 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { _publishingLeaseSets.put(h, j); } } - j.getTiming().setStartAfter(_context.clock().now()); + // Don't spam the floodfills. In addition, always delay a few seconds since there may + // be another leaseset change coming along momentarily. 
+ long nextTime = Math.max(j.lastPublished() + j.REPUBLISH_LEASESET_TIMEOUT, _context.clock().now() + PUBLISH_DELAY); + j.getTiming().setStartAfter(nextTime); _context.jobQueue().addJob(j); } @@ -554,9 +500,6 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { if (_context.router().isHidden()) return; // DE-nied! Hash h = localRouterInfo.getIdentity().getHash(); store(h, localRouterInfo); - synchronized (_explicitSendKeys) { - _explicitSendKeys.add(h); - } } /** @@ -649,10 +592,6 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { throw new IllegalArgumentException("Invalid store attempt - " + err); _ds.put(key, leaseSet); - synchronized (_lastSent) { - if (!_lastSent.containsKey(key)) - _lastSent.put(key, new Long(0)); - } // Iterate through the old failure / success count, copying over the old // values (if any tunnels overlap between leaseSets). no need to be @@ -721,6 +660,16 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { } else if ( (_context.router().getUptime() > 60*60*1000) && (routerInfo.getPublished() < now - 2*24*60*60*1000l) ) { long age = _context.clock().now() - routerInfo.getPublished(); return "Peer " + key.toBase64() + " published " + DataHelper.formatDuration(age) + " ago"; + } else if (!routerInfo.isCurrent(ROUTER_INFO_EXPIRATION_SHORT) && (_context.router().getUptime() > 60*60*1000) ) { + if (routerInfo.getAddresses().size() <= 0) + return "Peer " + key.toBase64() + " published > 90m ago with no addresses"; + RouterAddress ra = routerInfo.getTargetAddress("SSU"); + if (ra != null) { + // Introducers change often, introducee will ping introducer for 2 hours + Properties props = ra.getOptions(); + if (props != null && props.getProperty("ihost0") != null) + return "Peer " + key.toBase64() + " published > 90m ago with SSU Introducers"; + } } return null; } @@ -751,10 +700,6 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { 
_context.peerManager().setCapabilities(key, routerInfo.getCapabilities()); _ds.put(key, routerInfo); - synchronized (_lastSent) { - if (!_lastSent.containsKey(key)) - _lastSent.put(key, new Long(0)); - } if (rv == null) _kb.add(key); return rv; @@ -789,15 +734,6 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { _ds.remove(dbEntry); else _ds.removeLease(dbEntry); - synchronized (_lastSent) { - _lastSent.remove(dbEntry); - } - synchronized (_explicitSendKeys) { - _explicitSendKeys.remove(dbEntry); - } - synchronized (_passiveSendKeys) { - _passiveSendKeys.remove(dbEntry); - } } /** don't use directly - see F.N.D.F. override */ @@ -814,30 +750,12 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { } _ds.remove(peer); - synchronized (_lastSent) { - _lastSent.remove(peer); - } - synchronized (_explicitSendKeys) { - _explicitSendKeys.remove(peer); - } - synchronized (_passiveSendKeys) { - _passiveSendKeys.remove(peer); - } } public void unpublish(LeaseSet localLeaseSet) { if (!_initialized) return; Hash h = localLeaseSet.getDestination().calculateHash(); DataStructure data = _ds.remove(h); - synchronized (_lastSent) { - _lastSent.remove(h); - } - synchronized (_explicitSendKeys) { - _explicitSendKeys.remove(h); - } - synchronized (_passiveSendKeys) { - _passiveSendKeys.remove(h); - } if (data == null) { if (_log.shouldLog(Log.WARN)) @@ -954,7 +872,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { StringBuffer buf = new StringBuffer(4*1024); buf.append("

            Network Database RouterInfo Lookup

            \n"); if (".".equals(routerPrefix)) { - renderRouterInfo(buf, _context.router().getRouterInfo(), true); + renderRouterInfo(buf, _context.router().getRouterInfo(), true, true); } else { boolean notFound = true; Set routers = getRouters(); @@ -962,7 +880,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { RouterInfo ri = (RouterInfo)iter.next(); Hash key = ri.getIdentity().getHash(); if (key.toBase64().startsWith(routerPrefix)) { - renderRouterInfo(buf, ri, false); + renderRouterInfo(buf, ri, false, true); notFound = false; } } @@ -974,7 +892,14 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { } public void renderStatusHTML(Writer out) throws IOException { - StringBuffer buf = new StringBuffer(getKnownRouters() * 2048); + renderStatusHTML(out, true); + } + + public void renderStatusHTML(Writer out, boolean full) throws IOException { + int size = getKnownRouters() * 512; + if (full) + size *= 4; + StringBuffer buf = new StringBuffer(size); buf.append("

            Network Database Contents

            \n"); if (!_initialized) { buf.append("Not initialized\n"); @@ -1028,10 +953,15 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { } Hash us = _context.routerHash(); - out.write("

            Routers

            \n"); + out.write("

            Routers (view without"); + else + out.write("?f=1#routers\" >view with"); + out.write(" stats)

            \n"); RouterInfo ourInfo = _context.router().getRouterInfo(); - renderRouterInfo(buf, ourInfo, true); + renderRouterInfo(buf, ourInfo, true, true); out.write(buf.toString()); buf.setLength(0); @@ -1045,7 +975,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { Hash key = ri.getIdentity().getHash(); boolean isUs = key.equals(us); if (!isUs) { - renderRouterInfo(buf, ri, false); + renderRouterInfo(buf, ri, false, full); out.write(buf.toString()); buf.setLength(0); String coreVersion = ri.getOption("coreVersion"); @@ -1086,7 +1016,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { out.flush(); } - private void renderRouterInfo(StringBuffer buf, RouterInfo info, boolean isUs) { + private void renderRouterInfo(StringBuffer buf, RouterInfo info, boolean isUs, boolean full) { String hash = info.getIdentity().getHash().toBase64(); buf.append(""); if (isUs) { @@ -1113,13 +1043,18 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { } } buf.append("
            \n"); - buf.append("Stats:
            \n"); - for (Iterator iter = info.getOptions().keySet().iterator(); iter.hasNext(); ) { - String key = (String)iter.next(); - String val = info.getOption(key); - buf.append(DataHelper.stripHTML(key)).append(" = ").append(DataHelper.stripHTML(val)).append("
            \n"); + if (full) { + buf.append("Stats:
            \n"); + for (Iterator iter = info.getOptions().keySet().iterator(); iter.hasNext(); ) { + String key = (String)iter.next(); + String val = info.getOption(key); + buf.append(DataHelper.stripHTML(key)).append(" = ").append(DataHelper.stripHTML(val)).append("
            \n"); + } + buf.append("
            \n"); + } else { + buf.append("
            Full entry\n"); } - buf.append("
            \n"); + buf.append("
            \n"); } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java b/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java index 77d2427cd..5d0d219db 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/PersistentDataStore.java @@ -65,10 +65,6 @@ class PersistentDataStore extends TransientDataStore { return super.remove(key); } - public DataStructure removeLease(Hash key) { - return super.removeLease(key); - } - public void put(Hash key, DataStructure data) { if ( (data == null) || (key == null) ) return; super.put(key, data); @@ -77,26 +73,6 @@ class PersistentDataStore extends TransientDataStore { _writer.queue(key, data); } -/* - * We don't store leasesets here anymore, use the TransientDataStore count - * - public int countLeaseSets() { - File dbDir = null; - try { - dbDir = getDbDir(); - } catch (IOException ioe) { - return 0; - } - if (dbDir == null) - return 0; - File leaseSetFiles[] = dbDir.listFiles(LeaseSetFilter.getInstance()); - if (leaseSetFiles == null) - return 0; - else - return leaseSetFiles.length; - } -*/ - private void accept(LeaseSet ls) { super.put(ls.getDestination().calculateHash(), ls); } @@ -249,18 +225,6 @@ class PersistentDataStore extends TransientDataStore { int routerCount = 0; try { File dbDir = getDbDir(); -/**** - if (getContext().router().getUptime() < 10*60*1000) { - File leaseSetFiles[] = dbDir.listFiles(LeaseSetFilter.getInstance()); - if (leaseSetFiles != null) { - for (int i = 0; i < leaseSetFiles.length; i++) { - Hash key = getLeaseSetHash(leaseSetFiles[i].getName()); - if ( (key != null) && (!isKnown(key)) ) - PersistentDataStore.this._context.jobQueue().addJob(new ReadLeaseJob(leaseSetFiles[i], key)); - } - } - } -****/ File routerInfoFiles[] = dbDir.listFiles(RouterInfoFilter.getInstance()); if (routerInfoFiles != null) { routerCount += routerInfoFiles.length; @@ 
-283,63 +247,6 @@ class PersistentDataStore extends TransientDataStore { } } -/**** - private class ReadLeaseJob extends JobImpl { - private File _leaseFile; - private Hash _key; - public ReadLeaseJob(File leaseFile, Hash key) { - super(PersistentDataStore.this._context); - _leaseFile = leaseFile; - _key = key; - } - public String getName() { return "Read LeaseSet"; } - private boolean shouldRead() { - DataStructure data = get(_key); - if (data == null) return true; - if (data instanceof LeaseSet) { - long knownDate = ((LeaseSet)data).getEarliestLeaseDate(); - long fileDate = _leaseFile.lastModified(); - if (fileDate > knownDate) - return true; - else - return false; - } else { - // wtf - return true; - } - } - public void runJob() { - if (!shouldRead()) return; - try { - FileInputStream fis = null; - boolean corrupt = false; - try { - fis = new FileInputStream(_leaseFile); - LeaseSet ls = new LeaseSet(); - ls.readBytes(fis); - try { - _facade.store(ls.getDestination().calculateHash(), ls); - } catch (IllegalArgumentException iae) { - _log.info("Refused locally loaded leaseSet - deleting"); - corrupt = true; - } - } catch (DataFormatException dfe) { - _log.warn("Error reading the leaseSet from " + _leaseFile.getAbsolutePath(), dfe); - corrupt = true; - } catch (FileNotFoundException fnfe) { - _log.debug("Deleted prior to read.. 
a race during expiration / load"); - corrupt = false; - } finally { - if (fis != null) try { fis.close(); } catch (IOException ioe) {} - } - if (corrupt) _leaseFile.delete(); - } catch (IOException ioe) { - _log.warn("Error reading the leaseSet from " + _leaseFile.getAbsolutePath(), ioe); - } - } - } -****/ - private class ReadRouterJob extends JobImpl { private File _routerFile; private Hash _key; @@ -464,31 +371,8 @@ class PersistentDataStore extends TransientDataStore { _log.info("Removed router info at " + f.getAbsolutePath()); return; } -/*** - String lsName = getLeaseSetName(key); - File f = new File(dir, lsName); - if (f.exists()) { - boolean removed = f.delete(); - if (!removed) - _log.warn("Unable to remove lease set at " + f.getAbsolutePath()); - else - _log.info("Removed lease set at " + f.getAbsolutePath()); - return; - } -***/ } -/*** - private final static class LeaseSetFilter implements FilenameFilter { - private static final FilenameFilter _instance = new LeaseSetFilter(); - public static final FilenameFilter getInstance() { return _instance; } - public boolean accept(File dir, String name) { - if (name == null) return false; - name = name.toUpperCase(); - return (name.startsWith(LEASESET_PREFIX.toUpperCase()) && name.endsWith(LEASESET_SUFFIX.toUpperCase())); - } - } -***/ private final static class RouterInfoFilter implements FilenameFilter { private static final FilenameFilter _instance = new RouterInfoFilter(); public static final FilenameFilter getInstance() { return _instance; } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java index a6ca3c0bb..ac191a96c 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java @@ -23,15 +23,18 @@ import net.i2p.util.Log; public class RepublishLeaseSetJob extends JobImpl { private Log 
_log; private final static long REPUBLISH_LEASESET_DELAY = 5*60*1000; - private final static long REPUBLISH_LEASESET_TIMEOUT = 60*1000; + public final static long REPUBLISH_LEASESET_TIMEOUT = 60*1000; private Hash _dest; private KademliaNetworkDatabaseFacade _facade; + /** this is actually last attempted publish */ + private long _lastPublished; public RepublishLeaseSetJob(RouterContext ctx, KademliaNetworkDatabaseFacade facade, Hash destHash) { super(ctx); _log = ctx.logManager().getLog(RepublishLeaseSetJob.class); _facade = facade; _dest = destHash; + _lastPublished = 0; //getTiming().setStartAfter(ctx.clock().now()+REPUBLISH_LEASESET_DELAY); getContext().statManager().createRateStat("netDb.republishLeaseSetCount", "How often we republish a leaseSet?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); } @@ -52,6 +55,7 @@ public class RepublishLeaseSetJob extends JobImpl { } else { getContext().statManager().addRateData("netDb.republishLeaseSetCount", 1, 0); _facade.sendStore(_dest, ls, new OnRepublishSuccess(getContext()), new OnRepublishFailure(getContext(), this), REPUBLISH_LEASESET_TIMEOUT, null); + _lastPublished = getContext().clock().now(); //getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _dest, ls, new OnSuccess(getContext()), new OnFailure(getContext()), REPUBLISH_LEASESET_TIMEOUT)); } } else { @@ -81,23 +85,27 @@ public class RepublishLeaseSetJob extends JobImpl { _log.warn("FAILED publishing of the leaseSet for " + _dest.toBase64()); requeue(getContext().random().nextInt(60*1000)); } -} -class OnRepublishSuccess extends JobImpl { - public OnRepublishSuccess(RouterContext ctx) { super(ctx); } - public String getName() { return "Publish leaseSet successful"; } - public void runJob() { - //if (_log.shouldLog(Log.DEBUG)) - // _log.debug("successful publishing of the leaseSet for " + _dest.toBase64()); + public long lastPublished() { + return _lastPublished; + } + + class OnRepublishSuccess extends JobImpl { + 
public OnRepublishSuccess(RouterContext ctx) { super(ctx); } + public String getName() { return "Publish leaseSet successful"; } + public void runJob() { + //if (_log.shouldLog(Log.DEBUG)) + // _log.debug("successful publishing of the leaseSet for " + _dest.toBase64()); + } + } + + class OnRepublishFailure extends JobImpl { + private RepublishLeaseSetJob _job; + public OnRepublishFailure(RouterContext ctx, RepublishLeaseSetJob job) { + super(ctx); + _job = job; + } + public String getName() { return "Publish leaseSet failed"; } + public void runJob() { _job.requeueRepublish(); } } } - -class OnRepublishFailure extends JobImpl { - private RepublishLeaseSetJob _job; - public OnRepublishFailure(RouterContext ctx, RepublishLeaseSetJob job) { - super(ctx); - _job = job; - } - public String getName() { return "Publish leaseSet failed"; } - public void runJob() { _job.requeueRepublish(); } -} \ No newline at end of file diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java index 0f31fa9ec..868c6b108 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java @@ -9,6 +9,7 @@ package net.i2p.router.networkdb.kademlia; */ import java.util.ArrayList; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Set; @@ -121,12 +122,23 @@ class SearchJob extends JobImpl { private static final boolean DEFAULT_FLOODFILL_ONLY = true; + /** this is now misnamed, as it is only used to determine whether to return floodfill peers only */ static boolean onlyQueryFloodfillPeers(RouterContext ctx) { - if (isCongested(ctx)) - return true; + //if (isCongested(ctx)) + // return true; + // If we are floodfill, we want the FloodfillPeerSelector (in add()) to include + // non-ff peers (if required) in DatabaseSearchReplyMessage responses + // so that Exploration works. 
+ // ExploreJob is disabled if we are floodfill. + // The other two places this was called (one below and one in FNDF) + // have been commented out. + // Returning false essentially enables kademlia as a backup to floodfill for search responses. + if (FloodfillNetworkDatabaseFacade.floodfillEnabled(ctx)) + return false; return Boolean.valueOf(ctx.getProperty("netDb.floodfillOnly", DEFAULT_FLOODFILL_ONLY + "")).booleanValue(); } +/*** static boolean isCongested(RouterContext ctx) { float availableSend = ctx.bandwidthLimiter().getOutboundKBytesPerSecond()*1024 - ctx.bandwidthLimiter().getSendBps(); float availableRecv = ctx.bandwidthLimiter().getInboundKBytesPerSecond()*1024 - ctx.bandwidthLimiter().getReceiveBps(); @@ -134,8 +146,10 @@ class SearchJob extends JobImpl { // in that range without a problem return ( (availableSend < 6*1024) || (availableRecv < 6*1024) ); } +***/ static final int PER_FLOODFILL_PEER_TIMEOUT = 10*1000; + static final long MIN_TIMEOUT = 2500; protected int getPerPeerTimeoutMs(Hash peer) { int timeout = 0; @@ -146,7 +160,7 @@ class SearchJob extends JobImpl { long now = getContext().clock().now(); if (now + timeout > _expiration) - return (int)(_expiration - now); + return (int) Math.max(_expiration - now, MIN_TIMEOUT); else return timeout; } @@ -247,7 +261,8 @@ class SearchJob extends JobImpl { int sent = 0; Set attempted = _state.getAttempted(); while (sent <= 0) { - boolean onlyFloodfill = onlyQueryFloodfillPeers(getContext()); + //boolean onlyFloodfill = onlyQueryFloodfillPeers(getContext()); + boolean onlyFloodfill = true; if (_floodfillPeersExhausted && onlyFloodfill && _state.getPending().size() <= 0) { if (_log.shouldLog(Log.WARN)) _log.warn(getJobId() + ": no non-floodfill peers left, and no more pending. 
Searched: " @@ -421,7 +436,7 @@ class SearchJob extends JobImpl { if (_log.shouldLog(Log.DEBUG)) - _log.debug(getJobId() + ": Sending leaseSet search to " + router.getIdentity().getHash().toBase64() + _log.debug(getJobId() + ": Sending search to " + router.getIdentity().getHash().toBase64() + " for " + msg.getSearchKey().toBase64() + " w/ replies through [" + msg.getFrom().toBase64() + "] via tunnel [" + msg.getReplyTunnel() + "]"); @@ -745,7 +760,7 @@ class SearchJob extends JobImpl { } } - private class Search { + private static class Search { private Job _onFind; private Job _onFail; private long _expiration; @@ -772,6 +787,18 @@ class SearchJob extends JobImpl { boolean wasAttempted(Hash peer) { return _state.wasAttempted(peer); } long timeoutMs() { return _timeoutMs; } - boolean add(Hash peer) { return _facade.getKBuckets().add(peer); } + + /** @return true if peer was new */ + boolean add(Hash peer) { + boolean rv = _facade.getKBuckets().add(peer); + if (rv) { + if (_log.shouldLog(Log.DEBUG)) + _log.debug(getJobId() + ": Queueing up for next time: " + peer); + Set s = new HashSet(1); + s.add(peer); + _facade.queueForExploration(s); + } + return rv; + } void decrementOutstandingFloodfillSearches() { _floodfillSearchesOutstanding--; } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/SearchReplyJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/SearchReplyJob.java index 892b5966e..c2365eb0c 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/SearchReplyJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/SearchReplyJob.java @@ -57,6 +57,8 @@ class SearchReplyJob extends JobImpl { public String getName() { return "Process Reply for Kademlia Search"; } public void runJob() { if (_curIndex >= _msg.getNumReplies()) { + if (_log.shouldLog(Log.DEBUG) && _msg.getNumReplies() == 0) + _log.debug(getJobId() + ": dbSearchReply received with no routers referenced"); if (_repliesPendingVerification > 0) { // we received new 
references from the peer, but still // haven't verified all of them, so lets give it more time @@ -106,7 +108,8 @@ class SearchReplyJob extends JobImpl { _duplicatePeers++; } if (_log.shouldLog(Log.DEBUG)) - _log.debug(getJobId() + ": dbSearchReply received on search referencing router " + peer); + _log.debug(getJobId() + ": dbSearchReply received on search referencing router " + peer + + " already known? " + (info != null)); if (shouldAdd) { if (_searchJob.add(peer)) _newPeers++; diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/StartExplorersJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/StartExplorersJob.java index 159cb8be6..25b82e697 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/StartExplorersJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/StartExplorersJob.java @@ -20,6 +20,7 @@ import net.i2p.util.Log; /** * Fire off search jobs for random keys from the explore pool, up to MAX_PER_RUN * at a time. + * If the explore pool is empty, just search for a random key. 
* */ class StartExplorersJob extends JobImpl { @@ -28,7 +29,7 @@ class StartExplorersJob extends JobImpl { /** don't explore more than 1 bucket at a time */ private static final int MAX_PER_RUN = 1; - /** dont explore the network more often than once every minute */ + /** dont explore the network more often than this */ private static final int MIN_RERUN_DELAY_MS = 5*60*1000; /** explore the network at least once every thirty minutes */ private static final int MAX_RERUN_DELAY_MS = 30*60*1000; @@ -41,14 +42,15 @@ class StartExplorersJob extends JobImpl { public String getName() { return "Start Explorers Job"; } public void runJob() { - Set toExplore = selectKeysToExplore(); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Keys to explore during this run: " + toExplore); - _facade.removeFromExploreKeys(toExplore); - for (Iterator iter = toExplore.iterator(); iter.hasNext(); ) { - Hash key = (Hash)iter.next(); - //_log.info("Starting explorer for " + key, new Exception("Exploring!")); - getContext().jobQueue().addJob(new ExploreJob(getContext(), _facade, key)); + if (! ((FloodfillNetworkDatabaseFacade)_facade).floodfillEnabled()) { + Set toExplore = selectKeysToExplore(); + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Keys to explore during this run: " + toExplore); + _facade.removeFromExploreKeys(toExplore); + for (Iterator iter = toExplore.iterator(); iter.hasNext(); ) { + Hash key = (Hash)iter.next(); + getContext().jobQueue().addJob(new ExploreJob(getContext(), _facade, key)); + } } long delay = getNextRunDelay(); if (_log.shouldLog(Log.DEBUG)) @@ -81,18 +83,24 @@ class StartExplorersJob extends JobImpl { /** * Run through the explore pool and pick out some values * + * Nope, ExploreKeySelectorJob is disabled, so the explore pool + * may be empty. In that case, generate random keys. 
*/ private Set selectKeysToExplore() { Set queued = _facade.getExploreKeys(); if (_log.shouldLog(Log.DEBUG)) _log.debug("Keys waiting for exploration: " + queued.size()); - if (queued.size() <= MAX_PER_RUN) - return queued; Set rv = new HashSet(MAX_PER_RUN); for (Iterator iter = queued.iterator(); iter.hasNext(); ) { if (rv.size() >= MAX_PER_RUN) break; rv.add(iter.next()); } + for (int i = rv.size(); i < MAX_PER_RUN; i++) { + byte hash[] = new byte[Hash.HASH_LENGTH]; + getContext().random().nextBytes(hash); + Hash key = new Hash(hash); + rv.add(key); + } return rv; } } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java index 18d18f13e..cc4c51329 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java @@ -437,7 +437,6 @@ class StoreJob extends JobImpl { _log.debug(getJobId() + ": State of successful send: " + _state); if (_onSuccess != null) getContext().jobQueue().addJob(_onSuccess); - _facade.noteKeySent(_state.getTarget()); _state.complete(true); getContext().statManager().addRateData("netDb.storePeers", _state.getAttempted().size(), _state.getWhenCompleted()-_state.getWhenStarted()); } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/TransientDataStore.java b/router/java/src/net/i2p/router/networkdb/kademlia/TransientDataStore.java index 492b164e7..d21141468 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/TransientDataStore.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/TransientDataStore.java @@ -9,6 +9,7 @@ package net.i2p.router.networkdb.kademlia; */ import java.util.Date; +import java.util.concurrent.ConcurrentHashMap; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -25,49 +26,38 @@ import net.i2p.util.Log; class TransientDataStore implements DataStore { private Log _log; - private Map _data; // hash 
--> DataStructure + private Map _data; protected RouterContext _context; public TransientDataStore(RouterContext ctx) { _context = ctx; _log = ctx.logManager().getLog(TransientDataStore.class); - _data = new HashMap(1024); + _data = new ConcurrentHashMap(1024); if (_log.shouldLog(Log.INFO)) _log.info("Data Store initialized"); } public void restart() { - synchronized (_data) { - _data.clear(); - } + _data.clear(); } public Set getKeys() { - synchronized (_data) { - return new HashSet(_data.keySet()); - } + return new HashSet(_data.keySet()); } public DataStructure get(Hash key) { - synchronized (_data) { - return (DataStructure)_data.get(key); - } + return _data.get(key); } public boolean isKnown(Hash key) { - synchronized (_data) { - return _data.containsKey(key); - } + return _data.containsKey(key); } public int countLeaseSets() { int count = 0; - synchronized (_data) { - for (Iterator iter = _data.values().iterator(); iter.hasNext();) { - DataStructure data = (DataStructure)iter.next(); - if (data instanceof LeaseSet) - count++; - } + for (DataStructure d : _data.values()) { + if (d instanceof LeaseSet) + count++; } return count; } @@ -81,10 +71,8 @@ class TransientDataStore implements DataStore { if (data == null) return; if (_log.shouldLog(Log.DEBUG)) _log.debug("Storing key " + key); - Object old = null; - synchronized (_data) { - old = _data.put(key, data); - } + DataStructure old = null; + old = _data.put(key, data); if (data instanceof RouterInfo) { _context.profileManager().heardAbout(key); RouterInfo ri = (RouterInfo)data; @@ -95,17 +83,13 @@ class TransientDataStore implements DataStore { _log.info("Almost clobbered an old router! 
" + key + ": [old published on " + new Date(ori.getPublished()) + " new on " + new Date(ri.getPublished()) + "]"); if (_log.shouldLog(Log.DEBUG)) _log.debug("Number of router options for " + key + ": " + ri.getOptions().size() + " (old one had: " + ori.getOptions().size() + ")", new Exception("Updated routerInfo")); - synchronized (_data) { - _data.put(key, old); - } + _data.put(key, old); } else if (ri.getPublished() > _context.clock().now() + MAX_FUTURE_PUBLISH_DATE) { if (_log.shouldLog(Log.INFO)) _log.info("Hmm, someone tried to give us something with the publication date really far in the future (" + new Date(ri.getPublished()) + "), dropping it"); if (_log.shouldLog(Log.DEBUG)) _log.debug("Number of router options for " + key + ": " + ri.getOptions().size() + " (old one had: " + ori.getOptions().size() + ")", new Exception("Updated routerInfo")); - synchronized (_data) { - _data.put(key, old); - } + _data.put(key, old); } else { if (_log.shouldLog(Log.INFO)) _log.info("Updated the old router for " + key + ": [old published on " + new Date(ori.getPublished()) + " new on " + new Date(ri.getPublished()) + "]"); @@ -125,15 +109,11 @@ class TransientDataStore implements DataStore { if (ls.getEarliestLeaseDate() < ols.getEarliestLeaseDate()) { if (_log.shouldLog(Log.INFO)) _log.info("Almost clobbered an old leaseSet! 
" + key + ": [old published on " + new Date(ols.getEarliestLeaseDate()) + " new on " + new Date(ls.getEarliestLeaseDate()) + "]"); - synchronized (_data) { - _data.put(key, old); - } + _data.put(key, old); } else if (ls.getEarliestLeaseDate() > _context.clock().now() + MAX_FUTURE_EXPIRATION_DATE) { if (_log.shouldLog(Log.INFO)) _log.info("Hmm, someone tried to give us something with the expiration date really far in the future (" + new Date(ls.getEarliestLeaseDate()) + "), dropping it"); - synchronized (_data) { - _data.put(key, old); - } + _data.put(key, old); } } } @@ -150,13 +130,9 @@ class TransientDataStore implements DataStore { public String toString() { StringBuffer buf = new StringBuffer(); buf.append("Transient DataStore: ").append(_data.size()).append("\nKeys: "); - Map data = new HashMap(); - synchronized (_data) { - data.putAll(_data); - } - for (Iterator iter = data.keySet().iterator(); iter.hasNext();) { - Hash key = (Hash)iter.next(); - DataStructure dp = (DataStructure)data.get(key); + for (Map.Entry e : _data.entrySet()) { + Hash key = e.getKey(); + DataStructure dp = e.getValue(); buf.append("\n\t*Key: ").append(key.toString()).append("\n\tContent: ").append(dp.toString()); } buf.append("\n"); @@ -168,10 +144,8 @@ class TransientDataStore implements DataStore { } public DataStructure remove(Hash key) { - synchronized (_data) { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Removing key " + key.toBase64()); - return (DataStructure)_data.remove(key); - } + if (_log.shouldLog(Log.DEBUG)) + _log.debug("Removing key " + key.toBase64()); + return _data.remove(key); } } diff --git a/router/java/src/net/i2p/router/peermanager/PeerManager.java b/router/java/src/net/i2p/router/peermanager/PeerManager.java index b2b16a00d..1c265ee67 100644 --- a/router/java/src/net/i2p/router/peermanager/PeerManager.java +++ b/router/java/src/net/i2p/router/peermanager/PeerManager.java @@ -24,6 +24,7 @@ import net.i2p.router.PeerSelectionCriteria; import 
net.i2p.router.RouterContext; import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade; import net.i2p.util.Log; +import net.i2p.util.SimpleScheduler; import net.i2p.util.SimpleTimer; /** @@ -50,7 +51,7 @@ class PeerManager { _peersByCapability[i] = new ArrayList(64); loadProfiles(); ////_context.jobQueue().addJob(new EvaluateProfilesJob(_context)); - SimpleTimer.getInstance().addEvent(new Reorg(), 0); + SimpleScheduler.getInstance().addPeriodicEvent(new Reorg(), 0, 30*1000); //_context.jobQueue().addJob(new PersistProfilesJob(_context, this)); } @@ -60,8 +61,6 @@ class PeerManager { _organizer.reorganize(true); } catch (Throwable t) { _log.log(Log.CRIT, "Error evaluating profiles", t); - } finally { - SimpleTimer.getInstance().addEvent(Reorg.this, 30*1000); } } } diff --git a/router/java/src/net/i2p/router/peermanager/PeerProfile.java b/router/java/src/net/i2p/router/peermanager/PeerProfile.java index 514a9ae4a..4a94100bd 100644 --- a/router/java/src/net/i2p/router/peermanager/PeerProfile.java +++ b/router/java/src/net/i2p/router/peermanager/PeerProfile.java @@ -8,6 +8,21 @@ import net.i2p.router.RouterContext; import net.i2p.stat.RateStat; import net.i2p.util.Log; +/** + * Copied from http://www.i2p2.i2p/how_peerselection.html + * + * See also main() below for additional commentary by zzz. + * + * Currently, there is no 'ejection' strategy to get rid of the profiles for peers that + * are no longer active (or when the network consists of thousands of peers, to get rid + * of peers that are performing poorly). However, the size of each profile is fairly small, + * and is unrelated to how much data is collected about the peer, so that a router can + * keep a few thousand active peer profiles before the overhead becomes a serious concern. + * Once it becomes necessary, we can simply compact the poorly performing profiles + * (keeping only the most basic data) and maintain hundreds of thousands of profiles + * in memory. 
Beyond that size, we can simply eject the peers (e.g. keeping the best 100,000). + */ + public class PeerProfile { private Log _log; private RouterContext _context; @@ -315,6 +330,11 @@ public class PeerProfile { } } } + /** + * @return the average of the three fastest one-minute data transfers, on a per-tunnel basis, + * through this peer. Ever. Except that the peak values are cut in half + * once a day by coalesceThroughput(). This seems way too seldom. + */ public double getPeakTunnel1mThroughputKBps() { double rv = 0; for (int i = 0; i < THROUGHPUT_COUNT; i++) @@ -504,12 +524,36 @@ public class PeerProfile { public String toString() { return "Profile: " + getPeer().toBase64(); } /** + * New measurement is 12KB per expanded profile. (2009-03 zzz) + * And nowhere in the code is shrinkProfile() called so + * the size of compact profiles doesn't matter right now. + * This is far bigger than the NetDB entry, which is only about 1.5KB + * now that most of the stats have been removed. + * + * The biggest user in the profile is the Rates. (144 bytes per according to jhat). + * PeerProfile: 9 RateStats, 3-5 Rates each - 35 total + * DBHistory: 2 RateStats, 3 each - 6 total + * TunnelHistory: 4 RateStats, 5 each - 20 total + * --- --------- + * 15 61 total + * *60 bytes *144 bytes + * --- --------- + * 900 bytes 8784 bytes + * + * The RateStat itself is 32 bytes and the Rate[] is 28 so that adds + * about 1KB. + * + * So two obvious things to do are cut out some of the Rates, + * and call shrinkProfile(). + * + * Obsolete calculation follows: + * * Calculate the memory consumption of profiles. Measured to be ~3739 bytes * for an expanded profile, and ~212 bytes for a compacted one. 
* */ - public static void main2(String args[]) { - RouterContext ctx = new RouterContext(null); + public static void main(String args[]) { + RouterContext ctx = new RouterContext(new net.i2p.router.Router()); testProfileSize(ctx, 100, 0); // 560KB testProfileSize(ctx, 1000, 0); // 3.9MB testProfileSize(ctx, 10000, 0); // 37MB @@ -524,7 +568,7 @@ public class PeerProfile { * PeerProfile [filename]* * */ - public static void main(String args[]) { + public static void main2(String args[]) { RouterContext ctx = new RouterContext(new net.i2p.router.Router()); DecimalFormat fmt = new DecimalFormat("0,000.0"); fmt.setPositivePrefix("+"); diff --git a/router/java/src/net/i2p/router/peermanager/PersistProfileJob.java b/router/java/src/net/i2p/router/peermanager/PersistProfileJob.java new file mode 100644 index 000000000..3b230fd86 --- /dev/null +++ b/router/java/src/net/i2p/router/peermanager/PersistProfileJob.java @@ -0,0 +1,29 @@ +package net.i2p.router.peermanager; + +import java.util.Iterator; +import java.util.Set; + +import net.i2p.data.Hash; +import net.i2p.router.JobImpl; +import net.i2p.router.RouterContext; + +class PersistProfileJob extends JobImpl { + private PersistProfilesJob _job; + private Iterator _peers; + public PersistProfileJob(RouterContext enclosingContext, PersistProfilesJob job, Set peers) { + super(enclosingContext); + _peers = peers.iterator(); + _job = job; + } + public void runJob() { + if (_peers.hasNext()) + _job.persist((Hash)_peers.next()); + if (_peers.hasNext()) { + requeue(1000); + } else { + // no more left, requeue up the main persist-em-all job + _job.requeue(); + } + } + public String getName() { return "Persist profile"; } +} diff --git a/router/java/src/net/i2p/router/peermanager/PersistProfilesJob.java b/router/java/src/net/i2p/router/peermanager/PersistProfilesJob.java index 6d43923e3..fc137ccc8 100644 --- a/router/java/src/net/i2p/router/peermanager/PersistProfilesJob.java +++ 
b/router/java/src/net/i2p/router/peermanager/PersistProfilesJob.java @@ -1,6 +1,5 @@ package net.i2p.router.peermanager; -import java.util.Iterator; import java.util.Set; import net.i2p.data.Hash; @@ -25,24 +24,3 @@ class PersistProfilesJob extends JobImpl { void persist(Hash peer) { _mgr.storeProfile(peer); } void requeue() { requeue(PERSIST_DELAY); } } - -class PersistProfileJob extends JobImpl { - private PersistProfilesJob _job; - private Iterator _peers; - public PersistProfileJob(RouterContext enclosingContext, PersistProfilesJob job, Set peers) { - super(enclosingContext); - _peers = peers.iterator(); - _job = job; - } - public void runJob() { - if (_peers.hasNext()) - _job.persist((Hash)_peers.next()); - if (_peers.hasNext()) { - requeue(1000); - } else { - // no more left, requeue up the main persist-em-all job - _job.requeue(); - } - } - public String getName() { return "Persist profile"; } -} \ No newline at end of file diff --git a/router/java/src/net/i2p/router/peermanager/ProfileOrganizer.java b/router/java/src/net/i2p/router/peermanager/ProfileOrganizer.java index e759441d9..b090d6cc7 100644 --- a/router/java/src/net/i2p/router/peermanager/ProfileOrganizer.java +++ b/router/java/src/net/i2p/router/peermanager/ProfileOrganizer.java @@ -1101,22 +1101,7 @@ public class ProfileOrganizer { * @return minimum number of peers to be placed in the 'fast' group */ protected int getMinimumFastPeers() { - String val = _context.getProperty(PROP_MINIMUM_FAST_PEERS, ""+DEFAULT_MINIMUM_FAST_PEERS); - if (val != null) { - try { - int rv = Integer.parseInt(val); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("router context said " + PROP_MINIMUM_FAST_PEERS + '=' + val); - return rv; - } catch (NumberFormatException nfe) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Minimum fast peers improperly set in the router environment [" + val + "]", nfe); - } - } - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("no config for " + PROP_MINIMUM_FAST_PEERS + ", using " + 
DEFAULT_MINIMUM_FAST_PEERS); - return DEFAULT_MINIMUM_FAST_PEERS; + return _context.getProperty(PROP_MINIMUM_FAST_PEERS, DEFAULT_MINIMUM_FAST_PEERS); } @@ -1130,22 +1115,7 @@ public class ProfileOrganizer { * @return minimum number of peers to be placed in the 'fast' group */ protected int getMinimumHighCapacityPeers() { - String val = _context.getProperty(PROP_MINIMUM_HIGH_CAPACITY_PEERS, ""+DEFAULT_MINIMUM_HIGH_CAPACITY_PEERS); - if (val != null) { - try { - int rv = Integer.parseInt(val); - if (_log.shouldLog(Log.DEBUG)) - _log.debug("router context said " + PROP_MINIMUM_HIGH_CAPACITY_PEERS + '=' + val); - return rv; - } catch (NumberFormatException nfe) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Minimum high capacity peers improperly set in the router environment [" + val + "]", nfe); - } - } - - if (_log.shouldLog(Log.DEBUG)) - _log.debug("no config for " + PROP_MINIMUM_HIGH_CAPACITY_PEERS + ", using " + DEFAULT_MINIMUM_HIGH_CAPACITY_PEERS); - return DEFAULT_MINIMUM_HIGH_CAPACITY_PEERS; + return _context.getProperty(PROP_MINIMUM_HIGH_CAPACITY_PEERS, DEFAULT_MINIMUM_HIGH_CAPACITY_PEERS); } private final static DecimalFormat _fmt = new DecimalFormat("###,##0.00", new DecimalFormatSymbols(Locale.UK)); diff --git a/router/java/src/net/i2p/router/peermanager/ProfileOrganizerRenderer.java b/router/java/src/net/i2p/router/peermanager/ProfileOrganizerRenderer.java index 57ac57cac..2671c8be0 100644 --- a/router/java/src/net/i2p/router/peermanager/ProfileOrganizerRenderer.java +++ b/router/java/src/net/i2p/router/peermanager/ProfileOrganizerRenderer.java @@ -187,7 +187,6 @@ class ProfileOrganizerRenderer { buf.append("Failed Lookups"); buf.append("New Stores"); buf.append("Old Stores"); - buf.append("1m Fail Rate"); buf.append("1h Fail Rate"); buf.append("1d Fail Rate"); buf.append(""); @@ -231,7 +230,6 @@ class ProfileOrganizerRenderer { buf.append("").append(dbh.getFailedLookups()).append(""); buf.append("").append(dbh.getUnpromptedDbStoreNew()).append(""); 
buf.append("").append(dbh.getUnpromptedDbStoreOld()).append(""); - buf.append("").append(davg(dbh, 60*1000l)).append(""); buf.append("").append(davg(dbh, 60*60*1000l)).append(""); buf.append("").append(davg(dbh, 24*60*60*1000l)).append(""); } diff --git a/router/java/src/net/i2p/router/startup/RebuildRouterInfoJob.java b/router/java/src/net/i2p/router/startup/RebuildRouterInfoJob.java index a43fc6311..967bc7a79 100644 --- a/router/java/src/net/i2p/router/startup/RebuildRouterInfoJob.java +++ b/router/java/src/net/i2p/router/startup/RebuildRouterInfoJob.java @@ -28,6 +28,13 @@ import net.i2p.router.RouterContext; import net.i2p.util.Log; /** + * This used be called from StartAcceptingClientsJob but is now disabled. + * It is still called once from LoadRouterInfoJob (but not run as a Job). + * + * The following comments appear to be incorrect... + * it rebuilds if the router.info file does not exist. + * There is no check for a router.info.rebuild file. + * * If the file router.info.rebuild exists, rebuild the router info and republish. 
* This is useful for dhcp or other situations where the router addresses change - * simply create the router.info.rebuild file after modifying router.config and within diff --git a/router/java/src/net/i2p/router/startup/StartAcceptingClientsJob.java b/router/java/src/net/i2p/router/startup/StartAcceptingClientsJob.java index 7ad54299f..727d06ac6 100644 --- a/router/java/src/net/i2p/router/startup/StartAcceptingClientsJob.java +++ b/router/java/src/net/i2p/router/startup/StartAcceptingClientsJob.java @@ -28,7 +28,8 @@ public class StartAcceptingClientsJob extends JobImpl { getContext().clientManager().startup(); getContext().jobQueue().addJob(new ReadConfigJob(getContext())); - getContext().jobQueue().addJob(new RebuildRouterInfoJob(getContext())); + // pointless + //getContext().jobQueue().addJob(new RebuildRouterInfoJob(getContext())); getContext().jobQueue().allowParallelOperation(); } } diff --git a/router/java/src/net/i2p/router/transport/CommSystemFacadeImpl.java b/router/java/src/net/i2p/router/transport/CommSystemFacadeImpl.java index b80e05694..c94178004 100644 --- a/router/java/src/net/i2p/router/transport/CommSystemFacadeImpl.java +++ b/router/java/src/net/i2p/router/transport/CommSystemFacadeImpl.java @@ -60,6 +60,7 @@ public class CommSystemFacadeImpl extends CommSystemFacade { public int countActivePeers() { return (_manager == null ? 0 : _manager.countActivePeers()); } public int countActiveSendPeers() { return (_manager == null ? 0 : _manager.countActiveSendPeers()); } + public boolean haveCapacity() { return (_manager == null ? false : _manager.haveCapacity()); } /** * Framed average clock skew of connected peers in seconds, or null if we cannot answer. @@ -284,9 +285,10 @@ public class CommSystemFacadeImpl extends CommSystemFacade { _log.warn("Halting NTCP to change address"); t.stopListening(); newAddr.setOptions(newProps); - // Give NTCP Pumper time to stop so we don't end up with two... 
- // Need better way - try { Thread.sleep(5*1000); } catch (InterruptedException ie) {} + // Wait for NTCP Pumper to stop so we don't end up with two... + while (t.isAlive()) { + try { Thread.sleep(5*1000); } catch (InterruptedException ie) {} + } t.restartListening(newAddr); _log.warn("Changed NTCP Address and started up, address is now " + newAddr); return; diff --git a/router/java/src/net/i2p/router/transport/FIFOBandwidthRefiller.java b/router/java/src/net/i2p/router/transport/FIFOBandwidthRefiller.java index 0210f8da5..693df45b1 100644 --- a/router/java/src/net/i2p/router/transport/FIFOBandwidthRefiller.java +++ b/router/java/src/net/i2p/router/transport/FIFOBandwidthRefiller.java @@ -33,11 +33,11 @@ public class FIFOBandwidthRefiller implements Runnable { public static final String PROP_OUTBOUND_BANDWIDTH_PEAK = "i2np.bandwidth.outboundBurstKBytes"; //public static final String PROP_REPLENISH_FREQUENCY = "i2np.bandwidth.replenishFrequencyMs"; - // no longer allow unlimited bandwidth - the user must specify a value, and if they do not, it is 32/16KBps - public static final int DEFAULT_INBOUND_BANDWIDTH = 48; - public static final int DEFAULT_OUTBOUND_BANDWIDTH = 24; - public static final int DEFAULT_INBOUND_BURST_BANDWIDTH = 64; - public static final int DEFAULT_OUTBOUND_BURST_BANDWIDTH = 32; + // no longer allow unlimited bandwidth - the user must specify a value, else use defaults below (KBps) + public static final int DEFAULT_INBOUND_BANDWIDTH = 64; + public static final int DEFAULT_OUTBOUND_BANDWIDTH = 32; + public static final int DEFAULT_INBOUND_BURST_BANDWIDTH = 80; + public static final int DEFAULT_OUTBOUND_BURST_BANDWIDTH = 40; public static final int DEFAULT_BURST_SECONDS = 60; @@ -154,55 +154,30 @@ public class FIFOBandwidthRefiller implements Runnable { } private void updateInboundRate() { - String inBwStr = _context.getProperty(PROP_INBOUND_BANDWIDTH); - if ( (inBwStr != null) && - (inBwStr.trim().length() > 0) && - 
(!(inBwStr.equals(String.valueOf(_inboundKBytesPerSecond)))) ) { + int in = _context.getProperty(PROP_INBOUND_BANDWIDTH, DEFAULT_INBOUND_BANDWIDTH); + if (in != _inboundKBytesPerSecond) { // bandwidth was specified *and* changed - try { - int in = Integer.parseInt(inBwStr); if ( (in <= 0) || (in > MIN_INBOUND_BANDWIDTH) ) _inboundKBytesPerSecond = in; else _inboundKBytesPerSecond = MIN_INBOUND_BANDWIDTH; if (_log.shouldLog(Log.DEBUG)) _log.debug("Updating inbound rate to " + _inboundKBytesPerSecond); - } catch (NumberFormatException nfe) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Invalid inbound bandwidth limit [" + inBwStr - + "], keeping as " + _inboundKBytesPerSecond); - } - } else { - if ( (inBwStr == null) && (_log.shouldLog(Log.DEBUG)) ) - _log.debug("Inbound bandwidth limits not specified in the config via " + PROP_INBOUND_BANDWIDTH); } if (_inboundKBytesPerSecond <= 0) _inboundKBytesPerSecond = DEFAULT_INBOUND_BANDWIDTH; } private void updateOutboundRate() { - String outBwStr = _context.getProperty(PROP_OUTBOUND_BANDWIDTH); - - if ( (outBwStr != null) && - (outBwStr.trim().length() > 0) && - (!(outBwStr.equals(String.valueOf(_outboundKBytesPerSecond)))) ) { + int out = _context.getProperty(PROP_OUTBOUND_BANDWIDTH, DEFAULT_OUTBOUND_BANDWIDTH); + if (out != _outboundKBytesPerSecond) { // bandwidth was specified *and* changed - try { - int out = Integer.parseInt(outBwStr); if ( (out <= 0) || (out >= MIN_OUTBOUND_BANDWIDTH) ) _outboundKBytesPerSecond = out; else _outboundKBytesPerSecond = MIN_OUTBOUND_BANDWIDTH; if (_log.shouldLog(Log.DEBUG)) _log.debug("Updating outbound rate to " + _outboundKBytesPerSecond); - } catch (NumberFormatException nfe) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Invalid outbound bandwidth limit [" + outBwStr - + "], keeping as " + _outboundKBytesPerSecond); - } - } else { - if ( (outBwStr == null) && (_log.shouldLog(Log.DEBUG)) ) - _log.debug("Outbound bandwidth limits not specified in the config via " + 
PROP_OUTBOUND_BANDWIDTH); } if (_outboundKBytesPerSecond <= 0) @@ -210,27 +185,15 @@ public class FIFOBandwidthRefiller implements Runnable { } private void updateInboundBurstRate() { - String inBwStr = _context.getProperty(PROP_INBOUND_BURST_BANDWIDTH); - if ( (inBwStr != null) && - (inBwStr.trim().length() > 0) && - (!(inBwStr.equals(String.valueOf(_inboundBurstKBytesPerSecond)))) ) { + int in = _context.getProperty(PROP_INBOUND_BURST_BANDWIDTH, DEFAULT_INBOUND_BURST_BANDWIDTH); + if (in != _inboundBurstKBytesPerSecond) { // bandwidth was specified *and* changed - try { - int in = Integer.parseInt(inBwStr); if ( (in <= 0) || (in >= _inboundKBytesPerSecond) ) _inboundBurstKBytesPerSecond = in; else _inboundBurstKBytesPerSecond = _inboundKBytesPerSecond; if (_log.shouldLog(Log.DEBUG)) _log.debug("Updating inbound burst rate to " + _inboundBurstKBytesPerSecond); - } catch (NumberFormatException nfe) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Invalid inbound bandwidth burst limit [" + inBwStr - + "], keeping as " + _inboundBurstKBytesPerSecond); - } - } else { - if ( (inBwStr == null) && (_log.shouldLog(Log.DEBUG)) ) - _log.debug("Inbound bandwidth burst limits not specified in the config via " + PROP_INBOUND_BURST_BANDWIDTH); } if (_inboundBurstKBytesPerSecond <= 0) @@ -239,28 +202,15 @@ public class FIFOBandwidthRefiller implements Runnable { } private void updateOutboundBurstRate() { - String outBwStr = _context.getProperty(PROP_OUTBOUND_BURST_BANDWIDTH); - - if ( (outBwStr != null) && - (outBwStr.trim().length() > 0) && - (!(outBwStr.equals(String.valueOf(_outboundBurstKBytesPerSecond)))) ) { + int out = _context.getProperty(PROP_OUTBOUND_BURST_BANDWIDTH, DEFAULT_OUTBOUND_BURST_BANDWIDTH); + if (out != _outboundBurstKBytesPerSecond) { // bandwidth was specified *and* changed - try { - int out = Integer.parseInt(outBwStr); if ( (out <= 0) || (out >= _outboundKBytesPerSecond) ) _outboundBurstKBytesPerSecond = out; else _outboundBurstKBytesPerSecond = 
_outboundKBytesPerSecond; if (_log.shouldLog(Log.DEBUG)) _log.debug("Updating outbound burst rate to " + _outboundBurstKBytesPerSecond); - } catch (NumberFormatException nfe) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Invalid outbound bandwidth burst limit [" + outBwStr - + "], keeping as " + _outboundBurstKBytesPerSecond); - } - } else { - if ( (outBwStr == null) && (_log.shouldLog(Log.DEBUG)) ) - _log.debug("Outbound bandwidth burst limits not specified in the config via " + PROP_OUTBOUND_BURST_BANDWIDTH); } if (_outboundBurstKBytesPerSecond <= 0) @@ -269,13 +219,10 @@ public class FIFOBandwidthRefiller implements Runnable { } private void updateInboundPeak() { - String inBwStr = _context.getProperty(PROP_INBOUND_BANDWIDTH_PEAK); - if ( (inBwStr != null) && - (inBwStr.trim().length() > 0) && - (!(inBwStr.equals(String.valueOf(_limiter.getInboundBurstBytes())))) ) { + int in = _context.getProperty(PROP_INBOUND_BANDWIDTH_PEAK, + DEFAULT_BURST_SECONDS * _inboundBurstKBytesPerSecond); + if (in != _limiter.getInboundBurstBytes()) { // peak bw was specified *and* changed - try { - int in = Integer.parseInt(inBwStr); if (in >= MIN_INBOUND_BANDWIDTH_PEAK) { if (in < _inboundBurstKBytesPerSecond) _limiter.setInboundBurstBytes(_inboundBurstKBytesPerSecond * 1024); @@ -287,27 +234,13 @@ public class FIFOBandwidthRefiller implements Runnable { else _limiter.setInboundBurstBytes(MIN_INBOUND_BANDWIDTH_PEAK * 1024); } - } catch (NumberFormatException nfe) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Invalid inbound bandwidth burst limit [" + inBwStr - + "]"); - _limiter.setInboundBurstBytes(DEFAULT_BURST_SECONDS * _inboundBurstKBytesPerSecond * 1024); - } - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Inbound bandwidth burst limits not specified in the config via " - + PROP_INBOUND_BANDWIDTH_PEAK); - _limiter.setInboundBurstBytes(DEFAULT_BURST_SECONDS * _inboundBurstKBytesPerSecond * 1024); } } private void updateOutboundPeak() { - String inBwStr = 
_context.getProperty(PROP_OUTBOUND_BANDWIDTH_PEAK); - if ( (inBwStr != null) && - (inBwStr.trim().length() > 0) && - (!(inBwStr.equals(String.valueOf(_limiter.getOutboundBurstBytes())))) ) { + int in = _context.getProperty(PROP_OUTBOUND_BANDWIDTH_PEAK, + DEFAULT_BURST_SECONDS * _outboundBurstKBytesPerSecond); + if (in != _limiter.getOutboundBurstBytes()) { // peak bw was specified *and* changed - try { - int in = Integer.parseInt(inBwStr); if (in >= MIN_OUTBOUND_BANDWIDTH_PEAK) { if (in < _outboundBurstKBytesPerSecond) _limiter.setOutboundBurstBytes(_outboundBurstKBytesPerSecond * 1024); @@ -319,17 +252,6 @@ public class FIFOBandwidthRefiller implements Runnable { else _limiter.setOutboundBurstBytes(MIN_OUTBOUND_BANDWIDTH_PEAK * 1024); } - } catch (NumberFormatException nfe) { - if (_log.shouldLog(Log.WARN)) - _log.warn("Invalid outbound bandwidth burst limit [" + inBwStr - + "]"); - _limiter.setOutboundBurstBytes(DEFAULT_BURST_SECONDS * _outboundBurstKBytesPerSecond * 1024); - } - } else { - if (_log.shouldLog(Log.DEBUG)) - _log.debug("Outbound bandwidth burst limits not specified in the config via " - + PROP_OUTBOUND_BANDWIDTH_PEAK); - _limiter.setOutboundBurstBytes(DEFAULT_BURST_SECONDS * _outboundBurstKBytesPerSecond * 1024); } } diff --git a/router/java/src/net/i2p/router/transport/GetBidsJob.java b/router/java/src/net/i2p/router/transport/GetBidsJob.java index a60ba6ee6..46ea0601c 100644 --- a/router/java/src/net/i2p/router/transport/GetBidsJob.java +++ b/router/java/src/net/i2p/router/transport/GetBidsJob.java @@ -66,7 +66,8 @@ public class GetBidsJob extends JobImpl { int failedCount = msg.getFailedTransports().size(); if (failedCount == 0) { context.statManager().addRateData("transport.bidFailNoTransports", msg.getLifetime(), 0); - context.shitlist().shitlistRouter(to, "We share no common transports with them"); + // This used to be "no common transports" but it is almost always no transports at all + context.shitlist().shitlistRouter(to, "No transports 
(hidden or starting up?)"); } else if (failedCount >= facade.getTransportCount()) { context.statManager().addRateData("transport.bidFailAllTransports", msg.getLifetime(), 0); // fail after all transports were unsuccessful diff --git a/router/java/src/net/i2p/router/transport/Transport.java b/router/java/src/net/i2p/router/transport/Transport.java index 84a37f68e..f95d2dc8f 100644 --- a/router/java/src/net/i2p/router/transport/Transport.java +++ b/router/java/src/net/i2p/router/transport/Transport.java @@ -40,6 +40,7 @@ public interface Transport { public int countActivePeers(); public int countActiveSendPeers(); + public boolean haveCapacity(); public Vector getClockSkews(); public List getMostRecentErrorMessages(); diff --git a/router/java/src/net/i2p/router/transport/TransportBid.java b/router/java/src/net/i2p/router/transport/TransportBid.java index 3d16dbc11..6b48ac76d 100644 --- a/router/java/src/net/i2p/router/transport/TransportBid.java +++ b/router/java/src/net/i2p/router/transport/TransportBid.java @@ -23,6 +23,8 @@ public class TransportBid { private long _bidExpiration; private Transport _transport; + public static final int TRANSIENT_FAIL = 999999; + public TransportBid() { setLatencyMs(-1); setBandwidthBytes(-1); diff --git a/router/java/src/net/i2p/router/transport/TransportImpl.java b/router/java/src/net/i2p/router/transport/TransportImpl.java index ba395b6b5..04a2cde91 100644 --- a/router/java/src/net/i2p/router/transport/TransportImpl.java +++ b/router/java/src/net/i2p/router/transport/TransportImpl.java @@ -31,7 +31,9 @@ import net.i2p.router.Job; import net.i2p.router.JobImpl; import net.i2p.router.MessageSelector; import net.i2p.router.OutNetMessage; +import net.i2p.router.Router; import net.i2p.router.RouterContext; +import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade; import net.i2p.util.Log; /** @@ -78,6 +80,33 @@ public abstract class TransportImpl implements Transport { * How many peers are we actively sending messages 
to (this minute) */ public int countActiveSendPeers() { return 0; } + + /** Default is 500 for floodfills... */ + public static final int DEFAULT_MAX_CONNECTIONS = 500; + /** ...and 60/120/180/240/300 for BW Tiers K/L/M/N/O */ + public static final int MAX_CONNECTION_FACTOR = 60; + /** Per-transport connection limit */ + public int getMaxConnections() { + String style = getStyle(); + if (style.equals("SSU")) + style = "udp"; + else + style = style.toLowerCase(); + int def = DEFAULT_MAX_CONNECTIONS; + RouterInfo ri = _context.router().getRouterInfo(); + if (ri != null) { + char bw = ri.getBandwidthTier().charAt(0); + if (bw != 'U' && + ! ((FloodfillNetworkDatabaseFacade)_context.netDb()).floodfillEnabled()) + def = MAX_CONNECTION_FACTOR * (1 + bw - Router.CAPABILITY_BW12); + } + return _context.getProperty("i2np." + style + ".maxConnections", def); + } + + /** + * Can we initiate or accept a connection to another peer, saving some margin + */ + public boolean haveCapacity() { return true; } /** * Return our peer clock skews on a transport. diff --git a/router/java/src/net/i2p/router/transport/TransportManager.java b/router/java/src/net/i2p/router/transport/TransportManager.java index 3d49780c3..bade75913 100644 --- a/router/java/src/net/i2p/router/transport/TransportManager.java +++ b/router/java/src/net/i2p/router/transport/TransportManager.java @@ -151,6 +151,19 @@ public class TransportManager implements TransportEventListener { return peers; } + /** + * Is at least one transport below its connection limit + some margin + * Use for throttling in the router. + * Perhaps we should just use SSU? + */ + public boolean haveCapacity() { + for (int i = 0; i < _transports.size(); i++) { + if (((Transport)_transports.get(i)).haveCapacity()) + return true; + } + return false; + } + /** * Return our peer clock skews on all transports. * Vector composed of Long, each element representing a peer skew in seconds. 
@@ -288,7 +301,10 @@ public class TransportManager implements TransportEventListener { // to us via TCP, send via TCP) TransportBid bid = t.bid(msg.getTarget(), msg.getMessageSize()); if (bid != null) { - if ( (rv == null) || (rv.getLatencyMs() > bid.getLatencyMs()) ) + if (bid.getLatencyMs() == bid.TRANSIENT_FAIL) + // this keeps GetBids() from shitlisting for "no common transports" + msg.transportFailed(t.getStyle()); + else if ( (rv == null) || (rv.getLatencyMs() > bid.getLatencyMs()) ) rv = bid; if (_log.shouldLog(Log.DEBUG)) _log.debug("Transport " + t.getStyle() + " bid: " + bid + " currently winning? " + (rv == bid) diff --git a/router/java/src/net/i2p/router/transport/ntcp/EventPumper.java b/router/java/src/net/i2p/router/transport/ntcp/EventPumper.java index 2f81318d9..9c75f5328 100644 --- a/router/java/src/net/i2p/router/transport/ntcp/EventPumper.java +++ b/router/java/src/net/i2p/router/transport/ntcp/EventPumper.java @@ -39,6 +39,7 @@ public class EventPumper implements Runnable { private List _wantsRegister; private List _wantsConRegister; private NTCPTransport _transport; + private long _expireIdleWriteTime; private static final int BUF_SIZE = 8*1024; private static final int MAX_CACHE_SIZE = 64; @@ -50,6 +51,8 @@ public class EventPumper implements Runnable { * the time to iterate across them to check a few flags shouldn't be a problem. 
*/ private static final long FAILSAFE_ITERATION_FREQ = 2*1000l; + private static final long MIN_EXPIRE_IDLE_TIME = 5*60*1000l; + private static final long MAX_EXPIRE_IDLE_TIME = 15*60*1000l; public EventPumper(RouterContext ctx, NTCPTransport transport) { _context = ctx; @@ -57,6 +60,7 @@ public class EventPumper implements Runnable { _transport = transport; _alive = false; _bufCache = new ArrayList(MAX_CACHE_SIZE); + _expireIdleWriteTime = MAX_EXPIRE_IDLE_TIME; } public void startPumping() { @@ -81,6 +85,13 @@ public class EventPumper implements Runnable { _selector.wakeup(); } + /** + * Selector can take quite a while to close after calling stopPumping() + */ + public boolean isAlive() { + return _alive || (_selector != null && _selector.isOpen()); + } + public void register(ServerSocketChannel chan) { if (_log.shouldLog(Log.DEBUG)) _log.debug("Registering server socket channel"); synchronized (_wantsRegister) { _wantsRegister.add(chan); } @@ -135,8 +146,12 @@ public class EventPumper implements Runnable { int failsafeWrites = 0; int failsafeCloses = 0; int failsafeInvalid = 0; - // pointless if we do this every 2 seconds? 
- long expireIdleWriteTime = 10*60*1000l; // + _context.random().nextLong(60*60*1000l); + + // Increase allowed idle time if we are well under allowed connections, otherwise decrease + if (_transport.haveCapacity()) + _expireIdleWriteTime = Math.min(_expireIdleWriteTime + 1000, MAX_EXPIRE_IDLE_TIME); + else + _expireIdleWriteTime = Math.max(_expireIdleWriteTime - 3000, MIN_EXPIRE_IDLE_TIME); for (Iterator iter = all.iterator(); iter.hasNext(); ) { try { SelectionKey key = (SelectionKey)iter.next(); @@ -181,8 +196,8 @@ public class EventPumper implements Runnable { failsafeWrites++; } - if ( con.getTimeSinceSend() > expireIdleWriteTime && - con.getTimeSinceReceive() > expireIdleWriteTime) { + if ( con.getTimeSinceSend() > _expireIdleWriteTime && + con.getTimeSinceReceive() > _expireIdleWriteTime) { // we haven't sent or received anything in a really long time, so lets just close 'er up con.close(); failsafeCloses++; @@ -680,4 +695,5 @@ public class EventPumper implements Runnable { private void expireTimedOut() { _transport.expireTimedOut(); } + public long getIdleTimeout() { return _expireIdleWriteTime; } } diff --git a/router/java/src/net/i2p/router/transport/ntcp/NTCPSendFinisher.java b/router/java/src/net/i2p/router/transport/ntcp/NTCPSendFinisher.java new file mode 100644 index 000000000..8d19c6249 --- /dev/null +++ b/router/java/src/net/i2p/router/transport/ntcp/NTCPSendFinisher.java @@ -0,0 +1,89 @@ +package net.i2p.router.transport.ntcp; + +import java.util.concurrent.Executors; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.ThreadFactory; + +import net.i2p.I2PAppContext; +import net.i2p.router.OutNetMessage; +import net.i2p.util.Log; + +/** + * Previously, NTCP was using SimpleTimer with a delay of 0, which + * was a real abuse. 
+ * + * Here we use the non-scheduled, lockless ThreadPoolExecutor with + * a fixed pool size and an unbounded queue. + * + * The old implementation was having problems with lock contention; + * this should work a lot better - and not clog up the SimpleTimer queue. + * + * @author zzz + */ +public class NTCPSendFinisher { + private static final int THREADS = 4; + private I2PAppContext _context; + private NTCPTransport _transport; + private Log _log; + private int _count; + private ThreadPoolExecutor _executor; + + public NTCPSendFinisher(I2PAppContext context, NTCPTransport transport) { + _context = context; + _log = _context.logManager().getLog(NTCPSendFinisher.class); + _transport = transport; + } + + public void start() { + _count = 0; + _executor = new CustomThreadPoolExecutor(); + } + + public void stop() { + _executor.shutdownNow(); + } + + public void add(OutNetMessage msg) { + _executor.execute(new RunnableEvent(msg)); + } + + // not really needed for now but in case we want to add some hooks like afterExecute() + private class CustomThreadPoolExecutor extends ThreadPoolExecutor { + public CustomThreadPoolExecutor() { + // use unbounded queue, so maximumPoolSize and keepAliveTime have no effect + super(THREADS, THREADS, 1000, TimeUnit.MILLISECONDS, + new LinkedBlockingQueue(), new CustomThreadFactory()); + } + } + + private class CustomThreadFactory implements ThreadFactory { + public Thread newThread(Runnable r) { + Thread rv = Executors.defaultThreadFactory().newThread(r); + rv.setName("NTCPSendFinisher " + (++_count) + '/' + THREADS); + rv.setDaemon(true); + return rv; + } + } + + /** + * Call afterSend() for the message + */ + private class RunnableEvent implements Runnable { + private OutNetMessage _msg; + + public RunnableEvent(OutNetMessage msg) { + _msg = msg; + } + + public void run() { + try { + _transport.afterSend(_msg, true, false, _msg.getSendTime()); + } catch (Throwable t) { + _log.log(Log.CRIT, " wtf, afterSend borked", t); + } + } + } +} + 
diff --git a/router/java/src/net/i2p/router/transport/ntcp/NTCPTransport.java b/router/java/src/net/i2p/router/transport/ntcp/NTCPTransport.java index 349669066..c23245bae 100644 --- a/router/java/src/net/i2p/router/transport/ntcp/NTCPTransport.java +++ b/router/java/src/net/i2p/router/transport/ntcp/NTCPTransport.java @@ -27,7 +27,6 @@ import net.i2p.router.transport.Transport; import net.i2p.router.transport.TransportBid; import net.i2p.router.transport.TransportImpl; import net.i2p.util.Log; -import net.i2p.util.SimpleTimer; /** * @@ -36,6 +35,7 @@ public class NTCPTransport extends TransportImpl { private Log _log; private SharedBid _fastBid; private SharedBid _slowBid; + private SharedBid _transientFail; private Object _conLock; private Map _conByIdent; private NTCPAddress _myAddress; @@ -49,7 +49,7 @@ public class NTCPTransport extends TransportImpl { private List _establishing; private List _sent; - private SendFinisher _finisher; + private NTCPSendFinisher _finisher; public NTCPTransport(RouterContext ctx) { super(ctx); @@ -123,7 +123,7 @@ public class NTCPTransport extends TransportImpl { _conByIdent = new HashMap(64); _sent = new ArrayList(4); - _finisher = new SendFinisher(); + _finisher = new NTCPSendFinisher(ctx, this); _pumper = new EventPumper(ctx, this); _reader = new Reader(ctx); @@ -131,6 +131,7 @@ public class NTCPTransport extends TransportImpl { _fastBid = new SharedBid(25); // best _slowBid = new SharedBid(70); // better than ssu unestablished, but not better than ssu established + _transientFail = new SharedBid(TransportBid.TRANSIENT_FAIL); } void inboundEstablished(NTCPConnection con) { @@ -289,7 +290,7 @@ public class NTCPTransport extends TransportImpl { if (!allowConnection()) { if (_log.shouldLog(Log.WARN)) _log.warn("no bid when trying to send to " + toAddress.getIdentity().calculateHash().toBase64() + ", max connection limit reached"); - return null; + return _transientFail; } //if ( (_myAddress != null) && (_myAddress.equals(addr)) ) 
@@ -300,40 +301,16 @@ public class NTCPTransport extends TransportImpl { return _slowBid; } - private static final int DEFAULT_MAX_CONNECTIONS = 500; public boolean allowConnection() { - int max = DEFAULT_MAX_CONNECTIONS; - String mc = _context.getProperty("i2np.ntcp.maxConnections"); - if (mc != null) { - try { - max = Integer.parseInt(mc); - } catch (NumberFormatException nfe) {} - } - return countActivePeers() < max; + return countActivePeers() < getMaxConnections(); } + public boolean haveCapacity() { + return countActivePeers() < getMaxConnections() * 4 / 5; + } + /** queue up afterSend call, which can take some time w/ jobs, etc */ void sendComplete(OutNetMessage msg) { _finisher.add(msg); } - /** async afterSend call, which can take some time w/ jobs, etc */ - private class SendFinisher implements SimpleTimer.TimedEvent { - public void add(OutNetMessage msg) { - synchronized (_sent) { _sent.add(msg); } - SimpleTimer.getInstance().addEvent(SendFinisher.this, 0); - } - public void timeReached() { - int pending = 0; - OutNetMessage msg = null; - synchronized (_sent) { - pending = _sent.size()-1; - if (pending >= 0) - msg = (OutNetMessage)_sent.remove(0); - } - if (msg != null) - afterSend(msg, true, false, msg.getSendTime()); - if (pending > 0) - SimpleTimer.getInstance().addEvent(SendFinisher.this, 0); - } - } private boolean isEstablished(RouterIdentity peer) { return isEstablished(peer.calculateHash()); @@ -415,6 +392,7 @@ public class NTCPTransport extends TransportImpl { public RouterAddress startListening() { if (_log.shouldLog(Log.DEBUG)) _log.debug("Starting ntcp transport listening"); + _finisher.start(); _pumper.startPumping(); _reader.startReading(NUM_CONCURRENT_READERS); @@ -426,6 +404,7 @@ public class NTCPTransport extends TransportImpl { public RouterAddress restartListening(RouterAddress addr) { if (_log.shouldLog(Log.DEBUG)) _log.debug("Restarting ntcp transport listening"); + _finisher.start(); _pumper.startPumping(); 
_reader.startReading(NUM_CONCURRENT_READERS); @@ -435,6 +414,10 @@ public class NTCPTransport extends TransportImpl { return bindAddress(); } + public boolean isAlive() { + return _pumper.isAlive(); + } + private RouterAddress bindAddress() { if (_myAddress != null) { try { @@ -541,11 +524,16 @@ public class NTCPTransport extends TransportImpl { } } + /** + * This doesn't (completely) block, caller should check isAlive() + * before calling startListening() or restartListening() + */ public void stopListening() { if (_log.shouldLog(Log.DEBUG)) _log.debug("Stopping ntcp transport"); _pumper.stopPumping(); _writer.stopWriting(); _reader.stopReading(); + _finisher.stop(); Map cons = null; synchronized (_conLock) { cons = new HashMap(_conByIdent); @@ -581,7 +569,10 @@ public class NTCPTransport extends TransportImpl { long totalRecv = 0; StringBuffer buf = new StringBuffer(512); - buf.append("NTCP connections: ").append(peers.size()).append("
            \n"); + buf.append("NTCP connections: ").append(peers.size()); + buf.append(" limit: ").append(getMaxConnections()); + buf.append(" timeout: ").append(DataHelper.formatDuration(_pumper.getIdleTimeout())); + buf.append("
            \n"); buf.append("\n"); buf.append(" "); buf.append(" "); diff --git a/router/java/src/net/i2p/router/transport/udp/EstablishmentManager.java b/router/java/src/net/i2p/router/transport/udp/EstablishmentManager.java index f7ca62cc2..896fe1ce4 100644 --- a/router/java/src/net/i2p/router/transport/udp/EstablishmentManager.java +++ b/router/java/src/net/i2p/router/transport/udp/EstablishmentManager.java @@ -22,6 +22,7 @@ import net.i2p.router.Router; import net.i2p.router.RouterContext; import net.i2p.util.I2PThread; import net.i2p.util.Log; +import net.i2p.util.SimpleScheduler; import net.i2p.util.SimpleTimer; /** @@ -184,7 +185,7 @@ public class EstablishmentManager { msg.getTarget().getIdentity(), new SessionKey(addr.getIntroKey()), addr); _outboundStates.put(to, state); - SimpleTimer.getInstance().addEvent(new Expire(to, state), 10*1000); + SimpleScheduler.getInstance().addEvent(new Expire(to, state), 10*1000); } } if (state != null) { @@ -271,6 +272,8 @@ public class EstablishmentManager { _log.warn("Receive session request from blocklisted IP: " + from); return; // drop the packet } + if (!_transport.allowConnection()) + return; // drop the packet state = new InboundEstablishState(_context, from.getIP(), from.getPort(), _transport.getLocalPort()); state.receiveSessionRequest(reader.getSessionRequestReader()); isNew = true; @@ -392,7 +395,7 @@ public class EstablishmentManager { msg.getTarget().getIdentity(), new SessionKey(addr.getIntroKey()), addr); _outboundStates.put(to, qstate); - SimpleTimer.getInstance().addEvent(new Expire(to, qstate), 10*1000); + SimpleScheduler.getInstance().addEvent(new Expire(to, qstate), 10*1000); for (int i = 0; i < queued.size(); i++) { OutNetMessage m = (OutNetMessage)queued.get(i); @@ -475,7 +478,7 @@ public class EstablishmentManager { dsm.setMessageExpiration(_context.clock().now()+10*1000); dsm.setMessageId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE)); _transport.send(dsm, peer); - 
SimpleTimer.getInstance().addEvent(new PublishToNewInbound(peer), 0); + SimpleScheduler.getInstance().addEvent(new PublishToNewInbound(peer), 0); } private class PublishToNewInbound implements SimpleTimer.TimedEvent { private PeerState _peer; @@ -627,7 +630,7 @@ public class EstablishmentManager { } } } - SimpleTimer.getInstance().addEvent(new FailIntroduction(state, nonce), INTRO_ATTEMPT_TIMEOUT); + SimpleScheduler.getInstance().addEvent(new FailIntroduction(state, nonce), INTRO_ATTEMPT_TIMEOUT); state.setIntroNonce(nonce); _context.statManager().addRateData("udp.sendIntroRelayRequest", 1, 0); UDPPacket requests[] = _builder.buildRelayRequest(_transport, state, _transport.getIntroKey()); diff --git a/router/java/src/net/i2p/router/transport/udp/PeerTestManager.java b/router/java/src/net/i2p/router/transport/udp/PeerTestManager.java index 7aa3c2fa1..35c5511be 100644 --- a/router/java/src/net/i2p/router/transport/udp/PeerTestManager.java +++ b/router/java/src/net/i2p/router/transport/udp/PeerTestManager.java @@ -15,6 +15,7 @@ import net.i2p.data.SessionKey; import net.i2p.router.CommSystemFacade; import net.i2p.router.RouterContext; import net.i2p.util.Log; +import net.i2p.util.SimpleScheduler; import net.i2p.util.SimpleTimer; /** @@ -79,7 +80,7 @@ class PeerTestManager { sendTestToBob(); - SimpleTimer.getInstance().addEvent(new ContinueTest(), RESEND_TIMEOUT); + SimpleScheduler.getInstance().addEvent(new ContinueTest(), RESEND_TIMEOUT); } private class ContinueTest implements SimpleTimer.TimedEvent { @@ -103,7 +104,7 @@ class PeerTestManager { // second message from Charlie yet sendTestToCharlie(); } - SimpleTimer.getInstance().addEvent(ContinueTest.this, RESEND_TIMEOUT); + SimpleScheduler.getInstance().addEvent(ContinueTest.this, RESEND_TIMEOUT); } } } @@ -430,7 +431,7 @@ class PeerTestManager { synchronized (_activeTests) { _activeTests.put(new Long(nonce), state); } - SimpleTimer.getInstance().addEvent(new RemoveTest(nonce), MAX_CHARLIE_LIFETIME); + 
SimpleScheduler.getInstance().addEvent(new RemoveTest(nonce), MAX_CHARLIE_LIFETIME); } UDPPacket packet = _packetBuilder.buildPeerTestToBob(bobIP, from.getPort(), aliceIP, alicePort, aliceIntroKey, nonce, state.getBobCipherKey(), state.getBobMACKey()); @@ -511,7 +512,7 @@ class PeerTestManager { synchronized (_activeTests) { _activeTests.put(new Long(nonce), state); } - SimpleTimer.getInstance().addEvent(new RemoveTest(nonce), MAX_CHARLIE_LIFETIME); + SimpleScheduler.getInstance().addEvent(new RemoveTest(nonce), MAX_CHARLIE_LIFETIME); } UDPPacket packet = _packetBuilder.buildPeerTestToCharlie(aliceIP, from.getPort(), aliceIntroKey, nonce, diff --git a/router/java/src/net/i2p/router/transport/udp/UDPReceiver.java b/router/java/src/net/i2p/router/transport/udp/UDPReceiver.java index 10876a0e7..3535484c9 100644 --- a/router/java/src/net/i2p/router/transport/udp/UDPReceiver.java +++ b/router/java/src/net/i2p/router/transport/udp/UDPReceiver.java @@ -9,6 +9,7 @@ import net.i2p.router.RouterContext; import net.i2p.router.transport.FIFOBandwidthLimiter; import net.i2p.util.I2PThread; import net.i2p.util.Log; +import net.i2p.util.SimpleScheduler; import net.i2p.util.SimpleTimer; /** @@ -115,7 +116,7 @@ public class UDPReceiver { long delay = ARTIFICIAL_DELAY_BASE + _context.random().nextInt(ARTIFICIAL_DELAY); if (_log.shouldLog(Log.INFO)) _log.info("Delay packet " + packet + " for " + delay); - SimpleTimer.getInstance().addEvent(new ArtificiallyDelayedReceive(packet), delay); + SimpleScheduler.getInstance().addEvent(new ArtificiallyDelayedReceive(packet), delay); return -1; } diff --git a/router/java/src/net/i2p/router/transport/udp/UDPTransport.java b/router/java/src/net/i2p/router/transport/udp/UDPTransport.java index d40c64118..e5185defa 100644 --- a/router/java/src/net/i2p/router/transport/udp/UDPTransport.java +++ b/router/java/src/net/i2p/router/transport/udp/UDPTransport.java @@ -33,6 +33,7 @@ import net.i2p.router.transport.Transport; import 
net.i2p.router.transport.TransportBid; import net.i2p.router.transport.TransportImpl; import net.i2p.util.Log; +import net.i2p.util.SimpleScheduler; import net.i2p.util.SimpleTimer; /** @@ -86,10 +87,13 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority private TransportBid _fastPreferredBid; /** shared slow bid for unconnected peers when we want to always prefer UDP */ private TransportBid _slowPreferredBid; + private TransportBid _transientFail; /** list of RemoteHostId for peers whose packets we want to drop outright */ private List _dropList; + private int _expireTimeout; + private static final int DROPLIST_PERIOD = 10*60*1000; private static final int MAX_DROPLIST_SIZE = 256; @@ -155,10 +159,12 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority _fastPreferredBid = new SharedBid(15); _slowPreferredBid = new SharedBid(20); _slowestBid = new SharedBid(1000); + _transientFail = new SharedBid(TransportBid.TRANSIENT_FAIL); _fragments = new OutboundMessageFragments(_context, this, _activeThrottle); _inboundFragments = new InboundMessageFragments(_context, _fragments, this); _flooder = new UDPFlooder(_context, this); + _expireTimeout = EXPIRE_TIMEOUT; _expireEvent = new ExpirePeerEvent(); _testEvent = new PeerTestEvent(); _reachabilityStatus = CommSystemFacade.STATUS_UNKNOWN; @@ -626,7 +632,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority } if (added) { _context.statManager().addRateData("udp.dropPeerDroplist", droplistSize, 0); - SimpleTimer.getInstance().addEvent(new RemoveDropList(remote), DROPLIST_PERIOD); + SimpleScheduler.getInstance().addEvent(new RemoveDropList(remote), DROPLIST_PERIOD); } } markUnreachable(peerHash); @@ -887,6 +893,8 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority return null; } } + if (!allowConnection()) + return _transientFail; if (_log.shouldLog(Log.DEBUG)) _log.debug("bidding on a message to an 
unestablished peer: " + to.toBase64()); @@ -922,6 +930,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority // in the IntroductionManager a chance to work. public static final int EXPIRE_TIMEOUT = 30*60*1000; private static final int MAX_IDLE_TIME = EXPIRE_TIMEOUT; + private static final int MIN_EXPIRE_TIMEOUT = 10*60*1000; public String getStyle() { return STYLE; } public void send(OutNetMessage msg) { @@ -1264,6 +1273,18 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority return getPeerState(dest) != null; } + public boolean allowConnection() { + synchronized (_peersByIdent) { + return _peersByIdent.size() < getMaxConnections(); + } + } + + public boolean haveCapacity() { + synchronized (_peersByIdent) { + return _peersByIdent.size() < getMaxConnections() * 4 / 5; + } + } + /** * Return our peer clock skews on this transport. * Vector composed of Long, each element representing a peer skew in seconds. @@ -1622,7 +1643,10 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority int numPeers = 0; StringBuffer buf = new StringBuffer(512); - buf.append("UDP connections: ").append(peers.size()).append("
            \n"); + buf.append("UDP connections: ").append(peers.size()); + buf.append(" limit: ").append(getMaxConnections()); + buf.append(" timeout: ").append(DataHelper.formatDuration(_expireTimeout)); + buf.append("
            \n"); buf.append("
            peerdir
            \n"); buf.append(" "); if (cfg.getReceiveFrom() != null) - out.write(""); + out.write(""); else out.write(""); if (cfg.getSendTunnel() != null) @@ -480,7 +478,7 @@ public class TunnelPoolManager implements TunnelManagerFacade { else out.write(""); if (cfg.getSendTo() != null) - out.write(""); + out.write(""); else out.write(""); long timeLeft = cfg.getExpiration()-_context.clock().now(); @@ -507,7 +505,7 @@ public class TunnelPoolManager implements TunnelManagerFacade { } out.write("
            peer"); if (sortFlags == FLAG_ALPHA) @@ -1951,12 +1975,25 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority _expireBuffer = new ArrayList(128); } public void timeReached() { - long inactivityCutoff = _context.clock().now() - EXPIRE_TIMEOUT; + // Increase allowed idle time if we are well under allowed connections, otherwise decrease + if (haveCapacity()) + _expireTimeout = Math.min(_expireTimeout + 15*1000, EXPIRE_TIMEOUT); + else + _expireTimeout = Math.max(_expireTimeout - 45*1000, MIN_EXPIRE_TIMEOUT); + long shortInactivityCutoff = _context.clock().now() - _expireTimeout; + long longInactivityCutoff = _context.clock().now() - EXPIRE_TIMEOUT; + long pingCutoff = _context.clock().now() - (2 * 60*60*1000); _expireBuffer.clear(); synchronized (_expirePeers) { int sz = _expirePeers.size(); for (int i = 0; i < sz; i++) { PeerState peer = (PeerState)_expirePeers.get(i); + long inactivityCutoff; + // if we offered to introduce them, or we used them as introducer in last 2 hours + if (peer.getWeRelayToThemAs() > 0 || peer.getIntroducerTime() > pingCutoff) + inactivityCutoff = longInactivityCutoff; + else + inactivityCutoff = shortInactivityCutoff; if ( (peer.getLastReceiveTime() < inactivityCutoff) && (peer.getLastSendTime() < inactivityCutoff) ) { _expireBuffer.add(peer); _expirePeers.remove(i); diff --git a/router/java/src/net/i2p/router/tunnel/FlushTimer.java b/router/java/src/net/i2p/router/tunnel/FlushTimer.java index b18799ac6..b55384b80 100644 --- a/router/java/src/net/i2p/router/tunnel/FlushTimer.java +++ b/router/java/src/net/i2p/router/tunnel/FlushTimer.java @@ -6,7 +6,12 @@ import net.i2p.util.SimpleTimer; * */ class FlushTimer extends SimpleTimer { - private static final FlushTimer _instance = new FlushTimer(); - public static final SimpleTimer getInstance() { return _instance; } - protected FlushTimer() { super("TunnelFlushTimer"); } + /* + Streaming lib has been moved from SimpleTimer to SimpleTimer2, eliminating 
the congestion. + So there's not much left using SimpleTimer, and FlushTimer doesn't need its own 4 threads any more + (if it ever did?...) + */ + //private static final FlushTimer _instance = new FlushTimer(); + //public static final SimpleTimer getInstance() { return _instance; } + //protected FlushTimer() { super("TunnelFlushTimer"); } } diff --git a/router/java/src/net/i2p/router/tunnel/FragmentHandler.java b/router/java/src/net/i2p/router/tunnel/FragmentHandler.java index 5a97956b9..99b66c0c8 100644 --- a/router/java/src/net/i2p/router/tunnel/FragmentHandler.java +++ b/router/java/src/net/i2p/router/tunnel/FragmentHandler.java @@ -74,6 +74,12 @@ public class FragmentHandler { int padding = 0; while (preprocessed[offset] != (byte)0x00) { offset++; // skip the padding + // AIOOBE http://forum.i2p/viewtopic.php?t=3187 + if (offset >= TrivialPreprocessor.PREPROCESSED_SIZE) { + _cache.release(new ByteArray(preprocessed)); + _context.statManager().addRateData("tunnel.corruptMessage", 1, 1); + return; + } padding++; } offset++; // skip the final 0x00, terminating the padding @@ -387,8 +393,8 @@ public class FragmentHandler { _log.error("Error receiving fragmented message (corrupt?): " + stringified, ioe); } catch (I2NPMessageException ime) { if (stringified == null) stringified = msg.toString(); - if (_log.shouldLog(Log.ERROR)) - _log.error("Error receiving fragmented message (corrupt?): " + stringified, ime); + if (_log.shouldLog(Log.WARN)) + _log.warn("Error receiving fragmented message (corrupt?): " + stringified, ime); } } diff --git a/router/java/src/net/i2p/router/tunnel/FragmentedMessage.java b/router/java/src/net/i2p/router/tunnel/FragmentedMessage.java index d26c691b7..b0203f540 100644 --- a/router/java/src/net/i2p/router/tunnel/FragmentedMessage.java +++ b/router/java/src/net/i2p/router/tunnel/FragmentedMessage.java @@ -78,13 +78,13 @@ public class FragmentedMessage { return false; } if (length <= 0) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Length 
is impossible (" + length + ") for messageId " + messageId); + if (_log.shouldLog(Log.WARN)) + _log.warn("Length is impossible (" + length + ") for messageId " + messageId); return false; } if (offset + length > payload.length) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Length is impossible (" + length + "/" + offset + " out of " + payload.length + ") for messageId " + messageId); + if (_log.shouldLog(Log.WARN)) + _log.warn("Length is impossible (" + length + "/" + offset + " out of " + payload.length + ") for messageId " + messageId); return false; } if (_log.shouldLog(Log.DEBUG)) @@ -131,13 +131,13 @@ public class FragmentedMessage { return false; } if (length <= 0) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Length is impossible (" + length + ") for messageId " + messageId); + if (_log.shouldLog(Log.WARN)) + _log.warn("Length is impossible (" + length + ") for messageId " + messageId); return false; } if (offset + length > payload.length) { - if (_log.shouldLog(Log.ERROR)) - _log.error("Length is impossible (" + length + "/" + offset + " out of " + payload.length + ") for messageId " + messageId); + if (_log.shouldLog(Log.WARN)) + _log.warn("Length is impossible (" + length + "/" + offset + " out of " + payload.length + ") for messageId " + messageId); return false; } if (_log.shouldLog(Log.DEBUG)) diff --git a/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java b/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java index 65066e133..c5c46c365 100644 --- a/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java +++ b/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java @@ -78,6 +78,7 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec _log.info("distributing inbound tunnel message into our inNetMessagePool: " + msg); _context.inNetMessagePool().add(msg, null, null); } +/****** latency measuring attack? 
} else if (_context.routerHash().equals(target)) { // the want to send it to a tunnel, except we are also that tunnel's gateway // dispatch it directly @@ -89,6 +90,7 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec gw.setMessageExpiration(_context.clock().now()+10*1000); gw.setUniqueId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE)); _context.tunnelDispatcher().dispatch(gw); +******/ } else { // ok, they want us to send it remotely, but that'd bust our anonymity, // so we send it out a tunnel first diff --git a/router/java/src/net/i2p/router/tunnel/TunnelDispatcher.java b/router/java/src/net/i2p/router/tunnel/TunnelDispatcher.java index 8bb4781fe..de29a9540 100644 --- a/router/java/src/net/i2p/router/tunnel/TunnelDispatcher.java +++ b/router/java/src/net/i2p/router/tunnel/TunnelDispatcher.java @@ -2,8 +2,8 @@ package net.i2p.router.tunnel; import java.io.IOException; import java.io.Writer; +import java.util.concurrent.ConcurrentHashMap; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; @@ -30,12 +30,11 @@ import net.i2p.util.Log; public class TunnelDispatcher implements Service { private RouterContext _context; private Log _log; - private Map _outboundGateways; - private Map _outboundEndpoints; - private Map _participants; - private Map _inboundGateways; - /** id to HopConfig */ - private Map _participatingConfig; + private Map _outboundGateways; + private Map _outboundEndpoints; + private Map _participants; + private Map _inboundGateways; + private Map _participatingConfig; /** what is the date/time on which the last non-locally-created tunnel expires? 
*/ private long _lastParticipatingExpiration; private BloomFilterIVValidator _validator; @@ -48,11 +47,11 @@ public class TunnelDispatcher implements Service { public TunnelDispatcher(RouterContext ctx) { _context = ctx; _log = ctx.logManager().getLog(TunnelDispatcher.class); - _outboundGateways = new HashMap(); - _outboundEndpoints = new HashMap(); - _participants = new HashMap(); - _inboundGateways = new HashMap(); - _participatingConfig = new HashMap(); + _outboundGateways = new ConcurrentHashMap(); + _outboundEndpoints = new ConcurrentHashMap(); + _participants = new ConcurrentHashMap(); + _inboundGateways = new ConcurrentHashMap(); + _participatingConfig = new ConcurrentHashMap(); _lastParticipatingExpiration = 0; _lastDropTime = 0; _validator = null; @@ -158,17 +157,13 @@ public class TunnelDispatcher implements Service { //TunnelGateway gw = new TunnelGateway(_context, preproc, sender, receiver); TunnelGateway gw = new PumpedTunnelGateway(_context, preproc, sender, receiver, _pumper); TunnelId outId = cfg.getConfig(0).getSendTunnel(); - synchronized (_outboundGateways) { - _outboundGateways.put(outId, gw); - } + _outboundGateways.put(outId, gw); _context.statManager().addRateData("tunnel.joinOutboundGateway", 1, 0); _context.messageHistory().tunnelJoined("outbound", cfg); } else { TunnelGatewayZeroHop gw = new TunnelGatewayZeroHop(_context, cfg); TunnelId outId = cfg.getConfig(0).getSendTunnel(); - synchronized (_outboundGateways) { - _outboundGateways.put(outId, gw); - } + _outboundGateways.put(outId, gw); _context.statManager().addRateData("tunnel.joinOutboundGatewayZeroHop", 1, 0); _context.messageHistory().tunnelJoined("outboundZeroHop", cfg); } @@ -183,17 +178,13 @@ public class TunnelDispatcher implements Service { if (cfg.getLength() > 1) { TunnelParticipant participant = new TunnelParticipant(_context, new InboundEndpointProcessor(_context, cfg, _validator)); TunnelId recvId = cfg.getConfig(cfg.getLength()-1).getReceiveTunnel(); - synchronized 
(_participants) { - _participants.put(recvId, participant); - } + _participants.put(recvId, participant); _context.statManager().addRateData("tunnel.joinInboundEndpoint", 1, 0); _context.messageHistory().tunnelJoined("inboundEndpoint", cfg); } else { TunnelGatewayZeroHop gw = new TunnelGatewayZeroHop(_context, cfg); TunnelId recvId = cfg.getConfig(0).getReceiveTunnel(); - synchronized (_inboundGateways) { - _inboundGateways.put(recvId, gw); - } + _inboundGateways.put(recvId, gw); _context.statManager().addRateData("tunnel.joinInboundEndpointZeroHop", 1, 0); _context.messageHistory().tunnelJoined("inboundEndpointZeroHop", cfg); } @@ -208,12 +199,8 @@ public class TunnelDispatcher implements Service { _log.info("Joining as participant: " + cfg); TunnelId recvId = cfg.getReceiveTunnel(); TunnelParticipant participant = new TunnelParticipant(_context, cfg, new HopProcessor(_context, cfg, _validator)); - synchronized (_participants) { - _participants.put(recvId, participant); - } - synchronized (_participatingConfig) { - _participatingConfig.put(recvId, cfg); - } + _participants.put(recvId, participant); + _participatingConfig.put(recvId, cfg); _context.messageHistory().tunnelJoined("participant", cfg); _context.statManager().addRateData("tunnel.joinParticipant", 1, 0); if (cfg.getExpiration() > _lastParticipatingExpiration) @@ -229,12 +216,8 @@ public class TunnelDispatcher implements Service { _log.info("Joining as outbound endpoint: " + cfg); TunnelId recvId = cfg.getReceiveTunnel(); OutboundTunnelEndpoint endpoint = new OutboundTunnelEndpoint(_context, cfg, new HopProcessor(_context, cfg, _validator)); - synchronized (_outboundEndpoints) { - _outboundEndpoints.put(recvId, endpoint); - } - synchronized (_participatingConfig) { - _participatingConfig.put(recvId, cfg); - } + _outboundEndpoints.put(recvId, endpoint); + _participatingConfig.put(recvId, cfg); _context.messageHistory().tunnelJoined("outboundEndpoint", cfg); 
_context.statManager().addRateData("tunnel.joinOutboundEndpoint", 1, 0); @@ -256,12 +239,8 @@ public class TunnelDispatcher implements Service { //TunnelGateway gw = new TunnelGateway(_context, preproc, sender, receiver); TunnelGateway gw = new PumpedTunnelGateway(_context, preproc, sender, receiver, _pumper); TunnelId recvId = cfg.getReceiveTunnel(); - synchronized (_inboundGateways) { - _inboundGateways.put(recvId, gw); - } - synchronized (_participatingConfig) { - _participatingConfig.put(recvId, cfg); - } + _inboundGateways.put(recvId, gw); + _participatingConfig.put(recvId, cfg); _context.messageHistory().tunnelJoined("inboundGateway", cfg); _context.statManager().addRateData("tunnel.joinInboundGateway", 1, 0); @@ -271,9 +250,7 @@ public class TunnelDispatcher implements Service { } public int getParticipatingCount() { - synchronized (_participatingConfig) { - return _participatingConfig.size(); - } + return _participatingConfig.size(); } /** what is the date/time on which the last non-locally-created tunnel expires? 
*/ @@ -287,14 +264,9 @@ public class TunnelDispatcher implements Service { TunnelId recvId = cfg.getConfig(cfg.getLength()-1).getReceiveTunnel(); if (_log.shouldLog(Log.DEBUG)) _log.debug("removing our own inbound " + cfg); - TunnelParticipant participant = null; - synchronized (_participants) { - participant = (TunnelParticipant)_participants.remove(recvId); - } + TunnelParticipant participant = _participants.remove(recvId); if (participant == null) { - synchronized (_inboundGateways) { - _inboundGateways.remove(recvId); - } + _inboundGateways.remove(recvId); } else { // update stats based off getCompleteCount() + getFailedCount() for (int i = 0; i < cfg.getLength(); i++) { @@ -311,10 +283,7 @@ public class TunnelDispatcher implements Service { if (_log.shouldLog(Log.DEBUG)) _log.debug("removing our own outbound " + cfg); TunnelId outId = cfg.getConfig(0).getSendTunnel(); - TunnelGateway gw = null; - synchronized (_outboundGateways) { - gw = (TunnelGateway)_outboundGateways.remove(outId); - } + TunnelGateway gw = _outboundGateways.remove(outId); if (gw != null) { // update stats based on gw.getMessagesSent() } @@ -339,26 +308,17 @@ public class TunnelDispatcher implements Service { if (_log.shouldLog(Log.DEBUG)) _log.debug("removing " + cfg); - boolean removed = false; - synchronized (_participatingConfig) { - removed = (null != _participatingConfig.remove(recvId)); - } + boolean removed = (null != _participatingConfig.remove(recvId)); if (!removed) { if (_log.shouldLog(Log.WARN)) _log.warn("Participating tunnel, but no longer listed in participatingConfig? 
" + cfg); } - synchronized (_participants) { - removed = (null != _participants.remove(recvId)); - } + removed = (null != _participants.remove(recvId)); if (removed) return; - synchronized (_inboundGateways) { - removed = (null != _inboundGateways.remove(recvId)); - } + removed = (null != _inboundGateways.remove(recvId)); if (removed) return; - synchronized (_outboundEndpoints) { - removed = (null != _outboundEndpoints.remove(recvId)); - } + _outboundEndpoints.remove(recvId); } /** @@ -372,10 +332,7 @@ public class TunnelDispatcher implements Service { */ public void dispatch(TunnelDataMessage msg, Hash recvFrom) { long before = System.currentTimeMillis(); - TunnelParticipant participant = null; - synchronized (_participants) { - participant = (TunnelParticipant)_participants.get(msg.getTunnelIdObj()); - } + TunnelParticipant participant = _participants.get(msg.getTunnelIdObj()); if (participant != null) { // we are either just a random participant or the inbound endpoint if (_log.shouldLog(Log.DEBUG)) @@ -385,10 +342,7 @@ public class TunnelDispatcher implements Service { participant.dispatch(msg, recvFrom); _context.statManager().addRateData("tunnel.dispatchParticipant", 1, 0); } else { - OutboundTunnelEndpoint endpoint = null; - synchronized (_outboundEndpoints) { - endpoint = (OutboundTunnelEndpoint)_outboundEndpoints.get(msg.getTunnelIdObj()); - } + OutboundTunnelEndpoint endpoint = _outboundEndpoints.get(msg.getTunnelIdObj()); if (endpoint != null) { // we are the outobund endpoint if (_log.shouldLog(Log.DEBUG)) @@ -421,10 +375,7 @@ public class TunnelDispatcher implements Service { */ public void dispatch(TunnelGatewayMessage msg) { long before = System.currentTimeMillis(); - TunnelGateway gw = null; - synchronized (_inboundGateways) { - gw = (TunnelGateway)_inboundGateways.get(msg.getTunnelId()); - } + TunnelGateway gw = _inboundGateways.get(msg.getTunnelId()); if (gw != null) { if (_log.shouldLog(Log.DEBUG)) _log.debug("dispatch where we are the inbound 
gateway: " + gw + ": " + msg); @@ -489,10 +440,7 @@ public class TunnelDispatcher implements Service { public void dispatchOutbound(I2NPMessage msg, TunnelId outboundTunnel, TunnelId targetTunnel, Hash targetPeer) { if (outboundTunnel == null) throw new IllegalArgumentException("wtf, null outbound tunnel?"); long before = _context.clock().now(); - TunnelGateway gw = null; - synchronized (_outboundGateways) { - gw = (TunnelGateway)_outboundGateways.get(outboundTunnel); - } + TunnelGateway gw = _outboundGateways.get(outboundTunnel); if (gw != null) { if (_log.shouldLog(Log.DEBUG)) _log.debug("dispatch outbound through " + outboundTunnel.getTunnelId() @@ -538,10 +486,8 @@ public class TunnelDispatcher implements Service { _context.statManager().addRateData("tunnel.dispatchOutboundTime", dispatchTime, dispatchTime); } - public List listParticipatingTunnels() { - synchronized (_participatingConfig) { - return new ArrayList(_participatingConfig.values()); - } + public List listParticipatingTunnels() { + return new ArrayList(_participatingConfig.values()); } /** @@ -554,7 +500,7 @@ public class TunnelDispatcher implements Service { * and computing the average from that. 
*/ public void updateParticipatingStats() { - List participating = listParticipatingTunnels(); + List participating = listParticipatingTunnels(); int size = participating.size(); long count = 0; long bw = 0; @@ -563,7 +509,7 @@ public class TunnelDispatcher implements Service { long tooYoung = _context.clock().now() - 60*1000; long tooOld = tooYoung - 9*60*1000; for (int i = 0; i < size; i++) { - HopConfig cfg = (HopConfig)participating.get(i); + HopConfig cfg = participating.get(i); long c = cfg.getRecentMessagesCount(); bw += c; bwOut += cfg.getRecentSentMessagesCount(); @@ -645,7 +591,7 @@ public class TunnelDispatcher implements Service { public void dropBiggestParticipating() { - List partTunnels = listParticipatingTunnels(); + List partTunnels = listParticipatingTunnels(); if ((partTunnels == null) || (partTunnels.size() == 0)) { if (_log.shouldLog(Log.ERROR)) _log.error("Not dropping tunnel, since partTunnels was null or had 0 items!"); @@ -668,7 +614,7 @@ public class TunnelDispatcher implements Service { for (int i=0; i _configs; + private List _times; public LeaveTunnel(RouterContext ctx) { super(ctx); @@ -765,12 +711,12 @@ public class TunnelDispatcher implements Service { synchronized (LeaveTunnel.this) { if (_configs.size() <= 0) return; - nextTime = (Long)_times.get(0); + nextTime = _times.get(0); if (nextTime.longValue() <= now) { - cur = (HopConfig)_configs.remove(0); + cur = _configs.remove(0); _times.remove(0); if (_times.size() > 0) - nextTime = (Long)_times.get(0); + nextTime = _times.get(0); else nextTime = null; } else { diff --git a/router/java/src/net/i2p/router/tunnel/pool/BuildExecutor.java b/router/java/src/net/i2p/router/tunnel/pool/BuildExecutor.java index 6ca93d516..3a84f4810 100644 --- a/router/java/src/net/i2p/router/tunnel/pool/BuildExecutor.java +++ b/router/java/src/net/i2p/router/tunnel/pool/BuildExecutor.java @@ -39,13 +39,14 @@ class BuildExecutor implements Runnable { 
_context.statManager().createRateStat("tunnel.concurrentBuildsLagged", "How many builds are going at once when we reject further builds, due to job lag (period is lag)", "Tunnels", new long[] { 60*1000, 5*60*1000, 60*60*1000 }); _context.statManager().createRateStat("tunnel.buildExploratoryExpire", "How often an exploratory tunnel times out during creation", "Tunnels", new long[] { 60*1000, 10*60*1000 }); _context.statManager().createRateStat("tunnel.buildClientExpire", "How often a client tunnel times out during creation", "Tunnels", new long[] { 60*1000, 10*60*1000 }); - _context.statManager().createRateStat("tunnel.buildExploratorySuccess", "How often an exploratory tunnel is fully built", "Tunnels", new long[] { 60*1000, 10*60*1000 }); - _context.statManager().createRateStat("tunnel.buildClientSuccess", "How often a client tunnel is fully built", "Tunnels", new long[] { 60*1000, 10*60*1000 }); - _context.statManager().createRateStat("tunnel.buildExploratoryReject", "How often an exploratory tunnel is rejected", "Tunnels", new long[] { 60*1000, 10*60*1000 }); - _context.statManager().createRateStat("tunnel.buildClientReject", "How often a client tunnel is rejected", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _context.statManager().createRateStat("tunnel.buildExploratorySuccess", "Response time for success", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _context.statManager().createRateStat("tunnel.buildClientSuccess", "Response time for success", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _context.statManager().createRateStat("tunnel.buildExploratoryReject", "Response time for rejection", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _context.statManager().createRateStat("tunnel.buildClientReject", "Response time for rejection", "Tunnels", new long[] { 60*1000, 10*60*1000 }); _context.statManager().createRateStat("tunnel.buildRequestTime", "How long it takes to build a tunnel request", "Tunnels", new long[] { 60*1000, 10*60*1000 }); 
_context.statManager().createRateStat("tunnel.buildRequestZeroHopTime", "How long it takes to build a zero hop tunnel", "Tunnels", new long[] { 60*1000, 10*60*1000 }); _context.statManager().createRateStat("tunnel.pendingRemaining", "How many inbound requests are pending after a pass (period is how long the pass takes)?", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _context.statManager().createRateStat("tunnel.buildFailFirstHop", "How often we fail to build a OB tunnel because we can't contact the first hop", "Tunnels", new long[] { 60*1000, 10*60*1000 }); // Get stat manager, get recognized bandwidth tiers StatManager statMgr = _context.statManager(); @@ -71,10 +72,7 @@ class BuildExecutor implements Runnable { int allowed = maxKBps / 6; // Max. 1 concurrent build per 6 KB/s outbound if (allowed < 2) allowed = 2; // Never choke below 2 builds (but congestion may) if (allowed > 10) allowed = 10; // Never go beyond 10, that is uncharted territory (old limit was 5) - - String prop = _context.getProperty("router.tunnelConcurrentBuilds"); - if (prop != null) - try { allowed = Integer.valueOf(prop).intValue(); } catch (NumberFormatException nfe) {} + allowed = _context.getProperty("router.tunnelConcurrentBuilds", allowed); List expired = null; int concurrent = 0; @@ -214,6 +212,9 @@ class BuildExecutor implements Runnable { } */ + /** Set 1.5 * LOOP_TIME < BuildRequestor.REQUEST_TIMEOUT/4 - margin */ + private static final int LOOP_TIME = 1000; + public void run() { _isRunning = true; List wanted = new ArrayList(8); @@ -316,7 +317,7 @@ class BuildExecutor implements Runnable { //if (_log.shouldLog(Log.DEBUG)) // _log.debug("Nothin' doin (allowed=" + allowed + ", wanted=" + wanted.size() + ", pending=" + pendingRemaining + "), wait for a while"); //if (allowed <= 0) - _currentlyBuilding.wait(2000 + _context.random().nextInt(2*1000)); + _currentlyBuilding.wait((LOOP_TIME/2) + _context.random().nextInt(LOOP_TIME)); //else // wanted <= 0 // 
_currentlyBuilding.wait(_context.random().nextInt(30*1000)); } diff --git a/router/java/src/net/i2p/router/tunnel/pool/BuildHandler.java b/router/java/src/net/i2p/router/tunnel/pool/BuildHandler.java index c5121e7c8..0699d3ff4 100644 --- a/router/java/src/net/i2p/router/tunnel/pool/BuildHandler.java +++ b/router/java/src/net/i2p/router/tunnel/pool/BuildHandler.java @@ -61,6 +61,7 @@ class BuildHandler { _context.statManager().createRateStat("tunnel.decryptRequestTime", "How long it takes to decrypt a new tunnel build request", "Tunnels", new long[] { 60*1000, 10*60*1000 }); _context.statManager().createRateStat("tunnel.rejectTimeout", "How often we reject a tunnel because we can't find the next hop", "Tunnels", new long[] { 60*1000, 10*60*1000 }); + _context.statManager().createRateStat("tunnel.rejectTimeout2", "How often we fail a tunnel because we can't contact the next hop", "Tunnels", new long[] { 60*1000, 10*60*1000 }); _context.statManager().createRateStat("tunnel.rejectOverloaded", "How long we had to wait before processing the request (when it was rejected)", "Tunnels", new long[] { 60*1000, 10*60*1000 }); _context.statManager().createRateStat("tunnel.acceptLoad", "Delay before processing the accepted request", "Tunnels", new long[] { 60*1000, 10*60*1000 }); @@ -248,6 +249,8 @@ class BuildHandler { int record = order.indexOf(Integer.valueOf(i)); if (record < 0) { _log.error("Bad status index " + i); + // don't leak + _exec.buildComplete(cfg, cfg.getTunnelPool()); return; } int howBad = statuses[record]; @@ -293,9 +296,9 @@ class BuildHandler { _context.messageHistory().tunnelParticipantRejected(peer, "peer rejected after " + rtt + " with " + howBad + ": " + cfg.toString()); } } + _exec.buildComplete(cfg, cfg.getTunnelPool()); if (allAgree) { // wikked, completely build - _exec.buildComplete(cfg, cfg.getTunnelPool()); if (cfg.isInbound()) _context.tunnelDispatcher().joinInbound(cfg); else @@ -312,7 +315,6 @@ class BuildHandler { 
_context.statManager().addRateData("tunnel.buildClientSuccess", rtt, rtt); } else { // someone is no fun - _exec.buildComplete(cfg, cfg.getTunnelPool()); if (cfg.getDestination() == null) _context.statManager().addRateData("tunnel.buildExploratoryReject", rtt, rtt); else @@ -321,6 +323,8 @@ class BuildHandler { } else { if (_log.shouldLog(Log.WARN)) _log.warn(msg.getUniqueId() + ": Tunnel reply could not be decrypted for tunnel " + cfg); + // don't leak + _exec.buildComplete(cfg, cfg.getTunnelPool()); } } @@ -413,7 +417,7 @@ class BuildHandler { } } - private class TimeoutReq extends JobImpl { + private static class TimeoutReq extends JobImpl { private BuildMessageState _state; private BuildRequestRecord _req; private Hash _nextPeer; @@ -425,10 +429,12 @@ class BuildHandler { } public String getName() { return "Timeout looking for next peer for tunnel join"; } public void runJob() { - getContext().statManager().addRateData("tunnel.rejectTimeout", 1, 1); - if (_log.shouldLog(Log.WARN)) - _log.warn("Request " + _state.msg.getUniqueId() - + " could no be satisfied, as the next peer could not be found: " + _nextPeer.toBase64()); + getContext().statManager().addRateData("tunnel.rejectTimeout", 1, 0); + // logging commented out so class can be static + //if (_log.shouldLog(Log.WARN)) + // _log.warn("Request " + _state.msg.getUniqueId() + // + " could no be satisfied, as the next peer could not be found: " + _nextPeer.toBase64()); + // ??? should we blame the peer here? getContext().profileManager().tunnelTimedOut(_nextPeer); getContext().messageHistory().tunnelRejected(_state.fromHash, new TunnelId(_req.readReceiveTunnelId()), _nextPeer, "rejected because we couldn't find " + _nextPeer.toBase64() + ": " + @@ -498,8 +504,15 @@ class BuildHandler { } } + /* + * Being a IBGW or OBEP generally leads to more connections, so if we are + * approaching our connection limit (i.e. !haveCapacity()), + * reject this request. 
+ */ if (response == 0 && (isInGW || isOutEnd) && - Boolean.valueOf(_context.getProperty(PROP_REJECT_NONPARTICIPANT)).booleanValue()) { + (Boolean.valueOf(_context.getProperty(PROP_REJECT_NONPARTICIPANT)).booleanValue() || + ! _context.commSystem().haveCapacity())) { + _context.throttle().setTunnelStatus("Rejecting tunnels: Connection limit"); response = TunnelHistory.TUNNEL_REJECT_BANDWIDTH; } @@ -509,8 +522,9 @@ class BuildHandler { + " from " + (state.fromHash != null ? state.fromHash.toBase64() : state.from != null ? state.from.calculateHash().toBase64() : "tunnel")); + HopConfig cfg = null; if (response == 0) { - HopConfig cfg = new HopConfig(); + cfg = new HopConfig(); cfg.setCreation(_context.clock().now()); cfg.setExpiration(_context.clock().now() + 10*60*1000); cfg.setIVKey(req.readIVKey()); @@ -586,6 +600,8 @@ class BuildHandler { msg.setExpiration(state.msg.getMessageExpiration()); msg.setPriority(300); msg.setTarget(nextPeerInfo); + if (response == 0) + msg.setOnFailedSendJob(new TunnelBuildNextHopFailJob(_context, cfg)); _context.outNetMessagePool().add(msg); } else { // send it to the reply tunnel on the reply peer within a new TunnelBuildReplyMessage @@ -612,6 +628,8 @@ class BuildHandler { outMsg.setMessage(m); outMsg.setPriority(300); outMsg.setTarget(nextPeerInfo); + if (response == 0) + outMsg.setOnFailedSendJob(new TunnelBuildNextHopFailJob(_context, cfg)); _context.outNetMessagePool().add(outMsg); } } @@ -755,7 +773,7 @@ class BuildHandler { } /** normal inbound requests from other people */ - private class BuildMessageState { + private static class BuildMessageState { TunnelBuildMessage msg; RouterIdentity from; Hash fromHash; @@ -768,7 +786,7 @@ class BuildHandler { } } /** replies for outbound tunnels that we have created */ - private class BuildReplyMessageState { + private static class BuildReplyMessageState { TunnelBuildReplyMessage msg; long recvTime; public BuildReplyMessageState(I2NPMessage m) { @@ -777,7 +795,7 @@ class BuildHandler { 
} } /** replies for inbound tunnels we have created */ - private class BuildEndMessageState { + private static class BuildEndMessageState { TunnelBuildMessage msg; PooledTunnelCreatorConfig cfg; long recvTime; @@ -789,15 +807,35 @@ class BuildHandler { } // noop - private class TunnelBuildMessageHandlerJob extends JobImpl { + private static class TunnelBuildMessageHandlerJob extends JobImpl { private TunnelBuildMessageHandlerJob(RouterContext ctx) { super(ctx); } public void runJob() {} public String getName() { return "Receive tunnel build message"; } } // noop - private class TunnelBuildReplyMessageHandlerJob extends JobImpl { + private static class TunnelBuildReplyMessageHandlerJob extends JobImpl { private TunnelBuildReplyMessageHandlerJob(RouterContext ctx) { super(ctx); } public void runJob() {} public String getName() { return "Receive tunnel build reply message"; } } + + /** + * Remove the participating tunnel if we can't contact the next hop + * Not strictly necessary, as the entry doesn't use that much space, + * but it affects capacity calculations + */ + private static class TunnelBuildNextHopFailJob extends JobImpl { + HopConfig _cfg; + private TunnelBuildNextHopFailJob(RouterContext ctx, HopConfig cfg) { + super(ctx); + _cfg = cfg; + } + public String getName() { return "Timeout contacting next peer for tunnel join"; } + public void runJob() { + getContext().tunnelDispatcher().remove(_cfg); + getContext().statManager().addRateData("tunnel.rejectTimeout2", 1, 0); + // static, no _log + //_log.error("Cant contact next hop for " + _cfg); + } + } } diff --git a/router/java/src/net/i2p/router/tunnel/pool/BuildRequestor.java b/router/java/src/net/i2p/router/tunnel/pool/BuildRequestor.java index a4917772f..21325be85 100644 --- a/router/java/src/net/i2p/router/tunnel/pool/BuildRequestor.java +++ b/router/java/src/net/i2p/router/tunnel/pool/BuildRequestor.java @@ -12,6 +12,7 @@ import net.i2p.data.RouterInfo; import net.i2p.data.TunnelId; import 
net.i2p.data.i2np.I2NPMessage; import net.i2p.data.i2np.TunnelBuildMessage; +import net.i2p.router.JobImpl; import net.i2p.router.OutNetMessage; import net.i2p.router.RouterContext; import net.i2p.router.TunnelInfo; @@ -136,6 +137,7 @@ class BuildRequestor { return; } outMsg.setTarget(peer); + outMsg.setOnFailedSendJob(new TunnelBuildFirstHopFailJob(ctx, pool, cfg, exec)); ctx.outNetMessagePool().add(outMsg); } if (log.shouldLog(Log.DEBUG)) @@ -213,4 +215,33 @@ class BuildRequestor { ctx.jobQueue().addJob(expireJob); // can it get much easier? } + + /** + * Do two important things if we can't get the build msg to the + * first hop on an outbound tunnel - + * - Call buildComplete() so we can get started on the next build + * without waiting for the full expire time + * - Blame the first hop in the profile + * Most likely to happen on an exploratory tunnel, obviously. + * Can't do this for inbound tunnels since the msg goes out an expl. tunnel. + */ + private static class TunnelBuildFirstHopFailJob extends JobImpl { + TunnelPool _pool; + PooledTunnelCreatorConfig _cfg; + BuildExecutor _exec; + private TunnelBuildFirstHopFailJob(RouterContext ctx, TunnelPool pool, PooledTunnelCreatorConfig cfg, BuildExecutor exec) { + super(ctx); + _cfg = cfg; + _exec = exec; + _pool = pool; + } + public String getName() { return "Timeout contacting first peer for OB tunnel"; } + public void runJob() { + _exec.buildComplete(_cfg, _pool); + getContext().profileManager().tunnelTimedOut(_cfg.getPeer(1)); + getContext().statManager().addRateData("tunnel.buildFailFirstHop", 1, 0); + // static, no _log + //System.err.println("Cant contact first hop for " + _cfg); + } + } } diff --git a/router/java/src/net/i2p/router/tunnel/pool/TunnelPool.java b/router/java/src/net/i2p/router/tunnel/pool/TunnelPool.java index 0fca44cc4..06a9b4999 100644 --- a/router/java/src/net/i2p/router/tunnel/pool/TunnelPool.java +++ b/router/java/src/net/i2p/router/tunnel/pool/TunnelPool.java @@ -468,7 +468,9 @@ public 
class TunnelPool { if (_tunnels.size() < wanted) { if (_log.shouldLog(Log.WARN)) _log.warn(toString() + ": Not enough tunnels (" + _tunnels.size() + ", wanted " + wanted + ")"); - return null; + // see comment below + if (_tunnels.size() <= 0) + return null; } long expireAfter = _context.clock().now(); // + _settings.getRebuildPeriod(); @@ -492,15 +494,26 @@ public class TunnelPool { leases.add(lease); } + // Go ahead and use less leases for now, hopefully a new tunnel will be built soon + // and we will get called again to generate a full leaseset. + // For clients with high tunnel count or length, + // this will make startup considerably faster, and reduce loss of leaseset + // when one tunnel is lost, thus making us much more robust. + // This also helps when returning to full lease count after reduce-on-idle + // or close-on-idle. + // So we will generate a succession of leases at startup. That's OK. + // Do we want a config option for this, or are there times when we shouldn't do this? 
if (leases.size() < wanted) { if (_log.shouldLog(Log.WARN)) _log.warn(toString() + ": Not enough leases (" + leases.size() + ", wanted " + wanted + ")"); - return null; + if (leases.size() <= 0) + return null; } LeaseSet ls = new LeaseSet(); Iterator iter = leases.iterator(); - for (int i = 0; i < wanted; i++) + int count = Math.min(leases.size(), wanted); + for (int i = 0; i < count; i++) ls.addLease((Lease) iter.next()); if (_log.shouldLog(Log.INFO)) _log.info(toString() + ": built new leaseSet: " + ls); diff --git a/router/java/src/net/i2p/router/tunnel/pool/TunnelPoolManager.java b/router/java/src/net/i2p/router/tunnel/pool/TunnelPoolManager.java index 4416582df..58637ce63 100644 --- a/router/java/src/net/i2p/router/tunnel/pool/TunnelPoolManager.java +++ b/router/java/src/net/i2p/router/tunnel/pool/TunnelPoolManager.java @@ -237,22 +237,20 @@ public class TunnelPoolManager implements TunnelManagerFacade { return null; } public void setInboundSettings(Hash client, TunnelPoolSettings settings) { - - TunnelPool pool = null; - synchronized (_clientInboundPools) { - pool = (TunnelPool)_clientInboundPools.get(client); - } - if (pool != null) - pool.setSettings(settings); + setSettings(_clientInboundPools, client, settings); } public void setOutboundSettings(Hash client, TunnelPoolSettings settings) { - + setSettings(_clientOutboundPools, client, settings); + } + private void setSettings(Map pools, Hash client, TunnelPoolSettings settings) { TunnelPool pool = null; - synchronized (_clientOutboundPools) { - pool = (TunnelPool)_clientOutboundPools.get(client); + synchronized (pools) { + pool = (TunnelPool)pools.get(client); } - if (pool != null) + if (pool != null) { + settings.setDestination(client); // prevent spoofing or unset dest pool.setSettings(settings); + } } public void restart() { @@ -376,7 +374,7 @@ public class TunnelPoolManager implements TunnelManagerFacade { _context.jobQueue().addJob(new BootstrapPool(_context, _outboundExploratory)); } - private class 
BootstrapPool extends JobImpl { + private static class BootstrapPool extends JobImpl { private TunnelPool _pool; public BootstrapPool(RouterContext ctx, TunnelPool pool) { super(ctx); @@ -472,7 +470,7 @@ public class TunnelPoolManager implements TunnelManagerFacade { else out.write("n/a" + cfg.getReceiveFrom().toBase64().substring(0,4) +"" + netDbLink(cfg.getReceiveFrom()) +"  " + cfg.getSendTo().toBase64().substring(0,4) +"" + netDbLink(cfg.getSendTo()) +" 
            \n"); out.write("Inactive participating tunnels: " + inactive + "
            \n"); - out.write("Lifetime bandwidth usage: " + processed + "KB
            \n"); + out.write("Lifetime bandwidth usage: " + DataHelper.formatSize(processed*1024) + "B
            \n"); } class TunnelComparator implements Comparator { @@ -549,7 +547,7 @@ public class TunnelPoolManager implements TunnelManagerFacade { if (_context.routerHash().equals(peer)) out.write("" + (id == null ? "" : "" + id) + ""); else - out.write("" + peer.toBase64().substring(0,4) + (id == null ? "" : ":" + id) + cap + ""); + out.write("" + netDbLink(peer) + (id == null ? "" : ":" + id) + cap + ""); } out.write("\n"); @@ -577,7 +575,8 @@ public class TunnelPoolManager implements TunnelManagerFacade { } if (live <= 0) out.write("No tunnels, waiting for the grace period to end
            \n"); - out.write("Lifetime bandwidth usage: " + processedIn + "KB in, " + processedOut + "KB out
            "); + out.write("Lifetime bandwidth usage: " + DataHelper.formatSize(processedIn*1024) + "B in, " + + DataHelper.formatSize(processedOut*1024) + "B out
            "); } private String getCapacity(Hash peer) { @@ -601,4 +600,9 @@ public class TunnelPoolManager implements TunnelManagerFacade { return "[unkn]"; } } + + private static String netDbLink(Hash peer) { + String h = peer.toBase64().substring(0, 4); + return "" + h + ""; + } }