diff --git a/Makefile b/Makefile
index 0b29820..a01b10d 100644
--- a/Makefile
+++ b/Makefile
@@ -140,7 +140,7 @@ coverage-output-text:
 coverage-output:
 	rm -rf coverage-html
-	coverage html -d coverage-html $(COVERAGE_OMIT)
+	coverage html -i -d coverage-html $(COVERAGE_OMIT)
 	cp .coverage coverage-html/coverage.data
 	@echo "now point your browser at coverage-html/index.html"
@@ -184,6 +184,8 @@ endif
 pyflakes:
 	$(PYTHON) -OOu `which pyflakes` src/allmydata |sort |uniq
+check-umids:
+	$(PYTHON) misc/check-umids.py `find src/allmydata -name '*.py'`
 
 count-lines:
 	@echo -n "files: "
diff --git a/misc/check-umids.py b/misc/check-umids.py
new file mode 100755
index 0000000..05e8825
--- /dev/null
+++ b/misc/check-umids.py
@@ -0,0 +1,30 @@
+#! /usr/bin/python
+
+# ./check-umids.py `find src/allmydata -name '*.py'`
+
+import sys, re, os
+
+ok = True
+umids = {}
+
+for fn in sys.argv[1:]:
+    fn = os.path.abspath(fn)
+    for lineno,line in enumerate(open(fn, "r").readlines()):
+        lineno = lineno+1
+        if "umid" not in line:
+            continue
+        mo = re.search("umid=[\"\']([^\"\']+)[\"\']", line)
+        if mo:
+            umid = mo.group(1)
+            if umid in umids:
+                oldfn, oldlineno = umids[umid]
+                print "%s:%d: duplicate umid '%s'" % (fn, lineno, umid)
+                print "%s:%d: first used here" % (oldfn, oldlineno)
+                ok = False
+            umids[umid] = (fn,lineno)
+
+if ok:
+    print "all umids are unique"
+else:
+    print "some umids were duplicates"
+    sys.exit(1)
diff --git a/misc/coverage.el b/misc/coverage.el
index bad490f..8d69d5d 100644
--- a/misc/coverage.el
+++ b/misc/coverage.el
@@ -84,7 +84,8 @@
                              'face '(:box "red")
                              )
                 )
-          (message "Added annotations")
+          (message (format "Added annotations: %d uncovered lines"
+                           (safe-length uncovered-code-lines)))
           )
       )
     (message "unable to find coverage for this file"))
diff --git a/misc/coverage2el.py b/misc/coverage2el.py
index ed94bd0..7d03a27 100644
--- a/misc/coverage2el.py
+++ b/misc/coverage2el.py
@@ -1,5 +1,5 @@
-from coverage import coverage, summary
+from coverage import coverage, summary, misc
 
 class ElispReporter(summary.SummaryReporter):
     def report(self):
@@ -21,7 +21,10 @@ class ElispReporter(summary.SummaryReporter):
         out.write("(let ((results (make-hash-table :test 'equal)))\n")
         for cu in self.code_units:
             f = cu.filename
-            (fn, executable, missing, mf) = self.coverage.analysis(cu)
+            try:
+                (fn, executable, missing, mf) = self.coverage.analysis(cu)
+            except misc.NoSource:
+                continue
             code_linenumbers = executable
             uncovered_code = missing
             covered_linenumbers = sorted(set(executable) - set(missing))
diff --git a/src/allmydata/client.py b/src/allmydata/client.py
index 12e7473..b01fbe8 100644
--- a/src/allmydata/client.py
+++ b/src/allmydata/client.py
@@ -12,11 +12,11 @@ import allmydata
 from allmydata.storage.server import StorageServer
 from allmydata import storage_client
 from allmydata.immutable.upload import Uploader
-from allmydata.immutable.download import Downloader
+from allmydata.immutable.download2_util import Terminator
 from allmydata.immutable.offloaded import Helper
 from allmydata.control import ControlServer
 from allmydata.introducer.client import IntroducerClient
-from allmydata.util import hashutil, base32, pollmixin, cachedir, log
+from allmydata.util import hashutil, base32, pollmixin, log
 from allmydata.util.abbreviate import parse_abbreviated_size
 from allmydata.util.time_format import parse_duration, parse_date
 from allmydata.stats import StatsProvider
@@ -278,12 +278,9 @@ class Client(node.Node, pollmixin.PollMixin):
         self.init_client_storage_broker()
         self.history = History(self.stats_provider)
+        self.terminator = 
Terminator() + self.terminator.setServiceParent(self) self.add_service(Uploader(helper_furl, self.stats_provider)) - download_cachedir = os.path.join(self.basedir, - "private", "cache", "download") - self.download_cache_dirman = cachedir.CacheDirectoryManager(download_cachedir) - self.download_cache_dirman.setServiceParent(self) - self.downloader = Downloader(self.storage_broker, self.stats_provider) self.init_stub_client() self.init_nodemaker() @@ -342,8 +339,7 @@ class Client(node.Node, pollmixin.PollMixin): self._secret_holder, self.get_history(), self.getServiceNamed("uploader"), - self.downloader, - self.download_cache_dirman, + self.terminator, self.get_encoding_parameters(), self._key_generator) diff --git a/src/allmydata/immutable/checker.py b/src/allmydata/immutable/checker.py index 2f2d8f1..31c70e3 100644 --- a/src/allmydata/immutable/checker.py +++ b/src/allmydata/immutable/checker.py @@ -85,7 +85,9 @@ class Checker(log.PrefixingLogMixin): level = log.WEIRD if f.check(DeadReferenceError): level = log.UNUSUAL - self.log("failure from server on 'get_buckets' the REMOTE failure was:", facility="tahoe.immutable.checker", failure=f, level=level, umid="3uuBUQ") + self.log("failure from server on 'get_buckets' the REMOTE failure was:", + facility="tahoe.immutable.checker", + failure=f, level=level, umid="AX7wZQ") return ({}, serverid, False) d.addCallbacks(_wrap_results, _trap_errs) diff --git a/src/allmydata/immutable/download2.py b/src/allmydata/immutable/download2.py new file mode 100644 index 0000000..f0d98fe --- /dev/null +++ b/src/allmydata/immutable/download2.py @@ -0,0 +1,2093 @@ + +import binascii +import struct +import copy +from zope.interface import implements +from twisted.python.failure import Failure +from twisted.internet import defer +from twisted.internet.interfaces import IPushProducer, IConsumer + +from foolscap.api import eventually +from allmydata.interfaces import IImmutableFileNode, IUploadResults, \ + NotEnoughSharesError, NoSharesError, HASH_SIZE, DEFAULT_MAX_SEGMENT_SIZE +from allmydata.hashtree import IncompleteHashTree, BadHashError, \ + NotEnoughHashesError +from allmydata.util import base32, log, hashutil, mathutil, idlib +from allmydata.util.spans import Spans, DataSpans, overlap +from allmydata.util.dictutil import DictOfSets +from allmydata.check_results import CheckResults, CheckAndRepairResults +from allmydata.codec import CRSDecoder +from allmydata import uri +from pycryptopp.cipher.aes import AES +from download2_util import Observer2, incidentally +from layout import make_write_bucket_proxy +from checker import Checker +from repairer import Repairer + +(AVAILABLE, PENDING, OVERDUE, COMPLETE, CORRUPT, DEAD, BADSEGNUM) = \ + ("AVAILABLE", "PENDING", "OVERDUE", "COMPLETE", "CORRUPT", "DEAD", "BADSEGNUM") + +KiB = 1024 +class BadSegmentNumberError(Exception): + pass +class BadSegmentError(Exception): + pass +class BadCiphertextHashError(Exception): + pass +class LayoutInvalid(Exception): + pass +class DataUnavailable(Exception): + pass + +class Share: + """I represent a single instance of a single share (e.g. I reference the + shnum2 for share SI=abcde on server xy12t, not the one on server ab45q). + I am associated with a CommonShare that remembers data that is held in + common among e.g. SI=abcde/shnum2 across all servers. I am also + associated with a CiphertextFileNode for e.g. SI=abcde (all shares, all + servers). + """ + # this is a specific implementation of IShare for tahoe's native storage + # servers. 
A different backend would use a different class. + + def __init__(self, rref, server_version, verifycap, commonshare, node, + peerid, shnum, logparent): + self._rref = rref + self._server_version = server_version + self._node = node # holds share_hash_tree and UEB + self.actual_segment_size = node.segment_size # might still be None + # XXX change node.guessed_segment_size to + # node.best_guess_segment_size(), which should give us the real ones + # if known, else its guess. + self._guess_offsets(verifycap, node.guessed_segment_size) + self.actual_offsets = None + self._UEB_length = None + self._commonshare = commonshare # holds block_hash_tree + self._peerid = peerid + self._peerid_s = base32.b2a(peerid)[:5] + self._storage_index = verifycap.storage_index + self._si_prefix = base32.b2a(verifycap.storage_index)[:8] + self._shnum = shnum + # self._alive becomes False upon fatal corruption or server error + self._alive = True + self._lp = log.msg(format="%(share)s created", share=repr(self), + level=log.NOISY, parent=logparent, umid="P7hv2w") + + self._pending = Spans() # request sent but no response received yet + self._received = DataSpans() # ACK response received, with data + self._unavailable = Spans() # NAK response received, no data + + # any given byte of the share can be in one of four states: + # in: _wanted, _requested, _received + # FALSE FALSE FALSE : don't care about it at all + # TRUE FALSE FALSE : want it, haven't yet asked for it + # TRUE TRUE FALSE : request is in-flight + # or didn't get it + # FALSE TRUE TRUE : got it, haven't used it yet + # FALSE TRUE FALSE : got it and used it + # FALSE FALSE FALSE : block consumed, ready to ask again + # + # when we request data and get a NAK, we leave it in _requested + # to remind ourself to not ask for it again. We don't explicitly + # remove it from anything (maybe this should change). + # + # We retain the hashtrees in the Node, so we leave those spans in + # _requested (and never ask for them again, as long as the Node is + # alive). But we don't retain data blocks (too big), so when we + # consume a data block, we remove it from _requested, so a later + # download can re-fetch it. + + self._requested_blocks = [] # (segnum, set(observer2..)) + ver = server_version["http://allmydata.org/tahoe/protocols/storage/v1"] + self._overrun_ok = ver["tolerates-immutable-read-overrun"] + # If _overrun_ok and we guess the offsets correctly, we can get + # everything in one RTT. If _overrun_ok and we guess wrong, we might + # need two RTT (but we could get lucky and do it in one). If overrun + # is *not* ok (tahoe-1.3.0 or earlier), we need four RTT: 1=version, + # 2=offset table, 3=UEB_length and everything else (hashes, block), + # 4=UEB. + + self.had_corruption = False # for unit tests + + def __repr__(self): + return "Share(sh%d-on-%s)" % (self._shnum, self._peerid_s) + + def is_alive(self): + # XXX: reconsider. If the share sees a single error, should it remain + # dead for all time? Or should the next segment try again? This DEAD + # state is stored elsewhere too (SegmentFetcher per-share states?) + # and needs to be consistent. We clear _alive in self._fail(), which + # is called upon a network error, or layout failure, or hash failure + # in the UEB or a hash tree. We do not _fail() for a hash failure in + # a block, but of course we still tell our callers about + # state=CORRUPT so they'll find a different share. 
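+        # In short: _alive answers "should the SegmentFetcher keep using
+        # this share at all", while per-block CORRUPT notifications merely
+        # steer it away from individual blocks on an otherwise-good share.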
+ return self._alive + + def _guess_offsets(self, verifycap, guessed_segment_size): + self.guessed_segment_size = guessed_segment_size + size = verifycap.size + k = verifycap.needed_shares + N = verifycap.total_shares + r = self._node._calculate_sizes(guessed_segment_size) + # num_segments, block_size/tail_block_size + # guessed_segment_size/tail_segment_size/tail_segment_padded + share_size = mathutil.div_ceil(size, k) + # share_size is the amount of block data that will be put into each + # share, summed over all segments. It does not include hashes, the + # UEB, or other overhead. + + # use the upload-side code to get this as accurate as possible + ht = IncompleteHashTree(N) + num_share_hashes = len(ht.needed_hashes(0, include_leaf=True)) + wbp = make_write_bucket_proxy(None, share_size, r["block_size"], + r["num_segments"], num_share_hashes, 0, + None) + self._fieldsize = wbp.fieldsize + self._fieldstruct = wbp.fieldstruct + self.guessed_offsets = wbp._offsets + + # called by our client, the SegmentFetcher + def get_block(self, segnum): + """Add a block number to the list of requests. This will eventually + result in a fetch of the data necessary to validate the block, then + the block itself. The fetch order is generally + first-come-first-served, but requests may be answered out-of-order if + data becomes available sooner. + + I return an Observer2, which has two uses. The first is to call + o.subscribe(), which gives me a place to send state changes and + eventually the data block. The second is o.cancel(), which removes + the request (if it is still active). + + I will distribute the following events through my Observer2: + - state=OVERDUE: ?? I believe I should have had an answer by now. + You may want to ask another share instead. + - state=BADSEGNUM: the segnum you asked for is too large. I must + fetch a valid UEB before I can determine this, + so the notification is asynchronous + - state=COMPLETE, block=data: here is a valid block + - state=CORRUPT: this share contains corrupted data + - state=DEAD, f=Failure: the server reported an error, this share + is unusable + """ + log.msg("%s.get_block(%d)" % (repr(self), segnum), + level=log.NOISY, parent=self._lp, umid="RTo9MQ") + assert segnum >= 0 + o = Observer2() + o.set_canceler(self, "_cancel_block_request") + for i,(segnum0,observers) in enumerate(self._requested_blocks): + if segnum0 == segnum: + observers.add(o) + break + else: + self._requested_blocks.append( (segnum, set([o])) ) + eventually(self.loop) + return o + + def _cancel_block_request(self, o): + new_requests = [] + for e in self._requested_blocks: + (segnum0, observers) = e + observers.discard(o) + if observers: + new_requests.append(e) + self._requested_blocks = new_requests + + # internal methods + def _active_segnum_and_observers(self): + if self._requested_blocks: + # we only retrieve information for one segment at a time, to + # minimize alacrity (first come, first served) + return self._requested_blocks[0] + return None, [] + + def loop(self): + try: + # if any exceptions occur here, kill the download + log.msg("%s.loop, reqs=[%s], pending=%s, received=%s," + " unavailable=%s" % + (repr(self), + ",".join([str(req[0]) for req in self._requested_blocks]), + self._pending.dump(), self._received.dump(), + self._unavailable.dump() ), + level=log.NOISY, parent=self._lp, umid="BaL1zw") + self._do_loop() + # all exception cases call self._fail(), which clears self._alive + except (BadHashError, NotEnoughHashesError, LayoutInvalid), e: + # Abandon this share. 
We do this if we see corruption in the + # offset table, the UEB, or a hash tree. We don't abandon the + # whole share if we see corruption in a data block (we abandon + # just the one block, and still try to get data from other blocks + # on the same server). In theory, we could get good data from a + # share with a corrupt UEB (by first getting the UEB from some + # other share), or corrupt hash trees, but the logic to decide + # when this is safe is non-trivial. So for now, give up at the + # first sign of corruption. + # + # _satisfy_*() code which detects corruption should first call + # self._signal_corruption(), and then raise the exception. + log.msg(format="corruption detected in %(share)s", + share=repr(self), + level=log.UNUSUAL, parent=self._lp, umid="gWspVw") + self._fail(Failure(e), log.UNUSUAL) + except DataUnavailable, e: + # Abandon this share. + log.msg(format="need data that will never be available" + " from %s: pending=%s, received=%s, unavailable=%s" % + (repr(self), + self._pending.dump(), self._received.dump(), + self._unavailable.dump() ), + level=log.UNUSUAL, parent=self._lp, umid="F7yJnQ") + self._fail(Failure(e), log.UNUSUAL) + except BaseException: + self._fail(Failure()) + raise + log.msg("%s.loop done, reqs=[%s], pending=%s, received=%s," + " unavailable=%s" % + (repr(self), + ",".join([str(req[0]) for req in self._requested_blocks]), + self._pending.dump(), self._received.dump(), + self._unavailable.dump() ), + level=log.NOISY, parent=self._lp, umid="9lRaRA") + + def _do_loop(self): + # we are (eventually) called after all state transitions: + # new segments added to self._requested_blocks + # new data received from servers (responses to our read() calls) + # impatience timer fires (server appears slow) + if not self._alive: + return + + # First, consume all of the information that we currently have, for + # all the segments people currently want. + while self._get_satisfaction(): + pass + + # When we get no satisfaction (from the data we've received so far), + # we determine what data we desire (to satisfy more requests). The + # number of segments is finite, so I can't get no satisfaction + # forever. + wanted, needed = self._desire() + + # Finally, send out requests for whatever we need (desire minus + # have). You can't always get what you want, but if you try + # sometimes, you just might find, you get what you need. + self._send_requests(wanted + needed) + + # and sometimes you can't even get what you need + disappointment = needed & self._unavailable + if len(disappointment): + self.had_corruption = True + raise DataUnavailable("need %s but will never get it" % + disappointment.dump()) + + def _get_satisfaction(self): + # return True if we retired a data block, and should therefore be + # called again. Return False if we don't retire a data block (even if + # we do retire some other data, like hash chains). + + if self.actual_offsets is None: + if not self._satisfy_offsets(): + # can't even look at anything without the offset table + return False + + if not self._node.have_UEB: + if not self._satisfy_UEB(): + # can't check any hashes without the UEB + return False + self.actual_segment_size = self._node.segment_size # might be updated + assert self.actual_segment_size is not None + + # knowing the UEB means knowing num_segments. Despite the redundancy, + # this is the best place to set this. CommonShare.set_numsegs will + # ignore duplicate calls. 
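+        # (set_numsegs is therefore safe to call from every Share that
+        # learns the UEB: the first call sizes the tree, and the duplicate
+        # calls mentioned above are ignored)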
+ cs = self._commonshare + cs.set_numsegs(self._node.num_segments) + + segnum, observers = self._active_segnum_and_observers() + # if segnum is None, we don't really need to do anything (we have no + # outstanding readers right now), but we'll fill in the bits that + # aren't tied to any particular segment. + + if segnum is not None and segnum >= self._node.num_segments: + for o in observers: + o.notify(state=BADSEGNUM) + self._requested_blocks.pop(0) + return True + + if self._node.share_hash_tree.needed_hashes(self._shnum): + if not self._satisfy_share_hash_tree(): + # can't check block_hash_tree without a root + return False + + if cs.need_block_hash_root(): + block_hash_root = self._node.share_hash_tree.get_leaf(self._shnum) + cs.set_block_hash_root(block_hash_root) + + if segnum is None: + return False # we don't want any particular segment right now + + # block_hash_tree + needed_hashes = self._commonshare.get_needed_block_hashes(segnum) + if needed_hashes: + if not self._satisfy_block_hash_tree(needed_hashes): + # can't check block without block_hash_tree + return False + + # ciphertext_hash_tree + needed_hashes = self._node.get_needed_ciphertext_hashes(segnum) + if needed_hashes: + if not self._satisfy_ciphertext_hash_tree(needed_hashes): + # can't check decoded blocks without ciphertext_hash_tree + return False + + # data blocks + return self._satisfy_data_block(segnum, observers) + + def _satisfy_offsets(self): + version_s = self._received.get(0, 4) + if version_s is None: + return False + (version,) = struct.unpack(">L", version_s) + if version == 1: + table_start = 0x0c + self._fieldsize = 0x4 + self._fieldstruct = "L" + elif version == 2: + table_start = 0x14 + self._fieldsize = 0x8 + self._fieldstruct = "Q" + else: + self.had_corruption = True + raise LayoutInvalid("unknown version %d (I understand 1 and 2)" + % version) + offset_table_size = 6 * self._fieldsize + table_s = self._received.pop(table_start, offset_table_size) + if table_s is None: + return False + fields = struct.unpack(">"+6*self._fieldstruct, table_s) + offsets = {} + for i,field in enumerate(['data', + 'plaintext_hash_tree', # UNUSED + 'crypttext_hash_tree', + 'block_hashes', + 'share_hashes', + 'uri_extension', + ] ): + offsets[field] = fields[i] + self.actual_offsets = offsets + log.msg("actual offsets: data=%d, plaintext_hash_tree=%d, crypttext_hash_tree=%d, block_hashes=%d, share_hashes=%d, uri_extension=%d" % tuple(fields)) + self._received.remove(0, 4) # don't need this anymore + + # validate the offsets a bit + share_hashes_size = offsets["uri_extension"] - offsets["share_hashes"] + if share_hashes_size < 0 or share_hashes_size % (2+HASH_SIZE) != 0: + # the share hash chain is stored as (hashnum,hash) pairs + self.had_corruption = True + raise LayoutInvalid("share hashes malformed -- should be a" + " multiple of %d bytes -- not %d" % + (2+HASH_SIZE, share_hashes_size)) + block_hashes_size = offsets["share_hashes"] - offsets["block_hashes"] + if block_hashes_size < 0 or block_hashes_size % (HASH_SIZE) != 0: + # the block hash tree is stored as a list of hashes + self.had_corruption = True + raise LayoutInvalid("block hashes malformed -- should be a" + " multiple of %d bytes -- not %d" % + (HASH_SIZE, block_hashes_size)) + # we only look at 'crypttext_hash_tree' if the UEB says we're + # actually using it. Same with 'plaintext_hash_tree'. This gives us + # some wiggle room: a place to stash data for later extensions. 
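+        # For reference, the v1 layout puts the 4-byte big-endian version
+        # number at 0x00 and the six-entry offset table at 0x0c, 4 bytes
+        # per entry; v2 moves the table to 0x14 and widens each entry to
+        # 8 bytes.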
+ + return True + + def _satisfy_UEB(self): + o = self.actual_offsets + fsize = self._fieldsize + UEB_length_s = self._received.get(o["uri_extension"], fsize) + if not UEB_length_s: + return False + (UEB_length,) = struct.unpack(">"+self._fieldstruct, UEB_length_s) + UEB_s = self._received.pop(o["uri_extension"]+fsize, UEB_length) + if not UEB_s: + return False + self._received.remove(o["uri_extension"], fsize) + try: + self._node.validate_and_store_UEB(UEB_s) + return True + except (LayoutInvalid, BadHashError), e: + # TODO: if this UEB was bad, we'll keep trying to validate it + # over and over again. Only log.err on the first one, or better + # yet skip all but the first + f = Failure(e) + self._signal_corruption(f, o["uri_extension"], fsize+UEB_length) + self.had_corruption = True + raise + + def _satisfy_share_hash_tree(self): + # the share hash chain is stored as (hashnum,hash) tuples, so you + # can't fetch just the pieces you need, because you don't know + # exactly where they are. So fetch everything, and parse the results + # later. + o = self.actual_offsets + hashlen = o["uri_extension"] - o["share_hashes"] + assert hashlen % (2+HASH_SIZE) == 0 + hashdata = self._received.get(o["share_hashes"], hashlen) + if not hashdata: + return False + share_hashes = {} + for i in range(0, hashlen, 2+HASH_SIZE): + (hashnum,) = struct.unpack(">H", hashdata[i:i+2]) + hashvalue = hashdata[i+2:i+2+HASH_SIZE] + share_hashes[hashnum] = hashvalue + try: + self._node.process_share_hashes(share_hashes) + # adds to self._node.share_hash_tree + except (BadHashError, NotEnoughHashesError), e: + f = Failure(e) + self._signal_corruption(f, o["share_hashes"], hashlen) + self.had_corruption = True + raise + self._received.remove(o["share_hashes"], hashlen) + return True + + def _signal_corruption(self, f, start, offset): + # there was corruption somewhere in the given range + reason = "corruption in share[%d-%d): %s" % (start, start+offset, + str(f.value)) + self._rref.callRemoteOnly("advise_corrupt_share", "immutable", + self._storage_index, self._shnum, reason) + + def _satisfy_block_hash_tree(self, needed_hashes): + o_bh = self.actual_offsets["block_hashes"] + block_hashes = {} + for hashnum in needed_hashes: + hashdata = self._received.get(o_bh+hashnum*HASH_SIZE, HASH_SIZE) + if hashdata: + block_hashes[hashnum] = hashdata + else: + return False # missing some hashes + # note that we don't submit any hashes to the block_hash_tree until + # we've gotten them all, because the hash tree will throw an + # exception if we only give it a partial set (which it therefore + # cannot validate) + try: + self._commonshare.process_block_hashes(block_hashes) + except (BadHashError, NotEnoughHashesError), e: + f = Failure(e) + hashnums = ",".join([str(n) for n in sorted(block_hashes.keys())]) + log.msg(format="hash failure in block_hashes=(%(hashnums)s)," + " from %(share)s", + hashnums=hashnums, shnum=self._shnum, share=repr(self), + failure=f, level=log.WEIRD, parent=self._lp, umid="yNyFdA") + hsize = max(0, max(needed_hashes)) * HASH_SIZE + self._signal_corruption(f, o_bh, hsize) + self.had_corruption = True + raise + for hashnum in needed_hashes: + self._received.remove(o_bh+hashnum*HASH_SIZE, HASH_SIZE) + return True + + def _satisfy_ciphertext_hash_tree(self, needed_hashes): + start = self.actual_offsets["crypttext_hash_tree"] + hashes = {} + for hashnum in needed_hashes: + hashdata = self._received.get(start+hashnum*HASH_SIZE, HASH_SIZE) + if hashdata: + hashes[hashnum] = hashdata + else: + return False # 
missing some hashes + # we don't submit any hashes to the ciphertext_hash_tree until we've + # gotten them all + try: + self._node.process_ciphertext_hashes(hashes) + except (BadHashError, NotEnoughHashesError), e: + f = Failure(e) + hashnums = ",".join([str(n) for n in sorted(hashes.keys())]) + log.msg(format="hash failure in ciphertext_hashes=(%(hashnums)s)," + " from %(share)s", + hashnums=hashnums, share=repr(self), failure=f, + level=log.WEIRD, parent=self._lp, umid="iZI0TA") + hsize = max(0, max(needed_hashes))*HASH_SIZE + self._signal_corruption(f, start, hsize) + self.had_corruption = True + raise + for hashnum in needed_hashes: + self._received.remove(start+hashnum*HASH_SIZE, HASH_SIZE) + return True + + def _satisfy_data_block(self, segnum, observers): + tail = (segnum == self._node.num_segments-1) + datastart = self.actual_offsets["data"] + blockstart = datastart + segnum * self._node.block_size + blocklen = self._node.block_size + if tail: + blocklen = self._node.tail_block_size + + block = self._received.pop(blockstart, blocklen) + if not block: + log.msg("no data for block %s (want [%d:+%d])" % (repr(self), + blockstart, blocklen)) + return False + log.msg(format="%(share)s._satisfy_data_block [%(start)d:+%(length)d]", + share=repr(self), start=blockstart, length=blocklen, + level=log.NOISY, parent=self._lp, umid="uTDNZg") + # this block is being retired, either as COMPLETE or CORRUPT, since + # no further data reads will help + assert self._requested_blocks[0][0] == segnum + try: + self._commonshare.check_block(segnum, block) + # hurrah, we have a valid block. Deliver it. + for o in observers: + # goes to SegmentFetcher._block_request_activity + o.notify(state=COMPLETE, block=block) + except (BadHashError, NotEnoughHashesError), e: + # rats, we have a corrupt block. Notify our clients that they + # need to look elsewhere, and advise the server. Unlike + # corruption in other parts of the share, this doesn't cause us + # to abandon the whole share. + f = Failure(e) + log.msg(format="hash failure in block %(segnum)d, from %(share)s", + segnum=segnum, share=repr(self), failure=f, + level=log.WEIRD, parent=self._lp, umid="mZjkqA") + for o in observers: + o.notify(state=CORRUPT) + self._signal_corruption(f, blockstart, blocklen) + self.had_corruption = True + # in either case, we've retired this block + self._requested_blocks.pop(0) + # popping the request keeps us from turning around and wanting the + # block again right away + return True # got satisfaction + + def _desire(self): + segnum, observers = self._active_segnum_and_observers() # maybe None + + # 'want_it' is for data we merely want: we know that we don't really + # need it. This includes speculative reads, like the first 1KB of the + # share (for the offset table) and the first 2KB of the UEB. + # + # 'need_it' is for data that, if we have the real offset table, we'll + # need. If we are only guessing at the offset table, it's merely + # wanted. (The share is abandoned if we can't get data that we really + # need). + # + # 'gotta_gotta_have_it' is for data that we absolutely need, + # independent of whether we're still guessing about the offset table: + # the version number and the offset table itself. 
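+        # The three tiers are merged on the way out, at the bottom of this
+        # method: once the offset table is known, need_it becomes as
+        # mandatory as gotta_gotta_have_it; until then it is merely wanted.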
+ + desire = Spans(), Spans(), Spans() + (want_it, need_it, gotta_gotta_have_it) = desire + + self.actual_segment_size = self._node.segment_size # might be updated + o = self.actual_offsets or self.guessed_offsets + segsize = self.actual_segment_size or self.guessed_segment_size + r = self._node._calculate_sizes(segsize) + + if not self.actual_offsets: + # all _desire functions add bits to the three desire[] spans + self._desire_offsets(desire) + + # we can use guessed offsets as long as this server tolerates + # overrun. Otherwise, we must wait for the offsets to arrive before + # we try to read anything else. + if self.actual_offsets or self._overrun_ok: + if not self._node.have_UEB: + self._desire_UEB(desire, o) + # They might ask for a segment that doesn't look right. + # _satisfy() will catch+reject bad segnums once we know the UEB + # (and therefore segsize and numsegs), so we'll only fail this + # test if we're still guessing. We want to avoid asking the + # hashtrees for needed_hashes() for bad segnums. So don't enter + # _desire_hashes or _desire_data unless the segnum looks + # reasonable. + if segnum < r["num_segments"]: + # XXX somehow we're getting here for sh5. we don't yet know + # the actual_segment_size, we're still working off the guess. + # the ciphertext_hash_tree has been corrected, but the + # commonshare._block_hash_tree is still in the guessed state. + self._desire_share_hashes(desire, o) + if segnum is not None: + self._desire_block_hashes(desire, o, segnum) + self._desire_data(desire, o, r, segnum, segsize) + else: + log.msg("_desire: segnum(%d) looks wrong (numsegs=%d)" + % (segnum, r["num_segments"]), + level=log.UNUSUAL, parent=self._lp, umid="tuYRQQ") + + log.msg("end _desire: want_it=%s need_it=%s gotta=%s" + % (want_it.dump(), need_it.dump(), gotta_gotta_have_it.dump())) + if self.actual_offsets: + return (want_it, need_it+gotta_gotta_have_it) + else: + return (want_it+need_it, gotta_gotta_have_it) + + def _desire_offsets(self, desire): + (want_it, need_it, gotta_gotta_have_it) = desire + if self._overrun_ok: + # easy! this includes version number, sizes, and offsets + want_it.add(0, 1024) + return + + # v1 has an offset table that lives [0x0,0x24). v2 lives [0x0,0x44). + # To be conservative, only request the data that we know lives there, + # even if that means more roundtrips. + + gotta_gotta_have_it.add(0, 4) # version number, always safe + version_s = self._received.get(0, 4) + if not version_s: + return + (version,) = struct.unpack(">L", version_s) + # The code in _satisfy_offsets will have checked this version + # already. There is no code path to get this far with version>2. + assert 1 <= version <= 2, "can't get here, version=%d" % version + if version == 1: + table_start = 0x0c + fieldsize = 0x4 + elif version == 2: + table_start = 0x14 + fieldsize = 0x8 + offset_table_size = 6 * fieldsize + gotta_gotta_have_it.add(table_start, offset_table_size) + + def _desire_UEB(self, desire, o): + (want_it, need_it, gotta_gotta_have_it) = desire + + # UEB data is stored as (length,data). + if self._overrun_ok: + # We can pre-fetch 2kb, which should probably cover it. If it + # turns out to be larger, we'll come back here later with a known + # length and fetch the rest. + want_it.add(o["uri_extension"], 2048) + # now, while that is probably enough to fetch the whole UEB, it + # might not be, so we need to do the next few steps as well. 
In + # most cases, the following steps will not actually add anything + # to need_it + + need_it.add(o["uri_extension"], self._fieldsize) + # only use a length if we're sure it's correct, otherwise we'll + # probably fetch a huge number + if not self.actual_offsets: + return + UEB_length_s = self._received.get(o["uri_extension"], self._fieldsize) + if UEB_length_s: + (UEB_length,) = struct.unpack(">"+self._fieldstruct, UEB_length_s) + # we know the length, so make sure we grab everything + need_it.add(o["uri_extension"]+self._fieldsize, UEB_length) + + def _desire_share_hashes(self, desire, o): + (want_it, need_it, gotta_gotta_have_it) = desire + + if self._node.share_hash_tree.needed_hashes(self._shnum): + hashlen = o["uri_extension"] - o["share_hashes"] + need_it.add(o["share_hashes"], hashlen) + + def _desire_block_hashes(self, desire, o, segnum): + (want_it, need_it, gotta_gotta_have_it) = desire + + # block hash chain + for hashnum in self._commonshare.get_needed_block_hashes(segnum): + need_it.add(o["block_hashes"]+hashnum*HASH_SIZE, HASH_SIZE) + + # ciphertext hash chain + for hashnum in self._node.get_needed_ciphertext_hashes(segnum): + need_it.add(o["crypttext_hash_tree"]+hashnum*HASH_SIZE, HASH_SIZE) + + def _desire_data(self, desire, o, r, segnum, segsize): + (want_it, need_it, gotta_gotta_have_it) = desire + tail = (segnum == r["num_segments"]-1) + datastart = o["data"] + blockstart = datastart + segnum * r["block_size"] + blocklen = r["block_size"] + if tail: + blocklen = r["tail_block_size"] + need_it.add(blockstart, blocklen) + + def _send_requests(self, desired): + ask = desired - self._pending + log.msg("%s._send_requests, desired=%s, pending=%s, ask=%s" % + (repr(self), desired.dump(), self._pending.dump(), ask.dump()), + level=log.NOISY, parent=self._lp, umid="E94CVA") + # XXX At one time, this code distinguished between data blocks and + # hashes, and made sure to send (small) requests for hashes before + # sending (big) requests for blocks. The idea was to make sure that + # all hashes arrive before the blocks, so the blocks can be consumed + # and released in a single turn. I removed this for simplicity. + # Reconsider the removal: maybe bring it back. + + for (start, length) in ask: + # TODO: quantize to reasonably-large blocks + self._pending.add(start, length) + lp = log.msg(format="%(share)s._send_request" + " [%(start)d:+%(length)d]", + share=repr(self), + start=start, length=length, + level=log.NOISY, parent=self._lp, umid="sgVAyA") + d = self._send_request(start, length) + d.addCallback(self._got_data, start, length, lp) + d.addErrback(self._got_error, start, length, lp) + d.addCallback(self._trigger_loop) + d.addErrback(lambda f: + log.err(format="unhandled error during send_request", + failure=f, parent=self._lp, + level=log.WEIRD, umid="qZu0wg")) + + def _send_request(self, start, length): + return self._rref.callRemote("read", start, length) + + def _got_data(self, data, start, length, lp): + if not self._alive: + return + log.msg(format="%(share)s._got_data [%(start)d:+%(length)d] -> %(datalen)d", + share=repr(self), start=start, length=length, datalen=len(data), + level=log.NOISY, parent=lp, umid="5Qn6VQ") + self._pending.remove(start, length) + self._received.add(start, data) + + # if we ask for [a:c], and we get back [a:b] (b PENDING + # PENDING -(timer)-> OVERDUE + # PENDING -(rx)-> COMPLETE, CORRUPT, DEAD, BADSEGNUM + # OVERDUE -(rx)-> COMPLETE, CORRUPT, DEAD, BADSEGNUM + # If a share becomes DEAD, it is removed from the + # dict. 
If it becomes BADSEGNUM, the whole fetch is + # terminated. + self._share_observers = {} # maps Share to Observer2 for active ones + self._shnums = DictOfSets() # maps shnum to the shares that provide it + self._blocks = {} # maps shnum to validated block data + self._no_more_shares = False + self._bad_segnum = False + self._last_failure = None + self._running = True + + def stop(self): + log.msg("SegmentFetcher(%s).stop" % self._node._si_prefix, + level=log.NOISY, umid="LWyqpg") + self._cancel_all_requests() + self._running = False + self._shares.clear() # let GC work # ??? XXX + + + # called by our parent _Node + + def add_shares(self, shares): + # called when ShareFinder locates a new share, and when a non-initial + # segment fetch is started and we already know about shares from the + # previous segment + for s in shares: + self._shares[s] = AVAILABLE + self._shnums.add(s._shnum, s) + eventually(self.loop) + + def no_more_shares(self): + # ShareFinder tells us it's reached the end of its list + self._no_more_shares = True + eventually(self.loop) + + # internal methods + + def _count_shnums(self, *states): + """shnums for which at least one state is in the following list""" + shnums = [] + for shnum,shares in self._shnums.iteritems(): + matches = [s for s in shares if self._shares.get(s) in states] + if matches: + shnums.append(shnum) + return len(shnums) + + def loop(self): + try: + # if any exception occurs here, kill the download + self._do_loop() + except BaseException: + self._node.fetch_failed(self, Failure()) + raise + + def _do_loop(self): + k = self._k + if not self._running: + return + if self._bad_segnum: + # oops, we were asking for a segment number beyond the end of the + # file. This is an error. + self.stop() + e = BadSegmentNumberError("segnum=%d, numsegs=%d" % + (self.segnum, self._node.num_segments)) + f = Failure(e) + self._node.fetch_failed(self, f) + return + + # are we done? + if self._count_shnums(COMPLETE) >= k: + # yay! + self.stop() + self._node.process_blocks(self.segnum, self._blocks) + return + + # we may have exhausted everything + if (self._no_more_shares and + self._count_shnums(AVAILABLE, PENDING, OVERDUE, COMPLETE) < k): + # no more new shares are coming, and the remaining hopeful shares + # aren't going to be enough. boo! + + log.msg("share states: %r" % (self._shares,), + level=log.NOISY, umid="0ThykQ") + if self._count_shnums(AVAILABLE, PENDING, OVERDUE, COMPLETE) == 0: + format = ("no shares (need %(k)d)." + " Last failure: %(last_failure)s") + args = { "k": k, + "last_failure": self._last_failure } + error = NoSharesError + else: + format = ("ran out of shares: %(complete)d complete," + " %(pending)d pending, %(overdue)d overdue," + " %(unused)d unused, need %(k)d." + " Last failure: %(last_failure)s") + args = {"complete": self._count_shnums(COMPLETE), + "pending": self._count_shnums(PENDING), + "overdue": self._count_shnums(OVERDUE), + # 'unused' should be zero + "unused": self._count_shnums(AVAILABLE), + "k": k, + "last_failure": self._last_failure, + } + error = NotEnoughSharesError + log.msg(format=format, level=log.UNUSUAL, umid="1DsnTg", **args) + e = error(format % args) + f = Failure(e) + self.stop() + self._node.fetch_failed(self, f) + return + + # nope, not done. Are we "block-hungry" (i.e. do we want to send out + # more read requests, or do we think we have enough in flight + # already?) + while self._count_shnums(PENDING, COMPLETE) < k: + # we're hungry.. are there any unused shares? 
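+            # (each pass sends at most one new request: _send_new_request()
+            # returns False once every known shnum is already PENDING or
+            # COMPLETE, or has no AVAILABLE candidate left)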
+            sent = self._send_new_request()
+            if not sent:
+                break
+
+        # ok, now are we "share-hungry" (i.e. do we have enough known shares
+        # to make us happy, or should we ask the ShareFinder to get us more?)
+        if self._count_shnums(AVAILABLE, PENDING, COMPLETE) < k:
+            # we're hungry for more shares
+            self._node.want_more_shares()
+            # that will trigger the ShareFinder to keep looking
+
+    def _find_one(self, shares, state):
+        # TODO could choose fastest
+        for s in shares:
+            if self._shares[s] == state:
+                return s
+        # can never get here, caller has assert in case of code bug
+
+    def _send_new_request(self):
+        for shnum,shares in self._shnums.iteritems():
+            states = [self._shares[s] for s in shares]
+            if COMPLETE in states or PENDING in states:
+                # don't send redundant requests
+                continue
+            if AVAILABLE not in states:
+                # no candidates for this shnum, move on
+                continue
+            # here's a candidate. Send a request.
+            s = self._find_one(shares, AVAILABLE)
+            assert s
+            self._shares[s] = PENDING
+            self._share_observers[s] = o = s.get_block(self.segnum)
+            o.subscribe(self._block_request_activity, share=s, shnum=shnum)
+            # TODO: build up a list of candidates, then walk through the
+            # list, sending requests to the most desirable servers,
+            # re-checking our block-hunger each time. For non-initial segment
+            # fetches, this would let us stick with faster servers.
+            return True
+        # nothing was sent: don't call us again until you have more shares to
+        # work with, or one of the existing shares has been declared OVERDUE
+        return False
+
+    def _cancel_all_requests(self):
+        for o in self._share_observers.values():
+            o.cancel()
+        self._share_observers = {}
+
+    def _block_request_activity(self, share, shnum, state, block=None, f=None):
+        # called by Shares, in response to our s.get_block() calls.
+        if not self._running:
+            return
+        log.msg("SegmentFetcher(%s)._block_request_activity:"
+                " Share(sh%d-on-%s) -> %s" %
+                (self._node._si_prefix, shnum, share._peerid_s, state),
+                level=log.NOISY, umid="vilNWA")
+        # COMPLETE, CORRUPT, DEAD, BADSEGNUM are terminal.
+        if state in (COMPLETE, CORRUPT, DEAD, BADSEGNUM):
+            self._share_observers.pop(share, None)
+        if state is COMPLETE:
+            # 'block' is fully validated
+            self._shares[share] = COMPLETE
+            self._blocks[shnum] = block
+        elif state is OVERDUE:
+            self._shares[share] = OVERDUE
+            # OVERDUE is not terminal: it will eventually transition to
+            # COMPLETE, CORRUPT, or DEAD.
+        elif state is CORRUPT:
+            self._shares[share] = CORRUPT
+        elif state is DEAD:
+            del self._shares[share]
+            self._shnums[shnum].remove(share)
+            self._last_failure = f
+        elif state is BADSEGNUM:
+            self._shares[share] = BADSEGNUM # ??? 
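+            # (the per-share BADSEGNUM state is mostly cosmetic, hence the
+            # '???' above: the flag below ends the whole fetch on the next
+            # loop() pass anyway)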
+ self._bad_segnum = True + eventually(self.loop) + + +class RequestToken: + def __init__(self, peerid): + self.peerid = peerid + +class ShareFinder: + def __init__(self, storage_broker, verifycap, node, logparent=None, + max_outstanding_requests=10): + self.running = True # stopped by Share.stop, from Terminator + self.verifycap = verifycap + self._started = False + self._storage_broker = storage_broker + self.share_consumer = self.node = node + self.max_outstanding_requests = max_outstanding_requests + + self._hungry = False + + self._commonshares = {} # shnum to CommonShare instance + self.undelivered_shares = [] + self.pending_requests = set() + + self._storage_index = verifycap.storage_index + self._si_prefix = base32.b2a_l(self._storage_index[:8], 60) + self._node_logparent = logparent + self._lp = log.msg(format="ShareFinder[si=%(si)s] starting", + si=self._si_prefix, + level=log.NOISY, parent=logparent, umid="2xjj2A") + + def start_finding_servers(self): + # don't get servers until somebody uses us: creating the + # ImmutableFileNode should not cause work to happen yet. Test case is + # test_dirnode, which creates us with storage_broker=None + if not self._started: + si = self.verifycap.storage_index + s = self._storage_broker.get_servers_for_index(si) + self._servers = iter(s) + self._started = True + + def log(self, *args, **kwargs): + if "parent" not in kwargs: + kwargs["parent"] = self._lp + return log.msg(*args, **kwargs) + + def stop(self): + self.running = False + + # called by our parent CiphertextDownloader + def hungry(self): + self.log(format="ShareFinder[si=%(si)s] hungry", + si=self._si_prefix, level=log.NOISY, umid="NywYaQ") + self.start_finding_servers() + self._hungry = True + eventually(self.loop) + + # internal methods + def loop(self): + undelivered_s = ",".join(["sh%d@%s" % + (s._shnum, idlib.shortnodeid_b2a(s._peerid)) + for s in self.undelivered_shares]) + pending_s = ",".join([idlib.shortnodeid_b2a(rt.peerid) + for rt in self.pending_requests]) # sort? + self.log(format="ShareFinder loop: running=%(running)s" + " hungry=%(hungry)s, undelivered=%(undelivered)s," + " pending=%(pending)s", + running=self.running, hungry=self._hungry, + undelivered=undelivered_s, pending=pending_s, + level=log.NOISY, umid="kRtS4Q") + if not self.running: + return + if not self._hungry: + return + if self.undelivered_shares: + sh = self.undelivered_shares.pop(0) + # they will call hungry() again if they want more + self._hungry = False + self.log(format="delivering Share(shnum=%(shnum)d, server=%(peerid)s)", + shnum=sh._shnum, peerid=sh._peerid_s, + level=log.NOISY, umid="2n1qQw") + eventually(self.share_consumer.got_shares, [sh]) + return + if len(self.pending_requests) >= self.max_outstanding_requests: + # cannot send more requests, must wait for some to retire + return + + server = None + try: + if self._servers: + server = self._servers.next() + except StopIteration: + self._servers = None + + if server: + self.send_request(server) + return + + if self.pending_requests: + # no server, but there are still requests in flight: maybe one of + # them will make progress + return + + self.log(format="ShareFinder.loop: no_more_shares, ever", + level=log.UNUSUAL, umid="XjQlzg") + # we've run out of servers (so we can't send any more requests), and + # we have nothing in flight. No further progress can be made. They + # are destined to remain hungry. 
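+        # (no_more_shares() sets a flag in the SegmentFetcher; on its next
+        # loop() pass it will fail the fetch with NoSharesError or
+        # NotEnoughSharesError if the shares it already has cannot reach k)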
+ self.share_consumer.no_more_shares() + + def send_request(self, server): + peerid, rref = server + req = RequestToken(peerid) + self.pending_requests.add(req) + lp = self.log(format="sending DYHB to [%(peerid)s]", + peerid=idlib.shortnodeid_b2a(peerid), + level=log.NOISY, umid="Io7pyg") + d = rref.callRemote("get_buckets", self._storage_index) + d.addBoth(incidentally, self.pending_requests.discard, req) + d.addCallbacks(self._got_response, self._got_error, + callbackArgs=(rref.version, peerid, req, lp), + errbackArgs=(peerid, req, lp)) + d.addErrback(log.err, format="error in send_request", + level=log.WEIRD, parent=lp, umid="rpdV0w") + d.addCallback(incidentally, eventually, self.loop) + + def _got_response(self, buckets, server_version, peerid, req, lp): + if buckets: + shnums_s = ",".join([str(shnum) for shnum in buckets]) + self.log(format="got shnums [%(shnums)s] from [%(peerid)s]", + shnums=shnums_s, peerid=idlib.shortnodeid_b2a(peerid), + level=log.NOISY, parent=lp, umid="0fcEZw") + else: + self.log(format="no shares from [%(peerid)s]", + peerid=idlib.shortnodeid_b2a(peerid), + level=log.NOISY, parent=lp, umid="U7d4JA") + if self.node.num_segments is None: + best_numsegs = self.node.guessed_num_segments + else: + best_numsegs = self.node.num_segments + for shnum, bucket in buckets.iteritems(): + if shnum in self._commonshares: + cs = self._commonshares[shnum] + else: + cs = CommonShare(best_numsegs, self._si_prefix, shnum, + self._node_logparent) + # Share._get_satisfaction is responsible for updating + # CommonShare.set_numsegs after we know the UEB. Alternatives: + # 1: d = self.node.get_num_segments() + # d.addCallback(cs.got_numsegs) + # the problem is that the OneShotObserverList I was using + # inserts an eventual-send between _get_satisfaction's + # _satisfy_UEB and _satisfy_block_hash_tree, and the + # CommonShare didn't get the num_segs message before + # being asked to set block hash values. To resolve this + # would require an immediate ObserverList instead of + # an eventual-send -based one + # 2: break _get_satisfaction into Deferred-attached pieces. + # Yuck. + self._commonshares[shnum] = cs + s = Share(bucket, server_version, self.verifycap, cs, self.node, + peerid, shnum, self._node_logparent) + self.undelivered_shares.append(s) + + def _got_error(self, f, peerid, req, lp): + self.log(format="got error from [%(peerid)s]", + peerid=idlib.shortnodeid_b2a(peerid), failure=f, + level=log.UNUSUAL, parent=lp, umid="zUKdCw") + + + +class Segmentation: + """I am responsible for a single offset+size read of the file. I handle + segmentation: I figure out which segments are necessary, request them + (from my CiphertextDownloader) in order, and trim the segments down to + match the offset+size span. I use the Producer/Consumer interface to only + request one segment at a time. + """ + implements(IPushProducer) + def __init__(self, node, offset, size, consumer, logparent=None): + self._node = node + self._hungry = True + self._active_segnum = None + self._cancel_segment_request = None + # these are updated as we deliver data. 
At any given time, we still
+        # want to download file[offset:offset+size]
+        self._offset = offset
+        self._size = size
+        assert offset+size <= node._verifycap.size
+        self._consumer = consumer
+        self._lp = logparent
+
+    def start(self):
+        self._alive = True
+        self._deferred = defer.Deferred()
+        self._consumer.registerProducer(self, True)
+        self._maybe_fetch_next()
+        return self._deferred
+
+    def _maybe_fetch_next(self):
+        if not self._alive or not self._hungry:
+            return
+        if self._active_segnum is not None:
+            return
+        self._fetch_next()
+
+    def _fetch_next(self):
+        if self._size == 0:
+            # done!
+            self._alive = False
+            self._hungry = False
+            self._consumer.unregisterProducer()
+            self._deferred.callback(self._consumer)
+            return
+        n = self._node
+        have_actual_segment_size = n.segment_size is not None
+        guess_s = ""
+        if not have_actual_segment_size:
+            guess_s = "probably "
+        segment_size = n.segment_size or n.guessed_segment_size
+        if self._offset == 0:
+            # great! we want segment0 for sure
+            wanted_segnum = 0
+        else:
+            # this might be a guess
+            wanted_segnum = self._offset // segment_size
+        log.msg(format="_fetch_next(offset=%(offset)d) %(guess)swants segnum=%(segnum)d",
+                offset=self._offset, guess=guess_s, segnum=wanted_segnum,
+                level=log.NOISY, parent=self._lp, umid="5WfN0w")
+        self._active_segnum = wanted_segnum
+        d,c = n.get_segment(wanted_segnum, self._lp)
+        self._cancel_segment_request = c
+        d.addBoth(self._request_retired)
+        d.addCallback(self._got_segment, have_actual_segment_size,
+                      wanted_segnum)
+        d.addErrback(self._retry_bad_segment, have_actual_segment_size)
+        d.addErrback(self._error)
+
+    def _request_retired(self, res):
+        self._active_segnum = None
+        self._cancel_segment_request = None
+        return res
+
+    def _got_segment(self, (segment_start,segment), had_actual_segment_size,
+                     wanted_segnum):
+        self._cancel_segment_request = None
+        # we got file[segment_start:segment_start+len(segment)]
+        # we want file[self._offset:self._offset+self._size]
+        log.msg(format="Segmentation got data:"
+                " want [%(wantstart)d-%(wantend)d),"
+                " given [%(segstart)d-%(segend)d), for segnum=%(segnum)d",
+                wantstart=self._offset, wantend=self._offset+self._size,
+                segstart=segment_start, segend=segment_start+len(segment),
+                segnum=wanted_segnum,
+                level=log.OPERATIONAL, parent=self._lp, umid="32dHcg")
+
+        o = overlap(segment_start, len(segment), self._offset, self._size)
+        # the overlap is file[o[0]:o[0]+o[1]]
+        if not o or o[0] != self._offset:
+            # we didn't get the first byte, so we can't use this segment
+            if had_actual_segment_size:
+                # and we should have gotten it right. This is a big problem.
+                log.msg("Segmentation handed wrong data (but we knew better):"
+                        " want [%d-%d), given [%d-%d), for segnum=%d,"
+                        " for si=%s"
+                        % (self._offset, self._offset+self._size,
+                           segment_start, segment_start+len(segment),
+                           wanted_segnum, self._node._si_prefix),
+                        level=log.WEIRD, parent=self._lp, umid="STlIiA")
+                raise BadSegmentError("Despite knowing the segment size,"
+                                      " I was given the wrong data."
+                                      " I cannot cope.")
+            # we've wasted some bandwidth, but now we can grab the right one,
+            # because we should know the segsize by now. 
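+            # (the assert below holds because delivering any segment
+            # requires a validated UEB, and validating the UEB sets the
+            # node's real segment_size)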
+ assert self._node.segment_size is not None + self._maybe_fetch_next() + return + offset_in_segment = self._offset - segment_start + desired_data = segment[offset_in_segment:offset_in_segment+o[1]] + + self._offset += len(desired_data) + self._size -= len(desired_data) + self._consumer.write(desired_data) + # the consumer might call our .pauseProducing() inside that write() + # call, setting self._hungry=False + self._maybe_fetch_next() + + def _retry_bad_segment(self, f, had_actual_segment_size): + f.trap(BadSegmentNumberError) # guessed way wrong, off the end + if had_actual_segment_size: + # but we should have known better, so this is a real error. This + # indicates a code bug. + log.msg("Segmentation retried and failed with wrong segnum", + level=log.WEIRD, parent=self._lp, umid="6Hd0ZA") + return f + # we didn't know better: try again with more information + assert self._node.segment_size is not None + return self._maybe_fetch_next() + + def _error(self, f): + log.msg("Error in Segmentation", failure=f, + level=log.WEIRD, parent=self._lp, umid="EYlXBg") + self._alive = False + self._hungry = False + self._consumer.unregisterProducer() + self._deferred.errback(f) + + def stopProducing(self): + self._hungry = False + self._alive = False + # cancel any outstanding segment request + if self._cancel_segment_request: + self._cancel_segment_request.cancel() + self._cancel_segment_request = None + def pauseProducing(self): + self._hungry = False + def resumeProducing(self): + self._hungry = True + eventually(self._maybe_fetch_next) + +class Cancel: + def __init__(self, f): + self._f = f + self.cancelled = False + def cancel(self): + if not self.cancelled: + self.cancelled = True + self._f(self) + +class _Node: + """Internal class which manages downloads and holds state. External + callers use CiphertextFileNode instead.""" + + # Share._node points to me + def __init__(self, verifycap, storage_broker, secret_holder, + terminator, history): + assert isinstance(verifycap, uri.CHKFileVerifierURI) + self._verifycap = verifycap + self._storage_broker = storage_broker + self._si_prefix = base32.b2a_l(verifycap.storage_index[:8], 60) + self.running = True + if terminator: + terminator.register(self) # calls self.stop() at stopService() + # the rules are: + # 1: Only send network requests if you're active (self.running is True) + # 2: Use TimerService, not reactor.callLater + # 3: You can do eventual-sends any time. + # These rules should mean that once + # stopService()+flushEventualQueue() fires, everything will be done. + self._secret_holder = secret_holder + self._history = history + + k, N = self._verifycap.needed_shares, self._verifycap.total_shares + self.share_hash_tree = IncompleteHashTree(N) + + # we guess the segment size, so Segmentation can pull non-initial + # segments in a single roundtrip. 
This populates + # .guessed_segment_size, .guessed_num_segments, and + # .ciphertext_hash_tree (with a dummy, to let us guess which hashes + # we'll need) + self._build_guessed_tables(DEFAULT_MAX_SEGMENT_SIZE) + + # filled in when we parse a valid UEB + self.have_UEB = False + self.segment_size = None + self.tail_segment_size = None + self.tail_segment_padded = None + self.num_segments = None + self.block_size = None + self.tail_block_size = None + #self.ciphertext_hash_tree = None # size depends on num_segments + + # things to track callers that want data + + # _segment_requests can have duplicates + self._segment_requests = [] # (segnum, d, cancel_handle) + self._active_segment = None # a SegmentFetcher, with .segnum + + # we create one top-level logparent for this _Node, and another one + # for each read() call. Segmentation and get_segment() messages are + # associated with the read() call, everything else is tied to the + # _Node's log entry. + lp = log.msg(format="Immutable _Node(%(si)s) created: size=%(size)d," + " guessed_segsize=%(guessed_segsize)d," + " guessed_numsegs=%(guessed_numsegs)d", + si=self._si_prefix, size=verifycap.size, + guessed_segsize=self.guessed_segment_size, + guessed_numsegs=self.guessed_num_segments, + level=log.OPERATIONAL, umid="uJ0zAQ") + self._lp = lp + + self._sharefinder = ShareFinder(storage_broker, verifycap, self, lp) + self._shares = set() + + def _build_guessed_tables(self, max_segment_size): + size = min(self._verifycap.size, max_segment_size) + s = mathutil.next_multiple(size, self._verifycap.needed_shares) + self.guessed_segment_size = s + r = self._calculate_sizes(self.guessed_segment_size) + self.guessed_num_segments = r["num_segments"] + # as with CommonShare, our ciphertext_hash_tree is a stub until we + # get the real num_segments + self.ciphertext_hash_tree = IncompleteHashTree(self.guessed_num_segments) + + def __repr__(self): + return "Imm_Node(%s)" % (self._si_prefix,) + + def stop(self): + # called by the Terminator at shutdown, mostly for tests + if self._active_segment: + self._active_segment.stop() + self._active_segment = None + self._sharefinder.stop() + + # things called by outside callers, via CiphertextFileNode. get_segment() + # may also be called by Segmentation. + + def read(self, consumer, offset=0, size=None): + """I am the main entry point, from which FileNode.read() can get + data. I feed the consumer with the desired range of ciphertext. I + return a Deferred that fires (with the consumer) when the read is + finished. + + Note that there is no notion of a 'file pointer': each call to read() + uses an independent offset= value.""" + # for concurrent operations: each gets its own Segmentation manager + if size is None: + size = self._verifycap.size + # clip size so offset+size does not go past EOF + size = min(size, self._verifycap.size-offset) + lp = log.msg(format="imm Node(%(si)s).read(%(offset)d, %(size)d)", + si=base32.b2a(self._verifycap.storage_index)[:8], + offset=offset, size=size, + level=log.OPERATIONAL, parent=self._lp, umid="l3j3Ww") + sp = self._history.stats_provider + sp.count("downloader.files_downloaded", 1) # really read() calls + sp.count("downloader.bytes_downloaded", size) + s = Segmentation(self, offset, size, consumer, lp) + # this raises an interesting question: what segments to fetch? if + # offset=0, always fetch the first segment, and then allow + # Segmentation to be responsible for pulling the subsequent ones if + # the first wasn't large enough. 
If offset>0, we're going to need an + # extra roundtrip to get the UEB (and therefore the segment size) + # before we can figure out which segment to get. TODO: allow the + # offset-table-guessing code (which starts by guessing the segsize) + # to assist the offset>0 process. + d = s.start() + return d + + def get_segment(self, segnum, logparent=None): + """Begin downloading a segment. I return a tuple (d, c): 'd' is a + Deferred that fires with (offset,data) when the desired segment is + available, and c is an object on which c.cancel() can be called to + disavow interest in the segment (after which 'd' will never fire). + + You probably need to know the segment size before calling this, + unless you want the first few bytes of the file. If you ask for a + segment number which turns out to be too large, the Deferred will + errback with BadSegmentNumberError. + + The Deferred fires with the offset of the first byte of the data + segment, so that you can call get_segment() before knowing the + segment size, and still know which data you received. + + The Deferred can also errback with other fatal problems, such as + NotEnoughSharesError, NoSharesError, or BadCiphertextHashError. + """ + log.msg(format="imm Node(%(si)s).get_segment(%(segnum)d)", + si=base32.b2a(self._verifycap.storage_index)[:8], + segnum=segnum, + level=log.OPERATIONAL, parent=logparent, umid="UKFjDQ") + d = defer.Deferred() + c = Cancel(self._cancel_request) + self._segment_requests.append( (segnum, d, c) ) + self._start_new_segment() + return (d, c) + + # things called by the Segmentation object used to transform + # arbitrary-sized read() calls into quantized segment fetches + + def _start_new_segment(self): + if self._active_segment is None and self._segment_requests: + segnum = self._segment_requests[0][0] + k = self._verifycap.needed_shares + log.msg(format="%(node)s._start_new_segment: segnum=%(segnum)d", + node=repr(self), segnum=segnum, + level=log.NOISY, umid="wAlnHQ") + self._active_segment = fetcher = SegmentFetcher(self, segnum, k) + active_shares = [s for s in self._shares if s.is_alive()] + fetcher.add_shares(active_shares) # this triggers the loop + + + # called by our child ShareFinder + def got_shares(self, shares): + self._shares.update(shares) + if self._active_segment: + self._active_segment.add_shares(shares) + def no_more_shares(self): + self._no_more_shares = True + if self._active_segment: + self._active_segment.no_more_shares() + + # things called by our Share instances + + def validate_and_store_UEB(self, UEB_s): + log.msg("validate_and_store_UEB", + level=log.OPERATIONAL, parent=self._lp, umid="7sTrPw") + h = hashutil.uri_extension_hash(UEB_s) + if h != self._verifycap.uri_extension_hash: + raise BadHashError + UEB_dict = uri.unpack_extension(UEB_s) + self._parse_and_store_UEB(UEB_dict) # sets self._stuff + # TODO: a malformed (but authentic) UEB could throw an assertion in + # _parse_and_store_UEB, and we should abandon the download. + self.have_UEB = True + + def _parse_and_store_UEB(self, d): + # Note: the UEB contains needed_shares and total_shares. These are + # redundant and inferior (the filecap contains the authoritative + # values). However, because it is possible to encode the same file in + # multiple ways, and the encoders might choose (poorly) to use the + # same key for both (therefore getting the same SI), we might + # encounter shares for both types. The UEB hashes will be different, + # however, and we'll disregard the "other" encoding's shares as + # corrupted. 
+ + # therefore, we ignore d['total_shares'] and d['needed_shares']. + + log.msg(format="UEB=%(ueb)s, vcap=%(vcap)s", + ueb=repr(d), vcap=self._verifycap.to_string(), + level=log.NOISY, parent=self._lp, umid="cVqZnA") + + k, N = self._verifycap.needed_shares, self._verifycap.total_shares + + self.segment_size = d['segment_size'] + + r = self._calculate_sizes(self.segment_size) + self.tail_segment_size = r["tail_segment_size"] + self.tail_segment_padded = r["tail_segment_padded"] + self.num_segments = r["num_segments"] + self.block_size = r["block_size"] + self.tail_block_size = r["tail_block_size"] + log.msg("actual sizes: %s" % (r,), + level=log.NOISY, parent=self._lp, umid="PY6P5Q") + if (self.segment_size == self.guessed_segment_size + and self.num_segments == self.guessed_num_segments): + log.msg("my guess was right!", + level=log.NOISY, parent=self._lp, umid="x340Ow") + else: + log.msg("my guess was wrong! Extra round trips for me.", + level=log.NOISY, parent=self._lp, umid="tb7RJw") + + # zfec.Decode() instantiation is fast, but still, let's use the same + # codec instance for all but the last segment. 3-of-10 takes 15us on + # my laptop, 25-of-100 is 900us, 3-of-255 is 97us, 25-of-255 is + # 2.5ms, worst-case 254-of-255 is 9.3ms + self._codec = CRSDecoder() + self._codec.set_params(self.segment_size, k, N) + + + # Ciphertext hash tree root is mandatory, so that there is at most + # one ciphertext that matches this read-cap or verify-cap. The + # integrity check on the shares is not sufficient to prevent the + # original encoder from creating some shares of file A and other + # shares of file B. + self.ciphertext_hash_tree = IncompleteHashTree(self.num_segments) + self.ciphertext_hash_tree.set_hashes({0: d['crypttext_root_hash']}) + + self.share_hash_tree.set_hashes({0: d['share_root_hash']}) + + # Our job is a fast download, not verification, so we ignore any + # redundant fields. The Verifier uses a different code path which + # does not ignore them. + + def _calculate_sizes(self, segment_size): + # segments of ciphertext + size = self._verifycap.size + k = self._verifycap.needed_shares + + # this assert matches the one in encode.py:127 inside + # Encoded._got_all_encoding_parameters, where the UEB is constructed + assert segment_size % k == 0 + + # the last segment is usually short. We don't store a whole segsize, + # but we do pad the segment up to a multiple of k, because the + # encoder requires that. + tail_segment_size = size % segment_size + if tail_segment_size == 0: + tail_segment_size = segment_size + padded = mathutil.next_multiple(tail_segment_size, k) + tail_segment_padded = padded + + num_segments = mathutil.div_ceil(size, segment_size) + + # each segment is turned into N blocks. All but the last are of size + # block_size, and the last is of size tail_block_size + block_size = segment_size / k + tail_block_size = tail_segment_padded / k + + return { "tail_segment_size": tail_segment_size, + "tail_segment_padded": tail_segment_padded, + "num_segments": num_segments, + "block_size": block_size, + "tail_block_size": tail_block_size, + } + + + def process_share_hashes(self, share_hashes): + for hashnum in share_hashes: + if hashnum >= len(self.share_hash_tree): + # "BadHashError" is normally for e.g. a corrupt block. We + # sort of abuse it here to mean a badly numbered hash (which + # indicates corruption in the number bytes, rather than in + # the data bytes). 
+ raise BadHashError("hashnum %d doesn't fit in hashtree(%d)" + % (hashnum, len(self.share_hash_tree))) + self.share_hash_tree.set_hashes(share_hashes) + + def get_needed_ciphertext_hashes(self, segnum): + cht = self.ciphertext_hash_tree + return cht.needed_hashes(segnum, include_leaf=True) + def process_ciphertext_hashes(self, hashes): + assert self.num_segments is not None + # this may raise BadHashError or NotEnoughHashesError + self.ciphertext_hash_tree.set_hashes(hashes) + + + # called by our child SegmentFetcher + + def want_more_shares(self): + self._sharefinder.hungry() + + def fetch_failed(self, sf, f): + assert sf is self._active_segment + self._active_segment = None + # deliver error upwards + for (d,c) in self._extract_requests(sf.segnum): + eventually(self._deliver, d, c, f) + + def process_blocks(self, segnum, blocks): + d = defer.maybeDeferred(self._decode_blocks, segnum, blocks) + d.addCallback(self._check_ciphertext_hash, segnum) + def _deliver(result): + log.msg(format="delivering segment(%(segnum)d)", + segnum=segnum, + level=log.OPERATIONAL, parent=self._lp, + umid="j60Ojg") + for (d,c) in self._extract_requests(segnum): + eventually(self._deliver, d, c, result) + self._active_segment = None + self._start_new_segment() + d.addBoth(_deliver) + d.addErrback(lambda f: + log.err("unhandled error during process_blocks", + failure=f, level=log.WEIRD, + parent=self._lp, umid="MkEsCg")) + + def _decode_blocks(self, segnum, blocks): + tail = (segnum == self.num_segments-1) + codec = self._codec + block_size = self.block_size + decoded_size = self.segment_size + if tail: + # account for the padding in the last segment + codec = CRSDecoder() + k, N = self._verifycap.needed_shares, self._verifycap.total_shares + codec.set_params(self.tail_segment_padded, k, N) + block_size = self.tail_block_size + decoded_size = self.tail_segment_padded + + shares = [] + shareids = [] + for (shareid, share) in blocks.iteritems(): + assert len(share) == block_size + shareids.append(shareid) + shares.append(share) + del blocks + + d = codec.decode(shares, shareids) # segment + del shares + def _process(buffers): + segment = "".join(buffers) + assert len(segment) == decoded_size + del buffers + if tail: + segment = segment[:self.tail_segment_size] + return segment + d.addCallback(_process) + return d + + def _check_ciphertext_hash(self, segment, segnum): + assert self._active_segment.segnum == segnum + assert self.segment_size is not None + offset = segnum * self.segment_size + + h = hashutil.crypttext_segment_hash(segment) + try: + self.ciphertext_hash_tree.set_hashes(leaves={segnum: h}) + return (offset, segment) + except (BadHashError, NotEnoughHashesError): + format = ("hash failure in ciphertext_hash_tree:" + " segnum=%(segnum)d, SI=%(si)s") + log.msg(format=format, segnum=segnum, si=self._si_prefix, + failure=Failure(), + level=log.WEIRD, parent=self._lp, umid="MTwNnw") + # this is especially weird, because we made it past the share + # hash tree. It implies that we're using the wrong encoding, or + # that the uploader deliberately constructed a bad UEB. 
+            msg = format % {"segnum": segnum, "si": self._si_prefix}
+            raise BadCiphertextHashError(msg)
+
+    def _deliver(self, d, c, result):
+        # this method exists to handle cancel() that occurs between
+        # _got_segment and _deliver
+        if not c.cancelled:
+            d.callback(result) # might actually be an errback
+
+    def _extract_requests(self, segnum):
+        """Remove matching requests and return their (d,c) tuples so that the
+        caller can retire them."""
+        retire = [(d,c) for (segnum0, d, c) in self._segment_requests
+                  if segnum0 == segnum]
+        self._segment_requests = [t for t in self._segment_requests
+                                  if t[0] != segnum]
+        return retire
+
+    def _cancel_request(self, c):
+        self._segment_requests = [t for t in self._segment_requests
+                                  if t[2] != c]
+        segnums = [segnum for (segnum,d,c) in self._segment_requests]
+        # self._active_segment might be None (e.g. after fetch_failed), so
+        # tolerate that here
+        if self._active_segment and self._active_segment.segnum not in segnums:
+            self._active_segment.stop()
+            self._active_segment = None
+            self._start_new_segment()
+
+    def check_and_repair(self, monitor, verify=False, add_lease=False):
+        verifycap = self._verifycap
+        storage_index = verifycap.storage_index
+        sb = self._storage_broker
+        servers = sb.get_all_servers()
+        sh = self._secret_holder
+
+        c = Checker(verifycap=verifycap, servers=servers,
+                    verify=verify, add_lease=add_lease, secret_holder=sh,
+                    monitor=monitor)
+        d = c.start()
+        def _maybe_repair(cr):
+            crr = CheckAndRepairResults(storage_index)
+            crr.pre_repair_results = cr
+            if cr.is_healthy():
+                crr.post_repair_results = cr
+                return defer.succeed(crr)
+            else:
+                crr.repair_attempted = True
+                crr.repair_successful = False # until proven successful
+                def _gather_repair_results(ur):
+                    assert IUploadResults.providedBy(ur), ur
+                    # clone the cr (check results) to form the basis of the
+                    # prr (post-repair results)
+                    prr = CheckResults(cr.uri, cr.storage_index)
+                    prr.data = copy.deepcopy(cr.data)
+
+                    sm = prr.data['sharemap']
+                    assert isinstance(sm, DictOfSets), sm
+                    sm.update(ur.sharemap)
+                    servers_responding = set(prr.data['servers-responding'])
+                    servers_responding.update(ur.sharemap.iterkeys())
+                    prr.data['servers-responding'] = list(servers_responding)
+                    prr.data['count-shares-good'] = len(sm)
+                    prr.data['count-good-share-hosts'] = len(sm)
+                    is_healthy = bool(len(sm) >= verifycap.total_shares)
+                    is_recoverable = bool(len(sm) >= verifycap.needed_shares)
+                    prr.set_healthy(is_healthy)
+                    prr.set_recoverable(is_recoverable)
+                    crr.repair_successful = is_healthy
+                    prr.set_needs_rebalancing(len(sm) >= verifycap.total_shares)
+
+                    crr.post_repair_results = prr
+                    return crr
+                def _repair_error(f):
+                    # as with mutable repair, I'm not sure if I want to pass
+                    # through a failure or not.
TODO + crr.repair_successful = False + crr.repair_failure = f + return f + r = Repairer(storage_broker=sb, secret_holder=sh, + verifycap=verifycap, monitor=monitor) + d = r.start() + d.addCallbacks(_gather_repair_results, _repair_error) + return d + + d.addCallback(_maybe_repair) + return d + + def check(self, monitor, verify=False, add_lease=False): + verifycap = self._verifycap + sb = self._storage_broker + servers = sb.get_all_servers() + sh = self._secret_holder + + v = Checker(verifycap=verifycap, servers=servers, + verify=verify, add_lease=add_lease, secret_holder=sh, + monitor=monitor) + return v.start() + +class CiphertextFileNode: + def __init__(self, verifycap, storage_broker, secret_holder, + terminator, history): + assert isinstance(verifycap, uri.CHKFileVerifierURI) + self._node = _Node(verifycap, storage_broker, secret_holder, + terminator, history) + + def read(self, consumer, offset=0, size=None): + """I am the main entry point, from which FileNode.read() can get + data. I feed the consumer with the desired range of ciphertext. I + return a Deferred that fires (with the consumer) when the read is + finished.""" + return self._node.read(consumer, offset, size) + + def get_segment(self, segnum): + """Begin downloading a segment. I return a tuple (d, c): 'd' is a + Deferred that fires with (offset,data) when the desired segment is + available, and c is an object on which c.cancel() can be called to + disavow interest in the segment (after which 'd' will never fire). + + You probably need to know the segment size before calling this, + unless you want the first few bytes of the file. If you ask for a + segment number which turns out to be too large, the Deferred will + errback with BadSegmentNumberError. + + The Deferred fires with the offset of the first byte of the data + segment, so that you can call get_segment() before knowing the + segment size, and still know which data you received. + """ + return self._node.get_segment(segnum) + + def raise_error(self): + pass + + + def check_and_repair(self, monitor, verify=False, add_lease=False): + return self._node.check_and_repair(monitor, verify, add_lease) + def check(self, monitor, verify=False, add_lease=False): + return self._node.check(monitor, verify, add_lease) + + +class DecryptingConsumer: + """I sit between a CiphertextDownloader (which acts as a Producer) and + the real Consumer, decrypting everything that passes by. The real + Consumer sees the real Producer, but the Producer sees us instead of the + real consumer.""" + implements(IConsumer) + + def __init__(self, consumer, readkey, offset): + self._consumer = consumer + # TODO: pycryptopp CTR-mode needs random-access operations: I want + # either a=AES(readkey, offset) or better yet both of: + # a=AES(readkey, offset=0) + # a.process(ciphertext, offset=xyz) + # For now, we fake it with the existing iv= argument. + offset_big = offset // 16 + offset_small = offset % 16 + iv = binascii.unhexlify("%032x" % offset_big) + self._decryptor = AES(readkey, iv=iv) + self._decryptor.process("\x00"*offset_small) + + def registerProducer(self, producer, streaming): + # this passes through, so the real consumer can flow-control the real + # producer. Therefore we don't need to provide any IPushProducer + # methods. We implement all the IConsumer methods as pass-throughs, + # and only intercept write() to perform decryption. 
+        self._consumer.registerProducer(producer, streaming)
+    def unregisterProducer(self):
+        self._consumer.unregisterProducer()
+    def write(self, ciphertext):
+        plaintext = self._decryptor.process(ciphertext)
+        self._consumer.write(plaintext)
+
+class ImmutableFileNode:
+    implements(IImmutableFileNode)
+
+    # I wrap a CiphertextFileNode with a decryption key
+    def __init__(self, filecap, storage_broker, secret_holder, terminator,
+                 history):
+        assert isinstance(filecap, uri.CHKFileURI)
+        verifycap = filecap.get_verify_cap()
+        self._cnode = CiphertextFileNode(verifycap, storage_broker,
+                                         secret_holder, terminator, history)
+        self.u = filecap
+        self._readkey = filecap.key
+
+    def read(self, consumer, offset=0, size=None):
+        decryptor = DecryptingConsumer(consumer, self._readkey, offset)
+        d = self._cnode.read(decryptor, offset, size)
+        d.addCallback(lambda dc: consumer)
+        return d
+
+    def raise_error(self):
+        pass
+
+    def get_write_uri(self):
+        return None
+
+    def get_readonly_uri(self):
+        return self.get_uri()
+
+    def get_uri(self):
+        return self.u.to_string()
+    def get_cap(self):
+        return self.u
+    def get_readcap(self):
+        return self.u.get_readonly()
+    def get_verify_cap(self):
+        return self.u.get_verify_cap()
+    def get_repair_cap(self):
+        # CHK files can be repaired with just the verifycap
+        return self.u.get_verify_cap()
+
+    def get_storage_index(self):
+        return self.u.get_storage_index()
+
+    def get_size(self):
+        return self.u.get_size()
+    def get_current_size(self):
+        return defer.succeed(self.get_size())
+
+    def is_mutable(self):
+        return False
+
+    def is_readonly(self):
+        return True
+
+    def is_unknown(self):
+        return False
+
+    def is_allowed_in_immutable_directory(self):
+        return True
+
+    def check_and_repair(self, monitor, verify=False, add_lease=False):
+        return self._cnode.check_and_repair(monitor, verify, add_lease)
+    def check(self, monitor, verify=False, add_lease=False):
+        return self._cnode.check(monitor, verify, add_lease)
+
+# TODO: if server1 has all shares, and server2-10 have one each, make the
+# loop stall slightly before requesting all shares from the first server, to
+# give it a chance to learn about the other shares and get some diversity.
+# Or, don't bother, let the first block all come from one server, and take
+# comfort in the fact that we'll learn about the other servers by the time we
+# fetch the second block.
+#
+# davidsarah points out that we could use sequential (instead of parallel)
+# fetching of multiple blocks from a single server: by the time the first
+# block arrives, we'll hopefully have heard about other shares. This would
+# induce some RTT delays (i.e. lose pipelining) in the case that this server
+# has the only shares, but that seems tolerable. We could rig it to only use
+# sequential requests on the first segment.
+
+# as a query gets later, we're more willing to duplicate work.
+
+# should change server read protocol to allow small shares to be fetched in a
+# single RTT. Instead of get_buckets-then-read, just use read(shnums, readv),
+# where shnums=[] means all shares, and the return value is a dict of
+# shnum->ta (like with mutable files). The DYHB query should also fetch the
+# offset table, since everything else can be located once we have that.
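
# A standalone sketch (an illustration, not part of the patch itself) of the
# CTR-mode seek trick that DecryptingConsumer uses above. AES-CTR produces a
# keystream of 16-byte blocks numbered from the IV, so decryption can begin
# at an arbitrary byte offset by setting the IV to the enclosing block
# number and discarding the first offset%16 keystream bytes. The helper name
# is hypothetical; AES is pycryptopp's CTR-mode cipher, as imported by this
# file.

import binascii
from pycryptopp.cipher.aes import AES

def make_decryptor_at_offset(readkey, offset):
    offset_big = offset // 16   # index of the enclosing 16-byte CTR block
    offset_small = offset % 16  # position within that block
    iv = binascii.unhexlify("%032x" % offset_big) # big-endian block counter
    decryptor = AES(readkey, iv=iv)
    decryptor.process("\x00" * offset_small) # burn the partial block
    return decryptor

# sanity property: make_decryptor_at_offset(k, off).process(ct[off:]) should
# equal AES(k, iv="\x00"*16).process(ct)[off:] for any byte offset 'off'.
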
+
+
+# ImmutableFileNode
+# DecryptingConsumer
+# CiphertextFileNode
+# Segmentation
+# ShareFinder
+# SegmentFetcher[segnum] (one at a time)
+# CommonShare[shnum]
+# Share[shnum,server]
+
+# TODO: when we learn numsegs, any get_segment() calls for bad blocknumbers
+# should be failed with BadSegmentNumberError. But should this be the
+# responsibility of CiphertextFileNode, or SegmentFetcher? The knowledge will
+# first appear when a Share receives a valid UEB and calls
+# CiphertextFileNode.validate_UEB, then _parse_UEB. The SegmentFetcher is
+# expecting to hear from the Share, via the _block_request_activity observer.
+
+# make it the responsibility of the SegmentFetcher. Each Share that gets a
+# valid UEB will tell the SegmentFetcher BADSEGNUM (instead of COMPLETE or
+# CORRUPT). The SegmentFetcher is then responsible for shutting down, and
+# informing its parent (the CiphertextFileNode) of the BadSegmentNumberError,
+# which is then passed to the client of get_segment().
+
+
+# TODO: if offset table is corrupt, attacker could cause us to fetch whole
+# (large) share
+
+# log budget: when downloading at 1MBps (i.e. 8 segments-per-second), 10
+# log.OPERATIONAL per second, 100 log.NOISY per second. With k=3, that's 3
+# log.NOISY per block fetch.
+
+
+# test_cli.Error failed for a while: ShareFinder created, used up
+# (NotEnoughSharesError), started again. The self.running=False is the
+# problem.
+#
+# The second download is hungry, but because ShareFinder.running is false, it
+# never notifies the SegmentFetcher that there are no more shares coming, so
+# the download never completes. To trigger this in tests, we need the first
+# download to want more shares (so it must fail with NotEnoughSharesError, or
+# we must lose a share/server between downloads).
+#
+# fix was to not call self.stop when ShareFinder runs out of shares. stop()
+# is now only called by the Terminator.
+
+# TODO: make sure that _signal_corruption(f) isn't sending private local
+# variables in the CopiedFailure
+
+# tests to write:
+# * truncated share, so _satisfy_* doesn't get all it wants
+# * v2 share, exercise large-offset-table code
+# * slow server
+# * hash failures of all sorts
diff --git a/src/allmydata/immutable/download2_off.pyOFF b/src/allmydata/immutable/download2_off.pyOFF
new file mode 100755
index 0000000..d2b8b99
--- /dev/null
+++ b/src/allmydata/immutable/download2_off.pyOFF
@@ -0,0 +1,634 @@
+#! /usr/bin/python
+
+# known (shnum,Server) pairs are sorted into a list according to
+# desirability. This sort is picking a winding path through a matrix of
+# [shnum][server]. The goal is to get diversity of both shnum and server.
+
+# The initial order is:
+#  find the lowest shnum on the first server, add it
+#  look at the next server, find the lowest shnum that we don't already have
+#   if any
+#  next server, etc, until all known servers are checked
+#  now look at servers that we skipped (because ...
+
+# Keep track of which block requests are outstanding by (shnum,Server). Don't
+# bother prioritizing "validated" shares: the overhead to pull the share hash
+# chain is tiny (4 hashes = 128 bytes), and the overhead to pull a new block
+# hash chain is also tiny (1GB file, 8192 segments of 128KiB each, 13 hashes,
+# 416 bytes). Each time a block request is sent, also request any necessary
+# hashes. Don't bother with a "ValidatedShare" class (as distinct from some
+# other sort of Share). Don't bother avoiding duplicate hash-chain requests.
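
# A quick sanity-check sketch (not part of the patch) of the hash-chain
# arithmetic above, using IncompleteHashTree the same way the new downloader
# does. A 1GB file in 128KiB segments has 8192 leaves, and validating one new
# leaf against a known root needs its uncle chain: one 32-byte hash per
# level, log2(8192) == 13 levels.

from allmydata.hashtree import IncompleteHashTree

ht = IncompleteHashTree(8192)  # one leaf per segment
# which hash positions are required to validate leaf (segment) 0?
# include_leaf=True counts the leaf hash itself, as the downloader does
needed = ht.needed_hashes(0, include_leaf=True)
# with nothing known yet, len(needed) is on the order of the tree depth:
# the leaf plus one uncle per level up toward the root
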
+ +# For each outstanding segread, walk the list and send requests (skipping +# outstanding shnums) until requests for k distinct shnums are in flight. If +# we can't do that, ask for more. If we get impatient on a request, find the +# first non-outstanding + +# start with the first Share in the list, and send a request. Then look at +# the next one. If we already have a pending request for the same shnum or +# server, push that Share down onto the fallback list and try the next one, +# etc. If we run out of non-fallback shares, use the fallback ones, +# preferring shnums that we don't have outstanding requests for (i.e. assume +# that all requests will complete). Do this by having a second fallback list. + +# hell, I'm reviving the Herder. But remember, we're still talking 3 objects +# per file, not thousands. + +# actually, don't bother sorting the initial list. Append Shares as the +# responses come back, that will put the fastest servers at the front of the +# list, and give a tiny preference to servers that are earlier in the +# permuted order. + +# more ideas: +# sort shares by: +# 1: number of roundtrips needed to get some data +# 2: share number +# 3: ms of RTT delay +# maybe measure average time-to-completion of requests, compare completion +# time against that, much larger indicates congestion on the server side +# or the server's upstream speed is less than our downstream. Minimum +# time-to-completion indicates min(our-downstream,their-upstream). Could +# fetch shares one-at-a-time to measure that better. + +# when should we risk duplicate work and send a new request? + +def walk(self): + shares = sorted(list) + oldshares = copy(shares) + outstanding = list() + fallbacks = list() + second_fallbacks = list() + while len(outstanding.nonlate.shnums) < k: # need more requests + while oldshares: + s = shares.pop(0) + if s.server in outstanding.servers or s.shnum in outstanding.shnums: + fallbacks.append(s) + continue + outstanding.append(s) + send_request(s) + break #'while need_more_requests' + # must use fallback list. Ask for more servers while we're at it. + ask_for_more_servers() + while fallbacks: + s = fallbacks.pop(0) + if s.shnum in outstanding.shnums: + # assume that the outstanding requests will complete, but + # send new requests for other shnums to existing servers + second_fallbacks.append(s) + continue + outstanding.append(s) + send_request(s) + break #'while need_more_requests' + # if we get here, we're being forced to send out multiple queries per + # share. We've already asked for more servers, which might help. If + # there are no late outstanding queries, then duplicate shares won't + # help. Don't send queries for duplicate shares until some of the + # queries are late. + if outstanding.late: + # we're allowed to try any non-outstanding share + while second_fallbacks: + pass + newshares = outstanding + fallbacks + second_fallbacks + oldshares + + +class Server: + """I represent an abstract Storage Server. One day, the StorageBroker + will return instances of me. For now, the StorageBroker returns (peerid, + RemoteReference) tuples, and this code wraps a Server instance around + them. + """ + def __init__(self, peerid, ss): + self.peerid = peerid + self.remote = ss + self._remote_buckets = {} # maps shnum to RIBucketReader + # TODO: release the bucket references on shares that we no longer + # want. OTOH, why would we not want them? Corruption? + + def send_query(self, storage_index): + """I return a Deferred that fires with a set of shnums. 
If the server
+        had shares available, I will retain the RemoteReferences to its
+        buckets, so that get_data(shnum, range) can be called later."""
+        d = self.remote.callRemote("get_buckets", storage_index)
+        d.addCallback(self._got_response)
+        return d
+
+    def _got_response(self, r):
+        self._remote_buckets = r
+        return set(r.keys())
+
+class ShareOnAServer:
+    """I represent one instance of a share, known to live on a specific
+    server. I am created every time a server responds affirmatively to a
+    do-you-have-block query."""
+
+    def __init__(self, shnum, server):
+        self._shnum = shnum
+        self._server = server
+        self._block_hash_tree = None
+
+    def cost(self, segnum):
+        """I return a tuple of (roundtrips, bytes, rtt), indicating how
+        expensive I think it would be to fetch the given segment. Roundtrips
+        indicates how many roundtrips it is likely to take (one to get the
+        data and hashes, plus one to get the offset table and UEB if this is
+        the first segment we've ever fetched). 'bytes' is how many bytes we
+        must fetch (estimated). 'rtt' is estimated round-trip time (float) in
+        seconds for a trivial request. The downloading algorithm will compare
+        costs to decide which shares should be used."""
+        # the most significant factor here is roundtrips: a Share for which
+        # we already have the offset table is better than a brand new one
+
+    def max_bandwidth(self):
+        """Return a float, indicating the highest plausible bytes-per-second
+        that I've observed coming from this share. This will be based upon
+        the minimum (bytes-per-fetch / time-per-fetch) ever observed. This
+        can be used to estimate the server's upstream bandwidth. Clearly this
+        is only accurate if a share is retrieved with no contention for
+        either the upstream, downstream, or middle of the connection, but it
+        may still serve as a useful metric for deciding which servers to pull
+        from."""
+
+    def get_segment(self, segnum):
+        """I return a Deferred that will fire with the segment data, or
+        errback."""
+
+class NativeShareOnAServer(ShareOnAServer):
+    """For tahoe native (foolscap) servers, I contain a RemoteReference to
+    the RIBucketReader instance."""
+    def __init__(self, shnum, server, rref):
+        ShareOnAServer.__init__(self, shnum, server)
+        self._rref = rref # RIBucketReader
+
+class Share:
+    def __init__(self, shnum):
+        self._shnum = shnum
+        # _servers are the Server instances which appear to hold a copy of
+        # this share. It is populated when the ValidShare is first created,
+        # or when we receive a get_buckets() response for a shnum that
+        # already has a ValidShare instance. When we lose the connection to a
+        # server, we remove it.
+        self._servers = set()
+        # offsets, UEB, and share_hash_tree all live in the parent.
+        # block_hash_tree lives here.
+        self._block_hash_tree = None
+
+        self._want
+
+    def get_servers(self):
+        return self._servers
+
+
+    def get_block(self, segnum):
+        # read enough data to obtain a single validated block
+        if not self.have_offsets:
+            # we get the offsets in their own read, since they tell us where
+            # everything else lives. We must fetch offsets for each share
+            # separately, since they aren't directly covered by the UEB.
+            pass
+        if not self.parent.have_ueb:
+            # use _guessed_segsize to make a guess about the layout, so we
+            # can fetch both the offset table and the UEB in the same read.
+            # This also requires making a guess about the presence or absence
+            # of the plaintext_hash_tree. Oh, and also the version number. Oh
+            # well.
+            pass
+
+class CiphertextDownloader:
+    """I manage all downloads for a single file. I operate a state machine
+    with input events that are local read() requests, responses to my remote
+    'get_bucket' and 'read_bucket' messages, and connection establishment and
+    loss. My outbound events are connection establishment requests and bucket
+    read request messages.
+    """
+    # eventually this will merge into the FileNode
+    ServerClass = Server # for tests to override
+
+    def __init__(self, storage_index, ueb_hash, size, k, N, storage_broker,
+                 shutdowner):
+        # values we get from the filecap
+        self._storage_index = si = storage_index
+        self._ueb_hash = ueb_hash
+        self._size = size
+        self._needed_shares = k
+        self._total_shares = N
+        self._storage_broker = storage_broker
+        self._share_hash_tree = IncompleteHashTree(self._total_shares)
+        # values we discover when we first fetch the UEB
+        self._ueb = None # is dict after UEB fetch+validate
+        self._segsize = None
+        self._numsegs = None
+        self._blocksize = None
+        self._tail_segsize = None
+        self._ciphertext_hash = None # optional
+        # structures we create when we fetch the UEB, then continue to fill
+        # as we download the file (_share_hash_tree was created above, since
+        # the filecap already tells us N)
+        self._ciphertext_hash_tree = None
+
+        # values we learn as we download the file
+        self._offsets = {} # (shnum,Server) to offset table (dict)
+        self._block_hash_tree = {} # shnum to IncompleteHashTree
+        # other things which help us
+        self._guessed_segsize = min(128*1024, size)
+        self._active_share_readers = {} # maps shnum to Reader instance
+        self._share_readers = [] # sorted by preference, best first
+        self._readers = set() # set of Reader instances
+        self._recent_horizon = 10 # seconds
+
+        # 'shutdowner' is a MultiService parent used to cancel all downloads
+        # when the node is shutting down, to let tests have a clean reactor.
+
+        self._init_available_servers()
+        self._init_find_enough_shares()
+
+    # _available_servers is an iterator that provides us with Server
+    # instances. Each time we pull out a Server, we immediately send it a
+    # query, so we don't need to keep track of who we've sent queries to.
+
+    def _init_available_servers(self):
+        self._available_servers = self._get_available_servers()
+        self._no_more_available_servers = False
+
+    def _get_available_servers(self):
+        """I am a generator of servers to use, sorted by the order in which
+        we should query them. I make sure there are no duplicates in this
+        list."""
+        # TODO: make StorageBroker responsible for this non-duplication, and
+        # replace this method with a simple iter(get_servers_for_index()),
+        # plus a self._no_more_available_servers=True
+        seen = set()
+        sb = self._storage_broker
+        for (peerid, ss) in sb.get_servers_for_index(self._storage_index):
+            if peerid not in seen:
+                yield self.ServerClass(peerid, ss) # Server(peerid, ss)
+                seen.add(peerid)
+        self._no_more_available_servers = True
+
+    # this block of code is responsible for having enough non-problematic
+    # distinct shares/servers available and ready for download, and for
+    # limiting the number of queries that are outstanding. The idea is that
+    # we'll use the k fastest/best shares, and have the other ones in reserve
+    # in case those servers stop responding or respond too slowly. We keep
+    # track of all known shares, but we also keep track of problematic shares
+    # (ones with hash failures or lost connections), so we can put them at
+    # the bottom of the list.
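
# A minimal sketch (not part of the patch; the class and method names are
# hypothetical) of the work-window policy described above: timestamp each
# outstanding query, count only "recent" ones against the limit, and let
# stalled queries age out instead of permanently consuming slots.

import time

class QueryWindow:
    def __init__(self, limit, recent_horizon=10):
        self.limit = limit                     # e.g. 4*k outstanding queries
        self.recent_horizon = recent_horizon   # seconds
        self._sent = {}                        # server -> timestamp

    def may_send(self):
        # queries older than the horizon no longer count against the window:
        # that server is presumed stalled, so we are free to look elsewhere
        cutoff = time.time() - self.recent_horizon
        recent = [s for (s, when) in self._sent.items() if when > cutoff]
        return len(recent) < self.limit

    def sent(self, server):
        self._sent[server] = time.time()

    def answered(self, server):
        self._sent.pop(server, None)  # an answer or error retires the query
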
+
+    def _init_find_enough_shares(self):
+        # _sharemap maps shnum to set of Servers, and remembers where viable
+        # (but not yet validated) shares are located. Each get_bucket()
+        # response adds to this map, each hash failure or disconnect removes
+        # from it. (TODO: if we disconnect but reconnect later, we should be
+        # allowed to re-query.)
+        self._sharemap = DictOfSets()
+
+        # _problem_shares is a set of (shnum, Server) tuples, and
+
+        # _queries_in_flight maps a Server to a timestamp, which remembers
+        # which servers we've sent queries to (and when) but have not yet
+        # heard a response. This lets us put a limit on the number of
+        # outstanding queries, to limit the size of the work window (how much
+        # extra work we ask servers to do in the hopes of keeping our own
+        # pipeline filled). We remove a Server from _queries_in_flight when
+        # we get an answer/error or we finally give up. If we ever switch to
+        # a non-connection-oriented protocol (like UDP, or forwarded Chord
+        # queries), we can use this information to retransmit any query that
+        # has gone unanswered for too long.
+        self._queries_in_flight = dict()
+
+    def _count_recent_queries_in_flight(self):
+        now = time.time()
+        recent = now - self._recent_horizon
+        return len([s for (s,when) in self._queries_in_flight.items()
+                    if when > recent])
+
+    def _find_enough_shares(self):
+        # goal: have 2*k distinct not-invalid shares available for reading,
+        # from 2*k distinct servers. Do not have more than 4*k "recent"
+        # queries in flight at a time.
+        servers = set()
+        for shnum_servers in self._sharemap.values():
+            servers.update(shnum_servers)
+        if (len(self._sharemap) >= 2*self._needed_shares
+            and len(servers) >= 2*self._needed_shares):
+            return
+        num = self._count_recent_queries_in_flight()
+        while num < 4*self._needed_shares:
+            try:
+                s = self._available_servers.next()
+            except StopIteration:
+                return # no more progress can be made
+            self._queries_in_flight[s] = time.time()
+            d = s.send_query(self._storage_index)
+            d.addBoth(incidentally, self._queries_in_flight.pop, s, None)
+            # bind s with a default argument, so each callback keeps the
+            # server it was created for
+            d.addCallbacks(lambda shnums, s=s: [self._sharemap.add(shnum, s)
+                                                for shnum in shnums],
+                           lambda f, s=s: self._query_error(f, s))
+            d.addErrback(self._error)
+            d.addCallback(self._reschedule)
+            num += 1
+
+    def _query_error(self, f, s):
+        # a server returned an error, log it gently and ignore
+        level = log.WEIRD
+        if f.check(DeadReferenceError):
+            level = log.UNUSUAL
+        log.msg("Error during get_buckets to server=%(server)s", server=str(s),
+                failure=f, level=level, umid="3uuBUQ")
+
+    # this block is responsible for turning known shares into usable shares,
+    # by fetching enough data to validate their contents.
+
+    # UEB (from any share)
+    # share hash chain, validated (from any share, for given shnum)
+    # block hash (any share, given shnum)
+
+    def _got_ueb(self, ueb_data, share):
+        if self._ueb is not None:
+            return
+        if hashutil.uri_extension_hash(ueb_data) != self._ueb_hash:
+            share.error("UEB hash does not match")
+            return
+        d = uri.unpack_extension(ueb_data)
+        self.share_size = mathutil.div_ceil(self._size, self._needed_shares)
+
+
+        # There are several kinds of things that can be found in a UEB.
+        # First, things that we really need to learn from the UEB in order to
+        # do this download. Next: things which are optional but not redundant
+        # -- if they are present in the UEB they will get used.
Next, things
+        # that are optional and redundant. These things are required to be
+        # consistent: they don't have to be in the UEB, but if they are in
+        # the UEB then they will be checked for consistency with the
+        # already-known facts, and if they are inconsistent then an exception
+        # will be raised. These things aren't actually used -- they are just
+        # tested for consistency and ignored. Finally: things which are
+        # deprecated -- they ought not be in the UEB at all, and if they are
+        # present then a warning will be logged but they are otherwise
+        # ignored.
+
+        # First, things that we really need to learn from the UEB:
+        # segment_size, crypttext_root_hash, and share_root_hash.
+        self._segsize = d['segment_size']
+
+        self._blocksize = mathutil.div_ceil(self._segsize, self._needed_shares)
+        self._numsegs = mathutil.div_ceil(self._size, self._segsize)
+
+        self._tail_segsize = self._size % self._segsize
+        if self._tail_segsize == 0:
+            self._tail_segsize = self._segsize
+        # padding for erasure code
+        self._tail_segsize = mathutil.next_multiple(self._tail_segsize,
+                                                    self._needed_shares)
+
+        # Ciphertext hash tree root is mandatory, so that there is at most
+        # one ciphertext that matches this read-cap or verify-cap. The
+        # integrity check on the shares is not sufficient to prevent the
+        # original encoder from creating some shares of file A and other
+        # shares of file B.
+        self._ciphertext_hash_tree = IncompleteHashTree(self._numsegs)
+        self._ciphertext_hash_tree.set_hashes({0: d['crypttext_root_hash']})
+
+        self._share_hash_tree.set_hashes({0: d['share_root_hash']})
+
+
+        # Next: things that are optional and not redundant: crypttext_hash
+        if 'crypttext_hash' in d:
+            if len(d['crypttext_hash']) == hashutil.CRYPTO_VAL_SIZE:
+                self._ciphertext_hash = d['crypttext_hash']
+            else:
+                log.msg("ignoring bad-length UEB[crypttext_hash], "
+                        "got %d bytes, want %d" % (len(d['crypttext_hash']),
+                                                   hashutil.CRYPTO_VAL_SIZE),
+                        umid="oZkGLA", level=log.WEIRD)
+
+        # we ignore all of the redundant fields when downloading. The
+        # Verifier uses a different code path which does not ignore them.
+
+        # finally, set self._ueb as a marker that we don't need to request it
+        # anymore
+        self._ueb = d
+
+    def _got_share_hashes(self, hashes, share):
+        assert isinstance(hashes, dict)
+        try:
+            self._share_hash_tree.set_hashes(hashes)
+        except (IndexError, BadHashError, NotEnoughHashesError), le:
+            share.error("Bad or missing hashes")
+            return
+
+    #def _got_block_hashes(
+
+    def _init_validate_enough_shares(self):
+        # _valid_shares maps shnum to ValidatedShare instances, and is
+        # populated once the block hash root has been fetched and validated
+        # (which requires any valid copy of the UEB, and a valid copy of the
+        # share hash chain for each shnum)
+        self._valid_shares = {}
+
+        # _target_shares is an ordered list of ReadyShare instances, each of
+        # which is a (shnum, server) tuple. It is sorted in order of
+        # preference: we expect to get the fastest response from the
+        # ReadyShares at the front of the list. It is also sorted to
+        # distribute the shnums, so that fetching shares from
+        # _target_shares[:k] is likely (but not guaranteed) to give us k
+        # distinct shares.
The rule is that we skip over entries for blocks
+        # that we've already received, limit the number of recent queries for
+        # the same block,
+        self._target_shares = []
+
+    def _validate_enough_shares(self):
+        # my goal is to have at least 2*k distinct validated shares from at
+        # least 2*k distinct servers
+        valid_share_servers = set()
+        for vs in self._valid_shares.values():
+            valid_share_servers.update(vs.get_servers())
+        if (len(self._valid_shares) >= 2*self._needed_shares
+            and len(valid_share_servers) >= 2*self._needed_shares):
+            return
+        #for
+
+    def _reschedule(self, _ign):
+        # fire the loop again
+        if not self._scheduled:
+            self._scheduled = True
+            eventually(self._loop)
+
+    def _loop(self):
+        self._scheduled = False
+        # what do we need?
+
+        self._find_enough_shares()
+        self._validate_enough_shares()
+
+        if not self._ueb:
+            # we always need a copy of the UEB
+            pass
+
+    def _error(self, f):
+        # this is an unexpected error: a coding bug
+        log.err(f, level=log.UNUSUAL)
+
+
+
+# using a single packed string (and an offset table) may be an artifact of
+# our native storage server: other backends might allow cheap multi-part
+# files (think S3, several buckets per share, one for each section).
+
+# find new names for:
+#  data_holder
+#  Share / Share2 (ShareInstance / Share? but the first is more useful)
+
+class IShare(Interface):
+    """I represent a single instance of a single share (e.g. I reference the
+    shnum2 for share SI=abcde on server xy12t, not the one on server ab45q).
+    This interface is used by SegmentFetcher to retrieve validated blocks.
+    """
+    def get_block(segnum):
+        """Return an Observer2, which will be notified with the following
+        events:
+         state=COMPLETE, block=data (terminal): validated block data
+         state=OVERDUE (non-terminal): we have reason to believe that the
+                                       request might have stalled, or we
+                                       might just be impatient
+         state=CORRUPT (terminal): the data we received was corrupt
+         state=DEAD (terminal): the connection has failed
+        """
+
+
+# it'd be nice if we receive the hashes before the block, or just
+# afterwards, so we aren't stuck holding on to unvalidated blocks
+# that we can't process. If we guess the offsets right, we can
+# accomplish this by sending the block request after the metadata
+# requests (by keeping two separate requestlists), and have a one RTT
+# pipeline like:
+#  1a=metadata, 1b=block
+#  1b->process+deliver : one RTT
+
+# But if we guess wrong, and fetch the wrong part of the block, we'll
+# have a pipeline that looks like:
+#  1a=wrong metadata, 1b=wrong block
+#  1a->2a=right metadata,2b=right block
+#  2b->process+deliver
+# which means two RTT and buffering one block (which, since we'll
+# guess the segsize wrong for everything, means buffering one
+# segment)
+
+# if we start asking for multiple segments, we could get something
+# worse:
+#  1a=wrong metadata, 1b=wrong block0, 1c=wrong block1, ..
+#  1a->2a=right metadata,2b=right block0,2c=right block1, .
+#  2b->process+deliver
+
+# which means two RTT but fetching and buffering the whole file
+# before delivering anything. However, since we don't know when the
+# other shares are going to arrive, we need to avoid having more than
+# one block in the pipeline anyways. So we shouldn't be able to get
+# into this state.
+
+# it also means that, instead of handling all of
+# self._requested_blocks at once, we should only be handling one
+# block at a time: one of the requested blocks should be special
+# (probably FIFO). But retire all we can.
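
# A small usage sketch (not part of the patch) of the Observer2-based
# protocol that IShare.get_block describes above: unlike a Deferred it is
# not one-shot, so it can report a non-terminal OVERDUE before a terminal
# COMPLETE/CORRUPT/DEAD event, and subscribe() passes extra keyword
# arguments through to each notification. The fetcher-side names here are
# hypothetical.

def watch_block(share, segnum, shnum):
    o = share.get_block(segnum)             # an Observer2
    o.subscribe(_block_event, shnum=shnum)  # shnum rides along as a kwarg
    return o

def _block_event(state=None, block=None, shnum=None):
    if state == "OVERDUE":
        pass  # non-terminal: consider asking another share for this block
    elif state == "COMPLETE":
        pass  # terminal: 'block' holds the validated block data
    elif state in ("CORRUPT", "DEAD"):
        pass  # terminal: retire this share and fall back to another
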
+
+    # this might be better with a Deferred, using COMPLETE as the success
+    # case and CORRUPT/DEAD in an errback, because that would let us hold the
+    # 'share' and 'shnum' arguments locally (instead of roundtripping them
+    # through Share.send_request). But that OVERDUE is not terminal. So I
+    # want a new sort of callback mechanism, with the extra-argument-passing
+    # aspects of Deferred, but without being so one-shot. Is this a job for
+    # Observer? No, it doesn't take extra arguments. So this uses Observer2.
+
+
+class Reader:
+    """I am responsible for a single offset+size read of the file. I handle
+    segmentation: I figure out which segments are necessary, request them
+    (from my CiphertextDownloader) in order, and trim the segments down to
+    match the offset+size span. I use the Producer/Consumer interface to only
+    request one segment at a time.
+    """
+    implements(IPushProducer)
+    def __init__(self, consumer, offset, size):
+        self._needed = []
+        self._consumer = consumer
+        self._hungry = False
+        self._offset = offset
+        self._size = size
+        self._segsize = None
+    def start(self):
+        self._alive = True
+        self._deferred = defer.Deferred()
+        # the process doesn't actually start until set_segment_size()
+        return self._deferred
+
+    def set_segment_size(self, segsize):
+        if self._segsize is not None:
+            return
+        self._segsize = segsize
+        for (segnum, offset, size) in self._compute_segnums():
+            self.add_segment(segnum, offset, size)
+
+    def _compute_segnums(self):
+        # now that we know the file's segsize, what segments (and which
+        # ranges of each) will we need?
+        size = self._size
+        offset = self._offset
+        while size:
+            assert size >= 0
+            this_seg_num = int(offset / self._segsize)
+            this_seg_offset = offset - (this_seg_num*self._segsize)
+            this_seg_size = min(size, self._segsize-this_seg_offset)
+            size -= this_seg_size
+            if size:
+                offset += this_seg_size
+            yield (this_seg_num, this_seg_offset, this_seg_size)
+
+    def get_needed_segments(self):
+        return set([segnum for (segnum, off, size) in self._needed])
+
+
+    def stopProducing(self):
+        self._hungry = False
+        self._alive = False
+        # TODO: cancel the segment requests
+    def pauseProducing(self):
+        self._hungry = False
+    def resumeProducing(self):
+        self._hungry = True
+    def add_segment(self, segnum, offset, size):
+        self._needed.append( (segnum, offset, size) )
+    def got_segment(self, segnum, segdata):
+        """Return True if this schedule has more to go, or False if it is
+        done."""
+        assert self._needed[0][0] == segnum
+        (_ign, offset, size) = self._needed.pop(0)
+        data = segdata[offset:offset+size]
+        self._consumer.write(data)
+        if not self._needed:
+            # we're done
+            self._alive = False
+            self._hungry = False
+            self._consumer.unregisterProducer()
+            self._deferred.callback(self._consumer)
+    def error(self, f):
+        self._alive = False
+        self._hungry = False
+        self._consumer.unregisterProducer()
+        self._deferred.errback(f)
+
+
+
+class x:
+    def OFFread(self, consumer, offset=0, size=None):
+        """I am the main entry point, from which FileNode.read() can get
+        data."""
+        # tolerate concurrent operations: each gets its own Reader
+        if size is None:
+            size = self._size - offset
+        r = Reader(consumer, offset, size)
+        self._readers.add(r)
+        d = r.start()
+        if self.segment_size is not None:
+            r.set_segment_size(self.segment_size)
+            # TODO: if we can't find any segments, and thus never get a
+            # segsize, tell the Readers to give up
+        return d
diff --git a/src/allmydata/immutable/download2_util.py b/src/allmydata/immutable/download2_util.py
new file mode 100755
index 0000000..d45f5cc
--- /dev/null
+++ 
b/src/allmydata/immutable/download2_util.py @@ -0,0 +1,73 @@ +import weakref + +from twisted.application import service +from foolscap.api import eventually + +class Observer2: + """A simple class to distribute multiple events to a single subscriber. + It accepts arbitrary kwargs, but no posargs.""" + def __init__(self): + self._watcher = None + self._undelivered_results = [] + self._canceler = None + + def set_canceler(self, c, methname): + """I will call c.METHNAME(self) when somebody cancels me.""" + # we use a weakref to avoid creating a cycle between us and the thing + # we're observing: they'll be holding a reference to us to compare + # against the value we pass to their canceler function. However, + # since bound methods are first-class objects (and not kept alive by + # the object they're bound to), we can't just stash a weakref to the + # bound cancel method. Instead, we must hold a weakref to the actual + # object, and obtain its cancel method later. + # http://code.activestate.com/recipes/81253-weakmethod/ has an + # alternative. + self._canceler = (weakref.ref(c), methname) + + def subscribe(self, observer, **watcher_kwargs): + self._watcher = (observer, watcher_kwargs) + while self._undelivered_results: + self._notify(self._undelivered_results.pop(0)) + + def notify(self, **result_kwargs): + if self._watcher: + self._notify(result_kwargs) + else: + self._undelivered_results.append(result_kwargs) + + def _notify(self, result_kwargs): + o, watcher_kwargs = self._watcher + kwargs = dict(result_kwargs) + kwargs.update(watcher_kwargs) + eventually(o, **kwargs) + + def cancel(self): + wr,methname = self._canceler + o = wr() + if o: + getattr(o,methname)(self) + + +def incidentally(res, f, *args, **kwargs): + """Add me to a Deferred chain like this: + d.addBoth(incidentally, func, arg) + and I'll behave as if you'd added the following function: + def _(res): + func(arg) + return res + This is useful if you want to execute an expression when the Deferred + fires, but don't care about its value. + """ + f(*args, **kwargs) + return res + + +class Terminator(service.Service): + def __init__(self): + self._clients = weakref.WeakKeyDictionary() + def register(self, c): + self._clients[c] = None + def stopService(self): + for c in self._clients: + c.stop() + return service.Service.stopService(self) diff --git a/src/allmydata/immutable/layout.py b/src/allmydata/immutable/layout.py index 6ca5339..a625390 100644 --- a/src/allmydata/immutable/layout.py +++ b/src/allmydata/immutable/layout.py @@ -74,12 +74,16 @@ limitations described in #346. # they are still provided when writing so that older versions of Tahoe can # read them. +FORCE_V2 = False # set briefly by unit tests to make small-sized V2 shares + def make_write_bucket_proxy(rref, data_size, block_size, num_segments, num_share_hashes, uri_extension_size_max, nodeid): # Use layout v1 for small files, so they'll be readable by older versions # (