hunk ./src/allmydata/immutable/downloader/fetcher.py 74
         eventually(self.loop)
 
     def no_more_shares(self):
+        log.msg("aaa %s.no_more_shares()" % (self,))
         # ShareFinder tells us it's reached the end of its list
         self._no_more_shares = True
         eventually(self.loop)
hunk ./src/allmydata/immutable/downloader/fetcher.py 104
             self._node.fetch_failed(self, f)
             return
 
-        #print "LOOP", self._blocks.keys(), "active:", self._active_share_map, "overdue:", self._overdue_share_map, "unused:", self._shares
+        log.msg( "aaa %s.loop(); blocks: %s, active: %s, overdue: %s, unused: %s" % (self, self._blocks.keys(), self._active_share_map, self._overdue_share_map, self._shares))
         # Should we send out more requests?
         while len(set(self._blocks.keys())
                   | set(self._active_share_map.keys())
hunk ./src/allmydata/immutable/downloader/fetcher.py 130
             # progress
             self._ask_for_more_shares()
             if self._no_more_shares:
+                self._no_shares_error() # this calls self.stop()
                 # But there are no more shares to be had. If we're going to
                 # succeed, it will be with the shares we've already seen.
                 # Will they be enough?
hunk ./src/allmydata/immutable/downloader/finder.py 52
         self._lp = log.msg(format="ShareFinder[si=%(si)s] starting",
                            si=self._si_prefix,
                            level=log.NOISY, parent=logparent, umid="2xjj2A")
+        log.msg("xxx %s.__init__(%s, %s, %s, %s, %s, %s)" % (self, storage_broker, verifycap, node, download_status, logparent, max_outstanding_requests))
 
     def update_num_segments(self):
hunk ./src/allmydata/immutable/downloader/finder.py 55
+        log.msg("xxx %s.update_num_segments()" % (self,))
         (numsegs, authoritative) = self.node.get_num_segments()
         assert authoritative
         for cs in self._commonshares.values():
hunk ./src/allmydata/immutable/downloader/finder.py 62
             cs.set_authoritative_num_segments(numsegs)
 
     def start_finding_servers(self):
+        log.msg("xxx %s.start_finding_servers()" % (self,))
         # don't get servers until somebody uses us: creating the
         # ImmutableFileNode should not cause work to happen yet. Test case is
         # test_dirnode, which creates us with storage_broker=None
hunk ./src/allmydata/immutable/downloader/finder.py 78
         return log.msg(*args, **kwargs)
 
     def stop(self):
+        log.msg("xxx %s.stop()" % (self,))
         self.running = False
         while self.overdue_timers:
             req,t = self.overdue_timers.popitem()
hunk ./src/allmydata/immutable/downloader/finder.py 86
 
     # called by our parent CiphertextDownloader
     def hungry(self):
+        log.msg("xxx %s.hungry()" % (self,))
         self.log(format="ShareFinder[si=%(si)s] hungry",
                  si=self._si_prefix, level=log.NOISY, umid="NywYaQ")
         self.start_finding_servers()
hunk ./src/allmydata/immutable/downloader/finder.py 95
 
     # internal methods
     def loop(self):
+        log.msg("xxx %s.loop()" % (self,))
         pending_s = ",".join([idlib.shortnodeid_b2a(rt.peerid)
                               for rt in self.pending_requests]) # sort?
         self.log(format="ShareFinder loop: running=%(running)s"
hunk ./src/allmydata/immutable/downloader/finder.py 123
             self.send_request(server)
             # we loop again to get parallel queries. The check above will
             # prevent us from looping forever.
+            log.msg("xxx %s.loop() => loop again to get parallel queries" % (self,))
             eventually(self.loop)
             return
 
hunk ./src/allmydata/immutable/downloader/finder.py 140
         self.share_consumer.no_more_shares()
 
     def send_request(self, server):
+        log.msg("xxx %s.send_request(%s)" % (self, server))
         peerid, rref = server
         req = RequestToken(peerid)
         self.pending_requests.add(req)
hunk ./src/allmydata/immutable/downloader/finder.py 163
         d.addCallback(incidentally, eventually, self.loop)
 
     def _request_retired(self, req):
+        log.msg("xxx %s._request_retired(%s)" % (self, req))
         self.pending_requests.discard(req)
         self.overdue_requests.discard(req)
         if req in self.overdue_timers:
hunk ./src/allmydata/immutable/downloader/finder.py 171
             del self.overdue_timers[req]
 
     def overdue(self, req):
+        log.msg("xxx %s.overdue(%s)" % (self, req))
         del self.overdue_timers[req]
         assert req in self.pending_requests # paranoia, should never be false
         self.overdue_requests.add(req)
hunk ./src/allmydata/immutable/downloader/finder.py 179
 
     def _got_response(self, buckets, server_version, peerid, req, d_ev,
                       time_sent, lp):
+        log.msg("xxx %s._got_response(%s, %s, %s, %s, %s, %s, %s)" % (self, buckets, server_version, peerid, req, d_ev, time_sent, lp))
         shnums = sorted([shnum for shnum in buckets])
         time_received = now()
         d_ev.finished(shnums, time_received)
hunk ./src/allmydata/immutable/downloader/finder.py 201
         self._deliver_shares(shares)
 
     def _create_share(self, shnum, bucket, server_version, peerid, dyhb_rtt):
+        log.msg("xxx %s._create_share(%s, %s, %s, %s, %s)" % (self, shnum, bucket, server_version, peerid, dyhb_rtt))
         if shnum in self._commonshares:
             cs = self._commonshares[shnum]
         else:
hunk ./src/allmydata/immutable/downloader/finder.py 230
         return s
 
     def _deliver_shares(self, shares):
+        log.msg("xxx %s._deliver_shares(%s)" % (self, shares))
         # they will call hungry() again if they want more
         self._hungry = False
         shares_s = ",".join([str(sh) for sh in shares])
hunk ./src/allmydata/immutable/downloader/finder.py 239
         eventually(self.share_consumer.got_shares, shares)
 
     def _got_error(self, f, peerid, req, d_ev, lp):
+        log.msg("xxx %s._got_error(%s, %s, %s, %s, %s)" % (self, f, peerid, req, d_ev, lp))
         d_ev.finished("error", now())
         self.log(format="got error from [%(peerid)s]",
                  peerid=idlib.shortnodeid_b2a(peerid), failure=f,
hunk ./src/allmydata/immutable/downloader/finder.py 244
                  level=log.UNUSUAL, parent=lp, umid="zUKdCw")
-
-
hunk ./src/allmydata/immutable/downloader/node.py 224
 
     # called by our child ShareFinder
     def got_shares(self, shares):
+        log.msg("xxx %s.got_shares(%s)" % (self, shares))
         self._shares.update(shares)
         if self._active_segment:
             self._active_segment.add_shares(shares)
hunk ./src/allmydata/immutable/downloader/node.py 229
     def no_more_shares(self):
+        log.msg("xxx %s.no_more_shares() ; _active_segment: %s" % (self, self._active_segment))
         self._no_more_shares = True
         if self._active_segment:
             self._active_segment.no_more_shares()
hunk ./src/allmydata/immutable/downloader/share.py 86
 
         self._requested_blocks = [] # (segnum, set(observer2..))
         ver = server_version["http://allmydata.org/tahoe/protocols/storage/v1"]
+        log.msg( "zzz ver: %r" % (ver,))
         self._overrun_ok = ver["tolerates-immutable-read-overrun"]
         # If _overrun_ok and we guess the offsets correctly, we can get
         # everything in one RTT. If _overrun_ok and we guess wrong, we might
hunk ./src/allmydata/test/test_immutable.py 4
 from allmydata.test import common
 from allmydata.interfaces import NotEnoughSharesError
 from allmydata.util.consumer import download_to_data
+from allmydata import uri
 from twisted.internet import defer
 from twisted.trial import unittest
hunk ./src/allmydata/test/test_immutable.py 7
+from allmydata.immutable.downloader.common import COMPLETE
 import random
 
hunk ./src/allmydata/test/test_immutable.py 10
+from foolscap.api import eventually
+from allmydata.util import log
+
+from allmydata.immutable.downloader import fetcher, finder, share
+
+import mock
+
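+# A minimal stand-in for the download node (the finder's share_consumer
+# and the fetcher's parent): it counts the shares it is given, fails the
+# test if a share arrives after no_more_shares() (reneging), and fails it
+# if the fetcher reports the download as failed.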
+class MockNode(object):
+    def __init__(self, check_reneging, check_fetch_failed):
+        self.got = 0
+        self.finished_d = defer.Deferred()
+        self.segment_size = 78
+        self.guessed_segment_size = 78
+        self._no_more_shares = False
+        self.check_reneging = check_reneging
+        self.check_fetch_failed = check_fetch_failed
+        self._si_prefix = 'aa'
+        self.have_UEB = True
+        self.share_hash_tree = mock.Mock()
+        self.share_hash_tree.needed_hashes.return_value = False
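+        # pretend the share hash tree is already complete, so the fetcher
+        # never has to wait for extra hash nodes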
+        self.on_want_more_shares = None
+
+    def when_finished(self):
+        return self.finished_d
+    def get_num_segments(self):
+        return (5, True)
+    def _calculate_sizes(self, guessed_segment_size):
+        return {'block_size': 4, 'num_segments': 5}
+    def no_more_shares(self):
+        self._no_more_shares = True
+    def got_shares(self, shares):
+        if self.check_reneging:
+            if self._no_more_shares:
+                self.finished_d.errback(unittest.FailTest("The node was told by the share finder that it is destined to remain hungry, then was given another share."))
+                return
+        self.got += len(shares)
+        log.msg("yyy 3 %s.got_shares(%s) got: %s" % (self, shares, self.got))
+        if self.got == 3:
+            self.finished_d.callback(True)
+    def get_desired_ciphertext_hashes(self, *args, **kwargs):
+        return iter([])
+    def fetch_failed(self, *args, **kwargs):
+        if self.check_fetch_failed:
+            if self.finished_d:
+                self.finished_d.errback(unittest.FailTest("The node was told by the segment fetcher that the download failed."))
+                self.finished_d = None
+    def want_more_shares(self):
+        if self.on_want_more_shares:
+            self.on_want_more_shares()
+    def process_blocks(self, *args, **kwargs):
+        if self.finished_d:
+            self.finished_d.callback(None)
+
+class TestSegmentFetcher(unittest.TestCase):
+    def test_be_satisfied_with_ill_distributed_shares(self):
+        mocknode = MockNode(check_reneging=False, check_fetch_failed=True)
+        sf = fetcher.SegmentFetcher(mocknode, 1, 3, None)
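+        # when the fetcher asks for more shares, immediately tell it that
+        # none are coming; it has to finish with the three added below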
+        mocknode.on_want_more_shares = lambda: sf.no_more_shares()
+
+        rcap = uri.CHKFileURI('a'*32, 'a'*32, 3, 99, 100)
+        vcap = rcap.get_verify_cap()
+        server_version = {
+                    'http://allmydata.org/tahoe/protocols/storage/v1': {
+                        "tolerates-immutable-read-overrun": True
+                        }
+                    }
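+        # Three shares spread unevenly over two servers: shares 0 and 1
+        # both live on peer 's1', share 2 on 's2'. With k=3 the fetcher
+        # must be willing to take two shares from the same server.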
+        ss = []
+        mockcommonshare = mock.Mock()
+        mockcommonshare.get_desired_block_hashes.return_value = iter([])
+
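+        # Each share's mock DownloadStatus reports its block COMPLETE (via
+        # eventually) as soon as the fetcher records that a request was
+        # sent, so all three block requests succeed on a later turn of the
+        # event loop.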
+        mockds0 = mock.Mock()
+        share0 = share.Share(mock.Mock(), server_version, vcap,
+                            mockcommonshare, mocknode, mockds0, 's1', 0, 0,
+                            None)
+        mockds0.add_request_sent.side_effect = \
+            lambda *args, **kw: eventually(sf._block_request_activity,
+                                           share0, 0, COMPLETE)
+        ss.append(share0)
+
+        mockds1 = mock.Mock()
+        share1 = share.Share(mock.Mock(), server_version, vcap,
+                            mockcommonshare, mocknode, mockds1, 's1', 1, 0,
+                            None)
+        mockds1.add_request_sent.side_effect = \
+            lambda *args, **kw: eventually(sf._block_request_activity,
+                                           share1, 1, COMPLETE)
+        ss.append(share1)
+
+        mockds2 = mock.Mock()
+        share2 = share.Share(mock.Mock(), server_version, vcap,
+                            mockcommonshare, mocknode, mockds2, 's2', 2, 0,
+                            None)
+        mockds2.add_request_sent.side_effect = \
+            lambda *args, **kw: eventually(sf._block_request_activity,
+                                           share2, 2, COMPLETE)
+        ss.append(share2)
+
+        sf.add_shares(ss)
+        sf.no_more_shares()
+
+        return mocknode.when_finished()
+
+class TestShareFinder(unittest.TestCase):
+    def test_no_reneging_on_no_more_shares_ever(self):
+        # ticket #1191
+
+        # Suppose that K=3 and you send two DYHB requests, the first
+        # response offers two shares, and then the last offers one
+        # share. If you tell your share consumer "no more shares,
+        # ever", and then immediately tell them "oh, and here's
+        # another share", then you lose.
+
+        rcap = uri.CHKFileURI('a'*32, 'a'*32, 3, 99, 100)
+        vcap = rcap.get_verify_cap()
+
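+        # A server stub: each DYHB (callRemote) eventually answers with
+        # its canned bucket map and then pokes the finder with hungry()
+        # again, keeping the query loop going.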
+        class MockServer(object):
+            def __init__(self, buckets):
+                self.version = {
+                    'http://allmydata.org/tahoe/protocols/storage/v1': {
+                        "tolerates-immutable-read-overrun": True
+                        }
+                    }
+                self.buckets = buckets
+                self.d = defer.Deferred()
+                self.s = None
+            def callRemote(self, methname, *args, **kwargs):
+                log.msg("yyy 2 %s.callRemote(%s, %s, %s)" % (self, methname, args, kwargs))
+                d = defer.Deferred()
+
+                # Even after the 3rd answer we're still hungry, because
+                # we're interested in finding a share on a 3rd server so
+                # that we don't have to download more than one share from
+                # the first server. This is actually necessary to trigger
+                # the bug.
+                def _give_buckets_and_hunger_again():
+                    d.callback(self.buckets)
+                    self.s.hungry()
+                eventually(_give_buckets_and_hunger_again)
+                return d
+
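+        # Mirror the scenario above: the first DYHB response offers two
+        # shares, the second offers none, and the third offers one more.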
+        mockserver1 = MockServer({1: mock.Mock(), 2: mock.Mock()})
+        mockserver2 = MockServer({})
+        mockserver3 = MockServer({3: mock.Mock()})
+        mockstoragebroker = mock.Mock()
+        mockstoragebroker.get_servers_for_index.return_value = [
+            ('ms1', mockserver1), ('ms2', mockserver2), ('ms3', mockserver3)]
+        mockdownloadstatus = mock.Mock()
+        mocknode = MockNode(check_reneging=True, check_fetch_failed=True)
+
+        s = finder.ShareFinder(mockstoragebroker, vcap, mocknode, mockdownloadstatus)
+
+        mockserver1.s = s
+        mockserver2.s = s
+        mockserver3.s = s
+
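+        # The first hungry() starts the DYHB queries; each MockServer
+        # response re-triggers hungry(), and MockNode.got_shares errbacks
+        # finished_d if a share ever arrives after no_more_shares().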
+        s.hungry()
+
+        return mocknode.when_finished()
+
 class Test(common.ShareManglingMixin, common.ShouldFailMixin, unittest.TestCase):
     def test_test_code(self):
         # The following process of stashing the shares, running