from twisted.trial import unittest
from twisted.internet import defer, reactor
from allmydata.util.consumer import download_to_data
from allmydata.immutable import upload
from allmydata.test.no_network import GridTestMixin

immutable_plaintext = "data" * 10000
mutable_plaintext = "muta" * 10000

class HungServerDownloadTest(GridTestMixin, unittest.TestCase):
    timeout = 30

    def test_k_good_servers(self):
        # k = 3 servers with valid shares, and the rest hung
        self.basedir = "download/test_k_good_servers"
        self.set_up_grid(num_clients=1, num_servers=10)
        self.c0 = self.g.clients[0]
        #self.c0.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
        sb = self.c0.nodemaker.storage_broker
        peerids = [serverid for (serverid, ss) in sb.get_all_servers()]
        the_cows_come_home = defer.Deferred()

        data = upload.Data(immutable_plaintext, convergence="")
        d = self.c0.upload(data)
        def _store_uri(u):
            self.uri = u.uri
            return self.find_shares(self.uri)
        d.addCallback(_store_uri)
        def _store_shares(shares):
            self.shares = shares # currently unused
        d.addCallback(_store_shares)

        # just hanging server 9 also fails without the #928 fix,
        # but *breaking* servers 3..9 passes
        def _hang_servers(ignored):
            for i in range(3, 10):
                self.g.hang_server(peerids[i], until=the_cows_come_home)
                #self.g.break_server(peerids[i])
        d.addCallback(_hang_servers)

        d.addCallback(lambda ign: self.download_immutable())
        #d.addCallback(lambda ign: the_cows_come_home.callback(None))

        # test that our 'hang_server' abstraction works: unhang servers
        # after 10 seconds
        #reactor.callLater(10, the_cows_come_home.callback, None)
        return d

    def download_immutable(self):
        n = self.c0.create_node_from_uri(self.uri)
        d = download_to_data(n)
        def _got_data(data):
            self.failUnlessEqual(data, immutable_plaintext)
        d.addCallback(_got_data)
        return d

    def download_mutable(self): # currently unused
        n = self.c0.create_node_from_uri(self.uri)
        d = n.download_best_version()
        def _got_data(data):
            self.failUnlessEqual(data, mutable_plaintext)
        d.addCallback(_got_data)
        return d

    """ currently unused

    def _add_server_with_share(self, server_number, share_number=None,
                               readonly=False):
        assert self.g, "I tried to find a grid at self.g, but failed"
        assert self.shares, "I tried to find shares at self.shares, but failed"
        ss = self.g.make_server(server_number, readonly)
        self.g.add_server(server_number, ss)
        if share_number is not None: # 'is not None', so share 0 is still copied
            # Copy the given share from the directory associated with the
            # first storage server to the directory associated with this one.
            old_share_location = self.shares[share_number][2]
            new_share_location = os.path.join(ss.storedir, "shares")
            si = uri.from_string(self.uri).get_storage_index()
            new_share_location = os.path.join(new_share_location,
                                              storage_index_to_dir(si))
            if not os.path.exists(new_share_location):
                os.makedirs(new_share_location)
            new_share_location = os.path.join(new_share_location,
                                              str(share_number))
            shutil.copy(old_share_location, new_share_location)
            shares = self.find_shares(self.uri)
            # Make sure that the storage server has the share.
            self.failUnless((share_number, ss.my_nodeid, new_share_location)
                            in shares)

    def test_problem_layouts(self):
        self.basedir = self.mktemp()
        # This scenario is at
        # http://allmydata.org/trac/tahoe/ticket/778#comment:52
        #
        # The scenario in comment:52 proposes that we have a layout
        # like:
        # server 1: share 1
        # server 2: share 1
        # server 3: share 1
        # server 4: shares 2 - 10
        #
        # To get access to the shares, we will first upload to one
        # server, which will then have shares 1 - 10.
        # We'll then add three new servers, configure them to not accept any
        # new shares, then write share 1 directly into the serverdir of each.
        # Then each of servers 1 - 3 will report that they have share 1,
        # and will not accept any new share, while server 4 will report that
        # it has shares 2 - 10 and will accept new shares.
        # We'll then set 'happy' = 4, and see that an upload fails
        # (as it should).
        d = self._setup_and_upload()
        d.addCallback(lambda ign:
            self._add_server_with_share(1, 0, True))
        d.addCallback(lambda ign:
            self._add_server_with_share(2, 0, True))
        d.addCallback(lambda ign:
            self._add_server_with_share(3, 0, True))
        # Remove the first share from server 0.
        def _remove_share_0():
            share_location = self.shares[0][2]
            os.remove(share_location)
        d.addCallback(lambda ign:
            _remove_share_0())
        # Set happy = 4 in the client.
        def _prepare():
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
            return client
        d.addCallback(lambda ign:
            _prepare())
        # Uploading data should fail
        d.addCallback(lambda client:
            self.shouldFail(NotEnoughSharesError, "test_happy_semantics",
                            "shares could only be placed on 1 servers "
                            "(4 were requested)",
                            client.upload,
                            upload.Data("data" * 10000, convergence="")))

        # This scenario is at
        # http://allmydata.org/trac/tahoe/ticket/778#comment:53
        #
        # Set up the grid to have one server
        def _change_basedir(ign):
            self.basedir = self.mktemp()
        d.addCallback(_change_basedir)
        d.addCallback(lambda ign:
            self._setup_and_upload())
        # We want to have a layout like this:
        # server 1: share 1
        # server 2: share 2
        # server 3: share 3
        # server 4: shares 1 - 10
        # (this is an expansion of Zooko's example because it is easier
        #  to code, but it will fail in the same way)
        #
        # To start, we'll create a server with shares 1-10 of the data
        # we're about to upload.
        # Next, we'll add three new servers to our NoNetworkGrid. We'll add
        # one share from our initial upload to each of these.
        # The counterintuitive ordering of the share numbers is to deal with
        # the permuting of these servers -- distributing the shares this
        # way ensures that the Tahoe2PeerSelector sees them in the order
        # described above.
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=1, share_number=2))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=2, share_number=0))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=3, share_number=1))
        # So, we now have the following layout:
        # server 0: shares 1 - 10
        # server 1: share 0
        # server 2: share 1
        # server 3: share 2
        # We want to change the 'happy' parameter in the client to 4.
        # We then want to feed the upload process a list of peers that
        # server 0 is at the front of, so we trigger Zooko's scenario.
        # Ideally, a reupload of our original data should work.
        def _reset_encoding_parameters(ign):
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
            return client
        d.addCallback(_reset_encoding_parameters)
        # We need this to get around the fact that the old Data
        # instance already has a happy parameter set.
        d.addCallback(lambda client:
            client.upload(upload.Data("data" * 10000, convergence="")))
        return d
    """
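
# Note: the disabled block above references names that this module does not
# import (os, shutil, uri, storage_index_to_dir, NotEnoughSharesError), and it
# calls self._setup_and_upload() and self.shouldFail(), which this class does
# not define. If it is ever re-enabled, imports along these lines would be
# needed; the module paths below are a best guess based on the usual
# Tahoe-LAFS layout, not verified here:
#
#   import os, shutil
#   from allmydata import uri
#   from allmydata.storage.common import storage_index_to_dir
#   from allmydata.interfaces import NotEnoughSharesError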