From 395b10af3b63ff4f8e17924102d7e22d6df5cb95 Mon Sep 17 00:00:00 2001 From: qstokkink Date: Sun, 26 Feb 2017 11:41:42 +0100 Subject: [PATCH 1/6] Refactored to sync-able data containers for tunnel --- .../Multichain/test_multichain_community.py | 3 +- Tribler/Test/Community/Tunnel/test_routing.py | 2 +- .../Community/Tunnel/test_tunnelcommunity.py | 20 +- Tribler/community/tunnel/hidden_community.py | 82 ++++++--- Tribler/community/tunnel/remotes/__init__.py | 0 .../tunnel/{routing.py => remotes/circuit.py} | 172 +++++++----------- Tribler/community/tunnel/remotes/hop.py | 75 ++++++++ .../community/tunnel/remotes/relayroute.py | 57 ++++++ .../community/tunnel/remotes/remote_object.py | 171 +++++++++++++++++ Tribler/community/tunnel/remotes/sync_dict.py | 105 +++++++++++ Tribler/community/tunnel/tunnel_community.py | 133 +++++++++----- 11 files changed, 638 insertions(+), 182 deletions(-) create mode 100644 Tribler/community/tunnel/remotes/__init__.py rename Tribler/community/tunnel/{routing.py => remotes/circuit.py} (54%) create mode 100644 Tribler/community/tunnel/remotes/hop.py create mode 100644 Tribler/community/tunnel/remotes/relayroute.py create mode 100644 Tribler/community/tunnel/remotes/remote_object.py create mode 100644 Tribler/community/tunnel/remotes/sync_dict.py diff --git a/Tribler/Test/Community/Multichain/test_multichain_community.py b/Tribler/Test/Community/Multichain/test_multichain_community.py index 238855a0532..f77cd724a85 100644 --- a/Tribler/Test/Community/Multichain/test_multichain_community.py +++ b/Tribler/Test/Community/Multichain/test_multichain_community.py @@ -10,7 +10,8 @@ from Tribler.community.multichain.community import (MultiChainCommunity, MultiChainCommunityCrawler, CRAWL_REQUEST, CRAWL_RESPONSE, CRAWL_RESUME) from Tribler.community.multichain.conversion import EMPTY_HASH -from Tribler.community.tunnel.routing import Circuit, RelayRoute +from Tribler.community.tunnel.remotes.circuit import Circuit +from 
Tribler.community.tunnel.remotes.relayroute import RelayRoute from Tribler.community.tunnel.tunnel_community import TunnelExitSocket from Tribler.Test.test_as_server import AbstractServer from Tribler.dispersy.message import DelayPacketByMissingMember diff --git a/Tribler/Test/Community/Tunnel/test_routing.py b/Tribler/Test/Community/Tunnel/test_routing.py index f2d2ca611d2..d3c62a36c04 100644 --- a/Tribler/Test/Community/Tunnel/test_routing.py +++ b/Tribler/Test/Community/Tunnel/test_routing.py @@ -1,6 +1,6 @@ from Tribler.Test.Core.base_test import TriblerCoreTest from Tribler.community.tunnel.hidden_community import HiddenTunnelCommunity -from Tribler.community.tunnel.routing import Circuit +from Tribler.community.tunnel.remotes.circuit import Circuit class TestRouting(TriblerCoreTest): diff --git a/Tribler/Test/Community/Tunnel/test_tunnelcommunity.py b/Tribler/Test/Community/Tunnel/test_tunnelcommunity.py index 50c39359d85..aa3caf4f8b3 100644 --- a/Tribler/Test/Community/Tunnel/test_tunnelcommunity.py +++ b/Tribler/Test/Community/Tunnel/test_tunnelcommunity.py @@ -5,7 +5,9 @@ from Tribler.Test.twisted_thread import deferred from Tribler.community.tunnel.conversion import TunnelConversion from Tribler.community.tunnel.crypto.tunnelcrypto import CryptoException, TunnelCrypto -from Tribler.community.tunnel.routing import Circuit, Hop, RelayRoute +from Tribler.community.tunnel.remotes.circuit import Circuit +from Tribler.community.tunnel.remotes.hop import Hop +from Tribler.community.tunnel.remotes.relayroute import RelayRoute from Tribler.community.tunnel.tunnel_community import (TunnelSettings, TunnelExitSocket, CircuitRequestCache, PingRequestCache) from Tribler.dispersy.candidate import Candidate @@ -142,7 +144,9 @@ def test_on_data_invalid_encoding(self): circuit = Circuit(42L) hop = Hop(tunnel_crypto.generate_key(u"curve25519")) hop.session_keys = tunnel_crypto.generate_session_keys("1234") - circuit.add_hop(hop) + hop.hop_id = "1" + 
self.tunnel_community.hops[hop.hop_id] = hop + circuit.add_hop(hop.hop_id) self.tunnel_community.circuits[42] = circuit # Encode data with a truncated encrypted string (empty in this case) @@ -175,7 +179,9 @@ def notify(self, subject, change_type, tunnel, candidate): circuit = Circuit(42L) circuit.first_hop = ("127.0.0.1", 1337) hop = Hop(tunnel_crypto.generate_key(u"curve25519").pub()) - circuit.add_hop(hop) + hop.hop_id = "1" + self.tunnel_community.hops[hop.hop_id] = hop + circuit.add_hop(hop.hop_id) # Register the first hop with dispersy and the community circuit.mid = self.tunnel_community.dispersy.get_member(public_key=hop.node_public_key).mid.encode("HEX") self.tunnel_community.create_or_update_walkcandidate(circuit.first_hop, circuit.first_hop, circuit.first_hop, @@ -216,7 +222,9 @@ def notify(self, subject, change_type, tunnel, candidate): circuit = Circuit(42L) circuit.first_hop = ("127.0.0.1", 1337) hop = Hop(tunnel_crypto.generate_key(u"curve25519").pub()) - circuit.add_hop(hop) + hop.hop_id = "1" + self.tunnel_community.hops[hop.hop_id] = hop + circuit.add_hop(hop.hop_id) # Register the first hop with dispersy and the community circuit.mid = self.tunnel_community.dispersy.get_member(public_key=hop.node_public_key).mid.encode("HEX") self.tunnel_community.create_or_update_walkcandidate(circuit.first_hop, circuit.first_hop, circuit.first_hop, @@ -259,7 +267,9 @@ def notify(self, subject, change_type, tunnel, candidate): circuit = Circuit(42L) circuit.first_hop = ("127.0.0.1", 1337) hop = Hop(tunnel_crypto.generate_key(u"curve25519").pub()) - circuit.add_hop(hop) + hop.hop_id = "1" + self.tunnel_community.hops[hop.hop_id] = hop + circuit.add_hop(hop.hop_id) # Register the first hop with dispersy and the community circuit.mid = self.tunnel_community.dispersy.get_member(public_key=hop.node_public_key).mid.encode("HEX") self.tunnel_community.create_or_update_walkcandidate(circuit.first_hop, circuit.first_hop, diff --git 
a/Tribler/community/tunnel/hidden_community.py b/Tribler/community/tunnel/hidden_community.py index d9dc938fbf7..8ca70f13de5 100644 --- a/Tribler/community/tunnel/hidden_community.py +++ b/Tribler/community/tunnel/hidden_community.py @@ -23,7 +23,8 @@ KeyResponsePayload, KeyRequestPayload, CreateE2EPayload, CreatedE2EPayload, LinkE2EPayload, LinkedE2EPayload, DHTRequestPayload, DHTResponsePayload) -from Tribler.community.tunnel.routing import RelayRoute, RendezvousPoint, Hop +from Tribler.community.tunnel.remotes.hop import Hop +from Tribler.community.tunnel.remotes.relayroute import RelayRoute from Tribler.dispersy.authentication import NoAuthentication from Tribler.dispersy.candidate import Candidate @@ -132,6 +133,13 @@ def __init__(self, community, circuit, info_hash): def on_timeout(self): pass +class RendezvousPoint(object): + + def __init__(self, circuit, cookie, finished_callback): + self.circuit = circuit + self.cookie = cookie + self.finished_callback = finished_callback + self.rp_info = None class HiddenTunnelCommunity(TunnelCommunity): @@ -155,7 +163,7 @@ def __init__(self, *args, **kwargs): self.tunnel_logger = logging.getLogger('TunnelLogger') - self.hops = {} + self.infohash_to_hop_dict = {} def initiate_meta_messages(self): return super(HiddenTunnelCommunity, self).initiate_meta_messages() + \ @@ -211,28 +219,43 @@ def remove_circuit(self, circuit_id, additional_info='', destroy=False): self.tunnel_logger.info("removed rendezvous point %d" % circuit_id) self.my_download_points.pop(circuit_id) - def ip_to_circuit_id(self, ip_str): + @classmethod + def ip_to_circuit_id(cls, ip_str): return struct.unpack("!I", socket.inet_aton(ip_str))[0] - def circuit_id_to_ip(self, circuit_id): + @classmethod + def circuit_id_to_ip(cls, circuit_id): return socket.inet_ntoa(struct.pack("!I", circuit_id)) + @staticmethod + def downloads_to_infohash_tuples(dslist): + infohashes = [] + for ds in dslist: + download = ds.get_download() + if download.get_hops() > 0: + 
infohashes.append((download.get_def().get_infohash(), download.get_hops(), ds.get_status())) + return infohashes + @call_on_reactor_thread def monitor_downloads(self, dslist): + self.monitor_infohashes(self.downloads_to_infohash_tuples(dslist)) + + @call_on_reactor_thread + def monitor_infohashes(self, infohashes): + """ + Monitor tuples of (infohash, hop_count, DownloadState.get_status()) + """ # Monitor downloads with anonymous flag set, and build rendezvous/introduction points when needed. new_states = {} hops = {} - for ds in dslist: - download = ds.get_download() - if download.get_hops() > 0: - # Convert the real infohash to the infohash used for looking up introduction points - real_info_hash = download.get_def().get_infohash() - info_hash = self.get_lookup_info_hash(real_info_hash) - hops[info_hash] = download.get_hops() - new_states[info_hash] = ds.get_status() + for real_info_hash, hop_count, status in infohashes: + # Convert the real infohash to the infohash used for looking up introduction points + info_hash = self.get_lookup_info_hash(real_info_hash) + hops[info_hash] = hop_count + new_states[info_hash] = status - self.hops = hops + self.infohash_to_hop_dict.update(hops) for info_hash in set(new_states.keys() + self.download_states.keys()): new_state = new_states.get(info_hash, None) @@ -280,7 +303,7 @@ def monitor_downloads(self, dslist): def do_dht_lookup(self, info_hash): # Select a circuit from the pool of exit circuits self.tunnel_logger.info("Do DHT request: select circuit") - circuit = self.selection_strategy.select(None, self.hops[info_hash]) + circuit = self.selection_strategy.select(None, self.infohash_to_hop_dict[info_hash]) if not circuit: self.tunnel_logger.info("No circuit for dht-request") return False @@ -357,7 +380,9 @@ def on_dht_response(self, messages): def create_key_request(self, info_hash, sock_addr): # 1. 
Select a circuit self.tunnel_logger.info("Create key request: select circuit") - circuit = self.selection_strategy.select(None, self.hops[info_hash]) + circuit = self.selection_strategy.select( + None, self.infohash_to_hop_dict[info_hash])\ + if info_hash in self.infohash_to_hop_dict else None if not circuit: self.tunnel_logger.error("No circuit for key-request") return @@ -480,7 +505,7 @@ def on_create_e2e(self, messages): relay_circuit.tunnel_data(message.candidate.sock_addr, TUNNEL_PREFIX + message.packet) else: self.tunnel_logger.info('On create e2e: create rendezvous point') - self.create_rendezvous_point(self.hops[message.payload.info_hash], + self.create_rendezvous_point(self.infohash_to_hop_dict[message.payload.info_hash], lambda rendezvous_point, message=message: self.create_created_e2e(rendezvous_point, message), message.payload.info_hash) @@ -532,14 +557,15 @@ def on_created_e2e(self, messages): # Since it is the seeder that chose the rendezvous_point, we're essentially losing 1 hop of anonymity # at the downloader end. To compensate we add an extra hop. 
- self.create_circuit(self.hops[cache.info_hash] + 1, + self.create_circuit(self.infohash_to_hop_dict[cache.info_hash] + 1, CIRCUIT_TYPE_RENDEZVOUS, callback=lambda circuit, cookie=rp_info[1], session_keys=session_keys, - info_hash=cache.info_hash, sock_addr=cache.sock_addr: self.create_link_e2e(circuit, - cookie, - session_keys, - info_hash, - sock_addr), + info_hash=cache.info_hash, + sock_addr=cache.sock_addr: self.create_link_e2e(circuit, + cookie, + session_keys, + info_hash, + sock_addr), required_endpoint=rp_info[0], info_hash=cache.info_hash) @@ -633,14 +659,15 @@ def callback(circuit): if self.notifier: self.notifier.notify(NTFY_TUNNEL, NTFY_IP_CREATED, info_hash.encode('hex')[:6], circuit_id) - for _ in range(amount): + for _ in xrange(amount): # Create a circuit to the introduction point + 1 hop, to prevent the introduction # point from knowing what the seeder is seeding - circuit_id = self.create_circuit(self.hops[info_hash] + 1, + circuit_id = self.create_circuit(self.infohash_to_hop_dict[info_hash] + 1, CIRCUIT_TYPE_IP, callback, info_hash=info_hash) - self.infohash_ip_circuits[info_hash].append((circuit_id, time.time())) + if circuit_id: + self.infohash_ip_circuits[info_hash].append((circuit_id, time.time())) def check_establish_intro(self, messages): for message in messages: @@ -722,7 +749,7 @@ def on_rendezvous_established(self, messages): rp = self.request_cache.pop(u"establish-rendezvous", message.payload.identifier).rp sock_addr = message.payload.rendezvous_point_addr - rp.rp_info = (sock_addr[0], sock_addr[1], self.crypto.key_to_bin(rp.circuit.hops[-1].public_key)) + rp.rp_info = (sock_addr[0], sock_addr[1], self.hops[rp.circuit.hops[-1]].node_public_key) rp.finished_callback(rp) def dht_lookup(self, info_hash, cb): @@ -741,5 +768,6 @@ def cb(info_hash, peers, source): else: self.tunnel_logger.error("Need a Tribler session to announce to the DHT") - def get_lookup_info_hash(self, info_hash): + @classmethod + def get_lookup_info_hash(cls, 
info_hash): return hashlib.sha1('tribler anonymous download' + info_hash.encode('hex')).digest() diff --git a/Tribler/community/tunnel/remotes/__init__.py b/Tribler/community/tunnel/remotes/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/Tribler/community/tunnel/routing.py b/Tribler/community/tunnel/remotes/circuit.py similarity index 54% rename from Tribler/community/tunnel/routing.py rename to Tribler/community/tunnel/remotes/circuit.py index 52580cf8819..6c3505f054f 100644 --- a/Tribler/community/tunnel/routing.py +++ b/Tribler/community/tunnel/remotes/circuit.py @@ -1,15 +1,13 @@ +import logging import time from Tribler.community.tunnel import CIRCUIT_STATE_READY, CIRCUIT_STATE_BROKEN, CIRCUIT_STATE_EXTENDING, \ CIRCUIT_TYPE_DATA -from Tribler.dispersy.crypto import LibNaCLPK +from Tribler.community.tunnel.remotes.remote_object import RemoteObject, shared from Tribler.dispersy.candidate import Candidate -import logging -__author__ = 'chris' - -class Circuit(object): +class Circuit(RemoteObject): """ Circuit data structure storing the id, state and hops """ @@ -24,15 +22,13 @@ def __init__(self, circuit_id, goal_hops=0, first_hop=None, proxy=None, :return: Circuit """ - from Tribler.community.tunnel.hidden_community import HiddenTunnelCommunity assert isinstance(circuit_id, long) assert isinstance(goal_hops, int) - assert proxy is None or isinstance(proxy, HiddenTunnelCommunity) assert first_hop is None or isinstance(first_hop, tuple) and isinstance( first_hop[0], basestring) and isinstance(first_hop[1], int) self._broken = False - self._hops = [] + self.hops = [] self.circuit_id = circuit_id self.first_hop = first_hop @@ -52,20 +48,73 @@ def __init__(self, circuit_id, goal_hops=0, first_hop=None, proxy=None, self._logger = logging.getLogger(self.__class__.__name__) - @property + @shared def hops(self): """ Return a read only tuple version of the hop-list of this circuit - @rtype tuple[Hop] + @rtype list[ hop public key] """ - return 
tuple(self._hops) + pass + + @shared + def _broken(self): + pass + + @shared(True) + def circuit_id(self): + pass + + @shared + def first_hop(self): + pass + + @shared + def goal_hops(self): + pass + + @shared + def creation_time(self): + pass + + @shared + def last_incoming(self): + pass + + @shared + def unverified_hop(self): + pass + + @shared + def bytes_up(self): + pass + + @shared + def bytes_down(self): + pass + + @shared + def ctype(self): + pass + + @shared + def required_endpoint(self): + pass + + @shared + def mid(self): + pass + + @shared + def info_hash(self): + pass def add_hop(self, hop): """ Adds a hop to the circuits hop collection - @param Hop hop: the hop to add + @param str hop public key: the hop to add """ - self._hops.append(hop) + assert isinstance(hop, basestring) + self.hops = self.hops + [hop] @property def state(self): @@ -96,9 +145,10 @@ def tunnel_data(self, destination, payload): """ self._logger.info("Tunnel data (len %d) to end for circuit %s with ultimate destination %s", len(payload), - self.circuit_id, destination) + self.circuit_id, destination) - num_bytes = self.proxy.send_data([Candidate(self.first_hop, False)], self.circuit_id, destination, ('0.0.0.0', 0), payload) + num_bytes = self.proxy.send_data([Candidate(tuple(self.first_hop), False)], self.circuit_id, + destination, ('0.0.0.0', 0), payload) self.proxy.increase_bytes_sent(self, num_bytes) if num_bytes == 0: @@ -114,92 +164,6 @@ def destroy(self, reason='unknown'): """ self._broken = True - -class Hop(object): - - """ - Circuit Hop containing the address, its public key and the first part of - the Diffie-Hellman handshake - """ - - def __init__(self, public_key=None): - """ - @param None|LibNaCLPK public_key: public key object of the hop - """ - - assert public_key is None or isinstance(public_key, LibNaCLPK) - - self.session_keys = None - self.dh_first_part = None - self.dh_secret = None - self.address = None - self.public_key = public_key - - @property - def 
host(self): - """ - The hop's hostname - """ - if self.address: - return self.address[0] - return " UNKNOWN HOST " - - @property - def port(self): - """ - The hop's port - """ - if self.address: - return self.address[1] - return " UNKNOWN PORT " - - @property - def node_id(self): - """ - The hop's nodeid - """ - if self.public_key: - return self.public_key.key_to_hash() - - raise RuntimeError("nodeid unknown") - - @property - def node_public_key(self): - """ - The hop's public_key - """ - if self.public_key: - return self.public_key.key_to_bin() - - raise RuntimeError("public key unknown") - - -class RelayRoute(object): - - """ - Relay object containing the destination circuit, socket address and whether - it is online or not - """ - - def __init__(self, circuit_id, sock_addr, rendezvous_relay=False, mid=None): - """ - @type sock_addr: (str, int) - @type circuit_id: int - @return: - """ - - self.sock_addr = sock_addr - self.circuit_id = circuit_id - self.creation_time = time.time() - self.last_incoming = time.time() - self.bytes_up = self.bytes_down = 0 - self.rendezvous_relay = rendezvous_relay - self.mid = mid - -class RendezvousPoint(object): - - def __init__(self, circuit, cookie, finished_callback): - self.circuit = circuit - self.cookie = cookie - self.finished_callback = finished_callback - self.rp_info = None + if self.proxy: + for hop_id in filter(set(self.hops).__contains__, self.proxy.hops.keys()): + self.proxy.hops.pop(hop_id) diff --git a/Tribler/community/tunnel/remotes/hop.py b/Tribler/community/tunnel/remotes/hop.py new file mode 100644 index 00000000000..fd39e191a77 --- /dev/null +++ b/Tribler/community/tunnel/remotes/hop.py @@ -0,0 +1,75 @@ +from Tribler.community.tunnel.remotes.remote_object import RemoteObject, shared +from Tribler.dispersy.crypto import LibNaCLPK + + +class Hop(RemoteObject): + + """ + Circuit Hop containing the address, its public key and the first part of + the Diffie-Hellman handshake + """ + + def __init__(self, 
public_key): + """ + @param None|LibNaCLPK public_key: public key object of the hop + """ + + assert isinstance(public_key, LibNaCLPK) + + self.session_keys = None + self.dh_first_part = None + self.dh_secret = None + self.address = None + self._public_key = public_key + + self.public_key = public_key + + @shared + def address(self): + pass + + @property + def public_key(self): + return self._public_key + + @public_key.setter + def public_key(self, value): + self._public_key = value + self.node_public_key = value.key_to_bin() + self.node_id = value.key_to_hash() + + @property + def host(self): + """ + The hop's hostname + """ + if self.address: + return self.address[0] + return " UNKNOWN HOST " + + @property + def port(self): + """ + The hop's port + """ + if self.address: + return self.address[1] + return " UNKNOWN PORT " + + @shared + def node_id(self): + """ + The hop's nodeid + """ + pass + + @shared + def node_public_key(self): + """ + The hop's public_key + """ + pass + + @shared(True) + def hop_id(self): + pass diff --git a/Tribler/community/tunnel/remotes/relayroute.py b/Tribler/community/tunnel/remotes/relayroute.py new file mode 100644 index 00000000000..273011d3eb6 --- /dev/null +++ b/Tribler/community/tunnel/remotes/relayroute.py @@ -0,0 +1,57 @@ +import time + +from Tribler.community.tunnel.remotes.remote_object import RemoteObject, shared + +class RelayRoute(RemoteObject): + + """ + Relay object containing the destination circuit, socket address and whether + it is online or not + """ + + def __init__(self, circuit_id, sock_addr, rendezvous_relay=False, mid=None): + """ + @type sock_addr: (str, int) + @type circuit_id: int + @return: + """ + + self.sock_addr = sock_addr + self.circuit_id = circuit_id + self.creation_time = time.time() + self.last_incoming = time.time() + self.bytes_up = self.bytes_down = 0 + self.rendezvous_relay = rendezvous_relay + self.mid = mid + + @shared + def sock_addr(self): + pass + + @shared(True) + def circuit_id(self): + 
pass + + @shared + def creation_time(self): + pass + + @shared + def last_incoming(self): + pass + + @shared + def bytes_up(self): + pass + + @shared + def bytes_down(self): + pass + + @shared + def rendezvous_relay(self): + pass + + @shared + def mid(self): + pass diff --git a/Tribler/community/tunnel/remotes/remote_object.py b/Tribler/community/tunnel/remotes/remote_object.py new file mode 100644 index 00000000000..ae3507a9469 --- /dev/null +++ b/Tribler/community/tunnel/remotes/remote_object.py @@ -0,0 +1,171 @@ +import json +import logging +from binascii import hexlify, unhexlify + + +def shared(is_id=False): + """ + Annotation function to flag a function as a field/property + of an object which needs to be serialized and unserialized + for synchronization across processes. + + :param is_id: this is the unique id of the class (for syncing) + :type is_id: bool or func + """ + def make_prop(f, id_field): + def get_val(cls): + return getattr(cls, '_' + f.__name__, None) + def set_val(cls, x): + if hasattr(cls, '_dirty_serializables'): + getattr(cls, + '_dirty_serializables').add(f.__name__) + else: + setattr(cls, + '_dirty_serializables', + set([f.__name__,])) + setattr(cls, '_' + f.__name__, x) + prop = property(get_val, + set_val, + None, + '__is_id__' + str(id_field)) + return prop + if callable(is_id): + return make_prop(is_id, False) + return lambda f: make_prop(f, is_id) + + +class RemoteObject(object): + + """ + A generic object which can be serialized and deserialized + """ + + def __is_dirty__(self): + """ + Have any of the shared fields been modified + + :return: True iff any of the @shared fields have been modified + :rtype: bool + """ + if hasattr(self, '_dirty_serializables'): + return len(self._dirty_serializables) > 0 + return False + + @staticmethod + def __extract_class_name__(s): + """ + Extract the class name from a serialized form + + :param s: the serialized object + :type s: str + :return: the class name + :rtype: str + """ + modif = 
s[:s.find(':')] + ':""}' + struct = json.loads(modif) + return struct.keys()[0] + + @classmethod + def __serialize__(cls, instance, only_update=True): + """ + Serialize an instance of a class to string + + :param cls: the RemoteObject class type to serialize as + :type cls: type + :param instance: the RemoteObject instance to serialize + :type instance: RemoteObject + :param only_update: only update modified fields, or everything + :type only_update: bool + :return: the string serialization of the instance + :rtype: str + """ + out = {} + updatables = instance._dirty_serializables \ + if only_update\ + and hasattr(instance, '_dirty_serializables')\ + else cls.__dict__ + for f in cls.__dict__: + doc = getattr(cls, f).__doc__ + if doc and doc.startswith('__is_id__'): + if doc == '__is_id__True' or f in updatables: + value = getattr(instance, f) + if isinstance(value, basestring): + out[f] = hexlify(value) + elif isinstance(value, (tuple, list)): + out[f] = map(lambda x: hexlify(x) + if isinstance(x, basestring) + else x, value) + else: + out[f] = value + if hasattr(instance, '_dirty_serializables'): + instance._dirty_serializables.clear() + try: + return json.dumps({cls.__name__: out}) + except UnicodeDecodeError: + logging.error("Failed to serialize " + str(out)) + return "" + + @classmethod + def __unserialize__(cls, s, known={}): + """ + Deserialize a string to a RemoteObject or update one + + :param cls: the RemoteObject class type to deserialize + :type cls: type + :param s: the string serialization + :type s: str + :param known: the id->obj dict of known objects + :type known: dict + :return: the object id and the deserialized object + :rtype: (str, RemoteObject) + """ + struct = json.loads(s) + + assert isinstance(struct, dict) + assert len(struct.keys()) == 1 + assert struct.keys()[0] == cls.__name__ + + # Find the id field + # And check the integrity + fields = struct[cls.__name__] + id_field = None + for f in fields: + doc = getattr(cls, f).__doc__ + if doc 
and doc.startswith('__is_id__'): + annotation = getattr(cls, f).__doc__[len('__is_id__'):] + if annotation == "True": + if id_field: + logging.error("Multiple id fields declared") + id_field = f + else: + logging.error("Tried setting " + str(f) + + "which is not shared!") + + assert id_field + + id_val = unhexlify(fields[id_field])\ + if isinstance(fields[id_field], basestring)\ + else fields[id_field] + + # Retrieve the object by unique id + # Or create a new object + in_known = id_val in known + out = known[id_val] if in_known else cls.__new__(cls) + if not in_known: + setattr(out, id_field, id_val) + + # Copy the fields from the input + for f in fields: + if f != id_field: + val = None + if isinstance(fields[f], basestring): + val = unhexlify(fields[f]) + elif isinstance(fields[f], list): + val = map(lambda x: unhexlify(x) + if isinstance(x, basestring) + else x, fields[f]) + else: + val = fields[f] + setattr(out, f, val) + + return (id_val, out) diff --git a/Tribler/community/tunnel/remotes/sync_dict.py b/Tribler/community/tunnel/remotes/sync_dict.py new file mode 100644 index 00000000000..eb9ca3bdf4d --- /dev/null +++ b/Tribler/community/tunnel/remotes/sync_dict.py @@ -0,0 +1,105 @@ +import logging + +from twisted.internet.task import LoopingCall + +from Tribler.community.tunnel.remotes.remote_object import RemoteObject + + +class SyncDict(dict): + + """ + A dictionary which syncs its items with other SyncDicts + + This class exists to synchronize RemoteObjects between a master and + a slave class. Once the SyncDict of the master changes, the slave + is updated at the next synchronization event (default = every 5 + seconds). Once a synchronization event occurs, the SyncDict will + produce a string with which to update the slave SyncDict. + + Optionally, one can also have localization on the slave through + an initialization callback. 
+ """ + + def __init__(self, cls, callback=lambda x: None, + init_callback=None, sync_interval=5.0): + """ + Create a new sync dict for a certain class type + + :param cls: the type of RemoteObject to store + :type cls: type + :param callback: the callback to call with serialized data + :type callback: func + :param init_callback: the callback to call to localize objects + :type init_callback: func + :param sync_interval: how often to check for dirty objects + :type sync_interval: float + :returns: None + """ + super(SyncDict, self).__init__() + + assert issubclass(cls, RemoteObject) + + self.cls = cls + self.sync_interval = sync_interval + self.callback = callback + self.init_callback = init_callback + + def register_sync_task(self, task_manager): + """ + Register a LoopingCall with a TaskManager + + :param task_manager: the TaskManager to register with + :type task_manager: Tribler.dispersy.taskmanager.TaskManager + :returns: None + """ + if self.sync_interval > 0: + task_manager.register_task("syncdict_" + str(id(self)), + LoopingCall(self.synchronize) + ).start(self.sync_interval, + now=True) + + def is_same_type(self, cls_name): + """ + Check if a class name is equal to our stored class + + :param cls_name: the name to check for + :type cls_name: str + :return: whether our class is the same as cls_name + :rtype: bool + """ + return self.cls.__name__ == cls_name + + def synchronize(self, only_update=True): + """ + Callback to check if any objects need to be synchronized + + Calls self.callback with a string as an argument (serialized + data) for each object that needs to be updated. 
+ + :param only_update: only sync changes to objects + :type only_update: bool + :returns: None + """ + for obj in self.values(): + if isinstance(obj, self.cls): + if obj.__is_dirty__(): + serialized = self.cls.__serialize__(obj, only_update) + self.callback(serialized) + else: + logging.error("Attempted to serialize " + + str(obj.__class__.__name__) + + " which is not a " + + self.cls.__name__) + + def on_synchronize(self, value): + """ + Synchronize with a frame from another SyncDict + + :param value: the update frame + :type value: str + :returns: None + """ + obj_id, obj = self.cls.__unserialize__(value, self) + if self.init_callback: + self.init_callback(obj_id, obj) + self[obj_id] = obj diff --git a/Tribler/community/tunnel/tunnel_community.py b/Tribler/community/tunnel/tunnel_community.py index 88faec9f6e8..5a6af0f22a2 100644 --- a/Tribler/community/tunnel/tunnel_community.py +++ b/Tribler/community/tunnel/tunnel_community.py @@ -25,7 +25,10 @@ ExtendedPayload, PingPayload, PongPayload, StatsRequestPayload, StatsResponsePayload, TunnelIntroductionRequestPayload, TunnelIntroductionResponsePayload) -from Tribler.community.tunnel.routing import Circuit, Hop, RelayRoute +from Tribler.community.tunnel.remotes.circuit import Circuit +from Tribler.community.tunnel.remotes.hop import Hop +from Tribler.community.tunnel.remotes.relayroute import RelayRoute +from Tribler.community.tunnel.remotes.sync_dict import SyncDict from Tribler.dispersy.authentication import MemberAuthentication, NoAuthentication from Tribler.dispersy.candidate import Candidate from Tribler.dispersy.community import Community @@ -215,7 +218,7 @@ def __init__(self, tribler_session=None): self.tunnel_logger = logging.getLogger('TunnelLogger') self.crypto = TunnelCrypto() - self.socks_listen_ports = range(1080, 1085) + self.socks_listen_ports = xrange(1080, 1085) self.min_circuits = 4 self.max_circuits = 8 @@ -278,7 +281,9 @@ def __init__(self, *args, **kwargs): self.tunnel_logger = 
logging.getLogger('TunnelLogger') self.data_prefix = "fffffffe".decode("HEX") - self.circuits = {} + self.process = None + self.circuits = SyncDict(Circuit, callback=self.on_sync_data) + self.hops = SyncDict(Hop, callback=self.on_sync_data) self.directions = {} self.relay_from_to = {} self.relay_session_keys = {} @@ -297,7 +302,7 @@ def __init__(self, *args, **kwargs): self.trsession = self.settings = self.socks_server = None - def initialize(self, tribler_session=None, settings=None): + def initialize(self, tribler_session=None, settings=None, is_subprocess=False): self.trsession = tribler_session self.settings = settings if settings else TunnelSettings(tribler_session=tribler_session) @@ -305,18 +310,23 @@ def initialize(self, tribler_session=None, settings=None): super(TunnelCommunity, self).initialize() + self.circuits.register_sync_task(self) + self.hops.register_sync_task(self) + assert isinstance(self.settings.crypto, TunnelCrypto), self.settings.crypto self.crypto.initialize(self) self.dispersy.endpoint.listen_to(self.data_prefix, self.on_data) - self.register_task("do_circuits", LoopingCall(self.do_circuits)).start(5, now=True) self.register_task("do_ping", LoopingCall(self.do_ping)).start(PING_INTERVAL) - self.socks_server = Socks5Server(self, tribler_session.get_tunnel_community_socks5_listen_ports() - if tribler_session else self.settings.socks_listen_ports) - self.socks_server.start() + if not is_subprocess: + self.register_task("do_circuits", LoopingCall(self.do_circuits)).start(5, now=True) + + self.socks_server = Socks5Server(self, tribler_session.get_tunnel_community_socks5_listen_ports() + if tribler_session else self.settings.socks_listen_ports) + self.socks_server.start() if self.trsession: self.notifier = self.trsession.notifier @@ -394,7 +404,8 @@ def initiate_conversions(self): @inlineCallbacks def unload_community(self): - yield self.socks_server.stop() + if self.socks_server: + yield self.socks_server.stop() # Remove all 
circuits/relays/exitsockets for circuit_id in self.circuits.keys(): @@ -437,7 +448,7 @@ def do_circuits(self): for circuit_length, num_circuits in self.circuits_needed.items(): num_to_build = num_circuits - len(self.data_circuits(circuit_length)) self.tunnel_logger.info("want %d data circuits of length %d", num_to_build, circuit_length) - for _ in range(num_to_build): + for _ in xrange(num_to_build): if not self.create_circuit(circuit_length): self.tunnel_logger.info("circuit creation of %d circuits failed, no need to continue" % num_to_build) @@ -580,9 +591,12 @@ def create_circuit(self, goal_hops, ctype=CIRCUIT_TYPE_DATA, callback=None, requ self.request_cache.add(CircuitRequestCache(self, circuit)) - circuit.unverified_hop = Hop(first_hop.get_member()._ec) - circuit.unverified_hop.address = first_hop.sock_addr - circuit.unverified_hop.dh_secret, circuit.unverified_hop.dh_first_part = self.crypto.generate_diffie_secret() + unverified_hop = Hop(first_hop.get_member()._ec) + unverified_hop.hop_id = str(circuit_id) + str(id(unverified_hop)) + circuit.unverified_hop = unverified_hop.hop_id + self.hops[unverified_hop.hop_id] = unverified_hop + unverified_hop.address = first_hop.sock_addr + unverified_hop.dh_secret, unverified_hop.dh_first_part = self.crypto.generate_diffie_secret() self.tunnel_logger.info("creating circuit %d of %d hops. 
First hop: %s:%d", circuit_id, circuit.goal_hops, first_hop.sock_addr[0], first_hop.sock_addr[1]) @@ -591,9 +605,9 @@ def create_circuit(self, goal_hops, ctype=CIRCUIT_TYPE_DATA, callback=None, requ self.increase_bytes_sent(circuit, self.send_cell([first_hop], u"create", (circuit_id, - circuit.unverified_hop.node_id, - circuit.unverified_hop.node_public_key, - circuit.unverified_hop.dh_first_part))) + unverified_hop.node_id, + unverified_hop.node_public_key, + unverified_hop.dh_first_part))) _barter_statistics.dict_inc_bartercast(BartercastStatisticTypes.TUNNELS_CREATED, "%s:%s" % (first_hop.sock_addr[0], first_hop.sock_addr[1])) @@ -625,22 +639,25 @@ def remove_circuit(self, circuit_id, additional_info='', destroy=False): self.notifier.notify(NTFY_TUNNEL, NTFY_REMOVE, circuit, self.copy_shallow_candidate(circuit, peer)) circuit.destroy() - affected_peers = self.socks_server.circuit_dead(circuit) - ltmgr = self.trsession.lm.ltmgr if self.trsession and self.trsession.get_libtorrent() else None - if ltmgr: - affected_torrents = {d: affected_peers.intersection(peer.ip for peer in d.handle.get_peer_info()) - for d, s in ltmgr.torrents.values() if s == ltmgr.get_session(d.get_hops())} - - for download, peers in affected_torrents.iteritems(): - if peers: - if download not in self.bittorrent_peers: - self.bittorrent_peers[download] = peers - else: - self.bittorrent_peers[download] = peers | self.bittorrent_peers[download] - - # If there are active circuits, add peers immediately. Otherwise postpone. 
- if self.active_data_circuits(): - self.readd_bittorrent_peers() + if self.socks_server: + affected_peers = self.socks_server.circuit_dead(circuit) + ltmgr = self.trsession.lm.ltmgr if self.trsession and self.trsession.get_libtorrent() else None + if ltmgr: + affected_torrents = {d: affected_peers.intersection(peer.ip for peer in d.handle.get_peer_info()) + for d, s in ltmgr.torrents.values() if s == ltmgr.get_session(d.get_hops())} + + for download, peers in affected_torrents.iteritems(): + if peers: + if download not in self.bittorrent_peers: + self.bittorrent_peers[download] = peers + else: + self.bittorrent_peers[download] = peers | self.bittorrent_peers[download] + + # If there are active circuits, add peers immediately. Otherwise postpone. + if self.active_data_circuits(): + self.readd_bittorrent_peers() + elif self.process: + self.process.circuit_dead(circuit.circuit_id) return True return False @@ -910,7 +927,8 @@ def check_destroy(self, messages): yield message def _ours_on_created_extended(self, circuit, message): - hop = circuit.unverified_hop + hop_id = circuit.unverified_hop + hop = self.hops[hop_id] try: shared_secret = self.crypto.verify_and_generate_shared_secret(hop.dh_secret, message.payload.key, @@ -921,11 +939,11 @@ def _ours_on_created_extended(self, circuit, message): self.remove_circuit(circuit.circuit_id, "error while verifying shared secret, bailing out.") return - circuit.add_hop(hop) + circuit.add_hop(hop_id) circuit.unverified_hop = None if circuit.state == CIRCUIT_STATE_EXTENDING: - ignore_candidates = [self.crypto.key_to_bin(hop.public_key) for hop in circuit.hops] + \ + ignore_candidates = [self.hops[ahop].node_public_key for ahop in circuit.hops] + \ [self.my_member.public_key] if circuit.required_endpoint: ignore_candidates.append(circuit.required_endpoint[2]) @@ -948,7 +966,7 @@ def _ours_on_created_extended(self, circuit, message): if ignore_candidate in candidate_list: candidate_list.remove(ignore_candidate) - for i in 
range(len(candidate_list) - 1, -1, -1): + for i in xrange(len(candidate_list) - 1, -1, -1): public_key = self.crypto.key_from_public_bin(candidate_list[i]) if not self.crypto.is_key_compatible(public_key): candidate_list.pop(i) @@ -959,8 +977,11 @@ def _ours_on_created_extended(self, circuit, message): if extend_hop_public_bin: extend_hop_public_key = self.dispersy.crypto.key_from_public_bin(extend_hop_public_bin) - circuit.unverified_hop = Hop(extend_hop_public_key) - circuit.unverified_hop.dh_secret, circuit.unverified_hop.dh_first_part = \ + unverified_hop = Hop(extend_hop_public_key) + unverified_hop.hop_id = str(circuit.circuit_id) + str(id(unverified_hop)) + self.hops[unverified_hop.hop_id] = unverified_hop + circuit.unverified_hop = unverified_hop.hop_id + unverified_hop.dh_secret, unverified_hop.dh_first_part = \ self.crypto.generate_diffie_secret() self.tunnel_logger.info("extending circuit %d with %s", circuit.circuit_id, @@ -969,10 +990,10 @@ def _ours_on_created_extended(self, circuit, message): self.increase_bytes_sent(circuit, self.send_cell([Candidate(circuit.first_hop, False)], u"extend", (circuit.circuit_id, - circuit.unverified_hop.node_id, - circuit.unverified_hop.node_public_key, + unverified_hop.node_id, + unverified_hop.node_public_key, extend_hop_addr, - circuit.unverified_hop.dh_first_part))) + unverified_hop.dh_first_part))) else: self.remove_circuit(circuit.circuit_id, "no candidates to extend, bailing out.") @@ -1208,7 +1229,10 @@ def on_data(self, sock_addr, packet): False, source=u"circuit_%d" % circuit_id) else: anon_seed = circuit.ctype == CIRCUIT_TYPE_RP - self.socks_server.on_incoming_from_tunnel(self, circuit, origin, data, anon_seed) + if self.socks_server: + self.socks_server.on_incoming_from_tunnel(self, circuit, origin, data, anon_seed) + elif self.process: + self.process.on_incoming_from_tunnel(circuit, origin, data, anon_seed) # It is not our circuit so we got it from a relay, we need to EXIT it! 
else: @@ -1307,7 +1331,8 @@ def crypto_out(self, circuit_id, content, is_data=False): direction = int(circuit.ctype == CIRCUIT_TYPE_RP) content = self.crypto.encrypt_str(content, *self.get_session_keys(circuit.hs_session_keys, direction)) - for hop in reversed(circuit.hops): + for hop_id in reversed(circuit.hops): + hop = self.hops[hop_id] content = self.crypto.encrypt_str(content, *self.get_session_keys(hop.session_keys, EXIT_NODE)) return content @@ -1323,7 +1348,8 @@ def crypto_in(self, circuit_id, content, is_data=False): if len(circuit.hops) > 0: # Remove all the encryption layers layer = 0 - for hop in self.circuits[circuit_id].hops: + for hop_id in self.circuits[circuit_id].hops: + hop = self.hops[hop_id] layer += 1 try: content = self.crypto.decrypt_str(content, @@ -1432,3 +1458,22 @@ def increase_bytes_received(self, obj, num_bytes): else: raise TypeError("Increase_bytes_received() was called with an object that is not a Circuit, " + "RelayRoute or TunnelExitSocket") + + def set_process(self, process): + """Set the TunnelSubprocess we belong to + + :param process: the TunnelSubprocess we belong to + """ + self.process = process + self.circuits.synchronize(False) + self.hops.synchronize(False) + + def on_sync_data(self, data): + """ + Callback for when a SyncDict wants to share data with + another process. 
+ + :param data: the serialized data to send + """ + if self.process: + self.process.sync(data) From bbcca16d4eb0436d093beac92ce024fe99df1f42 Mon Sep 17 00:00:00 2001 From: qstokkink Date: Sun, 26 Feb 2017 11:49:37 +0100 Subject: [PATCH 2/6] Integrated pooled tunnel process classes --- .../Core/APIImplementation/LaunchManyCore.py | 18 +- Tribler/community/tunnel/Socks5/conversion.py | 2 +- Tribler/community/tunnel/Socks5/server.py | 3 +- .../tunnel/pooled_tunnel_community.py | 370 ++++++++++++++++++ .../community/tunnel/processes/__init__.py | 24 ++ .../tunnel/processes/childprocess.py | 253 ++++++++++++ .../community/tunnel/processes/iprocess.py | 89 +++++ .../community/tunnel/processes/line_util.py | 100 +++++ .../tunnel/processes/processmanager.py | 205 ++++++++++ .../community/tunnel/processes/rpc_defs.py | 25 ++ .../community/tunnel/processes/rpcprocess.py | 191 +++++++++ .../community/tunnel/processes/subprocess.py | 254 ++++++++++++ .../tunnel/processes/tunnel_childprocess.py | 260 ++++++++++++ .../tunnel/processes/tunnel_subprocess.py | 274 +++++++++++++ .../community/tunnel/subprocess_launcher.py | 70 ++++ 15 files changed, 2133 insertions(+), 5 deletions(-) create mode 100644 Tribler/community/tunnel/pooled_tunnel_community.py create mode 100644 Tribler/community/tunnel/processes/__init__.py create mode 100644 Tribler/community/tunnel/processes/childprocess.py create mode 100644 Tribler/community/tunnel/processes/iprocess.py create mode 100644 Tribler/community/tunnel/processes/line_util.py create mode 100644 Tribler/community/tunnel/processes/processmanager.py create mode 100644 Tribler/community/tunnel/processes/rpc_defs.py create mode 100644 Tribler/community/tunnel/processes/rpcprocess.py create mode 100644 Tribler/community/tunnel/processes/subprocess.py create mode 100644 Tribler/community/tunnel/processes/tunnel_childprocess.py create mode 100644 Tribler/community/tunnel/processes/tunnel_subprocess.py create mode 100644 
Tribler/community/tunnel/subprocess_launcher.py diff --git a/Tribler/Core/APIImplementation/LaunchManyCore.py b/Tribler/Core/APIImplementation/LaunchManyCore.py index f2c88b202d8..0521b153249 100644 --- a/Tribler/Core/APIImplementation/LaunchManyCore.py +++ b/Tribler/Core/APIImplementation/LaunchManyCore.py @@ -223,6 +223,10 @@ def load_communities(self): tunnel_settings = TunnelSettings(tribler_session=self.session) tunnel_kwargs = {'tribler_session': self.session, 'settings': tunnel_settings} + import sys + if '--tunnel_subprocess' in sys.argv: + tunnel_kwargs['is_subprocess'] = True + if self.session.get_enable_multichain(): multichain_kwargs = {'tribler_session': self.session} @@ -237,9 +241,17 @@ def load_communities(self): load=True, kargs=multichain_kwargs) - from Tribler.community.tunnel.hidden_community_multichain import HiddenTunnelCommunityMultichain - self.tunnel_community = self.dispersy.define_auto_load( - HiddenTunnelCommunityMultichain, dispersy_member, load=True, kargs=tunnel_kwargs)[0] + if self.session.get_tunnel_community_pooled(): + # Load the pooled HiddenTunnelCommunityMultichain + from Tribler.community.tunnel.pooled_tunnel_community import PooledTunnelCommunity + self.tunnel_community = self.dispersy.define_auto_load( + PooledTunnelCommunity, dispersy_member, load=True, kargs=tunnel_kwargs)[0] + else: + # Load the normal HiddenTunnelCommunityMultichain + from Tribler.community.tunnel.hidden_community_multichain \ + import HiddenTunnelCommunityMultichain + self.tunnel_community = self.dispersy.define_auto_load( + HiddenTunnelCommunityMultichain, dispersy_member, load=True, kargs=tunnel_kwargs)[0] else: keypair = self.dispersy.crypto.generate_key(u"curve25519") dispersy_member = self.dispersy.get_member(private_key=self.dispersy.crypto.key_to_bin(keypair)) diff --git a/Tribler/community/tunnel/Socks5/conversion.py b/Tribler/community/tunnel/Socks5/conversion.py index 144b1d94f91..aa2386abebc 100644 --- 
a/Tribler/community/tunnel/Socks5/conversion.py +++ b/Tribler/community/tunnel/Socks5/conversion.py @@ -106,7 +106,7 @@ def decode_methods_request(offset, data): offset += 2 methods = set([]) - for i in range(number_of_methods): + for _ in xrange(number_of_methods): method, = struct.unpack_from("!B", data, offset) methods.add(method) offset += 1 diff --git a/Tribler/community/tunnel/Socks5/server.py b/Tribler/community/tunnel/Socks5/server.py index 428511ac8c0..3aab0544d20 100644 --- a/Tribler/community/tunnel/Socks5/server.py +++ b/Tribler/community/tunnel/Socks5/server.py @@ -255,7 +255,8 @@ def circuit_dead(self, broken_circuit): @return Set with destinations using this circuit """ affected_destinations = set( - destination for destination, tunnel_circuit in self.destinations.iteritems() if tunnel_circuit == broken_circuit) + destination for destination, tunnel_circuit in frozenset(self.destinations.iteritems()) + if tunnel_circuit == broken_circuit) counter = 0 for destination in affected_destinations: if destination in self.destinations: diff --git a/Tribler/community/tunnel/pooled_tunnel_community.py b/Tribler/community/tunnel/pooled_tunnel_community.py new file mode 100644 index 00000000000..14f6e94b698 --- /dev/null +++ b/Tribler/community/tunnel/pooled_tunnel_community.py @@ -0,0 +1,370 @@ +import logging +from collections import defaultdict + +from twisted.internet.defer import inlineCallbacks, returnValue +from twisted.internet.task import LoopingCall + +from Tribler.community.tunnel import CIRCUIT_TYPE_DATA, CIRCUIT_STATE_READY +from Tribler.community.tunnel.hidden_community import HiddenTunnelCommunity +from Tribler.community.tunnel.tunnel_community import RoundRobin, TunnelSettings +from Tribler.community.tunnel.Socks5.server import Socks5Server +from Tribler.community.tunnel.remotes.sync_dict import SyncDict +from Tribler.community.tunnel.remotes.circuit import Circuit +from Tribler.community.tunnel.remotes.hop import Hop +from 
Tribler.community.tunnel.processes.processmanager import ProcessManager +from Tribler.Core.simpledefs import NTFY_TUNNEL, NTFY_REMOVE +from Tribler.dispersy.candidate import Candidate +from Tribler.dispersy.community import Community +from Tribler.dispersy.conversion import DefaultConversion +from Tribler.dispersy.util import blocking_call_on_reactor_thread + + +class PooledTunnelCommunity(Community): + + """ + A TunnelCommunity using a pool of processes + + This delegates to multiple child processes running their own + TunnelCommunity (and Session) instance. + + This is designed such that it can be freely interchanged for + a normal TunnelCommunity. + """ + + def __init__(self, *args, **kwargs): + """ + Create a new PooledTunnelCommunity + + :param args: additional arguments + :type args: list + :param kwargs: additional keyword-arguments + :type kwargs: dict + """ + super(PooledTunnelCommunity, self).__init__(*args, **kwargs) + self.pool = None + self.tunnel_logger = logging.getLogger('TunnelLogger') + self.circuits = SyncDict(Circuit, self, + init_callback=self.init_circuit, + sync_interval=0) + self.hops = SyncDict(Hop, self, sync_interval=0) + self.notifier = None + self.trsession = None + self.settings = None + self.socks_server = None + self.circuits_needed = defaultdict(int) + self.num_hops_by_downloads = defaultdict(int) # Keeps track of the number of hops required by downloads + self.download_states = {} + self.bittorrent_peers = {} + self.selection_strategy = RoundRobin(self) + + def initialize(self, tribler_session=None, settings=None): + """ + Initialize the PooledTunnelCommunity + + :param tribler_session: the Tribler Session to use + :type tribler_session: Tribler.Core.Session.Session + :param settings: the TunnelSettings to use + :type settings: Tribler.community.tunnel.tunnel_community.TunnelSettings + :returns: None + """ + self.trsession = tribler_session + self.settings = settings if settings\ + else TunnelSettings(tribler_session=tribler_session) + + 
super(PooledTunnelCommunity, self).initialize() + + self.socks_server = Socks5Server(self, + tribler_session.get_tunnel_community_socks5_listen_ports() + if tribler_session else self.settings.socks_listen_ports) + self.socks_server.start() + + if self.trsession: + self.notifier = self.trsession.notifier + + # Multiply the single TunnelSettings by the amount of + # workers we can handle. + self.pool = ProcessManager(tribler_session, self) + suggested_workers = self.pool.get_suggested_workers() + self.pool.set_worker_count(suggested_workers) + self.settings.max_circuits *= suggested_workers + + self.register_task("do_circuits", + LoopingCall(self.do_circuits)).start(5, now=True) + + @blocking_call_on_reactor_thread + @inlineCallbacks + def unload_community(self): + """ + Callback for when this community should be unloaded + + :returns: None + """ + yield self.pool.set_worker_count(0) + yield super(PooledTunnelCommunity, self).unload_community() + yield self.socks_server.stop() + + @classmethod + def get_master_members(cls, dispersy): + """ + Return the normal HiddenTunnelCommunity master member + + :param dispersy: the dispersy instance to use + :type dispersy: Tribler.dispersy.dispersy.Dispersy + :return: the master member + :rtype: Tribler.dispersy.member.Member + """ + return HiddenTunnelCommunity.get_master_members(dispersy) + + def initiate_conversions(self): + """ + Create the conversions for this community + + Since it doesn't communicate, we need none. 
+ + :return: the list of our conversions + :rtype: [Tribler.dispersy.conversion.Conversion] + """ + return [DefaultConversion(self)] + + @property + def dispersy_enable_candidate_walker(self): + """ + Disable sending introduction-requests + + :return: whether we should enable introduction-requests + :rtype: bool + """ + return False + + @property + def dispersy_enable_candidate_walker_responses(self): + """ + Disable sending introduction-responses + + :return: whether we should enable introduction-responses + :rtype: bool + """ + return False + + def init_circuit(self, circuit_id, circuit): + """ + Callback for when a synced circuit is localized + + :param circuit_id: the circuit's id + :type circuit_id: long + :param circuit: the circuit + :type circuit: Tribler.community.tunnel.remotes.circuit.Circuit + :returns: None + """ + setattr(circuit, 'proxy', self) + setattr(circuit, '_logger', logging.getLogger(circuit.__class__.__name__)) + + def ip_to_circuit_id(self, ip_str): + """ + Compatibility method, see HiddenTunnelCommunity + This is to provide the same interface + + TODO: Refactor to shared abstract class + """ + return HiddenTunnelCommunity.ip_to_circuit_id(ip_str) + + def tunnels_ready(self, hops): + """ + Compatibility method, see TunnelCommunity + This is to provide the same interface + + TODO: This is duplicate code, refactor to shared abstract class + """ + if hops > 0: + if self.settings.min_circuits: + return min(1, len(self.active_data_circuits(hops)) / float(self.settings.min_circuits)) + else: + return 1 if self.active_data_circuits(hops) else 0 + return 1 + + def build_tunnels(self, hops): + """ + Compatibility method, see TunnelCommunity + This is to provide the same interface + + TODO: This is duplicate code, refactor to shared abstract class + """ + if hops > 0: + self.num_hops_by_downloads[hops] += 1 + self.circuits_needed[hops] = max(1, self.settings.max_circuits, self.circuits_needed[hops]) + self.do_circuits() + + def 
on_download_removed(self, download): + """ + Compatibility method, see TunnelCommunity + This is to provide the same interface + + TODO: This is duplicate code, refactor to shared abstract class + """ + if download.get_hops() > 0: + self.num_hops_by_downloads[download.get_hops()] -= 1 + if self.num_hops_by_downloads[download.get_hops()] == 0: + self.circuits_needed[download.get_hops()] = 0 + + @inlineCallbacks + def do_circuits(self): + """ + Callback for when we need to try and reach our required + amount of circuits + + TODO: This is partially duplicate code, try to refactor part of it to a shared class? + """ + for circuit_length, num_circuits in self.circuits_needed.items(): + num_to_build = num_circuits - len(self.data_circuits(circuit_length)) + self.tunnel_logger.info("want %d data circuits of length %d", num_to_build, circuit_length) + success = 0 + for _ in xrange(num_to_build): + rval = yield self.create_circuit(circuit_length) + if rval: + success += 1 + if success < num_to_build: + self.tunnel_logger.info("circuit creation of %d circuits failed, no need to continue", + num_to_build) + + def data_circuits(self, hops=None): + """ + Compatibility method, see TunnelCommunity + This is to provide the same interface + + TODO: This is duplicate code, refactor to shared abstract class + """ + return {cid: c for cid, c in self.circuits.items() + if c.ctype == CIRCUIT_TYPE_DATA and (hops is None or hops == len(c.hops))} + + def active_data_circuits(self, hops=None): + """ + Compatibility method, see TunnelCommunity + This is to provide the same interface + + TODO: This is duplicate code, refactor to shared abstract class + """ + return {cid: c for cid, c in self.circuits.items() + if c.state == CIRCUIT_STATE_READY and c.ctype == CIRCUIT_TYPE_DATA and + (hops is None or hops == len(c.hops))} + + def readd_bittorrent_peers(self): + """ + Compatibility method, see TunnelCommunity + This is to provide the same interface + + TODO: This is duplicate code, refactor to 
shared abstract class + """ + for torrent, peers in self.bittorrent_peers.items(): + infohash = torrent.tdef.get_infohash() + for peer in peers: + self.tunnel_logger.info("Re-adding peer %s to torrent %s", peer, infohash.encode("hex")) + torrent.add_peer(peer) + if not self.trsession.has_download(infohash): + del self.bittorrent_peers[torrent] + + def remove_circuit(self, circuit_id): + """ + Compatibility method, see TunnelCommunity + This is to provide the same interface + + TODO: This is partially duplicate code, refactor to shared abstract class + """ + if circuit_id in self.circuits: + circuit = self.circuits.pop(circuit_id) + + if circuit.hops: + hop = self.hops[circuit.hops[0]] + candidate = Candidate((circuit.first_hop[0], circuit.first_hop[1]), False) + candidate.associate(self.dispersy.get_member(public_key=hop.node_public_key)) + self.notifier.notify(NTFY_TUNNEL, NTFY_REMOVE, circuit, candidate) + + affected_peers = self.socks_server.circuit_dead(circuit) + ltmgr = self.trsession.lm.ltmgr if self.trsession and self.trsession.get_libtorrent() else None + if ltmgr: + affected_torrents = {d: affected_peers.intersection(peer.ip for peer in d.handle.get_peer_info()) + for d, s in ltmgr.torrents.values() if s == ltmgr.get_session(d.get_hops())} + + for download, peers in affected_torrents.iteritems(): + infohash = download.tdef.get_infohash() + if peers and self.trsession.has_download(infohash): + if download not in self.bittorrent_peers: + self.bittorrent_peers[download] = peers + else: + self.bittorrent_peers[download] = peers | self.bittorrent_peers[download] + + # If there are active circuits, add peers immediately. Otherwise postpone. 
+ if self.active_data_circuits(): + self.readd_bittorrent_peers() + + def monitor_downloads(self, dslist): + """ + Monitor a list of downloads + + :param dslist: the list of downloads to monitor + :type dslist: [Tribler.Core.DownloadState.DownloadState] + :returns: None + """ + infohashes = HiddenTunnelCommunity.downloads_to_infohash_tuples(dslist) + self.download_states = {HiddenTunnelCommunity.get_lookup_info_hash(infohash_t[0]): infohash_t[2] + for infohash_t in infohashes} + self.pool.monitor_infohashes(infohashes) + + def send_data(self, candidates, circuit_id, dest_address, source_address, data): + """ + Send data over a circuit + + :param candidates: the candidates to use + :type candidates: Tribler.dispersy.candidate.Candidate + :param circuit_id: the circuit id to send over + :type circuit_id: long + :param dest_address: the destination address to send to + :type dest_address: (str, int) + :param source_address: the source address to send from + :type source_address: (str, int) + :param data: the data to send + :type data: str + :return: the length of the data sent + :rtype: int + """ + if circuit_id not in self.circuits.keys(): + self.tunnel_logger.error("Tried to send data over a removed circuit") + if len(self.circuits.keys()) > 0: + self.pool.send_data(candidates, self.circuits.keys()[0], dest_address, source_address, data) + return len(data) + else: + return 0 + self.pool.send_data(candidates, circuit_id, dest_address, source_address, data) + return len(data) + + @inlineCallbacks + def create_circuit(self, goal_hops, ctype=CIRCUIT_TYPE_DATA, + callback=None, required_endpoint=None, + info_hash=None): + """ + Try to create a circuit + + :param goal_hops: the hop count in the circuit + :type goal_hops: int + :param ctype: type of circuit to create + :type ctype: str + :param callback: the callback for when it is created + :type callback: func + :param required_endpoint: the endpoint to use + :type required_endpoint: (str, int ,str) + :param info_hash: 
the infohash to assign to this circuit + :type info_hash: str + :return: the newly created circuit id or False + :rtype: long or False + """ + circuit_id = yield self.pool.create_circuit(goal_hops, ctype, required_endpoint, info_hash) + if circuit_id: + self.readd_bittorrent_peers() + returnValue(circuit_id) + + def increase_bytes_sent(self, obj, num_bytes): + """ + Compatibility method, see TunnelCommunity + + This is disabled because we don't own the circuits. + """ + pass diff --git a/Tribler/community/tunnel/processes/__init__.py b/Tribler/community/tunnel/processes/__init__.py new file mode 100644 index 00000000000..4c51cc15cd2 --- /dev/null +++ b/Tribler/community/tunnel/processes/__init__.py @@ -0,0 +1,24 @@ +""" +Child File Descriptors are only supported by Twisted for Linux. +If support is added for other platforms, add them to CHILDFDS_ENABLED. +Possible values are (see platform.system): + - 'Linux' + - 'Windows' + - 'Java' + - '' +""" + +from platform import system + +CHILDFDS_ENABLED = system() in ['Linux',] + +# Set the Windows IO streams to binary mode +try: + import msvcrt # Import error if not on Windows + import os, sys + msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY) + msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) + msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY) + os.environ['PYTHONUNBUFFERED'] = '1' +except ImportError: + pass diff --git a/Tribler/community/tunnel/processes/childprocess.py b/Tribler/community/tunnel/processes/childprocess.py new file mode 100644 index 00000000000..4810e9750a1 --- /dev/null +++ b/Tribler/community/tunnel/processes/childprocess.py @@ -0,0 +1,253 @@ +import logging +import sys +from os import environ, kill +from os.path import isfile, join + +from twisted.internet import reactor +from twisted.internet.defer import Deferred +from twisted.internet.protocol import ProcessProtocol + +from Tribler.community.tunnel.processes import CHILDFDS_ENABLED +from Tribler.community.tunnel.processes.iprocess import IProcess 
+from Tribler.community.tunnel.processes.line_util import pack_data, unpack_complex + +CUSTOM_FDS = {0: 0, # std in + 1: 1, # std out + 2: 2, # std err + 3: "w", # ctrl in + 4: "r", # ctrl out + 5: "w", # data in + 6: "r", # data out + 7: "w", # exit in + 8: "r"} # exit out + + +class ChildProcess(ProcessProtocol, IProcess): + + """ + Wrapper for a child process + + Used for creating child processes and communicating + with them. To be overwritten for advanced + functionality. + """ + + def __init__(self): + """ + Initialize a ChildProcess and spawn it + + This spawns a process in the only multiplatform + portable way. Using whatever executable and + whatever environment we are already using. + Only adding the --tunnel_subprocess arg. + + :returns: None + """ + self.pid = None + + super(ChildProcess, self).__init__() + + # Raw input buffers + self.databuffers = {1: "", 2: "", 4: "", 6: "", 8: ""} + # Input callbacks + self.input_callbacks = {1: self.on_generic, + 2: self.on_stderr, + 4: self.on_ctrl, + 6: self.on_data, + 8: self.on_exit} + + # Process is responsive + self.started = Deferred() + # One or more of the file descriptors closed unexpectedly + self.broken = False + + # sys.path may include more than the executable path + fixed_path = None + for d in sys.path: + if isfile(join(d, sys.argv[0])): + fixed_path = d + break + + # twistd can't deal with multiple instances + # supplying unused pid and logfiles to facilitate this + params = sys.argv + ["--tunnel_subprocess"] + if sys.argv[0].endswith("twistd"): + params = [params[0]] + ["--pidfile", ".pidfile", "--logfile", ".logfile"] + params[1:] + + # Spawn the actual process + self._spawn_process(sys.executable, params, fixed_path, CUSTOM_FDS if CHILDFDS_ENABLED else None) + + def _spawn_process(self, executable, params, path, fds): + """ + Spawn a process + + :param executable: the executable to spawn + :type executable: str + :param params: the command line parameters to use + :type params: [str] + :param 
path: the PATH to use for execution + :type path: str + :param fds: the file descriptors to use + :type fds: {int: str or int} or None + :returns: None + """ + if fds: + reactor.spawnProcess(self, + executable, + [executable] + + params, + env=environ, + path=path, + childFDs=fds) + else: + reactor.spawnProcess(self, + executable, + [executable] + + params, + env=environ, + path=path) + + def on_generic(self, msg): + """ + Callback for when a multiplexed message is sent over + a stream. These have their intended stream identifier + as the first character. + + :param msg: the received message + :type msg: str + :returns: None + """ + data = msg[1:] + stream = int(msg[0]) + self.input_callbacks[stream](data) + + def on_stderr(self, msg): + """ + Callback for when the child process writes to stderr + + :param msg: the message to write + :type msg: str + :returns: None + """ + def print_later(m): + print >> sys.stderr, "[CHILDPROCESS]", m + reactor.callFromThread(print_later, msg) + + def write_ctrl(self, msg): + """ + Write a control message to the process + + :param msg: the message to send + :type msg: str + :returns: None + """ + reactor.callFromThread(self.raw_write, 3, msg) + + def write_data(self, msg): + """ + Write raw data to the process + + :param msg: the message to send + :type msg: str + :returns: None + """ + reactor.callFromThread(self.raw_write, 5, msg) + + def write_exit(self, msg): + """ + Write an exit message to the process + + :param msg: the message to send + :type msg: str + :returns: None + """ + reactor.callFromThread(self.raw_write, 7, msg) + + def raw_write(self, fd, data): + """ + Write data to a child's file descriptor + + :param fd: the file descriptor to write to + :type fd: int + :param data: the data to write + :type data: str + :returns: None + """ + prefix = "" if CHILDFDS_ENABLED else str(fd) + self.transport.writeToChild(fd if CHILDFDS_ENABLED else 0, pack_data(prefix + data)) + + def connectionMade(self): + """ + Notify users that 
this process is ready to go + + :returns: None + """ + self.pid = self.transport.pid + # Allow some time for the process to capture its streams + reactor.callLater(1.0, self.started.callback, self) + + def childDataReceived(self, childFD, data): + """ + Fired when the process sends us something + + :param childFD: the file descriptor which was used + :type childFD: int + :param data: the data which was sent + :type data: str + :returns: None + """ + if childFD == 2: + self.input_callbacks[childFD](data[:-1]) + return + partitions = data.split('\n') + for partition in partitions[:-1]: + concat_data = self.databuffers.get(childFD, "") + partition + '\n' + cc_data, out = unpack_complex(concat_data) + self.databuffers[childFD] = cc_data + if out is not None: + self.input_callbacks[childFD](out) + self.databuffers[childFD] = "" + self.databuffers[childFD] += partitions[-1] + + def childConnectionLost(self, childFD): + """ + Fired when a childFD is closed + + This is probably the result of a process shutdown + + :param childFD: the file descriptor which closed + :type childFD: int + :returns: None + """ + self.broken = True + logging.info("[" + str(self.pid) + + "] Connection lost with child FD " + + str(childFD)) + # We are not allowed to close the std streams + if childFD > 2: + self.transport.closeChildFD(childFD) + + def processEnded(self, status): + """ + Fired when the process ends + + :param status: the exit status + :type status: twisted.python.failure.Failure + :returns: None + """ + if CHILDFDS_ENABLED: + # We are not allowed to close the std streams + for i in xrange(3, 9): + self.transport.closeChildFD(i) + + def terminate(self): + """ + Terminate this process forcefully + + :returns: None + """ + try: + kill(self.pid, 9) + except OSError: + logging.error("Tried to kill already-dead process %d", + self.pid) diff --git a/Tribler/community/tunnel/processes/iprocess.py b/Tribler/community/tunnel/processes/iprocess.py new file mode 100644 index 
00000000000..9a2e1cfb64d --- /dev/null +++ b/Tribler/community/tunnel/processes/iprocess.py @@ -0,0 +1,89 @@ +from abc import ABCMeta, abstractmethod + + +class IProcess(object): + + """ + Generic process interface + + Processes communicate using three data streams (next to std), + namely: + * ctrl: for control messages + * data: for bulk data transfer + * exit: for exit messages/confirmation + + Note that this separation is made to accommodate the needs + of the different data streams, which should not interfere with + each other. These are: + * ctrl: high message diversity + * data: high volume + * exit: low latency + """ + + __metaclass__ = ABCMeta + + @abstractmethod + def on_ctrl(self, msg): + """ + Callback for when a control message is received + + :param msg: the received message + :type msg: str + :returns: None + """ + pass + + @abstractmethod + def on_data(self, msg): + """ + Callback for when raw data is received + + :param msg: the received message + :type msg: str + :returns: None + """ + pass + + @abstractmethod + def on_exit(self, msg): + """ + Callback for when an exit message is received + + :param msg: the received message + :type msg: str + :returns: None + """ + pass + + @abstractmethod + def write_ctrl(self, msg): + """ + Write a control message to the process + + :param msg: the message to send + :type msg: str + :returns: None + """ + pass + + @abstractmethod + def write_data(self, msg): + """ + Write raw data to the process + + :param msg: the data to send + :type msg: str + :returns: None + """ + pass + + @abstractmethod + def write_exit(self, msg): + """ + Write an exit message to the process + + :param msg: the message to send + :type msg: str + :returns: None + """ + pass diff --git a/Tribler/community/tunnel/processes/line_util.py b/Tribler/community/tunnel/processes/line_util.py new file mode 100644 index 00000000000..138be8827b9 --- /dev/null +++ b/Tribler/community/tunnel/processes/line_util.py @@ -0,0 +1,100 @@ +""" +Packaging 
utilities for speed and correctness. + +To gain parsing speed, messages are delimited by newline characters. +This allows for near-trivial message parsing. + +To allow for non-escaped messages, messages are prepended with their +data length. This allows the parser to skip newline delimiters which +occur before the expected message size has been met. +""" + +import struct + + +def pack_data(data): + """ + Generic data wrapper + + Data is wrapped between the data length (8 bytes) and + a newline. + + :param data: the data to pack + :type data: str + :return: the packed data + :rtype: str + """ + l = len(data) + 1 + return struct.pack('Q', l) + data + '\n' + + +def unpack_data(data): + """ + Generic data un-wrapper (see pack_data) + + Try to unpack data which was packed with pack_data. + This returns the length data should have before a + complete message has been formed and the message data + as far as it can be unpacked (incomplete until + len(data) is equal to the first return value). + + :param data: the data to try and unpack + :type data: str + :return: a tuple of required data length and current data + :rtype: (int, str) + """ + if len(data) < 8: + return (len(data) + 1, data) + l = struct.unpack('Q', data[:8])[0] + return (l + 8, data[8:len(data)-1]) + + +def unpack_complex(line): + """ + Ease-of-use decorator of unpack_data() + + See unpack_data(), returns the portion of the line + to prepend following data with and the portion which + forms a complete message, or None. 
+ + :param line: the incoming data to parse + :type line: str + :return: a tuple of data to buffer and data to forward + :rtype: (str, str or None) + """ + target, data = unpack_data(line) + if target == len(line): + # keep nothing, share data + line = "" + return line, data + elif target < len(line): + # keep past target-8, share up to target-8 + return line[target:], data[:target-9] + return line, None + + +def fix_split(n, delimiter, args): + """ + Fix a string split into n partitions + + Raw data sent over a line may contain delimiters used internally. + Given that the amount of delimiters is known per message type, + the data can be reconstructed for a given delimiter. + + :param n: the actual split count + :type n: int + :param delimiter: the delimited used to split the arguments + :type delimiter: str + :param args: the possibly superfluous arguments + :type args: [str] + :return: the args with length n + :rtype: [str] + """ + out = [] + if len(args) > n: + for i in xrange(n): + out.append(args[i] if i < n-1 + else delimiter.join(args[i:])) + return out + else: + return args diff --git a/Tribler/community/tunnel/processes/processmanager.py b/Tribler/community/tunnel/processes/processmanager.py new file mode 100644 index 00000000000..37e16a139e4 --- /dev/null +++ b/Tribler/community/tunnel/processes/processmanager.py @@ -0,0 +1,205 @@ +import logging +import os +import shutil +from multiprocessing import cpu_count +from random import sample + +from twisted.internet.defer import DeferredList, inlineCallbacks, returnValue + +from Tribler.community.tunnel.processes.tunnel_childprocess import TunnelProcess + + +class ProcessManager(object): + + """ + The ProcessManager creates and manages ChildProcesses + """ + + def __init__(self, session, community=None): + """ + Create a new ProcessManager + + :param session: the session to extract info from + :type session: Tribler.Core.Session.Session + :param community: (non-Session) community to report to + :type 
community: Tribler.dispersy.community.Community + :returns: None + """ + super(ProcessManager, self).__init__() + + self.pool = {} # Map of pid -> Process + self.circuit_map = {} # Map of circuit -> pid + self.session = session + self.community = community + + self._clean_working_dir() + + def _clean_working_dir(self): + """ + Clean leftover directories from crashed/terminated processes + + :returns: None + """ + for directory in [x[0] for x in os.walk(self.session.get_state_dir())]: + if os.path.split(directory)[1].startswith("tunnel_subprocess"): + logging.error("Cleaning up leftover subprocess artifacts in " + directory) + try: + shutil.rmtree(directory) + except OSError: + logging.error("Failed to clean leftover subprocess directory " + directory) + + def get_suggested_workers(self): + """ + Have the process manager suggest a number of workers to use + + :return: the suggested amount of workers + :rtype: int + """ + return cpu_count() + + def set_worker_count(self, value): + """ + Set the amount of workers to use + + :param value: the new amount of workers + :returns: None + """ + count = self.get_worker_count() + if count < value: + # We have too little workers, create more + return [self._create_worker() for _ in xrange(value - count)] + elif count > value: + # We have too many workers, remove some + return self._remove_workers(max(count - value, count)) + + def get_worker_count(self): + """ + Return the active amount of workers + + :return: the current worker count + :rtype: int + """ + return len(self.pool.keys()) + + def _create_worker(self): + """ + Create a single worker and add it to the pool + + :return: the deferred for when the process has started + :rtype: twisted.internet.defer.Deferred + """ + key_pair = self.session.get_multichain_permid_keypair_filename() + is_exit_node = self.session.get_tunnel_community_exitnode_enabled() + + process = TunnelProcess(self.community if self.community + else self.session.lm.tunnel_community) + + def 
on_created(proc): + self.pool[proc.pid] = proc + proc.create(key_pair, is_exit_node) + process.started.addCallback(on_created) + + return process.started + + def _remove_workers(self, amount): + """ + Remove some amount of workers + + :param amount: the amount of workers to remove + :type amount: int + :returns: None + """ + to_remove = sample(self.pool.keys(), amount) + waiters = [] + for worker in to_remove: + waiters.append(self.pool[worker].end()) + self.pool.pop(worker) + self.circuit_map = { + k:v for k, v in self.circuit_map.iteritems() + if v in to_remove} + return DeferredList(waiters) + + def monitor_infohashes(self, infohashes): + """ + Call monitor_infohashes on all workers in the pool + + :param infohashes: the infohashes the workers need to monitor + :type infohashes: [(str, int, int)] + :returns: None + """ + for worker in self.pool.values(): + worker.monitor_infohashes(infohashes) + + def send_data(self, candidates, circuit_id, dest_address, + source_address, data): + """ + Call send_data() on the worker assigned to circuit_id + + :param candidates: the candidates to use + :type candidates: Tribler.dispersy.candidate.Candidate + :param circuit_id: the circuit id to send over + :type circuit_id: long + :param dest_address: the destination address to send to + :type dest_address: (str, int) + :param source_address: the source address to send from + :type source_address: (str, int) + :param data: the data to send + :type data: str + :returns: None + """ + if circuit_id in self.circuit_map: + worker_id = self.circuit_map[circuit_id] + if worker_id not in self.pool: + logging.warning("Could not find worker with pid " + str(worker_id) + + " (probably shutting down)") + return + worker = self.pool[worker_id] + worker.send_data([cd.sock_addr for cd in candidates], + circuit_id, + dest_address, + source_address, + data) + else: + logging.warning("Could not find worker registered for " + + str(circuit_id) + ", trying anyway") + for worker in 
self.pool.values(): + worker.send_data([cd.sock_addr for cd in candidates], + circuit_id, + dest_address, + source_address, + data) + + @inlineCallbacks + def create_circuit(self, goal_hops, ctype, required_endpoint, + info_hash): + """ + Try to create a circuit on any worker which will accept it + + :param goal_hops: the hop count in the circuit + :type goal_hops: int + :param ctype: type of circuit to create + :type ctype: str + :param required_endpoint: the endpoint to use + :type required_endpoint: (str, int ,str) + :param info_hash: the infohash to assign to this circuit + :type info_hash: str + :return: the newly created circuit id or False + :rtype: long or False + """ + circuit_id = False + for worker in sorted(self.pool, + key=lambda x: ( + self.circuit_map.values().count(x))): + if worker not in self.pool: + logging.warning("Could not find worker with pid " + str(worker) + + " (probably shutting down)") + continue + circuit_id = yield self.pool[worker].create_circuit(goal_hops, + ctype, + required_endpoint, + info_hash) + + if circuit_id: + self.circuit_map[circuit_id] = worker + break + returnValue(circuit_id) diff --git a/Tribler/community/tunnel/processes/rpc_defs.py b/Tribler/community/tunnel/processes/rpc_defs.py new file mode 100644 index 00000000000..1a576366856 --- /dev/null +++ b/Tribler/community/tunnel/processes/rpc_defs.py @@ -0,0 +1,25 @@ +""" +Definitions for RPC calls (see rpcprocess.py) + +Response codes: +* RPC_RESPONSE_OK: Report error-free execution +* RPC_RESPONSE_ERR: Report that an error occurred + +RPC calls: +* RPC_CREATE: Create/set-up the TunnelCommunity +* RPC_NOTIFY: Forward a notification +* RPC_SYNC: Synchronize a SyncDict +* RPC_MONITOR: Call for a monitor_downloads +* RPC_CIRCUIT: Try creating a circuit with create_circuit +* RPC_CIRDEAD: Forward that a circuit has died +""" + +RPC_RESPONSE_OK = chr(0) +RPC_RESPONSE_ERR = chr(1) + +RPC_CREATE = "CREATE" +RPC_NOTIFY = "NOTIFY" +RPC_SYNC = "SYNC" +RPC_MONITOR = "MONITOR" 
+RPC_CIRCUIT = "CIRCUIT" +RPC_CIRDEAD = "CIRDEAD" diff --git a/Tribler/community/tunnel/processes/rpcprocess.py b/Tribler/community/tunnel/processes/rpcprocess.py new file mode 100644 index 00000000000..b90eaf65d91 --- /dev/null +++ b/Tribler/community/tunnel/processes/rpcprocess.py @@ -0,0 +1,191 @@ +import json +import logging +import sys + +from twisted.internet.defer import AlreadyCalledError, Deferred, inlineCallbacks, returnValue + +from Tribler.community.tunnel.processes.iprocess import IProcess +from Tribler.dispersy.util import blocking_call_on_reactor_thread + + +class RPCProcess(IProcess): + + """ + Convenience class for sending RPC calls over the control stream. + + First register a callback on the receiving process: + .register_rpc("MyCallback", callbackFunc) + This only needs to happen once. Multiple definitions + of the same name will overwrite the callbackFunc. + + Then the initiator process sends an rpc: + .send_rpc("MyCallback", "optionalStringArg") + + This will invoke on the receiving process: + callbackFunc("optionalStringArg") + + The return value is then returned to the initiator. + + NOTE: callbackFunc is REQUIRED to share a (str) response. + """ + + def __init__(self): + """ + Intialize a process capable of RPCs. + + :returns: None + """ + super(RPCProcess, self).__init__() + + self.rpc_map = {} # RPCName -> callback + self.auto_serialize = {} # RPCName -> bool + self.wait_deferreds = {} # RPCID -> Deferred + self.unique_id = 0L # Unique RPCID counter + + def register_rpc(self, name, callback=None, auto_serialize=True): + """ + Register a callback function for RPCs with a certain name. 
+ + :param name: the RPC name to register + :type name: str + :param callback: the callback for when the RPC is received + :type callback: func + :param auto_serialize: automatically serialize to json + :type auto_serialize: bool + :returns: None + """ + self.rpc_map[name] = callback + self.auto_serialize[name] = auto_serialize + + @blocking_call_on_reactor_thread + def claim_id(self): + """ + Get a new unique id + + Note, this method is not thread-safe. + + :return: a new unique message id + :rtype: str + """ + nid = self.unique_id + self.unique_id = (self.unique_id + 1) % sys.maxint + return str(nid) + + def _extract_name_msgid_arg(self, raw): + """ + Extract the RPC name, message id and argument from + a serialized rpc message. + + :param raw: the serialized message + :type raw: str + :return: the name, message id and argument + :rtype: (str, str or None, str or None) + """ + for known in self.rpc_map.keys(): + if raw.startswith(known): + if len(raw) > len(known): + msgid_arg = raw[len(known)+1:] + comma_index = msgid_arg.find(',') + if (comma_index == -1 and + comma_index < len(msgid_arg)): + return known, msgid_arg, None + else: + return (known, + msgid_arg[:comma_index], + msgid_arg[comma_index+1:]) + else: + return known, None, None + return raw, None, None + + def on_ctrl(self, msg): + """Handle incoming ctrl stream strings + + These messages are comma delimited, arguments are optional. + + This function will do one of two things: + 1. Respond to receive-only RPC calls + 2. 
Catch responses to send-only RPC calls (callback Deferred) + + :param msg: the serialized message + :type msg: str + :returns: None + """ + name, msg_id, arg = self._extract_name_msgid_arg(msg) + if arg and self.auto_serialize[name]: + try: + arg = json.loads(arg) + except ValueError: + logging.error("Malformed RPC argument for " + + name + ": '" + arg + "'") + return + if name not in self.rpc_map: + logging.error("Got illegal RPC: " + + name + ", source = " + msg) + return + + if self.rpc_map[name]: + # This is a new rpc call + # Share our response + response = ((self.rpc_map[name](*arg) + if self.auto_serialize[name] + else self.rpc_map[name](arg)) if arg + else self.rpc_map[name]()) + self.write_ctrl(name + "," + msg_id + "," + + (json.dumps(response) + if self.auto_serialize[name] + else response)) + elif msg_id in self.wait_deferreds: + # This is a response to an RPC + try: + self.wait_deferreds[msg_id].callback(arg) + except AlreadyCalledError: + logging.error("Got a response for RPC " + str(msg_id) + ", which was already answered") + del self.wait_deferreds[msg_id] + else: + logging.error("Got illegal RPC response: " + + name + ", source = " + msg + ", known = " + + ", ".join(self.wait_deferreds.keys())) + return + + def _send_rpc(self, name, arg=None): + """Send an RPC call to the other process + + This will return an object or string depending on whether + the RPC name is registered as auto_serialize or not. + + :param name: the RPC name + :type name: str + :param arg: the optional argument + :type arg: str + :return: the RPC response from the process + :rtype: object or str + """ + wait_id = self.claim_id() + waiter = Deferred() + self.wait_deferreds[wait_id] = waiter + + self.write_ctrl(name + "," + wait_id + + ("," + arg if arg else "")) + + return waiter + + @inlineCallbacks + def send_rpc(self, name, complex_obj=None): + """Send an RPC call to the other process + + Automatically serialize some complex obj (with json) + if applicable. 
+ + :param name: the RPC name + :type name: str + :param complex_obj: the optional argument + :type complex_obj: (combination of) str, int, bool, list, + tuple, dict + :return: the RPC response from the process + :rtype: (combination of) str, int, bool, list, + tuple, dict; or str + """ + serialized = json.dumps(complex_obj)\ + if self.auto_serialize[name] else complex_obj + val = yield self._send_rpc(name, serialized) + returnValue(val) diff --git a/Tribler/community/tunnel/processes/subprocess.py b/Tribler/community/tunnel/processes/subprocess.py new file mode 100644 index 00000000000..3feb99fbab5 --- /dev/null +++ b/Tribler/community/tunnel/processes/subprocess.py @@ -0,0 +1,254 @@ +""" +Each subprocess has 6 additional file descriptors +(next to the stdin, stdout and stderr). These are: + + - ctrl_in: for receiving control messages + - ctrl_out: for responding to control messages + - data_in: for receiving bulk data + - data_out: for sending bulk data + - exit_in: for receiving exit signals + - exit_out: for responding to exit signals +""" + +import io +import logging +import os +import sys +import threading + +from twisted.internet import reactor +from twisted.internet.defer import Deferred, inlineCallbacks + +from Tribler.community.tunnel.processes import CHILDFDS_ENABLED +from Tribler.community.tunnel.processes.iprocess import IProcess +from Tribler.community.tunnel.processes.line_util import pack_data, unpack_complex + +FNO_CTRL_IN = 3 +FNO_CTRL_OUT = 4 +FNO_DATA_IN = 5 +FNO_DATA_OUT = 6 +FNO_EXIT_IN = 7 +FNO_EXIT_OUT = 8 + +FILE_CTRL_IN = io.open(FNO_CTRL_IN, "rb", 0) if CHILDFDS_ENABLED else sys.__stdin__ +FILE_CTRL_OUT = io.open(FNO_CTRL_OUT, "wb", 0) if CHILDFDS_ENABLED else sys.__stdout__ +FILE_DATA_IN = io.open(FNO_DATA_IN, "rb", 0) if CHILDFDS_ENABLED else sys.__stdin__ +FILE_DATA_OUT = io.open(FNO_DATA_OUT, "wb", 0) if CHILDFDS_ENABLED else sys.__stdout__ +FILE_EXIT_IN = io.open(FNO_EXIT_IN, "rb", 0) if CHILDFDS_ENABLED else sys.__stdin__ 
+FILE_EXIT_OUT = io.open(FNO_EXIT_OUT, "wb", 0) if CHILDFDS_ENABLED else sys.__stdout__ + +if not CHILDFDS_ENABLED: + # The default stderr is way too slow flushing + # its buffer. This causes congestion and slow down. + class AutoFlushErrWriter(object): + + def write(self, s): + sys.__stderr__.write(s) + sys.__stderr__.flush() + + def __getattr__(self, item): + return getattr(sys.__stderr__, item) + + sys.stderr = AutoFlushErrWriter() + sys.stdout = sys.stderr + +LOCK_GENERIC = None if CHILDFDS_ENABLED else threading.Lock() +LOCK_CTRL = threading.Lock() if CHILDFDS_ENABLED else LOCK_GENERIC +LOCK_DATA = threading.Lock() if CHILDFDS_ENABLED else LOCK_GENERIC +LOCK_EXIT = threading.Lock() if CHILDFDS_ENABLED else LOCK_GENERIC + + +class LineConsumer(threading.Thread): + + """ + Daemon thread to consume file data. + """ + + def __init__(self, file_obj, data_callback): + """ + Initialize a LineConsumer + + :param file_obj: The file object to read + :type file_obj: file + :param data_callback: The callback for when data is read + :type data_callback: func + :returns: None + """ + super(LineConsumer, self).__init__() + + self.file_obj = file_obj + self.data_callback = data_callback + self.daemon = True + self.start() + + def run(self): + """ + Keep consuming from the line until it is closed + + :returns: None + """ + line = "" + while not self.file_obj.closed: + try: + line += self.file_obj.readline() + except IOError: + break + if line.endswith('\n') and len(line) > 8: + line, data = unpack_complex(line) + if data is not None: + reactor.callInThread(self.data_callback, data) + + +class Subprocess(IProcess): + + """ + The main entry-point handle: a subprocess object. + Overwritten by the subprocess for more advanced + functionality. 
+ """ + + def __init__(self): + """ + Initialize a new Subprocess + + :returns: None + """ + super(Subprocess, self).__init__() + + self.closed = Deferred() + + def start(self): + """ + Start consuming from the input file descriptors + + :returns: None + """ + if CHILDFDS_ENABLED: + LineConsumer(FILE_CTRL_IN, self.on_ctrl) + LineConsumer(FILE_DATA_IN, self.on_data) + LineConsumer(FILE_EXIT_IN, self.on_exit) + else: + LineConsumer(sys.__stdin__, self.on_generic) + + def on_generic(self, msg): + """ + Callback for when a multiplexed message is sent over + a stream. These have their intended stream identifier + as the first character. + + :param msg: the received message + :type msg: str + :returns: None + """ + data = msg[1:] + try: + stream = int(msg[0]) + if stream == FNO_CTRL_IN: + self.on_ctrl(data) + elif stream == FNO_DATA_IN: + self.on_data(data) + elif stream == FNO_EXIT_IN: + self.on_exit(data) + else: + logging.error("Got data for unknown file descriptor " + msg[0]) + except ValueError: + logging.error("Got data for unknown file descriptor " + msg[0]) + + def write_ctrl(self, msg): + """ + Write a control message to the parent process + + :param msg: the message to send + :type msg: str + :returns: None + """ + Subprocess.write(FILE_CTRL_OUT, FNO_CTRL_OUT, msg, LOCK_CTRL) + + def write_data(self, msg): + """ + Write raw data to the parent process + + :param msg: the message to send + :type msg: str + :returns: None + """ + Subprocess.write(FILE_DATA_OUT, FNO_DATA_OUT, msg, LOCK_DATA) + + def write_exit(self, msg): + """ + Write an exit message to the parent process + + :param msg: the message to send + :type msg: str + :returns: None + """ + Subprocess.write(FILE_EXIT_OUT, FNO_EXIT_OUT, msg, LOCK_EXIT) + + @staticmethod + def close_all_streams(): + """ + Close all registered file descriptors + + :returns: None + """ + # We use the fact that they are assigned + # to the range [3, 8]. 
+ if CHILDFDS_ENABLED: + for fno in xrange(3, 9, 1): + Subprocess.close(fno) + + @staticmethod + def write(f, fno, data, lock): + """ + Write to the parent process + + :param f: the file to write to + :type f: file + :param data: the data to write + :type data: str + :param lock: the Lock to acquire + :type lock: threading.Lock + :returns: None + """ + prefix = "" if CHILDFDS_ENABLED else str(fno) + packed = pack_data(prefix + data) + lock.acquire(True) + try: + f.write(packed) + f.flush() + except IOError: + pass + finally: + lock.release() + + @staticmethod + def close(fno): + """ + Close a file descriptor + + :param fno: the file descriptor number + :type fno: int + :returns: None + """ + os.close(fno) + + def end(self): + """ + End the Subprocess + + Close all streams and call the closed callback + + :returns: None + """ + self.close_all_streams() + self.closed.callback(True) + reactor.callFromThread(reactor.stop) + + @inlineCallbacks + def block_until_end(self): + """ + Wait until the Subprocess is closed + + :returns: None + """ + yield self.closed diff --git a/Tribler/community/tunnel/processes/tunnel_childprocess.py b/Tribler/community/tunnel/processes/tunnel_childprocess.py new file mode 100644 index 00000000000..cd2db8ac921 --- /dev/null +++ b/Tribler/community/tunnel/processes/tunnel_childprocess.py @@ -0,0 +1,260 @@ +import logging +from binascii import hexlify + +from twisted.internet import reactor, task +from twisted.internet.defer import AlreadyCalledError, Deferred, inlineCallbacks, returnValue + +from Tribler.community.tunnel.processes.childprocess import ChildProcess +from Tribler.community.tunnel.processes.line_util import fix_split +from Tribler.community.tunnel.processes.rpc_defs import (RPC_RESPONSE_OK, + RPC_CREATE, + RPC_NOTIFY, + RPC_SYNC, + RPC_MONITOR, + RPC_CIRCUIT, + RPC_CIRDEAD) +from Tribler.community.tunnel.processes.rpcprocess import RPCProcess +from Tribler.community.tunnel.remotes.remote_object import RemoteObject + + +class 
TunnelProcess(RPCProcess, ChildProcess): + + """ + The TunnelProcess is the main process's view of a child + process running a TunnelCommunity. + """ + + def __init__(self, community=None): + """ + Initialize a new TunnelProcess. This is the main + process's view of a child process. + + :param community: the Community to report back to + :type community: Tribler.dispersy.community.Community + :returns: None + """ + super(TunnelProcess, self).__init__() + + self.community = community + + self.register_rpc(RPC_CREATE) + self.register_rpc(RPC_CIRCUIT) + self.register_rpc(RPC_MONITOR) + self.register_rpc(RPC_CIRDEAD, self.on_rpc_circuit_dead) + self.register_rpc(RPC_NOTIFY, self.on_rpc_notify) + self.register_rpc(RPC_SYNC, self.on_rpc_sync, False) + + self.exit_deferred = None + + def set_community(self, community): + """ + Switch the community to report to + + :param community: the new community to use + :type community: Tribler.dispersy.community.Community + """ + self.community = community + + def end(self): + """ + End the child process + + :return: the deferred signalling the exit + :rtype: twisted.internet.defer.Deferred + """ + self.exit_deferred = Deferred() + self.write_exit(RPC_RESPONSE_OK) + def checkExited(): + if not self.exit_deferred.called: + logging.error("Force killing " + str(self.pid)) + self.terminate() + self._signal_exit_deferred() + reactor.callLater(4.0, checkExited) + return self.exit_deferred + + @inlineCallbacks + def on_exit(self, msg): + """ + Callback for when the process signals correct termination + + :param msg: the exit flag + :type msg: str + :returns: None + """ + if not self.exit_deferred.called: + # The child has assured us it has exited correctly + # If it lied to us, the checkExited() callback will + # force terminate it anyway. 
+ while not self.broken: + yield task.deferLater(reactor, .05, lambda: None) + self._signal_exit_deferred() + + def _signal_exit_deferred(self): + """ + Make sure the exit_deferred has been called + + :returns: None + """ + try: + self.exit_deferred.callback(True) + except AlreadyCalledError: + # This is fine, our job is done + pass + + @inlineCallbacks + def create(self, keypair, is_exit_node): + """ + Create the child process's community + + :param keypair: the multichain key-pair to use + :type keypair: str + :param is_exit_node: is this to be an exit node + :type is_exit_node: bool + :param test_mode: is this to run in test mode + :type test_mode: bool + :returns: None + """ + yield self.send_rpc(RPC_CREATE, (keypair, is_exit_node)) + + @inlineCallbacks + def monitor_infohashes(self, infohashes): + """ + Call monitor_infohashes on the child process's community + + :param infohashes: the infohash tuples to monitor + :type infohashes: [(str, int, int)] + :returns: None + """ + json_fixed = [(hexlify(infohash[0]), infohash[1], infohash[2]) + for infohash in infohashes] + yield self.send_rpc(RPC_MONITOR, (json_fixed, )) + + @inlineCallbacks + def create_circuit(self, goal_hops, ctype, required_endpoint, + info_hash): + """ + Call create_circuit on the child process's community + + :param goal_hops: the hop count in the circuit + :type goal_hops: int + :param ctype: type of circuit to create + :type ctype: str + :param required_endpoint: the endpoint to use + :type required_endpoint: (str, int ,str) + :param info_hash: the infohash to assign to this circuit + :type info_hash: str + :return: False or the circuit id + :rtype: bool or long + """ + enc_required_endpoint = None + if required_endpoint: + enc_required_endpoint = (required_endpoint[0], + required_endpoint[1], + required_endpoint[2].encode("HEX")) + val = yield self.send_rpc(RPC_CIRCUIT, (goal_hops, + ctype, + enc_required_endpoint, + hexlify(info_hash) if info_hash else None)) + returnValue(val) + + def 
send_data(self, cd_list, circuit_id, dest_address, + source_address, data): + """ + Send data over a circuit_id + + This uses custom serialization for speed. + + :param cd_list: the list of the candidates to send to + :type cd_list: [(str, int)] + :param circuit_id: the circuit_id to use + :type circuit_id: long + :param dest_address: the destination address + :type dest_address: (str, int) + :param source_address: our address + :type source_address: (str, int) + :param data: the raw data to send + :type data: str + :returns: None + """ + serialized_cd_list = ','.join([cd[0] + ':' + str(cd[1]) + for cd in cd_list]) + self.write_data(';'.join([serialized_cd_list, + str(circuit_id), + dest_address[0] + ':' + + str(dest_address[1]), + source_address[0] + ':' + + str(source_address[1]), + data])) + + def on_data(self, msg): + """ + Callback for incoming data + + :param msg: the serialized data + :type msgs: str + :returns: None + """ + s_circuit_id, s_origin_host,\ + s_origin_port, s_anon_seed,\ + data = fix_split(5, ';', msg.split(';')) + i_circuit_id = int(s_circuit_id) + if i_circuit_id not in self.community.circuits: + logging.error("Attempted to send data over unknown circuit id " + s_circuit_id) + return + circuit = self.community.circuits[i_circuit_id] + origin = (s_origin_host, int(s_origin_port)) + anon_seed = s_anon_seed == "1" + self.community.socks_server.on_incoming_from_tunnel(self.community, circuit, origin, data, anon_seed) + + def on_rpc_circuit_dead(self, circuit_id): + """ + Callback for when the child process signals a dead circuit + + :param circuit_id: the dead circuit's id + :type circuit_id: long + :return: RPC response code + :rtype: str + """ + self.community.remove_circuit(circuit_id) + return RPC_RESPONSE_OK + + def on_rpc_notify(self, subject, changeType, obj_id, *args): + """ + Callback for the child process's notifications + + :param subject: the subject + :type subject: str + :param changeType: the change type + :type changeType: str + 
:param obj_id: the object + :type obj_id: object + :param args: optional arguments + :type args: [object] + :return: RPC response code + :rtype: str + """ + if self.community and self.community.notifier: + self.community.notifier.notify(subject, changeType, + obj_id, *args) + return RPC_RESPONSE_OK + + def on_rpc_sync(self, serialized): + """ + RPC callback SyncDict synchronization frames + + :param serialized: the sync frame + :type serialized: str + :return: RPC response code + :rtype: str + """ + if self.community: + cls_name = RemoteObject.__extract_class_name__(serialized) + if self.community.circuits.is_same_type(cls_name): + self.community.circuits.on_synchronize(serialized) + elif self.community.hops.is_same_type(cls_name): + self.community.hops.on_synchronize(serialized) + else: + logging.error( + "Child process tried to synchronize unknown class " + + cls_name) + return RPC_RESPONSE_OK diff --git a/Tribler/community/tunnel/processes/tunnel_subprocess.py b/Tribler/community/tunnel/processes/tunnel_subprocess.py new file mode 100644 index 00000000000..2156031fc5d --- /dev/null +++ b/Tribler/community/tunnel/processes/tunnel_subprocess.py @@ -0,0 +1,274 @@ +import logging +import os +import sys +from binascii import unhexlify + +from twisted.internet import reactor +from twisted.internet.defer import inlineCallbacks +from twisted.internet.threads import blockingCallFromThread + +from Tribler.community.tunnel.processes.line_util import fix_split +from Tribler.community.tunnel.processes.rpc_defs import (RPC_RESPONSE_OK, + RPC_RESPONSE_ERR, + RPC_CREATE, + RPC_NOTIFY, + RPC_SYNC, + RPC_MONITOR, + RPC_CIRCUIT, + RPC_CIRDEAD) +from Tribler.community.tunnel.processes.rpcprocess import RPCProcess +from Tribler.community.tunnel.processes.subprocess import Subprocess +from Tribler.Core.Session import Session +from Tribler.Core.SessionConfig import SessionStartupConfig +from Tribler.Core.simpledefs import NTFY_STARTED, NTFY_TRIBLER +from Tribler.dispersy.candidate 
import Candidate + + +class TunnelSubprocess(RPCProcess, Subprocess): + + """ + The child process's view of the parent process + + In other words, this controls all of the constructs + required by the parent process. Most importantly it + manages a TunnelCommunity. + + TODO/Future work in this file: + - Forward notifications + - Forward bartercast statistics + """ + + def __init__(self): + """ + Initialize a new TunnelSubprocess + + :returns: None + """ + super(TunnelSubprocess, self).__init__() + + self.session_started = False + self.session = None + self.community = None + + self.register_rpc(RPC_CIRDEAD) + self.register_rpc(RPC_NOTIFY) + self.register_rpc(RPC_SYNC, auto_serialize=False) + self.register_rpc(RPC_CIRCUIT, self.on_rpc_circuit) + self.register_rpc(RPC_CREATE, self.on_rpc_create) + self.register_rpc(RPC_MONITOR, self.on_rpc_monitor_infohashes) + + @inlineCallbacks + def sync(self, data): + """ + Callback for when any SyncDict wants to sync data + + :param data: the data to synchronize + :type data: str + :returns: None + """ + yield self.send_rpc(RPC_SYNC, data) + + @inlineCallbacks + def circuit_dead(self, circuit_id): + """ + Callback for when a circuit is dead + + :param circuit_id: the dead circuit id + :type circuit_id: long + :returns: None + """ + yield self.send_rpc(RPC_CIRDEAD, (circuit_id,)) + + def on_session_started(self, subject, changetype, objectID, *args): + """ + Callback for when the local Session has started + + :returns: None + """ + self.community = self.session.lm.tunnel_community + self.community.set_process(self) + self.session_started = True + + @inlineCallbacks + def start_session(self, session): + """ + Attempt to start the local Session + + This can go wrong, in this case we attempt to soft-exit. 
+ """ + session.add_observer(self.on_session_started, + NTFY_TRIBLER, + [NTFY_STARTED]) + try: + yield session.start() + except: + logging.error("Session reported error when starting up: " + + str(sys.exc_info()[0])) + try: + self.write_exit(RPC_RESPONSE_ERR) + logging.info( + "Soft-exit after session startup crash, succeeded") + except: + logging.error("Attempt to soft-exit failed: " + + str(sys.exc_info()[0])) + + def on_rpc_create(self, keypair_filename, is_exit_node): + """ + Initialize the local TunnelCommunity + + :param keypair_filename: the path of the multichain ec file + :type keypair_filename: str + :param is_exit_node: is exit node enabled + :type is_exit_node: bool + :return: RPC response code + :rtype: str + """ + # Set up a MiniSession + config = SessionStartupConfig() + working_dir = os.path.join(config.get_state_dir(), + "tunnel_subprocess" + str(os.getpid())) + if not os.path.exists(working_dir): + os.makedirs(working_dir) + + # Configure MiniSession + config.set_state_dir(working_dir) + config.set_torrent_checking(False) + config.set_http_api_enabled(False) + config.set_torrent_store(False) + config.set_enable_torrent_search(False) + config.set_enable_channel_search(False) + config.set_torrent_collecting(False) + config.set_dht_torrent_collecting(False) + config.set_enable_metadata(False) + config.set_upgrader_enabled(False) + config.set_preview_channel_community_enabled(False) + config.set_channel_community_enabled(False) + config.set_tunnel_community_pooled(False) + config.set_libtorrent(False) + config.set_enable_multichain(False) + config.sessconfig.set(u'general', u'minport', -1) + + config.set_tunnel_community_exitnode_enabled(is_exit_node) + if keypair_filename: + config.set_multichain_permid_keypair_filename( + keypair_filename) + + # Create the actual session + self.session = Session(config) + + # Join the community + reactor.callInThread(self.start_session, self.session) + + return RPC_RESPONSE_OK + + def on_rpc_monitor_infohashes(self, 
infohashes): + """ + Call monitor_infohashes + + :param infohashes: the infohash tuples to monitor + :type infohashes: [(str, int, int)] + :return: RPC response code + :rtype: str + """ + if not self.session_started: + logging.error("Attempted monitor_infohashes without Session") + return RPC_RESPONSE_ERR + self.community.monitor_infohashes([(unhexlify(infohash[0]), + infohash[1], + infohash[2]) + for infohash in infohashes]) + return RPC_RESPONSE_OK + + def on_rpc_circuit(self, goal_hops, ctype, required_endpoint, + info_hash): + """ + Call create_circuit + + :param goal_hops: the hop count in the circuit + :type goal_hops: int + :param ctype: type of circuit to create + :type ctype: str + :param required_endpoint: the endpoint to use + :type required_endpoint: (str, int, str) + :param info_hash: the infohash to assign to this circuit + :type info_hash: str + :return: RPC response code + :rtype: str + """ + if not self.session_started: + logging.error("Attempted create_circuit without Session") + return False + dec_required_endpoint = None + if required_endpoint: + dec_required_endpoint = (required_endpoint[0], + required_endpoint[1], + required_endpoint[2].decode("HEX")) + return blockingCallFromThread(reactor, + self.community.create_circuit, + goal_hops, + ctype, + None, + dec_required_endpoint, + unhexlify(info_hash) if info_hash else None) + + def on_data(self, msg): + """ + Callback for when the main process wants us to send_data + + :param msg: the serialized data + :type msg: str + :returns: None + """ + s_cd_list, s_circuit_id,\ + s_d_addr, s_s_addr,\ + data = fix_split(5, ';', msg.split(';')) + candidates = [Candidate((s_cd[0], int(s_cd[1])), False) + for s_cd in [s_cd.split(':') + for s_cd in s_cd_list.split(',')]] + circuit_id = int(s_circuit_id) + dest_address = s_d_addr.split(':') + dest_address = (dest_address[0], int(dest_address[1])) + source_address = s_s_addr.split(':') + source_address = (source_address[0], int(source_address[1])) + if 
circuit_id in self.community.circuits: + self.community.send_data(candidates, circuit_id, + dest_address, source_address, + data) + + def on_incoming_from_tunnel(self, circuit, origin, data, + anon_seed): + """ + Callback for when data should be delivered + + :param circuit: the originating circuits + :type circuit: Tribler.community.tunnel.remotes.circuit.Circuit + :param origin: the originator's address + :type origin: (str, int) + :param data: the data to deliver + :type data: str + :param anon_seed: is an anonymous seed + :type anon_seed: bool + :returns: None + """ + self.write_data(";".join([str(circuit.circuit_id), + origin[0], + str(origin[1]), + "1" if anon_seed else "0", + data])) + + def on_exit(self, msg): + """ + Callback for when the main process wants us to exit + + :param msg: the exit flag + :type msg: str + :returns: None + """ + @inlineCallbacks + def session_shutdown(): + yield self.session.shutdown() + + if self.session: + session_shutdown() + self.write_exit(RPC_RESPONSE_OK) + self.end() diff --git a/Tribler/community/tunnel/subprocess_launcher.py b/Tribler/community/tunnel/subprocess_launcher.py new file mode 100644 index 00000000000..8908d43b445 --- /dev/null +++ b/Tribler/community/tunnel/subprocess_launcher.py @@ -0,0 +1,70 @@ +import sys + +from twisted.python import usage +from twisted.internet import reactor + + +class SubprocessLauncher(usage.Options): + + """ + This class parses options and tries to start a Tunnel Subprocess + """ + + optFlags = [ + ["tunnel_subprocess", None, "Internal: run this process as a tunnel subprocess"] + ] + + def parse_argv(self): + """ + Parse sys.argv for arguments + + :returns: None + """ + remaining = sys.argv[:] + while len(remaining): + try: + self.parseOptions(remaining) + break + except usage.UsageError: + remaining.pop(0) + except SystemExit: + break + + + def attempt_subprocess_start(self): + """ + Attempt to start a subprocess, if specified + + This checks if the subprocess flag is set in the 
arguments. + If it is, it launches a subprocess. Be sure not to start + anything else if this is successful. + + :return: whether a subprocess was launched + :rtype: bool + """ + if 'tunnel_subprocess' in self.keys() and self['tunnel_subprocess']: + if reactor.running: + self._start_with_reactor() + else: + self._start_without_reactor() + return True + return False + + def _start_without_reactor(self): + """ + The reactor does not exist yet, we will have to run it ourselves + + :returns: None + """ + self._start_with_reactor() + reactor.run() + + def _start_with_reactor(self): + """ + Someone else has provided us with a reactor, simply start + + :returns: None + """ + from Tribler.community.tunnel.processes.tunnel_subprocess import TunnelSubprocess + subprocess = TunnelSubprocess() + subprocess.start() From 609271e801520e01fde91880b6b125fb53229128 Mon Sep 17 00:00:00 2001 From: qstokkink Date: Sun, 26 Feb 2017 11:56:44 +0100 Subject: [PATCH 3/6] Hooked pooled tunnel into infrastructure --- Tribler/Core/SessionConfig.py | 14 ++++ Tribler/Core/defaults.py | 1 + TriblerGUI/defs.py | 1 + TriblerGUI/qt_resources/mainwindow.ui | 89 +++++++++++++++++++++++++ TriblerGUI/widgets/settingspage.py | 8 ++- run_tribler.py | 13 +++- twisted/plugins/tribler_plugin.py | 10 ++- twisted/plugins/tunnel_helper_plugin.py | 9 ++- 8 files changed, 139 insertions(+), 6 deletions(-) diff --git a/Tribler/Core/SessionConfig.py b/Tribler/Core/SessionConfig.py index d877d0526ff..3d3dd70055d 100644 --- a/Tribler/Core/SessionConfig.py +++ b/Tribler/Core/SessionConfig.py @@ -259,6 +259,20 @@ def get_tunnel_community_enabled(self): """ return self.sessconfig.get(u'tunnel_community', u'enabled') + def set_tunnel_community_pooled(self, value): + """ + Enable or disable subprocesses for the tunnel community. 
+ :param value: A boolean indicating whether tunnel community pooling should be enabled + """ + self.sessconfig.set(u'tunnel_community', u'pooled', value) + + def get_tunnel_community_pooled(self): + """ + Returns whether tunnel community pooling is enabled. + :return: A boolean indicating whether tunnel community pooling is enabled + """ + return self.sessconfig.get(u'tunnel_community', u'pooled') + # # BarterCommunity settings # diff --git a/Tribler/Core/defaults.py b/Tribler/Core/defaults.py index c1b87a56f23..4d5fea50aae 100644 --- a/Tribler/Core/defaults.py +++ b/Tribler/Core/defaults.py @@ -89,6 +89,7 @@ sessdefaults['tunnel_community']['socks5_listen_ports'] = [-1] * 5 sessdefaults['tunnel_community']['exitnode_enabled'] = False sessdefaults['tunnel_community']['enabled'] = True +sessdefaults['tunnel_community']['pooled'] = False # Multichain community section sessdefaults['multichain'] = OrderedDict() diff --git a/TriblerGUI/defs.py b/TriblerGUI/defs.py index 8cad15872b7..d9cfe7766cb 100644 --- a/TriblerGUI/defs.py +++ b/TriblerGUI/defs.py @@ -38,6 +38,7 @@ PAGE_SETTINGS_BANDWIDTH = 2 PAGE_SETTINGS_SEEDING = 3 PAGE_SETTINGS_ANONYMITY = 4 +PAGE_SETTINGS_EXPERIMENTAL = 5 # Definition of the download statuses and the corresponding strings DLSTATUS_ALLOCATING_DISKSPACE = 0 diff --git a/TriblerGUI/qt_resources/mainwindow.ui b/TriblerGUI/qt_resources/mainwindow.ui index bbeb2baf722..f7e8c7ed422 100644 --- a/TriblerGUI/qt_resources/mainwindow.ui +++ b/TriblerGUI/qt_resources/mainwindow.ui @@ -5102,6 +5102,40 @@ margin: 10px; + + + + + 0 + 0 + + + + + 0 + 36 + + + + + 16777215 + 36 + + + + PointingHandCursor + + + + + + EXPERIMENTAL + + + true + + + @@ -6263,6 +6297,61 @@ color: white; + + + + + + + 0 + 0 + + + + All of the settings on this page are experimental features. Enabling these may cause you, your computer, your loved ones, or frankly, anyone associated with you, harm in ways not limited to the physical realm. Use with caution. 
 + + + false + + + Qt::AlignLeading|Qt::AlignLeft|Qt::AlignTop + + + true + + + + + + + + 0 + 0 + + + + Uses multiple processes to lighten the blow on your multi-core processor when downloading anonymously + + + Enable Multi-Core Anonymous Downloading + + + + + + + Qt::Vertical + + + + 20 + 40 + + + + + + diff --git a/TriblerGUI/widgets/settingspage.py b/TriblerGUI/widgets/settingspage.py index 90c5cc671c9..82ddd6332aa 100644 --- a/TriblerGUI/widgets/settingspage.py +++ b/TriblerGUI/widgets/settingspage.py @@ -2,7 +2,7 @@ from PyQt5.QtWidgets import QWidget from TriblerGUI.defs import PAGE_SETTINGS_GENERAL, PAGE_SETTINGS_CONNECTION, PAGE_SETTINGS_BANDWIDTH, \ - PAGE_SETTINGS_SEEDING, PAGE_SETTINGS_ANONYMITY, BUTTON_TYPE_NORMAL + PAGE_SETTINGS_SEEDING, PAGE_SETTINGS_ANONYMITY, PAGE_SETTINGS_EXPERIMENTAL, BUTTON_TYPE_NORMAL from TriblerGUI.dialogs.confirmationdialog import ConfirmationDialog from TriblerGUI.tribler_request_manager import TriblerRequestManager from TriblerGUI.utilities import seconds_to_string, string_to_minutes, get_gui_setting @@ -88,6 +88,9 @@ def initialize_with_settings(self, settings): self.window().number_hops_slider.setValue(int(settings['Tribler']['default_number_hops']) - 1) self.window().multichain_enabled_checkbox.setChecked(settings['multichain']['enabled']) + # Experimental settings + self.window().enable_pooled_tunnel_checkbox.setChecked(settings['tunnel_community']['pooled']) + def load_settings(self): self.settings_request_mgr = TriblerRequestManager() self.settings_request_mgr.perform_request("settings", self.initialize_with_settings) @@ -103,6 +106,8 @@ def clicked_tab_button(self, tab_button_name): self.window().settings_stacked_widget.setCurrentIndex(PAGE_SETTINGS_SEEDING) elif tab_button_name == "settings_anonymity_button": self.window().settings_stacked_widget.setCurrentIndex(PAGE_SETTINGS_ANONYMITY) + elif tab_button_name == "settings_experimental_button": + 
self.window().settings_stacked_widget.setCurrentIndex(PAGE_SETTINGS_EXPERIMENTAL) def save_settings(self): # Create a dictionary with all available settings @@ -164,6 +169,7 @@ def save_settings(self): settings_data['tunnel_community']['exitnode_enabled'] = self.window().allow_exit_node_checkbox.isChecked() settings_data['Tribler']['default_number_hops'] = self.window().number_hops_slider.value() + 1 settings_data['multichain']['enabled'] = self.window().multichain_enabled_checkbox.isChecked() + settings_data['tunnel_community']['pooled'] = self.window().enable_pooled_tunnel_checkbox.isChecked() self.window().settings_save_button.setEnabled(False) diff --git a/run_tribler.py b/run_tribler.py index 687e70e6fe8..0072f753a4a 100644 --- a/run_tribler.py +++ b/run_tribler.py @@ -1,8 +1,19 @@ +import logging.config +import multiprocessing import os import sys -import multiprocessing + +from Tribler.community.tunnel.subprocess_launcher import SubprocessLauncher + +if os.path.exists("logger.conf"): + logging.config.fileConfig("logger.conf") if __name__ == "__main__": + options = SubprocessLauncher() + options.parse_argv() + if options.attempt_subprocess_start(): + sys.exit(0) + multiprocessing.freeze_support() from TriblerGUI.tribler_app import TriblerApplication diff --git a/twisted/plugins/tribler_plugin.py b/twisted/plugins/tribler_plugin.py index 97d42a4c8df..94781fb565a 100644 --- a/twisted/plugins/tribler_plugin.py +++ b/twisted/plugins/tribler_plugin.py @@ -4,13 +4,13 @@ from datetime import date import os import signal +import sys import time from twisted.application.service import MultiService, IServiceMaker from twisted.conch import manhole_tap from twisted.internet import reactor from twisted.plugin import IPlugin -from twisted.python import usage from twisted.python.log import msg from zope.interface import implements @@ -21,10 +21,11 @@ # Register yappi profiler from Tribler.community.allchannel.community import AllChannelCommunity from 
Tribler.community.search.community import SearchCommunity +from Tribler.community.tunnel.subprocess_launcher import SubprocessLauncher from Tribler.dispersy.utils import twistd_yappi -class Options(usage.Options): +class Options(SubprocessLauncher): optParameters = [ ["manhole", "m", 0, "Enable manhole telnet service listening at the specified port", int], ["statedir", "s", None, "Use an alternate statedir", str], @@ -142,4 +143,9 @@ def makeService(self, options): return tribler_service + +options = Options() +options.parse_argv() +if options.attempt_subprocess_start(): + sys.exit(0) service_maker = TriblerServiceMaker() diff --git a/twisted/plugins/tunnel_helper_plugin.py b/twisted/plugins/tunnel_helper_plugin.py index d8b487f89c0..5381db8c420 100644 --- a/twisted/plugins/tunnel_helper_plugin.py +++ b/twisted/plugins/tunnel_helper_plugin.py @@ -7,6 +7,7 @@ import os import random import signal +import sys import threading import time from collections import defaultdict, deque @@ -40,6 +41,7 @@ from Tribler.Core.simpledefs import dlstatus_strings from Tribler.Core.DownloadConfig import DefaultDownloadStartupConfig from Tribler.community.tunnel.hidden_community import HiddenTunnelCommunity +from Tribler.community.tunnel.subprocess_launcher import SubprocessLauncher from Tribler.community.tunnel.tunnel_community import TunnelSettings from Tribler.dispersy.candidate import Candidate from Tribler.dispersy.tool.clean_observers import clean_twisted_observers @@ -89,7 +91,7 @@ def check_json_port(val): check_json_port.coerceDoc = "Json API port must be greater than 0." 
-class Options(usage.Options): +class Options(SubprocessLauncher): optFlags = [ ["exit", "x", "Allow being an exit-node"], ["multichain", "M", "Enable the multichain community"] @@ -504,5 +506,8 @@ def makeService(self, options): return tunnel_helper_service - +options = Options() +options.parse_argv() +if options.attempt_subprocess_start(): + sys.exit(0) service_maker = TunnelHelperServiceMaker() From a9a509c65f6e5ff899a8e811431aa79acacdb374 Mon Sep 17 00:00:00 2001 From: qstokkink Date: Sun, 26 Feb 2017 12:00:22 +0100 Subject: [PATCH 4/6] Added pooled tunnel unit tests --- .../Community/Tunnel/processes/__init__.py | 0 .../Tunnel/processes/test_childprocess.py | 221 ++++++++++++++++ .../Tunnel/processes/test_line_util.py | 114 +++++++++ .../Tunnel/processes/test_rpcprocess.py | 129 ++++++++++ .../Tunnel/processes/test_subprocess.py | 241 ++++++++++++++++++ .../Tunnel/processes/test_tunnel_process.py | 192 ++++++++++++++ .../Test/Community/Tunnel/remotes/__init__.py | 0 .../Tunnel/remotes/test_remote_object.py | 172 +++++++++++++ .../Tunnel/remotes/test_sync_dict.py | 96 +++++++ Tribler/Test/GUI/test_gui.py | 2 + 10 files changed, 1167 insertions(+) create mode 100644 Tribler/Test/Community/Tunnel/processes/__init__.py create mode 100644 Tribler/Test/Community/Tunnel/processes/test_childprocess.py create mode 100644 Tribler/Test/Community/Tunnel/processes/test_line_util.py create mode 100644 Tribler/Test/Community/Tunnel/processes/test_rpcprocess.py create mode 100644 Tribler/Test/Community/Tunnel/processes/test_subprocess.py create mode 100644 Tribler/Test/Community/Tunnel/processes/test_tunnel_process.py create mode 100644 Tribler/Test/Community/Tunnel/remotes/__init__.py create mode 100644 Tribler/Test/Community/Tunnel/remotes/test_remote_object.py create mode 100644 Tribler/Test/Community/Tunnel/remotes/test_sync_dict.py diff --git a/Tribler/Test/Community/Tunnel/processes/__init__.py b/Tribler/Test/Community/Tunnel/processes/__init__.py new file mode 100644 
index 00000000000..e69de29bb2d diff --git a/Tribler/Test/Community/Tunnel/processes/test_childprocess.py b/Tribler/Test/Community/Tunnel/processes/test_childprocess.py new file mode 100644 index 00000000000..7eb8840775a --- /dev/null +++ b/Tribler/Test/Community/Tunnel/processes/test_childprocess.py @@ -0,0 +1,221 @@ +from twisted.internet.defer import Deferred, inlineCallbacks + +import Tribler.community.tunnel.processes + +from Tribler.community.tunnel.processes import line_util +from Tribler.community.tunnel.processes.childprocess import ChildProcess +from Tribler.dispersy.util import blocking_call_on_reactor_thread +from Tribler.Test.test_as_server import AbstractServer + + +class MockTransport(object): + + def __init__(self): + self.input = {} + self.deferred = Deferred() + + def writeToChild(self, fd, data): + if fd in self.input.keys(): + self.input[fd] = self.input[fd] + data + else: + self.input[fd] = data + + if len(self.input) == 1: + self.deferred.callback(None) + + def get_output_on(self, fd): + if fd in self.input.keys(): + return self.input[fd] + else: + return "" + + +class MockChildProcess(ChildProcess): + + def __init__(self): + self.transport = MockTransport() + self.input_callbacks = {1: self.on_generic, + 4: self.on_ctrl, + 6: self.on_data, + 8: self.on_exit} + self.called_ctrl = False + self.called_data = False + self.called_exit = False + + def on_ctrl(self, msg): + self.called_ctrl = True + + def on_exit(self, msg): + self.called_exit = True + + def on_data(self, msg): + self.called_data = True + + +class TestChildProcess(AbstractServer): + + @classmethod + def setUpClass(cls): + """ + Set up a message that contains all 256 possible characters + """ + cls.message = "".join(chr(i) for i in xrange(256)) + + @blocking_call_on_reactor_thread + @inlineCallbacks + def setUp(self, annotate=True): + """ + Write all of the Subprocess output to strings instead of file descriptors. 
+ """ + yield super(TestChildProcess, self).setUp(annotate=annotate) + self.process = MockChildProcess() + + @blocking_call_on_reactor_thread + @inlineCallbacks + def test_data_out(self): + """ + Output data should be pack_data()'d. + """ + Tribler.community.tunnel.processes.CHILDFDS_ENABLED = True + reload(Tribler.community.tunnel.processes.childprocess) + self.process.write_data(TestChildProcess.message) + + yield self.process.transport.deferred + sent = self.process.transport.get_output_on(5) + + self.assertGreater(len(sent), 0) + + _, decoded = line_util.unpack_complex(sent) + + self.assertIsNotNone(decoded) + self.assertEquals(decoded, TestChildProcess.message) + + @blocking_call_on_reactor_thread + @inlineCallbacks + def test_ctrl_out(self): + """ + Output data should be pack_data()'d. + """ + Tribler.community.tunnel.processes.CHILDFDS_ENABLED = True + reload(Tribler.community.tunnel.processes.childprocess) + self.process.write_ctrl(TestChildProcess.message) + + yield self.process.transport.deferred + sent = self.process.transport.get_output_on(3) + + self.assertGreater(len(sent), 0) + + _, decoded = line_util.unpack_complex(sent) + + self.assertIsNotNone(decoded) + self.assertEquals(decoded, TestChildProcess.message) + + @blocking_call_on_reactor_thread + @inlineCallbacks + def test_exit_out(self): + """ + Output data should be pack_data()'d. + """ + Tribler.community.tunnel.processes.CHILDFDS_ENABLED = True + reload(Tribler.community.tunnel.processes.childprocess) + self.process.write_exit(TestChildProcess.message) + + yield self.process.transport.deferred + sent = self.process.transport.get_output_on(7) + + self.assertGreater(len(sent), 0) + + _, decoded = line_util.unpack_complex(sent) + + self.assertIsNotNone(decoded) + self.assertEquals(decoded, TestChildProcess.message) + + @blocking_call_on_reactor_thread + @inlineCallbacks + def test_generic_data_out(self): + """ + Output data should be pack_data()'d. 
+ """ + Tribler.community.tunnel.processes.CHILDFDS_ENABLED = False + reload(Tribler.community.tunnel.processes.childprocess) + self.process.write_data(TestChildProcess.message) + + yield self.process.transport.deferred + sent = self.process.transport.get_output_on(0) + + self.assertGreater(len(sent), 0) + + _, decoded = line_util.unpack_complex(sent) + + self.assertIsNotNone(decoded) + self.assertEquals(decoded, str(5) + TestChildProcess.message) + + @blocking_call_on_reactor_thread + @inlineCallbacks + def test_generic_ctrl_out(self): + """ + Output data should be pack_data()'d. + """ + Tribler.community.tunnel.processes.CHILDFDS_ENABLED = False + reload(Tribler.community.tunnel.processes.childprocess) + self.process.write_ctrl(TestChildProcess.message) + + yield self.process.transport.deferred + sent = self.process.transport.get_output_on(0) + + self.assertGreater(len(sent), 0) + + _, decoded = line_util.unpack_complex(sent) + + self.assertIsNotNone(decoded) + self.assertEquals(decoded, str(3) + TestChildProcess.message) + + @blocking_call_on_reactor_thread + @inlineCallbacks + def test_generic_exit_out(self): + """ + Output data should be pack_data()'d. + """ + Tribler.community.tunnel.processes.CHILDFDS_ENABLED = False + reload(Tribler.community.tunnel.processes.childprocess) + self.process.write_exit(TestChildProcess.message) + + yield self.process.transport.deferred + sent = self.process.transport.get_output_on(0) + + self.assertGreater(len(sent), 0) + + _, decoded = line_util.unpack_complex(sent) + + self.assertIsNotNone(decoded) + self.assertEquals(decoded, str(7) + TestChildProcess.message) + + def test_on_generic_ctrl(self): + """ + A generic message should be forwarded to the correct stream. 
+ """ + self.process.on_generic(str(4) + TestChildProcess.message) + + self.assertTrue(self.process.called_ctrl) + self.assertFalse(self.process.called_data) + self.assertFalse(self.process.called_exit) + + def test_on_generic_data(self): + """ + A generic message should be forwarded to the correct stream. + """ + self.process.on_generic(str(6) + TestChildProcess.message) + + self.assertFalse(self.process.called_ctrl) + self.assertTrue(self.process.called_data) + self.assertFalse(self.process.called_exit) + + def test_on_generic_exit(self): + """ + A generic message should be forwarded to the correct stream. + """ + self.process.on_generic(str(8) + TestChildProcess.message) + + self.assertFalse(self.process.called_ctrl) + self.assertFalse(self.process.called_data) + self.assertTrue(self.process.called_exit) diff --git a/Tribler/Test/Community/Tunnel/processes/test_line_util.py b/Tribler/Test/Community/Tunnel/processes/test_line_util.py new file mode 100644 index 00000000000..982fe232596 --- /dev/null +++ b/Tribler/Test/Community/Tunnel/processes/test_line_util.py @@ -0,0 +1,114 @@ +import struct +import unittest + +from Tribler.community.tunnel.processes.line_util import (fix_split, + pack_data, + unpack_data, + unpack_complex) + +BINARY_STRING_ALL_CHARS = "".join([chr(i) for i in range(256)]) + + +class TestLineUtil(unittest.TestCase): + + def test_fix_split_correct_single(self): + args = [BINARY_STRING_ALL_CHARS] + out = fix_split(1, "", args) + + self.assertEqual(out, args) + + def test_fix_split_correct_double(self): + args = ["test", BINARY_STRING_ALL_CHARS] + out = fix_split(2, "", args) + + self.assertEqual(out, args) + + def test_fix_split_correct_many(self): + args = [BINARY_STRING_ALL_CHARS] * 20 + out = fix_split(20, "", args) + + self.assertEqual(out, args) + + def test_fix_split_broken_single(self): + delim = chr(128) + args = ["test"] + BINARY_STRING_ALL_CHARS.split(delim) + out = fix_split(2, delim, args) + + self.assertEqual(out, ["test", 
BINARY_STRING_ALL_CHARS]) + + def test_fix_split_broken_double(self): + delim = chr(128) + args = (["test"] + + BINARY_STRING_ALL_CHARS.split(delim) + + BINARY_STRING_ALL_CHARS.split(delim)) + out = fix_split(2, delim, args) + + self.assertEqual(out, ["test", BINARY_STRING_ALL_CHARS + + delim + + BINARY_STRING_ALL_CHARS]) + + def test_pack_data_empty(self): + out = pack_data("") + + self.assertEqual(len(out), 9) + + l = struct.unpack("Q", out[:8])[0] + + self.assertEqual(l, 1) + self.assertEqual(out[-1], "\n") + + def test_pack_data_full(self): + out = pack_data(BINARY_STRING_ALL_CHARS) + + self.assertEqual(len(out), len(BINARY_STRING_ALL_CHARS) + 9) + + l = struct.unpack("Q", out[:8])[0] + + self.assertEqual(l, len(BINARY_STRING_ALL_CHARS) + 1) + self.assertEqual(out[8:-1], BINARY_STRING_ALL_CHARS) + self.assertEqual(out[-1], "\n") + + def test_unpack_data_incomplete(self): + data = "0000000" + l, out = unpack_data(data) + + self.assertGreater(l, len(data)) + self.assertEqual(out, data) + + def test_unpack_data_empty(self): + data = pack_data("") + l, out = unpack_data(data) + + self.assertEqual(out, "") + self.assertEqual(l, len(out) + 9) + self.assertEqual(l, len(data)) + + def test_unpack_data_full(self): + data = pack_data(BINARY_STRING_ALL_CHARS) + l, out = unpack_data(data) + + self.assertEqual(out, BINARY_STRING_ALL_CHARS) + self.assertEqual(l, len(out) + 9) + self.assertEqual(l, len(data)) + + def test_unpack_complex_incomplete(self): + data = pack_data(BINARY_STRING_ALL_CHARS)[:-2] + keep, share = unpack_complex(data) + + self.assertEqual(keep, data) + self.assertEqual(share, None) + + def test_unpack_complex_complete(self): + data = pack_data(BINARY_STRING_ALL_CHARS) + keep, share = unpack_complex(data) + + self.assertEqual(keep, "") + self.assertEqual(share, BINARY_STRING_ALL_CHARS) + + def test_unpack_complex_overflow(self): + remainder = "test" + data = pack_data(BINARY_STRING_ALL_CHARS) + keep, share = unpack_complex(data + remainder) + + 
self.assertEqual(keep, remainder) + self.assertEqual(share, BINARY_STRING_ALL_CHARS) diff --git a/Tribler/Test/Community/Tunnel/processes/test_rpcprocess.py b/Tribler/Test/Community/Tunnel/processes/test_rpcprocess.py new file mode 100644 index 00000000000..54a53bc43e1 --- /dev/null +++ b/Tribler/Test/Community/Tunnel/processes/test_rpcprocess.py @@ -0,0 +1,129 @@ +import unittest + +from Tribler.community.tunnel.processes.rpcprocess import RPCProcess + + +class MockProcess(RPCProcess): + + def __init__(self): + super(MockProcess, self).__init__() + self.ctrl_written = "" + + def on_exit(self, s): + pass + + def write_exit(self, s): + pass + + def on_data(self, s): + pass + + def write_data(self, s): + pass + + def write_ctrl(self, s): + self.ctrl_written += s + + def clear_callbacks(self): + """ + Deal with the aftermath of calling send_rpc without a response + """ + for d in frozenset(self.wait_deferreds): + self.wait_deferreds[d].callback("") + + +class TestRPCProcess(unittest.TestCase): + + def setUp(self): + self.called = False + + def test_send_correct(self): + mock_sender = MockProcess() + mock_sender.register_rpc("test") + mock_sender.send_rpc("test", "value") + + mock_sender.clear_callbacks() + + self.assertGreater(len(mock_sender.ctrl_written), 0) + + def test_send_correct_receive_no_arg(self): + rpc_name = "test" + def callback(): + self.called = True + + mock_receiver = MockProcess() + mock_receiver.register_rpc(rpc_name, callback) + + mock_sender = MockProcess() + mock_sender.register_rpc(rpc_name) + mock_sender.send_rpc(rpc_name) + + mock_sender.clear_callbacks() + + mock_receiver.on_ctrl(mock_sender.ctrl_written) + + self.assertTrue(self.called) + + def test_send_correct_receive_with_arg(self): + rpc_name = "test" + def callback(arg1, arg2): + self.called = True + self.assertEqual(arg1, "value1") + self.assertEqual(arg2, "value2") + + mock_receiver = MockProcess() + mock_receiver.register_rpc(rpc_name, callback) + + mock_sender = MockProcess() + 
mock_sender.register_rpc(rpc_name) + mock_sender.send_rpc(rpc_name, ("value1", "value2")) + + mock_sender.clear_callbacks() + + mock_receiver.on_ctrl(mock_sender.ctrl_written) + + self.assertTrue(self.called) + + def test_send_correct_respond(self): + rpc_name = "test" + + def callback(): + return "value" + + mock_receiver = MockProcess() + mock_receiver.register_rpc(rpc_name, callback) + + mock_sender = MockProcess() + mock_sender.register_rpc(rpc_name) + deferred = mock_sender.send_rpc(rpc_name) + + mock_receiver.on_ctrl(mock_sender.ctrl_written) + mock_sender.on_ctrl(mock_receiver.ctrl_written) + + self.assertEqual(deferred.result, "value") + + def test_send_async_order(self): + rpc_name = "test" + + def callback(): + return "value" + + mock_receiver = MockProcess() + mock_receiver.register_rpc(rpc_name, callback) + + mock_sender = MockProcess() + mock_sender.register_rpc(rpc_name) + deferred1 = mock_sender.send_rpc(rpc_name) + send1 = mock_sender.ctrl_written + mock_sender.ctrl_written = "" + deferred2 = mock_sender.send_rpc(rpc_name) + + mock_receiver.on_ctrl(mock_sender.ctrl_written) + receive1 = mock_receiver.ctrl_written + mock_receiver.ctrl_written = "" + mock_receiver.on_ctrl(send1) + mock_sender.on_ctrl(mock_receiver.ctrl_written) + mock_sender.on_ctrl(receive1) + + self.assertEqual(deferred1.result, "value") + self.assertEqual(deferred2.result, "value") diff --git a/Tribler/Test/Community/Tunnel/processes/test_subprocess.py b/Tribler/Test/Community/Tunnel/processes/test_subprocess.py new file mode 100644 index 00000000000..6b1ad45f2a8 --- /dev/null +++ b/Tribler/Test/Community/Tunnel/processes/test_subprocess.py @@ -0,0 +1,241 @@ +import StringIO +import unittest + +from twisted.internet.defer import Deferred, inlineCallbacks + +import Tribler.community.tunnel.processes + +from Tribler.community.tunnel.processes import line_util +from Tribler.dispersy.util import blocking_call_on_reactor_thread +from Tribler.Test.test_as_server import AbstractServer + 
+# Import module in childfd fallback mode +Tribler.community.tunnel.processes.CHILDFDS_ENABLED = False +from Tribler.community.tunnel.processes.subprocess import (LineConsumer, Subprocess, + FNO_CTRL_OUT, FNO_DATA_OUT, FNO_EXIT_OUT, + FNO_CTRL_IN, FNO_DATA_IN, FNO_EXIT_IN) + + +class MockSubprocess(Subprocess): + + def __init__(self): + super(MockSubprocess, self).__init__() + self.called_ctrl = False + self.called_data = False + self.called_exit = False + + def on_ctrl(self, msg): + self.called_ctrl = True + + def on_exit(self, msg): + self.called_exit = True + + def on_data(self, msg): + self.called_data = True + + +class MockCallbackHandler(object): + + def __init__(self): + self.input = [] + self.deferred = Deferred() + self.deferred2 = Deferred() + + def cb_message(self, msg): + self.input.append(msg) + if len(self.input) == 1: + self.deferred.callback(None) + elif len(self.input) == 2: + self.deferred2.callback(None) + + +class MockFile(object): + """ + We can't use StringIO here as readline() does not correspond + with a real file's readline. + """ + + def __init__(self): + self.closed = False + self.buffer = [] + + def fake_close(self): + self.closed = True + + def readline(self): + if self.buffer: + return self.buffer.pop(0) + else: + return "" + + def write(self, s): + self.buffer.append(s) + + +class TestSubprocess(unittest.TestCase): + + @classmethod + def setUpClass(cls): + """ + Set up a message that contains all 256 possible characters + """ + cls.message = "".join(chr(i) for i in xrange(256)) + + def setUp(self): + """ + Write all of the Subprocess output to strings instead of file descriptors. 
+ """ + self.ctrl_out = StringIO.StringIO() + self.data_out = StringIO.StringIO() + self.exit_out = StringIO.StringIO() + + Tribler.community.tunnel.processes.subprocess.FILE_CTRL_OUT = self.ctrl_out + Tribler.community.tunnel.processes.subprocess.FILE_DATA_OUT = self.data_out + Tribler.community.tunnel.processes.subprocess.FILE_EXIT_OUT = self.exit_out + + self.process = MockSubprocess() + + def tearDown(self): + self.process.close_all_streams() + + def test_data_out(self): + """ + Output data should be pack_data()'d. + In generic mode the unpacked data should be prefixed with the stream id. + """ + self.process.write_data(TestSubprocess.message) + + sent = self.data_out.getvalue() + + self.assertGreater(len(sent), 0) + + _, decoded = line_util.unpack_complex(sent) + + self.assertIsNotNone(decoded) + self.assertEquals(decoded, str(FNO_DATA_OUT) + TestSubprocess.message) + + def test_ctrl_out(self): + """ + Output data should be pack_data()'d. + In generic mode the unpacked data should be prefixed with the stream id. + """ + self.process.write_ctrl(TestSubprocess.message) + + sent = self.ctrl_out.getvalue() + + self.assertGreater(len(sent), 0) + + _, decoded = line_util.unpack_complex(sent) + + self.assertIsNotNone(decoded) + self.assertEquals(decoded, str(FNO_CTRL_OUT) + TestSubprocess.message) + + def test_exit_out(self): + """ + Output data should be pack_data()'d. + In generic mode the unpacked data should be prefixed with the stream id. + """ + self.process.write_exit(TestSubprocess.message) + + sent = self.exit_out.getvalue() + + self.assertGreater(len(sent), 0) + + _, decoded = line_util.unpack_complex(sent) + + self.assertIsNotNone(decoded) + self.assertEquals(decoded, str(FNO_EXIT_OUT) + TestSubprocess.message) + + def test_on_generic_ctrl(self): + """ + A generic message should be forwarded to the correct stream. 
+ """ + self.process.on_generic(str(FNO_CTRL_IN) + TestSubprocess.message) + + self.assertTrue(self.process.called_ctrl) + self.assertFalse(self.process.called_data) + self.assertFalse(self.process.called_exit) + + def test_on_generic_data(self): + """ + A generic message should be forwarded to the correct stream. + """ + self.process.on_generic(str(FNO_DATA_IN) + TestSubprocess.message) + + self.assertFalse(self.process.called_ctrl) + self.assertTrue(self.process.called_data) + self.assertFalse(self.process.called_exit) + + def test_on_generic_exit(self): + """ + A generic message should be forwarded to the correct stream. + """ + self.process.on_generic(str(FNO_EXIT_IN) + TestSubprocess.message) + + self.assertFalse(self.process.called_ctrl) + self.assertFalse(self.process.called_data) + self.assertTrue(self.process.called_exit) + + +class TestLineConsumer(AbstractServer): + + @blocking_call_on_reactor_thread + @inlineCallbacks + def setUp(self, annotate=True): + yield super(TestLineConsumer, self).setUp(annotate=annotate) + self.stream = MockFile() + self.handler = MockCallbackHandler() + self.consumer = None + + @blocking_call_on_reactor_thread + @inlineCallbacks + def tearDown(self, annotate=True): + self.stream.fake_close() + if self.consumer: + self.consumer.join(1.0) + yield super(TestLineConsumer, self).tearDown(annotate) + + @blocking_call_on_reactor_thread + @inlineCallbacks + def test_single(self): + """ + The LineConsumer should auto-unpack_complex() a line of incoming data. + """ + data = line_util.pack_data("test") + self.stream.write(data) + + self.consumer = LineConsumer(self.stream, self.handler.cb_message) + yield self.handler.deferred + + self.assertListEqual(self.handler.input, ["test"]) + + @blocking_call_on_reactor_thread + @inlineCallbacks + def test_truncated(self): + """ + The LineConsumer should not forward half-lines. 
+ """ + data = line_util.pack_data("test") + self.stream.write(data) + self.stream.write(data[:-1]) + + self.consumer = LineConsumer(self.stream, self.handler.cb_message) + yield self.handler.deferred + + self.assertListEqual(self.handler.input, ["test"]) + + @blocking_call_on_reactor_thread + @inlineCallbacks + def test_double(self): + """ + Two concatenated messages should be decoded as two separate messages. + """ + data = line_util.pack_data("test") + self.stream.write(data) + self.stream.write(data) + + self.consumer = LineConsumer(self.stream, self.handler.cb_message) + yield self.handler.deferred + yield self.handler.deferred2 + + self.assertListEqual(self.handler.input, ["test", "test"]) diff --git a/Tribler/Test/Community/Tunnel/processes/test_tunnel_process.py b/Tribler/Test/Community/Tunnel/processes/test_tunnel_process.py new file mode 100644 index 00000000000..637052058e8 --- /dev/null +++ b/Tribler/Test/Community/Tunnel/processes/test_tunnel_process.py @@ -0,0 +1,192 @@ +from twisted.internet import reactor +from twisted.internet.defer import Deferred, inlineCallbacks + +# Isolate SubProcess +import Tribler.community.tunnel.processes +Tribler.community.tunnel.processes.CHILDFDS_ENABLED = False + +# Isolate ChildProcess +from Tribler.community.tunnel.processes.childprocess import ChildProcess + + +def cp_init_overwrite(x): + super(ChildProcess, x).__init__() + x.input_callbacks = {1: x.on_generic, + 4: x.on_ctrl, + 6: x.on_data, + 8: x.on_exit} +ChildProcess.__init__ = cp_init_overwrite + +from Tribler.community.tunnel.processes.rpcprocess import RPCProcess +from Tribler.community.tunnel.processes.tunnel_childprocess import TunnelProcess +from Tribler.community.tunnel.processes.tunnel_subprocess import TunnelSubprocess +from Tribler.community.tunnel.processes.rpc_defs import (RPC_CREATE, + RPC_NOTIFY, + RPC_SYNC, + RPC_MONITOR, + RPC_CIRCUIT, + RPC_CIRDEAD) +from Tribler.dispersy.candidate import Candidate +from Tribler.dispersy.util import 
blocking_call_on_reactor_thread +from Tribler.Test.test_as_server import AbstractServer + + +class MockInternet(RPCProcess): + + def __init__(self): + RPCProcess.__init__(self) + self.linked_internet = None + + def set_internet_link(self, other): + self.linked_internet = other + + def write_ctrl(self, msg): + reactor.callInThread(self.linked_internet.on_ctrl, msg) + + def write_data(self, msg): + reactor.callInThread(self.linked_internet.on_data, msg) + + def write_exit(self, msg): + reactor.callInThread(self.linked_internet.on_exit, msg) + + +class MockCommunity(object): + + def __init__(self): + self.monitor_input = [] + self.create_input = () + self.data_input = () + self.updated = Deferred() + + self.circuits = {42: None} + + def monitor_infohashes(self, infohashes): + self.monitor_input.extend(infohashes) + self.updated.callback(None) + + def create_circuit(self, goal_hops, type, callback, required_endpoint, info_hash): + self.create_input = (goal_hops, type, callback, required_endpoint, info_hash) + self.updated.callback(42) + return 42 + + def send_data(self, candidates, circuit_id, dest_address, source_address, data): + self.data_input = (candidates, circuit_id, dest_address, source_address, data) + self.updated.callback(None) + + +class MockTunnelProcess(TunnelProcess, MockInternet): + + def __init__(self): + MockInternet.__init__(self) + + self.community = None + + self.register_rpc(RPC_CREATE) + self.register_rpc(RPC_CIRCUIT) + self.register_rpc(RPC_MONITOR) + self.register_rpc(RPC_CIRDEAD, self.on_rpc_circuit_dead) + self.register_rpc(RPC_NOTIFY, self.on_rpc_notify) + self.register_rpc(RPC_SYNC, self.on_rpc_sync, False) + + self.exit_deferred = None + + +class MockTunnelSubprocess(TunnelSubprocess, MockInternet): + + def __init__(self): + MockInternet.__init__(self) + self.session_started = True + self.session = None + self.community = MockCommunity() + + self.register_rpc(RPC_CIRDEAD) + self.register_rpc(RPC_NOTIFY) + self.register_rpc(RPC_SYNC, 
auto_serialize=False) + self.register_rpc(RPC_CIRCUIT, self.on_rpc_circuit) + self.register_rpc(RPC_CREATE, self.on_rpc_create) + self.register_rpc(RPC_MONITOR, self.on_rpc_monitor_infohashes) + + +class TestTunnelProcesses(AbstractServer): + + @blocking_call_on_reactor_thread + @inlineCallbacks + def setUp(self, annotate=True): + yield super(TestTunnelProcesses, self).setUp(annotate=annotate) + self.process = MockTunnelProcess() + self.subprocess = MockTunnelSubprocess() + + self.process.set_internet_link(self.subprocess) + self.subprocess.set_internet_link(self.process) + + @blocking_call_on_reactor_thread + @inlineCallbacks + def test_send_data(self): + """ + Check if the TunnelSubprocess receives the send_data() call as it was + called on the main TunnelProcess. + """ + cd_list = [Candidate(("1.2.3.4", 1234), False), + Candidate(("5.6.7.8", 5678), False), + Candidate(("1.1.2.3", 1123), False)] + socket_list = [(c.sock_addr[0], c.sock_addr[1]) for c in cd_list] + circuit_id = 42 + dest_address = ("3.5.7.9", 3579) + source_address = ("2.4.6.8", 2468) + data = "".join([chr(i) for i in range(256)]) + + self.process.send_data(socket_list, circuit_id, dest_address, source_address, data) + + yield self.subprocess.community.updated + + self.assertEqual(self.subprocess.community.data_input, + (cd_list, circuit_id, dest_address, source_address, data)) + + @blocking_call_on_reactor_thread + @inlineCallbacks + def test_monitor_infohashes(self): + """ + Check if the TunnelSubprocess receives the monitor_infohashes() call as it was + called on the main TunnelProcess. 
+ + Includes all possible characters in 13 20-char infohashes + """ + infohashes = [("".join([chr(i) for i in range(0, 20)]), 1, 2), + ("".join([chr(i) for i in range(20, 40)]), 2, 3), + ("".join([chr(i) for i in range(40, 60)]), 3, 4), + ("".join([chr(i) for i in range(60, 80)]), 4, 5), + ("".join([chr(i) for i in range(80, 100)]), 5, 6), + ("".join([chr(i) for i in range(100, 120)]), 6, 7), + ("".join([chr(i) for i in range(120, 140)]), 7, 8), + ("".join([chr(i) for i in range(140, 160)]), 8, 9), + ("".join([chr(i) for i in range(160, 180)]), 9, 10), + ("".join([chr(i) for i in range(180, 200)]), 10, 11), + ("".join([chr(i) for i in range(200, 220)]), 11, 12), + ("".join([chr(i) for i in range(220, 240)]), 12, 13), + ("".join([chr(i) for i in range(236, 256)]), 13, 14)] + + self.process.monitor_infohashes(infohashes) + + yield self.subprocess.community.updated + + self.assertEqual(self.subprocess.community.monitor_input, + infohashes) + + @blocking_call_on_reactor_thread + @inlineCallbacks + def test_create_circuit(self): + """ + Check if the TunnelSubprocess receives the create_circuit() call as it was + called on the main TunnelProcess. 
+ """ + goal_hops = 1337 + type = "DATA" + callback = None # Cross-process callback should ALWAYS be None + required_endpoint = ("1.1.1.1", 1, "My First LibNacl Key") + info_hash = "".join([chr(i) for i in range(256)]) + + rval = yield self.process.create_circuit(goal_hops, type, required_endpoint, info_hash) + + self.assertEqual(rval, 42) + self.assertEqual(self.subprocess.community.create_input, + (goal_hops, type, callback, required_endpoint, info_hash)) diff --git a/Tribler/Test/Community/Tunnel/remotes/__init__.py b/Tribler/Test/Community/Tunnel/remotes/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/Tribler/Test/Community/Tunnel/remotes/test_remote_object.py b/Tribler/Test/Community/Tunnel/remotes/test_remote_object.py new file mode 100644 index 00000000000..cbdfc03a5e3 --- /dev/null +++ b/Tribler/Test/Community/Tunnel/remotes/test_remote_object.py @@ -0,0 +1,172 @@ +import unittest + +from Tribler.community.tunnel.remotes.remote_object import RemoteObject, shared + +BINARY_STRING_ALL_CHARS = "".join([chr(i) for i in range(256)]) + + +class MockShared(RemoteObject): + + @shared + def shared_normal(self): + pass + + @shared() + def shared_normal_parentheses(self): + pass + + @shared(False) + def shared_normal_explicit(self): + pass + + @shared(True) + def shared_id(self): + pass + + +class TestRemoteObject(unittest.TestCase): + + def test_dirty_startup(self): + mock = MockShared() + + self.assertFalse(mock.__is_dirty__()) + + def test_dirty_changed_normal(self): + mock = MockShared() + mock.shared_normal = "test" + + self.assertTrue(mock.__is_dirty__()) + + def test_dirty_changed_normal_parentheses(self): + mock = MockShared() + mock.shared_normal_parentheses = "test" + + self.assertTrue(mock.__is_dirty__()) + + def test_dirty_changed_normal_explicit(self): + mock = MockShared() + mock.shared_normal_explicit = "test" + + self.assertTrue(mock.__is_dirty__()) + + def test_dirty_changed_shared_id(self): + mock = MockShared() + 
mock.shared_id = "test" + + self.assertTrue(mock.__is_dirty__()) + + def test_serialize_with_bool(self): + mock = MockShared() + mock.shared_normal = True + + s = MockShared.__serialize__(mock) + _, out = MockShared.__unserialize__(s) + + self.assertEqual(out.shared_normal, True) + + def test_serialize_with_int(self): + mock = MockShared() + mock.shared_normal = 3 + + s = MockShared.__serialize__(mock) + _, out = MockShared.__unserialize__(s) + + self.assertEqual(out.shared_normal, 3) + + def test_serialize_with_float(self): + mock = MockShared() + mock.shared_normal = 3.14 + + s = MockShared.__serialize__(mock) + _, out = MockShared.__unserialize__(s) + + self.assertEqual(out.shared_normal, 3.14) + + def test_serialize_with_str(self): + mock = MockShared() + mock.shared_normal = BINARY_STRING_ALL_CHARS + + s = MockShared.__serialize__(mock) + _, out = MockShared.__unserialize__(s) + + self.assertEqual(out.shared_normal, BINARY_STRING_ALL_CHARS) + + def test_serialize_with_list(self): + a = [True, 3, 3.14, BINARY_STRING_ALL_CHARS] + + mock = MockShared() + mock.shared_normal = a + + s = MockShared.__serialize__(mock) + _, out = MockShared.__unserialize__(s) + + self.assertListEqual(out.shared_normal, a) + + def test_serialize_with_complex_nested(self): + mock = MockShared() + mock.shared_normal = [[chr(255),],] + + s = MockShared.__serialize__(mock) + + self.assertEqual(s, "") + + def test_serialize_with_pointer(self): + mock = MockShared() + mock.shared_normal = lambda _: None + + with self.assertRaises(TypeError): + MockShared.__serialize__(mock) + + def test_serialize_only_update_size(self): + mock = MockShared() + mock.shared_normal = "test" + + dirty = MockShared.__serialize__(mock) + clean = MockShared.__serialize__(mock) + + self.assertLess(len(clean), len(dirty)) + + def test_serialize_only_update(self): + mock = MockShared() + mock.shared_normal = "test" + + dirty = MockShared.__serialize__(mock, True) + full = MockShared.__serialize__(mock, False) + + _, 
out_dirty = MockShared.__unserialize__(dirty) + _, out_full = MockShared.__unserialize__(full) + + self.assertEqual(out_dirty.shared_normal, "test") + self.assertEqual(out_dirty.shared_normal, out_full.shared_normal) + + def test_unserialize_unknown(self): + known = {"test2": MockShared()} + mock = MockShared() + mock.shared_id = "test" + + s = MockShared.__serialize__(mock) + out_id, out = MockShared.__unserialize__(s, known) + + self.assertEqual(out_id, "test") + self.assertEqual(out_id, mock.shared_id) + self.assertNotEqual(out, known["test2"]) + + def test_unserialize_known(self): + known = {"test": MockShared()} + mock = MockShared() + mock.shared_id = "test" + + s = MockShared.__serialize__(mock) + out_id, out = MockShared.__unserialize__(s, known) + + self.assertEqual(out_id, "test") + self.assertEqual(out_id, mock.shared_id) + self.assertEqual(out, known["test"]) + + def test_extract_class_name(self): + mock = MockShared() + + s = MockShared.__serialize__(mock) + out = RemoteObject.__extract_class_name__(s) + + self.assertEqual(out, "MockShared") diff --git a/Tribler/Test/Community/Tunnel/remotes/test_sync_dict.py b/Tribler/Test/Community/Tunnel/remotes/test_sync_dict.py new file mode 100644 index 00000000000..4cc9be34efc --- /dev/null +++ b/Tribler/Test/Community/Tunnel/remotes/test_sync_dict.py @@ -0,0 +1,96 @@ +import unittest + +from Tribler.community.tunnel.remotes.remote_object import RemoteObject, shared +from Tribler.community.tunnel.remotes.sync_dict import SyncDict + + +class MockShared(RemoteObject): + + @shared + def field1(self): + pass + + @shared + def field2(self): + pass + + @shared(True) + def field_id(self): + pass + + +class TestSyncDict(unittest.TestCase): + + def setUp(self): + self.called = False + + def test_is_same_type(self): + sync_dict = SyncDict(MockShared) + + self.assertTrue(sync_dict.is_same_type(MockShared.__name__)) + + def test_is_not_same_type(self): + sync_dict = SyncDict(MockShared) + + 
self.assertFalse(sync_dict.is_same_type(SyncDict.__name__)) + + def test_callback_when_dirty(self): + def callback(_): + self.called = True + + sync_dict = SyncDict(MockShared, callback=callback) + mock = MockShared() + mock.field_id = "test" + sync_dict[mock.field_id] = mock + + sync_dict.synchronize() + + self.assertTrue(self.called) + + def test_no_callback_when_not_dirty(self): + def callback(_): + self.called = True + + sync_dict = SyncDict(MockShared, callback=callback) + + sync_dict.synchronize() + + self.assertFalse(self.called) + + def test_on_synchronize(self): + sync_dict2 = SyncDict(MockShared) + sync_dict = SyncDict(MockShared, + callback=sync_dict2.on_synchronize) + mock = MockShared() + mock.field_id = "test" + sync_dict[mock.field_id] = mock + + self.assertNotIn(mock.field_id, sync_dict2) + + sync_dict.synchronize() + + self.assertIn(mock.field_id, sync_dict2) + + def test_synchronize_only_update(self): + sync_dict2 = SyncDict(MockShared) + sync_dict = SyncDict(MockShared, + callback=sync_dict2.on_synchronize) + mock = MockShared() + mock.field_id = "test" + mock.field1 = "a" + mock.field2 = "b" + sync_dict[mock.field_id] = mock + + sync_dict.synchronize(False) + + self.assertEqual(sync_dict2[mock.field_id].field1, "a") + self.assertEqual(sync_dict2[mock.field_id].field2, "b") + + sync_dict2[mock.field_id].field1 = "x" + sync_dict2[mock.field_id].field2 = "d" + mock.field1 = "c" + + sync_dict.synchronize(True) + + self.assertEqual(sync_dict2[mock.field_id].field1, "c") + self.assertEqual(sync_dict2[mock.field_id].field2, "d") diff --git a/Tribler/Test/GUI/test_gui.py b/Tribler/Test/GUI/test_gui.py index d1025525a0d..c0d421c64d3 100644 --- a/Tribler/Test/GUI/test_gui.py +++ b/Tribler/Test/GUI/test_gui.py @@ -278,6 +278,8 @@ def test_settings(self): self.screenshot(window, name="settings_seeding") QTest.mouseClick(window.settings_anonymity_button, Qt.LeftButton) self.screenshot(window, name="settings_anonymity") + 
QTest.mouseClick(window.settings_experimental_button, Qt.LeftButton) + self.screenshot(window, name="settings_experimental") def test_downloads(self): self.go_to_and_wait_for_downloads() From f7ca6ca65ed69163e4b6039bce68c83a61a3ea24 Mon Sep 17 00:00:00 2001 From: qstokkink Date: Thu, 2 Mar 2017 13:22:34 +0100 Subject: [PATCH 5/6] DROPME: Temporary fix for #2824 --- Tribler/community/tunnel/hidden_community.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Tribler/community/tunnel/hidden_community.py b/Tribler/community/tunnel/hidden_community.py index 8ca70f13de5..4b41ac194c5 100644 --- a/Tribler/community/tunnel/hidden_community.py +++ b/Tribler/community/tunnel/hidden_community.py @@ -642,7 +642,10 @@ def find_download(self, lookup_info_hash): def create_introduction_point(self, info_hash, amount=1): # Create a separate key per infohash - self.find_download(info_hash).add_peer(('1.1.1.1', 1024)) + if self.find_download(info_hash): + self.find_download(info_hash).add_peer(('1.1.1.1', 1024)) + else: + return if info_hash not in self.session_keys: self.session_keys[info_hash] = self.crypto.generate_key(u"curve25519") From 815742202cf572540b6f1bfa711046ae71e9f435 Mon Sep 17 00:00:00 2001 From: qstokkink Date: Wed, 8 Mar 2017 14:40:43 +0100 Subject: [PATCH 6/6] SQUASHME: Switched to os.environ from sys.argv subprocesses --- .../Core/APIImplementation/LaunchManyCore.py | 4 -- .../tunnel/processes/childprocess.py | 8 ++- .../community/tunnel/subprocess_launcher.py | 70 ------------------- Tribler/community/tunnel/tunnel_community.py | 5 +- run_tribler.py | 10 +-- twisted/plugins/tribler_plugin.py | 17 +++-- twisted/plugins/tunnel_helper_plugin.py | 15 ++-- 7 files changed, 35 insertions(+), 94 deletions(-) delete mode 100644 Tribler/community/tunnel/subprocess_launcher.py diff --git a/Tribler/Core/APIImplementation/LaunchManyCore.py b/Tribler/Core/APIImplementation/LaunchManyCore.py index 0521b153249..e7eec147b6d 100644 --- 
a/Tribler/Core/APIImplementation/LaunchManyCore.py +++ b/Tribler/Core/APIImplementation/LaunchManyCore.py @@ -223,10 +223,6 @@ def load_communities(self): tunnel_settings = TunnelSettings(tribler_session=self.session) tunnel_kwargs = {'tribler_session': self.session, 'settings': tunnel_settings} - import sys - if '--tunnel_subprocess' in sys.argv: - tunnel_kwargs['is_subprocess'] = True - if self.session.get_enable_multichain(): multichain_kwargs = {'tribler_session': self.session} diff --git a/Tribler/community/tunnel/processes/childprocess.py b/Tribler/community/tunnel/processes/childprocess.py index 4810e9750a1..6c9e3061632 100644 --- a/Tribler/community/tunnel/processes/childprocess.py +++ b/Tribler/community/tunnel/processes/childprocess.py @@ -70,7 +70,7 @@ def __init__(self): # twistd can't deal with multiple instances # supplying unused pid and logfiles to facilitate this - params = sys.argv + ["--tunnel_subprocess"] + params = sys.argv if sys.argv[0].endswith("twistd"): params = [params[0]] + ["--pidfile", ".pidfile", "--logfile", ".logfile"] + params[1:] @@ -91,12 +91,14 @@ def _spawn_process(self, executable, params, path, fds): :type fds: {int: str or int} or None :returns: None """ + sub_environ = {'TUNNEL_SUBPROCESS': '1'} + sub_environ.update(environ) if fds: reactor.spawnProcess(self, executable, [executable] + params, - env=environ, + env=sub_environ, path=path, childFDs=fds) else: @@ -104,7 +106,7 @@ def _spawn_process(self, executable, params, path, fds): executable, [executable] + params, - env=environ, + env=sub_environ, path=path) def on_generic(self, msg): diff --git a/Tribler/community/tunnel/subprocess_launcher.py b/Tribler/community/tunnel/subprocess_launcher.py deleted file mode 100644 index 8908d43b445..00000000000 --- a/Tribler/community/tunnel/subprocess_launcher.py +++ /dev/null @@ -1,70 +0,0 @@ -import sys - -from twisted.python import usage -from twisted.internet import reactor - - -class SubprocessLauncher(usage.Options): - - """ - 
This class parses options and tries to start a Tunnel Subprocess - """ - - optFlags = [ - ["tunnel_subprocess", None, "Internal: run this process as a tunnel subprocess"] - ] - - def parse_argv(self): - """ - Parse sys.argv for arguments - - :returns: None - """ - remaining = sys.argv[:] - while len(remaining): - try: - self.parseOptions(remaining) - break - except usage.UsageError: - remaining.pop(0) - except SystemExit: - break - - - def attempt_subprocess_start(self): - """ - Attempt to start a subprocess, if specified - - This checks if the subprocess flag is set in the arguments. - If it is, it launches a subprocess. Be sure not to start - anything else if this is successful. - - :return: whether a subprocess was launched - :rtype: bool - """ - if 'tunnel_subprocess' in self.keys() and self['tunnel_subprocess']: - if reactor.running: - self._start_with_reactor() - else: - self._start_without_reactor() - return True - return False - - def _start_without_reactor(self): - """ - The reactor does not exist yet, we will have to run it ourselves - - :returns: None - """ - self._start_with_reactor() - reactor.run() - - def _start_with_reactor(self): - """ - Someone else has provided us with a reactor, simply start - - :returns: None - """ - from Tribler.community.tunnel.processes.tunnel_subprocess import TunnelSubprocess - subprocess = TunnelSubprocess() - subprocess.start() diff --git a/Tribler/community/tunnel/tunnel_community.py b/Tribler/community/tunnel/tunnel_community.py index 5a6af0f22a2..f2b1e6ac8b4 100644 --- a/Tribler/community/tunnel/tunnel_community.py +++ b/Tribler/community/tunnel/tunnel_community.py @@ -5,6 +5,7 @@ import time from collections import defaultdict from cryptography.exceptions import InvalidTag +from os import environ from twisted.internet import reactor from twisted.internet.defer import maybeDeferred, succeed, inlineCallbacks, returnValue @@ -302,7 +303,7 @@ def __init__(self, *args, **kwargs): self.trsession = self.settings = 
self.socks_server = None - def initialize(self, tribler_session=None, settings=None, is_subprocess=False): + def initialize(self, tribler_session=None, settings=None): self.trsession = tribler_session self.settings = settings if settings else TunnelSettings(tribler_session=tribler_session) @@ -321,7 +322,7 @@ def initialize(self, tribler_session=None, settings=None, is_subprocess=False): self.register_task("do_ping", LoopingCall(self.do_ping)).start(PING_INTERVAL) - if not is_subprocess: + if 'TUNNEL_SUBPROCESS' not in environ: self.register_task("do_circuits", LoopingCall(self.do_circuits)).start(5, now=True) self.socks_server = Socks5Server(self, tribler_session.get_tunnel_community_socks5_listen_ports() diff --git a/run_tribler.py b/run_tribler.py index 0072f753a4a..3d735158f06 100644 --- a/run_tribler.py +++ b/run_tribler.py @@ -3,15 +3,17 @@ import os import sys -from Tribler.community.tunnel.subprocess_launcher import SubprocessLauncher if os.path.exists("logger.conf"): logging.config.fileConfig("logger.conf") if __name__ == "__main__": - options = SubprocessLauncher() - options.parse_argv() - if options.attempt_subprocess_start(): + if 'TUNNEL_SUBPROCESS' in os.environ: + from Tribler.community.tunnel.processes.tunnel_subprocess import TunnelSubprocess + from twisted.internet import reactor + subprocess = TunnelSubprocess() + subprocess.start() + reactor.run() sys.exit(0) multiprocessing.freeze_support() diff --git a/twisted/plugins/tribler_plugin.py b/twisted/plugins/tribler_plugin.py index 94781fb565a..aa67227827f 100644 --- a/twisted/plugins/tribler_plugin.py +++ b/twisted/plugins/tribler_plugin.py @@ -11,6 +11,7 @@ from twisted.conch import manhole_tap from twisted.internet import reactor from twisted.plugin import IPlugin +from twisted.python import usage from twisted.python.log import msg from zope.interface import implements @@ -21,11 +22,10 @@ # Register yappi profiler from Tribler.community.allchannel.community import AllChannelCommunity from 
Tribler.community.search.community import SearchCommunity -from Tribler.community.tunnel.subprocess_launcher import SubprocessLauncher from Tribler.dispersy.utils import twistd_yappi -class Options(SubprocessLauncher): +class Options(usage.Options): optParameters = [ ["manhole", "m", 0, "Enable manhole telnet service listening at the specified port", int], ["statedir", "s", None, "Use an alternate statedir", str], @@ -144,8 +144,13 @@ def makeService(self, options): return tribler_service -options = Options() -options.parse_argv() -if options.attempt_subprocess_start(): +if 'TUNNEL_SUBPROCESS' in os.environ: + from Tribler.community.tunnel.processes.tunnel_subprocess import TunnelSubprocess + from twisted.internet import reactor + + subprocess = TunnelSubprocess() + subprocess.start() + reactor.run() sys.exit(0) -service_maker = TriblerServiceMaker() +else: + service_maker = TriblerServiceMaker() diff --git a/twisted/plugins/tunnel_helper_plugin.py b/twisted/plugins/tunnel_helper_plugin.py index 5381db8c420..2afe1a25e00 100644 --- a/twisted/plugins/tunnel_helper_plugin.py +++ b/twisted/plugins/tunnel_helper_plugin.py @@ -91,7 +91,7 @@ def check_json_port(val): check_json_port.coerceDoc = "Json API port must be greater than 0." -class Options(SubprocessLauncher): +class Options(usage.Options): optFlags = [ ["exit", "x", "Allow being an exit-node"], ["multichain", "M", "Enable the multichain community"] @@ -506,8 +506,13 @@ def makeService(self, options): return tunnel_helper_service -options = Options() -options.parse_argv() -if options.attempt_subprocess_start(): +if 'TUNNEL_SUBPROCESS' in os.environ: + from Tribler.community.tunnel.processes.tunnel_subprocess import TunnelSubprocess + from twisted.internet import reactor + + subprocess = TunnelSubprocess() + subprocess.start() + reactor.run() sys.exit(0) -service_maker = TunnelHelperServiceMaker() +else: + service_maker = TunnelHelperServiceMaker()