From 5c984d6930043371953e49468f8697b3a5f383b5 Mon Sep 17 00:00:00 2001 From: Rozhkov Dmitrii Date: Mon, 11 Apr 2022 19:55:13 +0500 Subject: [PATCH 01/11] #709 extract mempool service (#710) --- proxy/__main__.py | 12 +- proxy/common_neon/__init__.py | 1 + proxy/common_neon/types.py | 9 ++ proxy/common_neon/utils/__init__.py | 3 + .../common_neon/utils/queue_based_service.py | 117 ++++++++++++++++++ proxy/common_neon/{ => utils}/utils.py | 5 +- proxy/mempool/__init__.py | 3 + proxy/mempool/mem_pool.py | 35 ++++++ proxy/mempool/mempool_client.py | 20 +++ proxy/mempool/mempool_service.py | 26 ++++ proxy/neon_proxy_app.py | 14 +++ proxy/plugin/solana_rest_api.py | 8 +- 12 files changed, 243 insertions(+), 10 deletions(-) create mode 100644 proxy/common_neon/types.py create mode 100644 proxy/common_neon/utils/__init__.py create mode 100644 proxy/common_neon/utils/queue_based_service.py rename proxy/common_neon/{ => utils}/utils.py (98%) create mode 100644 proxy/mempool/__init__.py create mode 100644 proxy/mempool/mem_pool.py create mode 100644 proxy/mempool/mempool_client.py create mode 100644 proxy/mempool/mempool_service.py create mode 100644 proxy/neon_proxy_app.py diff --git a/proxy/__main__.py b/proxy/__main__.py index 809cb80f4..ed992ed24 100644 --- a/proxy/__main__.py +++ b/proxy/__main__.py @@ -9,8 +9,9 @@ :license: BSD, see LICENSE for more details. """ -from .proxy import entry_point + import os +from .neon_proxy_app import NeonProxyApp from .indexer.indexer_app import run_indexer @@ -25,8 +26,9 @@ print("Will run in indexer mode") run_indexer(solana_url) else: - from .statistics_exporter.prometheus_proxy_server import PrometheusProxyServer - PrometheusProxyServer() + neon_proxy_app = NeonProxyApp() + neon_proxy_app.start() + + + - print("Will run in proxy mode") - entry_point() diff --git a/proxy/common_neon/__init__.py b/proxy/common_neon/__init__.py index e69de29bb..ca2b699e8 100644 --- a/proxy/common_neon/__init__.py +++ b/proxy/common_neon/__init__.py @@ -0,0 +1 @@ +from .types import Result diff --git a/proxy/common_neon/types.py b/proxy/common_neon/types.py new file mode 100644 index 000000000..433444a30 --- /dev/null +++ b/proxy/common_neon/types.py @@ -0,0 +1,9 @@ +class Result: + def __init__(self, reason: str = None): + self._reason = reason + + def __bool__(self) -> bool: + return self._reason is None + + def __str__(self) -> str: + return self._reason if self._reason is not None else "" diff --git a/proxy/common_neon/utils/__init__.py b/proxy/common_neon/utils/__init__.py new file mode 100644 index 000000000..d45ecaed9 --- /dev/null +++ b/proxy/common_neon/utils/__init__.py @@ -0,0 +1,3 @@ +from .queue_based_service import QueueBasedService, QueueBasedServiceClient, ServiceInvocation +from .utils import * + diff --git a/proxy/common_neon/utils/queue_based_service.py b/proxy/common_neon/utils/queue_based_service.py new file mode 100644 index 000000000..2afb45613 --- /dev/null +++ b/proxy/common_neon/utils/queue_based_service.py @@ -0,0 +1,117 @@ +import abc +import multiprocessing as mp +import os +import queue +import signal + +from multiprocessing.managers import BaseManager +from dataclasses import dataclass, astuple, field +from typing import Tuple, Dict, Any + +from logged_groups import logged_group + +from ..types import Result + + +@dataclass +class ServiceInvocation: + method_name: str = None + args: Tuple[Any] = field(default_factory=tuple) + kwargs: Dict[str, Any] = field(default_factory=dict) + + +@logged_group("neon") +class QueueBasedServiceClient: + + def 
__init__(self, host: str, port: int): + class MemPoolQueueManager(BaseManager): + pass + + MemPoolQueueManager.register('get_queue') + queue_manager = MemPoolQueueManager(address=(host, port), authkey=b'abracadabra') + queue_manager.connect() + self._queue = queue_manager.get_queue() + + def invoke(self, method_name, *args, **kwargs) -> Result: + try: + self._invoke_impl(method_name, *args, **kwargs) + except queue.Full: + self.error(f"Failed to invoke the method: {method_name}, queue is full") + return Result("Mempool queue full") + return Result() + + def _invoke_impl(self, method_name, *args, **kwargs): + invocation = ServiceInvocation(method_name=method_name, args=args, kwargs=kwargs) + self._queue.put(invocation) + + +@logged_group("neon") +class QueueBasedService(abc.ABC): + + QUEUE_TIMEOUT_SEC = 0.4 + BREAK_PROC_INVOCATION = 0 + JOIN_PROC_TIMEOUT_SEC = 5 + + def __init__(self, *, port: int, is_background: bool): + self._port = port + self._is_back_ground = is_background + self._timeout = self.QUEUE_TIMEOUT_SEC + + class MemPoolQueueManager(BaseManager): + pass + + self._queue = mp.Queue() + MemPoolQueueManager.register("get_queue", callable=lambda: self._queue) + self._queue_manager = MemPoolQueueManager(address=('', port), authkey=b'abracadabra') + self._mempool_server = self._queue_manager.get_server() + self._mempool_server_process = mp.Process(target=self._mempool_server.serve_forever, name="mempool_listen_proc") + self._queue_process = mp.Process(target=self.run, name="mempool_queue_proc") + + pid = os.getpid() + signal.signal(signal.SIGINT, lambda sif, frame: self.finish() if os.getpid() == pid else 0) + + def start(self): + self.info(f"Starting queue server: {self._port}") + self._mempool_server_process.start() + self._queue_process.start() + if not self._is_back_ground: + self._queue_process.join() + + def run(self): + self.service_process_init() + while True: + try: + if not self._run_impl(): + break + except queue.Empty: + self.do_extras() + + def _run_impl(self) -> bool: + invocation = self._queue.get(block=True, timeout=self._timeout) + if invocation == self.BREAK_PROC_INVOCATION: + return False + self.dispatch(invocation) + return True + + def dispatch(self, invocation: ServiceInvocation): + method_name, args, kwargs = astuple(invocation) + handler = getattr(self, method_name, None) + if handler is None: + raise NotImplementedError(f"Process has no handler for {handler}") + handler(*args, **kwargs) + + def finish(self): + self.info("Finishing the queue and listening processes") + self._mempool_server_process.terminate() + if not self._queue_process.is_alive(): + return + self._queue.put_nowait(self.BREAK_PROC_INVOCATION) + self._queue_process.join(timeout=self.JOIN_PROC_TIMEOUT_SEC) + + @abc.abstractmethod + def do_extras(self): + assert "To be implemented in derived class" + + @abc.abstractmethod + def service_process_init(self): + assert "To be implemented in derived class" diff --git a/proxy/common_neon/utils.py b/proxy/common_neon/utils/utils.py similarity index 98% rename from proxy/common_neon/utils.py rename to proxy/common_neon/utils/utils.py index 56e4f3e3d..db837cbab 100644 --- a/proxy/common_neon/utils.py +++ b/proxy/common_neon/utils/utils.py @@ -6,9 +6,10 @@ from eth_utils import big_endian_to_int -from ..environment import EVM_LOADER_ID +#TODO: move it out from here +from ...environment import EVM_LOADER_ID -from ..common_neon.eth_proto import Trx as EthTx +from ..eth_proto import Trx as EthTx def str_fmt_object(obj): diff --git a/proxy/mempool/__init__.py 
b/proxy/mempool/__init__.py new file mode 100644 index 000000000..adc9b5cae --- /dev/null +++ b/proxy/mempool/__init__.py @@ -0,0 +1,3 @@ +from .mempool_service import MemPoolService +from .mempool_client import MemPoolClient +from .mem_pool import MemPool diff --git a/proxy/mempool/mem_pool.py b/proxy/mempool/mem_pool.py new file mode 100644 index 000000000..774ac02f7 --- /dev/null +++ b/proxy/mempool/mem_pool.py @@ -0,0 +1,35 @@ +from logged_groups import logged_group +from multiprocessing import Pool + + +@logged_group("neon.MemPool") +class MemPool: + + POOL_PROC_COUNT = 20 + + def __init__(self): + self._pool = Pool(processes=self.POOL_PROC_COUNT) + + def on_eth_send_raw_transaction(self, *, eth_trx_hash): + self._pool.apply_async(func=self._on_eth_send_raw_transaction_impl, args=(eth_trx_hash,), + callback=self.on_eth_send_raw_transaction_callback, error_callback=self.error_callback) + + def error_callback(self, error): + self.error("Failed to invoke on worker process: ", error) + + def on_eth_send_raw_transaction_callback(self, result): + pass + + def _on_eth_send_raw_transaction_impl(self, eth_trx_hash): + self.debug(f"Transaction is being processed on the worker: {eth_trx_hash}") + + def do_extras(self): + pass + + def __getstate__(self): + self_dict = self.__dict__.copy() + del self_dict['_pool'] + return self_dict + + def __setstate__(self, state): + self.__dict__.update(state) diff --git a/proxy/mempool/mempool_client.py b/proxy/mempool/mempool_client.py new file mode 100644 index 000000000..958b7ef09 --- /dev/null +++ b/proxy/mempool/mempool_client.py @@ -0,0 +1,20 @@ +from logged_groups import logged_group + +from ..common_neon.utils import QueueBasedServiceClient +from ..common_neon import Result + +from . import MemPoolService + + +@logged_group("neon.Proxy") +class MemPoolClient(QueueBasedServiceClient): + + MEM_POOL_SERVICE_HOST = "127.0.0.1" + + def __init__(self): + port, host = (MemPoolService.MEM_POOL_SERVICE_PORT, self.MEM_POOL_SERVICE_HOST) + self.info(f"Initialize MemPoolClient connecting to: {port} at: {host}") + QueueBasedServiceClient.__init__(self, host, port) + + def on_eth_send_raw_transaction(self, eth_trx_signature) -> Result: + return self.invoke("on_eth_send_raw_transaction", eth_trx_hash=eth_trx_signature) diff --git a/proxy/mempool/mempool_service.py b/proxy/mempool/mempool_service.py new file mode 100644 index 000000000..75c28d697 --- /dev/null +++ b/proxy/mempool/mempool_service.py @@ -0,0 +1,26 @@ +from logged_groups import logged_group + +from ..common_neon.utils import QueueBasedService + +from .mem_pool import MemPool + + +@logged_group("neon.MemPool") +class MemPoolService(QueueBasedService): + + MEM_POOL_SERVICE_PORT = 9091 + + def __init__(self, *, is_background: bool): + QueueBasedService.__init__(self, port=self.MEM_POOL_SERVICE_PORT, is_background=is_background) + self._mem_pool = None + + def on_eth_send_raw_transaction(self, *, eth_trx_hash): + self._mem_pool.on_eth_send_raw_transaction(eth_trx_hash=eth_trx_hash) + + # QueueBasedService abstracts + + def service_process_init(self): + self._mem_pool = MemPool() + + def do_extras(self): + self._mem_pool.do_extras() diff --git a/proxy/neon_proxy_app.py b/proxy/neon_proxy_app.py new file mode 100644 index 000000000..c7a1c636e --- /dev/null +++ b/proxy/neon_proxy_app.py @@ -0,0 +1,14 @@ +from .proxy import entry_point +from .mempool.mempool_service import MemPoolService +from .statistics_exporter.prometheus_proxy_server import PrometheusProxyServer + + +class NeonProxyApp: + + def 
__init__(self): + self._mempool_service = MemPoolService(is_background=True) + + def start(self): + PrometheusProxyServer() + self._mempool_service.start() + entry_point() diff --git a/proxy/plugin/solana_rest_api.py b/proxy/plugin/solana_rest_api.py index 8d6fb3523..924eeaba7 100644 --- a/proxy/plugin/solana_rest_api.py +++ b/proxy/plugin/solana_rest_api.py @@ -17,14 +17,14 @@ import sha3 from logged_groups import logged_group, logging_context -from typing import Optional, Union +from typing import Union from ..common.utils import build_http_response from ..http.codes import httpStatusCodes from ..http.parser import HttpParser from ..http.websocket import WebsocketFrame from ..http.server import HttpWebServerBasePlugin, httpProtocolTypes -from typing import Dict, List, Tuple, Optional +from typing import List, Tuple, Optional from ..common_neon.transaction_sender import NeonTxSender from ..common_neon.solana_interactor import SolanaInteractor @@ -35,10 +35,10 @@ from ..common_neon.estimate import GasEstimate from ..common_neon.utils import SolanaBlockInfo from ..common_neon.keys_storage import KeyStorage +from ..mempool import MemPoolClient from ..environment import SOLANA_URL, PP_SOLANA_URL, PYTH_MAPPING_ACCOUNT, EVM_STEP_COUNT, CHAIN_ID, ENABLE_PRIVATE_API from ..environment import NEON_EVM_VERSION, NEON_EVM_REVISION from ..environment import neon_cli -from ..environment import get_solana_accounts from ..memdb.memdb import MemDB from .gas_price_calculator import GasPriceCalculator from ..common_neon.eth_proto import Trx as EthTrx @@ -61,6 +61,7 @@ class EthereumModel: def __init__(self): self._solana = SolanaInteractor(SOLANA_URL) self._db = MemDB(self._solana) + self._mempool_client = MemPoolClient() if PP_SOLANA_URL == SOLANA_URL: self.gas_price_calculator = GasPriceCalculator(self._solana, PYTH_MAPPING_ACCOUNT) @@ -457,6 +458,7 @@ def eth_sendRawTransaction(self, rawTrx: str) -> str: tx_sender = NeonTxSender(self._db, self._solana, trx, steps=EVM_STEP_COUNT) tx_sender.execute() self._stat_tx_success() + self._mempool_client.on_eth_send_raw_transaction(eth_signature) return eth_signature except PendingTxError as err: From 99de774eb1f518e029ed99b36322fb093f0cb1b3 Mon Sep 17 00:00:00 2001 From: Rozhkov Dmitrii Date: Mon, 18 Apr 2022 16:07:26 +0500 Subject: [PATCH 02/11] #712 bring tx processing scheme onto asyncio platform --- .buildkite/pipeline.yml | 2 +- proxy/common_neon/__init__.py | 1 - proxy/common_neon/data.py | 18 +++ proxy/common_neon/emulator_interactor.py | 2 +- proxy/common_neon/types.py | 24 ---- proxy/common_neon/utils/__init__.py | 3 +- .../common_neon/utils/pickable_data_server.py | 84 +++++++++++++ .../common_neon/utils/queue_based_service.py | 117 ------------------ proxy/mempool/__init__.py | 5 +- proxy/mempool/mem_pool.py | 47 +++---- proxy/mempool/mempool_client.py | 22 ++-- proxy/mempool/mempool_service.py | 38 +++--- proxy/neon_proxy_app.py | 4 +- .../neon_rpc_api_model/neon_rpc_api_model.py | 10 +- .../neon_rpc_api_model/transaction_sender.py | 2 +- .../transaction_validator.py | 2 +- proxy/testing/test_pyth_network_client.py | 5 +- 17 files changed, 171 insertions(+), 215 deletions(-) delete mode 100644 proxy/common_neon/types.py create mode 100644 proxy/common_neon/utils/pickable_data_server.py delete mode 100644 proxy/common_neon/utils/queue_based_service.py diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index ef5112e80..858253674 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -7,7 +7,7 @@ steps: - label: ":terraform: 
build infrastructure" key: "create_infrastructure" if: &is_fts_enabled | - (build.pull_request.base_branch == "develop" && !build.pull_request.draft) || + (build.pull_request.base_branch == "712-mempool" && !build.pull_request.draft) || (build.source == "trigger_job" && build.env("NEON_EVM_FULL_TEST_SUITE") == "true") agents: queue: "testing" diff --git a/proxy/common_neon/__init__.py b/proxy/common_neon/__init__.py index ca2b699e8..e69de29bb 100644 --- a/proxy/common_neon/__init__.py +++ b/proxy/common_neon/__init__.py @@ -1 +0,0 @@ -from .types import Result diff --git a/proxy/common_neon/data.py b/proxy/common_neon/data.py index 08bfddfcc..102c877b1 100644 --- a/proxy/common_neon/data.py +++ b/proxy/common_neon/data.py @@ -1,3 +1,15 @@ +from __future__ import annotations +from dataclasses import dataclass +from typing import Dict, Any + + +@dataclass +class NeonTxPrecheckResult: + is_underpriced_tx_without_chainid: bool + emulating_result: NeonEmulatingResult + + +NeonEmulatingResult = Dict[str, Any] class NeonTxStatData: @@ -10,3 +22,9 @@ def __init__(self, neon_tx_hash: str, neon_income: int, tx_type: str, is_cancele def add_instruction(self, sol_tx_hash: str, sol_spent: int, steps: int, bpf: int) -> None: self.instructions.append((sol_tx_hash, sol_spent, steps, bpf)) + + +@dataclass +class NeonTxData: + tx_signed: str + diff --git a/proxy/common_neon/emulator_interactor.py b/proxy/common_neon/emulator_interactor.py index c021de712..42be7e211 100644 --- a/proxy/common_neon/emulator_interactor.py +++ b/proxy/common_neon/emulator_interactor.py @@ -8,7 +8,7 @@ from ..environment import neon_cli, NEON_TOKEN_MINT, CHAIN_ID from .errors import EthereumError -from .types import NeonEmulatingResult +from .data import NeonEmulatingResult @logged_group("neon.Proxy") diff --git a/proxy/common_neon/types.py b/proxy/common_neon/types.py deleted file mode 100644 index 265191f8a..000000000 --- a/proxy/common_neon/types.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass -from typing import Dict, Any - - -class Result: - def __init__(self, reason: str = None): - self._reason = reason - - def __bool__(self) -> bool: - return self._reason is None - - def __str__(self) -> str: - return self._reason if self._reason is not None else "" - - -@dataclass -class NeonTxPrecheckResult: - is_underpriced_tx_without_chainid: bool - emulating_result: NeonEmulatingResult - - -NeonEmulatingResult = Dict[str, Any] diff --git a/proxy/common_neon/utils/__init__.py b/proxy/common_neon/utils/__init__.py index d45ecaed9..6f67c4bd8 100644 --- a/proxy/common_neon/utils/__init__.py +++ b/proxy/common_neon/utils/__init__.py @@ -1,3 +1,4 @@ -from .queue_based_service import QueueBasedService, QueueBasedServiceClient, ServiceInvocation from .utils import * +from .pickable_data_server import PickableDataServer, PickableDataServerUser, PickableDataClient + diff --git a/proxy/common_neon/utils/pickable_data_server.py b/proxy/common_neon/utils/pickable_data_server.py new file mode 100644 index 000000000..94656911b --- /dev/null +++ b/proxy/common_neon/utils/pickable_data_server.py @@ -0,0 +1,84 @@ +from abc import ABC, abstractmethod +import asyncio +import socket +import pickle +import struct +from typing import Any +from logged_groups import logged_group + + +class PickableDataServerUser(ABC): + + @abstractmethod + def on_data_received(self, data: Any): + """Gets neon_tx_data from the neon rpc api service worker""" + + +@logged_group("neon.MemPool") +class 
PickableDataServer(ABC): + + def __init__(self, *, user: PickableDataServerUser, host: str, port: int): + self._user = user + self._port = port + self._host = host + + async def handle_client(self, client): + loop = asyncio.get_event_loop() + peer_name = client.getpeername() + self.debug(f"Got new incoming connection: {peer_name}") + while True: + try: + len_packed: bytes = await loop.sock_recv(client, 4) + if len(len_packed) == 0: + break + # TODO: all the data can be received by parts, handle it + payload_len_data = struct.unpack("!I", len_packed[:4])[0] + payload = await loop.sock_recv(client, payload_len_data) + data = pickle.loads(payload) + self._user.on_data_received(data) + except ConnectionResetError: + self.error(f"Client connection: {peer_name} - has been interrupted") + break + except Exception as err: + self.error(f"Failed to receive data over: {peer_name} - err: {err}") + continue + client.close() + + async def run_server(self): + self.info(f"Listen port: {self._port} on: {self._host}") + server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + server.bind((self._host, self._port)) + server.listen(8) + server.setblocking(False) + + loop = asyncio.get_event_loop() + while True: + client, _ = await loop.sock_accept(server) + loop.create_task(self.handle_client(client)) + + +@logged_group("neon.Proxy") +class PickableDataClient: + + def __init__(self, host: str, port: int): + + self.info(f"Initialize PickableDataClient connecting to: {port} at: {host}") + self._connection = socket.create_connection((host, port)) + + def send_data(self, pickable_data: Any): + try: + payload = self._encode_pickable_data(pickable_data) + self._connection.send(payload) + except BaseException as err: + self.error(f"Failed to send data: {err}") + raise Exception("Failed to send pickable data") + + def _encode_pickable_data(self, pickable_data: Any): + data = pickle.dumps(pickable_data) + data_len = len(data) + packed_len = struct.pack("!I", data_len) + payload = packed_len + data + return payload + + def __del__(self): + self._connection.close() diff --git a/proxy/common_neon/utils/queue_based_service.py b/proxy/common_neon/utils/queue_based_service.py deleted file mode 100644 index 2afb45613..000000000 --- a/proxy/common_neon/utils/queue_based_service.py +++ /dev/null @@ -1,117 +0,0 @@ -import abc -import multiprocessing as mp -import os -import queue -import signal - -from multiprocessing.managers import BaseManager -from dataclasses import dataclass, astuple, field -from typing import Tuple, Dict, Any - -from logged_groups import logged_group - -from ..types import Result - - -@dataclass -class ServiceInvocation: - method_name: str = None - args: Tuple[Any] = field(default_factory=tuple) - kwargs: Dict[str, Any] = field(default_factory=dict) - - -@logged_group("neon") -class QueueBasedServiceClient: - - def __init__(self, host: str, port: int): - class MemPoolQueueManager(BaseManager): - pass - - MemPoolQueueManager.register('get_queue') - queue_manager = MemPoolQueueManager(address=(host, port), authkey=b'abracadabra') - queue_manager.connect() - self._queue = queue_manager.get_queue() - - def invoke(self, method_name, *args, **kwargs) -> Result: - try: - self._invoke_impl(method_name, *args, **kwargs) - except queue.Full: - self.error(f"Failed to invoke the method: {method_name}, queue is full") - return Result("Mempool queue full") - return Result() - - def _invoke_impl(self, method_name, *args, **kwargs): - invocation = ServiceInvocation(method_name=method_name, args=args, kwargs=kwargs) - 
self._queue.put(invocation) - - -@logged_group("neon") -class QueueBasedService(abc.ABC): - - QUEUE_TIMEOUT_SEC = 0.4 - BREAK_PROC_INVOCATION = 0 - JOIN_PROC_TIMEOUT_SEC = 5 - - def __init__(self, *, port: int, is_background: bool): - self._port = port - self._is_back_ground = is_background - self._timeout = self.QUEUE_TIMEOUT_SEC - - class MemPoolQueueManager(BaseManager): - pass - - self._queue = mp.Queue() - MemPoolQueueManager.register("get_queue", callable=lambda: self._queue) - self._queue_manager = MemPoolQueueManager(address=('', port), authkey=b'abracadabra') - self._mempool_server = self._queue_manager.get_server() - self._mempool_server_process = mp.Process(target=self._mempool_server.serve_forever, name="mempool_listen_proc") - self._queue_process = mp.Process(target=self.run, name="mempool_queue_proc") - - pid = os.getpid() - signal.signal(signal.SIGINT, lambda sif, frame: self.finish() if os.getpid() == pid else 0) - - def start(self): - self.info(f"Starting queue server: {self._port}") - self._mempool_server_process.start() - self._queue_process.start() - if not self._is_back_ground: - self._queue_process.join() - - def run(self): - self.service_process_init() - while True: - try: - if not self._run_impl(): - break - except queue.Empty: - self.do_extras() - - def _run_impl(self) -> bool: - invocation = self._queue.get(block=True, timeout=self._timeout) - if invocation == self.BREAK_PROC_INVOCATION: - return False - self.dispatch(invocation) - return True - - def dispatch(self, invocation: ServiceInvocation): - method_name, args, kwargs = astuple(invocation) - handler = getattr(self, method_name, None) - if handler is None: - raise NotImplementedError(f"Process has no handler for {handler}") - handler(*args, **kwargs) - - def finish(self): - self.info("Finishing the queue and listening processes") - self._mempool_server_process.terminate() - if not self._queue_process.is_alive(): - return - self._queue.put_nowait(self.BREAK_PROC_INVOCATION) - self._queue_process.join(timeout=self.JOIN_PROC_TIMEOUT_SEC) - - @abc.abstractmethod - def do_extras(self): - assert "To be implemented in derived class" - - @abc.abstractmethod - def service_process_init(self): - assert "To be implemented in derived class" diff --git a/proxy/mempool/__init__.py b/proxy/mempool/__init__.py index adc9b5cae..9907e250e 100644 --- a/proxy/mempool/__init__.py +++ b/proxy/mempool/__init__.py @@ -1,3 +1,6 @@ -from .mempool_service import MemPoolService from .mempool_client import MemPoolClient +from .mempool_service import MemPoolService from .mem_pool import MemPool + +MEMPOOL_SERVICE_PORT = MemPoolService.MEMPOOL_SERVICE_PORT +MEMPOOL_SERVICE_HOST = MemPoolService.MEMPOOL_SERVICE_HOST diff --git a/proxy/mempool/mem_pool.py b/proxy/mempool/mem_pool.py index 774ac02f7..9fe7bf86e 100644 --- a/proxy/mempool/mem_pool.py +++ b/proxy/mempool/mem_pool.py @@ -1,35 +1,28 @@ +import asyncio +import os + from logged_groups import logged_group -from multiprocessing import Pool +from concurrent.futures import ProcessPoolExecutor +import time + +from ..common_neon.data import NeonTxData @logged_group("neon.MemPool") class MemPool: - POOL_PROC_COUNT = 20 + POOL_PROC_COUNT = 8 def __init__(self): - self._pool = Pool(processes=self.POOL_PROC_COUNT) - - def on_eth_send_raw_transaction(self, *, eth_trx_hash): - self._pool.apply_async(func=self._on_eth_send_raw_transaction_impl, args=(eth_trx_hash,), - callback=self.on_eth_send_raw_transaction_callback, error_callback=self.error_callback) - - def error_callback(self, error): - 
self.error("Failed to invoke on worker process: ", error) - - def on_eth_send_raw_transaction_callback(self, result): - pass - - def _on_eth_send_raw_transaction_impl(self, eth_trx_hash): - self.debug(f"Transaction is being processed on the worker: {eth_trx_hash}") - - def do_extras(self): - pass - - def __getstate__(self): - self_dict = self.__dict__.copy() - del self_dict['_pool'] - return self_dict - - def __setstate__(self, state): - self.__dict__.update(state) + self._pool = ProcessPoolExecutor(self.POOL_PROC_COUNT) + self._event_loop = asyncio.get_event_loop() + + def send_raw_transaction(self, neon_tx_data: NeonTxData): + self._pool.submit(MemPool._send_raw_transaction_impl, neon_tx_data) + + @staticmethod + def _send_raw_transaction_impl(neon_tx_data: NeonTxData) -> bool: + pid = os.getpid() + print(f"PID: {pid}, neon_tx_data: {neon_tx_data}") + time.sleep(0.1) + return True diff --git a/proxy/mempool/mempool_client.py b/proxy/mempool/mempool_client.py index 958b7ef09..2dcb76171 100644 --- a/proxy/mempool/mempool_client.py +++ b/proxy/mempool/mempool_client.py @@ -1,20 +1,12 @@ -from logged_groups import logged_group +from ..common_neon.data import NeonTxData -from ..common_neon.utils import QueueBasedServiceClient -from ..common_neon import Result +from ..common_neon.utils import PickableDataClient -from . import MemPoolService +class MemPoolClient: -@logged_group("neon.Proxy") -class MemPoolClient(QueueBasedServiceClient): + def __init__(self, host: str, port: int): + self._pickable_data_client = PickableDataClient(host, port) - MEM_POOL_SERVICE_HOST = "127.0.0.1" - - def __init__(self): - port, host = (MemPoolService.MEM_POOL_SERVICE_PORT, self.MEM_POOL_SERVICE_HOST) - self.info(f"Initialize MemPoolClient connecting to: {port} at: {host}") - QueueBasedServiceClient.__init__(self, host, port) - - def on_eth_send_raw_transaction(self, eth_trx_signature) -> Result: - return self.invoke("on_eth_send_raw_transaction", eth_trx_hash=eth_trx_signature) + def send_raw_transaction(self, neon_tx_data: NeonTxData): + self._pickable_data_client.send_data(neon_tx_data) diff --git a/proxy/mempool/mempool_service.py b/proxy/mempool/mempool_service.py index 75c28d697..6e520b6b2 100644 --- a/proxy/mempool/mempool_service.py +++ b/proxy/mempool/mempool_service.py @@ -1,26 +1,34 @@ from logged_groups import logged_group +import asyncio +from multiprocessing import Process -from ..common_neon.utils import QueueBasedService - +from ..common_neon.utils.pickable_data_server import PickableDataServer, PickableDataServerUser from .mem_pool import MemPool +from typing import Any -@logged_group("neon.MemPool") -class MemPoolService(QueueBasedService): - MEM_POOL_SERVICE_PORT = 9091 +@logged_group("neon.MemPool") +class MemPoolService(PickableDataServerUser): - def __init__(self, *, is_background: bool): - QueueBasedService.__init__(self, port=self.MEM_POOL_SERVICE_PORT, is_background=is_background) - self._mem_pool = None + MEMPOOL_SERVICE_PORT = 9091 + MEMPOOL_SERVICE_HOST = "0.0.0.0" - def on_eth_send_raw_transaction(self, *, eth_trx_hash): - self._mem_pool.on_eth_send_raw_transaction(eth_trx_hash=eth_trx_hash) + def __init__(self): + self.event_loop = asyncio.new_event_loop() + asyncio.set_event_loop(self.event_loop) + self._mempool_server = None + self._mempool = None + self._process = Process(target=self.run) - # QueueBasedService abstracts + def start(self): + self.info("Run until complete") + self._process.start() - def service_process_init(self): - self._mem_pool = MemPool() + def 
on_data_received(self, data: Any): + self._mempool.send_raw_transaction(data) - def do_extras(self): - self._mem_pool.do_extras() + def run(self): + self._mempool_server = PickableDataServer(user=self, host=self.MEMPOOL_SERVICE_HOST, port=self.MEMPOOL_SERVICE_PORT) + self._mempool = MemPool() + self.event_loop.run_until_complete(self._mempool_server.run_server()) diff --git a/proxy/neon_proxy_app.py b/proxy/neon_proxy_app.py index e7cfd9526..a1f430a89 100644 --- a/proxy/neon_proxy_app.py +++ b/proxy/neon_proxy_app.py @@ -7,9 +7,9 @@ class NeonProxyApp: def __init__(self): - self._mempool_service = MemPoolService(is_background=True) + self._mempool_service = MemPoolService() def start(self): - PrometheusProxyServer() self._mempool_service.start() + PrometheusProxyServer() entry_point() diff --git a/proxy/neon_rpc_api_model/neon_rpc_api_model.py b/proxy/neon_rpc_api_model/neon_rpc_api_model.py index d3a737ac0..782142c5d 100644 --- a/proxy/neon_rpc_api_model/neon_rpc_api_model.py +++ b/proxy/neon_rpc_api_model/neon_rpc_api_model.py @@ -8,19 +8,20 @@ from web3.auto import w3 from ..common_neon.address import EthereumAddress -from ..common_neon.emulator_interactor import call_emulated, call_trx_emulated +from ..common_neon.emulator_interactor import call_emulated from ..common_neon.errors import EthereumError, InvalidParamError, PendingTxError from ..common_neon.estimate import GasEstimate from ..common_neon.eth_proto import Trx as EthTrx from ..common_neon.keys_storage import KeyStorage from ..common_neon.solana_interactor import SolanaInteractor from ..common_neon.utils import SolanaBlockInfo -from ..common_neon.types import NeonTxPrecheckResult, NeonEmulatingResult +from ..common_neon.data import NeonTxPrecheckResult, NeonTxData from ..environment import SOLANA_URL, PP_SOLANA_URL, PYTH_MAPPING_ACCOUNT, NEON_EVM_VERSION, NEON_EVM_REVISION, \ CHAIN_ID, neon_cli, EVM_STEP_COUNT from ..memdb.memdb import MemDB from ..common_neon.gas_price_calculator import GasPriceCalculator from ..statistics_exporter.proxy_metrics_interface import StatisticsExporter +from ..mempool import MemPoolClient, MEMPOOL_SERVICE_HOST, MEMPOOL_SERVICE_PORT from .transaction_sender import NeonTxSender from .operator_resource_list import OperatorResourceList @@ -47,7 +48,7 @@ def __init__(self): self._solana = SolanaInteractor(SOLANA_URL) self._db = MemDB(self._solana) self._stat_exporter: Optional[StatisticsExporter] = None - self._mempool_client = MemPoolClient() + self._mempool_client = MemPoolClient(MEMPOOL_SERVICE_HOST, MEMPOOL_SERVICE_PORT) if PP_SOLANA_URL == SOLANA_URL: self.gas_price_calculator = GasPriceCalculator(self._solana, PYTH_MAPPING_ACCOUNT) @@ -450,7 +451,8 @@ def eth_sendRawTransaction(self, rawTrx: str) -> str: tx_sender.execute(neon_tx_precheck_result) self._stat_tx_success() - self._mempool_client.on_eth_send_raw_transaction(eth_signature) + neon_tx_data = NeonTxData(tx_signed=rawTrx) + self._mempool_client.send_raw_transaction(neon_tx_data) return eth_signature except PendingTxError as err: diff --git a/proxy/neon_rpc_api_model/transaction_sender.py b/proxy/neon_rpc_api_model/transaction_sender.py index a91b5d526..1ea231edf 100644 --- a/proxy/neon_rpc_api_model/transaction_sender.py +++ b/proxy/neon_rpc_api_model/transaction_sender.py @@ -22,7 +22,7 @@ from ..common_neon.eth_proto import Trx as EthTx from ..common_neon.utils import NeonTxResultInfo, NeonTxInfo from ..common_neon.errors import EthereumError -from ..common_neon.types import NeonTxPrecheckResult, NeonEmulatingResult +from 
..common_neon.data import NeonTxPrecheckResult, NeonEmulatingResult from ..environment import RETRY_ON_FAIL from ..environment import HOLDER_MSG_SIZE from ..memdb.memdb import MemDB, NeonPendingTxInfo diff --git a/proxy/neon_rpc_api_model/transaction_validator.py b/proxy/neon_rpc_api_model/transaction_validator.py index 292cd2b0c..1ff2e3655 100644 --- a/proxy/neon_rpc_api_model/transaction_validator.py +++ b/proxy/neon_rpc_api_model/transaction_validator.py @@ -14,7 +14,7 @@ ALLOW_UNDERPRICED_TX_WITHOUT_CHAINID from ..common_neon.emulator_interactor import call_trx_emulated -from ..common_neon.types import NeonTxPrecheckResult, NeonEmulatingResult +from ..common_neon.data import NeonTxPrecheckResult, NeonEmulatingResult @logged_group("neon.Proxy") diff --git a/proxy/testing/test_pyth_network_client.py b/proxy/testing/test_pyth_network_client.py index 1e817ae06..9bd7274f0 100644 --- a/proxy/testing/test_pyth_network_client.py +++ b/proxy/testing/test_pyth_network_client.py @@ -140,9 +140,6 @@ def test_integration_success_read_price(self): ''' try: self.update_mapping() - price1 = self.testee.get_price(sol_usd_symbol) - sleep(2) - price2 = self.testee.get_price(sol_usd_symbol) - self.assertNotEqual(price1['valid_slot'], price2['valid_slot']) + self.testee.get_price(sol_usd_symbol) except Exception as err: self.fail(f"Expected get_price not throws exception but it does: {err}") From 9ec72ec1943c68cc311991901e97b9584eeaeb69 Mon Sep 17 00:00:00 2001 From: Rozhkov Dmitrii Date: Thu, 21 Apr 2022 20:57:49 +0500 Subject: [PATCH 03/11] #713 send tx from mempool worker (#754) --- proxy/common_neon/config.py | 27 +++ proxy/common_neon/data.py | 4 +- proxy/common_neon/utils/__init__.py | 3 +- .../common_neon/utils/pickable_data_server.py | 154 +++++++++++++----- proxy/mempool/__init__.py | 1 + proxy/mempool/mem_pool.py | 30 ++-- proxy/mempool/mempool_api.py | 11 ++ proxy/mempool/mempool_client.py | 10 +- proxy/mempool/mempool_service.py | 17 +- proxy/mempool/mempool_tx_executor.py | 29 ++++ .../neon_tx_stages.py | 0 .../operator_resource_list.py | 4 +- .../transaction_sender.py | 35 ++-- proxy/neon_proxy_app.py | 4 +- .../neon_rpc_api_model/neon_rpc_api_model.py | 38 +++-- .../transaction_validator.py | 15 +- proxy/testing/test_neon_tx_sender.py | 5 +- 17 files changed, 266 insertions(+), 121 deletions(-) create mode 100644 proxy/common_neon/config.py create mode 100644 proxy/mempool/mempool_api.py create mode 100644 proxy/mempool/mempool_tx_executor.py rename proxy/{neon_rpc_api_model => mempool}/neon_tx_stages.py (100%) rename proxy/{neon_rpc_api_model => mempool}/operator_resource_list.py (98%) rename proxy/{neon_rpc_api_model => mempool}/transaction_sender.py (94%) diff --git a/proxy/common_neon/config.py b/proxy/common_neon/config.py new file mode 100644 index 000000000..3b2dfef80 --- /dev/null +++ b/proxy/common_neon/config.py @@ -0,0 +1,27 @@ +from abc import ABC, abstractmethod +from typing import Optional +import os + + +class IConfig(ABC): + + @abstractmethod + def get_solana_url(self) -> Optional[str]: + """Gets the predefinded solana url""" + + @abstractmethod + def get_evm_count(self) -> Optional[int]: + """Gets the evm count""" + + +class Config(IConfig): + + def __init__(self): + from ..environment import read_elf_params, ELF_PARAMS + read_elf_params(ELF_PARAMS) + + def get_solana_url(self) -> Optional[str]: + return os.environ.get("SOLANA_URL", "http://localhost:8899") + + def get_evm_count(self) -> Optional[int]: + return int(os.environ.get("EVM_STEP_COUNT", 750)) diff --git 
a/proxy/common_neon/data.py b/proxy/common_neon/data.py index 102c877b1..80e660945 100644 --- a/proxy/common_neon/data.py +++ b/proxy/common_neon/data.py @@ -4,9 +4,9 @@ @dataclass -class NeonTxPrecheckResult: +class NeonTxExecCfg: is_underpriced_tx_without_chainid: bool - emulating_result: NeonEmulatingResult + steps_executed: int NeonEmulatingResult = Dict[str, Any] diff --git a/proxy/common_neon/utils/__init__.py b/proxy/common_neon/utils/__init__.py index 6f67c4bd8..80779403d 100644 --- a/proxy/common_neon/utils/__init__.py +++ b/proxy/common_neon/utils/__init__.py @@ -1,4 +1,5 @@ from .utils import * -from .pickable_data_server import PickableDataServer, PickableDataServerUser, PickableDataClient +from .pickable_data_server import AddrPickableDataSrv, PipePickableDataSrv, PickableDataServerUser, \ + AddrPickableDataClient, PipePickableDataClient diff --git a/proxy/common_neon/utils/pickable_data_server.py b/proxy/common_neon/utils/pickable_data_server.py index 94656911b..e335f4a22 100644 --- a/proxy/common_neon/utils/pickable_data_server.py +++ b/proxy/common_neon/utils/pickable_data_server.py @@ -1,84 +1,152 @@ +from typing import Any, Tuple from abc import ABC, abstractmethod + import asyncio +from asyncio import StreamReader, StreamWriter import socket import pickle import struct -from typing import Any from logged_groups import logged_group class PickableDataServerUser(ABC): @abstractmethod - def on_data_received(self, data: Any): + def on_data_received(self, data: Any) -> Any: """Gets neon_tx_data from the neon rpc api service worker""" +def encode_pickable(object) -> bytes: + data = pickle.dumps(object) + len_data = struct.pack("!I", len(data)) + return len_data + data + + @logged_group("neon.MemPool") class PickableDataServer(ABC): - def __init__(self, *, user: PickableDataServerUser, host: str, port: int): + def __init__(self, *, user: PickableDataServerUser): self._user = user - self._port = port - self._host = host + asyncio.get_event_loop().create_task(self.run_server()) + + @abstractmethod + async def run_server(self): + assert False - async def handle_client(self, client): - loop = asyncio.get_event_loop() - peer_name = client.getpeername() - self.debug(f"Got new incoming connection: {peer_name}") + async def handle_client(self, reader: StreamReader, writer: StreamWriter): while True: try: - len_packed: bytes = await loop.sock_recv(client, 4) - if len(len_packed) == 0: - break - # TODO: all the data can be received by parts, handle it - payload_len_data = struct.unpack("!I", len_packed[:4])[0] - payload = await loop.sock_recv(client, payload_len_data) - data = pickle.loads(payload) - self._user.on_data_received(data) + data = await self._recv_pickable_data(reader) + result = self._user.on_data_received(data) + result_data = encode_pickable(result) + writer.write(result_data) + await writer.drain() except ConnectionResetError: - self.error(f"Client connection: {peer_name} - has been interrupted") + self.error(f"Client connection has been closed") break except Exception as err: - self.error(f"Failed to receive data over: {peer_name} - err: {err}") - continue - client.close() + self.error(f"Failed to receive data err: {err}, type: {type(err)}") + break + + async def _recv_pickable_data(self, reader: StreamReader): + len_packed: bytes = await reader.readexactly(4) + if len(len_packed) == 0: + raise ConnectionResetError() + payload_len_data = struct.unpack("!I", len_packed)[0] + payload = await reader.readexactly(payload_len_data) + data = pickle.loads(payload) + + return data 
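+    # Framing note: every message on this channel is a 4-byte big-endian
+    # length prefix (struct "!I") produced by encode_pickable(), followed by
+    # exactly that many bytes of pickled payload; _recv_pickable_data() above
+    # relies on readexactly() to consume both parts in full.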
+
+
+class AddrPickableDataSrv(PickableDataServer):
+
+    def __init__(self, *, user: PickableDataServerUser, address: Tuple[str, int]):
+        self._address = address
+        PickableDataServer.__init__(self, user=user)
 
     async def run_server(self):
-        self.info(f"Listen port: {self._port} on: {self._host}")
-        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        server.bind((self._host, self._port))
-        server.listen(8)
-        server.setblocking(False)
+        host, port = self._address
+        self.info(f"Listen port: {port} on: {host}")
+        await asyncio.start_server(self.handle_client, host, port)
 
-        loop = asyncio.get_event_loop()
-        while True:
-            client, _ = await loop.sock_accept(server)
-            loop.create_task(self.handle_client(client))
+
+class PipePickableDataSrv(PickableDataServer):
+
+    def __init__(self, *, user: PickableDataServerUser, srv_sock: socket.socket):
+        self._srv_sock = srv_sock
+        PickableDataServer.__init__(self, user=user)
+
+    async def run_server(self):
+        self.debug("Run the pipe server on the passed socket")
+        reader, writer = await asyncio.streams.open_connection(sock=self._srv_sock)
+        self.debug("Got reader and writer for the pipe connection")
+        await self.handle_client(reader, writer)
 
 
 @logged_group("neon.Proxy")
 class PickableDataClient:
 
-    def __init__(self, host: str, port: int):
+    CONNECTION_TIMEOUT_SEC = 5
 
-        self.info(f"Initialize PickableDataClient connecting to: {port} at: {host}")
-        self._connection = socket.create_connection((host, port))
+    def __init__(self):
+        self._client_sock = None
 
-    def send_data(self, pickable_data: Any):
+    def _set_client_sock(self, client_sock: socket.socket):
+        self._client_sock = client_sock
+        self._client_sock.setblocking(False)
+        self._client_sock.settimeout(self.CONNECTION_TIMEOUT_SEC)
+
+    def send_data(self, pickable_object: Any):
         try:
-            payload = self._encode_pickable_data(pickable_data)
-            self._connection.send(payload)
+            payload = encode_pickable(pickable_object)
+            self._client_sock.send(payload)
+            len_packed: bytes = self._client_sock.recv(4)
+            data_len = struct.unpack("!I", len_packed)[0]
+            data = self._client_sock.recv(data_len)
+            if not data:
+                return None
+            result = pickle.loads(data)
+            return result
         except BaseException as err:
             self.error(f"Failed to send data: {err}")
             raise Exception("Failed to send pickable data")
 
-    def _encode_pickable_data(self, pickable_data: Any):
-        data = pickle.dumps(pickable_data)
-        data_len = len(data)
-        packed_len = struct.pack("!I", data_len)
-        payload = packed_len + data
-        return payload
+    async def send_data_async(self, pickable_object):
+        reader, writer = await asyncio.streams.open_connection(sock=self._client_sock)
+        try:
+            payload = encode_pickable(pickable_object)
+            writer.write(payload)
+            await writer.drain()
+            len_packed: bytes = await reader.readexactly(4)
+            if not len_packed:
+                return None
+            data_len = struct.unpack("!I", len_packed)[0]
+            data = await reader.readexactly(data_len)
+            if not data:
+                return None
+            result = pickle.loads(data)
+            return result
+        except BaseException as err:
+            self.error(f"Failed to send data: {err}")
+            raise Exception("Failed to send pickable data")
 
     def __del__(self):
-        self._connection.close()
+        self._client_sock.close()
+
+
+class PipePickableDataClient(PickableDataClient):
+
+    def __init__(self, client_sock: socket.socket):
+        PickableDataClient.__init__(self)
+        self._set_client_sock(client_sock=client_sock)
+
+
+class AddrPickableDataClient(PickableDataClient):
+
+    def __init__(self, addr: Tuple[str, int]):
+        PickableDataClient.__init__(self)
+        host, port = addr
+        client_sock = socket.create_connection((host, port))
+        
self._set_client_sock(client_sock=client_sock) + diff --git a/proxy/mempool/__init__.py b/proxy/mempool/__init__.py index 9907e250e..ec2e99105 100644 --- a/proxy/mempool/__init__.py +++ b/proxy/mempool/__init__.py @@ -1,6 +1,7 @@ from .mempool_client import MemPoolClient from .mempool_service import MemPoolService from .mem_pool import MemPool +from .mempool_api import * MEMPOOL_SERVICE_PORT = MemPoolService.MEMPOOL_SERVICE_PORT MEMPOOL_SERVICE_HOST = MemPoolService.MEMPOOL_SERVICE_HOST diff --git a/proxy/mempool/mem_pool.py b/proxy/mempool/mem_pool.py index 9fe7bf86e..072393c67 100644 --- a/proxy/mempool/mem_pool.py +++ b/proxy/mempool/mem_pool.py @@ -1,11 +1,10 @@ -import asyncio -import os - from logged_groups import logged_group from concurrent.futures import ProcessPoolExecutor -import time -from ..common_neon.data import NeonTxData +from ..common_neon.config import IConfig + +from .mempool_api import MemPoolTxRequest +from .mempool_tx_executor import MemPoolTxExecutor @logged_group("neon.MemPool") @@ -13,16 +12,19 @@ class MemPool: POOL_PROC_COUNT = 8 - def __init__(self): + def __init__(self, config: IConfig): self._pool = ProcessPoolExecutor(self.POOL_PROC_COUNT) - self._event_loop = asyncio.get_event_loop() - - def send_raw_transaction(self, neon_tx_data: NeonTxData): - self._pool.submit(MemPool._send_raw_transaction_impl, neon_tx_data) + self._tx_executor = MemPoolTxExecutor(config) + + def send_raw_transaction(self, mempool_tx_request: MemPoolTxRequest) -> bool: + try: + self._pool.submit(MemPool._send_raw_transaction_impl, mempool_tx_request) + except Exception as err: + print(f"Failed enqueue mempool_tx_request into the worker pool: {err}") + return False + return True @staticmethod - def _send_raw_transaction_impl(neon_tx_data: NeonTxData) -> bool: - pid = os.getpid() - print(f"PID: {pid}, neon_tx_data: {neon_tx_data}") - time.sleep(0.1) + def _send_raw_transaction_impl(mempool_tx_request: MemPoolTxRequest) -> bool: + print(f"mempool_tx_request: {mempool_tx_request}") return True diff --git a/proxy/mempool/mempool_api.py b/proxy/mempool/mempool_api.py new file mode 100644 index 000000000..4f515a224 --- /dev/null +++ b/proxy/mempool/mempool_api.py @@ -0,0 +1,11 @@ +from dataclasses import dataclass + +from ..common_neon.eth_proto import Trx as NeonTx +from ..common_neon.data import NeonTxExecCfg, NeonEmulatingResult + + +@dataclass +class MemPoolTxRequest: + neon_tx: NeonTx + neon_tx_exec_cfg: NeonTxExecCfg + emulating_result: NeonEmulatingResult diff --git a/proxy/mempool/mempool_client.py b/proxy/mempool/mempool_client.py index 2dcb76171..e0be95015 100644 --- a/proxy/mempool/mempool_client.py +++ b/proxy/mempool/mempool_client.py @@ -1,12 +1,12 @@ -from ..common_neon.data import NeonTxData +from ..common_neon.utils import AddrPickableDataClient -from ..common_neon.utils import PickableDataClient +from .mempool_api import MemPoolTxRequest class MemPoolClient: def __init__(self, host: str, port: int): - self._pickable_data_client = PickableDataClient(host, port) + self._pickable_data_client = AddrPickableDataClient((host, port)) - def send_raw_transaction(self, neon_tx_data: NeonTxData): - self._pickable_data_client.send_data(neon_tx_data) + def send_raw_transaction(self, mempool_tx_request: MemPoolTxRequest): + self._pickable_data_client.send_data(mempool_tx_request) diff --git a/proxy/mempool/mempool_service.py b/proxy/mempool/mempool_service.py index 6e520b6b2..1381d66ce 100644 --- a/proxy/mempool/mempool_service.py +++ b/proxy/mempool/mempool_service.py @@ -2,7 +2,9 @@ 
import asyncio
 from multiprocessing import Process
 
-from ..common_neon.utils.pickable_data_server import PickableDataServer, PickableDataServerUser
+from ..common_neon.utils.pickable_data_server import AddrPickableDataSrv, PickableDataServerUser
+from ..common_neon.config import IConfig
+
 from .mem_pool import MemPool
 from typing import Any
 
@@ -14,21 +16,22 @@ class MemPoolService(PickableDataServerUser):
 
     MEMPOOL_SERVICE_PORT = 9091
     MEMPOOL_SERVICE_HOST = "0.0.0.0"
 
-    def __init__(self):
+    def __init__(self, config: IConfig):
         self.event_loop = asyncio.new_event_loop()
         asyncio.set_event_loop(self.event_loop)
         self._mempool_server = None
         self._mempool = None
         self._process = Process(target=self.run)
+        self._config = config
 
     def start(self):
         self.info("Run until complete")
         self._process.start()
 
-    def on_data_received(self, data: Any):
-        self._mempool.send_raw_transaction(data)
+    def on_data_received(self, data: Any) -> Any:
+        return self._mempool.send_raw_transaction(data)
 
     def run(self):
-        self._mempool_server = PickableDataServer(user=self, host=self.MEMPOOL_SERVICE_HOST, port=self.MEMPOOL_SERVICE_PORT)
-        self._mempool = MemPool()
-        self.event_loop.run_until_complete(self._mempool_server.run_server())
+        self._mempool_server = AddrPickableDataSrv(user=self, address=(self.MEMPOOL_SERVICE_HOST, self.MEMPOOL_SERVICE_PORT))
+        self._mempool = MemPool(self._config)
+        self.event_loop.run_forever()
diff --git a/proxy/mempool/mempool_tx_executor.py b/proxy/mempool/mempool_tx_executor.py
new file mode 100644
index 000000000..eb4f7e23b
--- /dev/null
+++ b/proxy/mempool/mempool_tx_executor.py
@@ -0,0 +1,29 @@
+from logged_groups import logged_group
+
+from ..common_neon.solana_interactor import SolanaInteractor
+from ..common_neon.config import IConfig
+from ..memdb.memdb import MemDB
+
+# TODO: NeonTxSender should be moved out of here
+from .transaction_sender import NeonTxSender
+from .operator_resource_list import OperatorResourceList
+from .mempool_api import MemPoolTxRequest
+
+
+@logged_group("neon.MemPool")
+class MemPoolTxExecutor:
+
+    def __init__(self, config: IConfig):
+
+        self._solana = SolanaInteractor(config.get_solana_url())
+        self._db = MemDB(self._solana)
+        self._config = config
+
+    def execute_neon_tx(self, mempool_tx_request: MemPoolTxRequest):
+        neon_tx = mempool_tx_request.neon_tx
+        neon_tx_cfg = mempool_tx_request.neon_tx_exec_cfg
+        emulating_result = mempool_tx_request.emulating_result
+        evm_step_count = self._config.get_evm_count()
+        tx_sender = NeonTxSender(self._db, self._solana, neon_tx, steps=evm_step_count)
+        with OperatorResourceList(tx_sender):
+            tx_sender.execute(neon_tx_cfg, emulating_result)
diff --git a/proxy/neon_rpc_api_model/neon_tx_stages.py b/proxy/mempool/neon_tx_stages.py
similarity index 100%
rename from proxy/neon_rpc_api_model/neon_tx_stages.py
rename to proxy/mempool/neon_tx_stages.py
diff --git a/proxy/neon_rpc_api_model/operator_resource_list.py b/proxy/mempool/operator_resource_list.py
similarity index 98%
rename from proxy/neon_rpc_api_model/operator_resource_list.py
rename to proxy/mempool/operator_resource_list.py
index e838d6df9..2ee58ab78 100644
--- a/proxy/neon_rpc_api_model/operator_resource_list.py
+++ b/proxy/mempool/operator_resource_list.py
@@ -23,9 +23,7 @@
     MIN_OPERATOR_BALANCE_TO_ERR, MIN_OPERATOR_BALANCE_TO_WARN, EVM_LOADER_ID
 
-## TODO: DIP corruption, get rid of back dependency
-# from .transaction_sender import NeonTxSender
-from .neon_tx_stages import NeonCancelTxStage, NeonCreateAccountTxStage, NeonCreateAccountWithSeedStage
+from 
..mempool.neon_tx_stages import NeonCancelTxStage, NeonCreateAccountTxStage, NeonCreateAccountWithSeedStage class OperatorResourceInfo: diff --git a/proxy/neon_rpc_api_model/transaction_sender.py b/proxy/mempool/transaction_sender.py similarity index 94% rename from proxy/neon_rpc_api_model/transaction_sender.py rename to proxy/mempool/transaction_sender.py index 1ea231edf..1d233414c 100644 --- a/proxy/neon_rpc_api_model/transaction_sender.py +++ b/proxy/mempool/transaction_sender.py @@ -5,15 +5,14 @@ import time from logged_groups import logged_group -from typing import Dict, Optional, Any +from typing import Dict, Optional from solana.transaction import AccountMeta, Transaction, PublicKey from solana.blockhash import Blockhash -from .neon_tx_stages import NeonCreateAccountTxStage, NeonCreateERC20TxStage, NeonCreateContractTxStage, \ - NeonResizeContractTxStage +from ..mempool.neon_tx_stages import NeonCreateAccountTxStage, NeonCreateERC20TxStage, NeonCreateContractTxStage, \ + NeonResizeContractTxStage -from .operator_resource_list import OperatorResourceInfo from ..common_neon.compute_budget import TransactionWithComputeBudget from ..common_neon.neon_instruction import NeonInstruction as NeonIxBuilder from ..common_neon.solana_interactor import SolanaInteractor @@ -22,12 +21,14 @@ from ..common_neon.eth_proto import Trx as EthTx from ..common_neon.utils import NeonTxResultInfo, NeonTxInfo from ..common_neon.errors import EthereumError -from ..common_neon.data import NeonTxPrecheckResult, NeonEmulatingResult +from ..common_neon.data import NeonTxExecCfg, NeonEmulatingResult from ..environment import RETRY_ON_FAIL from ..environment import HOLDER_MSG_SIZE from ..memdb.memdb import MemDB, NeonPendingTxInfo from ..common_neon.utils import get_holder_msg +from .operator_resource_list import OperatorResourceInfo + @logged_group("neon.Proxy") class NeonTxSender: @@ -61,10 +62,10 @@ def __init__(self, db: MemDB, solana: SolanaInteractor, eth_tx: EthTx, steps: in self._create_account_list = [] self._eth_meta_dict: Dict[str, AccountMeta] = dict() - def execute(self, precheck_result: NeonTxPrecheckResult) -> NeonTxResultInfo: + def execute(self, exec_cfg: NeonTxExecCfg, emulating_result: NeonEmulatingResult) -> NeonTxResultInfo: self._validate_pend_tx() - self._prepare_execution(precheck_result.emulating_result) - return self._execute(precheck_result) + self._prepare_execution(emulating_result) + return self._execute(exec_cfg) def set_resource(self, resource: Optional[OperatorResourceInfo]): self.resource = resource @@ -82,11 +83,11 @@ def _validate_pend_tx(self): self._pending_tx = NeonPendingTxInfo(neon_sign=self.neon_sign, operator=operator, slot=0) self._pend_tx_into_db(self.solana.get_recent_blockslot()) - def _execute(self, precheck_result: NeonTxPrecheckResult): + def _execute(self, exec_cfg: NeonTxExecCfg): for Strategy in [SimpleNeonTxStrategy, IterativeNeonTxStrategy, HolderNeonTxStrategy, NoChainIdNeonTxStrategy]: try: - strategy = Strategy(precheck_result, self) + strategy = Strategy(exec_cfg, self) if not strategy.is_valid: self.debug(f'Skip strategy {Strategy.NAME}: {strategy.error}') continue @@ -208,8 +209,8 @@ def done_account_tx_list(self, skip_create_accounts=False): class BaseNeonTxStrategy(metaclass=abc.ABCMeta): NAME = 'UNKNOWN STRATEGY' - def __init__(self, precheck_result: NeonTxPrecheckResult, neon_tx_sender: NeonTxSender): - self._precheck_result = precheck_result + def __init__(self, exec_cfg: NeonTxExecCfg, neon_tx_sender: NeonTxSender): + self._neon_tx_exec_cfg = 
exec_cfg self.is_valid = False self.error = None self.s = neon_tx_sender @@ -251,7 +252,7 @@ def _validate_txsize(self) -> bool: raise def _validate_gas_limit(self): - if not self._precheck_result.is_underpriced_tx_without_chainid: + if not self._neon_tx_exec_cfg.is_underpriced_tx_without_chainid: return True self.error = "Underpriced transaction without chain-id" @@ -302,7 +303,7 @@ def _validate(self) -> bool: return self._validate_txsize() def _validate_steps(self) -> bool: - steps_emulated = self._precheck_result.emulating_result["steps_executed"] + steps_emulated = self._neon_tx_exec_cfg.steps_executed if steps_emulated > self.steps: self.error = 'Too big number of EVM steps' return False @@ -457,7 +458,7 @@ def _validate(self) -> bool: self._validate_gas_limit()) def _validate_evm_steps(self): - if self._precheck_result.emulating_result["steps_executed"] > (self.s.steps * 25): + if self._neon_tx_exec_cfg.steps_executed > (self.s.steps * 25): self.error = 'Big number of EVM steps' return False return True @@ -479,7 +480,7 @@ def execute(self) -> (NeonTxResultInfo, [str]): SolTxListSender(self.s, tx_list, self._preparation_txs_name).send(signer) self.s.done_account_tx_list() - steps_emulated = self._precheck_result.emulating_result["steps_executed"] + steps_emulated = self._neon_tx_exec_cfg.steps_executed cnt = math.ceil(steps_emulated / self.steps) cnt = math.ceil(steps_emulated / (self.steps - cnt)) if steps_emulated > 200: @@ -538,7 +539,7 @@ def __init__(self, *args, **kwargs): HolderNeonTxStrategy.__init__(self, *args, **kwargs) def _validate(self) -> bool: - if not self._precheck_result.is_underpriced_tx_without_chainid: + if not self._neon_tx_exec_cfg.is_underpriced_tx_without_chainid: self.error = 'Normal transaction' return False diff --git a/proxy/neon_proxy_app.py b/proxy/neon_proxy_app.py index a1f430a89..334c1a039 100644 --- a/proxy/neon_proxy_app.py +++ b/proxy/neon_proxy_app.py @@ -2,12 +2,14 @@ from .mempool.mempool_service import MemPoolService from .statistics_exporter.prometheus_proxy_server import PrometheusProxyServer +from .common_neon.config import Config class NeonProxyApp: def __init__(self): - self._mempool_service = MemPoolService() + self._config = Config() + self._mempool_service = MemPoolService(self._config) def start(self): self._mempool_service.start() diff --git a/proxy/neon_rpc_api_model/neon_rpc_api_model.py b/proxy/neon_rpc_api_model/neon_rpc_api_model.py index 782142c5d..929e973b4 100644 --- a/proxy/neon_rpc_api_model/neon_rpc_api_model.py +++ b/proxy/neon_rpc_api_model/neon_rpc_api_model.py @@ -1,7 +1,7 @@ import json import multiprocessing import traceback -from typing import Optional, Union +from typing import Optional, Union, Tuple import sha3 from logged_groups import logged_group @@ -15,16 +15,16 @@ from ..common_neon.keys_storage import KeyStorage from ..common_neon.solana_interactor import SolanaInteractor from ..common_neon.utils import SolanaBlockInfo -from ..common_neon.data import NeonTxPrecheckResult, NeonTxData +from ..common_neon.data import NeonTxExecCfg, NeonEmulatingResult +from ..common_neon.gas_price_calculator import GasPriceCalculator + from ..environment import SOLANA_URL, PP_SOLANA_URL, PYTH_MAPPING_ACCOUNT, NEON_EVM_VERSION, NEON_EVM_REVISION, \ - CHAIN_ID, neon_cli, EVM_STEP_COUNT + CHAIN_ID, neon_cli + from ..memdb.memdb import MemDB -from ..common_neon.gas_price_calculator import GasPriceCalculator from ..statistics_exporter.proxy_metrics_interface import StatisticsExporter -from ..mempool import MemPoolClient, 
MEMPOOL_SERVICE_HOST, MEMPOOL_SERVICE_PORT +from ..mempool import MemPoolTxRequest, MemPoolClient, MEMPOOL_SERVICE_HOST, MEMPOOL_SERVICE_PORT -from .transaction_sender import NeonTxSender -from .operator_resource_list import OperatorResourceList from .transaction_validator import NeonTxValidator NEON_PROXY_PKG_VERSION = '0.7.16-dev' @@ -444,15 +444,20 @@ def eth_sendRawTransaction(self, rawTrx: str) -> str: self._stat_tx_begin() try: - neon_tx_precheck_result = self.precheck(trx) + neon_tx_cfg, emulating_result = self.precheck(trx) - tx_sender = NeonTxSender(self._db, self._solana, trx, steps=EVM_STEP_COUNT) - with OperatorResourceList(tx_sender): - tx_sender.execute(neon_tx_precheck_result) + # tx_sender = NeonTxSender(self._db, self._solana, trx, steps=EVM_STEP_COUNT) + # with OperatorResourceList(tx_sender): + # tx_sender.execute(neon_tx_cfg) self._stat_tx_success() - neon_tx_data = NeonTxData(tx_signed=rawTrx) - self._mempool_client.send_raw_transaction(neon_tx_data) + mempool_tx_request = MemPoolTxRequest(neon_tx=trx, + neon_tx_exec_cfg=neon_tx_cfg, + emulating_result=emulating_result) + + if not self._mempool_client.send_raw_transaction(mempool_tx_request): + raise Exception("Failed to pass neon_tx into MemPool") + return eth_signature except PendingTxError as err: @@ -466,13 +471,10 @@ def eth_sendRawTransaction(self, rawTrx: str) -> str: self._stat_tx_failed() raise - def precheck(self, neon_trx: EthTrx) -> NeonTxPrecheckResult: - + def precheck(self, neon_trx: EthTrx) -> Tuple[NeonTxExecCfg, NeonEmulatingResult]: min_gas_price = self.gas_price_calculator.get_min_gas_price() neon_validator = NeonTxValidator(self._solana, neon_trx, min_gas_price) - precheck_result = neon_validator.precheck() - - return precheck_result + return neon_validator.precheck() def _stat_tx_begin(self): self._stat_exporter.stat_commit_tx_begin() diff --git a/proxy/neon_rpc_api_model/transaction_validator.py b/proxy/neon_rpc_api_model/transaction_validator.py index 1ff2e3655..90cc3228d 100644 --- a/proxy/neon_rpc_api_model/transaction_validator.py +++ b/proxy/neon_rpc_api_model/transaction_validator.py @@ -1,5 +1,5 @@ from __future__ import annotations - +from typing import Tuple from logged_groups import logged_group from ..common_neon.eth_proto import Trx as EthTx @@ -9,12 +9,12 @@ from ..common_neon.solana_receipt_parser import SolReceiptParser from ..common_neon.solana_interactor import SolanaInteractor from ..common_neon.estimate import GasEstimate +from ..common_neon.emulator_interactor import call_trx_emulated from ..environment import ACCOUNT_PERMISSION_UPDATE_INT, CHAIN_ID, NEON_GAS_LIMIT_MULTIPLIER_NO_CHAINID,\ ALLOW_UNDERPRICED_TX_WITHOUT_CHAINID -from ..common_neon.emulator_interactor import call_trx_emulated -from ..common_neon.data import NeonTxPrecheckResult, NeonEmulatingResult +from ..common_neon.data import NeonTxExecCfg, NeonEmulatingResult @logged_group("neon.Proxy") @@ -58,16 +58,16 @@ def is_underpriced_tx_without_chainid(self) -> bool: return False return (self._tx.gasPrice < self._min_gas_price) or (self._tx.gasLimit < self._estimated_gas) - def precheck(self) -> NeonTxPrecheckResult: + def precheck(self) -> Tuple[NeonTxExecCfg, NeonEmulatingResult]: try: self._prevalidate_tx() emulating_result: NeonEmulatingResult = call_trx_emulated(self._tx) self._prevalidate_emulator(emulating_result) is_underpriced_tx_without_chainid = self.is_underpriced_tx_without_chainid() - precheck_result = NeonTxPrecheckResult(emulating_result=emulating_result, - 
is_underpriced_tx_without_chainid=is_underpriced_tx_without_chainid) - return precheck_result + neon_tx_exec_cfg = NeonTxExecCfg(steps_executed=emulating_result["steps_executed"], + is_underpriced_tx_without_chainid=is_underpriced_tx_without_chainid) + return neon_tx_exec_cfg, emulating_result except Exception as e: self.extract_ethereum_error(e) @@ -196,7 +196,6 @@ def _prevalidate_account_sizes(emulator_json: dict): raise EthereumError(f"contract {account_desc['address']} " + f"requests a size increase to more than 9.5Mb") - def _raise_nonce_error(self, account_tx_count: int, tx_nonce: int): if self.MAX_U64 in (account_tx_count, tx_nonce): message = 'nonce has max value' diff --git a/proxy/testing/test_neon_tx_sender.py b/proxy/testing/test_neon_tx_sender.py index 1ce9a14b9..7154cd3e3 100644 --- a/proxy/testing/test_neon_tx_sender.py +++ b/proxy/testing/test_neon_tx_sender.py @@ -5,10 +5,11 @@ from unittest.mock import Mock from ..common_neon.eth_proto import Trx as EthTrx -from ..neon_rpc_api_model.transaction_sender import NeonTxSender from ..common_neon.solana_interactor import SolanaInteractor from ..memdb.memdb import MemDB -from ..neon_rpc_api_model.operator_resource_list import OperatorResourceList + +from ..mempool.operator_resource_list import OperatorResourceList +from ..mempool.transaction_sender import NeonTxSender @logged_groups.logged_group("neon.TestCases") From d10605d03b76bade8fbd2184e150658e3fda7904 Mon Sep 17 00:00:00 2001 From: rozhkovdmitrii Date: Wed, 18 May 2022 17:00:50 +0400 Subject: [PATCH 04/11] Correct some --- proxy/neon_rpc_api_model/neon_rpc_api_model.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/proxy/neon_rpc_api_model/neon_rpc_api_model.py b/proxy/neon_rpc_api_model/neon_rpc_api_model.py index 1b2b8e419..9db4558f5 100644 --- a/proxy/neon_rpc_api_model/neon_rpc_api_model.py +++ b/proxy/neon_rpc_api_model/neon_rpc_api_model.py @@ -21,8 +21,6 @@ from ..common_neon.gas_price_calculator import GasPriceCalculator from ..environment import SOLANA_URL, PP_SOLANA_URL, PYTH_MAPPING_ACCOUNT, NEON_EVM_VERSION, NEON_EVM_REVISION, \ - CHAIN_ID, neon_cli - CHAIN_ID, USE_EARLIEST_BLOCK_IF_0_PASSED, neon_cli, EVM_STEP_COUNT from ..memdb.memdb import MemDB from ..statistics_exporter.proxy_metrics_interface import StatisticsExporter @@ -493,7 +491,7 @@ def eth_sendRawTransaction(self, rawTrx: str) -> str: if not self._mempool_client.send_raw_transaction(mempool_tx_request): raise Exception("Failed to pass neon_tx into MemPool") - + time.sleep(3) return eth_signature except PendingTxError as err: From 62799c331b64e35ad73c49b86780d2a203efc7ee Mon Sep 17 00:00:00 2001 From: rozhkovdmitrii Date: Wed, 18 May 2022 17:26:38 +0400 Subject: [PATCH 05/11] Spit and polish --- proxy/neon_rpc_api_model/neon_rpc_api_model.py | 1 - 1 file changed, 1 deletion(-) diff --git a/proxy/neon_rpc_api_model/neon_rpc_api_model.py b/proxy/neon_rpc_api_model/neon_rpc_api_model.py index 9db4558f5..57e6d22f1 100644 --- a/proxy/neon_rpc_api_model/neon_rpc_api_model.py +++ b/proxy/neon_rpc_api_model/neon_rpc_api_model.py @@ -491,7 +491,6 @@ def eth_sendRawTransaction(self, rawTrx: str) -> str: if not self._mempool_client.send_raw_transaction(mempool_tx_request): raise Exception("Failed to pass neon_tx into MemPool") - time.sleep(3) return eth_signature except PendingTxError as err: From 64a7bf4f235249a5d42ee303b95823a8098ec750 Mon Sep 17 00:00:00 2001 From: Rozhkov Dmitrii Date: Wed, 25 May 2022 10:27:33 +0400 Subject: [PATCH 06/11] #713 send tx from mempool worker (#757) --- 
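
Note (editorial, not part of the diff): the precheck rework in the patches above replaces the single precheck result object with a pair of values, a typed execution config plus the untouched emulator output, so the strategies read strongly-typed fields instead of digging into the raw dict. A minimal sketch of that shape, with simplified stand-ins for the real classes (NeonEmulatingResult is assumed here to be just the emulator's JSON dict):

    from dataclasses import dataclass
    from typing import Any, Dict, Tuple

    NeonEmulatingResult = Dict[str, Any]  # stand-in: raw JSON from the neon-cli emulator

    @dataclass
    class NeonTxExecCfg:
        steps_executed: int
        is_underpriced_tx_without_chainid: bool

    def precheck(emulating_result: NeonEmulatingResult,
                 is_underpriced_tx_without_chainid: bool) -> Tuple[NeonTxExecCfg, NeonEmulatingResult]:
        # Callers such as the tx strategies read cfg.steps_executed instead of
        # poking into emulating_result["steps_executed"] directly.
        cfg = NeonTxExecCfg(steps_executed=emulating_result["steps_executed"],
                            is_underpriced_tx_without_chainid=is_underpriced_tx_without_chainid)
        return cfg, emulating_result
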
.buildkite/pipeline.yml | 2 +- proxy/common_neon/data.py | 6 - .../common_neon/utils/pickable_data_server.py | 37 ++--- proxy/memdb/transactions_db.py | 7 +- proxy/mempool/__init__.py | 8 +- proxy/mempool/executor_mng.py | 91 +++++++++++ proxy/mempool/mem_pool.py | 30 ---- proxy/mempool/mempool.py | 151 ++++++++++++++++++ proxy/mempool/mempool_api.py | 77 ++++++++- proxy/mempool/mempool_client.py | 19 ++- proxy/mempool/mempool_executor.py | 65 ++++++++ proxy/mempool/mempool_service.py | 23 +-- proxy/mempool/mempool_tx_executor.py | 29 ---- proxy/mempool/neon_tx_stages.py | 10 +- proxy/mempool/operator_resource_list.py | 4 +- proxy/mempool/transaction_sender.py | 16 +- proxy/neon_proxy_app.py | 4 +- .../neon_rpc_api_model/neon_rpc_api_model.py | 38 +++-- proxy/testing/test_eth_getLogs.py | 27 ++-- proxy/testing/test_neon_tx_sender.py | 12 +- requirements.txt | 2 +- 21 files changed, 493 insertions(+), 165 deletions(-) create mode 100644 proxy/mempool/executor_mng.py delete mode 100644 proxy/mempool/mem_pool.py create mode 100644 proxy/mempool/mempool.py create mode 100644 proxy/mempool/mempool_executor.py delete mode 100644 proxy/mempool/mempool_tx_executor.py diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 77e01b455..7274fd096 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -9,7 +9,7 @@ steps: - label: ":terraform: build infrastructure" key: "create_infrastructure" if: &is_fts_enabled | - (build.pull_request.base_branch == "712-mempool" && !build.pull_request.draft) || + (build.pull_request.base_branch == "develop" && !build.pull_request.draft) || (build.source == "trigger_job" && build.env("NEON_EVM_FULL_TEST_SUITE") == "true") agents: queue: "testing" diff --git a/proxy/common_neon/data.py b/proxy/common_neon/data.py index 80e660945..6bcc8c8c8 100644 --- a/proxy/common_neon/data.py +++ b/proxy/common_neon/data.py @@ -22,9 +22,3 @@ def __init__(self, neon_tx_hash: str, neon_income: int, tx_type: str, is_cancele def add_instruction(self, sol_tx_hash: str, sol_spent: int, steps: int, bpf: int) -> None: self.instructions.append((sol_tx_hash, sol_spent, steps, bpf)) - - -@dataclass -class NeonTxData: - tx_signed: str - diff --git a/proxy/common_neon/utils/pickable_data_server.py b/proxy/common_neon/utils/pickable_data_server.py index e335f4a22..5a54df3eb 100644 --- a/proxy/common_neon/utils/pickable_data_server.py +++ b/proxy/common_neon/utils/pickable_data_server.py @@ -12,7 +12,7 @@ class PickableDataServerUser(ABC): @abstractmethod - def on_data_received(self, data: Any) -> Any: + async def on_data_received(self, data: Any) -> Any: """Gets neon_tx_data from the neon rpc api service worker""" @@ -37,23 +37,25 @@ async def handle_client(self, reader: StreamReader, writer: StreamWriter): while True: try: data = await self._recv_pickable_data(reader) - result = self._user.on_data_received(data) + result = await self._user.on_data_received(data) result_data = encode_pickable(result) writer.write(result_data) await writer.drain() except ConnectionResetError: - self.error(f"Client connection has been closed") + break + except asyncio.exceptions.IncompleteReadError as err: + self.error(f"Incomplete read error: {err}") break except Exception as err: self.error(f"Failed to receive data err: {err}, type: {type(err)}") break async def _recv_pickable_data(self, reader: StreamReader): - len_packed: bytes = await reader.readexactly(4) + len_packed: bytes = await reader.read(4) if len(len_packed) == 0: raise ConnectionResetError() payload_len_data = 
struct.unpack("!I", len_packed)[0] - payload = await reader.readexactly(payload_len_data) + payload = await reader.read(payload_len_data) data = pickle.loads(payload) return data @@ -78,29 +80,23 @@ def __init__(self, *, user: PickableDataServerUser, srv_sock: socket.socket): PickableDataServer.__init__(self, user=user) async def run_server(self): - print("run_server_by_conn") reader, writer = await asyncio.streams.open_connection(sock=self._srv_sock) - print("Got reader, writer") await self.handle_client(reader, writer) @logged_group("neon.Proxy") class PickableDataClient: - CONNECTION_TIMEOUT_SEC = 5 - def __init__(self): self._client_sock = None def _set_client_sock(self, client_sock: socket.socket): self._client_sock = client_sock - self._client_sock.setblocking(False) - self._client_sock.settimeout(self.CONNECTION_TIMEOUT_SEC) def send_data(self, pickable_object: Any): try: payload = encode_pickable(pickable_object) - self._client_sock.send(payload) + sent = self._client_sock.send(payload) len_packed: bytes = self._client_sock.recv(4) data_len = struct.unpack("!I", len_packed)[0] data = self._client_sock.recv(data_len) @@ -110,29 +106,26 @@ def send_data(self, pickable_object: Any): return result except BaseException as err: self.error(f"Failed to send data: {err}") - raise Exception("Failed to send pickable data") + raise async def send_data_async(self, pickable_object): - reader, writer = await asyncio.streams.open_connection(sock=self._client_sock) + loop = asyncio.get_event_loop() try: payload = encode_pickable(pickable_object) - writer.write(payload) - await writer.drain() - len_packed: bytes = await reader.readexactly(4) + await loop.sock_sendall(self._client_sock, payload) + + len_packed: bytes = await loop.sock_recv(self._client_sock, 4) if not len_packed: return None data_len = struct.unpack("!I", len_packed)[0] - data = await reader.readexactly(data_len) + data = await loop.sock_recv(self._client_sock, data_len) if not data: return None result = pickle.loads(data) return result except BaseException as err: self.error(f"Failed to send data: {err}") - raise Exception("Failed to send pickable data") - - def __del__(self): - self._client_sock.close() + raise class PipePickableDataClient(PickableDataClient): diff --git a/proxy/memdb/transactions_db.py b/proxy/memdb/transactions_db.py index cb220314f..e43bc54f5 100644 --- a/proxy/memdb/transactions_db.py +++ b/proxy/memdb/transactions_db.py @@ -83,6 +83,8 @@ def _has_topics(src_topics, dst_topics): return False result_list = [] + indexed_logs = self._db.get_logs(from_block, to_block, addresses, topics, block_hash) + with self._tx_slot.get_lock(): for data in self._tx_by_neon_sign.values(): tx = pickle.loads(data) @@ -97,9 +99,10 @@ def _has_topics(src_topics, dst_topics): continue if len(topics) and (not _has_topics(topics, log['topics'])): continue + if log in indexed_logs: + continue result_list.append(log) - - return result_list + self._db.get_logs(from_block, to_block, addresses, topics, block_hash) + return indexed_logs + result_list def get_sol_sign_list_by_neon_sign(self, neon_sign: str, is_pended_tx: bool, before_slot: int) -> [str]: if not is_pended_tx: diff --git a/proxy/mempool/__init__.py b/proxy/mempool/__init__.py index ec2e99105..84d5d1624 100644 --- a/proxy/mempool/__init__.py +++ b/proxy/mempool/__init__.py @@ -1,7 +1,7 @@ from .mempool_client import MemPoolClient -from .mempool_service import MemPoolService -from .mem_pool import MemPool +from .mempool_service import MPService +from .mempool import MemPool from 
.mempool_api import * -MEMPOOL_SERVICE_PORT = MemPoolService.MEMPOOL_SERVICE_PORT -MEMPOOL_SERVICE_HOST = MemPoolService.MEMPOOL_SERVICE_HOST +MP_SERVICE_PORT = MPService.MP_SERVICE_PORT +MP_SERVICE_HOST = MPService.MP_SERVICE_HOST diff --git a/proxy/mempool/executor_mng.py b/proxy/mempool/executor_mng.py new file mode 100644 index 000000000..91bac4898 --- /dev/null +++ b/proxy/mempool/executor_mng.py @@ -0,0 +1,91 @@ +import asyncio +import dataclasses +import socket + +from collections import deque +from typing import List, Tuple, Deque, Set +from logged_groups import logged_group + +from ..common_neon.config import IConfig +from ..common_neon.utils import PipePickableDataClient + +from .mempool_api import MPRequest, IMPExecutor +from .mempool_executor import MPExecutor + + +class MpExecutorClient(PipePickableDataClient): + + def __init__(self, client_sock: socket.socket): + PipePickableDataClient.__init__(self, client_sock=client_sock) + + async def send_tx_request(self, mempool_tx_request: MPRequest): + return await self.send_data_async(mempool_tx_request) + + +@logged_group("neon.MemPool") +class MPExecutorMng(IMPExecutor): + + BRING_BACK_EXECUTOR_TIMEOUT_SEC = 1800 + + @dataclasses.dataclass + class ExecutorInfo: + executor: MPExecutor + client: MpExecutorClient + id: int + + def __init__(self, executor_count: int, config: IConfig): + self.info(f"Initialize executor mng with executor_count: {executor_count}") + self._available_executor_pool: Deque[int] = deque() + self._busy_executor_pool: Set[int] = set() + self._executors: List[MPExecutorMng.ExecutorInfo] = list() + for i in range(executor_count): + executor_info = MPExecutorMng._create_executor(i, config) + self._executors.append(executor_info) + self._available_executor_pool.appendleft(i) + executor_info.executor.start() + + def submit_mp_request(self, mp_reqeust: MPRequest) -> Tuple[int, asyncio.Task]: + executor_id, executor = self._get_executor() + tx_hash = "0x" + mp_reqeust.neon_tx.hash_signed().hex() + self.debug(f"Tx: {tx_hash} - scheduled on executor: {executor_id}") + task = asyncio.get_event_loop().create_task(executor.send_data_async(mp_reqeust)) + return executor_id, task + + def is_available(self) -> bool: + return self._has_available() + + def _has_available(self) -> bool: + return len(self._available_executor_pool) > 0 + + def _get_executor(self) -> Tuple[int, MpExecutorClient]: + executor_id = self._available_executor_pool.pop() + self.debug(f"Acquire executor: {executor_id}") + self._busy_executor_pool.add(executor_id) + executor_info = self._executors[executor_id] + return executor_id, executor_info.client + + def on_no_liquidity(self, resource_id: int): + self.debug(f"No liquidity, executor: {resource_id} - will be unblocked in: {MPExecutorMng.BRING_BACK_EXECUTOR_TIMEOUT_SEC} sec") + asyncio.get_event_loop().create_task(self._release_executor_later(resource_id)) + + async def _release_executor_later(self, executor_id: int): + await asyncio.sleep(MPExecutorMng.BRING_BACK_EXECUTOR_TIMEOUT_SEC) + self.release_resource(executor_id) + + def release_resource(self, resource_id: int): + self.debug(f"Release executor: {resource_id}") + self._busy_executor_pool.remove(resource_id) + self._available_executor_pool.appendleft(resource_id) + + @staticmethod + def _create_executor(executor_id: int, config: IConfig) -> ExecutorInfo: + client_sock, srv_sock = socket.socketpair() + executor = MPExecutor(executor_id, srv_sock, config) + client = MpExecutorClient(client_sock) + return MPExecutorMng.ExecutorInfo(executor=executor, 
client=client, id=executor_id) + + def __del__(self): + for executor_info in self._executors: + executor_info.executor.kill() + self._busy_executor_pool.clear() + self._available_executor_pool.clear() diff --git a/proxy/mempool/mem_pool.py b/proxy/mempool/mem_pool.py deleted file mode 100644 index 072393c67..000000000 --- a/proxy/mempool/mem_pool.py +++ /dev/null @@ -1,30 +0,0 @@ -from logged_groups import logged_group -from concurrent.futures import ProcessPoolExecutor - -from ..common_neon.config import IConfig - -from .mempool_api import MemPoolTxRequest -from .mempool_tx_executor import MemPoolTxExecutor - - -@logged_group("neon.MemPool") -class MemPool: - - POOL_PROC_COUNT = 8 - - def __init__(self, config: IConfig): - self._pool = ProcessPoolExecutor(self.POOL_PROC_COUNT) - self._tx_executor = MemPoolTxExecutor(config) - - def send_raw_transaction(self, mempool_tx_request: MemPoolTxRequest) -> bool: - try: - self._pool.submit(MemPool._send_raw_transaction_impl, mempool_tx_request) - except Exception as err: - print(f"Failed enqueue mempool_tx_request into the worker pool: {err}") - return False - return True - - @staticmethod - def _send_raw_transaction_impl(mempool_tx_request: MemPoolTxRequest) -> bool: - print(f"mempool_tx_request: {mempool_tx_request}") - return True diff --git a/proxy/mempool/mempool.py b/proxy/mempool/mempool.py new file mode 100644 index 000000000..9986d262a --- /dev/null +++ b/proxy/mempool/mempool.py @@ -0,0 +1,151 @@ +import asyncio +from typing import List, Tuple, Dict +from logged_groups import logged_group +import bisect + +from .mempool_api import MPRequest, MPResultCode, MPResult, IMPExecutor, MPRequestType, \ + MPTxRequest, MPPendingTxCountReq + + +@logged_group("neon.MemPool") +class MemPool: + + TX_QUEUE_MAX_SIZE = 4096 + TX_QUEUE_SIZE = 4095 + CHECK_TASK_TIMEOUT_SEC = 0.05 + + def __init__(self, executor: IMPExecutor): + self._req_queue = [] + self._lock = asyncio.Lock() + self._req_queue_cond = asyncio.Condition() + self._processing_tasks: List[Tuple[int, asyncio.Task, MPRequest]] = [] + # signer -> pending_tx_counter + self._pending_trx_counters: Dict[str, int] = {} + self._process_tx_results_task = asyncio.get_event_loop().create_task(self.check_processing_tasks()) + self._process_tx_queue_task = asyncio.get_event_loop().create_task(self.process_tx_queue()) + + self._executor = executor + + async def enqueue_mp_request(self, mp_request: MPRequest): + if mp_request.type == MPRequestType.SendTransaction: + tx_request: MPTxRequest = mp_request + return await self.on_send_tx_request(tx_request) + elif mp_request.type == MPRequestType.GetTrxCount: + pending_count_req: MPPendingTxCountReq = mp_request + return self.get_pending_trx_count(pending_count_req.sender) + + async def on_send_tx_request(self, mp_request: MPTxRequest): + await self.enqueue_mp_transaction(mp_request) + sender = "0x" + mp_request.neon_tx.sender() + self._inc_pending_tx_counter(sender) + count = self.get_pending_trx_count(sender) + self.debug(f"On send tx request. 
Sender: {sender}, pending tx count: {count}")
+
+    async def enqueue_mp_transaction(self, mp_request: MPTxRequest):
+        tx_hash = mp_request.neon_tx.hash_signed().hex()
+        log_ctx = {"context": {"req_id": mp_request.req_id}}
+        try:
+            self.debug(f"Got mp_tx_request: 0x{tx_hash} to be scheduled on the mempool", extra=log_ctx)
+            if len(self._req_queue) > MemPool.TX_QUEUE_MAX_SIZE:
+                self._req_queue = self._req_queue[-MemPool.TX_QUEUE_SIZE:]
+            bisect.insort_left(self._req_queue, mp_request)
+            await self._kick_tx_queue()
+        except Exception as err:
+            self.error(f"Failed enqueue tx: {tx_hash} into queue: {err}", extra=log_ctx)
+
+    def get_pending_trx_count(self, sender: str):
+        return self._pending_trx_counters.get(sender, 0)
+
+    async def process_tx_queue(self):
+        while True:
+            async with self._req_queue_cond:
+                await self._req_queue_cond.wait()
+                if len(self._req_queue) == 0:
+                    self.debug("Tx queue empty - continue waiting for new")
+                    continue
+                if not self._executor.is_available():
+                    self.debug("No way to process tx - no available executor")
+                    continue
+                mp_request: MPRequest = self._req_queue.pop()
+                self.submit_request_to_executor(mp_request)
+
+    def submit_request_to_executor(self, mp_tx_request: MPRequest):
+        resource_id, task = self._executor.submit_mp_request(mp_tx_request)
+        self._processing_tasks.append((resource_id, task, mp_tx_request))
+
+    async def check_processing_tasks(self):
+        while True:
+            not_finished_tasks = []
+            for resource_id, task, mp_request in self._processing_tasks:
+                if not task.done():
+                    not_finished_tasks.append((resource_id, task, mp_request))
+                    continue
+                exception = task.exception()
+                if exception is not None:
+                    log_ctx = {"context": {"req_id": mp_request.req_id}}
+                    self.error(f"Exception during processing request: {exception} - tx will be dropped away", extra=log_ctx)
+                    self._on_request_dropped_away(mp_request)
+                    self._executor.release_resource(resource_id)
+                    continue
+
+                mp_result: MPResult = task.result()
+                assert isinstance(mp_result, MPResult)
+                assert mp_result.code != MPResultCode.Dummy
+                await self._process_mp_result(resource_id, mp_result, mp_request)
+
+            self._processing_tasks = not_finished_tasks
+            await asyncio.sleep(MemPool.CHECK_TASK_TIMEOUT_SEC)
+
+    async def _process_mp_result(self, resource_id: int, mp_result: MPResult, mp_request: MPTxRequest):
+        tx_hash = "0x" + mp_request.neon_tx.hash_signed().hex()
+        log_ctx = {"context": {"req_id": mp_request.req_id}}
+        if mp_result.code == MPResultCode.Done:
+            self.debug(f"Neon tx: {tx_hash} - processed on executor: {resource_id} - done", extra=log_ctx)
+            self._on_request_done(mp_request)
+            self._executor.release_resource(resource_id)
+            await self._kick_tx_queue()
+            return
+        self.warning(f"Failed to process tx: {tx_hash} - on executor: {resource_id}, status: {mp_result} - reschedule", extra=log_ctx)
+        if mp_result.code == MPResultCode.BlockedAccount:
+            self._executor.release_resource(resource_id)
+            await self.enqueue_mp_request(mp_request)
+            await self._kick_tx_queue()
+        elif mp_result.code == MPResultCode.NoLiquidity:
+            self._executor.on_no_liquidity(resource_id)
+            await self.enqueue_mp_request(mp_request)
+            await self._kick_tx_queue()
+        elif mp_result.code == MPResultCode.Unspecified:
+            self._executor.release_resource(resource_id)
+            self._on_request_dropped_away(mp_request)
+            await self._kick_tx_queue()
+
+    def _on_request_done(self, tx_request: MPTxRequest):
+        sender = "0x" + tx_request.neon_tx.sender()
+        self._dec_pending_tx_counter(sender)
+        count = self.get_pending_trx_count(sender)
+        log_ctx = {"context": {"req_id": tx_request.req_id}}
+        self.debug(f"Request done. Sender: {sender}, pending tx count: {count}", extra=log_ctx)
+
+    def _on_request_dropped_away(self, tx_request: MPTxRequest):
+        sender = "0x" + tx_request.neon_tx.sender()
+        self._dec_pending_tx_counter(sender)
+        count = self.get_pending_trx_count(sender)
+        log_ctx = {"context": {"req_id": tx_request.req_id}}
+        self.debug(f"Request dropped away. Sender: {sender}, pending tx count: {count}", extra=log_ctx)
+
+    def _inc_pending_tx_counter(self, sender: str):
+        counts = self._pending_trx_counters.get(sender, 0)
+        self._pending_trx_counters.update({sender: counts + 1})
+
+    def _dec_pending_tx_counter(self, sender: str):
+        count = self._pending_trx_counters.get(sender, 0)
+        assert count > 0
+        count = count - 1
+        if count == 0:
+            del self._pending_trx_counters[sender]
+        else:
+            self._pending_trx_counters.update({sender: count})
+
+    async def _kick_tx_queue(self):
+        async with self._req_queue_cond:
+            self._req_queue_cond.notify()
diff --git a/proxy/mempool/mempool_api.py b/proxy/mempool/mempool_api.py
index 4f515a224..bc5dc3226 100644
--- a/proxy/mempool/mempool_api.py
+++ b/proxy/mempool/mempool_api.py
@@ -1,11 +1,78 @@
-from dataclasses import dataclass
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from enum import IntEnum
+from typing import Any, Tuple
+from abc import ABC, abstractmethod
+from asyncio import Task
 
 from ..common_neon.eth_proto import Trx as NeonTx
 from ..common_neon.data import NeonTxExecCfg, NeonEmulatingResult
 
 
+class IMPExecutor(ABC):
+
+    @abstractmethod
+    def submit_mp_request(self, mp_reqeust: MPRequest) -> Tuple[int, Task]:
+        pass
+
+    @abstractmethod
+    def is_available(self) -> bool:
+        pass
+
+    @abstractmethod
+    def on_no_liquidity(self, resource_id: int):
+        pass
+
+    @abstractmethod
+    def release_resource(self, resource_id: int):
+        pass
+
+
+class MPRequestType(IntEnum):
+    SendTransaction = 0,
+    GetTrxCount = 1,
+    Dummy = -1
+
+
+@dataclass(order=True)
+class MPRequest:
+    req_id: int
+    type: MPRequestType = field(default=MPRequestType.Dummy)
+
+
+@dataclass
+class MPTxRequest(MPRequest):
+    signature: str = field(compare=False, default=None)
+    neon_tx: NeonTx = field(compare=False, default=None)
+    neon_tx_exec_cfg: NeonTxExecCfg = field(compare=False, default=None)
+    emulating_result: NeonEmulatingResult = field(compare=False, default=None)
+    _gas_price: int = field(compare=True, default=None)
+
+    def __post_init__(self):
+        self._gas_price = self.neon_tx.gasPrice
+        self.type = MPRequestType.SendTransaction
+
+
+@dataclass
+class MPPendingTxCountReq(MPRequest):
+
+    sender: str = None
+
+    def __post_init__(self):
+        self.type = MPRequestType.GetTrxCount
+
+
+class MPResultCode(IntEnum):
+    Done = 0
+    BlockedAccount = 1,
+    SolanaUnavailable = 2,
+    NoLiquidity = 3,
+    Unspecified = 4,
+    Dummy = -1
+
+
 @dataclass
-class MemPoolTxRequest:
-    neon_tx: NeonTx
-    neon_tx_exec_cfg: NeonTxExecCfg
-    emulating_result: NeonEmulatingResult
+class MPResult:
+    code: MPResultCode
+    data: Any
diff --git a/proxy/mempool/mempool_client.py b/proxy/mempool/mempool_client.py
index e0be95015..9cb5c9b21 100644
--- a/proxy/mempool/mempool_client.py
+++ b/proxy/mempool/mempool_client.py
@@ -1,12 +1,25 @@
+from logged_groups import logged_group
+
 from ..common_neon.utils import AddrPickableDataClient
 
-from .mempool_api import MemPoolTxRequest
+from .mempool_api import MPTxRequest, MPPendingTxCountReq
+
+from ..common_neon.eth_proto import Trx as NeonTx
+from 
..common_neon.data import NeonTxExecCfg, NeonEmulatingResult +@logged_group("neon.Proxy") class MemPoolClient: def __init__(self, host: str, port: int): self._pickable_data_client = AddrPickableDataClient((host, port)) - def send_raw_transaction(self, mempool_tx_request: MemPoolTxRequest): - self._pickable_data_client.send_data(mempool_tx_request) + def send_raw_transaction(self, req_id: int, signature: str, neon_tx: NeonTx, neon_tx_exec_cfg: NeonTxExecCfg, + emulating_result: NeonEmulatingResult): + mempool_tx_request = MPTxRequest(req_id=req_id, signature=signature, neon_tx=neon_tx, + neon_tx_exec_cfg=neon_tx_exec_cfg, emulating_result=emulating_result) + return self._pickable_data_client.send_data(mempool_tx_request) + + def get_pending_tx_count(self, req_id: int, sender: str): + mempool_pending_tx_count_req = MPPendingTxCountReq(req_id=req_id, sender=sender) + return self._pickable_data_client.send_data(mempool_pending_tx_count_req) diff --git a/proxy/mempool/mempool_executor.py b/proxy/mempool/mempool_executor.py new file mode 100644 index 000000000..127ee4b02 --- /dev/null +++ b/proxy/mempool/mempool_executor.py @@ -0,0 +1,65 @@ +import asyncio +import multiprocessing as mp +import socket + +from logged_groups import logged_group, logging_context + +from ..common_neon.solana_interactor import SolanaInteractor +from ..common_neon.config import IConfig +from ..common_neon.utils import PipePickableDataSrv, PickableDataServerUser, Any +from ..common_neon.config import Config +from ..memdb.memdb import MemDB + +from .transaction_sender import NeonTxSender +from .operator_resource_list import OperatorResourceList +from .mempool_api import MPRequest, MPResult, MPResultCode + + +@logged_group("neon.MemPool") +class MPExecutor(mp.Process, PickableDataServerUser): + + def __init__(self, executor_id: int, srv_sock: socket.socket, config: IConfig): + self.info(f"Initialize mempool_executor: {executor_id}") + self._id = executor_id + self._srv_sock = srv_sock + self._config = config + self.info(f"Config: {self._config}") + self._event_loop: asyncio.BaseEventLoop + self._solana: SolanaInteractor + self._db: MemDB + self._pickable_data_srv = None + mp.Process.__init__(self) + + def _init_in_proc(self): + self.info(f"Config: {self._config}") + self._event_loop = asyncio.new_event_loop() + asyncio.set_event_loop(self._event_loop) + self._pickable_data_srv = PipePickableDataSrv(user=self, srv_sock=self._srv_sock) + self._solana = SolanaInteractor(self._config.get_solana_url()) + self._db = MemDB(self._solana) + + def execute_neon_tx(self, mempool_request: MPRequest): + with logging_context(req_id=mempool_request.req_id, exectr=self._id): + try: + self.execute_neon_tx_impl(mempool_request) + except Exception as err: + self.error(f"Failed to execute neon_tx: {err}") + return MPResult(MPResultCode.Unspecified, None) + return MPResult(MPResultCode.Done, None) + + def execute_neon_tx_impl(self, mempool_tx_cfg: MPRequest): + neon_tx = mempool_tx_cfg.neon_tx + neon_tx_cfg = mempool_tx_cfg.neon_tx_exec_cfg + emulating_result = mempool_tx_cfg.emulating_result + emv_step_count = self._config.get_evm_count() + tx_sender = NeonTxSender(self._db, self._solana, neon_tx, steps=emv_step_count) + with OperatorResourceList(tx_sender): + tx_sender.execute(neon_tx_cfg, emulating_result) + + async def on_data_received(self, data: Any) -> Any: + return self.execute_neon_tx(data) + + def run(self) -> None: + self._config = Config() + self._init_in_proc() + self._event_loop.run_forever() diff --git 
a/proxy/mempool/mempool_service.py b/proxy/mempool/mempool_service.py index 1381d66ce..8478cfd9f 100644 --- a/proxy/mempool/mempool_service.py +++ b/proxy/mempool/mempool_service.py @@ -1,26 +1,28 @@ from logged_groups import logged_group import asyncio from multiprocessing import Process +from typing import Any from ..common_neon.utils.pickable_data_server import AddrPickableDataSrv, PickableDataServerUser from ..common_neon.config import IConfig -from .mem_pool import MemPool - -from typing import Any +from .mempool import MemPool +from .executor_mng import MPExecutorMng @logged_group("neon.MemPool") -class MemPoolService(PickableDataServerUser): +class MPService(PickableDataServerUser): - MEMPOOL_SERVICE_PORT = 9091 - MEMPOOL_SERVICE_HOST = "0.0.0.0" + MP_SERVICE_PORT = 9091 + MP_SERVICE_HOST = "0.0.0.0" + EXECUTOR_COUNT = 8 def __init__(self, config: IConfig): self.event_loop = asyncio.new_event_loop() asyncio.set_event_loop(self.event_loop) self._mempool_server = None self._mempool = None + self._mp_executor_mng = None self._process = Process(target=self.run) self._config = config @@ -28,10 +30,11 @@ def start(self): self.info("Run until complete") self._process.start() - def on_data_received(self, data: Any) -> Any: - return self._mempool.send_raw_transaction(data) + async def on_data_received(self, data: Any) -> Any: + return await self._mempool.enqueue_mp_request(data) def run(self): - self._mempool_server = AddrPickableDataSrv(user=self, address=(self.MEMPOOL_SERVICE_HOST, self.MEMPOOL_SERVICE_PORT)) - self._mempool = MemPool(self._config) + self._mempool_server = AddrPickableDataSrv(user=self, address=(self.MP_SERVICE_HOST, self.MP_SERVICE_PORT)) + self._mp_executor_mng = MPExecutorMng(self.EXECUTOR_COUNT, self._config) + self._mempool = MemPool(self._mp_executor_mng) self.event_loop.run_forever() diff --git a/proxy/mempool/mempool_tx_executor.py b/proxy/mempool/mempool_tx_executor.py deleted file mode 100644 index eb4f7e23b..000000000 --- a/proxy/mempool/mempool_tx_executor.py +++ /dev/null @@ -1,29 +0,0 @@ -from logged_groups import logged_group - -from ..common_neon.solana_interactor import SolanaInteractor -from ..common_neon.config import IConfig -from ..memdb.memdb import MemDB - -# TODO: NeonTxSender should be moved out from there -from .transaction_sender import NeonTxSender -from .operator_resource_list import OperatorResourceList -from .mempool_api import MemPoolTxRequest - - -@logged_group("neon.MemPool") -class MemPoolTxExecutor: - - def __init__(self, config: IConfig): - - self._solana = SolanaInteractor(config.get_solana_url()) - self._db = MemDB(self._solana) - self._config = config - - def execute_neon_tx(self, mempool_tx_request: MemPoolTxRequest): - neon_tx = mempool_tx_request.neon_tx - neon_tx_cfg = mempool_tx_request.neon_tx_exec_cfg - emulating_result = mempool_tx_request.emulating_result - emv_step_count = self._config.get_evm_count() - tx_sender = NeonTxSender(self._db, self._solana, neon_tx, steps=emv_step_count) - with OperatorResourceList(tx_sender): - tx_sender.execute(neon_tx_cfg, emulating_result) diff --git a/proxy/mempool/neon_tx_stages.py b/proxy/mempool/neon_tx_stages.py index 08b3a408b..308c62c3c 100644 --- a/proxy/mempool/neon_tx_stages.py +++ b/proxy/mempool/neon_tx_stages.py @@ -29,7 +29,7 @@ def build(self): pass -@logged_group("neon.Proxy") +@logged_group("neon.MemPool") class NeonCancelTxStage(NeonTxStage, abc.ABC): NAME = 'cancelWithNonce' @@ -78,7 +78,7 @@ def _create_account_with_seed(self): return 
self.s.builder.create_account_with_seed_instruction(self.sol_account, self._seed, self.balance, self.size) -@logged_group("neon.Proxy") +@logged_group("neon.MemPool") class NeonCreateAccountTxStage(NeonTxStage): NAME = 'createNeonAccount' @@ -98,7 +98,7 @@ def build(self): self.tx.add(self._create_account()) -@logged_group("neon.Proxy") +@logged_group("neon.MemPool") class NeonCreateERC20TxStage(NeonTxStage, abc.ABC): NAME = 'createERC20Account' @@ -124,7 +124,7 @@ def build(self): self.tx.add(self._create_erc20_account()) -@logged_group("neon.Proxy") +@logged_group("neon.MemPool") class NeonCreateContractTxStage(NeonCreateAccountWithSeedStage, abc.ABC): NAME = 'createNeonContract' @@ -150,7 +150,7 @@ def build(self): self.tx.add(self._create_account()) -@logged_group("neon.Proxy") +@logged_group("neon.MemPool") class NeonResizeContractTxStage(NeonCreateAccountWithSeedStage, abc.ABC): NAME = 'resizeNeonContract' diff --git a/proxy/mempool/operator_resource_list.py b/proxy/mempool/operator_resource_list.py index 2ee58ab78..c4153c68a 100644 --- a/proxy/mempool/operator_resource_list.py +++ b/proxy/mempool/operator_resource_list.py @@ -42,7 +42,7 @@ def secret_key(self) -> bytes: return self.signer.secret_key() -@logged_group("neon.Proxy") +@logged_group("neon.MemPool") class OperatorResourceList: # These variables are global for class, they will be initialized one time _manager = mp.Manager() @@ -282,7 +282,7 @@ def free_resource_info(self): self._free_resource_list.append(resource.idx) -@logged_group("neon.Proxy") +@logged_group("neon.MemPool") class NeonCreatePermAccount(NeonCreateAccountWithSeedStage, abc.ABC): NAME = 'createPermAccount' diff --git a/proxy/mempool/transaction_sender.py b/proxy/mempool/transaction_sender.py index 1d233414c..9c33fc9c3 100644 --- a/proxy/mempool/transaction_sender.py +++ b/proxy/mempool/transaction_sender.py @@ -30,7 +30,7 @@ from .operator_resource_list import OperatorResourceInfo -@logged_group("neon.Proxy") +@logged_group("neon.MemPool") class NeonTxSender: def __init__(self, db: MemDB, solana: SolanaInteractor, eth_tx: EthTx, steps: int): self._db = db @@ -205,7 +205,7 @@ def done_account_tx_list(self, skip_create_accounts=False): self.create_account_tx.instructions.clear() -@logged_group("neon.Proxy") +@logged_group("neon.MemPool") class BaseNeonTxStrategy(metaclass=abc.ABCMeta): NAME = 'UNKNOWN STRATEGY' @@ -259,7 +259,7 @@ def _validate_gas_limit(self): return False -@logged_group("neon.Proxy") +@logged_group("neon.MemPool") class SimpleNeonTxSender(SolTxListSender): def __init__(self, strategy: BaseNeonTxStrategy, *args, **kwargs): SolTxListSender.__init__(self, *args, **kwargs) @@ -282,7 +282,7 @@ def _on_post_send(self): raise RuntimeError('Run out of attempts to execute transaction') -@logged_group("neon.Proxy") +@logged_group("neon.MemPool") class SimpleNeonTxStrategy(BaseNeonTxStrategy, abc.ABC): NAME = 'CallFromRawEthereumTX' IS_SIMPLE = True @@ -329,7 +329,7 @@ def execute(self) -> (NeonTxResultInfo, [str]): return tx_sender.neon_res, tx_sender.success_sign_list -@logged_group("neon.Proxy") +@logged_group("neon.MemPool") class IterativeNeonTxSender(SimpleNeonTxSender): def __init__(self, *args, **kwargs): SimpleNeonTxSender.__init__(self, *args, **kwargs) @@ -443,7 +443,7 @@ def _on_post_send(self): self._tx_list.append(self._strategy.build_tx()) -@logged_group("neon.Proxy") +@logged_group("neon.MemPool") class IterativeNeonTxStrategy(BaseNeonTxStrategy, abc.ABC): NAME = 'PartialCallOrContinueFromRawEthereumTX' IS_SIMPLE = False @@ -492,7 
+492,7 @@ def execute(self) -> (NeonTxResultInfo, [str]): return tx_sender.neon_res, tx_sender.success_sign_list -@logged_group("neon.Proxy") +@logged_group("neon.MemPool") class HolderNeonTxStrategy(IterativeNeonTxStrategy, abc.ABC): NAME = 'ExecuteTrxFromAccountDataIterativeOrContinue' @@ -531,7 +531,7 @@ def _build_preparation_tx_list(self) -> [TransactionWithComputeBudget]: return tx_list -@logged_group("neon.Proxy") +@logged_group("neon.MemPool") class NoChainIdNeonTxStrategy(HolderNeonTxStrategy, abc.ABC): NAME = 'ExecuteTrxFromAccountDataIterativeOrContinueNoChainId' diff --git a/proxy/neon_proxy_app.py b/proxy/neon_proxy_app.py index 334c1a039..32f29530f 100644 --- a/proxy/neon_proxy_app.py +++ b/proxy/neon_proxy_app.py @@ -1,5 +1,5 @@ from .proxy import entry_point -from .mempool.mempool_service import MemPoolService +from .mempool.mempool_service import MPService from .statistics_exporter.prometheus_proxy_server import PrometheusProxyServer from .common_neon.config import Config @@ -9,7 +9,7 @@ class NeonProxyApp: def __init__(self): self._config = Config() - self._mempool_service = MemPoolService(self._config) + self._mempool_service = MPService(self._config) def start(self): self._mempool_service.start() diff --git a/proxy/neon_rpc_api_model/neon_rpc_api_model.py b/proxy/neon_rpc_api_model/neon_rpc_api_model.py index 57e6d22f1..01cc51ba7 100644 --- a/proxy/neon_rpc_api_model/neon_rpc_api_model.py +++ b/proxy/neon_rpc_api_model/neon_rpc_api_model.py @@ -6,7 +6,7 @@ from typing import Optional, Union, Tuple import sha3 -from logged_groups import logged_group +from logged_groups import logged_group, LogMng from web3.auto import w3 from ..common_neon.address import EthereumAddress @@ -24,7 +24,7 @@ CHAIN_ID, USE_EARLIEST_BLOCK_IF_0_PASSED, neon_cli, EVM_STEP_COUNT from ..memdb.memdb import MemDB from ..statistics_exporter.proxy_metrics_interface import StatisticsExporter -from ..mempool import MemPoolTxRequest, MemPoolClient, MEMPOOL_SERVICE_HOST, MEMPOOL_SERVICE_PORT +from ..mempool import MemPoolClient, MP_SERVICE_HOST, MP_SERVICE_PORT from .transaction_validator import NeonTxValidator @@ -49,7 +49,7 @@ def __init__(self): self._solana = SolanaInteractor(SOLANA_URL) self._db = MemDB(self._solana) self._stat_exporter: Optional[StatisticsExporter] = None - self._mempool_client = MemPoolClient(MEMPOOL_SERVICE_HOST, MEMPOOL_SERVICE_PORT) + self._mempool_client = MemPoolClient(MP_SERVICE_HOST, MP_SERVICE_PORT) if PP_SOLANA_URL == SOLANA_URL: self.gas_price_calculator = GasPriceCalculator(self._solana, PYTH_MAPPING_ACCOUNT) @@ -214,8 +214,8 @@ def eth_getBalance(self, account: str, tag: str) -> str: return hex(0) return hex(neon_account_info.balance) - except (Exception,): - # self.debug(f"eth_getBalance: Can't get account info: {err}") + except (Exception,) as err: + self.debug(f"eth_getBalance: Can't get account info: {err}") return hex(0) def eth_getLogs(self, obj): @@ -383,11 +383,20 @@ def eth_call(self, obj: dict, tag: str) -> str: def eth_getTransactionCount(self, account: str, tag: str) -> str: self._validate_block_tag(tag) - account = self._normalize_account(account) + account = self._normalize_account(account).lower() try: + self.debug(f"Get transaction count. 
Account: {account}, tag: {tag}") + pending_trx_count = 0 + if tag == "pending": + req_id = LogMng.get_logging_context().get("req_id") + pending_trx_count = self._mempool_client.get_pending_tx_count(req_id=req_id, sender=account) + self.debug(f"Pending tx count for: {account} - is: {pending_trx_count}") + neon_account_info = self._solana.get_neon_account_info(account) - return hex(neon_account_info.trx_count) + trx_count = neon_account_info.trx_count + pending_trx_count + + return hex(trx_count) except (Exception,): # self.debug(f"eth_getTransactionCount: Can't get account info: {err}") return hex(0) @@ -480,17 +489,14 @@ def eth_sendRawTransaction(self, rawTrx: str) -> str: try: neon_tx_cfg, emulating_result = self.precheck(trx) - # tx_sender = NeonTxSender(self._db, self._solana, trx, steps=EVM_STEP_COUNT) - # with OperatorResourceList(tx_sender): - # tx_sender.execute(neon_tx_cfg) - self._stat_tx_success() - mempool_tx_request = MemPoolTxRequest(neon_tx=trx, - neon_tx_exec_cfg=neon_tx_cfg, - emulating_result=emulating_result) + req_id = LogMng.get_logging_context().get("req_id") - if not self._mempool_client.send_raw_transaction(mempool_tx_request): - raise Exception("Failed to pass neon_tx into MemPool") + self._mempool_client.send_raw_transaction(req_id=req_id, + signature=eth_signature, + neon_tx=trx, + neon_tx_exec_cfg=neon_tx_cfg, + emulating_result=emulating_result) return eth_signature except PendingTxError as err: diff --git a/proxy/testing/test_eth_getLogs.py b/proxy/testing/test_eth_getLogs.py index 12795d8d2..a92e212d7 100644 --- a/proxy/testing/test_eth_getLogs.py +++ b/proxy/testing/test_eth_getLogs.py @@ -46,6 +46,7 @@ class Test_eth_getLogs(unittest.TestCase): + @classmethod def setUpClass(cls): print("\n\n") @@ -102,10 +103,9 @@ def commit_transactions(self): self.commit_two_event_trx(self, 5, 6) self.commit_no_event_trx(self, 7, 8) self.commit_no_event_trx(self, 9, 0) - pass def commit_one_event_trx(self, x, y) -> None: - print("\ncommit_one_event_trx") + print(f"\ncommit_one_event_trx. x: {x}, y: {y}") right_nonce = proxy.eth.get_transaction_count(proxy.eth.default_account) trx_store = self.storage_contract.functions.addReturnEvent(x, y).buildTransaction({'nonce': right_nonce}) trx_store_signed = proxy.eth.account.sign_transaction(trx_store, eth_account.key) @@ -120,7 +120,7 @@ def commit_one_event_trx(self, x, y) -> None: self.topics.append(topic.hex()) def commit_two_event_trx(self, x, y) -> None: - print("\ncommit_two_event_trx") + print(f"\ncommit_two_event_trx. 
x: {x}, y: {y}") right_nonce = proxy.eth.get_transaction_count(proxy.eth.default_account) trx_store = self.storage_contract.functions.addReturnEventTwice(x, y).buildTransaction({'nonce': right_nonce}) trx_store_signed = proxy.eth.account.sign_transaction(trx_store, eth_account.key) @@ -146,7 +146,6 @@ def commit_no_event_trx(self, x, y) -> None: self.block_hashes_no_event.append(trx_store_receipt['blockHash'].hex()) self.block_numbers_no_event.append(hex(trx_store_receipt['blockNumber'])) - def test_get_logs_by_blockHash(self): print("\ntest_get_logs_by_blockHash") receipts = proxy.eth.get_logs({'blockHash': self.block_hashes[0]}) @@ -155,24 +154,25 @@ def test_get_logs_by_blockHash(self): def test_get_no_logs_by_blockHash(self): print("\ntest_get_no_logs_by_blockHash") - receipts = proxy.eth.get_logs({'blockHash': self.block_hashes_no_event[0]}) + receipts = proxy.eth.get_logs({'blockHash': self.block_hashes_no_event[0], + 'address': self.storage_contract.address}) print('receipts: ', receipts) self.assertEqual(len(receipts), 0) def test_get_logs_by_fromBlock(self): - print("\ntest_get_logs_by_fromBlock") - receipts = proxy.eth.get_logs({'fromBlock': self.block_numbers[2]}) + from_block = self.block_numbers[2] + print(f"\ntest_get_logs_by_fromBlock: {from_block}, by storage contract address: {self.storage_contract.address}") + receipts = proxy.eth.get_logs({'fromBlock': from_block, + 'address': self.storage_contract.address}) print('receipts: ', receipts) self.assertEqual(len(receipts), 4) def test_get_logs_complex_request(self): print("\ntest_get_logs_complex_request") - receipts = proxy.eth.get_logs({ - 'fromBlock': 0, - 'toBlock': 'latest', - 'address': self.storage_contract.address, - 'topics': self.topics, - }) + receipts = proxy.eth.get_logs({'fromBlock': 0, + 'toBlock': 'latest', + 'address': self.storage_contract.address, + 'topics': self.topics}) print('receipts: ', receipts) self.assertEqual(len(receipts), 6) @@ -182,5 +182,6 @@ def test_get_logs_by_address(self): print('receipts: ', receipts) self.assertEqual(len(receipts), 6) + if __name__ == '__main__': unittest.main() diff --git a/proxy/testing/test_neon_tx_sender.py b/proxy/testing/test_neon_tx_sender.py index 7154cd3e3..238ae92d0 100644 --- a/proxy/testing/test_neon_tx_sender.py +++ b/proxy/testing/test_neon_tx_sender.py @@ -44,10 +44,10 @@ def test_01_validate_execution_when_not_enough_sols(self): self._resource_list._min_operator_balance_to_warn.side_effect = [1_049_000_000 * 1_000_000_000 * 1_000_000_000 * 2, 1_000_000_000 * 2] self._resource_list._min_operator_balance_to_err.side_effect = [1_049_000_000 * 1_000_000_000 * 1_000_000_000, 1_000_000_000] - with self.assertLogs('neon', level='ERROR') as logs: + with self.assertLogs('neon.MemPool', level='ERROR') as logs: with self._resource_list: print('logs.output:', str(logs.output)) - self.assertRegex(str(logs.output), 'ERROR:neon.Proxy:Operator account [A-Za-z0-9]{40,}:[0-9]+ has NOT enough SOLs; balance = [0-9]+; min_operator_balance_to_err = 1049000000000000000000000000') + self.assertRegex(str(logs.output), 'ERROR:neon.MemPool:Operator account [A-Za-z0-9]{40,}:[0-9]+ has NOT enough SOLs; balance = [0-9]+; min_operator_balance_to_err = 1049000000000000000000000000') # @unittest.skip("a.i.") def test_02_validate_warning_when_little_sols(self): @@ -60,10 +60,10 @@ def test_02_validate_warning_when_little_sols(self): self._resource_list._min_operator_balance_to_warn.side_effect = [1_049_000_000 * 1_000_000_000 * 1_000_000_000, 1_000_000_000 * 2] 
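
Note (editorial, not part of the diff): the eth_getTransactionCount hunk above reduces to simple nonce arithmetic, the on-chain transaction count plus whatever the mempool still holds for the sender when the "pending" tag is requested. A compressed sketch with fake stand-ins for the Solana interactor and mempool client (illustrative values and names, not the real APIs):

    class FakeSolana:
        def get_neon_account_info(self, account: str):
            class Info: trx_count = 7      # on-chain (finalized) nonce
            return Info()

    class FakeMemPoolClient:
        def get_pending_tx_count(self, req_id: int, sender: str) -> int:
            return 2                       # txs still queued in the mempool for this sender

    def eth_get_transaction_count(solana, mempool, req_id: int, account: str, tag: str) -> str:
        pending = mempool.get_pending_tx_count(req_id=req_id, sender=account.lower()) if tag == "pending" else 0
        return hex(solana.get_neon_account_info(account).trx_count + pending)

    assert eth_get_transaction_count(FakeSolana(), FakeMemPoolClient(), 1, "0xAbC", "pending") == hex(9)
    assert eth_get_transaction_count(FakeSolana(), FakeMemPoolClient(), 1, "0xAbC", "latest") == hex(7)
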
self._resource_list._min_operator_balance_to_err.side_effect = [1_049_049_000, 1_000_000_000] - with self.assertLogs('neon', level='WARNING') as logs: + with self.assertLogs('neon.MemPool', level='WARNING') as logs: with self._resource_list: print('logs.output:', str(logs.output)) - self.assertRegex(str(logs.output), 'WARNING:neon.Proxy:Operator account [A-Za-z0-9]{40,}:[0-9]+ SOLs are running out; balance = [0-9]+; min_operator_balance_to_warn = 1049000000000000000000000000; min_operator_balance_to_err = 1049049000;') + self.assertRegex(str(logs.output), 'WARNING:neon.MemPool:Operator account [A-Za-z0-9]{40,}:[0-9]+ SOLs are running out; balance = [0-9]+; min_operator_balance_to_warn = 1049000000000000000000000000; min_operator_balance_to_err = 1049049000;') # @unittest.skip("a.i.") def test_03_validate_execution_when_not_enough_sols_for_all_operator_accounts(self): @@ -78,11 +78,11 @@ def test_03_validate_execution_when_not_enough_sols_for_all_operator_accounts(se self._resource_list._min_operator_balance_to_warn.return_value = 1_049_000_000 * 1_000_000_000 * 1_000_000_000 * 2 self._resource_list._min_operator_balance_to_err.return_value = 1_049_000_000 * 1_000_000_000 * 1_000_000_000 - with self.assertLogs('neon', level='ERROR') as logs: + with self.assertLogs('neon.MemPool', level='ERROR') as logs: with self.assertRaises(RuntimeError, msg='Operator has NO resources!'): with self._resource_list: pass print('logs.output:', str(logs.output)) - self.assertRegex(str(logs.output), 'ERROR:neon.Proxy:Operator account [A-Za-z0-9]{40,}:[0-9]+ has NOT enough SOLs; balance = [0-9]+; min_operator_balance_to_err = 1049000000000000000000000000') + self.assertRegex(str(logs.output), 'ERROR:neon.MemPool:Operator account [A-Za-z0-9]{40,}:[0-9]+ has NOT enough SOLs; balance = [0-9]+; min_operator_balance_to_err = 1049000000000000000000000000') diff --git a/requirements.txt b/requirements.txt index 56809c7b1..7fce90475 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,4 +10,4 @@ ethereum py-solc-x==1.1.0 flask prometheus_client==0.13.1 -git+https://github.com/neonlabsorg/python-logged-groups.git@2.1.4 +git+https://github.com/neonlabsorg/python-logged-groups.git@2.1.5 From a6fbbc147dff994b886f17924f815cca6a83bd94 Mon Sep 17 00:00:00 2001 From: Rozhkov Dmitrii Date: Thu, 2 Jun 2022 11:11:27 +0400 Subject: [PATCH 07/11] #712 mempool renaming --- Dockerfile | 2 +- proxy/common_neon/utils/__init__.py | 2 +- .../common_neon/utils/pickable_data_server.py | 8 +++--- proxy/mempool/executor_mng.py | 8 +++--- proxy/mempool/mempool.py | 27 +++++++++---------- proxy/mempool/mempool_api.py | 2 +- proxy/mempool/mempool_executor.py | 10 +++---- proxy/mempool/mempool_service.py | 4 +-- proxy/neon_rpc_api_model/__init__.py | 2 +- ...pc_api_model.py => neon_rcp_api_worker.py} | 2 +- .../transaction_validator.py | 2 +- proxy/plugin/neon_rpc_api_plugin.py | 4 +-- proxy/testing/test_neon_rpc_api.py | 4 +-- 13 files changed, 38 insertions(+), 39 deletions(-) rename proxy/neon_rpc_api_model/{neon_rpc_api_model.py => neon_rcp_api_worker.py} (99%) diff --git a/Dockerfile b/Dockerfile index 13c9e2b86..9086d880e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -42,7 +42,7 @@ COPY . 
/opt ARG PROXY_REVISION ARG LOG_CFG=log_cfg.json RUN (cp -f /opt/${LOG_CFG} /opt/log_cfg.json || true) -RUN sed -i 's/NEON_PROXY_REVISION_TO_BE_REPLACED/'"$PROXY_REVISION"'/g' /opt/proxy/neon_rpc_api_model/neon_rpc_api_model.py +RUN sed -i 's/NEON_PROXY_REVISION_TO_BE_REPLACED/'"$PROXY_REVISION"'/g' /opt/proxy/neon_rpc_api_model/neon_rcp_api_worker.py COPY ./proxy/solana-py.patch /opt RUN cd /usr/local/lib/python3.8/dist-packages/ && patch -p0 Any: @@ -25,7 +25,7 @@ def encode_pickable(object) -> bytes: @logged_group("neon.MemPool") class PickableDataServer(ABC): - def __init__(self, *, user: PickableDataServerUser): + def __init__(self, *, user: IPickableDataServerUser): self._user = user asyncio.get_event_loop().create_task(self.run_server()) @@ -63,7 +63,7 @@ async def _recv_pickable_data(self, reader: StreamReader): class AddrPickableDataSrv(PickableDataServer): - def __init__(self, *, user: PickableDataServerUser, address: Tuple[str, int]): + def __init__(self, *, user: IPickableDataServerUser, address: Tuple[str, int]): self._address = address PickableDataServer.__init__(self, user=user) @@ -75,7 +75,7 @@ async def run_server(self): class PipePickableDataSrv(PickableDataServer): - def __init__(self, *, user: PickableDataServerUser, srv_sock: socket.socket): + def __init__(self, *, user: IPickableDataServerUser, srv_sock: socket.socket): self._srv_sock = srv_sock PickableDataServer.__init__(self, user=user) diff --git a/proxy/mempool/executor_mng.py b/proxy/mempool/executor_mng.py index 91bac4898..4d2a5252b 100644 --- a/proxy/mempool/executor_mng.py +++ b/proxy/mempool/executor_mng.py @@ -13,7 +13,7 @@ from .mempool_executor import MPExecutor -class MpExecutorClient(PipePickableDataClient): +class MPExecutorClient(PipePickableDataClient): def __init__(self, client_sock: socket.socket): PipePickableDataClient.__init__(self, client_sock=client_sock) @@ -30,7 +30,7 @@ class MPExecutorMng(IMPExecutor): @dataclasses.dataclass class ExecutorInfo: executor: MPExecutor - client: MpExecutorClient + client: MPExecutorClient id: int def __init__(self, executor_count: int, config: IConfig): @@ -57,7 +57,7 @@ def is_available(self) -> bool: def _has_available(self) -> bool: return len(self._available_executor_pool) > 0 - def _get_executor(self) -> Tuple[int, MpExecutorClient]: + def _get_executor(self) -> Tuple[int, MPExecutorClient]: executor_id = self._available_executor_pool.pop() self.debug(f"Acquire executor: {executor_id}") self._busy_executor_pool.add(executor_id) @@ -81,7 +81,7 @@ def release_resource(self, resource_id: int): def _create_executor(executor_id: int, config: IConfig) -> ExecutorInfo: client_sock, srv_sock = socket.socketpair() executor = MPExecutor(executor_id, srv_sock, config) - client = MpExecutorClient(client_sock) + client = MPExecutorClient(client_sock) return MPExecutorMng.ExecutorInfo(executor=executor, client=client, id=executor_id) def __del__(self): diff --git a/proxy/mempool/mempool.py b/proxy/mempool/mempool.py index 9986d262a..1e245d2b6 100644 --- a/proxy/mempool/mempool.py +++ b/proxy/mempool/mempool.py @@ -3,7 +3,7 @@ from logged_groups import logged_group import bisect -from .mempool_api import MPRequest, MPResultCode, MPResult, IMPExecutor, MPRequestType, \ +from .mempool_api import MPRequest, MPResultCode, MPTxResult, IMPExecutor, MPRequestType, \ MPTxRequest, MPPendingTxCountReq @@ -16,7 +16,6 @@ class MemPool: def __init__(self, executor: IMPExecutor): self._req_queue = [] - self._lock = asyncio.Lock() self._req_queue_cond = asyncio.Condition() 
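
Note (editorial, not part of the diff): removing the unused self._lock above leaves the asyncio.Condition as the only coordination point between the producer (enqueue_mp_transaction) and the consumer loop (process_tx_queue). A self-contained sketch of that wait/notify ("kick") pattern, with toy names in place of the mempool types:

    import asyncio

    class KickedQueue:
        def __init__(self):
            self._items = []
            self._cond = asyncio.Condition()

        async def put(self, item):
            self._items.append(item)
            async with self._cond:
                self._cond.notify()          # the _kick_tx_queue() side

        async def consume(self):
            while True:                      # the process_tx_queue() side
                async with self._cond:
                    await self._cond.wait()  # sleep until kicked
                    if not self._items:
                        continue             # kicked, but nothing left to pop
                    item = self._items.pop()
                print("processing", item)

    async def demo():
        q = KickedQueue()
        consumer = asyncio.create_task(q.consume())
        await asyncio.sleep(0)               # let the consumer reach wait()
        await q.put("tx-1")
        await asyncio.sleep(0.1)             # gives it time to print "processing tx-1"
        consumer.cancel()

    asyncio.run(demo())

As in the real process_tx_queue, a wakeup is not a guarantee of work: the consumer re-checks its queue under the condition and keeps waiting when there is nothing to pop.
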
self._processing_tasks: List[Tuple[int, asyncio.Task, MPRequest]] = [] # signer -> pending_tx_counter @@ -29,12 +28,12 @@ def __init__(self, executor: IMPExecutor): async def enqueue_mp_request(self, mp_request: MPRequest): if mp_request.type == MPRequestType.SendTransaction: tx_request: MPTxRequest = mp_request - return await self.on_send_tx_request(tx_request) + return await self._on_send_tx_request(tx_request) elif mp_request.type == MPRequestType.GetTrxCount: pending_count_req: MPPendingTxCountReq = mp_request return self.get_pending_trx_count(pending_count_req.sender) - async def on_send_tx_request(self, mp_request: MPTxRequest): + async def _on_send_tx_request(self, mp_request: MPTxRequest): await self.enqueue_mp_transaction(mp_request) sender = "0x" + mp_request.neon_tx.sender() self._inc_pending_tx_counter(sender) @@ -89,33 +88,33 @@ async def check_processing_tasks(self): self._executor.release_resource(resource_id) continue - mp_result: MPResult = task.result() - assert isinstance(mp_result, MPResult) - assert mp_result.code != MPResultCode.Dummy - await self._process_mp_result(resource_id, mp_result, mp_request) + mp_tx_result: MPTxResult = task.result() + assert isinstance(mp_tx_result, MPTxResult) + assert mp_tx_result.code != MPResultCode.Dummy + await self._process_mp_result(resource_id, mp_tx_result, mp_request) self._processing_tasks = not_finished_tasks await asyncio.sleep(MemPool.CHECK_TASK_TIMEOUT_SEC) - async def _process_mp_result(self, resource_id: int, mp_result: MPResult, mp_request: MPTxRequest): + async def _process_mp_result(self, resource_id: int, mp_tx_result: MPTxResult, mp_request: MPTxRequest): tx_hash = "0x" + mp_request.neon_tx.hash_signed().hex() log_ctx = {"context": {"req_id": mp_request.req_id}} - if mp_result.code == MPResultCode.Done: + if mp_tx_result.code == MPResultCode.Done: self.debug(f"Neon tx: {tx_hash} - processed on executor: {resource_id} - done", extra=log_ctx) self._on_request_done(mp_request) self._executor.release_resource(resource_id) await self._kick_tx_queue() return - self.warning(f"Failed to process tx: {tx_hash} - on executor: {resource_id}, status: {mp_result} - reschedule", extra=log_ctx) - if mp_result.code == MPResultCode.BlockedAccount: + self.warning(f"Failed to process tx: {tx_hash} - on executor: {resource_id}, status: {mp_tx_result} - reschedule", extra=log_ctx) + if mp_tx_result.code == MPResultCode.BlockedAccount: self._executor.release_resource(resource_id) await self.enqueue_mp_request(mp_request) await self._kick_tx_queue() - elif mp_result.code == MPResultCode.NoLiquidity: + elif mp_tx_result.code == MPResultCode.NoLiquidity: self._executor.on_no_liquidity(resource_id) await self.enqueue_mp_request(mp_request) await self._kick_tx_queue() - elif mp_result.code == MPResultCode.Unspecified: + elif mp_tx_result.code == MPResultCode.Unspecified: self._executor.release_resource(resource_id) self._on_request_dropped_away(mp_request) await self._kick_tx_queue() diff --git a/proxy/mempool/mempool_api.py b/proxy/mempool/mempool_api.py index bc5dc3226..46274b016 100644 --- a/proxy/mempool/mempool_api.py +++ b/proxy/mempool/mempool_api.py @@ -73,6 +73,6 @@ class MPResultCode(IntEnum): @dataclass -class MPResult: +class MPTxResult: code: MPResultCode data: Any diff --git a/proxy/mempool/mempool_executor.py b/proxy/mempool/mempool_executor.py index 127ee4b02..91a86a33f 100644 --- a/proxy/mempool/mempool_executor.py +++ b/proxy/mempool/mempool_executor.py @@ -6,17 +6,17 @@ from ..common_neon.solana_interactor import 
SolanaInteractor from ..common_neon.config import IConfig -from ..common_neon.utils import PipePickableDataSrv, PickableDataServerUser, Any +from ..common_neon.utils import PipePickableDataSrv, IPickableDataServerUser, Any from ..common_neon.config import Config from ..memdb.memdb import MemDB from .transaction_sender import NeonTxSender from .operator_resource_list import OperatorResourceList -from .mempool_api import MPRequest, MPResult, MPResultCode +from .mempool_api import MPRequest, MPTxResult, MPResultCode @logged_group("neon.MemPool") -class MPExecutor(mp.Process, PickableDataServerUser): +class MPExecutor(mp.Process, IPickableDataServerUser): def __init__(self, executor_id: int, srv_sock: socket.socket, config: IConfig): self.info(f"Initialize mempool_executor: {executor_id}") @@ -44,8 +44,8 @@ def execute_neon_tx(self, mempool_request: MPRequest): self.execute_neon_tx_impl(mempool_request) except Exception as err: self.error(f"Failed to execute neon_tx: {err}") - return MPResult(MPResultCode.Unspecified, None) - return MPResult(MPResultCode.Done, None) + return MPTxResult(MPResultCode.Unspecified, None) + return MPTxResult(MPResultCode.Done, None) def execute_neon_tx_impl(self, mempool_tx_cfg: MPRequest): neon_tx = mempool_tx_cfg.neon_tx diff --git a/proxy/mempool/mempool_service.py b/proxy/mempool/mempool_service.py index 8478cfd9f..35e6c1131 100644 --- a/proxy/mempool/mempool_service.py +++ b/proxy/mempool/mempool_service.py @@ -3,7 +3,7 @@ from multiprocessing import Process from typing import Any -from ..common_neon.utils.pickable_data_server import AddrPickableDataSrv, PickableDataServerUser +from ..common_neon.utils.pickable_data_server import AddrPickableDataSrv, IPickableDataServerUser from ..common_neon.config import IConfig from .mempool import MemPool @@ -11,7 +11,7 @@ @logged_group("neon.MemPool") -class MPService(PickableDataServerUser): +class MPService(IPickableDataServerUser): MP_SERVICE_PORT = 9091 MP_SERVICE_HOST = "0.0.0.0" diff --git a/proxy/neon_rpc_api_model/__init__.py b/proxy/neon_rpc_api_model/__init__.py index 16b7421f3..059398e7d 100644 --- a/proxy/neon_rpc_api_model/__init__.py +++ b/proxy/neon_rpc_api_model/__init__.py @@ -1 +1 @@ -from . neon_rpc_api_model import NeonRpcApiModel, NEON_PROXY_PKG_VERSION, NEON_PROXY_REVISION +from . 
neon_rcp_api_worker import NeonRpcApiWorker, NEON_PROXY_PKG_VERSION, NEON_PROXY_REVISION diff --git a/proxy/neon_rpc_api_model/neon_rpc_api_model.py b/proxy/neon_rpc_api_model/neon_rcp_api_worker.py similarity index 99% rename from proxy/neon_rpc_api_model/neon_rpc_api_model.py rename to proxy/neon_rpc_api_model/neon_rcp_api_worker.py index 85ff17d6c..d408cb0e1 100644 --- a/proxy/neon_rpc_api_model/neon_rpc_api_model.py +++ b/proxy/neon_rpc_api_model/neon_rcp_api_worker.py @@ -42,7 +42,7 @@ def default(self, obj): @logged_group("neon.Proxy") -class NeonRpcApiModel: +class NeonRpcApiWorker: proxy_id_glob = multiprocessing.Value('i', 0) def __init__(self): diff --git a/proxy/neon_rpc_api_model/transaction_validator.py b/proxy/neon_rpc_api_model/transaction_validator.py index 90cc3228d..c4833efc6 100644 --- a/proxy/neon_rpc_api_model/transaction_validator.py +++ b/proxy/neon_rpc_api_model/transaction_validator.py @@ -80,11 +80,11 @@ def _prevalidate_tx(self): self._prevalidate_tx_chain_id() self._prevalidate_tx_size() self._prevalidate_sender_balance() + self._prevalidate_underpriced_tx_without_chainid() def _prevalidate_emulator(self, emulator_json: dict): self._prevalidate_gas_usage(emulator_json) self._prevalidate_account_sizes(emulator_json) - self._prevalidate_underpriced_tx_without_chainid() def extract_ethereum_error(self, e: Exception): receipt_parser = SolReceiptParser(e) diff --git a/proxy/plugin/neon_rpc_api_plugin.py b/proxy/plugin/neon_rpc_api_plugin.py index c7b3083b7..31d88306c 100644 --- a/proxy/plugin/neon_rpc_api_plugin.py +++ b/proxy/plugin/neon_rpc_api_plugin.py @@ -25,7 +25,7 @@ from ..common_neon.solana_receipt_parser import SolTxError from ..common_neon.errors import EthereumError from ..environment import ENABLE_PRIVATE_API -from ..neon_rpc_api_model import NeonRpcApiModel +from ..neon_rpc_api_model import NeonRpcApiWorker from ..statistics_exporter.prometheus_proxy_exporter import PrometheusExporter modelInstanceLock = threading.Lock() @@ -54,7 +54,7 @@ def getModel(cls): global modelInstance with modelInstanceLock: if modelInstance is None: - modelInstance = NeonRpcApiModel() + modelInstance = NeonRpcApiWorker() return modelInstance def routes(self) -> List[Tuple[int, str]]: diff --git a/proxy/testing/test_neon_rpc_api.py b/proxy/testing/test_neon_rpc_api.py index ee4342af7..ebc9c5dc3 100644 --- a/proxy/testing/test_neon_rpc_api.py +++ b/proxy/testing/test_neon_rpc_api.py @@ -1,13 +1,13 @@ import unittest from logged_groups import logged_group -from ..neon_rpc_api_model import NeonRpcApiModel +from ..neon_rpc_api_model import NeonRpcApiWorker @logged_group("neon.TestCases") class SolanaContractTests(unittest.TestCase): def setUp(self): - self.model = NeonRpcApiModel() + self.model = NeonRpcApiWorker() self.owner = '0xc1566af4699928fdf9be097ca3dc47ece39f8f8e' self.token1 = '0x49a449cd7fd8fbcf34d103d98f2c05245020e35b' From 81e37af7d1ad5d3fe235046317a8efb19ebbaf4c Mon Sep 17 00:00:00 2001 From: rozhkovdmitrii Date: Thu, 2 Jun 2022 21:01:16 +0400 Subject: [PATCH 08/11] Get rid of reading elf params int config.py --- proxy/common_neon/config.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/proxy/common_neon/config.py b/proxy/common_neon/config.py index 3b2dfef80..e86d6f27d 100644 --- a/proxy/common_neon/config.py +++ b/proxy/common_neon/config.py @@ -16,10 +16,6 @@ def get_evm_count(self) -> Optional[int]: class Config(IConfig): - def __init__(self): - from ..environment import read_elf_params, ELF_PARAMS - read_elf_params(ELF_PARAMS) - def get_solana_url(self) -> 
Optional[str]: return os.environ.get("SOLANA_URL", "http://localhost:8899") From e23374046ca71c4e5806309fd041316c7525cdf1 Mon Sep 17 00:00:00 2001 From: rozhkovdmitrii Date: Thu, 2 Jun 2022 21:12:13 +0400 Subject: [PATCH 09/11] Fix --- proxy/neon_rpc_api_model/__init__.py | 2 +- proxy/neon_rpc_api_model/neon_rcp_api_worker.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/proxy/neon_rpc_api_model/__init__.py b/proxy/neon_rpc_api_model/__init__.py index 059398e7d..7462b84f6 100644 --- a/proxy/neon_rpc_api_model/__init__.py +++ b/proxy/neon_rpc_api_model/__init__.py @@ -1 +1 @@ -from . neon_rcp_api_worker import NeonRpcApiWorker, NEON_PROXY_PKG_VERSION, NEON_PROXY_REVISION +from .neon_rcp_api_worker import NeonRpcApiWorker, NEON_PROXY_PKG_VERSION, NEON_PROXY_REVISION diff --git a/proxy/neon_rpc_api_model/neon_rcp_api_worker.py b/proxy/neon_rpc_api_model/neon_rcp_api_worker.py index ebd598546..0f6cc24bc 100644 --- a/proxy/neon_rpc_api_model/neon_rcp_api_worker.py +++ b/proxy/neon_rpc_api_model/neon_rcp_api_worker.py @@ -19,7 +19,6 @@ from ..common_neon.utils import SolanaBlockInfo from ..common_neon.data import NeonTxExecCfg, NeonEmulatingResult from ..common_neon.gas_price_calculator import GasPriceCalculator -from ..common_neon.types import NeonTxPrecheckResult, NeonEmulatingResult from ..common_neon.elf_params import ElfParams from ..common_neon.environment_utils import neon_cli from ..common_neon.environment_data import SOLANA_URL, PP_SOLANA_URL, EVM_STEP_COUNT, USE_EARLIEST_BLOCK_IF_0_PASSED, \ From 833ca4b9543d6219132374aad5e0ee2ca76a8e3d Mon Sep 17 00:00:00 2001 From: Rozhkov Dmitrii Date: Wed, 15 Jun 2022 18:38:22 +0400 Subject: [PATCH 10/11] #721 mempool gas sender nonce sorted txs (#795) --- .buildkite/pipeline.yml | 5 +- .buildkite/steps/deploy-test.sh | 2 + log_cfg.json | 15 + .../common_neon/utils/pickable_data_server.py | 116 +++++- proxy/deploy-test.sh | 7 +- proxy/mempool/executor_mng.py | 18 +- proxy/mempool/mempool.py | 166 ++++----- proxy/mempool/mempool_api.py | 17 +- proxy/mempool/mempool_schedule.py | 189 ++++++++++ proxy/mempool/mempool_service.py | 10 +- .../neon_rpc_api_model/neon_rcp_api_worker.py | 13 +- .../transaction_validator.py | 13 +- proxy/testing/test_eth_sendRawTransaction.py | 28 -- proxy/testing/test_mempool.py | 339 ++++++++++++++++++ proxy/testing/test_pickable_data_transfer.py | 0 15 files changed, 768 insertions(+), 170 deletions(-) create mode 100644 proxy/mempool/mempool_schedule.py create mode 100644 proxy/testing/test_mempool.py create mode 100644 proxy/testing/test_pickable_data_transfer.py diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 7274fd096..ce1fef429 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -16,8 +16,6 @@ steps: command: - ".buildkite/steps/full_test_suite/terraform-build.sh" - - wait - - label: ":cop::skin-tone-2: deploy check" command: ".buildkite/steps/deploy-test.sh" timeout: 90 @@ -33,6 +31,8 @@ steps: - "indexer.log" - "deploy_contracts.log" - "proxy_program_loader.log" +# env: +# UNITTEST_TESTPATH: proxy.testing.test_indexer_work.CancelTest.test_02_get_code_from_indexer - label: ":coverage: full test suite (FTS)" key: "full_test_suite" @@ -55,6 +55,7 @@ steps: - allure-reports.tar.gz - fts_${BUILDKITE_BUILD_NUMBER}.log - "./logs/*" + depends_on: "create_infrastructure" - wait diff --git a/.buildkite/steps/deploy-test.sh b/.buildkite/steps/deploy-test.sh index e65f820d6..e29f4601e 100755 --- a/.buildkite/steps/deploy-test.sh +++ b/.buildkite/steps/deploy-test.sh 
@@ -1,6 +1,7 @@ #!/bin/bash set -euo pipefail + wait-for-proxy() { PROXY_URL="$1" @@ -106,6 +107,7 @@ docker run --rm -ti --network=container:proxy \ -e POSTGRES_USER=neon-proxy \ -e POSTGRES_PASSWORD=neon-proxy-pass \ -e POSTGRES_HOST=postgres \ + -e UNITTEST_TESTPATH=${UNITTEST_TESTPATH:=} \ --entrypoint ./proxy/deploy-test.sh \ ${EXTRA_ARGS:-} \ $PROXY_IMAGE diff --git a/log_cfg.json b/log_cfg.json index 82ae9cc4d..836b78b1a 100644 --- a/log_cfg.json +++ b/log_cfg.json @@ -36,10 +36,25 @@ }, "loggers": { "neon": { + "level": "ERROR", + "handlers": ["standard"], + "propagate": false + }, + "neon.Proxy": { "level": "DEBUG", "handlers": ["standard"], "propagate": false }, + "neon.MemPool": { + "level": "DEBUG", + "handlers": ["standard"], + "propagate": false + }, + "neon.Network": { + "level": "ERROR", + "handlers": ["standard"], + "propagate": false + }, "": { "level": "ERROR", "handlers": ["root"], diff --git a/proxy/common_neon/utils/pickable_data_server.py b/proxy/common_neon/utils/pickable_data_server.py index 7ba807dc5..1172b48b0 100644 --- a/proxy/common_neon/utils/pickable_data_server.py +++ b/proxy/common_neon/utils/pickable_data_server.py @@ -16,13 +16,14 @@ async def on_data_received(self, data: Any) -> Any: """Gets neon_tx_data from the neon rpc api service worker""" -def encode_pickable(object) -> bytes: +def encode_pickable(object, logger) -> bytes: data = pickle.dumps(object) len_data = struct.pack("!I", len(data)) + logger.debug(f"Len data: {len(len_data)} - bytes, data: {len(data)} - bytes") return len_data + data -@logged_group("neon.MemPool") +@logged_group("neon.Network") class PickableDataServer(ABC): def __init__(self, *, user: IPickableDataServerUser): @@ -36,31 +37,46 @@ async def run_server(self): async def handle_client(self, reader: StreamReader, writer: StreamWriter): while True: try: + self.debug("Recv pickable data") data = await self._recv_pickable_data(reader) result = await self._user.on_data_received(data) - result_data = encode_pickable(result) + self.debug(f"Encode pickable result: {result}") + result_data = encode_pickable(result, self) + self.debug(f"Send result_data: {len(result_data)}, bytes: {result_data.hex()}") writer.write(result_data) await writer.drain() - except ConnectionResetError: + except ConnectionResetError as err: + self.warning(f"Connection reset error: {err}") break except asyncio.exceptions.IncompleteReadError as err: self.error(f"Incomplete read error: {err}") break except Exception as err: - self.error(f"Failed to receive data err: {err}, type: {type(err)}") + self.error(f"Failed to receive data err: {err}, {err.__traceback__.tb_next.tb_frame}, type: {type(err)}") break async def _recv_pickable_data(self, reader: StreamReader): len_packed: bytes = await reader.read(4) if len(len_packed) == 0: + self.error("Got empty len_packed") raise ConnectionResetError() - payload_len_data = struct.unpack("!I", len_packed)[0] - payload = await reader.read(payload_len_data) + payload_len = struct.unpack("!I", len_packed)[0] + self.debug(f"Got payload len_packed: {len_packed.hex()}, that is: {payload_len}") + payload = b'' + while len(payload) < payload_len: + to_be_read = payload_len - len(payload) + self.debug(f"Reading chunk of: {to_be_read} of: {payload_len} - bytes") + chunk = payload + await reader.read(to_be_read) + self.debug(f"Got chunk of data: {len(chunk)}") + payload += chunk + self.debug(f"Got payload data: {len(payload)}. 
Load pickled object") data = pickle.loads(payload) + self.debug(f"Loaded pickable of type: {type(data)}") return data +@logged_group("neon.MemPool") class AddrPickableDataSrv(PickableDataServer): def __init__(self, *, user: IPickableDataServerUser, address: Tuple[str, int]): @@ -73,6 +89,7 @@ async def run_server(self): await asyncio.start_server(self.handle_client, host, port) +@logged_group("neon.Network") class PipePickableDataSrv(PickableDataServer): def __init__(self, *, user: IPickableDataServerUser, srv_sock: socket.socket): @@ -84,50 +101,112 @@ async def run_server(self): await self.handle_client(reader, writer) -@logged_group("neon.Proxy") +async def read_data(self, reader: StreamReader, data_len: int): + data = b'' + while len(data) < data_len: + to_be_read = data_len - len(data) + self.debug(f"Reading answer data: {to_be_read} of: {data_len} - bytes") + chunk = await reader.read(to_be_read) + self.debug(f"Got chunk of answer data: {len(chunk)}") + data += chunk + return data + + class PickableDataClient: def __init__(self): - self._client_sock = None + self._client_sock: socket.socket = None + self._reader: StreamReader = None + self._writer: StreamWriter = None def _set_client_sock(self, client_sock: socket.socket): self._client_sock = client_sock + async def async_init(self): + self.info("Async init on client") + reader, writer = await asyncio.open_connection(sock=self._client_sock) + self._reader = reader + self._writer = writer + self.info(f"_reader: {reader}, _writer: {writer}") + def send_data(self, pickable_object: Any): try: - payload = encode_pickable(pickable_object) + self.debug(f"Send pickable_object of type: {type(pickable_object)}") + payload: bytes = encode_pickable(pickable_object, self) + self.debug(f"Payload: {len(payload)}, bytes: {payload[:15].hex()}") sent = self._client_sock.send(payload) + self.debug(f"Sent: {sent} - bytes") + except BaseException as err: + self.error(f"Failed to send client data: {err}") + raise + + try: + self.debug(f"Waiting for answer") len_packed: bytes = self._client_sock.recv(4) data_len = struct.unpack("!I", len_packed)[0] - data = self._client_sock.recv(data_len) + self.debug(f"Got len_packed bytes: {len_packed.hex()}, that is: {data_len} - bytes to receive") + + data = b'' + while len(data) < data_len: + to_be_read = data_len - len(data) + self.debug(f"Reading answer data: {to_be_read} of: {data_len} - bytes") + chunk: bytes = self._client_sock.recv(to_be_read) + self.debug(f"Got chunk of answer data: {len(chunk)}") + data += chunk + if not data: + self.error(f"Got: {data_len} to receive but not data") return None + self.debug(f"Got data: {len(data)}. 
Load pickled object") result = pickle.loads(data) + self.debug(f"Got result: {result}") return result except BaseException as err: - self.error(f"Failed to send data: {err}") + self.error(f"Failed to receive answer data: {err}") raise async def send_data_async(self, pickable_object): - loop = asyncio.get_event_loop() + try: - payload = encode_pickable(pickable_object) - await loop.sock_sendall(self._client_sock, payload) + self.debug(f"Send pickable_object of type: {type(pickable_object)}") + payload = encode_pickable(pickable_object, self) + self.debug(f"Payload: {len(payload)}, bytes: {payload[:15].hex()}") + self._writer.write(payload) + await self._writer.drain() + + except BaseException as err: + self.error(f"Failed to send client data: {err}") + raise - len_packed: bytes = await loop.sock_recv(self._client_sock, 4) + try: + self.debug(f"Waiting for answer") + len_packed: bytes = await read_data(self, self._reader, 4) if not len_packed: return None data_len = struct.unpack("!I", len_packed)[0] - data = await loop.sock_recv(self._client_sock, data_len) + + data = b'' + while len(data) < data_len: + to_be_read = data_len - len(data) + self.debug(f"Reading answer data: {to_be_read} of: {data_len} - bytes") + chunk = await self._reader.read(to_be_read) + self.debug(f"Got chunk of answer data: {len(chunk)}") + data += chunk + if not data: + self.error(f"Got: {data_len} to receive but not data") return None + self.debug(f"Got data: {len(data)}. Load pickled object") result = pickle.loads(data) + self.debug(f"Got result: {result}") return result + except BaseException as err: - self.error(f"Failed to send data: {err}") + self.error(f"Failed to receive answer data: {err}") raise +@logged_group("neon.Network") class PipePickableDataClient(PickableDataClient): def __init__(self, client_sock: socket.socket): @@ -135,6 +214,7 @@ def __init__(self, client_sock: socket.socket): self._set_client_sock(client_sock=client_sock) +@logged_group("neon.Network") class AddrPickableDataClient(PickableDataClient): def __init__(self, addr: Tuple[str, int]): diff --git a/proxy/deploy-test.sh b/proxy/deploy-test.sh index 94ce21517..b0597ab1a 100755 --- a/proxy/deploy-test.sh +++ b/proxy/deploy-test.sh @@ -22,8 +22,11 @@ set ${TESTNAME:=*} export ETH_TOKEN_MINT=$NEON_TOKEN_MINT export TEST_PROGRAM=$(solana address -k /spl/bin/proxy_program-keypair.json) -# python3 -m unittest discover -v -p "test_${TESTNAME}.py" -find . -name "test_${TESTNAME}.py" -printf "%f\n" | sort | parallel --halt now,fail=1 --jobs 4 python3 -m unittest discover -v -p {} +if [[ -z "${UNITTEST_TESTPATH}" ]]; then + find . 
-name "test_*.py" -printf "%f\n" | sort | parallel --halt now,fail=1 --jobs 4 python3 -m unittest discover -v -p {} +else + python3 -m unittest ${UNITTEST_TESTPATH} +fi echo "Deploy test success" exit 0 diff --git a/proxy/mempool/executor_mng.py b/proxy/mempool/executor_mng.py index 4d2a5252b..3e258bc8e 100644 --- a/proxy/mempool/executor_mng.py +++ b/proxy/mempool/executor_mng.py @@ -1,7 +1,7 @@ import asyncio import dataclasses import socket - +from abc import ABC, abstractmethod from collections import deque from typing import List, Tuple, Deque, Set from logged_groups import logged_group @@ -18,8 +18,12 @@ class MPExecutorClient(PipePickableDataClient): def __init__(self, client_sock: socket.socket): PipePickableDataClient.__init__(self, client_sock=client_sock) - async def send_tx_request(self, mempool_tx_request: MPRequest): - return await self.send_data_async(mempool_tx_request) + +class IMPExecutorMngUser(ABC): + + @abstractmethod + def on_resource_released(self, resource_id: int): + assert False @logged_group("neon.MemPool") @@ -33,17 +37,22 @@ class ExecutorInfo: client: MPExecutorClient id: int - def __init__(self, executor_count: int, config: IConfig): + def __init__(self, user: IMPExecutorMngUser, executor_count: int, config: IConfig): self.info(f"Initialize executor mng with executor_count: {executor_count}") self._available_executor_pool: Deque[int] = deque() self._busy_executor_pool: Set[int] = set() self._executors: List[MPExecutorMng.ExecutorInfo] = list() + self._user = user for i in range(executor_count): executor_info = MPExecutorMng._create_executor(i, config) self._executors.append(executor_info) self._available_executor_pool.appendleft(i) executor_info.executor.start() + async def async_init(self): + for ex_info in self._executors: + await ex_info.client.async_init() + def submit_mp_request(self, mp_reqeust: MPRequest) -> Tuple[int, asyncio.Task]: executor_id, executor = self._get_executor() tx_hash = "0x" + mp_reqeust.neon_tx.hash_signed().hex() @@ -76,6 +85,7 @@ def release_resource(self, resource_id: int): self.debug(f"Release executor: {resource_id}") self._busy_executor_pool.remove(resource_id) self._available_executor_pool.appendleft(resource_id) + self._user.on_resource_released(resource_id) @staticmethod def _create_executor(executor_id: int, config: IConfig) -> ExecutorInfo: diff --git a/proxy/mempool/mempool.py b/proxy/mempool/mempool.py index 1e245d2b6..de5248b9e 100644 --- a/proxy/mempool/mempool.py +++ b/proxy/mempool/mempool.py @@ -1,72 +1,66 @@ import asyncio -from typing import List, Tuple, Dict +from typing import List, Tuple + from logged_groups import logged_group -import bisect -from .mempool_api import MPRequest, MPResultCode, MPTxResult, IMPExecutor, MPRequestType, \ - MPTxRequest, MPPendingTxCountReq +from .mempool_api import MPRequest, MPResultCode, MPTxResult, IMPExecutor, MPRequestType, MPTxRequest,\ + MPPendingTxCountReq +from .mempool_schedule import MPTxSchedule @logged_group("neon.MemPool") class MemPool: - TX_QUEUE_MAX_SIZE = 4096 - TX_QUEUE_SIZE = 4095 - CHECK_TASK_TIMEOUT_SEC = 0.05 + CHECK_TASK_TIMEOUT_SEC = 0.01 + MP_CAPACITY = 4096 - def __init__(self, executor: IMPExecutor): - self._req_queue = [] - self._req_queue_cond = asyncio.Condition() + def __init__(self, executor: IMPExecutor, capacity: int = MP_CAPACITY): + self._tx_schedule = MPTxSchedule(capacity) + self._schedule_cond = asyncio.Condition() self._processing_tasks: List[Tuple[int, asyncio.Task, MPRequest]] = [] - # signer -> pending_tx_counter - 
self._pending_trx_counters: Dict[str, int] = {}
         self._process_tx_results_task = asyncio.get_event_loop().create_task(self.check_processing_tasks())
-        self._process_tx_queue_task = asyncio.get_event_loop().create_task(self.process_tx_queue())
+        self._process_tx_queue_task = asyncio.get_event_loop().create_task(self.process_tx_schedule())
         self._executor = executor

     async def enqueue_mp_request(self, mp_request: MPRequest):
         if mp_request.type == MPRequestType.SendTransaction:
             tx_request: MPTxRequest = mp_request
-            return await self._on_send_tx_request(tx_request)
+            return await self._schedule_mp_tx_request(tx_request)
         elif mp_request.type == MPRequestType.GetTrxCount:
             pending_count_req: MPPendingTxCountReq = mp_request
             return self.get_pending_trx_count(pending_count_req.sender)

-    async def _on_send_tx_request(self, mp_request: MPTxRequest):
-        await self.enqueue_mp_transaction(mp_request)
-        sender = "0x" + mp_request.neon_tx.sender()
-        self._inc_pending_tx_counter(sender)
-        count = self.get_pending_trx_count(sender)
-        self.debug(f"On send tx request. Sender: {sender}, pending tx count: {count}")
-
-    async def enqueue_mp_transaction(self, mp_request: MPTxRequest):
-        tx_hash = mp_request.neon_tx.hash_signed().hex()
+    async def _schedule_mp_tx_request(self, mp_request: MPTxRequest):
         log_ctx = {"context": {"req_id": mp_request.req_id}}
         try:
-            self.debug(f"Got mp_tx_request: 0x{tx_hash} to be scheduled on the mempool", extra=log_ctx)
-            if len(self._req_queue) > MemPool.TX_QUEUE_MAX_SIZE:
-                self._req_queue = self._req_queue[-MemPool.TX_QUEUE_SIZE:]
-            bisect.insort_left(self._req_queue, mp_request)
-            await self._kick_tx_queue()
+            self._tx_schedule.add_mp_tx_request(mp_request)
+            count = self.get_pending_trx_count(mp_request.sender_address)
+            self.debug(f"Got and scheduled mp_tx_request: {mp_request.log_str}, pending in pool: {count}", extra=log_ctx)
         except Exception as err:
-            self.error(f"Failed enqueue tx: {tx_hash} into queue: {err}", extra=log_ctx)
+            self.error(f"Failed to schedule mp_tx_request: {mp_request.log_str}. Error: {err}", extra=log_ctx)
+        finally:
+            await self._kick_tx_schedule()

-    def get_pending_trx_count(self, sender: str):
-        return self._pending_trx_counters.get(sender, 0)
+    def get_pending_trx_count(self, sender_addr: str) -> int:
+        return self._tx_schedule.get_pending_trx_count(sender_addr)

-    async def process_tx_queue(self):
+    async def process_tx_schedule(self):
         while True:
-            async with self._req_queue_cond:
-                await self._req_queue_cond.wait()
-                if len(self._req_queue) == 0:
-                    self.debug("Tx queue empty - continue waiting for new")
-                    continue
-                if not self._executor.is_available():
-                    self.debug("No way to process tx - no available executor")
-                    continue
-                mp_request: MPRequest = self._req_queue.pop()
-                self.submit_request_to_executor(mp_request)
+            async with self._schedule_cond:
+                await self._schedule_cond.wait()
+                self.debug(f"Schedule processing woke up, condition: {self._schedule_cond.__repr__()}")
+                while self._executor.is_available():
+                    mp_request: MPTxRequest = self._tx_schedule.acquire_tx_for_execution()
+                    if mp_request is None:
+                        break
+
+                    try:
+                        log_ctx = {"context": {"req_id": mp_request.req_id}}
+                        self.debug(f"Got mp_tx_request from schedule: {mp_request.log_str}, senders left in schedule: {len(self._tx_schedule._sender_tx_pools)}", extra=log_ctx)
+                        self.submit_request_to_executor(mp_request)
+                    except Exception as err:
+                        self.error(f"Failed to enqueue mp_tx_request for execution: {mp_request.log_str}. Error: {err}")
Error: {err}") def submit_request_to_executor(self, mp_tx_request: MPRequest): resource_id, task = self._executor.submit_mp_request(mp_tx_request) @@ -78,73 +72,63 @@ async def check_processing_tasks(self): for resource_id, task, mp_request in self._processing_tasks: if not task.done(): not_finished_tasks.append((resource_id, task, mp_request)) - self._executor.release_resource(resource_id) continue exception = task.exception() if exception is not None: log_ctx = {"context": {"req_id": mp_request.req_id}} self.error(f"Exception during processing request: {exception} - tx will be dropped away", extra=log_ctx) - self._on_request_dropped_away(mp_request) + self._drop_request_away(mp_request) self._executor.release_resource(resource_id) continue mp_tx_result: MPTxResult = task.result() - assert isinstance(mp_tx_result, MPTxResult) - assert mp_tx_result.code != MPResultCode.Dummy + assert isinstance(mp_tx_result, MPTxResult), f"Got unexpected result: {mp_tx_result}" await self._process_mp_result(resource_id, mp_tx_result, mp_request) self._processing_tasks = not_finished_tasks await asyncio.sleep(MemPool.CHECK_TASK_TIMEOUT_SEC) async def _process_mp_result(self, resource_id: int, mp_tx_result: MPTxResult, mp_request: MPTxRequest): - tx_hash = "0x" + mp_request.neon_tx.hash_signed().hex() - log_ctx = {"context": {"req_id": mp_request.req_id}} - if mp_tx_result.code == MPResultCode.Done: - self.debug(f"Neon tx: {tx_hash} - processed on executor: {resource_id} - done", extra=log_ctx) - self._on_request_done(mp_request) - self._executor.release_resource(resource_id) - await self._kick_tx_queue() - return - self.warning(f"Failed to process tx: {tx_hash} - on executor: {resource_id}, status: {mp_tx_result} - reschedule", extra=log_ctx) - if mp_tx_result.code == MPResultCode.BlockedAccount: - self._executor.release_resource(resource_id) - await self.enqueue_mp_request(mp_request) - await self._kick_tx_queue() - elif mp_tx_result.code == MPResultCode.NoLiquidity: - self._executor.on_no_liquidity(resource_id) - await self.enqueue_mp_request(mp_request) - await self._kick_tx_queue() - elif mp_tx_result.code == MPResultCode.Unspecified: - self._executor.release_resource(resource_id) - self._on_request_dropped_away(mp_request) - await self._kick_tx_queue() + try: + log_fn = self.warning if mp_tx_result.code != MPResultCode.Done else self.debug + log_ctx = {"context": {"req_id": mp_request.req_id}} + log_fn(f"On mp tx result: {mp_tx_result} - of: {mp_request.log_str}", extra=log_ctx) + + if mp_tx_result.code == MPResultCode.BlockedAccount: + self._executor.release_resource(resource_id) + await self.enqueue_mp_request(mp_request) + elif mp_tx_result.code == MPResultCode.NoLiquidity: + self._executor.on_no_liquidity(resource_id) + await self.enqueue_mp_request(mp_request) + elif mp_tx_result.code == MPResultCode.Unspecified: + self._executor.release_resource(resource_id) + self._tx_schedule.drop_request_away(mp_request) + elif mp_tx_result.code == MPResultCode.Done: + self._on_request_done(mp_request) + self._executor.release_resource(resource_id) + except Exception as err: + self.error(f"Exception during the result processing: {err}", extra=log_ctx) + finally: + await self._kick_tx_schedule() def _on_request_done(self, tx_request: MPTxRequest): - sender = "0x" + tx_request.neon_tx.sender() - self._dec_pending_tx_counter(sender) + sender = tx_request.sender_address + self._tx_schedule.done(sender, tx_request.nonce) + count = self.get_pending_trx_count(sender) log_ctx = {"context": {"req_id": 
diff --git a/proxy/mempool/mempool_api.py b/proxy/mempool/mempool_api.py
index 46274b016..a94ea4269 100644
--- a/proxy/mempool/mempool_api.py
+++ b/proxy/mempool/mempool_api.py
@@ -20,6 +20,7 @@ def submit_mp_request(self, mp_reqeust: MPRequest) -> Tuple[int, Task]:
     def is_available(self) -> bool:
         pass

+    # TODO: drop it away
     @abstractmethod
     def on_no_liquidity(self, resource_id: int):
         pass
@@ -37,21 +38,27 @@ class MPRequestType(IntEnum):

 @dataclass(order=True)
 class MPRequest:
-    req_id: int
-    type: MPRequestType = field(default=MPRequestType.Dummy)
+    req_id: int = field(compare=False)
+    type: MPRequestType = field(compare=False, default=MPRequestType.Dummy)


-@dataclass
+@dataclass(eq=True, order=True)
 class MPTxRequest(MPRequest):
+    nonce: int = field(compare=True, default=None)
     signature: str = field(compare=False, default=None)
     neon_tx: NeonTx = field(compare=False, default=None)
     neon_tx_exec_cfg: NeonTxExecCfg = field(compare=False, default=None)
     emulating_result: NeonEmulatingResult = field(compare=False, default=None)
-    _gas_price: int = field(compare=True, default=None)
+    sender_address: str = field(compare=False, default=None)
+    gas_price: int = field(compare=False, default=None)

     def __post_init__(self):
-        self._gas_price = self.neon_tx.gasPrice
+        self.gas_price = self.neon_tx.gasPrice
+        self.nonce = self.neon_tx.nonce
+        self.sender_address = "0x" + self.neon_tx.sender()
         self.type = MPRequestType.SendTransaction
+        hash = "0x" + self.neon_tx.hash_signed().hex()
+        self.log_str = f"MPTxRequest(hash={hash[:10]}..., sender_address={self.sender_address[:10]}..., nonce={self.nonce}, gas_price={self.gas_price})"
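
Note how the dataclass splits its fields: everything in MPTxRequest is declared compare=False except nonce, so with eq=True, order=True two requests compare by nonce alone. That is exactly what lets the per-sender pool below keep its transaction list bisect-sorted and treat "same nonce, different gas price" as a replacement candidate. A cut-down illustration of that behavior; Item here is a hypothetical toy class, not the real request type:

    import bisect
    from dataclasses import dataclass, field

    @dataclass(eq=True, order=True)
    class Item:
        nonce: int = field(compare=True, default=None)
        payload: str = field(compare=False, default="")

    txs = []
    for it in (Item(2, "c"), Item(0, "a"), Item(1, "b")):
        bisect.insort_left(txs, it)

    assert [it.nonce for it in txs] == [0, 1, 2]
    assert Item(1, "x") == Item(1, "y")  # only nonce takes part in comparison
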
diff --git a/proxy/mempool/mempool_schedule.py b/proxy/mempool/mempool_schedule.py
new file mode 100644
index 000000000..c766fa926
--- /dev/null
+++ b/proxy/mempool/mempool_schedule.py
@@ -0,0 +1,189 @@
+import bisect
+from typing import List, Optional, Tuple
+
+from logged_groups import logged_group
+
+from .mempool_api import MPTxRequest
+
+
+@logged_group("neon.MemPool")
+class MPSenderTxPool:
+    def __init__(self, sender_address: str = None):
+        self.sender_address = sender_address
+        self._txs: List[MPTxRequest] = []
+        self._processing_tx: Optional[MPTxRequest] = None
+
+    def __eq__(self, other):
+        return self.first_tx_gas_price() == other.first_tx_gas_price()
+
+    def __lt__(self, other):
+        return self.first_tx_gas_price() > other.first_tx_gas_price()
+
+    def add_tx(self, mp_tx_request: MPTxRequest):
+
+        index = bisect.bisect_left(self._txs, mp_tx_request)
+        if self._processing_tx is not None and mp_tx_request.nonce == self._processing_tx.nonce:
+            self.warning(f"Failed to replace processing tx: {self._processing_tx.log_str} with: {mp_tx_request.log_str}")
+            return
+
+        found: MPTxRequest = self._txs[index] if index < len(self._txs) else None
+        if found is not None and found.nonce == mp_tx_request.nonce:
+            self.debug(f"Nonces are equal: {found.nonce}, found: {found.log_str}, new: {mp_tx_request.log_str}")
+            if found.gas_price < mp_tx_request.gas_price:
+                self._txs[index] = mp_tx_request
+            return
+        self._txs.insert(index, mp_tx_request)
+        self.debug(f"New mp_tx_request: {mp_tx_request.log_str} - inserted at: {index}")
+
+    def get_tx(self):
+        return None if self.is_empty() else self._txs[0]
+
+    def acquire_tx(self):
+        if self.is_processing():
+            return None
+        self._processing_tx = self.get_tx()
+        return self._processing_tx
+
+    def on_processed(self, tx: MPTxRequest):
+        assert tx == self._processing_tx, f"tx: {tx.log_str} != processing_tx: {self._processing_tx.log_str}"
+        self._processing_tx = None
+        self._txs.remove(tx)
+
+    def len(self) -> int:
+        return len(self._txs)
+
+    def first_tx_gas_price(self):
+        tx = self.get_tx()
+        return tx.gas_price if tx is not None else 0
+
+    def on_tx_done(self, nonce: int):
+        if self._processing_tx is None:
+            self.error(f"Failed to finish tx with nonce: {nonce}, processing tx is None")
+            return
+        if self._processing_tx.nonce != nonce:
+            self.error(f"Failed to finish tx, processing tx has different nonce: {self._processing_tx.nonce} than: {nonce}")
+            return
+        self._txs.remove(self._processing_tx)
+        self.debug(f"On tx done: {self._processing_tx.log_str} - removed. {self.len()} txs are left")
+        self._processing_tx = None
+
+    def is_empty(self) -> bool:
+        return self.len() == 0
+
+    def is_processing(self) -> bool:
+        return self._processing_tx is not None
+
+    def drop_last_request(self):
+        if self.is_empty():
+            self.error("Failed to drop last request from empty sender tx pool")
+            return
+        if self._processing_tx is self._txs[-1]:
+            self.warning(f"Failed to drop last request away: {self._processing_tx.log_str} - processing")
+            return
+        self.debug(f"Remove last mp_tx_request from sender: {self.sender_address} - {self._txs[-1].log_str}")
+        self._txs = self._txs[:-1]
+
+    def drop_request_away(self, mp_tx_request: MPTxRequest):
+        self.debug(f"Remove mp_tx_request: {mp_tx_request.log_str}")
+        nonce = mp_tx_request.nonce
+        if self._processing_tx is not None and self._processing_tx.nonce == nonce:
+            self._processing_tx = None
+        index = bisect.bisect_left(self._txs, mp_tx_request)
+        if index == len(self._txs) or self._txs[index].nonce != nonce:
+            self.error(f"Failed to drop request away for: {self.sender_address}, no request with nonce: {nonce}")
+            return
+        self._txs = self._txs[index:]
+        self.debug(f"Removed mp_tx_request from sender: {self.sender_address} - {mp_tx_request.log_str}")
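+
+# Note on the ordering above: a sender pool keeps its transactions bisect-sorted
+# by nonce (MPTxRequest compares by nonce only), while pools themselves compare
+# by the gas price of their first pending tx. __lt__ is deliberately inverted -
+# a pool with a HIGHER first_tx_gas_price() sorts as "smaller" - so
+# bisect.insort_left() in MPTxSchedule below keeps the best-paying sender at
+# the head of the list.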
+
+
+@logged_group("neon.MemPool")
+class MPTxSchedule:
+
+    def __init__(self, capacity: int) -> None:
+        self._capacity = capacity
+        self._sender_tx_pools: List[MPSenderTxPool] = []
+
+    def _pop_sender_txs(self, sender_address: str) -> Optional[MPSenderTxPool]:
+        for i, sender_tx_pool in enumerate(self._sender_tx_pools):
+            if sender_tx_pool.sender_address != sender_address:
+                continue
+            return self._sender_tx_pools.pop(i)
+        return None
+
+    def _get_sender_txs(self, sender_address: str) -> Tuple[Optional[MPSenderTxPool], int]:
+        for i, sender in enumerate(self._sender_tx_pools):
+            if sender.sender_address != sender_address:
+                continue
+            return sender, i
+        return None, -1
+
+    def add_mp_tx_request(self, mp_tx_request: MPTxRequest):
+        self.debug(f"Add mp_tx_request: {mp_tx_request.log_str}")
+        sender_txs = self._pop_sender_or_create(mp_tx_request.sender_address)
+        self.debug(f"Got collection for sender: {mp_tx_request.sender_address}, there are already txs: {sender_txs.len()}")
+        sender_txs.add_tx(mp_tx_request)
+        bisect.insort_left(self._sender_tx_pools, sender_txs)
+
+        self._check_oversized_and_reduce()
+
+    def get_mp_tx_count(self):
+        count = 0
+        for sender_txs in self._sender_tx_pools:
+            count += sender_txs.len()
+        return count
+
+    def _check_oversized_and_reduce(self):
+        count = self.get_mp_tx_count()
+        tx_to_remove = count - self._capacity
+        sender_to_remove = []
+        for sender in self._sender_tx_pools[::-1]:
+            if tx_to_remove <= 0:
+                break
+            sender.drop_last_request()
+            tx_to_remove -= 1
+            if sender.len() == 1 and sender.is_processing():
+                continue
+            if sender.is_empty():
+                sender_to_remove.append(sender)
+        for sender in sender_to_remove:
+            self._sender_tx_pools.remove(sender)
+
+    def _pop_sender_or_create(self, sender_address: str) -> MPSenderTxPool:
+        sender = self._pop_sender_txs(sender_address)
+        return MPSenderTxPool(sender_address=sender_address) if sender is None else sender
+
+    def acquire_tx_for_execution(self) -> Optional[MPTxRequest]:
+
+        if len(self._sender_tx_pools) == 0:
+            return None
+
+        tx: Optional[MPTxRequest] = None
+        for sender_txs in self._sender_tx_pools:
+            if sender_txs.is_processing():
+                continue
+            tx = sender_txs.acquire_tx()
+            break
+
+        return tx
+
+    def done(self, sender_addr: str, nonce: int):
+        sender = self._pop_sender_txs(sender_addr)
+        if sender is None:
self.error(f"Failed to make tx done, address: {sender_addr}, nonce: {nonce} - sender not found") + return + sender.on_tx_done(nonce) + if not sender.is_empty(): + bisect.insort_left(self._sender_tx_pools, sender) + + def get_pending_trx_count(self, sender_addr: str) -> int: + sender, _ = self._get_sender_txs(sender_addr) + return 0 if sender is None else sender.len() + + def drop_request_away(self, mp_tx_reqeust: MPTxRequest): + sender, i = self._get_sender_txs(mp_tx_reqeust.sender_address) + if sender is None: + self.warning(f"Failed drop request, no sender by sender_address: {mp_tx_reqeust.sender_address}") + return + sender.drop_request_away(mp_tx_reqeust) + if sender.len() == 0: + self.sender_tx_pools.pop(i) diff --git a/proxy/mempool/mempool_service.py b/proxy/mempool/mempool_service.py index 35e6c1131..896cd5ec3 100644 --- a/proxy/mempool/mempool_service.py +++ b/proxy/mempool/mempool_service.py @@ -7,11 +7,11 @@ from ..common_neon.config import IConfig from .mempool import MemPool -from .executor_mng import MPExecutorMng +from .executor_mng import MPExecutorMng, IMPExecutorMngUser @logged_group("neon.MemPool") -class MPService(IPickableDataServerUser): +class MPService(IPickableDataServerUser, IMPExecutorMngUser): MP_SERVICE_PORT = 9091 MP_SERVICE_HOST = "0.0.0.0" @@ -35,6 +35,10 @@ async def on_data_received(self, data: Any) -> Any: def run(self): self._mempool_server = AddrPickableDataSrv(user=self, address=(self.MP_SERVICE_HOST, self.MP_SERVICE_PORT)) - self._mp_executor_mng = MPExecutorMng(self.EXECUTOR_COUNT, self._config) + self._mp_executor_mng = MPExecutorMng(self, self.EXECUTOR_COUNT, self._config) self._mempool = MemPool(self._mp_executor_mng) + self.event_loop.run_until_complete(self._mp_executor_mng.async_init()) self.event_loop.run_forever() + + def on_resource_released(self, resource_id: int): + self._mempool.on_resource_got_available(resource_id) diff --git a/proxy/neon_rpc_api_model/neon_rcp_api_worker.py b/proxy/neon_rpc_api_model/neon_rcp_api_worker.py index 0f6cc24bc..b4e4de513 100644 --- a/proxy/neon_rpc_api_model/neon_rcp_api_worker.py +++ b/proxy/neon_rpc_api_model/neon_rcp_api_worker.py @@ -388,13 +388,14 @@ def eth_getTransactionCount(self, account: str, tag: str) -> str: try: self.debug(f"Get transaction count. 
Account: {account}, tag: {tag}") + neon_account_info = self._solana.get_neon_account_info(account) + pending_trx_count = 0 if tag == "pending": req_id = LogMng.get_logging_context().get("req_id") pending_trx_count = self._mempool_client.get_pending_tx_count(req_id=req_id, sender=account) self.debug(f"Pending tx count for: {account} - is: {pending_trx_count}") - neon_account_info = self._solana.get_neon_account_info(account) trx_count = neon_account_info.trx_count + pending_trx_count return hex(trx_count) @@ -500,14 +501,16 @@ def eth_sendRawTransaction(self, rawTrx: str) -> str: emulating_result=emulating_result) return eth_signature - except PendingTxError as err: + except PendingTxError: self._stat_tx_failed() - self.debug(f'{err}') + self.error(f'Failed to process eth_sendRawTransaction, PendingTxError') return eth_signature - except EthereumError: + except EthereumError as err: + self.error(f'Failed to process eth_sendRawTransaction, EthereumError: {err}') self._stat_tx_failed() raise - except Exception: + except Exception as err: + self.error(f"Failed to process eth_sendRawTransaction, Error: {err}") self._stat_tx_failed() raise diff --git a/proxy/neon_rpc_api_model/transaction_validator.py b/proxy/neon_rpc_api_model/transaction_validator.py index 543758694..d85be1b23 100644 --- a/proxy/neon_rpc_api_model/transaction_validator.py +++ b/proxy/neon_rpc_api_model/transaction_validator.py @@ -75,7 +75,6 @@ def precheck(self) -> Tuple[NeonTxExecCfg, NeonEmulatingResult]: def _prevalidate_tx(self): self._prevalidate_whitelist() - self._prevalidate_tx_nonce() self._prevalidate_tx_gas() self._prevalidate_tx_chain_id() self._prevalidate_tx_size() @@ -107,6 +106,7 @@ def _prevalidate_tx_gas(self): raise EthereumError(message='gas uint64 overflow') if (self._tx_gas_limit * self._tx.gasPrice) > (self.MAX_U256 - 1): raise EthereumError(message='max fee per gas higher than 2^256-1') + if self._tx.gasPrice >= self._min_gas_price: return @@ -123,17 +123,6 @@ def _prevalidate_tx_size(self): if len(self._tx.callData) > (128 * 1024 - 1024): raise EthereumError(message='transaction size is too big') - def _prevalidate_tx_nonce(self): - if not self._neon_account_info: - return - - tx_nonce = int(self._tx.nonce) - if self.MAX_U64 not in (self._neon_account_info.trx_count, tx_nonce): - if tx_nonce == self._neon_account_info.trx_count: - return - - self._raise_nonce_error(self._neon_account_info.trx_count, tx_nonce) - def _prevalidate_sender_eoa(self): if not self._neon_account_info: return diff --git a/proxy/testing/test_eth_sendRawTransaction.py b/proxy/testing/test_eth_sendRawTransaction.py index 72c25f0d3..7bd4ed43a 100644 --- a/proxy/testing/test_eth_sendRawTransaction.py +++ b/proxy/testing/test_eth_sendRawTransaction.py @@ -219,34 +219,6 @@ def test_03_execute_with_low_gas(self): message = 'gas limit reached' self.assertEqual(response['message'][:len(message)], message) - # @unittest.skip("a.i.") - def test_04_execute_with_bad_nonce(self): - test_nonce_list = [ - ('grade_up_one', 1, 'nonce too high:'), - ('grade_down_one', -1, 'nonce too low: ') - ] - for name, offset, message in test_nonce_list: - with self.subTest(name=name): - print("\ntest_04_execute_with_bad_nonce {} offsets".format(offset)) - bad_nonce = offset + proxy.eth.get_transaction_count(proxy.eth.default_account) - trx_store = self.storage_contract.functions.store(147).buildTransaction({'nonce': bad_nonce}) - print('trx_store:', trx_store) - trx_store_signed = proxy.eth.account.sign_transaction(trx_store, eth_account.key) - 
print('trx_store_signed:', trx_store_signed) - try: - trx_store_hash = proxy.eth.send_raw_transaction(trx_store_signed.rawTransaction) - print('trx_store_hash:', trx_store_hash) - self.assertTrue(False) - except Exception as e: - print('type(e):', type(e)) - print('e:', e) - response = json.loads(str(e).replace('\'', '\"').replace('None', 'null')) - print('response:', response) - print('code:', response['code']) - self.assertEqual(response['code'], -32002) - print('message:', response['message']) - self.assertEqual(response['message'][:len(message)], message) - # @unittest.skip("a.i.") def test_05_transfer_one_gwei(self): print("\ntest_05_transfer_one_gwei") diff --git a/proxy/testing/test_mempool.py b/proxy/testing/test_mempool.py new file mode 100644 index 000000000..43c5e9b5f --- /dev/null +++ b/proxy/testing/test_mempool.py @@ -0,0 +1,339 @@ +from __future__ import annotations + +import asyncio +import logging +from random import randint + +import secrets + +from web3 import Web3, Account +from typing import Tuple, Any, List, Dict + +import unittest +from unittest.mock import patch, MagicMock, call + + +from ..mempool.mempool import MemPool, IMPExecutor +from ..mempool.mempool_api import NeonTxExecCfg, MPRequest, MPTxRequest +from ..mempool.mempool_schedule import MPTxSchedule, MPSenderTxPool +from ..common_neon.eth_proto import Trx as NeonTx + +from ..mempool.mempool_api import MPTxResult, MPResultCode + + +def create_account() -> Account: + private_key = "0x" + secrets.token_hex(32) + return Account.from_key(private_key) + + +def get_transfer_mp_request(*, req_id: str, nonce: int, gas: int, gasPrice: int, from_acc: Account = None, + to_acc: Account = None, value: int = 0, data: bytes = b'') -> MPTxRequest: + if from_acc is None: + from_acc = create_account() + + if to_acc is None: + to_acc = create_account() + to_addr = to_acc.address + w3 = Web3() + signed_tx_data = w3.eth.account.sign_transaction( + dict(nonce=nonce, chainId=111, gas=gas, gasPrice=gasPrice, to=to_addr, value=value, data=data), + from_acc.key + ) + signature = signed_tx_data.hash.hex() + neon_tx = NeonTx.fromString(bytearray(signed_tx_data.rawTransaction)) + tx_cfg = NeonTxExecCfg(is_underpriced_tx_without_chainid=False, steps_executed=100) + mp_tx_request = MPTxRequest(req_id=req_id, signature=signature, neon_tx=neon_tx, neon_tx_exec_cfg=tx_cfg, + emulating_result=dict()) + return mp_tx_request + + +class MockTask: + + def __init__(self, result: Any, is_done: bool = True, exception: Exception = None): + self._result = result + self._is_done = is_done + self._exception = exception + + def done(self): + return self._is_done + + def result(self): + return self._result + + def exception(self): + return self._exception + + +class MockMPExecutor(IMPExecutor): + + def submit_mp_request(self, mp_reqeust: MPRequest) -> Tuple[int, MockTask]: + return 1, MockTask(MPTxResult(MPResultCode.Done, None)) + + def is_available(self) -> bool: + return False + + def on_no_liquidity(self, resource_id: int): + pass + + def release_resource(self, resource_id: int): + pass + + +class TestMemPool(unittest.IsolatedAsyncioTestCase): + + @classmethod + def setUpClass(cls) -> None: + cls.turn_logger_off() + + @classmethod + def turn_logger_off(cls) -> None: + neon_logger = logging.getLogger("neon.MemPool") + neon_logger.setLevel(logging.ERROR) + + async def asyncSetUp(self): + self._executor = MockMPExecutor() + self._mempool = MemPool(self._executor) + + @patch.object(MockMPExecutor, "submit_mp_request") + @patch.object(MockMPExecutor, 
"is_available", return_value=True) + async def test_single_sender_single_tx(self, is_available_mock: MagicMock, submit_mp_request_mock: MagicMock): + """Checks if an enqueued mp_tx_request gets in effect""" + mp_tx_request = get_transfer_mp_request(req_id="0000001", nonce=0, gasPrice=30000, gas=987654321, value=1, data=b'') + await self._mempool.enqueue_mp_request(mp_tx_request) + await asyncio.sleep(0) + + submit_mp_request_mock.assert_called_once() + submit_mp_request_mock.assert_called_with(mp_tx_request) + + @patch.object(MockMPExecutor, "submit_mp_request", return_value=(1, MockTask(MPTxResult(MPResultCode.Done, None)))) + @patch.object(MockMPExecutor, "is_available", return_value=False) + async def test_single_sender_couple_txs(self, is_available_mock: MagicMock, submit_mp_request_mock: MagicMock): + """Checks if an enqueued mp_tx_requests get in effect in the right order""" + from_acc = create_account() + to_acc = create_account() + req_data = [dict(req_id="0000000", nonce=0, gasPrice=30000, gas=987654321, value=1, from_acc=from_acc, to_acc=to_acc), + dict(req_id="0000001", nonce=1, gasPrice=29000, gas=987654321, value=1, from_acc=from_acc, to_acc=to_acc)] + requests = await self._enqueue_requests(req_data) + await asyncio.sleep(0) + submit_mp_request_mock.assert_not_called() + is_available_mock.return_value = True + self._mempool.on_resource_got_available(1) + await asyncio.sleep(MemPool.CHECK_TASK_TIMEOUT_SEC * 10) + + submit_mp_request_mock.assert_has_calls([call(requests[0]), call(requests[1])]) + + @patch.object(MockMPExecutor, "submit_mp_request", return_value=(1, MockTask(MPTxResult(MPResultCode.Done, None)))) + @patch.object(MockMPExecutor, "is_available", return_value=False) + async def test_2_senders_4_txs(self, is_available_mock: MagicMock, submit_mp_request_mock: MagicMock): + """Checks if an enqueued mp_tx_request from different senders gets in effect in the right order""" + acc = [create_account() for i in range(3)] + req_data = [dict(req_id="000", nonce=0, gasPrice=30000, gas=1000, value=1, from_acc=acc[0], to_acc=acc[2]), + dict(req_id="001", nonce=1, gasPrice=21000, gas=1000, value=1, from_acc=acc[0], to_acc=acc[2]), + dict(req_id="002", nonce=0, gasPrice=40000, gas=1000, value=1, from_acc=acc[1], to_acc=acc[2]), + dict(req_id="003", nonce=1, gasPrice=25000, gas=1000, value=1, from_acc=acc[1], to_acc=acc[2])] + requests = await self._enqueue_requests(req_data) + is_available_mock.return_value = True + self._mempool.on_resource_got_available(1) + await asyncio.sleep(MemPool.CHECK_TASK_TIMEOUT_SEC * 2) + + submit_mp_request_mock.assert_has_calls([call(requests[2]), call(requests[0]), call(requests[3]), call(requests[1])]) + + @patch.object(MockMPExecutor, "submit_mp_request") + @patch.object(MockMPExecutor, "is_available") + async def test_mp_waits_for_previous_tx_done(self, is_available_mock: MagicMock, submit_mp_request_mock: MagicMock): + """Checks if an enqueued mp_tx_request waits for the previous one from the same sender""" + submit_mp_request_mock.return_value = (1, MockTask(None, is_done=False)) + is_available_mock.return_value = False + acc_0 = create_account() + acc_1 = create_account() + req_data = [dict(req_id="000", nonce=0, gasPrice=10000, gas=1000, value=1, from_acc=acc_0, to_acc=acc_1), + dict(req_id="001", nonce=1, gasPrice=10000, gas=1500, value=2, from_acc=acc_0, to_acc=acc_1)] + requests = await self._enqueue_requests(req_data) + is_available_mock.return_value = True + for i in range(2): + await asyncio.sleep(MemPool.CHECK_TASK_TIMEOUT_SEC) + 
+            self._mempool.on_resource_got_available(1)
+        submit_mp_request_mock.assert_called_once_with(requests[0])
+
+    @patch.object(MockMPExecutor, "submit_mp_request")
+    @patch.object(MockMPExecutor, "is_available")
+    async def test_subst_with_higher_gas_price(self, is_available_mock: MagicMock, submit_mp_request_mock: MagicMock):
+        """Checks if the transaction with the same nonce but the higher gasPrice substitutes the current one"""
+        from_acc = create_account()
+        base_request = get_transfer_mp_request(req_id="0", from_acc=from_acc, nonce=0, gasPrice=30000, gas=987654321, value=1, data=b'')
+        await self._mempool._schedule_mp_tx_request(base_request)
+        subst_request = get_transfer_mp_request(req_id="1", from_acc=from_acc, nonce=0, gasPrice=40000, gas=987654321, value=2, data=b'')
+        await self._mempool._schedule_mp_tx_request(subst_request)
+        is_available_mock.return_value = True
+        self._mempool.on_resource_got_available(1)
+        await asyncio.sleep(0)
+        submit_mp_request_mock.assert_called_once()
+        submit_mp_request_mock.assert_called_with(subst_request)
+
+    @patch.object(MockMPExecutor, "submit_mp_request")
+    @patch.object(MockMPExecutor, "is_available")
+    async def test_subst_with_lower_gas_price(self, is_available_mock: MagicMock, submit_mp_request_mock: MagicMock):
+        """Checks if the transaction with the same nonce but the lower gasPrice is ignored"""
+        from_acc = create_account()
+        base_request = get_transfer_mp_request(req_id="0", from_acc=from_acc, nonce=0, gasPrice=40000, gas=987654321, value=1, data=b'')
+        await self._mempool._schedule_mp_tx_request(base_request)
+        subst_request = get_transfer_mp_request(req_id="1", from_acc=from_acc, nonce=0, gasPrice=30000, gas=987654321, value=2, data=b'')
+        await self._mempool._schedule_mp_tx_request(subst_request)
+        is_available_mock.return_value = True
+        self._mempool.on_resource_got_available(1)
+        await asyncio.sleep(0)
+        submit_mp_request_mock.assert_called_once()
+        submit_mp_request_mock.assert_called_with(base_request)
+
+    @patch.object(MockMPExecutor, "is_available")
+    async def test_check_pending_tx_count(self, is_available_mock: MagicMock):
+        """Checks that all incoming mp_tx_requests that are not yet processed are counted as pending"""
+        acc = [create_account() for i in range(3)]
+        req_data = [dict(req_id="000", nonce=0, gasPrice=30000, gas=1000, value=1, from_acc=acc[0], to_acc=acc[2]),
+                    dict(req_id="001", nonce=1, gasPrice=21000, gas=1000, value=1, from_acc=acc[0], to_acc=acc[2]),
+                    dict(req_id="002", nonce=0, gasPrice=40000, gas=1000, value=1, from_acc=acc[1], to_acc=acc[2]),
+                    dict(req_id="003", nonce=1, gasPrice=25000, gas=1000, value=1, from_acc=acc[1], to_acc=acc[2]),
+                    dict(req_id="004", nonce=2, gasPrice=25000, gas=1000, value=1, from_acc=acc[1], to_acc=acc[2])]
+        requests = await self._enqueue_requests(req_data)
+        acc_0_count = self._mempool.get_pending_trx_count(requests[0].sender_address)
+        self.assertEqual(acc_0_count, 2)
+        acc_1_count = self._mempool.get_pending_trx_count(requests[3].sender_address)
+        self.assertEqual(acc_1_count, 3)
+        is_available_mock.return_value = True
+        self._mempool.on_resource_got_available(1)
+        await asyncio.sleep(MemPool.CHECK_TASK_TIMEOUT_SEC)
+        acc_1_count = self._mempool.get_pending_trx_count(requests[3].sender_address)
+        self.assertEqual(acc_1_count, 2)
+
+    @patch.object(MockMPExecutor, "submit_mp_request", return_value=(1, MockTask(MPTxResult(MPResultCode.Done, None))))
+    @patch.object(MockMPExecutor, "is_available")
+    async def test_over_9000_transfers(self, is_available_mock: MagicMock, submit_mp_request_mock: MagicMock):
+        """Checks that all mp_tx_requests are processed by the MemPool"""
+        acc_count_max = 1_000
+        from_acc_count = 10
+        sleep_sec = 2
+        nonce_count = 100
+        req_count = from_acc_count * nonce_count
+        acc = [create_account() for i in range(acc_count_max)]
+        for acc_i in range(0, from_acc_count):
+            nonces = [i for i in range(0, nonce_count)]
+            while len(nonces) > 0:
+                index = randint(0, len(nonces) - 1)
+                nonce = nonces.pop(index)
+                request = get_transfer_mp_request(from_acc=acc[acc_i], to_acc=acc[randint(0, acc_count_max-1)],
+                                                  req_id=str(acc_i) + " " + str(nonce), nonce=nonce,
+                                                  gasPrice=randint(50000, 100000), gas=randint(4000, 10000))
+                await self._mempool.enqueue_mp_request(request)
+        is_available_mock.return_value = True
+        self._mempool.on_resource_got_available(1)
+        await asyncio.sleep(sleep_sec)
+        for ac in acc[:from_acc_count]:
+            acc_nonce = 0
+            for call in submit_mp_request_mock.call_args_list:
+                request = call.args[0]
+                if ac.address.lower() == request.sender_address:
+                    self.assertEqual(request.nonce, acc_nonce)
+                    acc_nonce += 1
+
+        self.assertEqual(submit_mp_request_mock.call_count, req_count)
+
+    async def _enqueue_requests(self, req_data: List[Dict[str, Any]]) -> List[MPTxRequest]:
+        requests = [get_transfer_mp_request(**req) for req in req_data]
+        for req in requests:
+            await self._mempool.enqueue_mp_request(req)
+        return requests
+
+
+class TestMPSchedule(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls) -> None:
+        cls.turn_logger_off()
+
+    @classmethod
+    def turn_logger_off(cls) -> None:
+        neon_logger = logging.getLogger("neon.MemPool")
+        neon_logger.setLevel(logging.ERROR)
+
+    def test_capacity_oversized_simple(self):
+        """Checks that mp_schedule doesn't get oversized in a simple scenario"""
+        mp_schedule_capacity = 3
+        schedule = MPTxSchedule(mp_schedule_capacity)
+        acc = [create_account() for i in range(3)]
+        req_data = [dict(req_id="000", nonce=0, gasPrice=30000, gas=1000, value=1, from_acc=acc[0], to_acc=acc[1]),
+                    dict(req_id="001", nonce=0, gasPrice=25000, gas=1000, value=1, from_acc=acc[1], to_acc=acc[2]),
+                    dict(req_id="002", nonce=1, gasPrice=30000, gas=1000, value=1, from_acc=acc[0], to_acc=acc[2]),
+                    dict(req_id="003", nonce=1, gasPrice=25000, gas=1000, value=1, from_acc=acc[1], to_acc=acc[1]),
+                    dict(req_id="004", nonce=2, gasPrice=25000, gas=1000, value=1, from_acc=acc[1], to_acc=acc[2]),
+                    dict(req_id="005", nonce=0, gasPrice=50000, gas=1000, value=1, from_acc=acc[2], to_acc=acc[1]),
+                    dict(req_id="006", nonce=1, gasPrice=50000, gas=1000, value=1, from_acc=acc[2], to_acc=acc[1]),
+                    ]
+        self.requests = [get_transfer_mp_request(**req) for req in req_data]
+        for request in self.requests:
+            schedule.add_mp_tx_request(request)
+        self.assertEqual(2, len(schedule._sender_tx_pools))
+        self.assertEqual(1, schedule.get_pending_trx_count(acc[0].address.lower()))
+        self.assertEqual(0, schedule.get_pending_trx_count(acc[1].address.lower()))
+        self.assertEqual(2, schedule.get_pending_trx_count(acc[2].address.lower()))
+
+    def test_capacity_oversized(self):
+        """Checks that mp_schedule doesn't get oversized with quite a big set of mp_tx_requests"""
+
+        acc_count_max = 10
+        from_acc_count = 5
+        nonce_count = 1000
+        mp_schedule_capacity = 4000
+        schedule = MPTxSchedule(mp_schedule_capacity)
+        acc = [create_account() for i in range(acc_count_max)]
+        for acc_i in range(0, from_acc_count):
+            nonces = [i for i in range(0, nonce_count)]
+            while len(nonces) > 0:
+                index = randint(0, len(nonces) - 1)
+                nonce = nonces.pop(index)
+                request = get_transfer_mp_request(from_acc=acc[acc_i], to_acc=acc[randint(0, acc_count_max-1)],
+                                                  req_id=str(acc_i) + " " + str(nonce), nonce=nonce_count - nonce - 1,
+                                                  gasPrice=randint(50000, 100000), gas=randint(4000, 10000))
+                schedule.add_mp_tx_request(request)
+        self.assertEqual(mp_schedule_capacity, schedule.get_mp_tx_count())
+
+
+class TestMPSenderTxPool(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls) -> None:
+        cls.turn_logger_off()
+
+    @classmethod
+    def turn_logger_off(cls) -> None:
+        neon_logger = logging.getLogger("neon.MemPool")
+        neon_logger.setLevel(logging.ERROR)
+
+    def setUp(self) -> None:
+        self._pool = MPSenderTxPool()
+        acc = [create_account() for i in range(2)]
+        req_data = [dict(req_id="000", nonce=3, gasPrice=30000, gas=1000, value=1, from_acc=acc[0], to_acc=acc[1]),
+                    dict(req_id="001", nonce=1, gasPrice=21000, gas=1000, value=1, from_acc=acc[0], to_acc=acc[1]),
+                    dict(req_id="002", nonce=0, gasPrice=40000, gas=1000, value=1, from_acc=acc[0], to_acc=acc[1]),
+                    dict(req_id="003", nonce=2, gasPrice=25000, gas=1000, value=1, from_acc=acc[0], to_acc=acc[1]),
+                    dict(req_id="004", nonce=4, gasPrice=25000, gas=1000, value=1, from_acc=acc[0], to_acc=acc[1])]
+        self._requests = [get_transfer_mp_request(**req) for req in req_data]
+        for request in self._requests:
+            self._pool.add_tx(request)
+
+    def test_drop_last_request(self):
+        """Checks if the transaction pool drops the request with the highest nonce properly"""
+        self._pool.drop_last_request()
+        self.assertEqual(self._pool.len(), 4)
+        self.assertEqual(self._pool.get_tx(), self._requests[2])
+        self.assertEqual(self._pool._txs[-1], self._requests[0])
+
+    def test_drop_last_request_if_processing(self):
+        """Checks that the transaction pool doesn't drop the request with the highest nonce if it is being processed"""
+        tx = self._pool.acquire_tx()
+        self.assertIs(tx, self._requests[2])
+        with self.assertLogs("neon.MemPool", logging.WARNING) as logs:
+            for i in range(0, 5):
+                self._pool.drop_last_request()
+            self.assertEqual(1, len(logs.records))
+            self.assertEqual(f"Failed to drop last request away: {tx.log_str} - processing", logs.records[0].msg)
+
diff --git a/proxy/testing/test_pickable_data_transfer.py b/proxy/testing/test_pickable_data_transfer.py
new file mode 100644
index 000000000..e69de29bb

From 5c8f4d30b0fc3ff10db3a3a5bb2a8d59cd7dc691 Mon Sep 17 00:00:00 2001
From: Rozhkov Dmitrii
Date: Wed, 22 Jun 2022 23:04:06 +0400
Subject: [PATCH 11/11] #807 Network problem solving

---
 .buildkite/pipeline.yml                       |   2 +-
 log_cfg.json                                  |   4 +-
 .../common_neon/utils/pickable_data_server.py | 110 +++++++-----------
 proxy/mempool/mempool.py                      |   2 +-
 proxy/mempool/mempool_client.py               |  57 ++++++++-
 5 files changed, 100 insertions(+), 75 deletions(-)

diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index ce1fef429..eadca2688 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -9,7 +9,7 @@ steps:
   - label: ":terraform: build infrastructure"
     key: "create_infrastructure"
     if: &is_fts_enabled |
-      (build.pull_request.base_branch == "develop" && !build.pull_request.draft) ||
+      (build.pull_request.base_branch == "712-mempool" && !build.pull_request.draft) ||
       (build.source == "trigger_job" && build.env("NEON_EVM_FULL_TEST_SUITE") == "true")
     agents:
       queue: "testing"
diff --git a/log_cfg.json b/log_cfg.json
index 836b78b1a..d30001b66 100644
--- a/log_cfg.json
+++ b/log_cfg.json
@@ -36,7 +36,7 @@
   },
   "loggers": {
     "neon": {
-      "level": "ERROR",
+      "level": "DEBUG",
       "handlers": ["standard"],
       "propagate": false
}, "neon.MemPool": { - "level": "DEBUG", + "level": "DEBUG", "handlers": ["standard"], "propagate": false }, diff --git a/proxy/common_neon/utils/pickable_data_server.py b/proxy/common_neon/utils/pickable_data_server.py index 1172b48b0..b8efc2d56 100644 --- a/proxy/common_neon/utils/pickable_data_server.py +++ b/proxy/common_neon/utils/pickable_data_server.py @@ -16,13 +16,6 @@ async def on_data_received(self, data: Any) -> Any: """Gets neon_tx_data from the neon rpc api service worker""" -def encode_pickable(object, logger) -> bytes: - data = pickle.dumps(object) - len_data = struct.pack("!I", len(data)) - logger.debug(f"Len data: {len(len_data)} - bytes, data: {len(data)} - bytes") - return len_data + data - - @logged_group("neon.Network") class PickableDataServer(ABC): @@ -37,10 +30,10 @@ async def run_server(self): async def handle_client(self, reader: StreamReader, writer: StreamWriter): while True: try: - self.debug("Recv pickable data") + self.debug("Got incoming connection. Waiting for pickable data") data = await self._recv_pickable_data(reader) result = await self._user.on_data_received(data) - self.debug(f"Encode pickable result: {result}") + self.debug(f"Encode pickable result_data: {result}") result_data = encode_pickable(result, self) self.debug(f"Send result_data: {len(result_data)}, bytes: {result_data.hex()}") writer.write(result_data) @@ -52,27 +45,16 @@ async def handle_client(self, reader: StreamReader, writer: StreamWriter): self.error(f"Incomplete read error: {err}") break except Exception as err: - self.error(f"Failed to receive data err: {err}, {err.__traceback__.tb_next.tb_frame}, type: {type(err)}") + self.error(f"Failed to receive data err: {err}") break async def _recv_pickable_data(self, reader: StreamReader): - len_packed: bytes = await reader.read(4) - if len(len_packed) == 0: - self.error("Got empty len_packed") - raise ConnectionResetError() + len_packed: bytes = await read_data_async(self, reader, 4) payload_len = struct.unpack("!I", len_packed)[0] self.debug(f"Got payload len_packed: {len_packed.hex()}, that is: {payload_len}") - payload = b'' - while len(payload) < payload_len: - to_be_read = payload_len - len(payload) - self.debug(f"Reading chunk of: {to_be_read} of: {payload_len} - bytes") - chunk = payload + await reader.read(to_be_read) - self.debug(f"Got chunk of data: {len(chunk)}") - payload += chunk - self.debug(f"Got payload data: {len(payload)}. 
Load pickled object") + payload = await read_data_async(self, reader, payload_len) data = pickle.loads(payload) self.debug(f"Loaded pickable of type: {type(data)}") - return data @@ -101,17 +83,6 @@ async def run_server(self): await self.handle_client(reader, writer) -async def read_data(self, reader: StreamReader, data_len: int): - data = b'' - while len(data) < data_len: - to_be_read = data_len - len(data) - self.debug(f"Reading answer data: {to_be_read} of: {data_len} - bytes") - chunk = await reader.read(to_be_read) - self.debug(f"Got chunk of answer data: {len(chunk)}") - data += chunk - return data - - class PickableDataClient: def __init__(self): @@ -123,40 +94,26 @@ def _set_client_sock(self, client_sock: socket.socket): self._client_sock = client_sock async def async_init(self): - self.info("Async init on client") + self.info("Async init pickable data client") reader, writer = await asyncio.open_connection(sock=self._client_sock) self._reader = reader self._writer = writer - self.info(f"_reader: {reader}, _writer: {writer}") def send_data(self, pickable_object: Any): try: - self.debug(f"Send pickable_object of type: {type(pickable_object)}") payload: bytes = encode_pickable(pickable_object, self) - self.debug(f"Payload: {len(payload)}, bytes: {payload[:15].hex()}") - sent = self._client_sock.send(payload) - self.debug(f"Sent: {sent} - bytes") + self.debug(f"Send object of type: {type(pickable_object)}, payload: {len(payload)}, bytes: 0x{payload[:15].hex()}") + self._client_sock.sendall(payload) except BaseException as err: self.error(f"Failed to send client data: {err}") raise - try: self.debug(f"Waiting for answer") - len_packed: bytes = self._client_sock.recv(4) + len_packed: bytes = read_data_sync(self, self._client_sock, 4) data_len = struct.unpack("!I", len_packed)[0] self.debug(f"Got len_packed bytes: {len_packed.hex()}, that is: {data_len} - bytes to receive") - data = b'' - while len(data) < data_len: - to_be_read = data_len - len(data) - self.debug(f"Reading answer data: {to_be_read} of: {data_len} - bytes") - chunk: bytes = self._client_sock.recv(to_be_read) - self.debug(f"Got chunk of answer data: {len(chunk)}") - data += chunk - - if not data: - self.error(f"Got: {data_len} to receive but not data") - return None + data = read_data_sync(self, self._client_sock, data_len) self.debug(f"Got data: {len(data)}. Load pickled object") result = pickle.loads(data) self.debug(f"Got result: {result}") @@ -180,22 +137,9 @@ async def send_data_async(self, pickable_object): try: self.debug(f"Waiting for answer") - len_packed: bytes = await read_data(self, self._reader, 4) - if not len_packed: - return None + len_packed: bytes = await read_data_async(self, self._reader, 4) data_len = struct.unpack("!I", len_packed)[0] - - data = b'' - while len(data) < data_len: - to_be_read = data_len - len(data) - self.debug(f"Reading answer data: {to_be_read} of: {data_len} - bytes") - chunk = await self._reader.read(to_be_read) - self.debug(f"Got chunk of answer data: {len(chunk)}") - data += chunk - - if not data: - self.error(f"Got: {data_len} to receive but not data") - return None + data = await read_data_async(self, self._reader, data_len) self.debug(f"Got data: {len(data)}. 
Load pickled object") result = pickle.loads(data) self.debug(f"Got result: {result}") @@ -223,3 +167,33 @@ def __init__(self, addr: Tuple[str, int]): client_sock = socket.create_connection((host, port)) self._set_client_sock(client_sock=client_sock) + +def encode_pickable(object, logger) -> bytes: + data = pickle.dumps(object) + len_data = struct.pack("!I", len(data)) + logger.debug(f"Len data: {len(len_data)} - bytes, data: {len(data)} - bytes") + return len_data + data + + +async def read_data_async(self, reader: StreamReader, data_len: int) -> bytes: + data = b'' + while len(data) < data_len: + to_be_read = data_len - len(data) + self.debug(f"Reading data: {to_be_read} of: {data_len} - bytes") + chunk = await reader.read(to_be_read) + if not chunk: + raise EOFError(f"Failed to read chunk of data: {data_len}") + self.debug(f"Got chunk of data: {len(chunk)}") + data += chunk + return data + + +def read_data_sync(self, socket: socket.socket, data_len) -> bytes: + data = b'' + while len(data) < data_len: + to_be_read = data_len - len(data) + self.debug(f"Reading data: {to_be_read} of: {data_len} - bytes") + chunk: bytes = socket.recv(to_be_read) + self.debug(f"Got chunk of data: {len(chunk)}") + data += chunk + return data diff --git a/proxy/mempool/mempool.py b/proxy/mempool/mempool.py index de5248b9e..31a9d672e 100644 --- a/proxy/mempool/mempool.py +++ b/proxy/mempool/mempool.py @@ -102,7 +102,7 @@ async def _process_mp_result(self, resource_id: int, mp_tx_result: MPTxResult, m await self.enqueue_mp_request(mp_request) elif mp_tx_result.code == MPResultCode.Unspecified: self._executor.release_resource(resource_id) - self._tx_schedule.drop_request_away(mp_request) + self._drop_request_away(mp_request) elif mp_tx_result.code == MPResultCode.Done: self._on_request_done(mp_request) self._executor.release_resource(resource_id) diff --git a/proxy/mempool/mempool_client.py b/proxy/mempool/mempool_client.py index 9cb5c9b21..9f7ab1d2b 100644 --- a/proxy/mempool/mempool_client.py +++ b/proxy/mempool/mempool_client.py @@ -1,25 +1,76 @@ +from __future__ import annotations +import threading +from typing import Callable from logged_groups import logged_group -from ..common_neon.utils import AddrPickableDataClient - from .mempool_api import MPTxRequest, MPPendingTxCountReq from ..common_neon.eth_proto import Trx as NeonTx from ..common_neon.data import NeonTxExecCfg, NeonEmulatingResult +from ..common_neon.utils import AddrPickableDataClient + + +def _guard_conn(method: Callable) -> Callable: + def wrapper(self, *args, **kwargs): + with self._mp_conn_lock: + return method(self, *args, **kwargs) + + return wrapper + + +def _reconnecting(method: Callable) -> Callable: + def wrapper(self, *args, **kwargs): + try: + return method(self, *args, **kwargs) + except (InterruptedError, Exception) as err: + self.error(f"Failed to transfer data, unexpected err: {err}") + self._reconnect_mp() + raise + return wrapper @logged_group("neon.Proxy") class MemPoolClient: + RECONNECT_MP_TIME_SEC = 5 + def __init__(self, host: str, port: int): - self._pickable_data_client = AddrPickableDataClient((host, port)) + self.debug("Init MemPoolClient") + self._mp_conn_lock = threading.Lock() + self._address = (host, port) + self._is_connecting = threading.Event() + self._connect_mp() + def _reconnect_mp(self): + if self._is_connecting.is_set(): + return + self._is_connecting.set() + self.debug(f"Reconnecting MemPool in: {MemPoolClient.RECONNECT_MP_TIME_SEC} sec.") + threading.Timer(MemPoolClient.RECONNECT_MP_TIME_SEC, 
self._connect_mp).start()
+
+    @_guard_conn
+    def _connect_mp(self):
+        try:
+            self.debug(f"Connect MemPool: {self._address}")
+            self._pickable_data_client = AddrPickableDataClient(self._address)
+        except Exception as err:
+            self.error(f"Failed to connect MemPool: {self._address}, error: {err}")
+            self._is_connecting.clear()
+            self._reconnect_mp()
+        else:
+            self._is_connecting.clear()
+
+    @_guard_conn
+    @_reconnecting
     def send_raw_transaction(self, req_id: int, signature: str, neon_tx: NeonTx, neon_tx_exec_cfg: NeonTxExecCfg,
                              emulating_result: NeonEmulatingResult):
+
         mempool_tx_request = MPTxRequest(req_id=req_id, signature=signature, neon_tx=neon_tx,
                                          neon_tx_exec_cfg=neon_tx_exec_cfg, emulating_result=emulating_result)
         return self._pickable_data_client.send_data(mempool_tx_request)
 
+    @_guard_conn
+    @_reconnecting
     def get_pending_tx_count(self, req_id: int, sender: str):
         mempool_pending_tx_count_req = MPPendingTxCountReq(req_id=req_id, sender=sender)
         return self._pickable_data_client.send_data(mempool_pending_tx_count_req)
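

The TestMPSenderTxPool cases in the previous patch pin down two properties of the per-sender pool: get_tx()/acquire_tx() hand out the transaction with the lowest nonce, while drop_last_request() evicts from the high-nonce end and refuses to evict the transaction that is currently being processed. A toy model of that ordering, with a simplified request type (the real MPSenderTxPool keeps MPTxRequest objects and logs the "Failed to drop last request away" warning on the refused drop):

    from dataclasses import dataclass
    from typing import List, Optional


    @dataclass
    class TxReq:
        req_id: str
        nonce: int


    class SenderTxPool:
        # Transactions kept sorted by nonce, ascending: the next tx to
        # execute sits at index 0, the first candidate to drop at the end.
        def __init__(self):
            self._txs: List[TxReq] = []
            self._processing: Optional[TxReq] = None

        def add_tx(self, tx: TxReq):
            self._txs.append(tx)
            self._txs.sort(key=lambda t: t.nonce)

        def get_tx(self) -> Optional[TxReq]:
            return self._txs[0] if self._txs else None

        def acquire_tx(self) -> Optional[TxReq]:
            self._processing = self.get_tx()
            return self._processing

        def drop_last_request(self) -> bool:
            if not self._txs:
                return False
            if self._txs[-1] is self._processing:
                return False  # the remaining tx is being executed, keep it
            self._txs.pop()
            return True

Keeping the list sorted makes both ends cheap to reach: the scheduler always executes the lowest pending nonce, and capacity pressure sheds the highest nonce first, which is the transaction least likely to become executable soon.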
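The final patch replaces the ad-hoc read loops with the shared encode_pickable/read_data_async/read_data_sync helpers, that is, a length-prefixed pickle framing: a 4-byte big-endian unsigned length followed by the pickled payload, read in a loop because a single recv()/read() may legally return a short chunk. A self-contained sketch of that framing over a blocking socket (the function names here are illustrative, not the module's API):

    import pickle
    import socket
    import struct
    from typing import Any


    def encode_frame(obj: Any) -> bytes:
        # 4-byte big-endian length prefix, then the pickled payload.
        data = pickle.dumps(obj)
        return struct.pack("!I", len(data)) + data


    def recv_exactly(sock: socket.socket, data_len: int) -> bytes:
        # Loop until data_len bytes arrive; a short recv() is normal for TCP.
        data = b''
        while len(data) < data_len:
            chunk = sock.recv(data_len - len(data))
            if not chunk:
                raise EOFError(f"Peer closed with {data_len - len(data)} bytes still expected")
            data += chunk
        return data


    def recv_frame(sock: socket.socket) -> Any:
        payload_len = struct.unpack("!I", recv_exactly(sock, 4))[0]
        return pickle.loads(recv_exactly(sock, payload_len))

The EOFError on an empty chunk is what distinguishes a cleanly closed peer from a short read; without it the receive loop would spin forever on a dead connection, which is why the same guard belongs in read_data_sync as well as read_data_async.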
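MemPoolClient stacks two small decorators: _guard_conn serializes all use of the underlying connection behind a lock, and _reconnecting schedules a delayed reconnect on failure and re-raises so the caller still sees the error, while a threading.Event keeps repeated failures from piling up timers. A stripped-down sketch of the same pattern under assumed names (ReconnectingClient and its methods are placeholders, not part of the proxy):

    import threading
    from typing import Any, Callable


    def guarded(method: Callable) -> Callable:
        # Serialize all calls that touch the shared connection.
        def wrapper(self, *args, **kwargs):
            with self._lock:
                return method(self, *args, **kwargs)
        return wrapper


    def reconnecting(method: Callable) -> Callable:
        # On any failure: schedule a reconnect, then let the caller see the error.
        def wrapper(self, *args, **kwargs):
            try:
                return method(self, *args, **kwargs)
            except Exception:
                self._schedule_reconnect()
                raise
        return wrapper


    class ReconnectingClient:
        RECONNECT_DELAY_SEC = 5

        def __init__(self):
            self._lock = threading.Lock()
            self._reconnect_pending = threading.Event()

        def _schedule_reconnect(self):
            # The event keeps several failing calls from stacking up timers.
            if self._reconnect_pending.is_set():
                return
            self._reconnect_pending.set()
            threading.Timer(self.RECONNECT_DELAY_SEC, self._connect).start()

        def _connect(self):
            try:
                ...  # open the connection here
            except Exception:
                self._reconnect_pending.clear()
                self._schedule_reconnect()
            else:
                self._reconnect_pending.clear()

        @guarded
        @reconnecting
        def send(self, data: Any):
            ...  # write to the connection

Re-raising inside the reconnecting wrapper matters: the RPC layer can return an error to the user immediately instead of blocking on a dead mempool connection, and the timer quietly restores the link in the background.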