diff --git a/bittensor/core/async_subtensor.py b/bittensor/core/async_subtensor.py index 79e5d99aa7..5f9e6a2bc1 100644 --- a/bittensor/core/async_subtensor.py +++ b/bittensor/core/async_subtensor.py @@ -3,7 +3,7 @@ import copy from itertools import chain import ssl -from typing import Optional, Any, Union, TypedDict, Iterable, TYPE_CHECKING +from typing import Optional, Any, Union, Iterable, TYPE_CHECKING import aiohttp import asyncstdlib as a @@ -60,6 +60,7 @@ reveal_weights_extrinsic, ) from bittensor.core.metagraph import AsyncMetagraph +from bittensor.core.types import ParamWithTypes from bittensor.core.settings import version_as_int, TYPE_REGISTRY, DELEGATES_DETAILS_URL from bittensor.utils import ( decode_hex_identity_dict, @@ -68,6 +69,7 @@ ss58_to_vec_u8, torch, u16_normalized_float, + execute_coroutine, ) from bittensor.utils import networking from bittensor.utils.substrate_interface import AsyncSubstrateInterface @@ -85,11 +87,6 @@ from bittensor.utils.substrate_interface import QueryMapResult -class ParamWithTypes(TypedDict): - name: str # Name of the parameter. - type: str # ScaleType string of the parameter. - - def _decode_hex_identity_dict(info_dictionary: dict[str, Any]) -> dict[str, Any]: """Decodes a dictionary of hexadecimal identities.""" for k, v in info_dictionary.items(): @@ -123,6 +120,7 @@ def __init__( self, network: Optional[str] = None, config: Optional["Config"] = None, + _mock: bool = False, log_verbose: bool = False, event_loop: asyncio.AbstractEventLoop = None, ): @@ -132,6 +130,7 @@ def __init__( Arguments: network (str): The network name or type to connect to. config (Optional[Config]): Configuration object for the AsyncSubtensor instance. + _mock: Whether this is a mock instance. Mainly just for use in testing. log_verbose (bool): Enables or disables verbose logging. event_loop (Optional[asyncio.AbstractEventLoop]): Custom asyncio event loop. @@ -144,6 +143,7 @@ def __init__( self.chain_endpoint, self.network = AsyncSubtensor.setup_config( network, self._config ) + self._mock = _mock self.log_verbose = log_verbose self._check_and_log_network_settings() @@ -158,6 +158,7 @@ def __init__( use_remote_preset=True, chain_name="Bittensor", event_loop=event_loop, + _mock=_mock, ) if self.log_verbose: logging.info( @@ -170,6 +171,9 @@ def __str__(self): def __repr__(self): return self.__str__() + def __del__(self): + execute_coroutine(self.close()) + def _check_and_log_network_settings(self): if self.network == settings.NETWORKS[3]: # local logging.warning( @@ -439,7 +443,9 @@ async def get_hyperparameter( The value of the specified hyperparameter if the subnet exists, or None """ block_hash = await self.determine_block_hash(block, block_hash, reuse_block) - if not await self.subnet_exists(netuid, block_hash, reuse_block=reuse_block): + if not await self.subnet_exists( + netuid, block_hash=block_hash, reuse_block=reuse_block + ): logging.error(f"subnet {netuid} does not exist") return None @@ -797,7 +803,7 @@ async def bonds( return b_map - async def commit(self, wallet: "Wallet", netuid: int, data: str): + async def commit(self, wallet: "Wallet", netuid: int, data: str) -> bool: """ Commits arbitrary data to the Bittensor network by publishing metadata. @@ -806,7 +812,7 @@ async def commit(self, wallet: "Wallet", netuid: int, data: str): netuid (int): The unique identifier of the subnetwork. data (str): The data to be committed to the network. 
""" - await publish_metadata( + return await publish_metadata( subtensor=self, wallet=wallet, netuid=netuid, @@ -1042,7 +1048,7 @@ async def get_block_hash(self, block: Optional[int] = None): trustworthiness of the blockchain. """ if block: - return await self.substrate.get_block_hash(block) + return await self._get_block_hash(block) else: return await self.substrate.get_chain_head() @@ -2977,7 +2983,7 @@ async def root_register( try: recycle_call, balance = await asyncio.gather( self.get_hyperparameter( - param_name="Burn", netuid=netuid, reuse_block=True + param_name="Burn", netuid=netuid, block_hash=block_hash ), self.get_balance(wallet.coldkeypub.ss58_address, block_hash=block_hash), ) @@ -3071,6 +3077,14 @@ async def set_weights( This function is crucial in shaping the network's collective intelligence, where each neuron's learning and contribution are influenced by the weights it sets towards others【81†source】. """ + + async def _blocks_weight_limit() -> bool: + bslu, wrl = await asyncio.gather( + self.blocks_since_last_update(netuid, uid), + self.weights_rate_limit(netuid), + ) + return bslu > wrl + retries = 0 success = False if ( @@ -3087,10 +3101,9 @@ async def set_weights( # go with `commit reveal v3` extrinsic message = "No attempt made. Perhaps it is too soon to commit weights!" while ( - await self.blocks_since_last_update(netuid, uid) - > await self.weights_rate_limit(netuid) - and retries < max_retries + retries < max_retries and success is False + and await _blocks_weight_limit() ): logging.info( f"Committing weights for subnet #{netuid}. Attempt {retries + 1} of {max_retries}." @@ -3112,9 +3125,8 @@ async def set_weights( message = "No attempt made. Perhaps it is too soon to set weights!" while ( retries < max_retries - and await self.blocks_since_last_update(netuid, uid) - > await self.weights_rate_limit(netuid) and success is False + and await _blocks_weight_limit() ): try: logging.info( diff --git a/bittensor/core/classic_subtensor.py b/bittensor/core/classic_subtensor.py index c2e85352e2..02ec2ffa92 100644 --- a/bittensor/core/classic_subtensor.py +++ b/bittensor/core/classic_subtensor.py @@ -6,7 +6,7 @@ import argparse import copy import ssl -from typing import Union, Optional, TypedDict, Any +from typing import Union, Optional, Any import numpy as np import scalecodec @@ -64,6 +64,7 @@ unstake_multiple_extrinsic, ) from bittensor.core.metagraph import Metagraph +from bittensor.core.types import ParamWithTypes from bittensor.utils import ( networking, torch, @@ -80,11 +81,6 @@ KEY_NONCE: dict[str, int] = {} -class ParamWithTypes(TypedDict): - name: str # Name of the parameter. - type: str # ScaleType string of the parameter. 
- - class Subtensor: """ The Subtensor class in Bittensor serves as a crucial interface for interacting with the Bittensor blockchain, diff --git a/bittensor/core/dendrite.py b/bittensor/core/dendrite.py index 9543a837f7..5151a3e0ca 100644 --- a/bittensor/core/dendrite.py +++ b/bittensor/core/dendrite.py @@ -31,7 +31,7 @@ from bittensor.core.settings import version_as_int from bittensor.core.stream import StreamingSynapse from bittensor.core.synapse import Synapse, TerminalInfo -from bittensor.utils import networking +from bittensor.utils import networking, event_loop_is_running from bittensor.utils.btlogging import logging from bittensor.utils.registration import torch, use_torch @@ -48,14 +48,6 @@ DENDRITE_DEFAULT_ERROR = ("422", "Failed to parse response") -def event_loop_is_running(): - try: - asyncio.get_running_loop() - return True - except RuntimeError: - return False - - class DendriteMixin: """ The Dendrite class represents the abstracted implementation of a network client module. diff --git a/bittensor/core/extrinsics/asyncex/commit_reveal.py b/bittensor/core/extrinsics/asyncex/commit_reveal.py index 3807960749..e26e1fb9cc 100644 --- a/bittensor/core/extrinsics/asyncex/commit_reveal.py +++ b/bittensor/core/extrinsics/asyncex/commit_reveal.py @@ -113,9 +113,9 @@ async def commit_reveal_v3_extrinsic( # Reformat and normalize. uids, weights = convert_weights_and_uids_for_emit(uids, weights) - current_block = await subtensor.get_current_block() + current_block = await subtensor.substrate.get_block(None) subnet_hyperparameters = await subtensor.get_subnet_hyperparameters( - netuid, block=current_block + netuid, block_hash=current_block["header"]["hash"] ) tempo = subnet_hyperparameters.tempo subnet_reveal_period_epochs = ( @@ -128,7 +128,7 @@ async def commit_reveal_v3_extrinsic( weights=weights, version_key=version_key, tempo=tempo, - current_block=current_block, + current_block=current_block["header"]["number"], netuid=netuid, subnet_reveal_period_epochs=subnet_reveal_period_epochs, ) diff --git a/bittensor/core/metagraph.py b/bittensor/core/metagraph.py index 92ec1e167f..b5b958b93f 100644 --- a/bittensor/core/metagraph.py +++ b/bittensor/core/metagraph.py @@ -26,6 +26,7 @@ if typing.TYPE_CHECKING: from bittensor.core.subtensor import Subtensor from bittensor.core.async_subtensor import AsyncSubtensor + from bittensor.core.chain_data import NeuronInfo, NeuronInfoLite METAGRAPH_STATE_DICT_NDARRAY_KEYS = [ @@ -70,22 +71,25 @@ """ -def get_save_dir(network: str, netuid: int) -> str: +def get_save_dir( + network: str, netuid: int, root_dir: Optional[list[str]] = None +) -> str: """ Returns a directory path given ``network`` and ``netuid`` inputs. Args: network (str): Network name. netuid (int): Network UID. + root_dir: list to the file path for the root directory of your metagraph saves (i.e. ['/', 'tmp', 'metagraphs'], + defaults to ["~", ".bittensor", "metagraphs"] Returns: str: Directory path. 
""" + _root_dir = root_dir or ["~", ".bittensor", "metagraphs"] return os.path.expanduser( os.path.join( - "~", - ".bittensor", - "metagraphs", + *_root_dir, f"network-{str(network)}", f"netuid-{str(netuid)}", ) @@ -209,6 +213,7 @@ class AsyncMetagraphMixin(ABC): network: str version: Union["torch.nn.Parameter", tuple[NDArray]] n: Union["torch.nn.Parameter", NDArray] + neurons: list[Union["NeuronInfo", "NeuronInfoLite"]] block: Union["torch.nn.Parameter", NDArray] stake: Union["torch.nn.Parameter", NDArray] total_stake: Union["torch.nn.Parameter", NDArray] @@ -613,7 +618,6 @@ async def sync( metagraph.sync(block=history_block, lite=False, subtensor=subtensor) """ - # Initialize subtensor subtensor = self._initialize_subtensor(subtensor) @@ -703,6 +707,7 @@ async def _assign_neurons( """ if lite: self.neurons = await subtensor.neurons_lite(block=block, netuid=self.netuid) + else: self.neurons = await subtensor.neurons(block=block, netuid=self.netuid) self.lite = lite @@ -890,14 +895,18 @@ async def _process_root_weights( ) return tensor_param - def save(self) -> "AsyncMetagraph": + def save(self, root_dir: Optional[list[str]] = None) -> "AsyncMetagraph": """ Saves the current state of the metagraph to a file on disk. This function is crucial for persisting the current - state of the network's metagraph, which can later be reloaded or analyzed. The save operation includes all - neuron attributes and parameters, ensuring a complete snapshot of the metagraph's state. + state of the network's metagraph, which can later be reloaded or analyzed. The save operation includes all + neuron attributes and parameters, ensuring a complete snapshot of the metagraph's state. + + Args: + root_dir: list to the file path for the root directory of your metagraph saves + (i.e. ['/', 'tmp', 'metagraphs'], defaults to ["~", ".bittensor", "metagraphs"] Returns: - metagraph (bittensor.core.metagraph.AsyncMetagraph): The metagraph instance after saving its state. + metagraph (bittensor.core.metagraph.Metagraph): The metagraph instance after saving its state. Example: Save the current state of the metagraph to the default directory:: @@ -914,7 +923,7 @@ def save(self) -> "AsyncMetagraph": metagraph.load_from_path(dir_path) """ - save_directory = get_save_dir(self.network, self.netuid) + save_directory = get_save_dir(self.network, self.netuid, root_dir=root_dir) os.makedirs(save_directory, exist_ok=True) if use_torch(): graph_filename = f"{save_directory}/block-{self.block.item()}.pt" @@ -930,39 +939,34 @@ def save(self) -> "AsyncMetagraph": pickle.dump(state_dict, graph_file) return self - def load(self): + def load(self, root_dir: Optional[list[str]] = None) -> None: """ - Loads the state of the metagraph from the default save directory. This method is instrumental for restoring the - metagraph to its last saved state. It automatically identifies the save directory based on the ``network`` and - ``netuid`` properties of the metagraph, locates the latest block file in that directory, and loads all metagraph - parameters from it. + Loads the state of the metagraph from the default save directory. This method is instrumental for restoring the metagraph to its last saved state. It automatically identifies the save directory based on the ``network`` and ``netuid`` properties of the metagraph, locates the latest block file in that directory, and loads all metagraph parameters from it. 
This functionality is particularly beneficial when continuity in the state of the metagraph is necessary across different runtime sessions, or after a restart of the system. It ensures that the metagraph reflects the exact state it was in at the last save point, maintaining consistency in the network's representation. - The method delegates to ``load_from_path``, supplying it with the directory path constructed from the - metagraph's current ``network`` and ``netuid`` properties. This abstraction simplifies the process of loading - the metagraph's state for the user, requiring no direct path specifications. + The method delegates to ``load_from_path``, supplying it with the directory path constructed from the metagraph's current ``network`` and ``netuid`` properties. This abstraction simplifies the process of loading the metagraph's state for the user, requiring no direct path specifications. + + Args: + root_dir: list of path components for the root directory of your metagraph saves + (e.g. ['/', 'tmp', 'metagraphs']); defaults to ["~", ".bittensor", "metagraphs"]. Returns: - metagraph (bittensor.core.metagraph.AsyncMetagraph): The metagraph instance after loading its state from the - default directory. + metagraph (bittensor.core.metagraph.Metagraph): The metagraph instance after loading its state from the default directory. Example: Load the metagraph state from the last saved snapshot in the default directory:: metagraph.load() - After this operation, the metagraph's parameters and neuron data are restored to their state at the time of - the last save in the default directory. + After this operation, the metagraph's parameters and neuron data are restored to their state at the time of the last save in the default directory. Note: - The default save directory is determined based on the metagraph's ``network`` and ``netuid`` attributes. - It is important to ensure that these attributes are set correctly and that the default save directory - contains the appropriate state files for the metagraph. + The default save directory is determined based on the metagraph's ``network`` and ``netuid`` attributes. It is important to ensure that these attributes are set correctly and that the default save directory contains the appropriate state files for the metagraph.
""" - self.load_from_path(get_save_dir(self.network, self.netuid)) + self.load_from_path(get_save_dir(self.network, self.netuid, root_dir=root_dir)) @abstractmethod def load_from_path(self, dir_path: str) -> "AsyncMetagraph": @@ -1126,6 +1130,7 @@ def __init__( torch.tensor([], dtype=torch.int64), requires_grad=False ) self.axons: list[AxonInfo] = [] + self.neurons = [] self.subtensor = subtensor self.should_sync = sync @@ -1329,6 +1334,7 @@ def __init__( self.bonds = np.array([], dtype=np.int64) self.uids = np.array([], dtype=np.int64) self.axons: list[AxonInfo] = [] + self.neurons = [] self.subtensor = subtensor self.should_sync = sync @@ -1497,15 +1503,14 @@ def __init__( sync: bool = True, subtensor: "Subtensor" = None, ): + self.subtensor: Optional["Subtensor"] = subtensor self._async_metagraph = AsyncMetagraph( netuid=netuid, network=network, lite=lite, sync=sync, - subtensor=subtensor, + subtensor=subtensor.async_subtensor if subtensor else None, ) - if sync: - self.sync(block=None, lite=lite, subtensor=subtensor) def sync( self, @@ -1514,13 +1519,19 @@ def sync( subtensor: Optional["Subtensor"] = None, ): """Synchronizes the metagraph to the specified block, lite, and subtensor instance if available.""" + if subtensor: + event_loop = subtensor.event_loop + elif self.subtensor: + event_loop = self.subtensor.event_loop + else: + event_loop = None execute_coroutine( - coroutine=self._async_metagraph.sync( + self._async_metagraph.sync( block=block, lite=lite, subtensor=subtensor.async_subtensor if subtensor else None, ), - event_loop=subtensor.event_loop if subtensor else None, + event_loop=event_loop, ) def __getattr__(self, name): @@ -1529,7 +1540,12 @@ def __getattr__(self, name): if asyncio.iscoroutinefunction(attr): def wrapper(*args, **kwargs): - return execute_coroutine(attr(*args, **kwargs)) + return execute_coroutine( + attr(*args, **kwargs), + event_loop=self.subtensor.event_loop + if self.subtensor + else None, + ) return wrapper return attr diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 32c0872e48..bd837e4ec2 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -1,4 +1,3 @@ -import asyncio from functools import lru_cache from typing import TYPE_CHECKING, Any, Iterable, Optional, Union @@ -6,16 +5,16 @@ from numpy.typing import NDArray from bittensor.core.async_subtensor import AsyncSubtensor -from bittensor.utils.substrate_interface import SubstrateInterface +from bittensor.core.metagraph import Metagraph from bittensor.core.settings import version_as_int -from bittensor.utils import execute_coroutine, torch +from bittensor.utils.substrate_interface import SubstrateInterface +from bittensor.utils import execute_coroutine, torch, get_event_loop if TYPE_CHECKING: from bittensor_wallet import Wallet from bittensor.core.async_subtensor import ProposalVoteData from bittensor.core.axon import Axon from bittensor.core.config import Config - from bittensor.core.metagraph import Metagraph from bittensor.core.chain_data.delegate_info import DelegateInfo from bittensor.core.chain_data.neuron_info import NeuronInfo from bittensor.core.chain_data.neuron_info_lite import NeuronInfoLite @@ -46,20 +45,23 @@ def __init__( _mock: bool = False, log_verbose: bool = False, ): - self.event_loop = asyncio.get_event_loop() + self.event_loop = get_event_loop() self.network = network self._config = config - self._mock = _mock self.log_verbose = log_verbose - self.async_subtensor = AsyncSubtensor( network=network, config=config, 
log_verbose=log_verbose, event_loop=self.event_loop, + _mock=_mock, ) - self.substrate = SubstrateInterface(substrate=self.async_subtensor.substrate) + self.substrate = SubstrateInterface( + url=self.async_subtensor.chain_endpoint, + _mock=_mock, + substrate=self.async_subtensor.substrate, + ) self.chain_endpoint = self.async_subtensor.chain_endpoint def __str__(self): @@ -68,21 +70,21 @@ def __str__(self): def __repr__(self): return self.async_subtensor.__repr__() + def execute_coroutine(self, coroutine) -> Any: + return execute_coroutine(coroutine, self.event_loop) + def close(self): - execute_coroutine( - coroutine=self.async_subtensor.close(), event_loop=self.event_loop - ) + execute_coroutine(self.async_subtensor.close()) # Subtensor queries =========================================================================================== def query_constant( self, module_name: str, constant_name: str, block: Optional[int] = None ) -> Optional["ScaleType"]: - return execute_coroutine( - coroutine=self.async_subtensor.query_constant( + return self.execute_coroutine( + self.async_subtensor.query_constant( module_name=module_name, constant_name=constant_name, block=block - ), - event_loop=self.event_loop, + ) ) def query_map( @@ -92,21 +94,19 @@ def query_map( block: Optional[int] = None, params: Optional[list] = None, ) -> "QueryMapResult": - return execute_coroutine( - coroutine=self.async_subtensor.query_map( + return self.execute_coroutine( + self.async_subtensor.query_map( module=module, name=name, block=block, params=params - ), - event_loop=self.event_loop, + ) ) def query_map_subtensor( self, name: str, block: Optional[int] = None, params: Optional[list] = None ) -> "QueryMapResult": - return execute_coroutine( - coroutine=self.async_subtensor.query_map_subtensor( + return self.execute_coroutine( + self.async_subtensor.query_map_subtensor( name=name, block=block, params=params - ), - event_loop=self.event_loop, + ) ) def query_module( @@ -116,14 +116,13 @@ def query_module( block: Optional[int] = None, params: Optional[list] = None, ) -> "ScaleType": - return execute_coroutine( - coroutine=self.async_subtensor.query_module( + return self.execute_coroutine( + self.async_subtensor.query_module( module=module, name=name, block=block, params=params, - ), - event_loop=self.event_loop, + ) ) def query_runtime_api( @@ -133,34 +132,27 @@ def query_runtime_api( params: Optional[Union[list[int], dict[str, int]]] = None, block: Optional[int] = None, ) -> Optional[str]: - return execute_coroutine( + return self.execute_coroutine( coroutine=self.async_subtensor.query_runtime_api( runtime_api=runtime_api, method=method, params=params, block=block, - ), - event_loop=self.event_loop, + ) ) def query_subtensor( self, name: str, block: Optional[int] = None, params: Optional[list] = None ) -> "ScaleType": - return execute_coroutine( - coroutine=self.async_subtensor.query_subtensor( - name=name, block=block, params=params - ), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.query_subtensor(name=name, block=block, params=params) ) def state_call( self, method: str, data: str, block: Optional[int] = None ) -> dict[Any, Any]: - return execute_coroutine( - coroutine=self.async_subtensor.state_call( - method=method, data=data, block=block - ), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.state_call(method=method, data=data, block=block) ) # Common subtensor calls 
=========================================================================================== @@ -170,63 +162,47 @@ def block(self) -> int: return self.get_current_block() def blocks_since_last_update(self, netuid: int, uid: int) -> Optional[int]: - return execute_coroutine( - coroutine=self.async_subtensor.blocks_since_last_update( - netuid=netuid, uid=uid - ), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.blocks_since_last_update(netuid=netuid, uid=uid) ) def bonds( self, netuid: int, block: Optional[int] = None ) -> list[tuple[int, list[tuple[int, int]]]]: - return execute_coroutine( - coroutine=self.async_subtensor.bonds(netuid=netuid, block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.bonds(netuid=netuid, block=block), ) - def commit(self, wallet, netuid: int, data: str): - execute_coroutine( - coroutine=self.async_subtensor.commit( - wallet=wallet, netuid=netuid, data=data - ), - event_loop=self.event_loop, + def commit(self, wallet, netuid: int, data: str) -> bool: + return self.execute_coroutine( + self.async_subtensor.commit(wallet=wallet, netuid=netuid, data=data) ) def commit_reveal_enabled( self, netuid: int, block: Optional[int] = None ) -> Optional[bool]: - return execute_coroutine( - coroutine=self.async_subtensor.commit_reveal_enabled( - netuid=netuid, block=block - ), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.commit_reveal_enabled(netuid=netuid, block=block) ) def difficulty(self, netuid: int, block: Optional[int] = None) -> Optional[int]: - return execute_coroutine( - coroutine=self.async_subtensor.difficulty(netuid=netuid, block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.difficulty(netuid=netuid, block=block), ) def does_hotkey_exist(self, hotkey_ss58: str, block: Optional[int] = None) -> bool: - return execute_coroutine( - coroutine=self.async_subtensor.does_hotkey_exist( - hotkey_ss58=hotkey_ss58, block=block - ), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.does_hotkey_exist(hotkey_ss58=hotkey_ss58, block=block) ) def get_all_subnets_info(self, block: Optional[int] = None) -> list["SubnetInfo"]: - return execute_coroutine( - coroutine=self.async_subtensor.get_all_subnets_info(block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.get_all_subnets_info(block=block), ) def get_balance(self, address: str, block: Optional[int] = None) -> "Balance": - return execute_coroutine( - coroutine=self.async_subtensor.get_balance(address, block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.get_balance(address, block=block), ) def get_balances( @@ -234,258 +210,217 @@ def get_balances( *addresses: str, block: Optional[int] = None, ) -> dict[str, "Balance"]: - return execute_coroutine( - coroutine=self.async_subtensor.get_balances(*addresses, block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.get_balances(*addresses, block=block), ) def get_current_block(self) -> int: - return execute_coroutine( + return self.execute_coroutine( coroutine=self.async_subtensor.get_current_block(), - event_loop=self.event_loop, ) @lru_cache(maxsize=128) def get_block_hash(self, block: Optional[int] = None) -> str: - return execute_coroutine( + return self.execute_coroutine( coroutine=self.async_subtensor.get_block_hash(block=block), - 
event_loop=self.event_loop, ) def get_children( self, hotkey: str, netuid: int, block: Optional[int] = None ) -> tuple[bool, list, str]: - return execute_coroutine( - coroutine=self.async_subtensor.get_children( + return self.execute_coroutine( + self.async_subtensor.get_children( hotkey=hotkey, netuid=netuid, block=block ), - event_loop=self.event_loop, ) def get_commitment(self, netuid: int, uid: int, block: Optional[int] = None) -> str: - return execute_coroutine( - coroutine=self.async_subtensor.get_commitment( - netuid=netuid, uid=uid, block=block - ), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.get_commitment(netuid=netuid, uid=uid, block=block), ) def get_current_weight_commit_info( self, netuid: int, block: Optional[int] = None ) -> list: - return execute_coroutine( - coroutine=self.async_subtensor.get_current_weight_commit_info( + return self.execute_coroutine( + self.async_subtensor.get_current_weight_commit_info( netuid=netuid, block=block ), - event_loop=self.event_loop, ) def get_delegate_by_hotkey( self, hotkey_ss58: str, block: Optional[int] = None ) -> Optional["DelegateInfo"]: - return execute_coroutine( - coroutine=self.async_subtensor.get_delegate_by_hotkey( + return self.execute_coroutine( + self.async_subtensor.get_delegate_by_hotkey( hotkey_ss58=hotkey_ss58, block=block ), - event_loop=self.event_loop, ) def get_delegate_identities( self, block: Optional[int] = None ) -> dict[str, "DelegatesDetails"]: - return execute_coroutine( - coroutine=self.async_subtensor.get_delegate_identities(block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.get_delegate_identities(block=block), ) def get_delegate_take( self, hotkey_ss58: str, block: Optional[int] = None ) -> Optional[float]: - return execute_coroutine( - coroutine=self.async_subtensor.get_delegate_take( + return self.execute_coroutine( + self.async_subtensor.get_delegate_take( hotkey_ss58=hotkey_ss58, block=block ), - event_loop=self.event_loop, ) def get_delegated( self, coldkey_ss58: str, block: Optional[int] = None ) -> list[tuple["DelegateInfo", "Balance"]]: - return execute_coroutine( - coroutine=self.async_subtensor.get_delegated( - coldkey_ss58=coldkey_ss58, block=block - ), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.get_delegated(coldkey_ss58=coldkey_ss58, block=block), ) def get_delegates(self, block: Optional[int] = None) -> list["DelegateInfo"]: - return execute_coroutine( - coroutine=self.async_subtensor.get_delegates(block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.get_delegates(block=block), ) def get_existential_deposit( self, block: Optional[int] = None ) -> Optional["Balance"]: - return execute_coroutine( - coroutine=self.async_subtensor.get_existential_deposit(block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.get_existential_deposit(block=block), ) def get_hotkey_owner( self, hotkey_ss58: str, block: Optional[int] = None ) -> Optional[str]: - return execute_coroutine( - coroutine=self.async_subtensor.get_hotkey_owner( - hotkey_ss58=hotkey_ss58, block=block - ), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.get_hotkey_owner(hotkey_ss58=hotkey_ss58, block=block), ) def get_minimum_required_stake(self) -> "Balance": - return execute_coroutine( - coroutine=self.async_subtensor.get_minimum_required_stake(), - event_loop=self.event_loop, + 
return self.execute_coroutine( + self.async_subtensor.get_minimum_required_stake(), ) def get_netuids_for_hotkey( self, hotkey_ss58: str, block: Optional[int] = None, reuse_block: bool = False ) -> list[int]: - return execute_coroutine( - coroutine=self.async_subtensor.get_netuids_for_hotkey( + return self.execute_coroutine( + self.async_subtensor.get_netuids_for_hotkey( hotkey_ss58=hotkey_ss58, block=block, reuse_block=reuse_block ), - event_loop=self.event_loop, ) def get_neuron_certificate( self, hotkey: str, netuid: int, block: Optional[int] = None ) -> Optional["Certificate"]: - return execute_coroutine( - coroutine=self.async_subtensor.get_neuron_certificate( - hotkey, netuid, block=block - ), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.get_neuron_certificate(hotkey, netuid, block=block), ) def get_neuron_for_pubkey_and_subnet( self, hotkey_ss58: str, netuid: int, block: Optional[int] = None ) -> Optional["NeuronInfo"]: - return execute_coroutine( - coroutine=self.async_subtensor.get_neuron_for_pubkey_and_subnet( + return self.execute_coroutine( + self.async_subtensor.get_neuron_for_pubkey_and_subnet( hotkey_ss58, netuid, block=block ), - event_loop=self.event_loop, ) def get_stake_for_coldkey_and_hotkey( self, hotkey_ss58: str, coldkey_ss58: str, block: Optional[int] = None ) -> Optional["Balance"]: - return execute_coroutine( - coroutine=self.async_subtensor.get_stake_for_coldkey_and_hotkey( + return self.execute_coroutine( + self.async_subtensor.get_stake_for_coldkey_and_hotkey( hotkey_ss58=hotkey_ss58, coldkey_ss58=coldkey_ss58, block=block ), - event_loop=self.event_loop, ) def get_stake_info_for_coldkey( self, coldkey_ss58: str, block: Optional[int] = None ) -> list["StakeInfo"]: - return execute_coroutine( - coroutine=self.async_subtensor.get_stake_info_for_coldkey( + return self.execute_coroutine( + self.async_subtensor.get_stake_info_for_coldkey( coldkey_ss58=coldkey_ss58, block=block ), - event_loop=self.event_loop, ) def get_subnet_burn_cost(self, block: Optional[int] = None) -> Optional[str]: - return execute_coroutine( - coroutine=self.async_subtensor.get_subnet_burn_cost(block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.get_subnet_burn_cost(block=block), ) def get_subnet_hyperparameters( self, netuid: int, block: Optional[int] = None ) -> Optional[Union[list, "SubnetHyperparameters"]]: - return execute_coroutine( - coroutine=self.async_subtensor.get_subnet_hyperparameters( - netuid=netuid, block=block - ), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.get_subnet_hyperparameters(netuid=netuid, block=block), ) def get_subnet_reveal_period_epochs( self, netuid: int, block: Optional[int] = None ) -> Optional[int]: - return execute_coroutine( - coroutine=self.async_subtensor.get_subnet_reveal_period_epochs( + return self.execute_coroutine( + self.async_subtensor.get_subnet_reveal_period_epochs( netuid=netuid, block=block ), - event_loop=self.event_loop, ) def get_subnets(self, block: Optional[int] = None) -> list[int]: - return execute_coroutine( - coroutine=self.async_subtensor.get_subnets(block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.get_subnets(block=block), ) def get_total_stake_for_coldkey( self, ss58_address: str, block: Optional[int] = None ) -> Optional["Balance"]: - result = execute_coroutine( - coroutine=self.async_subtensor.get_total_stake_for_coldkey( - ss58_address, block=block - 
), - event_loop=self.event_loop, + result = self.execute_coroutine( + self.async_subtensor.get_total_stake_for_coldkey(ss58_address, block=block), ) return next(iter(result.values()), None) if isinstance(result, dict) else None def get_total_stake_for_hotkey( self, ss58_address: str, block: Optional[int] = None ) -> Optional["Balance"]: - result = execute_coroutine( - coroutine=self.async_subtensor.get_total_stake_for_hotkey( + result = self.execute_coroutine( + self.async_subtensor.get_total_stake_for_hotkey( *[ss58_address], block=block ), - event_loop=self.event_loop, ) return next(iter(result.values()), None) if isinstance(result, dict) else None def get_total_subnets(self, block: Optional[int] = None) -> Optional[int]: - return execute_coroutine( - coroutine=self.async_subtensor.get_total_subnets(block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.get_total_subnets(block=block), ) def get_transfer_fee( self, wallet: "Wallet", dest: str, value: Union["Balance", float, int] ) -> "Balance": - return execute_coroutine( - coroutine=self.async_subtensor.get_transfer_fee( + return self.execute_coroutine( + self.async_subtensor.get_transfer_fee( wallet=wallet, dest=dest, value=value ), - event_loop=self.event_loop, ) def get_vote_data( self, proposal_hash: str, block: Optional[int] = None ) -> Optional["ProposalVoteData"]: - return execute_coroutine( - coroutine=self.async_subtensor.get_vote_data( + return self.execute_coroutine( + self.async_subtensor.get_vote_data( proposal_hash=proposal_hash, block=block ), - event_loop=self.event_loop, ) def get_uid_for_hotkey_on_subnet( self, hotkey_ss58: str, netuid: int, block: Optional[int] = None ) -> Optional[int]: - return execute_coroutine( - coroutine=self.async_subtensor.get_uid_for_hotkey_on_subnet( + return self.execute_coroutine( + self.async_subtensor.get_uid_for_hotkey_on_subnet( hotkey_ss58=hotkey_ss58, netuid=netuid, block=block ), - event_loop=self.event_loop, ) def filter_netuids_by_registered_hotkeys( @@ -495,43 +430,39 @@ def filter_netuids_by_registered_hotkeys( all_hotkeys: Iterable["Wallet"], block: Optional[int], ) -> list[int]: - return execute_coroutine( - coroutine=self.async_subtensor.filter_netuids_by_registered_hotkeys( + return self.execute_coroutine( + self.async_subtensor.filter_netuids_by_registered_hotkeys( all_netuids=all_netuids, filter_for_netuids=filter_for_netuids, all_hotkeys=all_hotkeys, block=block, ), - event_loop=self.event_loop, ) def immunity_period( self, netuid: int, block: Optional[int] = None ) -> Optional[int]: - return execute_coroutine( - coroutine=self.async_subtensor.immunity_period(netuid=netuid, block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.immunity_period(netuid=netuid, block=block), ) def is_hotkey_delegate(self, hotkey_ss58: str, block: Optional[int] = None) -> bool: - return execute_coroutine( - coroutine=self.async_subtensor.is_hotkey_delegate( + return self.execute_coroutine( + self.async_subtensor.is_hotkey_delegate( hotkey_ss58=hotkey_ss58, block=block ), - event_loop=self.event_loop, ) def is_hotkey_registered( self, hotkey_ss58: str, - netuid: int, + netuid: Optional[int] = None, block: Optional[int] = None, ) -> bool: - return execute_coroutine( - coroutine=self.async_subtensor.is_hotkey_registered( + return self.execute_coroutine( + self.async_subtensor.is_hotkey_registered( hotkey_ss58=hotkey_ss58, netuid=netuid, block=block ), - event_loop=self.event_loop, ) def 
is_hotkey_registered_any( @@ -539,12 +470,11 @@ def is_hotkey_registered_any( hotkey_ss58: str, block: Optional[int] = None, ) -> bool: - return execute_coroutine( - coroutine=self.async_subtensor.is_hotkey_registered_any( + return self.execute_coroutine( + self.async_subtensor.is_hotkey_registered_any( hotkey_ss58=hotkey_ss58, block=block, ), - event_loop=self.event_loop, ) def is_hotkey_registered_on_subnet( @@ -553,115 +483,99 @@ def is_hotkey_registered_on_subnet( return self.get_uid_for_hotkey_on_subnet(hotkey_ss58, netuid, block) is not None def last_drand_round(self) -> Optional[int]: - return execute_coroutine( - coroutine=self.async_subtensor.last_drand_round(), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.last_drand_round(), ) def max_weight_limit( self, netuid: int, block: Optional[int] = None ) -> Optional[float]: - return execute_coroutine( - coroutine=self.async_subtensor.max_weight_limit(netuid=netuid, block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.max_weight_limit(netuid=netuid, block=block), ) def metagraph( self, netuid: int, lite: bool = True, block: Optional[int] = None - ) -> "Metagraph": # type: ignore - return execute_coroutine( - coroutine=self.async_subtensor.metagraph( - netuid=netuid, lite=lite, block=block - ), - event_loop=self.event_loop, + ) -> "Metagraph": + metagraph = Metagraph( + network=self.chain_endpoint, + netuid=netuid, + lite=lite, + sync=False, + subtensor=self, ) + metagraph.sync(block=block, lite=lite, subtensor=self) + + return metagraph def min_allowed_weights( self, netuid: int, block: Optional[int] = None ) -> Optional[int]: - return execute_coroutine( - coroutine=self.async_subtensor.min_allowed_weights( - netuid=netuid, block=block - ), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.min_allowed_weights(netuid=netuid, block=block), ) def neuron_for_uid( self, uid: int, netuid: int, block: Optional[int] = None ) -> "NeuronInfo": - return execute_coroutine( - coroutine=self.async_subtensor.neuron_for_uid( - uid=uid, netuid=netuid, block=block - ), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.neuron_for_uid(uid=uid, netuid=netuid, block=block), ) def neurons(self, netuid: int, block: Optional[int] = None) -> list["NeuronInfo"]: - return execute_coroutine( - coroutine=self.async_subtensor.neurons(netuid=netuid, block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.neurons(netuid=netuid, block=block), ) def neurons_lite( self, netuid: int, block: Optional[int] = None ) -> list["NeuronInfoLite"]: - return execute_coroutine( - coroutine=self.async_subtensor.neurons_lite(netuid=netuid, block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.neurons_lite(netuid=netuid, block=block), ) def query_identity(self, key: str, block: Optional[int] = None) -> Optional[str]: - return execute_coroutine( - coroutine=self.async_subtensor.query_identity(key=key, block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.query_identity(key=key, block=block), ) def recycle(self, netuid: int, block: Optional[int] = None) -> Optional["Balance"]: - return execute_coroutine( - coroutine=self.async_subtensor.recycle(netuid=netuid, block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.recycle(netuid=netuid, block=block), ) def 
subnet_exists(self, netuid: int, block: Optional[int] = None) -> bool: - return execute_coroutine( - coroutine=self.async_subtensor.subnet_exists(netuid=netuid, block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.subnet_exists(netuid=netuid, block=block), ) def subnetwork_n(self, netuid: int, block: Optional[int] = None) -> Optional[int]: - return execute_coroutine( - coroutine=self.async_subtensor.subnetwork_n(netuid=netuid, block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.subnetwork_n(netuid=netuid, block=block), ) def tempo(self, netuid: int, block: Optional[int] = None) -> Optional[int]: - return execute_coroutine( - coroutine=self.async_subtensor.tempo(netuid=netuid, block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.tempo(netuid=netuid, block=block), ) def tx_rate_limit(self, block: Optional[int] = None) -> Optional[int]: - return execute_coroutine( - coroutine=self.async_subtensor.tx_rate_limit(block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.tx_rate_limit(block=block), ) def weights( self, netuid: int, block: Optional[int] = None ) -> list[tuple[int, list[tuple[int, int]]]]: - return execute_coroutine( - coroutine=self.async_subtensor.weights(netuid=netuid, block=block), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.weights(netuid=netuid, block=block), ) def weights_rate_limit( self, netuid: int, block: Optional[int] = None ) -> Optional[int]: - return execute_coroutine( - coroutine=self.async_subtensor.weights_rate_limit( - netuid=netuid, block=block - ), - event_loop=self.event_loop, + return self.execute_coroutine( + self.async_subtensor.weights_rate_limit(netuid=netuid, block=block), ) # Extrinsics ======================================================================================================= @@ -674,15 +588,14 @@ def add_stake( wait_for_inclusion: bool = True, wait_for_finalization: bool = False, ) -> bool: - return execute_coroutine( - coroutine=self.async_subtensor.add_stake( + return self.execute_coroutine( + self.async_subtensor.add_stake( wallet=wallet, hotkey_ss58=hotkey_ss58, amount=amount, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, ), - event_loop=self.event_loop, ) def add_stake_multiple( @@ -693,15 +606,14 @@ def add_stake_multiple( wait_for_inclusion: bool = True, wait_for_finalization: bool = False, ) -> bool: - return execute_coroutine( - coroutine=self.async_subtensor.add_stake_multiple( + return self.execute_coroutine( + self.async_subtensor.add_stake_multiple( wallet=wallet, hotkey_ss58s=hotkey_ss58s, amounts=amounts, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, ), - event_loop=self.event_loop, ) def burned_register( @@ -711,14 +623,13 @@ def burned_register( wait_for_inclusion: bool = False, wait_for_finalization: bool = True, ) -> bool: - return execute_coroutine( - coroutine=self.async_subtensor.burned_register( + return self.execute_coroutine( + self.async_subtensor.burned_register( wallet=wallet, netuid=netuid, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, ), - event_loop=self.event_loop, ) def commit_weights( @@ -733,8 +644,8 @@ def commit_weights( wait_for_finalization: bool = False, max_retries: int = 5, ) -> tuple[bool, str]: - return execute_coroutine( - 
coroutine=self.async_subtensor.commit_weights( + return self.execute_coroutine( + self.async_subtensor.commit_weights( wallet=wallet, netuid=netuid, salt=salt, @@ -745,7 +656,6 @@ def commit_weights( wait_for_finalization=wait_for_finalization, max_retries=max_retries, ), - event_loop=self.event_loop, ) def register( @@ -763,8 +673,8 @@ def register( update_interval: Optional[int] = None, log_verbose: bool = False, ) -> bool: - return execute_coroutine( - coroutine=self.async_subtensor.register( + return self.execute_coroutine( + self.async_subtensor.register( wallet=wallet, netuid=netuid, wait_for_inclusion=wait_for_inclusion, @@ -778,7 +688,6 @@ def register( update_interval=update_interval, log_verbose=log_verbose, ), - event_loop=self.event_loop, ) def reveal_weights( @@ -793,8 +702,8 @@ def reveal_weights( wait_for_finalization: bool = False, max_retries: int = 5, ) -> tuple[bool, str]: - return execute_coroutine( - coroutine=self.async_subtensor.reveal_weights( + return self.execute_coroutine( + self.async_subtensor.reveal_weights( wallet=wallet, netuid=netuid, uids=uids, @@ -805,7 +714,6 @@ def reveal_weights( wait_for_finalization=wait_for_finalization, max_retries=max_retries, ), - event_loop=self.event_loop, ) def root_register( @@ -814,15 +722,12 @@ def root_register( wait_for_inclusion: bool = False, wait_for_finalization: bool = True, ) -> bool: - block_hash = self.get_block_hash() return execute_coroutine( - coroutine=self.async_subtensor.root_register( + self.async_subtensor.root_register( wallet=wallet, - block_hash=block_hash, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, ), - event_loop=self.event_loop, ) def root_set_weights( @@ -834,8 +739,8 @@ def root_set_weights( wait_for_inclusion: bool = False, wait_for_finalization: bool = False, ) -> bool: - return execute_coroutine( - coroutine=self.async_subtensor.root_set_weights( + return self.execute_coroutine( + self.async_subtensor.root_set_weights( wallet=wallet, netuids=netuids, weights=weights, @@ -843,7 +748,6 @@ def root_set_weights( wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, ), - event_loop=self.event_loop, ) def set_weights( @@ -857,8 +761,8 @@ def set_weights( wait_for_finalization: bool = False, max_retries: int = 5, ) -> tuple[bool, str]: - return execute_coroutine( - coroutine=self.async_subtensor.set_weights( + return self.execute_coroutine( + self.async_subtensor.set_weights( wallet=wallet, netuid=netuid, uids=uids, @@ -867,8 +771,7 @@ def set_weights( wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, max_retries=max_retries, - ), - event_loop=self.event_loop, + ) ) def serve_axon( @@ -879,15 +782,14 @@ def serve_axon( wait_for_finalization: bool = True, certificate: Optional["Certificate"] = None, ) -> bool: - return execute_coroutine( - coroutine=self.async_subtensor.serve_axon( + return self.execute_coroutine( + self.async_subtensor.serve_axon( netuid=netuid, axon=axon, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, certificate=certificate, ), - event_loop=self.event_loop, ) def transfer( @@ -900,8 +802,8 @@ def transfer( transfer_all: bool = False, keep_alive: bool = True, ) -> bool: - return execute_coroutine( - coroutine=self.async_subtensor.transfer( + return self.execute_coroutine( + self.async_subtensor.transfer( wallet=wallet, destination=dest, amount=amount, @@ -910,7 +812,6 @@ def transfer( transfer_all=transfer_all, keep_alive=keep_alive, ), - 
event_loop=self.event_loop, ) def unstake( @@ -921,15 +822,14 @@ def unstake( wait_for_inclusion: bool = True, wait_for_finalization: bool = False, ) -> bool: - return execute_coroutine( - coroutine=self.async_subtensor.unstake( + return self.execute_coroutine( + self.async_subtensor.unstake( wallet=wallet, hotkey_ss58=hotkey_ss58, amount=amount, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, ), - event_loop=self.event_loop, ) def unstake_multiple( @@ -940,13 +840,12 @@ def unstake_multiple( wait_for_inclusion: bool = True, wait_for_finalization: bool = False, ) -> bool: - return execute_coroutine( - coroutine=self.async_subtensor.unstake_multiple( + return self.execute_coroutine( + self.async_subtensor.unstake_multiple( wallet=wallet, hotkey_ss58s=hotkey_ss58s, amounts=amounts, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, ), - event_loop=self.event_loop, ) diff --git a/bittensor/core/types.py b/bittensor/core/types.py index 577df5b6ba..908e384015 100644 --- a/bittensor/core/types.py +++ b/bittensor/core/types.py @@ -38,3 +38,8 @@ class PrometheusServeCallParams(TypedDict): port: int ip_type: int netuid: int + + +class ParamWithTypes(TypedDict): + name: str # Name of the parameter. + type: str # ScaleType string of the parameter. diff --git a/bittensor/utils/__init__.py b/bittensor/utils/__init__.py index 64006293f9..886825a6a5 100644 --- a/bittensor/utils/__init__.py +++ b/bittensor/utils/__init__.py @@ -33,18 +33,34 @@ from .version import version_checking, check_version, VersionCheckError if TYPE_CHECKING: - from bittensor.utils.substrate_interface import AsyncSubstrateInterface - from substrateinterface import SubstrateInterface from bittensor_wallet import Wallet + +# redundant aliases +logging = logging +torch = torch +use_torch = use_torch +version_checking = version_checking +check_version = check_version +VersionCheckError = VersionCheckError + + RAOPERTAO = 1e9 U16_MAX = 65535 U64_MAX = 18446744073709551615 Certificate = str +UnlockStatus = namedtuple("UnlockStatus", ["success", "message"]) -UnlockStatus = namedtuple("UnlockStatus", ["success", "message"]) +def event_loop_is_running() -> Optional[asyncio.AbstractEventLoop]: + """ + Simple function to check if event loop is running. Returns the loop if it is, otherwise None. + """ + try: + return asyncio.get_running_loop() + except RuntimeError: + return None def ss58_to_vec_u8(ss58_address: str) -> list[int]: @@ -401,6 +417,19 @@ def hex_to_bytes(hex_str: str) -> bytes: return bytes_result +def get_event_loop() -> asyncio.AbstractEventLoop: + """ + If an event loop is already running, returns that. Otherwise, creates a new event loop, + and sets it as the main event loop for this thread, returning the newly-created event loop. + """ + if loop := event_loop_is_running(): + event_loop = loop + else: + event_loop = asyncio.get_event_loop() + asyncio.set_event_loop(event_loop) + return event_loop + + def execute_coroutine( coroutine: "Coroutine", event_loop: asyncio.AbstractEventLoop = None ): @@ -409,21 +438,14 @@ def execute_coroutine( Args: coroutine (Coroutine): The coroutine to run. - event_loop (AbstractEventLoop): The event loop to use. If None, a new event loop will be created. + event_loop (AbstractEventLoop): The event loop to use. If `None`, attempts to fetch the already-running + loop. If one if not running, a new loop is created. Returns: The result of the coroutine execution. 
""" - try: - if event_loop: - return event_loop.run_until_complete(coroutine) - else: - return asyncio.run(coroutine) - except RuntimeError as error: - if "already running" in str(error): - if not event_loop: - event_loop = asyncio.new_event_loop() - asyncio.set_event_loop(event_loop) - return event_loop.run_until_complete(coroutine) - else: - raise error + if event_loop: + event_loop = event_loop + else: + event_loop = get_event_loop() + return event_loop.run_until_complete(asyncio.wait_for(coroutine, timeout=None)) diff --git a/bittensor/utils/mock/subtensor_mock.py b/bittensor/utils/mock/subtensor_mock.py index 0e35cd2478..e19fd82471 100644 --- a/bittensor/utils/mock/subtensor_mock.py +++ b/bittensor/utils/mock/subtensor_mock.py @@ -20,7 +20,7 @@ from hashlib import sha256 from types import SimpleNamespace from typing import Any, Optional, Union, TypedDict -from unittest.mock import MagicMock, patch +from unittest.mock import MagicMock, patch, AsyncMock from bittensor_wallet import Wallet @@ -34,8 +34,9 @@ from bittensor.core.types import AxonServeCallParams, PrometheusServeCallParams from bittensor.core.errors import ChainQueryError from bittensor.core.subtensor import Subtensor +from bittensor.core.async_subtensor import AsyncSubtensor import bittensor.core.subtensor as subtensor_module -from bittensor.utils import RAOPERTAO, u16_normalized_float +from bittensor.utils import RAOPERTAO, u16_normalized_float, get_event_loop from bittensor.utils.balance import Balance # Mock Testing Constant @@ -167,6 +168,21 @@ class MockChainState(TypedDict): SubtensorModule: MockSubtensorState +class ReusableCoroutine: + def __init__(self, coroutine): + self.coroutine = coroutine + + def __await__(self): + return self.reset().__await__() + + def reset(self): + return self.coroutine() + + +async def _async_block(): + return 1 + + class MockSubtensor(Subtensor): """ A Mock Subtensor class for running tests. @@ -251,6 +267,9 @@ def setup(self) -> None: self.network = "mock" self.chain_endpoint = "ws://mock_endpoint.bt" self.substrate = MagicMock(autospec=SubstrateInterface) + self.async_subtensor = AsyncMock(autospec=AsyncSubtensor) + self.async_subtensor.block = ReusableCoroutine(_async_block) + self.event_loop = get_event_loop() def __init__(self, *args, **kwargs) -> None: mock_substrate_interface = MagicMock(autospec=SubstrateInterface) diff --git a/bittensor/utils/substrate_interface.py b/bittensor/utils/substrate_interface.py index d099f44380..7022a76492 100644 --- a/bittensor/utils/substrate_interface.py +++ b/bittensor/utils/substrate_interface.py @@ -939,6 +939,7 @@ def __init__( max_retries: int = 5, retry_timeout: float = 60.0, event_loop: Optional[asyncio.BaseEventLoop] = None, + _mock: bool = False, ): """ The asyncio-compatible version of the subtensor interface commands we use in bittensor. 
It is important to @@ -955,6 +956,8 @@ def __init__( sync_calls: whether this instance is going to be called through a sync wrapper or plain max_retries: number of times to retry RPC requests before giving up retry_timeout: how to long wait since the last ping to retry the RPC request + event_loop: the event loop to use + _mock: whether to use mock version of the subtensor interface """ self.max_retries = max_retries @@ -991,10 +994,13 @@ def __init__( self.extrinsic_receipt_cls = ( AsyncExtrinsicReceipt if self.sync_calls is False else ExtrinsicReceipt ) - execute_coroutine( - coroutine=self.initialize(), - event_loop=event_loop, - ) + if not _mock: + execute_coroutine( + coroutine=self.initialize(), + event_loop=event_loop, + ) + else: + self.reload_type_registry() async def __aenter__(self): await self.initialize() @@ -3911,14 +3917,14 @@ class SubstrateInterface: def __init__( self, - url: str = None, + url: str, use_remote_preset: bool = False, auto_discover: bool = True, ss58_format: Optional[int] = None, type_registry: Optional[dict] = None, chain_name: Optional[str] = None, event_loop: Optional[asyncio.AbstractEventLoop] = None, - mock: bool = False, + _mock: bool = False, substrate: Optional["AsyncSubstrateInterface"] = None, ): event_loop = substrate.event_loop if substrate else event_loop @@ -3932,18 +3938,15 @@ def __init__( chain_name=chain_name, sync_calls=True, event_loop=event_loop, + _mock=_mock, ) if not substrate else substrate ) self.event_loop = event_loop or asyncio.get_event_loop() - if not mock: - self.event_loop.run_until_complete(self._async_instance.initialize()) - else: - self._async_instance.reload_type_registry() def __del__(self): - self.event_loop.run_until_complete(self._async_instance.close()) + execute_coroutine(self._async_instance.close()) def __getattr__(self, name): attr = getattr(self._async_instance, name) diff --git a/tests/e2e_tests/test_commit_reveal_v3.py b/tests/e2e_tests/test_commit_reveal_v3.py index d627ef867d..b5c9267ff5 100644 --- a/tests/e2e_tests/test_commit_reveal_v3.py +++ b/tests/e2e_tests/test_commit_reveal_v3.py @@ -196,7 +196,7 @@ async def test_commit_and_reveal_weights_cr3(local_chain): revealed_weights_ = subtensor.weights(netuid=netuid) time.sleep(10) - + print("revealed weights", revealed_weights_) revealed_weights = revealed_weights_[0][1] # Assert correct weights were revealed assert weight_uids[0] == revealed_weights[0][0] diff --git a/tests/e2e_tests/utils/chain_interactions.py b/tests/e2e_tests/utils/chain_interactions.py index 4f6805f6d1..3255debc30 100644 --- a/tests/e2e_tests/utils/chain_interactions.py +++ b/tests/e2e_tests/utils/chain_interactions.py @@ -37,7 +37,6 @@ def sudo_set_hyperparameter_bool( wait_for_inclusion=True, wait_for_finalization=True, ) - response.process_events() return response.is_success @@ -62,7 +61,6 @@ def sudo_set_hyperparameter_values( wait_for_inclusion=True, wait_for_finalization=True, ) - response.process_events() if return_error_message: return response.is_success, response.error_message @@ -87,7 +85,6 @@ def add_stake( response = substrate.submit_extrinsic( extrinsic, wait_for_finalization=True, wait_for_inclusion=True ) - response.process_events() return response.is_success @@ -106,7 +103,6 @@ def register_subnet(substrate: "SubstrateInterface", wallet: "Wallet") -> bool: response = substrate.submit_extrinsic( extrinsic, wait_for_finalization=True, wait_for_inclusion=True ) - response.process_events() return response.is_success @@ -216,7 +212,6 @@ def sudo_set_admin_utils( 
wait_for_inclusion=True, wait_for_finalization=True, ) - response.process_events() if return_error_message: return response.is_success, response.error_message @@ -246,7 +241,6 @@ async def root_set_subtensor_hyperparameter_values( wait_for_inclusion=True, wait_for_finalization=True, ) - response.process_events() if return_error_message: return response.is_success, response.error_message diff --git a/tests/integration_tests/test_metagraph_integration.py b/tests/integration_tests/test_metagraph_integration.py index 34bf4f590e..4ec58285ee 100644 --- a/tests/integration_tests/test_metagraph_integration.py +++ b/tests/integration_tests/test_metagraph_integration.py @@ -14,6 +14,7 @@ # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. +from unittest import mock import bittensor import torch @@ -48,13 +49,19 @@ def test_sync_block_0(self): self.metagraph.sync(lite=True, block=0, subtensor=self.sub) def test_load_sync_save(self): - self.metagraph.sync(lite=True, subtensor=self.sub) - self.metagraph.save() - self.metagraph.load() - self.metagraph.save() + with mock.patch.object( + self.sub.async_subtensor, "neurons_lite", return_value=[] + ): + self.metagraph.sync(lite=True, subtensor=self.sub) + self.metagraph.save() + self.metagraph.load() + self.metagraph.save() def test_load_sync_save_from_torch(self): - self.metagraph.sync(lite=True, subtensor=self.sub) + with mock.patch.object( + self.sub.async_subtensor, "neurons_lite", return_value=[] + ): + self.metagraph.sync(lite=True, subtensor=self.sub) def deprecated_save_torch(metagraph): save_directory = get_save_dir(metagraph.network, metagraph.netuid) diff --git a/tests/unit_tests/extrinsics/asyncex/test_commit_reveal.py b/tests/unit_tests/extrinsics/asyncex/test_commit_reveal.py index c9e9d761da..1dd7e6aab9 100644 --- a/tests/unit_tests/extrinsics/asyncex/test_commit_reveal.py +++ b/tests/unit_tests/extrinsics/asyncex/test_commit_reveal.py @@ -191,7 +191,9 @@ async def test_commit_reveal_v3_extrinsic_success_with_torch( async_commit_reveal, "_do_commit_reveal_v3", return_value=(True, "Success") ) mock_block = mocker.patch.object( - subtensor.substrate, "get_block_number", return_value=1 + subtensor.substrate, + "get_block", + return_value={"header": {"number": 1, "hash": "fakehash"}}, ) mock_hyperparams = mocker.patch.object( subtensor, @@ -223,7 +225,7 @@ async def test_commit_reveal_v3_extrinsic_success_with_torch( version_key=async_commit_reveal.version_as_int, tempo=mock_hyperparams.return_value.tempo, netuid=fake_netuid, - current_block=mock_block.return_value, + current_block=mock_block.return_value["header"]["number"], ) mock_do_commit_reveal_v3.assert_awaited_once_with( subtensor=subtensor, diff --git a/tests/unit_tests/test_metagraph.py b/tests/unit_tests/test_metagraph.py index 391ebcc5dd..0a95bbbbc7 100644 --- a/tests/unit_tests/test_metagraph.py +++ b/tests/unit_tests/test_metagraph.py @@ -1,5 +1,6 @@ import asyncio import copy +from functools import partial from unittest.mock import Mock import numpy as np @@ -8,6 +9,7 @@ from bittensor.core import settings from bittensor.core.metagraph import Metagraph from bittensor.core.subtensor import Subtensor +from bittensor.utils import execute_coroutine @pytest.fixture @@ -120,7 +122,10 @@ def mock_subtensor(mocker): subtensor.async_subtensor = mocker.AsyncMock( 
get_current_block=mocker.AsyncMock(return_value=601) ) - subtensor.event_loop = asyncio.get_event_loop() + subtensor.event_loop = asyncio.new_event_loop() + subtensor.execute_coroutine = partial( + execute_coroutine, event_loop=subtensor.event_loop + ) return subtensor diff --git a/tests/unit_tests/test_subtensor.py b/tests/unit_tests/test_subtensor.py index fa4f1cca67..259893f878 100644 --- a/tests/unit_tests/test_subtensor.py +++ b/tests/unit_tests/test_subtensor.py @@ -11,7 +11,7 @@ def test_methods_comparable(mocker): subtensor = Subtensor() # methods which lives in sync subtensor only - excluded_subtensor_methods = ["async_subtensor", "event_loop"] + excluded_subtensor_methods = ["async_subtensor", "event_loop", "execute_coroutine"] # methods which lives in async subtensor only excluded_async_subtensor_methods = [ "determine_block_hash",
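
Reviewer note (not part of the patch): every sync wrapper touched in this diff funnels through the new event-loop helpers added to bittensor/utils (event_loop_is_running, get_event_loop, execute_coroutine). The standalone Python sketch below illustrates that sync-over-async pattern under simplified assumptions: the helper bodies here are stand-ins rather than the exact implementations in the patch, and AsyncThing/SyncThing are hypothetical placeholders for AsyncSubtensor/Subtensor.

import asyncio
from typing import Any, Coroutine, Optional


def event_loop_is_running() -> Optional[asyncio.AbstractEventLoop]:
    # Return the currently running loop if there is one, otherwise None.
    try:
        return asyncio.get_running_loop()
    except RuntimeError:
        return None


def get_event_loop() -> asyncio.AbstractEventLoop:
    # Reuse a running loop when present; otherwise create one for this thread.
    if loop := event_loop_is_running():
        return loop
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    return loop


def execute_coroutine(
    coroutine: Coroutine, event_loop: Optional[asyncio.AbstractEventLoop] = None
) -> Any:
    # Drive a coroutine to completion from synchronous code on one shared loop.
    loop = event_loop or get_event_loop()
    return loop.run_until_complete(coroutine)


class AsyncThing:
    # Hypothetical stand-in for an async implementation such as AsyncSubtensor.
    async def block(self) -> int:
        await asyncio.sleep(0)
        return 601


class SyncThing:
    # Sync facade over the async implementation, in the spirit of Subtensor -> AsyncSubtensor.
    def __init__(self) -> None:
        self.event_loop = get_event_loop()
        self._async = AsyncThing()

    def block(self) -> int:
        return execute_coroutine(self._async.block(), self.event_loop)


if __name__ == "__main__":
    print(SyncThing().block())  # prints 601

Running the script prints 601, mirroring how methods on the sync wrapper drive their async counterparts on a single shared event loop rather than creating a fresh loop per call.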