Commit 439911b
disable disable_abstract_method [pr] (tinygrad#7815)
geohot authored Nov 21, 2024
1 parent c5d458c commit 439911b
Showing 10 changed files with 14 additions and 14 deletions.
2 changes: 1 addition & 1 deletion .pylintrc
@@ -54,7 +54,7 @@ confidence=
 # --enable=similarities". If you want to run only the classes checker, but have
 # no Warning level messages displayed, use"--disable=all --enable=classes
 # --disable=W"
-disable=C,R,W0613,W0511,W0212,W0201,W0106,W0603,W0621,W0703,W1201,W1203,E1136,W1514,E1101,W0221,W0105,E0401
+disable=C,R,W0613,W0511,W0212,W0201,W0106,W0603,W0621,W0703,W1201,W1203,E1136,W1514,E1101,W0221,W0105,E0401,abstract-method
 # E1101 for function binding
 # W0221 for Function class
 # W0105 for comment strings
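For context: pylint's abstract-method check (message ID W0223, symbolic name abstract-method) fires when a subclass inherits a method pylint considers abstract — including one whose body simply raises NotImplementedError, the pattern tinygrad's Allocator and HWQueue hierarchies use deliberately. A minimal sketch of what the check flags (illustrative code, not from the repo):

class Base:
  # pylint treats a body that only raises NotImplementedError as abstract
  def _alloc(self, size:int): raise NotImplementedError("need alloc")

class Cache(Base):  # W0223: method '_alloc' is abstract but not overridden
  pass

With abstract-method added to the rcfile's disable list, the warning is off repo-wide, so every per-class "# pylint: disable=abstract-method" pragma below becomes redundant and is deleted.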
2 changes: 1 addition & 1 deletion tinygrad/device.py
@@ -151,7 +151,7 @@ def _copyout(self, dest:memoryview, src): raise NotImplementedError("need copyou
   # def _offset(self, buf, size:int, offset:int):
   # def _transfer(self, dest, src, sz:int, src_dev, dest_dev):

-class LRUAllocator(Allocator): # pylint: disable=abstract-method
+class LRUAllocator(Allocator):
   """
   The LRU Allocator is responsible for caching buffers.
   It ensures that buffers are not freed until it is absolutely necessary, optimizing performance.
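The docstring above states the caching policy; as a hedged sketch of the idea (simplified, with hypothetical raw_alloc/raw_free callables — not tinygrad's implementation), an LRU-style allocator parks freed buffers in a size-keyed pool and reuses them instead of releasing them immediately:

from collections import defaultdict

class SketchLRUAllocator:
  def __init__(self, raw_alloc, raw_free):
    self.raw_alloc, self.raw_free, self.cache = raw_alloc, raw_free, defaultdict(list)
  def alloc(self, size:int):
    # reuse a cached buffer of this size if one exists, else really allocate
    return self.cache[size].pop() if self.cache[size] else self.raw_alloc(size)
  def free(self, buf, size:int):
    # don't release; keep the buffer for the next alloc of the same size
    self.cache[size].append(buf)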
4 changes: 2 additions & 2 deletions tinygrad/engine/jit.py
@@ -69,7 +69,7 @@ def get_input_replace(jit_cache: List[ExecItem], input_rawbuffers:List[Buffer])
         input_replace[(j,i)] = input_rawbuffers.index(a)
   return input_replace

-class GraphRunner(Runner): # pylint: disable=abstract-method
+class GraphRunner(Runner):
   def __init__(self, jit_cache: List[ExecItem], input_rawbuffers: List[Buffer], var_vals: Dict[Variable, int]):
     self.jit_cache = jit_cache
     self.input_replace:Dict[Tuple[int, int], int] = get_input_replace(jit_cache, input_rawbuffers)
@@ -128,7 +128,7 @@ def _access_resources(self, rawbufs:List[Buffer], write:List[int], new_dependenc
     return list({id(x):x for x in wait_nodes}.values())

 # a marker for your graph supporting multiple devices of the same type
-class MultiGraphRunner(GraphRunner): pass # pylint: disable=abstract-method
+class MultiGraphRunner(GraphRunner): pass

 ReturnType = TypeVar('ReturnType')
 @dataclass
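The first hunk's context shows what get_input_replace computes: a map from (kernel index, buffer-slot index) to the position of that buffer in the JIT's input list, so cached kernels can be re-pointed at fresh inputs on replay. A standalone re-creation with plain lists standing in for ExecItem.bufs (illustrative, not imported from tinygrad):

def get_input_replace(jit_cache, input_rawbuffers):
  input_replace = {}
  for j, ji in enumerate(jit_cache):
    for i, a in enumerate(ji):
      if a in input_rawbuffers: input_replace[(j, i)] = input_rawbuffers.index(a)
  return input_replace

# two "kernels", each a list of buffers; "inA"/"inB" stand in for real Buffers
print(get_input_replace([["inA", "tmp"], ["tmp", "inB"]], ["inA", "inB"]))
# -> {(0, 0): 0, (1, 1): 1}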
2 changes: 1 addition & 1 deletion tinygrad/ops.py
@@ -72,7 +72,7 @@ def __ge__(self, x): return self.ge(x)
   def __le__(self, x): return self.le(x)
   # NOTE: __eq__ isn't overridden, and means the same thing as is by default

-class MathTrait(SimpleMathTrait): # pylint: disable=abstract-method
+class MathTrait(SimpleMathTrait):
   # TODO: move to Tensor when new backward is done
   def lshift(self, x, reverse=False): return self._binop(Ops.SHL, x, reverse)
   def rshift(self, x, reverse=False): return self._binop(Ops.SHR, x, reverse)
4 changes: 2 additions & 2 deletions tinygrad/runtime/ops_amd.py
@@ -54,7 +54,7 @@ def wait(self, value:int, timeout:int=getenv("HCQDEV_WAIT_TIMEOUT_MS", 30000)):
       kfd.AMDKFD_IOC_WAIT_EVENTS(AMDDevice.kfd, events_ptr=ctypes.addressof(self._evt_array), num_events=1, wait_for_all=1, timeout=1000)
     raise RuntimeError(f"wait_signal: not set to {value}, but {self._signal[0]}, {timeout} ms TIMEOUT!")

-class AMDComputeQueue(HWQueue): # pylint: disable=abstract-method
+class AMDComputeQueue(HWQueue):
   def __init__(self):
     self.cmd_idx_to_local_offset, self.cmd_idx_to_global_offset, self.cmd_idx_to_dispatch_packet = {}, {}, {}
     super().__init__()
@@ -184,7 +184,7 @@ def _submit(self, dev:AMDDevice):
     dev.compute_queue.doorbell[0] = dev.compute_queue.put_value

 SDMA_MAX_COPY_SIZE = 0x400000
-class AMDCopyQueue(HWQueue): # pylint: disable=abstract-method
+class AMDCopyQueue(HWQueue):
   def __init__(self):
     self.internal_cmd_sizes, self.copy_cmds_per_copy = [], {}
     super().__init__()
2 changes: 1 addition & 1 deletion tinygrad/runtime/ops_npy.py
@@ -2,7 +2,7 @@
 from tinygrad.helpers import flat_mv
 from tinygrad.device import Compiled, Allocator

-class NpyAllocator(Allocator): # pylint: disable=abstract-method
+class NpyAllocator(Allocator):
   def _copyout(self, dest:memoryview, src:np.ndarray): dest[:] = flat_mv(np.require(src, requirements='C').data)

 class NpyDevice(Compiled):
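The surviving _copyout line leans on two numpy facts: np.require(src, requirements='C') returns a C-contiguous array (copying only when needed), and a contiguous array's .data memoryview can then be copied into the destination byte-for-byte — tinygrad's flat_mv helper does the flattening. A standalone approximation using only numpy:

import numpy as np

src = np.arange(6).reshape(2, 3)[:, ::2]  # non-contiguous view
c = np.require(src, requirements='C')     # contiguous (copies here, since src isn't)
dest = memoryview(bytearray(c.nbytes))
dest[:] = c.data.cast('B')                # byte-for-byte copy, like flat_mv(c.data)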
6 changes: 3 additions & 3 deletions tinygrad/runtime/ops_nv.py
@@ -83,7 +83,7 @@ def _get_value(self) -> int: return self._signal[0]
   def _get_timestamp(self) -> decimal.Decimal: return decimal.Decimal(self._signal[1]) / decimal.Decimal(1000)
   def _set_value(self, new_value:int): self._signal[0] = new_value

-class NVCommandQueue(HWQueue[NVSignal, 'NVDevice', 'NVProgram', 'NVArgsState']): # pylint: disable=abstract-method
+class NVCommandQueue(HWQueue[NVSignal, 'NVDevice', 'NVProgram', 'NVArgsState']):
   def __del__(self):
     if self.binded_device is not None: self.binded_device.allocator.free(self.hw_page, self.hw_page.size, BufferSpec(cpu_access=True, nolru=True))

@@ -132,7 +132,7 @@ def _submit_to_gpfifo(self, dev:NVDevice, gpfifo:GPFifo):
     dev.gpu_mmio[0x90 // 4] = gpfifo.token
     gpfifo.put_value += 1

-class NVComputeQueue(NVCommandQueue): # pylint: disable=abstract-method
+class NVComputeQueue(NVCommandQueue):
   def __init__(self):
     self.cmd_idx_to_qmd, self.cmd_idx_to_signal_id, self.cmd_idx_to_global_dims, self.cmd_idx_to_local_dims = {}, {}, {}, {}
     super().__init__()
@@ -187,7 +187,7 @@ def _update_signal(self, cmd_idx, signal:Optional[NVSignal]=None, value=None):

   def _submit(self, dev): self._submit_to_gpfifo(dev, cast(NVDevice, dev).compute_gpfifo)

-class NVCopyQueue(NVCommandQueue): # pylint: disable=abstract-method
+class NVCopyQueue(NVCommandQueue):
   def _copy(self, dest, src, copy_size):
     self.q += [nvmethod(4, nv_gpu.NVC6B5_OFFSET_IN_UPPER, 4), *data64(src), *data64(dest)]
     self.q += [nvmethod(4, nv_gpu.NVC6B5_LINE_LENGTH_IN, 1), copy_size]
2 changes: 1 addition & 1 deletion tinygrad/runtime/ops_qcom.py
@@ -44,7 +44,7 @@ def _get_value(self) -> int: return self._signal[0]
   def _get_timestamp(self) -> decimal.Decimal: return decimal.Decimal(self._signal[1]) / decimal.Decimal(19.2) # based on the 19.2MHz always-on timer
   def _set_value(self, new_value:int): self._signal[0] = new_value

-class QCOMComputeQueue(HWQueue): # pylint: disable=abstract-method
+class QCOMComputeQueue(HWQueue):
   def __init__(self):
     self.cmd_idx_to_dims = {}
     super().__init__()
2 changes: 1 addition & 1 deletion tinygrad/runtime/support/hcq.py
@@ -474,7 +474,7 @@ def _wrap_timeline_signal(self):
 # Protocol for hcq compatible allocators for allocated buffers to contain VA address and it's size.
 class HCQBuffer(Protocol): va_addr:int; size:int # noqa: E702

-class HCQAllocator(LRUAllocator, Generic[DeviceType]): # pylint: disable=abstract-method
+class HCQAllocator(LRUAllocator, Generic[DeviceType]):
   """
   A base allocator class compatible with the HCQ (Hardware Command Queue) API.
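Because HCQBuffer is a typing.Protocol, any object that carries va_addr and size attributes satisfies it structurally — no inheritance from HCQBuffer needed. A minimal sketch with a hypothetical buffer type:

from typing import Protocol

class HCQBuffer(Protocol): va_addr:int; size:int  # mirrors the line in the hunk above

class MappedBuf:  # hypothetical stand-in for a driver allocation
  def __init__(self, va_addr:int, size:int): self.va_addr, self.size = va_addr, size

def describe(buf:HCQBuffer) -> str: return f"VA 0x{buf.va_addr:x}, {buf.size} bytes"

print(describe(MappedBuf(0x7f0000000000, 4096)))  # structural match, type-checks fine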
2 changes: 1 addition & 1 deletion tinygrad/tensor.py
@@ -99,7 +99,7 @@ def _broadcast_shape(*shapes:Tuple[sint, ...]) -> Tuple[sint, ...]:

 ReductionStr = Literal["mean", "sum", "none"]

-class Tensor(SimpleMathTrait): # pylint: disable=abstract-method
+class Tensor(SimpleMathTrait):
   """
   A `Tensor` is a multi-dimensional matrix containing elements of a single data type.
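Net effect for Tensor: it previously carried the pragma because it leaves at least one method pylint deems abstract unimplemented; with the rcfile entry, the class line stays clean and runtime behavior is unchanged. A quick sanity check (assuming a standard tinygrad install):

from tinygrad import Tensor

t = Tensor([[1, 2], [3, 4]])
print(t.shape)           # (2, 2)
print((t + t).tolist())  # [[2, 4], [6, 8]]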
