replaced flake8 and isort with ruff (#197)
* replaced flake8 and isort with ruff

* added isort checks to ruff

* Enforce B008

* Enforce E731

* Enforce C413

* Enforce C414

* Enforce C416

* Enforce C419

* Enforce B006

* Enforce B007

---------

Co-authored-by: WalzDS <[email protected]>
bertiqwerty and DavidWalz authored May 10, 2023
1 parent 2497bf9 commit b3a6bf9
Showing 32 changed files with 122 additions and 121 deletions.
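For orientation, the rule codes in the commit message map to ruff's ports of flake8-bugbear (B), pycodestyle (E), and flake8-comprehensions (C4). The sketch below pairs each rule family with the kind of rewrite that shows up in the diffs that follow; it is a hedged summary with invented names, not code from this repository:

```python
# Illustrative only; names are made up, not from the repo.
from typing import List, Optional


def bad_defaults(keys: List = [], size=len("abc")):  # B006 / B008: list and
    return keys, size            # call defaults are evaluated once and shared


def good_defaults(keys: Optional[List] = None):  # the fix used throughout
    return keys if keys is not None else []      # this commit: None sentinel


square = lambda x: x * x  # E731: assign a def, not a lambda


def square_fn(x):  # preferred: a proper name in tracebacks and repr
    return x * x


values = ["b", "a", "b"]

# C413 / C414: sorted() already returns a fresh list, so list() around it,
# or list() inside it, is redundant.
unique_sorted = sorted(set(values))

# C416: comprehensions that merely re-emit their iterable collapse to the
# constructor, e.g. dict(zip(...)) instead of {k: v for k, v in zip(...)}.
copied = list(values)
mapping = dict(zip(["a", "b"], [1, 2]))

# C419: any()/all() accept generators; building an inner list is wasted work.
all_strings = all(isinstance(v, str) for v in values)

# B007: loop-control variables that are never used are renamed to "_".
for _ in range(3):
    pass
```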
7 changes: 3 additions & 4 deletions .github/workflows/lint.yaml
@@ -9,7 +9,7 @@ on:
 
 jobs:
   style:
-    name: Check style with flake8, black, and isort
+    name: Check style with ruff and black
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2
@@ -18,10 +18,9 @@ jobs:
           python-version: 3.9
       - name: style
         run: |
-          pip3 install flake8==5.0.4 black==22.10.0 isort==5.12.0
-          flake8 --verbose bofire tests
+          pip3 install ruff==0.0.265 black==22.10.0
           black --check bofire tests
-          isort . --check-only --verbose
+          ruff check .
   pyright:
     name: Typechecking with pyright
22 changes: 6 additions & 16 deletions .pre-commit-config.yaml
@@ -7,21 +7,11 @@ repos:
         stages: [commit]
         entry: black
         types: [python]
-  - repo: https://github.com/pycqa/flake8
-    rev: 5.0.4
+  - repo: https://github.com/charliermarsh/ruff-pre-commit
+    # Ruff version.
+    rev: "v0.0.265"
     hooks:
-      - id: flake8
-        name: flake8
-        entry: flake8
-        types: [python]
-        require_serial: true
-  - repo: https://github.com/pycqa/isort
-    rev: 5.12.0
-    hooks:
-      - id: isort
-        name: isort
-        stages: [commit]
-        entry: isort
-        types: [python]
-
+      - id: ruff
+        args: [--fix, --exit-non-zero-on-fix]
 
 exclude: "README.md"
4 changes: 2 additions & 2 deletions bofire/benchmarks/aspen_benchmark.py
@@ -35,7 +35,7 @@ def __init__(
         filename: str,
         domain: Domain,
         paths: Dict[str, str],
-        additional_output_keys: List = [],
+        additional_output_keys: Optional[List] = None,
         translate_into_aspen_readable: Optional[
             Callable[[Domain, pd.DataFrame], pd.DataFrame]
         ] = None,
@@ -60,7 +60,7 @@ def __init__(
 
         self.translate_into_aspen_readable = translate_into_aspen_readable
         self._domain = domain
-        self.additional_output_keys = additional_output_keys
+        self.additional_output_keys = additional_output_keys or []
 
         for key in self.domain.get_feature_keys():
             # Check, if every input and output variable has a path to Aspen provided.
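One subtlety in the hunk above, worth noting because the commit uses two different None-handling idioms: `additional_output_keys or []` replaces every falsy argument, not just `None`, whereas the `is None` checks used elsewhere (e.g. in `single.py` below) pass an explicitly supplied empty list through. Both are fine here because the replacement value is empty anyway; a minimal sketch of the difference:

```python
def with_or(keys=None):
    return keys or []  # replaces None, [], 0, "" ...


def with_is_none(keys=None):
    return keys if keys is not None else []  # replaces only None


shared = []
assert with_or(shared) is not shared   # empty list swapped for a fresh one
assert with_is_none(shared) is shared  # empty list passed through
```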
15 changes: 8 additions & 7 deletions bofire/benchmarks/multi.py
@@ -174,12 +174,14 @@ class SnarBenchmark(Benchmark):
     Solving of a differential equation system with varying intitial values.
     """
 
-    def __init__(self, C_i: Optional[np.ndarray] = np.ndarray((1, 1))):
+    def __init__(self, C_i: Optional[np.ndarray] = None):
         """Initializes multiobjective test function object of type SnarBenchmark.
         Args:
             C_i (Optional[np.ndarray]): Input concentrations. Defaults to [1, 1]
         """
+        if C_i is None:
+            C_i = np.array([1, 1])
         self.C_i = C_i
 
         # Decision variables
@@ -224,7 +226,7 @@ def _f(self, candidates: pd.DataFrame) -> pd.DataFrame:
         """
         stys = []
         e_factors = []
-        for i, candidate in candidates.iterrows():
+        for _, candidate in candidates.iterrows():
             tau = float(candidate["tau"])
             equiv_pldn = float(candidate["equiv_pldn"])
             conc_dfnb = float(candidate["conc_dfnb"])
@@ -297,11 +299,10 @@ def _integrand(self, t, C, T):
         T_ref = 90 + 273.71  # Convert to deg K
         T = T + 273.71  # Convert to deg K
         # Need to convert from 10^-2 M^-1s^-1 to M^-1min^-1
-        k = (
-            lambda k_ref, E_a, temp: 0.6
-            * k_ref
-            * np.exp(-E_a / R * (1 / temp - 1 / T_ref))
-        )
+
+        def k(k_ref, E_a, temp):
+            return 0.6 * k_ref * np.exp(-E_a / R * (1 / temp - 1 / T_ref))
+
         k_a = k(57.9, 33.3, T)
         k_b = k(2.70, 35.3, T)
         k_c = k(0.865, 38.9, T)
4 changes: 3 additions & 1 deletion bofire/benchmarks/single.py
@@ -52,7 +52,7 @@ def __init__(
         lower: float = -32.768,
         upper: float = 32.768,
         best_possible_f: float = 0.0,
-        evaluated_points=[],
+        evaluated_points: Optional[list] = None,
     ):
         """Initializes benchmark function of type Ackley.
@@ -74,6 +74,8 @@
         self.lower = lower
         self.upper = upper
         self.best_possible_f = best_possible_f
+        if evaluated_points is None:
+            evaluated_points = []
         self.evaluated_points = evaluated_points
 
         input_feature_list = []
2 changes: 1 addition & 1 deletion bofire/data_models/domain/domain.py
@@ -305,7 +305,7 @@ def get_nchoosek_combinations(self, exhaustive: bool = False):  # noqa: C901
                     used_features_list.extend(itertools.combinations(con.features, n))
 
                 if con.none_also_valid:
-                    used_features_list.append(tuple([]))
+                    used_features_list.append(())
                 else:
                     used_features_list.extend(
                         itertools.combinations(con.features, con.max_count)
8 changes: 4 additions & 4 deletions bofire/data_models/domain/features.py
@@ -433,7 +433,7 @@ def _validate_transform_specs(self, specs: TInputTransformSpecs):
             raise ValueError("Unknown features specified in transform specs.")
         # next check that all values are of type CategoricalEncodingEnum
         if not (
-            all([isinstance(enc, CategoricalEncodingEnum) for enc in specs.values()])
+            all(isinstance(enc, CategoricalEncodingEnum) for enc in specs.values())
         ):
             raise ValueError("Unknown transform specified.")
         # next check that only Categoricalwithdescriptor have the value DESCRIPTOR
@@ -477,12 +477,12 @@ def get_bounds(
         upper = []
 
         for feat in self.get():
-            l, u = feat.get_bounds(  # type: ignore
+            lo, up = feat.get_bounds(  # type: ignore
                 transform_type=specs.get(feat.key),  # type: ignore
                 values=experiments[feat.key] if experiments is not None else None,
             )
-            lower += l
-            upper += u
+            lower += lo
+            upper += up
         return lower, upper
 
 
10 changes: 4 additions & 6 deletions bofire/data_models/features/categorical.py
@@ -42,7 +42,7 @@ def validate_categories_unique(cls, categories):
         Returns:
             List[str]: List of the categories
         """
-        categories = [name for name in categories]
+        categories = list(categories)
        if len(categories) != len(set(categories)):
             raise ValueError("categories must be unique")
         return categories
@@ -180,9 +180,7 @@ def get_possible_categories(self, values: pd.Series) -> list:
         Returns:
             list: list of possible categories
         """
-        return sorted(
-            list(set(list(set(values.tolist())) + self.get_allowed_categories()))
-        )
+        return sorted(set(list(set(values.tolist())) + self.get_allowed_categories()))
 
     def to_onehot_encoding(self, values: pd.Series) -> pd.DataFrame:
         """Converts values to a one-hot encoding.
@@ -368,7 +366,7 @@ def validate_categories_unique(cls, categories):
         Returns:
             List[str]: List of the categories
         """
-        categories = [name for name in categories]
+        categories = list(categories)
         if len(categories) != len(set(categories)):
             raise ValueError("categories must be unique")
         return categories
@@ -386,7 +384,7 @@ def validate_objective(cls, objective, values):
 
     def to_dict(self) -> Dict:
         """Returns the catergories and corresponding objective values as dictionary"""
-        return {cat: obj for cat, obj in zip(self.categories, self.objective)}
+        return dict(zip(self.categories, self.objective))
 
     def __call__(self, values: pd.Series) -> pd.Series:
         return values.map(self.to_dict()).astype(float)
10 changes: 5 additions & 5 deletions bofire/data_models/features/descriptor.py
@@ -43,7 +43,7 @@ def descriptors_to_keys(cls, descriptors):
         Returns:
             List[str]: List of valid keys
         """
-        return [name for name in descriptors]
+        return list(descriptors)
 
     @root_validator(pre=False, skip_on_failure=True)
     def validate_list_lengths(cls, values):
@@ -104,7 +104,7 @@ def validate_descriptors(cls, descriptors):
         Returns:
             List[str]: List of the descriptors
         """
-        descriptors = [name for name in descriptors]
+        descriptors = list(descriptors)
         if len(descriptors) != len(set(descriptors)):
             raise ValueError("descriptors must be unique")
         return descriptors
@@ -133,7 +133,7 @@ def validate_values(cls, v, values):
         a = np.array(v)
         for i, d in enumerate(values["descriptors"]):
             if len(set(a[:, i])) == 1:
-                raise ValueError("No variation for descriptor {d}.")
+                raise ValueError(f"No variation for descriptor {d}.")
         return v
 
     def to_df(self):
@@ -142,7 +142,7 @@ def to_df(self):
         Returns:
             pd.DataFrame: tabular overview of the feature as DataFrame
         """
-        data = {cat: values for cat, values in zip(self.categories, self.values)}
+        data = dict(zip(self.categories, self.values))
         return pd.DataFrame.from_dict(data, orient="index", columns=self.descriptors)
 
     def fixed_value(
@@ -234,7 +234,7 @@ def to_descriptor_encoding(self, values: pd.Series) -> pd.DataFrame:
         """
         return pd.DataFrame(
             data=values.map(
-                {cat: value for cat, value in zip(self.categories, self.values)}
+                dict(zip(self.categories, self.values))
             ).values.tolist(),  # type: ignore
             columns=[f"{self.key}{_CAT_SEP}{d}" for d in self.descriptors],
             index=values.index,
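The `validate_values` hunk above is a genuine bug fix rather than a style cleanup: without the `f` prefix, Python treats the braces as literal characters, so the offending descriptor's name never appeared in the error message. A quick illustration (the descriptor name is made up):

```python
d = "molecular_weight"  # hypothetical descriptor name
print("No variation for descriptor {d}.")
# -> No variation for descriptor {d}.
print(f"No variation for descriptor {d}.")
# -> No variation for descriptor molecular_weight.
```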
4 changes: 2 additions & 2 deletions bofire/plot/objective.py
@@ -13,7 +13,7 @@ def plot_objective_plotly(
     lower: float,
     upper: float,
     values: Optional[pd.Series] = None,
-    layout_options: Dict = {},
+    layout_options: Optional[Dict] = None,
 ):
     """Plot the assigned objective.
Expand Down Expand Up @@ -43,7 +43,7 @@ def plot_objective_plotly(
else:
fig = fig1

if len(layout_options) > 0:
if layout_options is not None:
fig.update_layout(layout_options)

return fig
6 changes: 3 additions & 3 deletions bofire/plot/prior.py
@@ -1,4 +1,4 @@
-from typing import Dict
+from typing import Dict, Optional
 
 import numpy as np
 import plotly.express as px
@@ -12,7 +12,7 @@ def plot_prior_pdf_plotly(
     prior: AnyPrior,
     lower: float,
     upper: float,
-    layout_options: Dict = {},
+    layout_options: Optional[Dict] = None,
 ):
     """Plot the probability density function of the prior with plotly.
@@ -33,7 +33,7 @@
         y=np.exp(priors.map(prior).log_prob(torch.from_numpy(x)).numpy()),
     )
 
-    if len(layout_options) > 0:
+    if layout_options is not None:
         fig.update_layout(layout_options)
 
     return fig
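A behavioral footnote on both plot hunks: the old guard `len(layout_options) > 0` skipped an empty dict, while the new `layout_options is not None` lets an explicitly passed `{}` reach `fig.update_layout({})`. For plotly that call changes nothing, so the refactor is safe; the sketch below, with a stand-in for `update_layout`, pins down the difference between the guards:

```python
calls = []


def update_layout(options):
    calls.append(options)  # stands in for fig.update_layout


def render(layout_options=None):
    # new-style guard from this commit
    if layout_options is not None:
        update_layout(layout_options)


render()                    # default: no update call
render({})                  # explicit empty dict now runs, a plotly no-op
render({"title": "Prior"})
assert calls == [{}, {"title": "Prior"}]
```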
14 changes: 7 additions & 7 deletions bofire/strategies/doe/design.py
@@ -28,7 +28,7 @@ def find_local_max_ipopt(
     model_type: Union[str, Formula],
     n_experiments: Optional[int] = None,
     delta: float = 1e-7,
-    ipopt_options: Dict = {},
+    ipopt_options: Optional[Dict] = None,
     sampling: Optional[pd.DataFrame] = None,
     fixed_experiments: Optional[pd.DataFrame] = None,
     objective: OptimalityCriterionEnum = OptimalityCriterionEnum.D_OPTIMALITY,
@@ -41,7 +41,7 @@
         n_experiments (int): Number of experiments. By default the value corresponds to
             the number of model terms - dimension of ker() + 3.
         delta (float): Regularization parameter. Default value is 1e-3.
-        ipopt_options (Dict): options for IPOPT. For more information see [this link](https://coin-or.github.io/Ipopt/OPTIONS.html)
+        ipopt_options (Dict, optional): options for IPOPT. For more information see [this link](https://coin-or.github.io/Ipopt/OPTIONS.html)
         sampling (Sampling, np.ndarray): Sampling class or a np.ndarray object containing the initial guess.
         fixed_experiments (pd.DataFrame): dataframe containing experiments that will be definitely part of the design.
             Values are set before the optimization.
@@ -76,11 +76,9 @@
 
     # check that NChooseK constraints only impose an upper bound on the number of nonzero components (and no lower bound)
     assert all(
-        [
-            c.min_count == 0
-            for c in domain.constraints
-            if isinstance(c, NChooseKConstraint)
-        ]
+        c.min_count == 0
+        for c in domain.constraints
+        if isinstance(c, NChooseKConstraint)
     ), "NChooseKConstraint with min_count !=0 is not supported!"
 
     # determine number of experiments (only relevant if n_experiments is not provided by the user)
@@ -140,6 +138,8 @@
             x0[i] = val
 
     # set ipopt options
+    if ipopt_options is None:
+        ipopt_options = {}
     _ipopt_options = {"maxiter": 500, "disp": 0}
     for key in ipopt_options.keys():
         _ipopt_options[key] = ipopt_options[key]
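A side note on the options block above: the key-by-key copy predates this commit and still works, but the same defaults-plus-overrides merge is commonly written with dict unpacking. A minimal sketch with invented option values:

```python
defaults = {"maxiter": 500, "disp": 0}
user_options = {"maxiter": 1000}

# equivalent to looping over user_options and assigning key by key
merged = {**defaults, **user_options}
assert merged == {"maxiter": 1000, "disp": 0}

# on Python 3.9+ the union operator reads even more directly
assert merged == defaults | user_options
```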
2 changes: 1 addition & 1 deletion bofire/strategies/doe/utils.py
@@ -474,7 +474,7 @@ def nchoosek_constraints_as_bounds(
     ]
 
     # find and shuffle all combinations of elements of ind of length max_active
-    ind = np.array([c for c in combinations(ind, r=n_inactive)])
+    ind = np.array(list(combinations(ind, r=n_inactive)))
     np.random.shuffle(ind)
 
     # set bounds to zero in each experiments for the variables that should be inactive
2 changes: 1 addition & 1 deletion bofire/surrogates/cloudpickle_module.py
@@ -5,8 +5,8 @@
 try:
     from pickle import Unpickler  # noqa: F401
 
-    from cloudpickle import CloudPickler as Pickler  # noqa: F401
     from cloudpickle import *  # noqa: F401, F403,
+    from cloudpickle import CloudPickler as Pickler  # noqa: F401
 
 except ModuleNotFoundError:
     warnings.warn("Cloudpicke is not available.")
4 changes: 2 additions & 2 deletions bofire/surrogates/diagnostics.py
@@ -293,9 +293,9 @@ def validate_results(cls, v, values):
             )
         # check columns of X
         if v[0].X is not None:
-            cols = sorted(list(v[0].X.columns))
+            cols = sorted(v[0].X.columns)
             for i in v:
-                if sorted(list(i.X.columns)) != cols:
+                if sorted(i.X.columns) != cols:
                     raise ValueError("Columns of X do not match.")
         return v
 
4 changes: 2 additions & 2 deletions bofire/surrogates/mlp.py
@@ -132,7 +132,7 @@ def fit_mlp(
     optimizer = torch.optim.Adam(mlp.parameters(), lr=lr, weight_decay=weight_decay)
     for _ in range(n_epoches):
         current_loss = 0.0
-        for i, data in enumerate(train_loader, 0):
+        for data in train_loader:
             # Get and prepare inputs
             inputs, targets = data
             if len(targets.shape) == 1:
@@ -181,7 +181,7 @@ def _fit(self, X: pd.DataFrame, Y: pd.DataFrame):
 
         mlps = []
         subsample_size = round(self.subsample_fraction * X.shape[0])
-        for i in range(self.n_estimators):
+        for _ in range(self.n_estimators):
             # resample X and Y
             sample_idx = np.random.choice(X.shape[0], replace=True, size=subsample_size)
             tX = torch.from_numpy(transformed_X.values[sample_idx]).to(**tkwargs)
2 changes: 1 addition & 1 deletion bofire/surrogates/surrogate.py
@@ -59,7 +59,7 @@ def validate_predictions(self, predictions: pd.DataFrame) -> pd.DataFrame:
         expected_cols = [
             f"{key}_{t}" for key in self.outputs.get_keys() for t in ["pred", "sd"]
         ]
-        if sorted(list(predictions.columns)) != sorted(expected_cols):
+        if sorted(predictions.columns) != sorted(expected_cols):
             raise ValueError(
                 f"Predictions are ill-formatted. Expected: {expected_cols}, got: {list(predictions.columns)}."
             )
[Diffs for the remaining changed files were not loaded in this view.]