diff --git a/altair/_magics.py b/altair/_magics.py index bac190aa3..1e3fd4c1b 100644 --- a/altair/_magics.py +++ b/altair/_magics.py @@ -45,7 +45,7 @@ def _prepare_data(data, data_transformers): elif isinstance(data, str): return {"url": data} else: - warnings.warn("data of type {} not recognized".format(type(data)), stacklevel=1) + warnings.warn(f"data of type {type(data)} not recognized", stacklevel=1) return data @@ -53,14 +53,14 @@ def _get_variable(name): """Get a variable from the notebook namespace.""" ip = IPython.get_ipython() if ip is None: - raise ValueError( + msg = ( "Magic command must be run within an IPython " "environment, in which get_ipython() is defined." ) + raise ValueError(msg) if name not in ip.user_ns: - raise NameError( - "argument '{}' does not match the name of any defined variable".format(name) - ) + msg = f"argument '{name}' does not match the name of any defined variable" + raise NameError(msg) return ip.user_ns[name] @@ -95,10 +95,11 @@ def vegalite(line, cell): try: spec = json.loads(cell) except json.JSONDecodeError as err: - raise ValueError( + msg = ( "%%vegalite: spec is not valid JSON. " "Install pyyaml to parse spec as yaml" - ) from err + ) + raise ValueError(msg) from err else: spec = yaml.load(cell, Loader=yaml.SafeLoader) diff --git a/altair/expr/core.py b/altair/expr/core.py index 9cc258c8b..df4135d5a 100644 --- a/altair/expr/core.py +++ b/altair/expr/core.py @@ -184,35 +184,33 @@ def __getitem__(self, val): class UnaryExpression(Expression): def __init__(self, op, val): - super(UnaryExpression, self).__init__(op=op, val=val) + super().__init__(op=op, val=val) def __repr__(self): - return "({op}{val})".format(op=self.op, val=_js_repr(self.val)) + return f"({self.op}{_js_repr(self.val)})" class BinaryExpression(Expression): def __init__(self, op, lhs, rhs): - super(BinaryExpression, self).__init__(op=op, lhs=lhs, rhs=rhs) + super().__init__(op=op, lhs=lhs, rhs=rhs) def __repr__(self): - return "({lhs} {op} {rhs})".format( - op=self.op, lhs=_js_repr(self.lhs), rhs=_js_repr(self.rhs) - ) + return f"({_js_repr(self.lhs)} {self.op} {_js_repr(self.rhs)})" class FunctionExpression(Expression): def __init__(self, name, args): - super(FunctionExpression, self).__init__(name=name, args=args) + super().__init__(name=name, args=args) def __repr__(self): args = ",".join(_js_repr(arg) for arg in self.args) - return "{name}({args})".format(name=self.name, args=args) + return f"{self.name}({args})" class ConstExpression(Expression): def __init__(self, name, doc): - self.__doc__ = """{}: {}""".format(name, doc) - super(ConstExpression, self).__init__(name=name, doc=doc) + self.__doc__ = f"""{name}: {doc}""" + super().__init__(name=name, doc=doc) def __repr__(self): return str(self.name) @@ -220,15 +218,15 @@ def __repr__(self): class GetAttrExpression(Expression): def __init__(self, group, name): - super(GetAttrExpression, self).__init__(group=group, name=name) + super().__init__(group=group, name=name) def __repr__(self): - return "{}.{}".format(self.group, self.name) + return f"{self.group}.{self.name}" class GetItemExpression(Expression): def __init__(self, group, name): - super(GetItemExpression, self).__init__(group=group, name=name) + super().__init__(group=group, name=name) def __repr__(self): - return "{}[{!r}]".format(self.group, self.name) + return f"{self.group}[{self.name!r}]" diff --git a/altair/expr/funcs.py b/altair/expr/funcs.py index c4a73f4c9..fa1477d9c 100644 --- a/altair/expr/funcs.py +++ b/altair/expr/funcs.py @@ -172,13 +172,13 @@ class 
ExprFunc: def __init__(self, name, doc): self.name = name self.doc = doc - self.__doc__ = """{}(*args)\n {}""".format(name, doc) + self.__doc__ = f"""{name}(*args)\n {doc}""" def __call__(self, *args): return FunctionExpression(self.name, args) def __repr__(self): - return "<function expr.{}(*args)>".format(self.name) + return f"<function expr.{self.name}(*args)>" def _populate_namespace(): diff --git a/altair/jupyter/__init__.py b/altair/jupyter/__init__.py index 651ab11e4..c57815a2f 100644 --- a/altair/jupyter/__init__.py +++ b/altair/jupyter/__init__.py @@ -7,7 +7,7 @@ # when anywidget is not installed class JupyterChart: def __init__(self, *args, **kwargs): - raise ImportError( + msg = ( "The Altair JupyterChart requires the anywidget \n" "Python package which may be installed using pip with\n" " pip install anywidget\n" @@ -15,6 +15,7 @@ def __init__(self, *args, **kwargs): " conda install -c conda-forge anywidget\n" "Afterwards, you will need to restart your Python kernel." ) + raise ImportError(msg) else: from .jupyter_chart import JupyterChart # noqa: F401 diff --git a/altair/jupyter/jupyter_chart.py b/altair/jupyter/jupyter_chart.py index 0331c9820..7f5fdaf45 100644 --- a/altair/jupyter/jupyter_chart.py +++ b/altair/jupyter/jupyter_chart.py @@ -61,7 +61,8 @@ def __init__(self, trait_values): elif isinstance(value, IntervalSelection): traitlet_type = traitlets.Instance(IntervalSelection) else: - raise ValueError(f"Unexpected selection type: {type(value)}") + msg = f"Unexpected selection type: {type(value)}" + raise ValueError(msg) # Add the new trait. self.add_traits(**{key: traitlet_type}) @@ -82,10 +83,11 @@ def _make_read_only(self, change): """ if change["name"] in self.traits() and change["old"] != change["new"]: self._set_value(change["name"], change["old"]) - raise ValueError( + msg = ( "Selections may not be set from Python.\n" f"Attempted to set select: {change['name']}" ) + raise ValueError(msg) def _set_value(self, key, value): self.unobserve(self._make_read_only, names=key) @@ -278,7 +280,8 @@ def _on_change_chart(self, change): name=clean_name, value={}, store=[] ) else: - raise ValueError(f"Unexpected selection type {select.type}") + msg = f"Unexpected selection type {select.type}" + raise ValueError(msg) selection_watches.append(clean_name) initial_vl_selections[clean_name] = {"value": None, "store": []} else: diff --git a/altair/utils/_dfi_types.py b/altair/utils/_dfi_types.py index a76435e7f..6d5ca277b 100644 --- a/altair/utils/_dfi_types.py +++ b/altair/utils/_dfi_types.py @@ -76,7 +76,6 @@ def dtype(self) -> Dtype: - Data types not included: complex, Arrow-style null, binary, decimal, and nested (list, struct, map, union) dtypes. """ - pass # Have to use a generic Any return type as not all libraries who implement # the dataframe interchange protocol implement the TypedDict that is usually @@ -103,7 +102,6 @@ def describe_categorical(self) -> Any: TBD: are there any other in-memory representations that are needed? """ - pass class DataFrame(Protocol): @@ -136,19 +134,16 @@ def __dataframe__( necessary if a library supports strided buffers, given that this protocol specifies contiguous buffers. """ - pass def column_names(self) -> Iterable[str]: """ Return an iterator yielding the column names. """ - pass def get_column_by_name(self, name: str) -> Column: """ Return the column whose name is the indicated name. 
""" - pass def get_chunks(self, n_chunks: Optional[int] = None) -> Iterable["DataFrame"]: """ @@ -162,4 +157,3 @@ def get_chunks(self, n_chunks: Optional[int] = None) -> Iterable["DataFrame"]: Note that the producer must ensure that all columns are chunked the same way. """ - pass diff --git a/altair/utils/_importers.py b/altair/utils/_importers.py index b7fa8a958..950cb45db 100644 --- a/altair/utils/_importers.py +++ b/altair/utils/_importers.py @@ -9,18 +9,19 @@ def import_vegafusion() -> ModuleType: version = importlib_version("vegafusion") embed_version = importlib_version("vegafusion-python-embed") if version != embed_version or Version(version) < Version(min_version): - raise RuntimeError( + msg = ( "The versions of the vegafusion and vegafusion-python-embed packages must match\n" f"and must be version {min_version} or greater.\n" f"Found:\n" f" - vegafusion=={version}\n" f" - vegafusion-python-embed=={embed_version}\n" ) + raise RuntimeError(msg) import vegafusion as vf # type: ignore return vf except ImportError as err: - raise ImportError( + msg = ( 'The "vegafusion" data transformer and chart.transformed_data feature requires\n' f"version {min_version} or greater of the 'vegafusion-python-embed' and 'vegafusion' packages.\n" "These can be installed with pip using:\n" @@ -29,7 +30,8 @@ def import_vegafusion() -> ModuleType: f' conda install -c conda-forge "vegafusion-python-embed>={min_version}" ' f'"vegafusion>={min_version}"\n\n' f"ImportError: {err.args[0]}" - ) from err + ) + raise ImportError(msg) from err def import_vl_convert() -> ModuleType: @@ -37,15 +39,16 @@ def import_vl_convert() -> ModuleType: try: version = importlib_version("vl-convert-python") if Version(version) < Version(min_version): - raise RuntimeError( + msg = ( f"The vl-convert-python package must be version {min_version} or greater. " f"Found version {version}" ) + raise RuntimeError(msg) import vl_convert as vlc return vlc except ImportError as err: - raise ImportError( + msg = ( f"The vl-convert Vega-Lite compiler and file export feature requires\n" f"version {min_version} or greater of the 'vl-convert-python' package. \n" f"This can be installed with pip using:\n" @@ -53,7 +56,8 @@ def import_vl_convert() -> ModuleType: "or conda:\n" f' conda install -c conda-forge "vl-convert-python>={min_version}"\n\n' f"ImportError: {err.args[0]}" - ) from err + ) + raise ImportError(msg) from err def vl_version_for_vl_convert() -> str: @@ -70,15 +74,16 @@ def import_pyarrow_interchange() -> ModuleType: version = importlib_version("pyarrow") if Version(version) < Version(min_version): - raise RuntimeError( + msg = ( f"The pyarrow package must be version {min_version} or greater. " f"Found version {version}" ) + raise RuntimeError(msg) import pyarrow.interchange as pi return pi except ImportError as err: - raise ImportError( + msg = ( f"Usage of the DataFrame Interchange Protocol requires\n" f"version {min_version} or greater of the pyarrow package. \n" f"This can be installed with pip using:\n" @@ -86,7 +91,8 @@ def import_pyarrow_interchange() -> ModuleType: "or conda:\n" f' conda install -c conda-forge "pyarrow>={min_version}"\n\n' f"ImportError: {err.args[0]}" - ) from err + ) + raise ImportError(msg) from err def pyarrow_available() -> bool: diff --git a/altair/utils/_show.py b/altair/utils/_show.py index 0030570ac..5987b237c 100644 --- a/altair/utils/_show.py +++ b/altair/utils/_show.py @@ -26,10 +26,7 @@ def open_html_in_browser( Port to use. 
Defaults to a random port """ # Encode html to bytes - if isinstance(html, str): - html_bytes = html.encode("utf8") - else: - html_bytes = html + html_bytes = html.encode("utf8") if isinstance(html, str) else html browser = None @@ -69,5 +66,5 @@ def log_message(self, format, *args): server = HTTPServer( ("127.0.0.1", port if port is not None else 0), OneShotRequestHandler ) - browser.open("http://127.0.0.1:%s" % server.server_port) + browser.open(f"http://127.0.0.1:{server.server_port}") server.handle_request() diff --git a/altair/utils/_transformed_data.py b/altair/utils/_transformed_data.py index 7a616b9c5..387c0a8ca 100644 --- a/altair/utils/_transformed_data.py +++ b/altair/utils/_transformed_data.py @@ -90,11 +90,9 @@ def transformed_data(chart, row_limit=None, exclude=None): transformed data """ vf = import_vegafusion() - - if isinstance(chart, Chart): - # Add mark if none is specified to satisfy Vega-Lite - if chart.mark == Undefined: - chart = chart.mark_point() + # Add mark if none is specified to satisfy Vega-Lite + if isinstance(chart, Chart) and chart.mark == Undefined: + chart = chart.mark_point() # Deep copy chart so that we can rename marks without affecting caller chart = chart.copy(deep=True) @@ -119,7 +117,8 @@ def transformed_data(chart, row_limit=None, exclude=None): if chart_name in dataset_mapping: dataset_names.append(dataset_mapping[chart_name]) else: - raise ValueError("Failed to locate all datasets") + msg = "Failed to locate all datasets" + raise ValueError(msg) # Extract transformed datasets with VegaFusion datasets, warnings = vf.runtime.pre_transform_datasets( @@ -200,11 +199,12 @@ def name_views( elif isinstance(chart, _chart_class_mapping[ConcatChart]): subcharts = chart.concat else: - raise ValueError( + msg = ( "transformed_data accepts an instance of " "Chart, FacetChart, LayerChart, HConcatChart, VConcatChart, or ConcatChart\n" f"Received value of type: {type(chart)}" ) + raise ValueError(msg) chart_names: List[str] = [] for subchart in subcharts: @@ -444,7 +444,7 @@ def get_facet_mapping(group: dict, scope: Scope = ()) -> FacetMapping: for mark in mark_group.get("marks", []): if mark.get("type", None) == "group": # Get facet for this group - group_scope = scope + (group_index,) + group_scope = (*scope, group_index) facet = mark.get("from", {}).get("facet", None) if facet is not None: facet_name = facet.get("name", None) @@ -536,7 +536,7 @@ def get_datasets_for_view_names( name = mark.get("name", "") if mark.get("type", "") == "group": group_data_names = get_datasets_for_view_names( - group, vl_chart_names, facet_mapping, scope=scope + (group_index,) + group, vl_chart_names, facet_mapping, scope=(*scope, group_index) ) for k, v in group_data_names.items(): datasets.setdefault(k, v) diff --git a/altair/utils/_vegafusion_data.py b/altair/utils/_vegafusion_data.py index ce30e8d6d..da12df623 100644 --- a/altair/utils/_vegafusion_data.py +++ b/altair/utils/_vegafusion_data.py @@ -16,6 +16,7 @@ from altair.utils.core import DataFrameLike from altair.utils.data import DataType, ToValuesReturnType, MaxRowsError from altair.vegalite.data import default_data_transformer +import contextlib if TYPE_CHECKING: from vegafusion.runtime import ChartState # type: ignore @@ -125,11 +126,9 @@ def get_inline_tables(vega_spec: dict) -> Dict[str, DataFrameLike]: table_names = get_inline_table_names(vega_spec) tables = {} for table_name in table_names: - try: + # otherwise, named dataset that was provided by the user + with contextlib.suppress(KeyError): tables[table_name] = 
extracted_inline_tables.pop(table_name) - except KeyError: - # named dataset that was provided by the user - pass return tables @@ -164,7 +163,8 @@ def compile_to_vegafusion_chart_state( # Compile Vega-Lite spec to Vega compiler = vegalite_compilers.get() if compiler is None: - raise ValueError("No active vega-lite compiler plugin found") + msg = "No active vega-lite compiler plugin found" + raise ValueError(msg) vega_spec = compiler(vegalite_spec) @@ -214,7 +214,8 @@ def compile_with_vegafusion(vegalite_spec: dict) -> dict: # Compile Vega-Lite spec to Vega compiler = vegalite_compilers.get() if compiler is None: - raise ValueError("No active vega-lite compiler plugin found") + msg = "No active vega-lite compiler plugin found" + raise ValueError(msg) vega_spec = compiler(vegalite_spec) @@ -239,13 +240,14 @@ def compile_with_vegafusion(vegalite_spec: dict) -> dict: def handle_row_limit_exceeded(row_limit: int, warnings: list): for warning in warnings: if warning.get("type") == "RowLimitExceeded": - raise MaxRowsError( + msg = ( "The number of dataset rows after filtering and aggregation exceeds\n" f"the current limit of {row_limit}. Try adding an aggregation to reduce\n" "the size of the dataset that must be loaded into the browser. Or, disable\n" "the limit by calling alt.data_transformers.disable_max_rows(). Note that\n" "disabling this limit may cause the browser to freeze or crash." ) + raise MaxRowsError(msg) def using_vegafusion() -> bool: diff --git a/altair/utils/core.py b/altair/utils/core.py index 88d03b6e7..29839f705 100644 --- a/altair/utils/core.py +++ b/altair/utils/core.py @@ -38,6 +38,7 @@ from typing_extensions import ParamSpec from typing import Literal, Protocol, TYPE_CHECKING, runtime_checkable +import contextlib if TYPE_CHECKING: from pandas.core.interchange.dataframe_protocol import Column as PandasColumn @@ -234,8 +235,8 @@ def infer_vegalite_type( return "temporal" else: warnings.warn( - "I don't know how to infer vegalite type from '{}'. " - "Defaulting to nominal.".format(typ), + f"I don't know how to infer vegalite type from '{typ}'. " + "Defaulting to nominal.", stacklevel=1, ) return "nominal" @@ -271,7 +272,7 @@ def sanitize_geo_interface(geo: MutableMapping) -> dict: geo = deepcopy(geo) # convert type _Array or array to list - for key in geo.keys(): + for key in geo: if str(type(geo[key]).__name__).startswith(("_Array", "array")): geo[key] = geo[key].tolist() @@ -299,7 +300,7 @@ def numpy_is_subtype(dtype: Any, subtype: Any) -> bool: return False -def sanitize_dataframe(df: pd.DataFrame) -> pd.DataFrame: # noqa: C901 +def sanitize_dataframe(df: pd.DataFrame) -> pd.DataFrame: """Sanitize a DataFrame to prepare it for serialization. * Make a copy @@ -323,15 +324,18 @@ def sanitize_dataframe(df: pd.DataFrame) -> pd.DataFrame: # noqa: C901 for col_name in df.columns: if not isinstance(col_name, str): - raise ValueError( - "Dataframe contains invalid column name: {0!r}. " - "Column names must be strings".format(col_name) + msg = ( + f"Dataframe contains invalid column name: {col_name!r}. 
" + "Column names must be strings" ) + raise ValueError(msg) if isinstance(df.index, pd.MultiIndex): - raise ValueError("Hierarchical indices not supported") + msg = "Hierarchical indices not supported" + raise ValueError(msg) if isinstance(df.columns, pd.MultiIndex): - raise ValueError("Hierarchical indices not supported") + msg = "Hierarchical indices not supported" + raise ValueError(msg) def to_list_if_array(val): if isinstance(val, np.ndarray): @@ -365,7 +369,7 @@ def to_list_if_array(val): # https://pandas.io/docs/user_guide/boolean.html col = df[col_name].astype(object) df[col_name] = col.where(col.notnull(), None) - elif dtype_name.startswith("datetime") or dtype_name.startswith("timestamp"): + elif dtype_name.startswith(("datetime", "timestamp")): # Convert datetimes to strings. This needs to be a full ISO string # with time, which is why we cannot use ``col.astype(str)``. # This is because Javascript parses date-only times in UTC, but @@ -376,12 +380,13 @@ def to_list_if_array(val): df[col_name].apply(lambda x: x.isoformat()).replace("NaT", "") ) elif dtype_name.startswith("timedelta"): - raise ValueError( - 'Field "{col_name}" has type "{dtype}" which is ' + msg = ( + f'Field "{col_name}" has type "{dtype}" which is ' "not supported by Altair. Please convert to " "either a timestamp or a numerical value." - "".format(col_name=col_name, dtype=dtype) + "" ) + raise ValueError(msg) elif dtype_name.startswith("geometry"): # geopandas >=0.6.1 uses the dtype geometry. Continue here # otherwise it will give an error on np.issubdtype(dtype, np.integer) @@ -431,15 +436,16 @@ def sanitize_arrow_table(pa_table): for name in schema.names: array = pa_table[name] dtype_name = str(schema.field(name).type) - if dtype_name.startswith("timestamp") or dtype_name.startswith("date"): + if dtype_name.startswith(("timestamp", "date")): arrays.append(pc.strftime(array)) elif dtype_name.startswith("duration"): - raise ValueError( - 'Field "{col_name}" has type "{dtype}" which is ' + msg = ( + f'Field "{name}" has type "{dtype_name}" which is ' "not supported by Altair. Please convert to " "either a timestamp or a numerical value." - "".format(col_name=name, dtype=dtype_name) + "" ) + raise ValueError(msg) else: arrays.append(array) @@ -675,7 +681,8 @@ def infer_vegalite_type_for_dfi_column( elif kind == DtypeKind.DATETIME: return "temporal" else: - raise ValueError(f"Unexpected DtypeKind: {kind}") + msg = f"Unexpected DtypeKind: {kind}" + raise ValueError(msg) def use_signature(Obj: Callable[P, Any]): @@ -698,11 +705,9 @@ def decorate(f: Callable[..., V]) -> Callable[P, V]: doc = f.__doc__ + "\n".join(doclines[1:]) else: doc = "\n".join(doclines) - try: + # __doc__ is not modifiable for classes in Python < 3.3 + with contextlib.suppress(AttributeError): f.__doc__ = doc - except AttributeError: - # __doc__ is not modifiable for classes in Python < 3.3 - pass return f @@ -812,11 +817,13 @@ def infer_encoding_types(args: Sequence, kwargs: MutableMapping, channels: Modul else: type_ = type(arg) - encoding = channel_to_name.get(type_, None) + encoding = channel_to_name.get(type_) if encoding is None: - raise NotImplementedError("positional of type {}" "".format(type_)) + msg = f"positional of type {type_}" "" + raise NotImplementedError(msg) if encoding in kwargs: - raise ValueError("encoding {} specified twice.".format(encoding)) + msg = f"encoding {encoding} specified twice." 
+ raise ValueError(msg) kwargs[encoding] = arg def _wrap_in_channel_class(obj, encoding): @@ -830,9 +837,7 @@ def _wrap_in_channel_class(obj, encoding): return [_wrap_in_channel_class(subobj, encoding) for subobj in obj] if encoding not in name_to_channel: - warnings.warn( - "Unrecognized encoding channel '{}'".format(encoding), stacklevel=1 - ) + warnings.warn(f"Unrecognized encoding channel '{encoding}'", stacklevel=1) return obj classes = name_to_channel[encoding] diff --git a/altair/utils/data.py b/altair/utils/data.py index 871b43092..df20d7ad0 100644 --- a/altair/utils/data.py +++ b/altair/utils/data.py @@ -66,8 +66,6 @@ def consolidate_datasets(self, value: bool) -> None: class MaxRowsError(Exception): """Raised when a data model has too many rows.""" - pass - @curried.curry def limit_rows(data: TDataType, max_rows: Optional[int] = 5000) -> TDataType: @@ -78,7 +76,7 @@ def limit_rows(data: TDataType, max_rows: Optional[int] = 5000) -> TDataType: check_data_type(data) def raise_max_rows_error(): - raise MaxRowsError( + msg = ( "The number of rows in your dataset is greater " f"than the maximum allowed ({max_rows}).\n\n" "Try enabling the VegaFusion data transformer which " @@ -90,6 +88,7 @@ def raise_max_rows_error(): "for additional information\n" "on how to plot large datasets." ) + raise MaxRowsError(msg) if hasattr(data, "__geo_interface__"): if data.__geo_interface__["type"] == "FeatureCollection": @@ -132,9 +131,8 @@ def sample( values = data["values"] if not n: if frac is None: - raise ValueError( - "frac cannot be None if n is None and data is a dictionary" - ) + msg = "frac cannot be None if n is None and data is a dictionary" + raise ValueError(msg) n = int(frac * len(values)) values = random.sample(values, n) return {"values": values} @@ -145,9 +143,8 @@ def sample( pa_table = arrow_table_from_dfi_dataframe(data) if not n: if frac is None: - raise ValueError( - "frac cannot be None if n is None with this data input type" - ) + msg = "frac cannot be None if n is None with this data input type" + raise ValueError(msg) n = int(frac * len(pa_table)) indices = random.sample(range(len(pa_table)), n) return pa_table.take(indices) @@ -227,25 +224,24 @@ def to_values(data: DataType) -> ToValuesReturnType: return {"values": data.to_dict(orient="records")} elif isinstance(data, dict): if "values" not in data: - raise KeyError("values expected in data dict, but not present.") + msg = "values expected in data dict, but not present." 
+ raise KeyError(msg) return data elif isinstance(data, DataFrameLike): pa_table = sanitize_arrow_table(arrow_table_from_dfi_dataframe(data)) return {"values": pa_table.to_pylist()} else: # Should never reach this state as tested by check_data_type - raise ValueError("Unrecognized data type: {}".format(type(data))) + msg = f"Unrecognized data type: {type(data)}" + raise ValueError(msg) def check_data_type(data: DataType) -> None: if not isinstance(data, (dict, pd.DataFrame, DataFrameLike)) and not any( hasattr(data, attr) for attr in ["__geo_interface__"] ): - raise TypeError( - "Expected dict, DataFrame or a __geo_interface__ attribute, got: {}".format( - type(data) - ) - ) + msg = f"Expected dict, DataFrame or a __geo_interface__ attribute, got: {type(data)}" + raise TypeError(msg) # ============================================================================== @@ -270,31 +266,33 @@ def _data_to_json_string(data: DataType) -> str: return data.to_json(orient="records", double_precision=15) elif isinstance(data, dict): if "values" not in data: - raise KeyError("values expected in data dict, but not present.") + msg = "values expected in data dict, but not present." + raise KeyError(msg) return json.dumps(data["values"], sort_keys=True) elif isinstance(data, DataFrameLike): pa_table = arrow_table_from_dfi_dataframe(data) return json.dumps(pa_table.to_pylist()) else: - raise NotImplementedError( - "to_json only works with data expressed as " "a DataFrame or as a dict" - ) + msg = "to_json only works with data expressed as " "a DataFrame or as a dict" + raise NotImplementedError(msg) def _data_to_csv_string(data: Union[dict, pd.DataFrame, DataFrameLike]) -> str: """return a CSV string representation of the input data""" check_data_type(data) if hasattr(data, "__geo_interface__"): - raise NotImplementedError( + msg = ( "to_csv does not work with data that " "contains the __geo_interface__ attribute" ) + raise NotImplementedError(msg) elif isinstance(data, pd.DataFrame): data = sanitize_dataframe(data) return data.to_csv(index=False) elif isinstance(data, dict): if "values" not in data: - raise KeyError("values expected in data dict, but not present") + msg = "values expected in data dict, but not present" + raise KeyError(msg) return pd.DataFrame.from_dict(data["values"]).to_csv(index=False) elif isinstance(data, DataFrameLike): # experimental interchange dataframe support @@ -306,9 +304,8 @@ def _data_to_csv_string(data: Union[dict, pd.DataFrame, DataFrameLike]) -> str: pa_csv.write_csv(pa_table, csv_buffer) return csv_buffer.getvalue().to_pybytes().decode() else: - raise NotImplementedError( - "to_csv only works with data expressed as " "a DataFrame or as a dict" - ) + msg = "to_csv only works with data expressed as " "a DataFrame or as a dict" + raise NotImplementedError(msg) def pipe(data, *funcs): diff --git a/altair/utils/deprecation.py b/altair/utils/deprecation.py index f0ed26ae9..394f9ff9c 100644 --- a/altair/utils/deprecation.py +++ b/altair/utils/deprecation.py @@ -46,9 +46,7 @@ def _deprecate(obj, name=None, message=None): AltairDeprecationWarning: alt.OldFoo is deprecated. Use alt.Foo instead. """ if message is None: - message = "alt.{} is deprecated. Use alt.{} instead." "".format( - name, obj.__name__ - ) + message = f"alt.{name} is deprecated. Use alt.{obj.__name__} instead." 
"" if isinstance(obj, type): return type( name, @@ -68,4 +66,5 @@ def new_obj(*args, **kwargs): new_obj._deprecated = True return new_obj else: - raise ValueError("Cannot deprecate object of type {}".format(type(obj))) + msg = f"Cannot deprecate object of type {type(obj)}" + raise ValueError(msg) diff --git a/altair/utils/html.py b/altair/utils/html.py index 6cd89b2dd..96cd08736 100644 --- a/altair/utils/html.py +++ b/altair/utils/html.py @@ -267,16 +267,20 @@ def spec_to_html( mode = embed_options.setdefault("mode", mode) if mode not in ["vega", "vega-lite"]: - raise ValueError("mode must be either 'vega' or 'vega-lite'") + msg = "mode must be either 'vega' or 'vega-lite'" + raise ValueError(msg) if vega_version is None: - raise ValueError("must specify vega_version") + msg = "must specify vega_version" + raise ValueError(msg) if vegaembed_version is None: - raise ValueError("must specify vegaembed_version") + msg = "must specify vegaembed_version" + raise ValueError(msg) if mode == "vega-lite" and vegalite_version is None: - raise ValueError("must specify vega-lite version for mode='vega-lite'") + msg = "must specify vega-lite version for mode='vega-lite'" + raise ValueError(msg) render_kwargs = {} if template == "inline": @@ -286,7 +290,8 @@ def spec_to_html( jinja_template = TEMPLATES.get(template, template) if not hasattr(jinja_template, "render"): - raise ValueError("Invalid template: {0}".format(jinja_template)) + msg = f"Invalid template: {jinja_template}" + raise ValueError(msg) return jinja_template.render( spec=json.dumps(spec, **json_kwds), diff --git a/altair/utils/mimebundle.py b/altair/utils/mimebundle.py index 89588d04d..115bc32aa 100644 --- a/altair/utils/mimebundle.py +++ b/altair/utils/mimebundle.py @@ -61,7 +61,8 @@ def spec_to_mimebundle( from altair import renderers if mode != "vega-lite": - raise ValueError("mode must be 'vega-lite'") + msg = "mode must be 'vega-lite'" + raise ValueError(msg) internal_mode: Literal["vega-lite", "vega"] = mode if using_vegafusion(): @@ -100,14 +101,16 @@ def spec_to_mimebundle( return {"text/html": html} if format == "vega-lite": if vegalite_version is None: - raise ValueError("Must specify vegalite_version") - return {"application/vnd.vegalite.v{}+json".format(vegalite_version[0]): spec} + msg = "Must specify vegalite_version" + raise ValueError(msg) + return {f"application/vnd.vegalite.v{vegalite_version[0]}+json": spec} if format == "json": return {"application/json": spec} - raise ValueError( + msg = ( "format must be one of " "['html', 'json', 'png', 'svg', 'pdf', 'vega', 'vega-lite']" ) + raise ValueError(msg) def _spec_to_mimebundle_with_engine( @@ -218,13 +221,13 @@ def _spec_to_mimebundle_with_engine( else: # This should be validated above # but raise exception for the sake of future development - raise ValueError("Unexpected format {fmt!r}".format(fmt=format)) + msg = f"Unexpected format {format!r}" + raise ValueError(msg) else: # This should be validated above # but raise exception for the sake of future development - raise ValueError( - "Unexpected normalized_engine {eng!r}".format(eng=normalized_engine) - ) + msg = f"Unexpected normalized_engine {normalized_engine!r}" + raise ValueError(msg) def _validate_normalize_engine( @@ -252,25 +255,20 @@ def _validate_normalize_engine( # Validate or infer default value of normalized_engine if normalized_engine == "vlconvert": if vlc is None: - raise ValueError( - "The 'vl-convert' conversion engine requires the vl-convert-python package" - ) + msg = "The 'vl-convert' conversion 
engine requires the vl-convert-python package" + raise ValueError(msg) elif normalized_engine is None: if vlc is not None: normalized_engine = "vlconvert" else: - raise ValueError( - "Saving charts in {fmt!r} format requires the vl-convert-python package: " - "see https://altair-viz.github.io/user_guide/saving_charts.html#png-svg-and-pdf-format".format( - fmt=format - ) + msg = ( + f"Saving charts in {format!r} format requires the vl-convert-python package: " + "see https://altair-viz.github.io/user_guide/saving_charts.html#png-svg-and-pdf-format" ) + raise ValueError(msg) else: - raise ValueError( - "Invalid conversion engine {engine!r}. Expected vl-convert".format( - engine=engine - ) - ) + msg = f"Invalid conversion engine {engine!r}. Expected vl-convert" + raise ValueError(msg) return normalized_engine diff --git a/altair/utils/plugin_registry.py b/altair/utils/plugin_registry.py index f6281ed14..21077fe95 100644 --- a/altair/utils/plugin_registry.py +++ b/altair/utils/plugin_registry.py @@ -43,7 +43,7 @@ def __exit__(self, typ: type, value: Exception, traceback: TracebackType) -> Non self.registry._set_state(self.original_state) def __repr__(self) -> str: - return "{}.enable({!r})".format(self.registry.__class__.__name__, self.name) + return f"{self.registry.__class__.__name__}.enable({self.name!r})" class PluginRegistry(Generic[PluginType]): @@ -148,11 +148,11 @@ def _set_state(self, state: Dict[str, Any]) -> None: def _enable(self, name: str, **options) -> None: if name not in self._plugins: try: - (ep,) = [ + (ep,) = ( ep for ep in importlib_metadata_get(self.entry_point_group) if ep.name == name - ] + ) except ValueError as err: if name in self.entrypoint_err_messages: raise ValueError(self.entrypoint_err_messages[name]) from err @@ -207,8 +207,9 @@ def get(self) -> Optional[PluginType]: return self._active def __repr__(self) -> str: - return "{}(active={!r}, registered={!r})" "".format( - self.__class__.__name__, self._active_name, list(self.names()) + return ( + f"{self.__class__.__name__}(active={self._active_name!r}, registered={list(self.names())!r})" + "" ) diff --git a/altair/utils/save.py b/altair/utils/save.py index 609486bc6..20aa85afd 100644 --- a/altair/utils/save.py +++ b/altair/utils/save.py @@ -32,10 +32,11 @@ def set_inspect_format_argument( if isinstance(fp, (str, pathlib.Path)): format = pathlib.Path(fp).suffix.lstrip(".") else: - raise ValueError( + msg = ( "must specify file format: " "['png', 'svg', 'pdf', 'html', 'json', 'vega']" ) + raise ValueError(msg) if format != "html" and inline: warnings.warn("inline argument ignored for non HTML formats.", stacklevel=1) @@ -59,10 +60,12 @@ def set_inspect_mode_argument( mode = "vega-lite" if mode != "vega-lite": - raise ValueError("mode must be 'vega-lite', " "not '{}'".format(mode)) + msg = "mode must be 'vega-lite', " f"not '{mode}'" + raise ValueError(msg) if mode == "vega-lite" and vegalite_version is None: - raise ValueError("must specify vega-lite version") + msg = "must specify vega-lite version" + raise ValueError(msg) return mode @@ -190,7 +193,8 @@ def perform_save(): fp, mimebundle["image/svg+xml"], mode="w", encoding=encoding ) else: - raise ValueError("Unsupported format: '{}'".format(format)) + msg = f"Unsupported format: '{format}'" + raise ValueError(msg) if using_vegafusion(): # When the vegafusion data transformer is enabled, transforms will be diff --git a/altair/utils/schemapi.py b/altair/utils/schemapi.py index 10b2597e6..01170c6a6 100644 --- a/altair/utils/schemapi.py +++ 
b/altair/utils/schemapi.py @@ -136,7 +136,7 @@ def validate_jsonschema( # Nothing special about this first error but we need to choose one # which can be raised - main_error = list(grouped_errors.values())[0][0] + main_error = next(iter(grouped_errors.values()))[0] # All errors are then attached as a new attribute to ValidationError so that # they can be used in SchemaValidationError to craft a more helpful # error message. Setting a new attribute like this is not ideal as @@ -356,7 +356,7 @@ def _deduplicate_errors( } deduplicated_errors: ValidationErrorList = [] for validator, errors in errors_by_validator.items(): - deduplication_func = deduplication_functions.get(validator, None) + deduplication_func = deduplication_functions.get(validator) if deduplication_func is not None: errors = deduplication_func(errors) deduplicated_errors.extend(_deduplicate_by_message(errors)) @@ -752,11 +752,12 @@ def __init__(self, *args: Any, **kwds: Any) -> None: # - a single arg with no kwds, for, e.g. {'type': 'string'} # - zero args with zero or more kwds for {'type': 'object'} if self._schema is None: - raise ValueError( - "Cannot instantiate object of type {}: " + msg = ( + f"Cannot instantiate object of type {self.__class__}: " "_schema class attribute is not defined." - "".format(self.__class__) + "" ) + raise ValueError(msg) if kwds: assert len(args) == 0 @@ -852,9 +853,9 @@ def __getattr__(self, attr): return self._kwds[attr] else: try: - _getattr = super(SchemaBase, self).__getattr__ + _getattr = super().__getattr__ except AttributeError: - _getattr = super(SchemaBase, self).__getattribute__ + _getattr = super().__getattribute__ return _getattr(attr) def __setattr__(self, item, val): @@ -869,16 +870,16 @@ def __setitem__(self, item, val): def __repr__(self): if self._kwds: args = ( - "{}: {!r}".format(key, val) + f"{key}: {val!r}" for key, val in sorted(self._kwds.items()) if val is not Undefined ) args = "\n" + ",\n".join(args) - return "{0}({{{1}\n}})".format( + return "{}({{{}\n}})".format( self.__class__.__name__, args.replace("\n", "\n ") ) else: - return "{}({!r})".format(self.__class__.__name__, self._args[0]) + return f"{self.__class__.__name__}({self._args[0]!r})" def __eq__(self, other): return ( @@ -953,7 +954,7 @@ def to_dict( } ) kwds = { - k: v for k, v in kwds.items() if k not in list(ignore) + ["shorthand"] + k: v for k, v in kwds.items() if k not in [*list(ignore), "shorthand"] } if "mark" in kwds and isinstance(kwds["mark"], str): kwds["mark"] = {"type": kwds["mark"]} @@ -962,10 +963,11 @@ def to_dict( context=context, ) else: - raise ValueError( - "{} instance has both a value and properties : " - "cannot serialize to dict".format(self.__class__) + msg = ( + f"{self.__class__} instance has both a value and properties : " + "cannot serialize to dict" ) + raise ValueError(msg) if validate: try: self.validate(result) @@ -1212,7 +1214,8 @@ def from_dict( ) -> Any: """Construct an object from a dict representation""" if (schema is None) == (cls is None): - raise ValueError("Must provide either cls or schema, but not both.") + msg = "Must provide either cls or schema, but not both." + raise ValueError(msg) if schema is None: # Can ignore type errors as cls is not None in case schema is schema = cls._schema # type: ignore[union-attr] @@ -1234,10 +1237,7 @@ def from_dict( # Our class dict is constructed breadth-first from top to bottom, # so the first class that matches is the most general match. 
matches = self.class_dict[self.hash_schema(schema)] - if matches: - cls = matches[0] - else: - cls = default_class + cls = matches[0] if matches else default_class schema = _resolve_references(schema, rootschema) if "anyOf" in schema or "oneOf" in schema: diff --git a/altair/utils/selection.py b/altair/utils/selection.py index 2e796cac0..55def90af 100644 --- a/altair/utils/selection.py +++ b/altair/utils/selection.py @@ -89,10 +89,7 @@ def from_vega(name: str, signal: Optional[Dict[str, dict]], store: Store): ------- PointSelection """ - if signal is None: - points = [] - else: - points = signal.get("vlPoint", {}).get("or", []) + points = [] if signal is None else signal.get("vlPoint", {}).get("or", []) return PointSelection(name=name, value=points, store=store) diff --git a/altair/utils/server.py b/altair/utils/server.py index 2ec2b32fc..fa9c2f37c 100644 --- a/altair/utils/server.py +++ b/altair/utils/server.py @@ -79,7 +79,8 @@ def find_open_port(ip, port, n=50): s.close() if result != 0: return port - raise ValueError("no open ports found") + msg = "no open ports found" + raise ValueError(msg) def serve( @@ -131,13 +132,13 @@ def serve( print(JUPYTER_WARNING) # Start the server - print("Serving to http://{}:{}/ [Ctrl-C to exit]".format(ip, port)) + print(f"Serving to http://{ip}:{port}/ [Ctrl-C to exit]") sys.stdout.flush() if open_browser: # Use a thread to open a web browser pointing to the server def b(): - return webbrowser.open("http://{}:{}".format(ip, port)) + return webbrowser.open(f"http://{ip}:{port}") threading.Thread(target=b).start() diff --git a/altair/vegalite/v5/api.py b/altair/vegalite/v5/api.py index 37a13c2ae..9164d0bbf 100644 --- a/altair/vegalite/v5/api.py +++ b/altair/vegalite/v5/api.py @@ -84,10 +84,9 @@ def _consolidate_data(data, context): values = data.values kwds = {"format": data.format} - elif isinstance(data, dict): - if "name" not in data and "values" in data: - values = data["values"] - kwds = {k: v for k, v in data.items() if k != "values"} + elif isinstance(data, dict) and "name" not in data and "values" in data: + values = data["values"] + kwds = {k: v for k, v in data.items() if k != "values"} if values is not Undefined: name = _dataset_name(values) @@ -130,7 +129,7 @@ def _prepare_data(data, context=None): # if data is still not a recognized type, then return if not isinstance(data, (dict, core.Data)): - warnings.warn("data of type {} not recognized".format(type(data)), stacklevel=1) + warnings.warn(f"data of type {type(data)} not recognized", stacklevel=1) return data @@ -243,7 +242,8 @@ def to_dict(self) -> TypingDict[str, Union[str, dict]]: ) } else: - raise ValueError(f"Unrecognized parameter type: {self.param_type}") + msg = f"Unrecognized parameter type: {self.param_type}" + raise ValueError(msg) def __invert__(self): if self.param_type == "selection": @@ -268,7 +268,7 @@ def __or__(self, other): return _expr_core.OperatorMixin.__or__(self, other) def __repr__(self) -> str: - return "Parameter({0!r}, {1})".format(self.name, self.param) + return f"Parameter({self.name!r}, {self.param})" def _to_expr(self) -> str: return self.name @@ -306,7 +306,7 @@ def __or__(self, other): return SelectionPredicateComposition({"or": [self.to_dict(), other.to_dict()]}) -class ParameterExpression(_expr_core.OperatorMixin, object): +class ParameterExpression(_expr_core.OperatorMixin): def __init__(self, expr) -> None: self.expr = expr @@ -320,7 +320,7 @@ def _from_expr(self, expr) -> "ParameterExpression": return ParameterExpression(expr=expr) -class 
SelectionExpression(_expr_core.OperatorMixin, object): +class SelectionExpression(_expr_core.OperatorMixin): def __init__(self, expr) -> None: self.expr = expr @@ -418,7 +418,8 @@ def param( elif (parameter.empty is False) or (parameter.empty is True): pass else: - raise ValueError("The value of 'empty' should be True or False.") + msg = "The value of 'empty' should be True or False." + raise ValueError(msg) if "init" in kwds: warnings.warn( @@ -480,7 +481,8 @@ def _selection( stacklevel=1, ) else: - raise ValueError("""'type' must be 'point' or 'interval'""") + msg = """'type' must be 'point' or 'interval'""" + raise ValueError(msg) return param(select=select, **param_kwds) @@ -845,9 +847,8 @@ def condition( elif isinstance(predicate, dict): condition = predicate else: - raise NotImplementedError( - "condition predicate of type {}" "".format(type(predicate)) - ) + msg = f"condition predicate of type {type(predicate)}" "" + raise NotImplementedError(msg) if isinstance(if_true, core.SchemaBase): # convert to dict for now; the from_dict call below will wrap this @@ -855,9 +856,8 @@ def condition( if_true = if_true.to_dict() elif isinstance(if_true, str): if isinstance(if_false, str): - raise ValueError( - "A field cannot be used for both the `if_true` and `if_false` values of a condition. One of them has to specify a `value` or `datum` definition." - ) + msg = "A field cannot be used for both the `if_true` and `if_false` values of a condition. One of them has to specify a `value` or `datum` definition." + raise ValueError(msg) else: if_true = utils.parse_shorthand(if_true) if_true.update(kwargs) @@ -929,9 +929,8 @@ def to_dict( # Validate format if format not in ("vega-lite", "vega"): - raise ValueError( - f'The format argument must be either "vega-lite" or "vega". Received {repr(format)}' - ) + msg = f'The format argument must be either "vega-lite" or "vega". Received {format!r}' + raise ValueError(msg) # We make use of three context markers: # - 'data' points to the data that should be referenced for column type @@ -988,7 +987,7 @@ def to_dict( if context.get("pre_transform", True) and _using_vegafusion(): if format == "vega-lite": - raise ValueError( + msg = ( 'When the "vegafusion" data transformer is enabled, the \n' "to_dict() and to_json() chart methods must be called with " 'format="vega". \n' @@ -996,13 +995,15 @@ def to_dict( ' >>> chart.to_dict(format="vega")\n' ' >>> chart.to_json(format="vega")' ) + raise ValueError(msg) else: return _compile_with_vegafusion(vegalite_spec) else: if format == "vega": plugin = vegalite_compilers.get() if plugin is None: - raise ValueError("No active vega-lite compiler plugin found") + msg = "No active vega-lite compiler plugin found" + raise ValueError(msg) return plugin(vegalite_spec) else: return vegalite_spec @@ -1246,23 +1247,26 @@ def save( # Fallback for when rendering fails; the full repr is too long to be # useful in nearly all cases. def __repr__(self) -> str: - return "alt.{}(...)".format(self.__class__.__name__) + return f"alt.{self.__class__.__name__}(...)" # Layering and stacking def __add__(self, other) -> "LayerChart": if not isinstance(other, TopLevelMixin): - raise ValueError("Only Chart objects can be layered.") + msg = "Only Chart objects can be layered." + raise ValueError(msg) return layer(self, other) def __and__(self, other) -> "VConcatChart": if not isinstance(other, TopLevelMixin): - raise ValueError("Only Chart objects can be concatenated.") + msg = "Only Chart objects can be concatenated." 
+ raise ValueError(msg) # Too difficult to type check this return vconcat(self, other) def __or__(self, other) -> "HConcatChart": if not isinstance(other, TopLevelMixin): - raise ValueError("Only Chart objects can be concatenated.") + msg = "Only Chart objects can be concatenated." + raise ValueError(msg) return hconcat(self, other) def repeat( @@ -1307,11 +1311,11 @@ def repeat( layer_specified = layer is not Undefined if repeat_specified and rowcol_specified: - raise ValueError( - "repeat argument cannot be combined with row/column argument." - ) + msg = "repeat argument cannot be combined with row/column argument." + raise ValueError(msg) elif repeat_specified and layer_specified: - raise ValueError("repeat argument cannot be combined with layer argument.") + msg = "repeat argument cannot be combined with layer argument." + raise ValueError(msg) repeat_arg: Union[List[str], core.LayerRepeatMapping, core.RepeatMapping] if repeat_specified: @@ -1656,9 +1660,8 @@ def transform_bin( """ if as_ is not Undefined: if "as" in kwargs: - raise ValueError( - "transform_bin: both 'as_' and 'as' passed as arguments." - ) + msg = "transform_bin: both 'as_' and 'as' passed as arguments." + raise ValueError(msg) kwargs["as"] = as_ kwargs["bin"] = bin kwargs["field"] = field @@ -1726,9 +1729,8 @@ def transform_calculate( # users. as_ = kwargs.pop("as", Undefined) # type: ignore[assignment] elif "as" in kwargs: - raise ValueError( - "transform_calculate: both 'as_' and 'as' passed as arguments." - ) + msg = "transform_calculate: both 'as_' and 'as' passed as arguments." + raise ValueError(msg) if as_ is not Undefined or calculate is not Undefined: dct = {"as": as_, "calculate": calculate} self = self._add_transform(core.CalculateTransform(**dct)) # type: ignore[arg-type] @@ -2143,15 +2145,13 @@ def transform_lookup( """ if as_ is not Undefined: if "as" in kwargs: - raise ValueError( - "transform_lookup: both 'as_' and 'as' passed as arguments." - ) + msg = "transform_lookup: both 'as_' and 'as' passed as arguments." + raise ValueError(msg) kwargs["as"] = as_ if from_ is not Undefined: if "from" in kwargs: - raise ValueError( - "transform_lookup: both 'from_' and 'from' passed as arguments." - ) + msg = "transform_lookup: both 'from_' and 'from' passed as arguments." + raise ValueError(msg) kwargs["from"] = from_ kwargs["lookup"] = lookup kwargs["default"] = default @@ -2450,9 +2450,8 @@ def transform_timeunit( as_ = kwargs.pop("as", Undefined) else: if "as" in kwargs: - raise ValueError( - "transform_timeunit: both 'as_' and 'as' passed as arguments." - ) + msg = "transform_timeunit: both 'as_' and 'as' passed as arguments." 
+ raise ValueError(msg) if as_ is not Undefined: dct = {"as": as_, "timeUnit": timeUnit, "field": field} self = self._add_transform(core.TimeUnitTransform(**dct)) # type: ignore[arg-type] @@ -2466,7 +2465,8 @@ def transform_timeunit( dct.pop("type", None) dct["as"] = as_ if "timeUnit" not in dct: - raise ValueError("'{}' must include a valid timeUnit".format(shorthand)) + msg = f"'{shorthand}' must include a valid timeUnit" + raise ValueError(msg) self = self._add_transform(core.TimeUnitTransform(**dct)) # type: ignore[arg-type] return self @@ -2708,9 +2708,8 @@ def show(self) -> None: def _set_resolve(self, **kwargs): """Copy the chart and update the resolve property with kwargs""" if not hasattr(self, "resolve"): - raise ValueError( - "{} object has no attribute " "'resolve'".format(self.__class__) - ) + msg = f"{self.__class__} object has no attribute " "'resolve'" + raise ValueError(msg) copy = self.copy(deep=["resolve"]) if copy.resolve is Undefined: copy.resolve = core.Resolve() @@ -2792,18 +2791,18 @@ def facet( rowcol_specified = row is not Undefined or column is not Undefined if facet_specified and rowcol_specified: - raise ValueError( - "facet argument cannot be combined with row/column argument." - ) + msg = "facet argument cannot be combined with row/column argument." + raise ValueError(msg) if data is Undefined: if self.data is Undefined: # type: ignore[has-type] - raise ValueError( + msg = ( "Facet charts require data to be specified at the top level. " "If you are trying to facet layered or concatenated charts, " "ensure that the same data variable is passed to each chart " "or specify the data inside the facet method instead." ) + raise ValueError(msg) # ignore type as copy comes from another class self = self.copy(deep=False) # type: ignore[attr-defined] data, self.data = self.data, Undefined # type: ignore[has-type] @@ -2885,7 +2884,7 @@ def __init__( height: Union[int, str, dict, core.Step, UndefinedType] = Undefined, **kwargs, ) -> None: - super(Chart, self).__init__( + super().__init__( # Data type hints won't match with what TopLevelUnitSpec expects # as there is some data processing happening when converting to # a VL spec @@ -2927,7 +2926,7 @@ def from_dict(cls, dct: dict, validate: bool = True) -> core.SchemaBase: # type """ for class_ in TopLevelMixin.__subclasses__(): if class_ is Chart: - class_ = cast(TypingType[TopLevelMixin], super(Chart, cls)) + class_ = cast(TypingType[TopLevelMixin], super()) try: # TopLevelMixin classes don't necessarily have from_dict defined # but all classes which are used here have due to how Altair is @@ -3076,7 +3075,8 @@ def _check_if_valid_subspec(spec: Union[dict, core.SchemaBase], classname: str) ) if not isinstance(spec, (core.SchemaBase, dict)): - raise ValueError("Only chart objects can be used in {0}.".format(classname)) + msg = f"Only chart objects can be used in {classname}." + raise ValueError(msg) for attr in TOPLEVEL_ONLY_KEYS: if isinstance(spec, core.SchemaBase): val = getattr(spec, attr, Undefined) @@ -3099,38 +3099,32 @@ def _get(spec, attr): if encoding is not Undefined: for channel in ["row", "column", "facet"]: if _get(encoding, channel) is not Undefined: - raise ValueError( - "Faceted charts cannot be layered. Instead, layer the charts before faceting." - ) + msg = "Faceted charts cannot be layered. Instead, layer the charts before faceting." 
+ raise ValueError(msg) if isinstance(spec, (Chart, LayerChart)): return if not isinstance(spec, (core.SchemaBase, dict)): - raise ValueError("Only chart objects can be layered.") + msg = "Only chart objects can be layered." + raise ValueError(msg) if _get(spec, "facet") is not Undefined: - raise ValueError( - "Faceted charts cannot be layered. Instead, layer the charts before faceting." - ) + msg = "Faceted charts cannot be layered. Instead, layer the charts before faceting." + raise ValueError(msg) if isinstance(spec, FacetChart) or _get(spec, "facet") is not Undefined: - raise ValueError( - "Faceted charts cannot be layered. Instead, layer the charts before faceting." - ) + msg = "Faceted charts cannot be layered. Instead, layer the charts before faceting." + raise ValueError(msg) if isinstance(spec, RepeatChart) or _get(spec, "repeat") is not Undefined: - raise ValueError( - "Repeat charts cannot be layered. Instead, layer the charts before repeating." - ) + msg = "Repeat charts cannot be layered. Instead, layer the charts before repeating." + raise ValueError(msg) if isinstance(spec, ConcatChart) or _get(spec, "concat") is not Undefined: - raise ValueError( - "Concatenated charts cannot be layered. Instead, layer the charts before concatenating." - ) + msg = "Concatenated charts cannot be layered. Instead, layer the charts before concatenating." + raise ValueError(msg) if isinstance(spec, HConcatChart) or _get(spec, "hconcat") is not Undefined: - raise ValueError( - "Concatenated charts cannot be layered. Instead, layer the charts before concatenating." - ) + msg = "Concatenated charts cannot be layered. Instead, layer the charts before concatenating." + raise ValueError(msg) if isinstance(spec, VConcatChart) or _get(spec, "vconcat") is not Undefined: - raise ValueError( - "Concatenated charts cannot be layered. Instead, layer the charts before concatenating." - ) + msg = "Concatenated charts cannot be layered. Instead, layer the charts before concatenating." + raise ValueError(msg) class RepeatChart(TopLevelMixin, core.TopLevelRepeatSpec): @@ -3170,7 +3164,7 @@ def __init__( spec = _spec_as_list[0] if isinstance(spec, (Chart, LayerChart)): params = _repeat_names(params, repeat, spec) - super(RepeatChart, self).__init__( + super().__init__( repeat=repeat, spec=spec, align=align, @@ -3216,9 +3210,8 @@ def transformed_data( NotImplementedError RepeatChart does not yet support transformed_data """ - raise NotImplementedError( - "transformed_data is not yet implemented for RepeatChart" - ) + msg = "transformed_data is not yet implemented for RepeatChart" + raise NotImplementedError(msg) def interactive( self, name: Optional[str] = None, bind_x: bool = True, bind_y: bool = True @@ -3279,7 +3272,8 @@ def repeat( repeat : RepeatRef object """ if repeater not in ["row", "column", "repeat", "layer"]: - raise ValueError("repeater must be one of ['row', 'column', 'repeat', 'layer']") + msg = "repeater must be one of ['row', 'column', 'repeat', 'layer']" + raise ValueError(msg) return core.RepeatRef(repeat=repeater) @@ -3291,9 +3285,7 @@ def __init__(self, data=Undefined, concat=(), columns=Undefined, **kwargs): # TODO: move common data to top level? 
for spec in concat: _check_if_valid_subspec(spec, "ConcatChart") - super(ConcatChart, self).__init__( - data=data, concat=list(concat), columns=columns, **kwargs - ) + super().__init__(data=data, concat=list(concat), columns=columns, **kwargs) self.data, self.concat = _combine_subchart_data(self.data, self.concat) self.params, self.concat = _combine_subchart_params(self.params, self.concat) @@ -3394,7 +3386,7 @@ def __init__(self, data=Undefined, hconcat=(), **kwargs): # TODO: move common data to top level? for spec in hconcat: _check_if_valid_subspec(spec, "HConcatChart") - super(HConcatChart, self).__init__(data=data, hconcat=list(hconcat), **kwargs) + super().__init__(data=data, hconcat=list(hconcat), **kwargs) self.data, self.hconcat = _combine_subchart_data(self.data, self.hconcat) self.params, self.hconcat = _combine_subchart_params(self.params, self.hconcat) @@ -3493,7 +3485,7 @@ def __init__(self, data=Undefined, vconcat=(), **kwargs): # TODO: move common data to top level? for spec in vconcat: _check_if_valid_subspec(spec, "VConcatChart") - super(VConcatChart, self).__init__(data=data, vconcat=list(vconcat), **kwargs) + super().__init__(data=data, vconcat=list(vconcat), **kwargs) self.data, self.vconcat = _combine_subchart_data(self.data, self.vconcat) self.params, self.vconcat = _combine_subchart_params(self.params, self.vconcat) @@ -3594,7 +3586,7 @@ def __init__(self, data=Undefined, layer=(), **kwargs): for spec in layer: _check_if_valid_subspec(spec, "LayerChart") _check_if_can_be_layered(spec) - super(LayerChart, self).__init__(data=data, layer=list(layer), **kwargs) + super().__init__(data=data, layer=list(layer), **kwargs) self.data, self.layer = _combine_subchart_data(self.data, self.layer) # Currently (Vega-Lite 5.5) the same param can't occur on two layers self.layer = _remove_duplicate_params(self.layer) @@ -3674,9 +3666,8 @@ def interactive( """ if not self.layer: - raise ValueError( - "LayerChart: cannot call interactive() until a " "layer is defined" - ) + msg = "LayerChart: cannot call interactive() until a " "layer is defined" + raise ValueError(msg) copy = self.copy(deep=["layer"]) copy.layer[0] = copy.layer[0].interactive( name=name, bind_x=bind_x, bind_y=bind_y @@ -3720,9 +3711,7 @@ def __init__( _spec_as_list = [spec] params, _spec_as_list = _combine_subchart_params(params, _spec_as_list) spec = _spec_as_list[0] - super(FacetChart, self).__init__( - data=data, spec=spec, facet=facet, params=params, **kwargs - ) + super().__init__(data=data, spec=spec, facet=facet, params=params, **kwargs) def transformed_data( self, @@ -4065,7 +4054,8 @@ def remove_prop(subchart, prop): elif all(v == values[0] for v in values[1:]): output_dict[prop] = values[0] else: - raise ValueError(f"There are inconsistent values {values} for {prop}") + msg = f"There are inconsistent values {values} for {prop}" + raise ValueError(msg) else: # Top level has this prop; subchart must either not have the prop # or it must be Undefined or identical to proceed. 
@@ -4075,7 +4065,8 @@ def remove_prop(subchart, prop): ): output_dict[prop] = chart[prop] else: - raise ValueError(f"There are inconsistent values {values} for {prop}") + msg = f"There are inconsistent values {values} for {prop}" + raise ValueError(msg) subcharts = [remove_prop(c, prop) for c in subcharts] return output_dict, subcharts @@ -4093,11 +4084,8 @@ def sequence(start, stop=None, step=Undefined, as_=Undefined, **kwds): @utils.use_signature(core.GraticuleParams) def graticule(**kwds): """Graticule generator.""" - if not kwds: - # graticule: True indicates default parameters - graticule = True - else: - graticule = core.GraticuleParams(**kwds) + # graticule: True indicates default parameters + graticule = core.GraticuleParams(**kwds) if kwds else True return core.GraticuleGenerator(graticule=graticule) diff --git a/altair/vegalite/v5/theme.py b/altair/vegalite/v5/theme.py index bf99ad638..982f35117 100644 --- a/altair/vegalite/v5/theme.py +++ b/altair/vegalite/v5/theme.py @@ -31,7 +31,7 @@ def __call__(self) -> Dict[str, Dict[str, Dict[str, Union[str, int]]]]: } def __repr__(self) -> str: - return "VegaTheme({!r})".format(self.theme) + return f"VegaTheme({self.theme!r})" # The entry point group that can be used by other packages to declare other @@ -53,7 +53,7 @@ def __repr__(self) -> str: } }, ) -themes.register("none", lambda: {}) +themes.register("none", dict) for theme in VEGA_THEMES: themes.register(theme, VegaTheme(theme)) diff --git a/doc/conf.py b/doc/conf.py index 019ac9edd..358b14cfd 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -70,7 +70,7 @@ # General information about the project. project = "Vega-Altair" -copyright = "2016-{}, Vega-Altair Developers".format(datetime.now().year) +copyright = f"2016-{datetime.now().year}, Vega-Altair Developers" author = "Vega-Altair Developers" # The version info for the project you're documenting, acts as replacement for diff --git a/sphinxext/altairgallery.py b/sphinxext/altairgallery.py index a6310ddd9..b114ee1ab 100644 --- a/sphinxext/altairgallery.py +++ b/sphinxext/altairgallery.py @@ -170,10 +170,10 @@ def save_example_pngs(examples, image_dir, make_thumbnails=True): hashes_match = hashes.get(filename, "") == example_hash if hashes_match and os.path.exists(image_file): - print("-> using cached {}".format(image_file)) + print(f"-> using cached {image_file}") else: # the file changed or the image file does not exist. Generate it. - print("-> saving {}".format(image_file)) + print(f"-> saving {image_file}") chart = eval_block(example["code"]) try: chart.save(image_file) @@ -208,7 +208,7 @@ def populate_examples(**kwds): for example in examples: docstring, category, code, lineno = get_docstring_and_rest(example["filename"]) - if example["name"] in method_examples.keys(): + if example["name"] in method_examples: _, _, method_code, _ = get_docstring_and_rest( method_examples[example["name"]]["filename"] ) @@ -220,9 +220,8 @@ def populate_examples(**kwds): ) example.update(kwds) if category is None: - raise Exception( - f"The example {example['name']} is not assigned to a category" - ) + msg = f"The example {example['name']} is not assigned to a category" + raise Exception(msg) example.update( { "docstring": docstring, @@ -268,10 +267,11 @@ def run(self): if names: if len(names) < size: - raise ValueError( + msg = ( "altair-minigallery: if names are specified, " "the list must be at least as long as size." 
                )
+                raise ValueError(msg)
            mapping = {example["name"]: example for example in examples}
            examples = [mapping[name] for name in names]
        else:
diff --git a/sphinxext/schematable.py b/sphinxext/schematable.py
index c70060b32..9dc82355b 100644
--- a/sphinxext/schematable.py
+++ b/sphinxext/schematable.py
@@ -11,7 +11,7 @@
 from sphinx import addnodes

 sys.path.insert(0, abspath(dirname(dirname(dirname(__file__)))))
-from tools.schemapi.utils import fix_docstring_issues, SchemaInfo  # noqa: E402
+from tools.schemapi.utils import fix_docstring_issues, SchemaInfo


 def type_description(schema):
@@ -37,7 +37,7 @@ def type_description(schema):
         )
     else:
         warnings.warn(
-            "cannot infer type for schema with keys {}" "".format(schema.keys()),
+            f"cannot infer type for schema with keys {schema.keys()}",
             stacklevel=1,
         )
         return "--"
@@ -168,7 +168,8 @@ def select_items_from_schema(schema, props=None):
         try:
             yield prop, properties[prop], prop in required
         except KeyError as err:
-            raise Exception(f"Can't find property: {prop}") from err
+            msg = f"Can't find property: {prop}"
+            raise Exception(msg) from err


 def prepare_schema_table(schema, rootschema, props=None):
diff --git a/sphinxext/utils.py b/sphinxext/utils.py
index 50d17608c..165d8e391 100644
--- a/sphinxext/utils.py
+++ b/sphinxext/utils.py
@@ -67,7 +67,7 @@ def _parse_source_file(filename):
     https://github.com/sphinx-gallery/sphinx-gallery/
     """

-    with open(filename, "r", encoding="utf-8") as fid:
+    with open(filename, encoding="utf-8") as fid:
         content = fid.read()
     # change from Windows format to UNIX for uniformity
     content = content.replace("\r\n", "\n")
@@ -121,11 +121,8 @@ def get_docstring_and_rest(filename):
         return SYNTAX_ERROR_DOCSTRING, category, content, 1

     if not isinstance(node, ast.Module):
-        raise TypeError(
-            "This function only supports modules. You provided {}".format(
-                node.__class__.__name__
-            )
-        )
+        msg = f"This function only supports modules. You provided {node.__class__.__name__}"
+        raise TypeError(msg)
     try:
         # In python 3.7 module knows its docstring.
         # Everything else will raise an attribute error
@@ -172,12 +169,11 @@ def get_docstring_and_rest(filename):
        docstring, rest = "", ""

    if not docstring:
-        raise ValueError(
-            (
-                'Could not find docstring in file "{0}". '
-                "A docstring is required for the example gallery."
-            ).format(filename)
+        msg = (
+            f'Could not find docstring in file "{filename}". '
+            "A docstring is required for the example gallery."
        )
+        raise ValueError(msg)

    return docstring, category, rest, lineno
diff --git a/tests/expr/test_expr.py b/tests/expr/test_expr.py
index a5b6870bd..49d574176 100644
--- a/tests/expr/test_expr.py
+++ b/tests/expr/test_expr.py
@@ -12,7 +12,7 @@ def test_unary_operations():
     OP_MAP = {"-": operator.neg, "+": operator.pos}
     for op, func in OP_MAP.items():
         z = func(datum.xxx)
-        assert repr(z) == "({}datum.xxx)".format(op)
+        assert repr(z) == f"({op}datum.xxx)"


 def test_binary_operations():
@@ -42,16 +42,16 @@ def test_binary_operations():
     }
     for op, func in OP_MAP.items():
         z1 = func(datum.xxx, 2)
-        assert repr(z1) == "(datum.xxx {} 2)".format(op)
+        assert repr(z1) == f"(datum.xxx {op} 2)"

         z2 = func(2, datum.xxx)
         if op in INEQ_REVERSE:
-            assert repr(z2) == "(datum.xxx {} 2)".format(INEQ_REVERSE[op])
+            assert repr(z2) == f"(datum.xxx {INEQ_REVERSE[op]} 2)"
         else:
-            assert repr(z2) == "(2 {} datum.xxx)".format(op)
+            assert repr(z2) == f"(2 {op} datum.xxx)"

         z3 = func(datum.xxx, datum.yyy)
-        assert repr(z3) == "(datum.xxx {} datum.yyy)".format(op)
+        assert repr(z3) == f"(datum.xxx {op} datum.yyy)"


 def test_abs():
@@ -65,7 +65,7 @@ def test_expr_funcs():
     for funcname in expr.funcs.__all__:
         func = getattr(expr, funcname)
         z = func(datum.xxx)
-        assert repr(z) == "{}(datum.xxx)".format(name_map.get(funcname, funcname))
+        assert repr(z) == f"{name_map.get(funcname, funcname)}(datum.xxx)"


 def test_expr_consts():
@@ -74,7 +74,7 @@ def test_expr_consts():
     for constname in expr.consts.__all__:
         const = getattr(expr, constname)
         z = const * datum.xxx
-        assert repr(z) == "({} * datum.xxx)".format(name_map.get(constname, constname))
+        assert repr(z) == f"({name_map.get(constname, constname)} * datum.xxx)"


 def test_json_reprs():
diff --git a/tests/test_examples.py b/tests/test_examples.py
index 124b8a66b..61726c5a1 100644
--- a/tests/test_examples.py
+++ b/tests/test_examples.py
@@ -10,7 +10,7 @@

 try:
-    import vl_convert as vlc  # noqa: F401
+    import vl_convert as vlc
 except ImportError:
     vlc = None

@@ -37,18 +37,20 @@ def test_render_examples_to_chart(syntax_module):
     chart = eval_block(source)

     if chart is None:
-        raise ValueError(
+        msg = (
             f"Example file {filename} should define chart in its final "
             "statement."
         )
+        raise ValueError(msg)

     try:
         assert isinstance(chart.to_dict(), dict)
     except Exception as err:
-        raise AssertionError(
+        msg = (
             f"Example file {filename} raised an exception when "
             f"converting to a dict: {err}"
-        ) from err
+        )
+        raise AssertionError(msg) from err


 @pytest.mark.parametrize(
@@ -63,10 +65,11 @@ def test_from_and_to_json_roundtrip(syntax_module):
     chart = eval_block(source)

     if chart is None:
-        raise ValueError(
+        msg = (
             f"Example file {filename} should define chart in its final "
             "statement."
         )
+        raise ValueError(msg)

     try:
         first_json = chart.to_json()
@@ -78,10 +81,11 @@ def test_from_and_to_json_roundtrip(syntax_module):
         second_json = reconstructed_chart.to_json()
         assert first_json == second_json
     except Exception as err:
-        raise AssertionError(
+        msg = (
             f"Example file {filename} raised an exception when "
             f"doing a json conversion roundtrip: {err}"
-        ) from err
+        )
+        raise AssertionError(msg) from err


 @pytest.mark.parametrize("engine", ["vl-convert"])
diff --git a/tests/test_magics.py b/tests/test_magics.py
index 4dd69ba7b..11b7f7d57 100644
--- a/tests/test_magics.py
+++ b/tests/test_magics.py
@@ -7,7 +7,6 @@
     IPYTHON_AVAILABLE = True
 except ImportError:
     IPYTHON_AVAILABLE = False
-    pass

 from altair.vegalite.v5 import VegaLite

@@ -27,11 +26,11 @@
     _ipshell = InteractiveShell.instance()
     _ipshell.run_cell("%load_ext altair")
     _ipshell.run_cell(
-        """
+        f"""
 import pandas as pd
-table = pd.DataFrame.from_records({})
+table = pd.DataFrame.from_records({DATA_RECORDS})
 the_data = table
-""".format(DATA_RECORDS)
+"""
     )

@@ -51,14 +50,14 @@ def test_vegalite_magic_data_included():
     result = _ipshell.run_cell("%%vegalite\n" + json.dumps(VEGALITE_SPEC))
     assert isinstance(result.result, VegaLite)
-    assert VEGALITE_SPEC == result.result.spec
+    assert result.result.spec == VEGALITE_SPEC


 @pytest.mark.skipif(not IPYTHON_AVAILABLE, reason="requires ipython")
 def test_vegalite_magic_json_flag():
     result = _ipshell.run_cell("%%vegalite --json\n" + json.dumps(VEGALITE_SPEC))
     assert isinstance(result.result, VegaLite)
-    assert VEGALITE_SPEC == result.result.spec
+    assert result.result.spec == VEGALITE_SPEC


 @pytest.mark.skipif(not IPYTHON_AVAILABLE, reason="requires ipython")
@@ -66,4 +65,4 @@ def test_vegalite_magic_pandas_data():
     spec = {key: val for key, val in VEGALITE_SPEC.items() if key != "data"}
     result = _ipshell.run_cell("%%vegalite table\n" + json.dumps(spec))
     assert isinstance(result.result, VegaLite)
-    assert VEGALITE_SPEC == result.result.spec
+    assert result.result.spec == VEGALITE_SPEC
diff --git a/tests/utils/test_compiler.py b/tests/utils/test_compiler.py
index 4d161804c..0840da4a8 100644
--- a/tests/utils/test_compiler.py
+++ b/tests/utils/test_compiler.py
@@ -3,7 +3,7 @@
 from altair import vegalite_compilers, Chart

 try:
-    import vl_convert as vlc  # noqa: F401
+    import vl_convert as vlc
 except ImportError:
     vlc = None
diff --git a/tests/utils/test_core.py b/tests/utils/test_core.py
index 27cd3b7ee..554a8bdd7 100644
--- a/tests/utils/test_core.py
+++ b/tests/utils/test_core.py
@@ -164,7 +164,7 @@ def check(s, data, **kwargs):
     check("month(z)", data, timeUnit="month", field="z", type="temporal")
     check("month(t)", data, timeUnit="month", field="t", type="temporal")

-    if PANDAS_VERSION >= Version("1.0.0"):
+    if Version("1.0.0") <= PANDAS_VERSION:
         data["b"] = pd.Series([True, False, True, False, None], dtype="boolean")
         check("b", data, field="b", type="nominal")

@@ -186,7 +186,7 @@ def test_parse_shorthand_for_arrow_timestamp():
 def test_parse_shorthand_all_aggregates():
     aggregates = alt.Root._schema["definitions"]["AggregateOp"]["enum"]
     for aggregate in aggregates:
-        shorthand = "{aggregate}(field):Q".format(aggregate=aggregate)
+        shorthand = f"{aggregate}(field):Q"
         assert parse_shorthand(shorthand) == {
             "aggregate": aggregate,
             "field": "field",
@@ -201,7 +201,7 @@ def test_parse_shorthand_all_timeunits():
             defn = loc + typ + "TimeUnit"
             timeUnits.extend(alt.Root._schema["definitions"][defn]["enum"])
     for timeUnit in timeUnits:
-        shorthand = "{timeUnit}(field):Q".format(timeUnit=timeUnit)
+        shorthand = f"{timeUnit}(field):Q"
         assert parse_shorthand(shorthand) == {
             "timeUnit": timeUnit,
             "field": "field",
@@ -225,7 +225,7 @@ def test_parse_shorthand_all_window_ops():
     window_ops = alt.Root._schema["definitions"]["WindowOnlyOp"]["enum"]
     aggregates = alt.Root._schema["definitions"]["AggregateOp"]["enum"]
     for op in window_ops + aggregates:
-        shorthand = "{op}(field)".format(op=op)
+        shorthand = f"{op}(field)"
         dct = parse_shorthand(
             shorthand,
             parse_aggregates=False,
diff --git a/tests/utils/test_html.py b/tests/utils/test_html.py
index ccd41745c..1ce63445f 100644
--- a/tests/utils/test_html.py
+++ b/tests/utils/test_html.py
@@ -47,6 +47,6 @@ def test_spec_to_html(requirejs, fullhtml, spec):
     else:
         assert "require(" not in html

-    assert "vega-lite@{}".format(vegalite_version) in html
-    assert "vega@{}".format(vega_version) in html
-    assert "vega-embed@{}".format(vegaembed_version) in html
+    assert f"vega-lite@{vegalite_version}" in html
+    assert f"vega@{vega_version}" in html
+    assert f"vega-embed@{vegaembed_version}" in html
diff --git a/tests/utils/test_mimebundle.py b/tests/utils/test_mimebundle.py
index 6e2965647..692412f99 100644
--- a/tests/utils/test_mimebundle.py
+++ b/tests/utils/test_mimebundle.py
@@ -5,7 +5,7 @@
 from altair.utils.mimebundle import spec_to_mimebundle

 try:
-    import vl_convert as vlc  # noqa: F401
+    import vl_convert as vlc
 except ImportError:
     vlc = None
diff --git a/tests/utils/test_schemapi.py b/tests/utils/test_schemapi.py
index e1d0e5cc3..fd1ef0122 100644
--- a/tests/utils/test_schemapi.py
+++ b/tests/utils/test_schemapi.py
@@ -146,18 +146,14 @@ class InvalidProperties(_TestSchema):
 class Draft4Schema(_TestSchema):
     _schema = {
         **_validation_selection_schema,
-        **{
-            "$schema": "http://json-schema.org/draft-04/schema#",
-        },
+        "$schema": "http://json-schema.org/draft-04/schema#",
     }


 class Draft6Schema(_TestSchema):
     _schema = {
         **_validation_selection_schema,
-        **{
-            "$schema": "http://json-schema.org/draft-06/schema#",
-        },
+        "$schema": "http://json-schema.org/draft-06/schema#",
     }
diff --git a/tests/utils/test_utils.py b/tests/utils/test_utils.py
index 9cf5bda37..76871e3b0 100644
--- a/tests/utils/test_utils.py
+++ b/tests/utils/test_utils.py
@@ -197,7 +197,7 @@ def test_sanitize_dataframe_infs():

 @pytest.mark.skipif(
     not hasattr(pd, "Int64Dtype"),
-    reason="Nullable integers not supported in pandas v{}".format(pd.__version__),
+    reason=f"Nullable integers not supported in pandas v{pd.__version__}",
 )
 def test_sanitize_nullable_integers():
     df = pd.DataFrame(
@@ -227,7 +227,7 @@ def test_sanitize_nullable_integers():

 @pytest.mark.skipif(
     not hasattr(pd, "StringDtype"),
-    reason="dedicated String dtype not supported in pandas v{}".format(pd.__version__),
+    reason=f"dedicated String dtype not supported in pandas v{pd.__version__}",
 )
 def test_sanitize_string_dtype():
     df = pd.DataFrame(
@@ -253,7 +253,7 @@ def test_sanitize_string_dtype():

 @pytest.mark.skipif(
     not hasattr(pd, "BooleanDtype"),
-    reason="Nullable boolean dtype not supported in pandas v{}".format(pd.__version__),
+    reason=f"Nullable boolean dtype not supported in pandas v{pd.__version__}",
 )
 def test_sanitize_boolean_dtype():
     df = pd.DataFrame(
diff --git a/tests/vegalite/test_common.py b/tests/vegalite/test_common.py
index 0cc216e68..2c043f47f 100644
--- a/tests/vegalite/test_common.py
+++ b/tests/vegalite/test_common.py
@@ -93,7 +93,7 @@ def test_max_rows(alt):
     with alt.data_transformers.enable("default"):
         basic_chart.to_dict()  # this should not fail
-
-    with alt.data_transformers.enable("default", max_rows=5):
-        with pytest.raises(alt.MaxRowsError):
-            basic_chart.to_dict()  # this should not fail
+    with alt.data_transformers.enable("default", max_rows=5), pytest.raises(
+        alt.MaxRowsError
+    ):
+        basic_chart.to_dict()  # this should not fail
diff --git a/tests/vegalite/v5/test_alias.py b/tests/vegalite/v5/test_alias.py
index f39ab57cc..eab6e10c9 100644
--- a/tests/vegalite/v5/test_alias.py
+++ b/tests/vegalite/v5/test_alias.py
@@ -10,7 +10,8 @@ def test_aliases():
         try:
             getattr(alt, alias)
         except AttributeError as err:
-            raise AssertionError(f"cannot resolve '{alias}':, {err}") from err
+            msg = f"cannot resolve '{alias}': {err}"
+            raise AssertionError(msg) from err

         # this test fails if the alias match a colliding name in core
         with pytest.raises(AttributeError):
diff --git a/tests/vegalite/v5/test_api.py b/tests/vegalite/v5/test_api.py
index bda7dd9bb..82a0c758f 100644
--- a/tests/vegalite/v5/test_api.py
+++ b/tests/vegalite/v5/test_api.py
@@ -14,7 +14,7 @@
 import altair.vegalite.v5 as alt

 try:
-    import vl_convert as vlc  # noqa: F401
+    import vl_convert as vlc
 except ImportError:
     vlc = None

@@ -60,7 +60,8 @@ def _make_chart_type(chart_type):
     elif chart_type == "chart":
         return base
     else:
-        raise ValueError("chart_type='{}' is not recognized".format(chart_type))
+        msg = f"chart_type='{chart_type}' is not recognized"
+        raise ValueError(msg)


 @pytest.fixture
@@ -278,7 +279,7 @@ def test_selection_expression():
     assert selection.value.to_dict() == {"expr": f"{selection.name}.value"}

     assert isinstance(selection["value"], alt.expr.Expression)
-    assert selection["value"].to_dict() == "{0}['value']".format(selection.name)
+    assert selection["value"].to_dict() == f"{selection.name}['value']"

     magic_attr = "__magic__"
     with pytest.raises(AttributeError):
@@ -295,18 +296,17 @@ def test_save(format, engine, basic_chart):
     out = io.StringIO()
     mode = "r"

-    if format in ["svg", "png", "pdf", "bogus"]:
-        if engine == "vl-convert":
-            if format == "bogus":
-                with pytest.raises(ValueError) as err:
-                    basic_chart.save(out, format=format, engine=engine)
-                assert f"Unsupported format: '{format}'" in str(err.value)
-                return
-            elif vlc is None:
-                with pytest.raises(ValueError) as err:
-                    basic_chart.save(out, format=format, engine=engine)
-                assert "vl-convert-python" in str(err.value)
-                return
+    if format in ["svg", "png", "pdf", "bogus"] and engine == "vl-convert":
+        if format == "bogus":
+            with pytest.raises(ValueError) as err:
+                basic_chart.save(out, format=format, engine=engine)
+            assert f"Unsupported format: '{format}'" in str(err.value)
+            return
+        elif vlc is None:
+            with pytest.raises(ValueError) as err:
+                basic_chart.save(out, format=format, engine=engine)
+            assert "vl-convert-python" in str(err.value)
+            return

     basic_chart.save(out, format=format, engine=engine)
     out.seek(0)
@@ -480,9 +480,9 @@ def test_selection():
     assert isinstance(single & multi, alt.SelectionPredicateComposition)
     assert isinstance(single | multi, alt.SelectionPredicateComposition)
     assert isinstance(~single, alt.SelectionPredicateComposition)
-    assert "and" in (single & multi).to_dict().keys()
-    assert "or" in (single | multi).to_dict().keys()
-    assert "not" in (~single).to_dict().keys()
+    assert "and" in (single & multi).to_dict()
+    assert "or" in (single | multi).to_dict()
+    assert "not" in (~single).to_dict()

     # test that default names increment (regression for #1454)
     sel1 = alt.selection_point()
@@ -829,7 +829,7 @@ def test_consolidate_InlineData():
     with alt.data_transformers.enable(consolidate_datasets=True):
         dct = chart.to_dict()
     assert dct["data"]["format"] == data.format
-    assert list(dct["datasets"].values())[0] == data.values
+    assert next(iter(dct["datasets"].values())) == data.values

     data = alt.InlineData(values=[], name="runtime_data")
     chart = alt.Chart(data).mark_point()
diff --git a/tests/vegalite/v5/test_data.py b/tests/vegalite/v5/test_data.py
index 2cf48e833..7fb9d963a 100644
--- a/tests/vegalite/v5/test_data.py
+++ b/tests/vegalite/v5/test_data.py
@@ -22,10 +22,11 @@ def test_disable_max_rows(sample_data):
         alt.data_transformers.get()(sample_data)

     try:
-        with alt.data_transformers.enable("json"):
-            # Ensure that there is no TypeError for non-max_rows transformers.
-            with alt.data_transformers.disable_max_rows():
-                jsonfile = alt.data_transformers.get()(sample_data)
+        # Ensure that there is no TypeError for non-max_rows transformers.
+        with alt.data_transformers.enable(
+            "json"
+        ), alt.data_transformers.disable_max_rows():
+            jsonfile = alt.data_transformers.get()(sample_data)
     except TypeError:
         jsonfile = {}
     finally:
diff --git a/tests/vegalite/v5/test_display.py b/tests/vegalite/v5/test_display.py
index 2a3fa3a35..b3c5660da 100644
--- a/tests/vegalite/v5/test_display.py
+++ b/tests/vegalite/v5/test_display.py
@@ -32,11 +32,10 @@ def test_check_renderer_options():
         display(None)

     # check that an error is appropriately raised if the test fails
-    with pytest.raises(AssertionError):
-        with check_render_options(foo="bar"):
-            from IPython.display import display
+    with pytest.raises(AssertionError), check_render_options(foo="bar"):
+        from IPython.display import display

-            display(None)
+        display(None)


 def test_display_options():
diff --git a/tests/vegalite/v5/test_params.py b/tests/vegalite/v5/test_params.py
index 91d299283..90dffe9f2 100644
--- a/tests/vegalite/v5/test_params.py
+++ b/tests/vegalite/v5/test_params.py
@@ -105,7 +105,7 @@ def test_parameter_naming():
     assert prm.param.name == "some_name"

     # test automatic naming which has the form such as param_5
-    prm0, prm1, prm2 = [alt.param() for _ in range(3)]
+    prm0, prm1, prm2 = (alt.param() for _ in range(3))

     res = re.match("param_([0-9]+)", prm0.param.name)
diff --git a/tests/vegalite/v5/test_renderers.py b/tests/vegalite/v5/test_renderers.py
index b06706e34..4d4ba0c98 100644
--- a/tests/vegalite/v5/test_renderers.py
+++ b/tests/vegalite/v5/test_renderers.py
@@ -8,12 +8,12 @@

 try:
-    import vl_convert as vlc  # noqa: F401
+    import vl_convert as vlc
 except ImportError:
     vlc = None

 try:
-    import anywidget  # noqa: F401
+    import anywidget
 except ImportError:
     anywidget = None  # type: ignore
diff --git a/tools/generate_api_docs.py b/tools/generate_api_docs.py
index d6bc0482e..423652f48 100644
--- a/tools/generate_api_docs.py
+++ b/tools/generate_api_docs.py
@@ -78,15 +78,14 @@ def iter_objects(
 ) -> Iterator[str]:
     for name in dir(mod):
         obj = getattr(mod, name)
-        if ignore_private:
-            if name.startswith("_"):
-                continue
-        if restrict_to_type is not None:
-            if not isinstance(obj, restrict_to_type):
-                continue
-        if restrict_to_subclass is not None:
-            if not (isinstance(obj, type) and issubclass(obj, restrict_to_subclass)):
-                continue
+        if ignore_private and name.startswith("_"):
+            continue
+        if restrict_to_type is not None and not isinstance(obj, restrict_to_type):
+            continue
+        if restrict_to_subclass is not None and (
+            not (isinstance(obj, type) and issubclass(obj, restrict_to_subclass))
+        ):
+            continue
         yield name


@@ -120,7 +119,7 @@ def lowlevel_wrappers() -> List[str]:

 def write_api_file() -> None:
-    print("Updating API docs\n ->{}".format(API_FILENAME))
+    print(f"Updating API docs\n ->{API_FILENAME}")
     sep = "\n   "
     with open(API_FILENAME, "w") as f:
         f.write(
diff --git a/tools/generate_schema_wrapper.py b/tools/generate_schema_wrapper.py
index 40958c202..8abe847d6 100644
--- a/tools/generate_schema_wrapper.py
+++ b/tools/generate_schema_wrapper.py
@@ -347,7 +347,8 @@ def download_schemafile(
     if not skip_download:
         request.urlretrieve(url, filename)
     elif not os.path.exists(filename):
-        raise ValueError("Cannot skip download: {} does not exist".format(filename))
+        msg = f"Cannot skip download: {filename} does not exist"
+        raise ValueError(msg)
     return filename


@@ -400,11 +401,12 @@ def copy_schemapi_util() -> None:
         join(dirname(__file__), "..", "altair", "utils", "schemapi.py")
     )

-    print("Copying\n {}\n -> {}".format(source_path, destination_path))
-    with open(source_path, "r", encoding="utf8") as source:
-        with open(destination_path, "w", encoding="utf8") as dest:
-            dest.write(HEADER)
-            dest.writelines(source.readlines())
+    print(f"Copying\n {source_path}\n -> {destination_path}")
+    with open(source_path, encoding="utf8") as source, open(
+        destination_path, "w", encoding="utf8"
+    ) as dest:
+        dest.write(HEADER)
+        dest.writelines(source.readlines())


 def recursive_dict_update(schema: dict, root: dict, def_dict: dict) -> None:
@@ -413,7 +415,7 @@ def recursive_dict_update(schema: dict, root: dict, def_dict: dict) -> None:
         if "properties" in next_schema:
             definition = schema["$ref"]
             properties = next_schema["properties"]
-            for k in def_dict.keys():
+            for k in def_dict:
                 if k in properties:
                     def_dict[k] = definition
                 else:
@@ -430,7 +432,8 @@ def get_field_datum_value_defs(propschema: SchemaInfo, root: dict) -> dict:
         if "field" in schema["properties"]:
             def_dict["field"] = propschema.ref
         else:
-            raise ValueError("Unexpected schema structure")
+            msg = "Unexpected schema structure"
+            raise ValueError(msg)
     else:
         recursive_dict_update(schema, root, def_dict)

@@ -486,7 +489,7 @@ def generate_vegalite_schema_wrapper(schema_file: str) -> str:
         schemarepr=defschema_repr,
         rootschema=rootschema,
         basename=basename,
-        rootschemarepr=CodeSnippet("{}._rootschema".format(basename)),
+        rootschemarepr=CodeSnippet(f"{basename}._rootschema"),
     )

     graph: Dict[str, List[str]] = {}
@@ -518,7 +521,7 @@ def generate_vegalite_schema_wrapper(schema_file: str) -> str:

     contents = [
         HEADER,
-        "__all__ = {}".format(all_),
+        f"__all__ = {all_}",
         "from typing import Any, Literal, Union, Protocol, Sequence, List",
         "from typing import Dict as TypingDict",
         "from typing import Generator as TypingGenerator" "",
@@ -532,7 +535,7 @@ def generate_vegalite_schema_wrapper(schema_file: str) -> str:
             "Root",
             schema=rootschema,
             basename=basename,
-            schemarepr=CodeSnippet("{}._rootschema".format(basename)),
+            schemarepr=CodeSnippet(f"{basename}._rootschema"),
         )
     )

@@ -683,8 +686,7 @@ def generate_vegalite_mark_mixin(
         for p in (sorted(arg_info.required) + sorted(arg_info.kwds))
     ]
     dict_args = [
-        "{0}={0}".format(p)
-        for p in (sorted(arg_info.required) + sorted(arg_info.kwds))
+        f"{p}={p}" for p in (sorted(arg_info.required) + sorted(arg_info.kwds))
     ]

     if arg_info.additional or arg_info.invalid_kwds:
@@ -744,26 +746,26 @@ def vegalite_main(skip_download: bool = False) -> None:

     # Generate __init__.py file
     outfile = join(schemapath, "__init__.py")
-    print("Writing {}".format(outfile))
+    print(f"Writing {outfile}")
     content = [
         "# ruff: noqa\n",
         "from .core import *\nfrom .channels import *\n",
         f"SCHEMA_VERSION = '{version}'\n",
-        "SCHEMA_URL = {!r}\n" "".format(schema_url(version)),
+        f"SCHEMA_URL = {schema_url(version)!r}\n",
     ]
     with open(outfile, "w", encoding="utf8") as f:
         f.write(ruff_format_str(content))

     # Generate the core schema wrappers
     outfile = join(schemapath, "core.py")
-    print("Generating\n {}\n ->{}".format(schemafile, outfile))
+    print(f"Generating\n {schemafile}\n ->{outfile}")
     file_contents = generate_vegalite_schema_wrapper(schemafile)
     with open(outfile, "w", encoding="utf8") as f:
         f.write(ruff_format_str(file_contents))

     # Generate the channel wrappers
     outfile = join(schemapath, "channels.py")
-    print("Generating\n {}\n ->{}".format(schemafile, outfile))
+    print(f"Generating\n {schemafile}\n ->{outfile}")
     code = generate_vegalite_channel_wrappers(schemafile, version=version)
     with open(outfile, "w", encoding="utf8") as f:
         f.write(ruff_format_str(code))
@@ -771,7 +773,7 @@ def vegalite_main(skip_download: bool = False) -> None:
     # generate the mark mixin
     markdefs = {k: k + "Def" for k in ["Mark", "BoxPlot", "ErrorBar", "ErrorBand"]}
     outfile = join(schemapath, "mixins.py")
-    print("Generating\n {}\n ->{}".format(schemafile, outfile))
+    print(f"Generating\n {schemafile}\n ->{outfile}")
     mark_imports, mark_mixin = generate_vegalite_mark_mixin(schemafile, markdefs)
     config_imports, config_mixin = generate_vegalite_config_mixin(schemafile)
     try_except_imports = [
@@ -836,9 +838,7 @@ def _create_encode_signature(
         signature_args.append(f"{channel}: Union[{', '.join(union_types)}] = Undefined")

         docstring_parameters.append(f"{channel} : {', '.join(docstring_union_types)}")
-        docstring_parameters.append(
-            "    {}".format(process_description(info.deep_description))
-        )
+        docstring_parameters.append(f"    {process_description(info.deep_description)}")
     if len(docstring_parameters) > 1:
         docstring_parameters += [""]
     docstring = indent_docstring(
@@ -862,8 +862,8 @@ def main() -> None:

     # The modules below are imported after the generation of the new schema files
     # as these modules import Altair. This allows them to use the new changes
-    import generate_api_docs  # noqa: E402
-    import update_init_file  # noqa: E402
+    import generate_api_docs
+    import update_init_file

     generate_api_docs.write_api_file()
     update_init_file.update__all__variable()
diff --git a/tools/schemapi/codegen.py b/tools/schemapi/codegen.py
index 63e89d647..6c783a42d 100644
--- a/tools/schemapi/codegen.py
+++ b/tools/schemapi/codegen.py
@@ -69,7 +69,8 @@ def get_args(info: SchemaInfo) -> ArgInfo:
         additional = True
         # additional = info.additionalProperties or info.patternProperties
     else:
-        raise ValueError("Schema object not understood")
+        msg = "Schema object not understood"
+        raise ValueError(msg)

     return ArgInfo(
         nonkeyword=nonkeyword,
@@ -196,7 +197,7 @@ def arg_info(self) -> ArgInfo:
     def docstring(self, indent: int = 0) -> str:
         info = self.info
         doc = [
-            "{} schema wrapper".format(self.classname),
+            f"{self.classname} schema wrapper",
         ]
         if info.description:
             doc += self._process_description(  # remove condition description
@@ -224,9 +225,7 @@ def docstring(self, indent: int = 0) -> str:
                         altair_classes_prefix=self.altair_classes_prefix,
                     ),
                 ),
-                "    {}".format(
-                    self._process_description(propinfo.deep_description)
-                ),
+                f"    {self._process_description(propinfo.deep_description)}",
             ]
         if len(doc) > 1:
             doc += [""]
@@ -284,7 +283,7 @@ def init_args(
             for p in sorted(arg_info.required) + sorted(arg_info.kwds)
         )
         super_args.extend(
-            "{0}={0}".format(p)
+            f"{p}={p}"
             for p in sorted(nodefault)
             + sorted(arg_info.required)
             + sorted(arg_info.kwds)
diff --git a/tools/schemapi/schemapi.py b/tools/schemapi/schemapi.py
index ce1268c04..7b4460b6a 100644
--- a/tools/schemapi/schemapi.py
+++ b/tools/schemapi/schemapi.py
@@ -134,7 +134,7 @@ def validate_jsonschema(

     # Nothing special about this first error but we need to choose one
     # which can be raised
-    main_error = list(grouped_errors.values())[0][0]
+    main_error = next(iter(grouped_errors.values()))[0]
     # All errors are then attached as a new attribute to ValidationError so that
     # they can be used in SchemaValidationError to craft a more helpful
     # error message. Setting a new attribute like this is not ideal as
@@ -354,7 +354,7 @@ def _deduplicate_errors(
     }
     deduplicated_errors: ValidationErrorList = []
     for validator, errors in errors_by_validator.items():
-        deduplication_func = deduplication_functions.get(validator, None)
+        deduplication_func = deduplication_functions.get(validator)
         if deduplication_func is not None:
             errors = deduplication_func(errors)
         deduplicated_errors.extend(_deduplicate_by_message(errors))
@@ -750,11 +750,12 @@ def __init__(self, *args: Any, **kwds: Any) -> None:
         #   - a single arg with no kwds, for, e.g. {'type': 'string'}
         #   - zero args with zero or more kwds for {'type': 'object'}
         if self._schema is None:
-            raise ValueError(
-                "Cannot instantiate object of type {}: "
+            msg = (
+                f"Cannot instantiate object of type {self.__class__}: "
                 "_schema class attribute is not defined."
- "".format(self.__class__) + "" ) + raise ValueError(msg) if kwds: assert len(args) == 0 @@ -850,9 +851,9 @@ def __getattr__(self, attr): return self._kwds[attr] else: try: - _getattr = super(SchemaBase, self).__getattr__ + _getattr = super().__getattr__ except AttributeError: - _getattr = super(SchemaBase, self).__getattribute__ + _getattr = super().__getattribute__ return _getattr(attr) def __setattr__(self, item, val): @@ -867,16 +868,16 @@ def __setitem__(self, item, val): def __repr__(self): if self._kwds: args = ( - "{}: {!r}".format(key, val) + f"{key}: {val!r}" for key, val in sorted(self._kwds.items()) if val is not Undefined ) args = "\n" + ",\n".join(args) - return "{0}({{{1}\n}})".format( + return "{}({{{}\n}})".format( self.__class__.__name__, args.replace("\n", "\n ") ) else: - return "{}({!r})".format(self.__class__.__name__, self._args[0]) + return f"{self.__class__.__name__}({self._args[0]!r})" def __eq__(self, other): return ( @@ -951,7 +952,7 @@ def to_dict( } ) kwds = { - k: v for k, v in kwds.items() if k not in list(ignore) + ["shorthand"] + k: v for k, v in kwds.items() if k not in [*list(ignore), "shorthand"] } if "mark" in kwds and isinstance(kwds["mark"], str): kwds["mark"] = {"type": kwds["mark"]} @@ -960,10 +961,11 @@ def to_dict( context=context, ) else: - raise ValueError( - "{} instance has both a value and properties : " - "cannot serialize to dict".format(self.__class__) + msg = ( + f"{self.__class__} instance has both a value and properties : " + "cannot serialize to dict" ) + raise ValueError(msg) if validate: try: self.validate(result) @@ -1210,7 +1212,8 @@ def from_dict( ) -> Any: """Construct an object from a dict representation""" if (schema is None) == (cls is None): - raise ValueError("Must provide either cls or schema, but not both.") + msg = "Must provide either cls or schema, but not both." + raise ValueError(msg) if schema is None: # Can ignore type errors as cls is not None in case schema is schema = cls._schema # type: ignore[union-attr] @@ -1232,10 +1235,7 @@ def from_dict( # Our class dict is constructed breadth-first from top to bottom, # so the first class that matches is the most general match. matches = self.class_dict[self.hash_schema(schema)] - if matches: - cls = matches[0] - else: - cls = default_class + cls = matches[0] if matches else default_class schema = _resolve_references(schema, rootschema) if "anyOf" in schema or "oneOf" in schema: diff --git a/tools/schemapi/utils.py b/tools/schemapi/utils.py index 2754f19df..061a26e8b 100644 --- a/tools/schemapi/utils.py +++ b/tools/schemapi/utils.py @@ -122,7 +122,7 @@ def __getattr__(self, attr): try: return self[attr] except KeyError: - return super(SchemaProperties, self).__getattr__(attr) + return super().__getattr__(attr) def __getitem__(self, attr): dct = self._properties[attr] @@ -169,7 +169,7 @@ def __repr__(self) -> str: rval = "{...}" elif key == "properties": rval = "{\n " + "\n ".join(sorted(map(repr, val))) + "\n }" - keys.append('"{}": {}'.format(key, rval)) + keys.append(f'"{key}": {rval}') return "SchemaInfo({\n " + "\n ".join(keys) + "\n})" @property @@ -283,7 +283,8 @@ def get_python_type_representation( elif self.type in jsonschema_to_python_types: type_representations.append(jsonschema_to_python_types[self.type]) else: - raise ValueError("No Python type representation available for this schema") + msg = "No Python type representation available for this schema" + raise ValueError(msg) # Shorter types are usually the more relevant ones, e.g. 
`str` instead # of `SchemaBase`. Output order from set is non-deterministic -> If @@ -435,7 +436,8 @@ def is_object(self) -> bool: ): return True else: - raise ValueError("Unclear whether schema.is_object() is True") + msg = "Unclear whether schema.is_object() is True" + raise ValueError(msg) def is_value(self) -> bool: return not self.is_object() @@ -546,8 +548,7 @@ def flatten(container: Iterable) -> Iterable: """ for i in container: if isinstance(i, (list, tuple)): - for j in flatten(i): - yield j + yield from flatten(i) else: yield i diff --git a/tools/update_init_file.py b/tools/update_init_file.py index e4fa65a86..116f127d0 100644 --- a/tools/update_init_file.py +++ b/tools/update_init_file.py @@ -48,7 +48,7 @@ def update__all__variable() -> None: """ # Read existing file content init_path = alt.__file__ - with open(init_path, "r") as f: + with open(init_path) as f: lines = f.readlines() lines = [line.strip("\n") for line in lines]