
Commit: Ignore copyrights in notebooks

samet-akcay committed Aug 28, 2024
1 parent 1968dd4, commit ce4d233
Showing 13 changed files with 1,222 additions and 1,219 deletions.
notebooks/000_getting_started/001_getting_started.ipynb (2 changes: 1 addition & 1 deletion)

@@ -168,7 +168,7 @@
 "from anomalib import TaskType\n",
 "from anomalib.data import MVTec\n",
 "from anomalib.data.utils import read_image\n",
-"from anomalib.deploy import OpenVINOInferencer, ExportType\n",
+"from anomalib.deploy import ExportType, OpenVINOInferencer\n",
 "from anomalib.engine import Engine\n",
 "from anomalib.models import Padim"
 ]
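Note: the only change in this notebook is alphabetizing the names inside the from-import, which is what ruff's isort-style rules enforce. A minimal sketch of the convention, using stdlib names purely for illustration:

    # Names inside a "from" import are sorted alphabetically; the rewrite
    # above is cosmetic and binds exactly the same objects at runtime.
    from os.path import basename, dirname  # "dirname, basename" would be flagged

    assert basename("/tmp/clip.avi") == "clip.avi"
    assert dirname("/tmp/clip.avi") == "/tmp"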
notebooks/200_models/201_fastflow.ipynb (1,664 changes: 832 additions & 832 deletions)

Large diffs are not rendered by default.

notebooks/400_openvino/401_nncf.ipynb (685 changes: 342 additions & 343 deletions)

Large diffs are not rendered by default.

(next changed file; file name not rendered)

@@ -141,8 +141,8 @@
 }
 ],
 "source": [
-"from anomalib.data import Folder\n",
 "from anomalib import TaskType\n",
+"from anomalib.data import Folder\n",
 "\n",
 "datamodule = Folder(\n",
 "    name=\"cubes\",\n",
@@ -646,9 +646,10 @@
 }
 ],
 "source": [
-"from anomalib.utils.visualization.image import ImageVisualizer, VisualizationMode\n",
 "from PIL import Image\n",
 "\n",
+"from anomalib.utils.visualization.image import ImageVisualizer, VisualizationMode\n",
+"\n",
 "visualizer = ImageVisualizer(mode=VisualizationMode.FULL, task=TaskType.CLASSIFICATION)\n",
 "output_image = visualizer.visualize_image(predictions)\n",
 "Image.fromarray(output_image)"
(next changed file; file name not rendered)

@@ -58,18 +58,20 @@
 "\n",
 "# Anomalib imports\n",
 "from __future__ import annotations\n",
-"from typing import TYPE_CHECKING\n",
 "\n",
 "import sys\n",
 "import time  # time library\n",
 "from datetime import datetime\n",
 "from pathlib import Path\n",
 "from threading import Thread\n",
+"from typing import TYPE_CHECKING\n",
 "\n",
 "if TYPE_CHECKING:\n",
 "    import numpy as np\n",
 "\n",
 "# importing required libraries\n",
 "import cv2  # OpenCV library\n",
+"\n",
 "from anomalib.deploy import OpenVINOInferencer"
 ]
 },
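Note: this hunk moves the TYPE_CHECKING import down into the standard-library block and keeps numpy behind the TYPE_CHECKING guard, so numpy is imported only during static type checking, never at notebook runtime. A minimal, self-contained sketch of the pattern (the helper function is hypothetical, not from the notebook):

    from __future__ import annotations

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:  # True for type checkers, always False at runtime
        import numpy as np


    def mean_brightness(frame: np.ndarray) -> float:  # hypothetical helper
        # With postponed evaluation (the __future__ import), the annotation
        # stays a string at runtime, so numpy is never actually imported here.
        return float(frame.mean())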
notebooks/600_loggers/601_mlflow_logging.ipynb (16 changes: 9 additions & 7 deletions)

@@ -129,7 +129,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"#!mlflow server"
+"# !mlflow server"
 ]
 },
 {
@@ -175,15 +175,17 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from anomalib.data import MVTec\n",
+"import warnings\n",
+"\n",
+"from lightning.pytorch.callbacks import EarlyStopping\n",
+"\n",
 "from anomalib import TaskType\n",
 "from anomalib.callbacks.checkpoint import ModelCheckpoint\n",
-"from lightning.pytorch.callbacks import EarlyStopping\n",
-"from anomalib.models import Fastflow\n",
-"from anomalib.loggers import AnomalibMLFlowLogger\n",
+"from anomalib.data import MVTec\n",
 "from anomalib.engine import Engine\n",
+"from anomalib.loggers import AnomalibMLFlowLogger\n",
+"from anomalib.models import Fastflow\n",
 "\n",
-"import warnings\n",
 "warnings.filterwarnings(\"ignore\")"
 ]
 },
@@ -1044,7 +1046,7 @@
 "    pixel_metrics=\"AUROC\",\n",
 "    accelerator=\"auto\",\n",
 "    devices=1,\n",
-"    logger=mlflow_logger, # Logger is set here\n",
+"    logger=mlflow_logger,  # Logger is set here\n",
 "    **kwargs,\n",
 ")"
 ]
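Note: the import shuffle above is ruff's isort-style grouping: standard library first, third-party next, first-party (anomalib) last, with one blank line between groups and names sorted within each group. Assuming the notebook's environment (lightning and anomalib installed), the resulting block reads:

    import warnings  # standard library

    from lightning.pytorch.callbacks import EarlyStopping  # third-party

    from anomalib import TaskType  # first-party

    warnings.filterwarnings("ignore")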
pyproject.toml (7 changes: 6 additions & 1 deletion)

@@ -162,6 +162,9 @@ lint.ignore = [
 "PLR0915", # Too many statements

 # NOTE: Disable the following rules for now.
+"A004", # import is shadowing a Python built-in
+"A005", # Module is shadowing a Python built-in
+"B909", # Mutation to loop iterable during iteration
 "PLR6301", # could be a function, class method or static method
 "PLW1514", # Add explicit encoding argument
 "PLR6201", # Convert to set
@@ -182,7 +185,6 @@ lint.ignore = [
 "RUF021", # Parenthesize the `and` subexpression
 "RUF022", # Apply an isort-style sorting to '__all__'
-"RUF025", # Replace with `dict.fromkeys(iterable, value)`)
 "S404", # `subprocess` module is possibly insecure
 # End of disable rules

@@ -259,6 +261,9 @@ notice-rgx = """
 # SPDX-License-Identifier: Apache-2\.0
 """

+[tool.ruff.lint.per-file-ignores]
+"notebooks/**/*" = ["CPY001"]
+
 # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
 # MYPY CONFIGURATION. #
 [tool.mypy]
src/anomalib/callbacks/timer.py (2 changes: 1 addition & 1 deletion)

@@ -105,5 +105,5 @@ def on_test_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
         else:
             test_data_loader = trainer.test_dataloaders[0]
             output += f"(batch_size={test_data_loader.batch_size})"
-        output += f" : {self.num_images/testing_time} FPS"
+        output += f" : {self.num_images / testing_time} FPS"
         logger.info(output)
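Note: the only change here is whitespace around the division inside the f-string replacement field, which the formatter now normalizes; both spellings render identically. A quick check with made-up values:

    num_images, testing_time = 1000, 12.5  # example values, not from a real run
    assert f"{num_images/testing_time} FPS" == f"{num_images / testing_time} FPS" == "80.0 FPS"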
src/anomalib/models/video/ai_vad/clip/clip.py (16 changes: 7 additions & 9 deletions)

@@ -104,15 +104,13 @@ def _convert_image_to_rgb(image):


 def _transform(n_px):
-    return Compose(
-        [
-            Resize(n_px, interpolation=BICUBIC),
-            CenterCrop(n_px),
-            _convert_image_to_rgb,
-            ToTensor(),
-            Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
-        ]
-    )
+    return Compose([
+        Resize(n_px, interpolation=BICUBIC),
+        CenterCrop(n_px),
+        _convert_image_to_rgb,
+        ToTensor(),
+        Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
+    ])


 def available_models() -> List[str]:
src/anomalib/models/video/ai_vad/clip/model.py (30 changes: 13 additions & 17 deletions)

@@ -45,13 +45,11 @@ def __init__(self, inplanes, planes, stride=1):
         if stride > 1 or inplanes != planes * Bottleneck.expansion:
             # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
             self.downsample = nn.Sequential(
-                OrderedDict(
-                    [
-                        ("-1", nn.AvgPool2d(stride)),
-                        ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
-                        ("1", nn.BatchNorm2d(planes * self.expansion)),
-                    ]
-                )
+                OrderedDict([
+                    ("-1", nn.AvgPool2d(stride)),
+                    ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
+                    ("1", nn.BatchNorm2d(planes * self.expansion)),
+                ])
             )

     def forward(self, x: torch.Tensor):
@@ -192,13 +190,11 @@ def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
         self.attn = nn.MultiheadAttention(d_model, n_head)
         self.ln_1 = LayerNorm(d_model)
         self.mlp = nn.Sequential(
-            OrderedDict(
-                [
-                    ("c_fc", nn.Linear(d_model, d_model * 4)),
-                    ("gelu", QuickGELU()),
-                    ("c_proj", nn.Linear(d_model * 4, d_model)),
-                ]
-            )
+            OrderedDict([
+                ("c_fc", nn.Linear(d_model, d_model * 4)),
+                ("gelu", QuickGELU()),
+                ("c_proj", nn.Linear(d_model * 4, d_model)),
+            ])
         )
         self.ln_2 = LayerNorm(d_model)
         self.attn_mask = attn_mask
@@ -430,9 +426,9 @@ def build_model(state_dict: dict):

     if vit:
         vision_width = state_dict["visual.conv1.weight"].shape[0]
-        vision_layers = len(
-            [k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]
-        )
+        vision_layers = len([
+            k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")
+        ])
         vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
         grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
         image_resolution = vision_patch_size * grid_size
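Note: all three hunks in this file only re-hug the brackets (the formatter's compact layout for a call whose sole argument is a list or OrderedDict); behavior is unchanged. The OrderedDict idiom itself is what gives each submodule a stable name. A minimal sketch, assuming torch is installed:

    from collections import OrderedDict

    import torch.nn as nn

    # Passing an OrderedDict to nn.Sequential registers each layer under its
    # key, so it is reachable by attribute and named in the state_dict.
    mlp = nn.Sequential(OrderedDict([
        ("c_fc", nn.Linear(8, 32)),
        ("gelu", nn.GELU()),
        ("c_proj", nn.Linear(32, 8)),
    ]))

    assert mlp.c_fc.in_features == 8
    assert "c_proj.weight" in mlp.state_dict()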
src/anomalib/pipelines/components/runners/parallel.py (2 changes: 1 addition & 1 deletion)

@@ -60,7 +60,7 @@ def run(self, args: dict, prev_stage_results: PREV_STAGE_RESULT = None) -> GATHE
         """Run the job in parallel."""
         self.task_id = self.progress.add_task(self.generator.job_class.name, total=None)
         self.progress.start()
-        self.processes = {i: None for i in range(self.n_jobs)}
+        self.processes = dict.fromkeys(range(self.n_jobs))

[Codecov annotation on line 63: added line was not covered by tests.]

         with ProcessPoolExecutor(max_workers=self.n_jobs, mp_context=multiprocessing.get_context("spawn")) as executor:
             for job in self.generator(args, prev_stage_results):
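Note: this is the rewrite that rule RUF025 asks for (the same rule dropped from the ignore list in pyproject.toml above): a dict comprehension mapping every key to None becomes dict.fromkeys. The two are equivalent here; the caveat is mutable defaults, where fromkeys shares a single object across all keys:

    n_jobs = 4  # example value
    assert dict.fromkeys(range(n_jobs)) == {i: None for i in range(n_jobs)}

    # Caveat: with a mutable default, every key shares ONE object, so the
    # rewrite is only safe for immutable defaults such as None.
    shared = dict.fromkeys(range(2), [])
    shared[0].append("x")
    assert shared[1] == ["x"]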
src/anomalib/utils/post_processing.py (2 changes: 1 addition & 1 deletion)

@@ -35,7 +35,7 @@ def add_label(
     img_height, img_width, _ = image.shape

     font = cv2.FONT_HERSHEY_PLAIN
-    text = label_name if confidence is None else f"{label_name} ({confidence*100:.0f}%)"
+    text = label_name if confidence is None else f"{label_name} ({confidence * 100:.0f}%)"

     # get font sizing
     font_scale = min(img_width, img_height) * font_scale
tests/helpers/data.py (6 changes: 3 additions & 3 deletions)

@@ -503,7 +503,7 @@ def _generate_dummy_avenue_dataset(
         train_path = self.dataset_root / train_dir
         train_path.mkdir(exist_ok=True, parents=True)
         for clip_idx in range(self.num_train):
-            clip_path = train_path / f"{clip_idx+1:02}.avi"
+            clip_path = train_path / f"{clip_idx + 1:02}.avi"
             frames, _ = self.video_generator.generate_video(length=32, first_label=LabelName.NORMAL, p_state_switch=0)
             fourcc = cv2.VideoWriter_fourcc("F", "M", "P", "4")
             writer = cv2.VideoWriter(str(clip_path), fourcc, 30, self.frame_shape)
@@ -517,8 +517,8 @@
         gt_path = self.dataset_root / ground_truth_dir / "testing_label_mask"

         for clip_idx in range(self.num_test):
-            clip_path = test_path / f"{clip_idx+1:02}.avi"
-            mask_path = gt_path / f"{clip_idx+1}_label"
+            clip_path = test_path / f"{clip_idx + 1:02}.avi"
+            mask_path = gt_path / f"{clip_idx + 1}_label"
             mask_path.mkdir(exist_ok=True, parents=True)
             frames, masks = self.video_generator.generate_video(length=32, p_state_switch=0.2)
             fourcc = cv2.VideoWriter_fourcc("F", "M", "P", "4")
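Note: the test-helper change only spaces out the "+"; the :02 format spec is what zero-pads the clip index so the dummy Avenue clips are named 01.avi, 02.avi, and so on. A quick check:

    assert f"{0 + 1:02}.avi" == "01.avi"
    assert f"{9 + 1:02}.avi" == "10.avi"
    assert f"{0+1:02}.avi" == f"{0 + 1:02}.avi"  # spacing does not change the output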
