
Commit

Merge branch 'main' into efficientad_quantile
samet-akcay authored Jul 20, 2023
2 parents f651853 + d3ebced commit 2055db7
Showing 28 changed files with 186 additions and 187 deletions.
1 change: 0 additions & 1 deletion CHANGELOG.md
@@ -10,7 +10,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).

 ### Changed

-- Add default values to algorithms, based on the original papers.
 - Improve default settings of EfficientAD

 ### Deprecated
14 changes: 7 additions & 7 deletions src/anomalib/models/cfa/lightning_model.py
@@ -33,19 +33,19 @@ class Cfa(AnomalyModule):
     """CFA: Coupled-hypersphere-based Feature Adaptation for Target-Oriented Anomaly Localization.
     Args:
-        input_size (tuple[int, int], optional): Size of the model input. Defaults to (224, 224).
-        backbone (str, optional): Backbone CNN network. Defaults to "wide_resnet50_2".
+        input_size (tuple[int, int]): Size of the model input.
+        backbone (str): Backbone CNN network
         gamma_c (int, optional): gamma_c value from the paper. Defaults to 1.
         gamma_d (int, optional): gamma_d value from the paper. Defaults to 1.
-        num_nearest_neighbors (int, optional): Number of nearest neighbors. Defaults to 3.
-        num_hard_negative_features (int, optional): Number of hard negative features. Defaults to 3.
-        radius (float, optional): Radius of the hypersphere to search the soft boundary. Defaults to 1e-5.
+        num_nearest_neighbors (int): Number of nearest neighbors.
+        num_hard_negative_features (int): Number of hard negative features.
+        radius (float): Radius of the hypersphere to search the soft boundary.
     """

     def __init__(
         self,
-        input_size: tuple[int, int] = (224, 224),
-        backbone: str = "wide_resnet50_2",
+        input_size: tuple[int, int],
+        backbone: str,
         gamma_c: int = 1,
         gamma_d: int = 1,
         num_nearest_neighbors: int = 3,
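After this change, `Cfa` no longer falls back to the paper defaults, so call sites have to spell them out. A minimal sketch of the new call, reusing the values that used to be the defaults (the import path is assumed to mirror the file path in the diff header):

```python
from anomalib.models.cfa.lightning_model import Cfa

# input_size and backbone are now required; the remaining
# hyperparameters keep their defaults (gamma_c=1, gamma_d=1, ...).
model = Cfa(
    input_size=(224, 224),       # formerly the default
    backbone="wide_resnet50_2",  # formerly the default
)
```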
28 changes: 14 additions & 14 deletions src/anomalib/models/cfa/torch_model.py
@@ -82,24 +82,24 @@ class CfaModel(DynamicBufferModule):
     """Torch implementation of the CFA Model.
     Args:
-        input_size: (tuple[int, int], optional): Input size of the image tensor. Defaults to (224, 224).
-        backbone (str, optional): Backbone CNN network. Defaults to "wide_resnet50_2".
-        gamma_c (int, optional): gamma_c parameter from the paper. Defaults to 1.
-        gamma_d (int, optional): gamma_d parameter from the paper. Defaults to 1.
-        num_nearest_neighbors (int, optional): Number of nearest neighbors. Defaults to 3.
-        num_hard_negative_features (int, optional): Number of hard negative features. Defaults to 3.
-        radius (float, optional): Radius of the hypersphere to search the soft boundary. Defaults to 1e-5.
+        input_size: (tuple[int, int]): Input size of the image tensor.
+        backbone (str): Backbone CNN network.
+        gamma_c (int): gamma_c parameter from the paper.
+        gamma_d (int): gamma_d parameter from the paper.
+        num_nearest_neighbors (int): Number of nearest neighbors.
+        num_hard_negative_features (int): Number of hard negative features.
+        radius (float): Radius of the hypersphere to search the soft boundary.
     """

     def __init__(
         self,
-        input_size: tuple[int, int] = (224, 224),
-        backbone: str = "wide_resnet50_2",
-        gamma_c: int = 1,
-        gamma_d: int = 1,
-        num_nearest_neighbors: int = 3,
-        num_hard_negative_features: int = 3,
-        radius: float = 1e-5,
+        input_size: tuple[int, int],
+        backbone: str,
+        gamma_c: int,
+        gamma_d: int,
+        num_nearest_neighbors: int,
+        num_hard_negative_features: int,
+        radius: float,
     ) -> None:
         super().__init__()
         self.input_size = torch.Size(input_size)
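The torch-level `CfaModel` goes further and drops every default, so all seven arguments must be supplied. A construction-only sketch with the formerly-default values (import path assumed from the diff header; constructing the model instantiates the backbone feature extractor):

```python
from anomalib.models.cfa.torch_model import CfaModel

torch_model = CfaModel(
    input_size=(224, 224),
    backbone="wide_resnet50_2",
    gamma_c=1,
    gamma_d=1,
    num_nearest_neighbors=3,
    num_hard_negative_features=3,
    radius=1e-5,
)
# Per the context line above, the size is stored as torch.Size((224, 224)).
```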
6 changes: 3 additions & 3 deletions src/anomalib/models/cflow/lightning_model.py
@@ -29,9 +29,9 @@ class Cflow(AnomalyModule):

     def __init__(
         self,
-        input_size: tuple[int, int] = (256, 256),
-        backbone: str = "wide_resnet50_2",
-        layers: list[str] = ["layer1", "layer2", "layer3"],
+        input_size: tuple[int, int],
+        backbone: str,
+        layers: list[str],
         pre_trained: bool = True,
         fiber_batch_size: int = 64,
         decoder: str = "freia-cflow",
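For `Cflow` the `layers` list is now an explicit argument as well, which also removes a mutable default (`["layer1", "layer2", "layer3"]`) from the signature. A hedged sketch of the updated call (import path assumed from the diff header):

```python
from anomalib.models.cflow.lightning_model import Cflow

model = Cflow(
    input_size=(256, 256),                  # formerly the default
    backbone="wide_resnet50_2",             # formerly the default
    layers=["layer1", "layer2", "layer3"],  # formerly a mutable default
)
```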
6 changes: 3 additions & 3 deletions src/anomalib/models/cflow/torch_model.py
@@ -21,9 +21,9 @@ class CflowModel(nn.Module):

     def __init__(
         self,
-        input_size: tuple[int, int] = (256, 256),
-        backbone: str = "wide_resnet50_2",
-        layers: list[str] = ["layer1", "layer2", "layer3"],
+        input_size: tuple[int, int],
+        backbone: str,
+        layers: list[str],
         pre_trained: bool = True,
         fiber_batch_size: int = 64,
         decoder: str = "freia-cflow",
21 changes: 10 additions & 11 deletions src/anomalib/models/csflow/lightning_model.py
@@ -30,21 +30,20 @@ class Csflow(AnomalyModule):
     """Fully Convolutional Cross-Scale-Flows for Image-based Defect Detection.
     Args:
-        input_size (tuple[int, int], optional): Size of the model input. Defaults to (768, 768).
-        cross_conv_hidden_channels (int, optional): Number of hidden channels in the cross convolution.
-            Defaults to 1024.
-        n_coupling_blocks (int, optional): Number of coupling blocks in the model. Defaults to 4.
-        clamp (int, optional): Clamp value for glow layer. Defaults to 3.
-        num_channels (int, optional): Number of channels in the model. Defaults to 3.
+        input_size (tuple[int, int]): Size of the model input.
+        n_coupling_blocks (int): Number of coupling blocks in the model.
+        cross_conv_hidden_channels (int): Number of hidden channels in the cross convolution.
+        clamp (int): Clamp value for glow layer.
+        num_channels (int): Number of channels in the model.
     """

     def __init__(
         self,
-        input_size: tuple[int, int] = (768, 768),
-        cross_conv_hidden_channels: int = 1024,
-        n_coupling_blocks: int = 4,
-        clamp: int = 3,
-        num_channels: int = 3,
+        input_size: tuple[int, int],
+        cross_conv_hidden_channels: int,
+        n_coupling_blocks: int,
+        clamp: int,
+        num_channels: int,
     ) -> None:
         super().__init__()
         self.model: CsFlowModel = CsFlowModel(
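`Csflow` now requires all five constructor arguments. A sketch that simply re-supplies the old defaults (import path assumed from the diff header):

```python
from anomalib.models.csflow.lightning_model import Csflow

model = Csflow(
    input_size=(768, 768),            # formerly the default
    cross_conv_hidden_channels=1024,  # formerly the default
    n_coupling_blocks=4,
    clamp=3,
    num_channels=3,
)
```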
15 changes: 7 additions & 8 deletions src/anomalib/models/csflow/torch_model.py
@@ -504,18 +504,17 @@ class CsFlowModel(nn.Module):
     """CS Flow Module.
     Args:
-        input_size (tuple[int, int], optional): Size of the model input. Defaults to (768, 768).
-        cross_conv_hidden_channels (int, optional): Number of hidden channels in the cross convolution.
-            Defaults to 1024.
-        n_coupling_blocks (int, optional): Number of coupling blocks in the model. Defaults to 4.
-        clamp (int, optional): Clamp value for glow layer. Defaults to 3.
-        num_channels (int, optional): Number of channels in the model. Defaults to 3.
+        input_size (tuple[int, int]): Input image size.
+        cross_conv_hidden_channels (int): Number of hidden channels in the cross convolution.
+        n_coupling_blocks (int): Number of coupling blocks.
+        clamp (float): Clamp value for the coupling blocks.
+        num_channels (int): Number of channels in the input image.
     """

     def __init__(
         self,
-        input_size: tuple[int, int] = (768, 768),
-        cross_conv_hidden_channels: int = 1024,
+        input_size: tuple[int, int],
+        cross_conv_hidden_channels: int,
         n_coupling_blocks: int = 4,
         clamp: int = 3,
         num_channels: int = 3,
9 changes: 4 additions & 5 deletions src/anomalib/models/dfkde/lightning_model.py
@@ -24,9 +24,8 @@ class Dfkde(AnomalyModule):
     """DFKDE: Deep Feature Kernel Density Estimation.
     Args:
-        backbone (str, optional): Pre-trained model backbone. Defaults to "resnet18".
-        layers (list[str], optional): List of layers to extract features from. Defaults to ["layer4"].
-        pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone. Defaults to True.
+        backbone (str): Pre-trained model backbone.
+        pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone.
         max_training_points (int, optional): Number of training points to fit the KDE model.
             Defaults to 40000.
         pre_processing (str, optional): Preprocess features before passing to KDE.
@@ -39,8 +38,8 @@ class Dfkde(AnomalyModule):

     def __init__(
         self,
-        backbone: str = "resnet18",
-        layers: list[str] = ["layer4"],
+        layers: list[str],
+        backbone: str,
         pre_trained: bool = True,
         n_pca_components: int = 16,
         feature_scaling_method: FeatureScalingMethod = FeatureScalingMethod.SCALE,
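Besides losing its defaults, `Dfkde` swaps the parameter order: `layers` now comes before `backbone`. Old positional call sites would therefore feed arguments into the wrong slots, so keyword arguments are the safest migration. A sketch (import path assumed from the diff header):

```python
from anomalib.models.dfkde.lightning_model import Dfkde

model = Dfkde(
    layers=["layer4"],    # formerly the default, now first in the signature
    backbone="resnet18",  # formerly the default
    pre_trained=True,
)
```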
14 changes: 8 additions & 6 deletions src/anomalib/models/dfkde/torch_model.py
@@ -12,7 +12,10 @@
 from torch import Tensor, nn

 from anomalib.models.components import FeatureExtractor
-from anomalib.models.components.classification import FeatureScalingMethod, KDEClassifier
+from anomalib.models.components.classification import (
+    FeatureScalingMethod,
+    KDEClassifier,
+)

 logger = logging.getLogger(__name__)

@@ -21,9 +24,8 @@ class DfkdeModel(nn.Module):
     """Normality Model for the DFKDE algorithm.
     Args:
-        backbone (str, optional): Pre-trained model backbone. Defaults to "resnet18".
-        layers (list[str], optional): List of layers to extract features from. Defaults to ["layer4"].
-        pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone. Defaults to True.
+        backbone (str): Pre-trained model backbone.
+        pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone.
         n_comps (int, optional): Number of PCA components. Defaults to 16.
         pre_processing (str, optional): Preprocess features before passing to KDE.
             Options are between `norm` and `scale`. Defaults to "scale".
@@ -34,8 +36,8 @@

     def __init__(
         self,
-        backbone: str = "resnet18",
-        layers: list[str] = ["layer4"],
+        layers: list[str],
+        backbone: str,
         pre_trained: bool = True,
         n_pca_components: int = 16,
         feature_scaling_method: FeatureScalingMethod = FeatureScalingMethod.SCALE,
19 changes: 9 additions & 10 deletions src/anomalib/models/dfm/lightning_model.py
@@ -23,25 +23,24 @@ class Dfm(AnomalyModule):
     """DFM: Deep Featured Kernel Density Estimation.
     Args:
-        input_size (tuple[int, int], optional): Input size for the model. Defaults to (256, 256).
-        backbone (str, optional): Backbone CNN network. Defaults to "resnet50".
-        layer (str, optional): Layer to extract features from the backbone CNN. Defaults to "layer3".
+        backbone (str): Backbone CNN network
+        layer (str): Layer to extract features from the backbone CNN
+        input_size (tuple[int, int]): Input size for the model.
         pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone.
         pooling_kernel_size (int, optional): Kernel size to pool features extracted from the CNN.
             Defaults to 4.
         pca_level (float, optional): Ratio from which number of components for PCA are calculated.
             Defaults to 0.97.
-        score_type (str, optional): Scoring type. Options are `fre` and `nll`.
-            nll: for Gaussian modeling, fre: pca feature-reconstruction error. Anomaly segmentation is
-            supported with `fre` only. If using `nll`, set `task` in config.yaml to classification
-            Defaults to "fre".
+        score_type (str, optional): Scoring type. Options are `fre` and `nll`. Defaults to "fre".
+            nll: for Gaussian modeling, fre: pca feature-reconstruction error. Anomaly segmentation is
+            supported with `fre` only. If using `nll`, set `task` in config.yaml to classification
     """

     def __init__(
         self,
-        input_size: tuple[int, int] = (256, 256),
-        backbone: str = "resnet50",
-        layer: str = "layer3",
+        backbone: str,
+        layer: str,
+        input_size: tuple[int, int],
         pre_trained: bool = True,
         pooling_kernel_size: int = 4,
         pca_level: float = 0.97,
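`Dfm` likewise reorders its first three parameters to `backbone`, `layer`, `input_size` and drops their defaults. A sketch of the updated call, including the `score_type` choice described in the docstring (import path assumed from the diff header):

```python
from anomalib.models.dfm.lightning_model import Dfm

# "fre" (PCA feature-reconstruction error) also supports segmentation;
# "nll" is for Gaussian modelling and is classification-only per the docstring.
model = Dfm(
    backbone="resnet50",    # formerly the default
    layer="layer3",         # formerly the default
    input_size=(256, 256),  # formerly the default
    score_type="fre",
)
```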
14 changes: 7 additions & 7 deletions src/anomalib/models/dfm/torch_model.py
@@ -75,9 +75,9 @@ class DFMModel(nn.Module):
     """Model for the DFM algorithm.
     Args:
-        input_size (tuple[int, int], optional): Input size for the model. Defaults to (256, 256).
-        backbone (str, optional): Backbone CNN network. Defaults to "resnet50".
-        layer (str, optional): Layer to extract features from the backbone CNN. Defaults to "layer3".
+        backbone (str): Pre-trained model backbone.
+        layer (str): Layer from which to extract features.
+        input_size (tuple[int, int]): Input size for the model.
         pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone.
         pooling_kernel_size (int, optional): Kernel size to pool features extracted from the CNN.
         n_comps (float, optional): Ratio from which number of components for PCA are calculated. Defaults to 0.97.
@@ -87,14 +87,14 @@

     def __init__(
         self,
-        input_size: tuple[int, int] = (256, 256),
-        backbone: str = "resnet50",
-        layer: str = "layer3",
+        backbone: str,
+        layer: str,
+        input_size: tuple[int, int],
         pre_trained: bool = True,
         pooling_kernel_size: int = 4,
         n_comps: float = 0.97,
         score_type: str = "fre",
-    ) -> None:
+    ):
         super().__init__()
         self.backbone = backbone
         self.pooling_kernel_size = pooling_kernel_size
9 changes: 5 additions & 4 deletions src/anomalib/models/efficient_ad/lightning_model.py
@@ -53,8 +53,9 @@ class EfficientAd(AnomalyModule):
     """PL Lightning Module for the EfficientAd algorithm.
     Args:
-        image_size (tuple): size of input images. Defaults to (256, 256).
-        teacher_out_channels (int): number of convolution output channels. Defaults to 384.
+        teacher_file_name (str): path to the pre-trained teacher model
+        teacher_out_channels (int): number of convolution output channels
+        image_size (tuple): size of input images
         model_size (str): size of student and teacher model
         lr (float): learning rate
         weight_decay (float): optimizer weight decay
@@ -66,8 +67,8 @@

     def __init__(
         self,
-        image_size: tuple[int, int] = (256, 256),
-        teacher_out_channels: int = 384,
+        teacher_out_channels: int,
+        image_size: tuple[int, int],
         model_size: EfficientAdModelSize = EfficientAdModelSize.S,
         lr: float = 0.0001,
         weight_decay: float = 0.00001,
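`EfficientAd` now expects `teacher_out_channels` and `image_size` explicitly, in that order. A sketch that reuses the formerly-default values together with the learning-rate settings shown as context above (import path assumed from the diff header):

```python
from anomalib.models.efficient_ad.lightning_model import EfficientAd

model = EfficientAd(
    teacher_out_channels=384,  # formerly the default
    image_size=(256, 256),     # formerly the default
    lr=0.0001,
    weight_decay=0.00001,
)
```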
9 changes: 5 additions & 4 deletions src/anomalib/models/efficient_ad/torch_model.py
@@ -210,8 +210,9 @@ class EfficientAdModel(nn.Module):
     """EfficientAd model.
     Args:
-        input_size (tuple): size of input images. Default: (256, 256)
-        teacher_out_channels (int): number of convolution output channels of the pre-trained teacher model. Default: 384
+        teacher_out_channels (int): number of convolution output channels of the pre-trained teacher model
+        pretrained_models_dir (str): path to the pretrained model weights
+        input_size (tuple): size of input images
         model_size (str): size of student and teacher model
         padding (bool): use padding in convoluional layers
         pad_maps (bool): relevant if padding is set to False. In this case, pad_maps = True pads the
@@ -221,8 +222,8 @@

     def __init__(
         self,
-        input_size: tuple[int, int] = (256, 256),
-        teacher_out_channels: int = 384,
+        teacher_out_channels: int,
+        input_size: tuple[int, int],
         model_size: EfficientAdModelSize = EfficientAdModelSize.S,
         padding=False,
         pad_maps=True,
10 changes: 5 additions & 5 deletions src/anomalib/models/fastflow/config.yaml
@@ -7,7 +7,7 @@ dataset:
   train_batch_size: 32
   eval_batch_size: 32
   num_workers: 8
-  image_size: 384 # dimensions to which images are resized (mandatory) options: [256, 256, 448, 384]
+  image_size: 256 # dimensions to which images are resized (mandatory) options: [256, 256, 448, 384]
   center_crop: null # dimensions to which images are center-cropped after resizing (optional)
   normalization: imagenet # data distribution to which the images will be normalized: [none, imagenet]
   transform_config:
@@ -27,11 +27,11 @@ dataset:

 model:
   name: fastflow
-  backbone: deit_base_distilled_patch16_384 # options: [resnet18, wide_resnet50_2, cait_m48_448, deit_base_distilled_patch16_384]
+  backbone: resnet18 # options: [resnet18, wide_resnet50_2, cait_m48_448, deit_base_distilled_patch16_384]
   pre_trained: true
-  flow_steps: 20 # options: [8, 8, 20, 20] - for each supported backbone
-  hidden_ratio: 0.16 # options: [1.0, 1.0, 0.16, 0.16] - for each supported backbone
-  conv3x3_only: False # options: [True, False, False, False] - for each supported backbone
+  flow_steps: 8 # options: [8, 8, 20, 20] - for each supported backbone
+  hidden_ratio: 1.0 # options: [1.0, 1.0, 0.16, 0.16] - for each supported backbone
+  conv3x3_only: True # options: [True, False, False, False] - for each supported backbone
   lr: 0.001
   weight_decay: 0.00001
   early_stopping:
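The option comments in this config pair each supported backbone with a matching image size and flow settings. Read positionally, they give the mapping below — a convenience sketch for choosing consistent values, not an anomalib API:

```python
# (image_size, flow_steps, hidden_ratio, conv3x3_only) per backbone,
# read off the "options" comments in config.yaml above.
FASTFLOW_BACKBONE_SETTINGS = {
    "resnet18": (256, 8, 1.0, True),
    "wide_resnet50_2": (256, 8, 1.0, False),
    "cait_m48_448": (448, 20, 0.16, False),
    "deit_base_distilled_patch16_384": (384, 20, 0.16, False),
}
```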
18 changes: 9 additions & 9 deletions src/anomalib/models/fastflow/lightning_model.py
@@ -20,22 +20,22 @@ class Fastflow(AnomalyModule):
     """PL Lightning Module for the FastFlow algorithm.
     Args:
-        input_size (tuple[int, int]): Model input size. Defaults to (384, 384).
-        backbone (str): Backbone CNN network. Defaults to "deit_base_distilled_patch16_384".
-        pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone. Defaults to True.
-        flow_steps (int, optional): Flow steps. Defaults to 20.
-        hidden_ratio (float, optional): Ratio to calculate hidden var channels. Defaults to 0.16.
+        input_size (tuple[int, int]): Model input size.
+        backbone (str): Backbone CNN network
+        pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone.
+        flow_steps (int, optional): Flow steps.
         conv3x3_only (bool, optinoal): Use only conv3x3 in fast_flow model. Defaults to False.
+        hidden_ratio (float, optional): Ratio to calculate hidden var channels. Defaults to 1.0.
     """

     def __init__(
         self,
-        input_size: tuple[int, int] = (384, 384),
-        backbone: str = "deit_base_distilled_patch16_384",
+        input_size: tuple[int, int],
+        backbone: str,
         pre_trained: bool = True,
-        flow_steps: int = 20,
-        hidden_ratio: float = 0.16,
+        flow_steps: int = 8,
         conv3x3_only: bool = False,
+        hidden_ratio: float = 1.0,
     ) -> None:
         super().__init__()

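With the new signature, `Fastflow` requires `input_size` and `backbone`, while `flow_steps` and `hidden_ratio` now default to the resnet18-style values (8 and 1.0); `conv3x3_only` still defaults to False. A sketch matching the resnet18 row of the config above (import path assumed from the diff header):

```python
from anomalib.models.fastflow.lightning_model import Fastflow

model = Fastflow(
    input_size=(256, 256),
    backbone="resnet18",
    flow_steps=8,
    hidden_ratio=1.0,
    conv3x3_only=True,  # config.yaml uses True for resnet18; the code default is False
)
```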