Minor fixes -- actually not so minor... #144

Merged
merged 21 commits on Apr 21, 2022
Changes from 13 commits
2 changes: 1 addition & 1 deletion aydin/analysis/blind_spot_analysis.py
@@ -13,7 +13,7 @@ def auto_detect_blindspots(
image,
batch_axes: Tuple[bool] = None,
channel_axes: Tuple[bool] = None,
threshold=0.01,
threshold=0.1,
max_blind_spots=3,
max_range: int = 3,
window: int = 31,
2 changes: 1 addition & 1 deletion aydin/analysis/camera_simulation.py
@@ -184,7 +184,7 @@ def simulate_camera_image(
all_electrons = dark_electrons + electrons

# max ADU:
max_adu = numpy.int(2**bitdepth - 1)
max_adu = numpy.int(2 ** bitdepth - 1)

# Convert to discrete numbers (ADU):
adu = (all_electrons * gain_image + offset_image).astype(dtype)
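For context, the electrons-to-ADU conversion touched by this hunk can be exercised on its own. The following is a minimal, self-contained sketch of the same arithmetic (gain, offset, quantisation and clipping at the maximum ADU value); the array shapes and parameter values are assumptions, not values taken from aydin.

import numpy

rng = numpy.random.default_rng(0)

# Assumed camera parameters for illustration:
bitdepth = 12
gain_image = numpy.full((64, 64), 2.0)      # per-pixel gain
offset_image = numpy.full((64, 64), 100.0)  # per-pixel offset
dark_electrons = rng.poisson(5.0, size=(64, 64))
electrons = rng.poisson(200.0, size=(64, 64))

all_electrons = dark_electrons + electrons

# max ADU representable at this bit depth:
max_adu = int(2 ** bitdepth - 1)

# Convert to discrete numbers (ADU) and clip to the valid range:
adu = (all_electrons * gain_image + offset_image).astype(numpy.uint16)
adu = numpy.minimum(adu, max_adu)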
2 changes: 1 addition & 1 deletion aydin/analysis/dimension_analysis.py
@@ -103,7 +103,7 @@ def dimension_analysis_on_image(
optimiser='smart',
max_num_evaluations=max_num_evaluations,
jinv_interpolation_mode='gaussian',
enable_extended_blind_spot=False,
blind_spots=False,
crop_size_in_voxels=crop_size_in_voxels,
display_images=False,
)
2 changes: 1 addition & 1 deletion aydin/analysis/empirical_noise_model.py
@@ -2,7 +2,7 @@
from numpy.random import randint


def distill_noise_model(clean_array, noisy_array, nb_samples: int = 2**18):
def distill_noise_model(clean_array, noisy_array, nb_samples: int = 2 ** 18):
"""
Given a clean array and a corresponding noisy array,
this function analyses for each value in the clean array all the possible observed noisy values.
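The docstring above describes the idea behind distill_noise_model: for each value in the clean array, collect the noisy values observed at the same positions, so that realistic noise can later be re-sampled. The sketch below only illustrates that idea under assumptions; the sampling strategy, data structure and returned sampler are not aydin's actual implementation.

from collections import defaultdict

from numpy.random import randint


def distill_noise_model_sketch(clean_array, noisy_array, nb_samples: int = 2 ** 18):
    # Map each observed clean value to the noisy values seen at the same positions:
    observations = defaultdict(list)

    clean_flat = clean_array.ravel()
    noisy_flat = noisy_array.ravel()

    # Randomly sample positions (with replacement) to keep the model compact:
    for i in randint(0, clean_flat.size, size=nb_samples):
        observations[clean_flat[i]].append(noisy_flat[i])

    def sample_noise(clean_value):
        # Return one plausible noisy observation for the given clean value:
        candidates = observations.get(clean_value)
        if not candidates:
            return clean_value  # no observation for this value, pass through
        return candidates[randint(0, len(candidates))]

    return sample_noise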
1 change: 1 addition & 0 deletions aydin/analysis/resolution_estimate.py
@@ -55,6 +55,7 @@ def resolution_estimate(image, precision: float = 0.01, display_images: bool = F
_denoise_sobolev,
denoise_parameters=parameter_ranges,
display_images=display_images,
interpolation_mode='gaussian',
)

norm_frequency = best_parameters.pop('freq_cutoff')
2 changes: 1 addition & 1 deletion aydin/analysis/snr_estimate.py
@@ -34,7 +34,7 @@ def snr_estimate(image) -> float:
f = numpy.zeros_like(image)
axis_grid = tuple(numpy.linspace(0, 1, s) for s in image.shape)
for x in numpy.meshgrid(*axis_grid, indexing='ij'):
f += x**2
f += x ** 2
f = numpy.sqrt(f)

# define two domains:
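The lines above build a normalised radial frequency map f over the image grid, and the final comment announces a split into two domains: a low-frequency region that is mostly signal and a high-frequency region that is mostly noise. The sketch below reproduces that frequency map and one plausible way of finishing the estimate; the use of a DCT magnitude spectrum, the 0.5 split point and the power-ratio formula are assumptions, not aydin's exact method.

import numpy
from scipy.fft import dctn


def snr_proxy(image, split: float = 0.5) -> float:
    # Normalised radial frequency map, as in the hunk above:
    f = numpy.zeros_like(image, dtype=numpy.float32)
    axis_grid = tuple(numpy.linspace(0, 1, s) for s in image.shape)
    for x in numpy.meshgrid(*axis_grid, indexing='ij'):
        f += x ** 2
    f = numpy.sqrt(f)

    # Magnitude spectrum; a DCT is used so that frequency increases
    # monotonically along each axis, matching the grid above:
    spectrum = numpy.absolute(dctn(image.astype(numpy.float32)))

    # Two domains: low frequencies (signal-dominated) and high frequencies (noise-dominated):
    low_power = spectrum[f <= split].mean()
    high_power = spectrum[f > split].mean()

    # Crude SNR proxy in decibels:
    return float(10 * numpy.log10(low_power / high_power))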
2 changes: 1 addition & 1 deletion aydin/features/base.py
@@ -18,7 +18,7 @@ class FeatureGeneratorBase(ABC):
"""

_max_non_batch_dims = 4
_max_voxels = 512**3
_max_voxels = 512 ** 3

def __init__(self):
"""
aydin/features/groups/convolutional.py → aydin/features/groups/convolution.py
@@ -15,20 +15,29 @@ class ConvolutionalFeatures(FeatureGroupBase):
Generates convolutional features given a set of kernels.
"""

def __init__(self, kernels: Optional[Sequence[ArrayLike]]):
def __init__(
self,
kernels: Optional[Sequence[ArrayLike]],
separable: bool = True,
):
"""
Constructor that configures these features.

Parameters
----------
kernels : Optional[Sequence[ArrayLike]]
Sequence of kernels to use to compiute features.
Sequence of kernels to use to compute features.

separable : bool
If True the kernels are assumed to be separable as identical 1d
kernels for each axis.

"""
super().__init__()
self.kernels = kernels if kernels is None else list(kernels)
self.image = None
self.excluded_voxels: Sequence[Tuple[int, ...]] = []

self.separable = separable
self.kwargs = None

@property
@@ -90,23 +99,60 @@ def compute_feature(self, index: int, feature):
missing[aslice] = weight

# We apply a Gaussian filter to find neighboring voxels from which we can estimate the missing values:
missing = gaussian_filter(missing, sigma=1)
missing = gaussian_filter(missing, sigma=0.5)

# Save the sum so that it can be restored after zeroing the excluded voxels:
saved_sum = missing.sum()

# We zero the excluded voxels from it:
for aslice in slices:
missing[aslice] = 0

# We rescale the missing value estimation kernel:
missing /= missing.sum()
missing *= saved_sum / missing.sum()

# We add the missing-value-estimation to the kernel:
kernel += missing

# Convolution:
convolve(self.image, weights=kernel, output=feature)
_convolve(
image=self.image, kernel=kernel, separable=self.separable, output=feature
)

# if self.image.size > 4**4:
# import napari
# from napari import Viewer
# viewer = Viewer()
# viewer.add_image(self.image, name='self.image')
# viewer.add_image(feature, name='feature')
# napari.run()

def finish(self):
self.image = None
self.excluded_voxels = None
self.kwargs = None
self.kernels = None


def _convolve(image: ArrayLike, kernel: ArrayLike, separable: bool, output: ArrayLike):

if separable and kernel.ndim == 1:

# Looping through the axis:
for axis in range(image.ndim):

# prepare shape:
shape = [
1,
] * image.ndim
shape[axis] = kernel.shape[0]

# reshape kernel:
kernel = kernel.reshape(*shape)

# convolve:
convolve_output = output if axis == image.ndim - 1 else None
image = convolve(image, weights=kernel, output=convolve_output)

else:
convolve(image, weights=kernel, output=output)
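The new _convolve helper relies on the fact that a separable kernel can be applied as a sequence of 1d convolutions, one per axis, which is much cheaper than a single full nd convolution. The snippet below is a small standalone check of that equivalence; the kernel values and image size are arbitrary assumptions for illustration.

import numpy
from scipy.ndimage import convolve

# A 1d kernel and its 2d outer-product (separable) counterpart:
k1d = numpy.array([0.25, 0.5, 0.25], dtype=numpy.float32)
k2d = numpy.outer(k1d, k1d)

rng = numpy.random.default_rng(0)
image = rng.random((32, 32)).astype(numpy.float32)

# Full 2d convolution:
full = convolve(image, weights=k2d)

# Same result obtained axis by axis with the reshaped 1d kernel, as in _convolve above:
separable = image
for axis in range(image.ndim):
    shape = [1] * image.ndim
    shape[axis] = k1d.shape[0]
    separable = convolve(separable, weights=k1d.reshape(*shape))

assert numpy.allclose(full, separable, atol=1e-5)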
68 changes: 0 additions & 68 deletions aydin/features/groups/dask_uniform.py

This file was deleted.

2 changes: 1 addition & 1 deletion aydin/features/groups/dct.py
@@ -2,7 +2,7 @@
from numpy.linalg import norm
from scipy.fft import idstn

from aydin.features.groups.convolutional import ConvolutionalFeatures
from aydin.features.groups.convolution import ConvolutionalFeatures


class DCTFeatures(ConvolutionalFeatures):
2 changes: 1 addition & 1 deletion aydin/features/groups/extract_kernels.py
@@ -48,7 +48,7 @@ def extract_kernels(

"""
if num_kernels is None:
num_kernels = size**image.ndim
num_kernels = size ** image.ndim

# #############################################################################
# Learn the dictionary of images
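The surrounding code indicates that the kernels are obtained by learning a dictionary from image patches, with size ** image.ndim atoms by default. Below is a rough 2d sketch of that approach; the use of scikit-learn's patch extraction and MiniBatchDictionaryLearning, and every parameter value, are assumptions for illustration rather than aydin's actual extract_kernels implementation.

from typing import Optional

import numpy
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d


def extract_kernels_2d_sketch(image, size: int = 5, num_kernels: Optional[int] = None):
    if num_kernels is None:
        num_kernels = size ** image.ndim

    # Collect 2d patches and flatten them into vectors:
    patches = extract_patches_2d(image, (size, size), max_patches=4096, random_state=0)
    patches = patches.reshape(patches.shape[0], -1).astype(numpy.float32)
    patches -= patches.mean(axis=1, keepdims=True)

    # Learn a dictionary whose atoms play the role of convolution kernels:
    dictionary = MiniBatchDictionaryLearning(n_components=num_kernels, random_state=0)
    atoms = dictionary.fit(patches).components_

    # Reshape each atom back into a (size, size) kernel:
    return atoms.reshape(num_kernels, size, size)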
2 changes: 1 addition & 1 deletion aydin/features/groups/learned_conv.py
@@ -1,6 +1,6 @@
from typing import Union, Optional

from aydin.features.groups.convolutional import ConvolutionalFeatures
from aydin.features.groups.convolution import ConvolutionalFeatures
from aydin.features.groups.extract_kernels import extract_kernels


125 changes: 125 additions & 0 deletions aydin/features/groups/lowpass.py
@@ -0,0 +1,125 @@
import numpy


from aydin.features.groups.convolution import ConvolutionalFeatures
from aydin.it.classic_denoisers.butterworth import denoise_butterworth


class LowPassFeatures(ConvolutionalFeatures):
"""
Low-Pass Feature Group class

Generates Low-Pass features using Butterworth filtering.
"""

def __init__(
self,
num_features: int = 9,
min_size: int = 3,
max_size: int = 11,
min_freq: float = 0.15,
max_freq: float = 0.75,
order: float = 2,
separable: bool = False,
):
"""
Constructor that configures these features.

Parameters
----------

num_features : int
Number of features.

min_size: int
Minimum size of the low-pass filters.

max_size: int
Maximum size of the low-pass filters.

min_freq : float
Minimum cut-off frequency.
(advanced)

max_freq : float
Maximum cut-off frequency.
(advanced)

order : float
Butterworth filter order.
(advanced)

separable : bool
If True the kernels are assumed to be separable as identical 1d
kernels for each axis.

"""
super().__init__(kernels=None, separable=separable)

self.min_size = min_size
self.max_size = max_size
self._num_features = num_features
self.sizes = [
(
int(round(min_size + ((max_size - min_size) / (num_features - 1)) * i))
// 2
)
* 2
+ 1
for i in range(num_features)
]
self.freq_cutoffs = [
max_freq - ((max_freq - min_freq) / (num_features - 1)) * i
for i in range(num_features)
]
self.order = order

self.image = None
self.exclude_center: bool = False

def _ensure_random_kernels_available(self, ndim: int):
# Ensures that the kernels are available for subsequent steps.
# We can't construct the kernels until we know the dimension of the image.

# if we are in the 'separable' case, we only need to generate 1d kernels:
ndim = 1 if self.separable else ndim

if self.kernels is None or self.kernels[0].ndim != ndim:
lowpass_kernels = []

num_features = self._num_features

for index in range(num_features):
size = self.sizes[index]
shape = tuple((size,) * ndim)
freq_cutoff = self.freq_cutoffs[index]
kernel = numpy.zeros(shape=shape, dtype=numpy.float32)
kernel[(size // 2,) * ndim] = 1.0
kernel = denoise_butterworth(
kernel, freq_cutoff=freq_cutoff, order=self.order
)
kernel /= kernel.sum()

# import napari
# with napari.gui_qt():
# from napari import Viewer
# viewer = Viewer()
# viewer.add_image(kernel, name='kernel')

lowpass_kernels.append(kernel)

self.kernels = lowpass_kernels

@property
def receptive_field_radius(self) -> int:
return max(self.sizes) // 2

def num_features(self, ndim: int) -> int:
self._ensure_random_kernels_available(ndim)
return super().num_features(ndim)

def prepare(self, image, excluded_voxels=None, **kwargs):
if excluded_voxels is None:
excluded_voxels = []
self._ensure_random_kernels_available(image.ndim)
super().prepare(image, excluded_voxels, **kwargs)
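Each kernel in this group is the impulse response of a Butterworth low-pass filter: a centred delta is passed through denoise_butterworth and the result, normalised to sum to one, becomes the convolution kernel. The snippet below builds a single such kernel outside the class, reusing the same call as in the diff; the concrete size and cut-off values are assumptions.

import numpy
from aydin.it.classic_denoisers.butterworth import denoise_butterworth

# Assumed parameters for one 2d low-pass kernel:
size = 9
freq_cutoff = 0.3
order = 2

# Centred unit impulse:
kernel = numpy.zeros(shape=(size, size), dtype=numpy.float32)
kernel[size // 2, size // 2] = 1.0

# Impulse response of the Butterworth low-pass filter:
kernel = denoise_butterworth(kernel, freq_cutoff=freq_cutoff, order=order)

# Normalise so that convolution preserves mean intensity:
kernel /= kernel.sum()

With the default settings (num_features=9, min_size=3, max_size=11, min_freq=0.15, max_freq=0.75), the size schedule above evaluates to the odd sizes [3, 5, 5, 7, 7, 9, 9, 11, 11], paired with cut-off frequencies decreasing from 0.75 to 0.15, so the larger kernels carry the more aggressive low-pass filters.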