Fix stacking when the spectral axis is decreasing #457

Merged 2 commits on Jan 31, 2018
spectral_cube/analysis_utilities.py (10 changes: 7 additions & 3 deletions)
@@ -205,7 +205,11 @@ def stack_spectra(cube, velocity_surface, v0=None,
                          "axis of the cube.")
 
     # Calculate the pixel shifts that will be applied.
-    vdiff = np.abs(np.diff(cube.spectral_axis[:2])[0])
+    spec_size = np.diff(cube.spectral_axis[:2])[0]
+    # Assign the correct +/- for pixel shifts based on whether the spectral
+    # axis is increasing (-1) or decreasing (+1)
+    vdiff_sign = -1. if spec_size.value > 0. else 1.
+    vdiff = np.abs(spec_size)
     vel_unit = vdiff.unit
 
     # Check to make sure vdiff doesn't change more than the allowed tolerance
@@ -214,8 +218,8 @@
     if not np.isclose(vdiff2.value, vdiff.value, rtol=vdiff_tol):
         raise ValueError("Cannot shift spectra on a non-linear axes")
 
-    pix_shifts = -((velocity_surface.to(vel_unit) -
-                    v0.to(vel_unit)) / vdiff).value[xy_posns]
+    pix_shifts = vdiff_sign * ((velocity_surface.to(vel_unit) -
+                                v0.to(vel_unit)) / vdiff).value[xy_posns]
 
     # May a header copy so we can start altering
     new_header = cube[:, 0, 0].header.copy()
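For readers skimming the diff, here is a minimal, self-contained sketch of the sign convention the fix introduces (illustrative only, with made-up axis values): moving a feature from its velocity toward v0 needs a negative pixel shift on an increasing spectral axis and a positive one on a decreasing axis.

import numpy as np
import astropy.units as u

# Hypothetical decreasing spectral axis with 1 km/s channels.
spectral_axis = np.linspace(10., -10., 21) * u.km / u.s
v0 = 0. * u.km / u.s
vel = 3. * u.km / u.s  # velocity of the feature in one spectrum

spec_size = np.diff(spectral_axis[:2])[0]          # -1 km/s for this axis
vdiff_sign = -1. if spec_size.value > 0. else 1.   # decreasing axis -> +1
vdiff = np.abs(spec_size)

# +3 pixels here; the same feature on an increasing axis would need -3.
pix_shift = vdiff_sign * ((vel - v0) / vdiff).value
print(pix_shift)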
spectral_cube/tests/test_analysis_functions.py (77 changes: 77 additions & 0 deletions)
@@ -85,6 +85,42 @@ def test_stacking():
                                test_cube.spectral_axis.value)
 
 
+def test_stacking_reversed_specaxis():
+    '''
+    Use a set of identical Gaussian profiles randomly offset to ensure the
+    shifted spectrum has the correct properties.
+    '''
+
+    amp = 1.
+    v0 = 0. * u.km / u.s
+    sigma = 8.
+    noise = None
+    shape = (100, 25, 25)
+
+    test_cube, test_vels = \
+        generate_gaussian_cube(amp=amp, sigma=sigma, noise=noise,
+                               shape=shape, spec_scale=-1. * u.km / u.s)
+
+    true_spectrum = gaussian(test_cube.spectral_axis.value,
+                             amp, v0.value, sigma)
+
+    # Stack the spectra in the cube
+    stacked = \
+        stack_spectra(test_cube, test_vels, v0=v0,
+                      stack_function=np.nanmean,
+                      xy_posns=None, num_cores=1,
+                      chunk_size=-1,
+                      progressbar=False, pad_edges=False)
+
+    # Calculate residuals
+    resid = np.abs(stacked.value - true_spectrum)
+    assert np.std(resid) <= 1e-3
+
+    # The stacked spectrum should have the same spectral axis
+    np.testing.assert_allclose(stacked.spectral_axis.value,
+                               test_cube.spectral_axis.value)
+
+
 def test_stacking_wpadding():
     '''
     Use a set of identical Gaussian profiles randomly offset to ensure the
@@ -128,6 +164,47 @@ def test_stacking_wpadding():
         or (stacked.size == stack_shape + 1)
 
 
+def test_stacking_woffset():
+    '''
+    Use a set of identical Gaussian profiles randomly offset to ensure the
+    shifted spectrum has the correct properties.
+    Make sure the operations aren't affected by absolute velocity offsets
+    '''
+
+    amp = 1.
+    sigma = 8.
+    v0 = 100. * u.km / u.s
+    noise = None
+    shape = (100, 25, 25)
+
+    test_cube, test_vels = \
+        generate_gaussian_cube(shape=shape, amp=amp, sigma=sigma, noise=noise,
+                               v0=v0.value)
+
+    # Stack the spectra in the cube
+    stacked = \
+        stack_spectra(test_cube, test_vels, v0=v0,
+                      stack_function=np.nanmean,
+                      xy_posns=None, num_cores=1,
+                      chunk_size=-1,
+                      progressbar=False, pad_edges=True)
+
+    true_spectrum = gaussian(stacked.spectral_axis.value,
+                             amp, v0.value, sigma)
+
+    # Calculate residuals
+    resid = np.abs(stacked.value - true_spectrum)
+    assert np.std(resid) <= 1e-3
+
+    # The spectral axis should be padded by ~25% on each side
+    stack_shape = int(test_cube.shape[0] * 1.5)
+    # This is rounded, so the shape could be +/- 1
+    assert (stacked.size == stack_shape) or (stacked.size == stack_shape - 1) \
+        or (stacked.size == stack_shape + 1)
+
+
 def test_stacking_noisy():
 
     # Test stack w/ S/N of 0.2
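Both new tests build their reference profile with the gaussian helper from spectral_cube/tests/utilities.py, whose definition is not shown in this diff. A sketch of the form it is presumed to take for the residual assertions above to hold:

import numpy as np

def gaussian(x, amp, mean, sigma):
    # Presumed helper: an unnormalized Gaussian evaluated at the
    # spectral-axis values x, peaking at `amp` where x == mean.
    return amp * np.exp(-(x - mean) ** 2 / (2 * sigma ** 2))

Because every pixel holds an identical, noiseless profile, the shifted-and-stacked spectrum should match this model to well within the 1e-3 residual scatter the tests require.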
spectral_cube/tests/utilities.py (8 changes: 6 additions & 2 deletions)
@@ -55,6 +55,7 @@ def generate_gaussian_cube(shape=(100, 25, 25), sigma=8., amp=1.,
                            noise=None, spec_scale=1 * u.km / u.s,
                            pixel_scale=1 * u.arcsec,
                            beamfwhm=3 * u.arcsec,
+                           v0=None,
                            vel_surface=None,
                            seed=247825498):
     '''
@@ -79,6 +80,9 @@
     spec_middle = int(shape[0] / 2)
     spec_quarter = int(shape[0] / 4)
 
+    if v0 is None:
+        v0 = 0
+
     with NumpyRNGContext(seed):
 
         spec_inds = np.mgrid[-spec_middle:spec_middle] * spec_scale.value
@@ -88,13 +92,13 @@
             mean_pos = \
                 np.random.uniform(low=spec_inds[spec_quarter],
                                   high=spec_inds[spec_quarter + spec_middle])
-            mean_positions[y, x] = mean_pos
             test_cube[:, y, x] = gaussian(spec_inds, amp, mean_pos, sigma)
+            mean_positions[y, x] = mean_pos + v0
             if noise is not None:
                 test_cube[:, y, x] += np.random.normal(0, noise, shape[0])
 
     test_hdu = generate_hdu(test_cube, pixel_scale, spec_scale, beamfwhm,
-                            spec_inds[0])
+                            spec_inds[0] + v0)
 
     spec_cube = SpectralCube.read(test_hdu)
 
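As a usage sketch (not part of the PR), the two changes can be exercised together: generate a cube with a decreasing spectral axis and a bulk offset via the new v0 argument, then stack it back to that reference velocity. The import paths and the combination of spec_scale and v0 in one call are assumptions; the PR's tests exercise them separately.

import numpy as np
import astropy.units as u

from spectral_cube.analysis_utilities import stack_spectra
from spectral_cube.tests.utilities import generate_gaussian_cube

# Synthetic cube: negative channel width (decreasing axis), 50 km/s offset.
cube, vels = generate_gaussian_cube(shape=(100, 25, 25), amp=1., sigma=8.,
                                     noise=None,
                                     spec_scale=-1. * u.km / u.s,
                                     v0=50.)

# The sign handling added in analysis_utilities.py lets this work for both
# increasing and decreasing spectral axes.
stacked = stack_spectra(cube, vels, v0=50. * u.km / u.s,
                        stack_function=np.nanmean,
                        xy_posns=None, num_cores=1, chunk_size=-1,
                        progressbar=False, pad_edges=False)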